diff --git a/.agent/skills b/.agent/skills deleted file mode 120000 index 454b8427cd..0000000000 --- a/.agent/skills +++ /dev/null @@ -1 +0,0 @@ -../.claude/skills \ No newline at end of file diff --git a/.agent/skills/component-refactoring b/.agent/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.agent/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.agent/skills/frontend-code-review b/.agent/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.agent/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.agent/skills/frontend-testing b/.agent/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.agent/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.agent/skills/orpc-contract-first b/.agent/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.agent/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.agent/skills/skill-creator b/.agent/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.agent/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.agent/skills/vercel-react-best-practices b/.agent/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.agent/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.agent/skills/web-design-guidelines b/.agent/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.agent/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.claude/skills/component-refactoring/SKILL.md b/.agents/skills/component-refactoring/SKILL.md similarity index 100% rename from .claude/skills/component-refactoring/SKILL.md rename to .agents/skills/component-refactoring/SKILL.md diff --git a/.claude/skills/component-refactoring/references/complexity-patterns.md b/.agents/skills/component-refactoring/references/complexity-patterns.md similarity index 100% rename from .claude/skills/component-refactoring/references/complexity-patterns.md rename to .agents/skills/component-refactoring/references/complexity-patterns.md diff --git a/.claude/skills/component-refactoring/references/component-splitting.md b/.agents/skills/component-refactoring/references/component-splitting.md similarity index 100% rename from .claude/skills/component-refactoring/references/component-splitting.md rename to .agents/skills/component-refactoring/references/component-splitting.md diff --git a/.claude/skills/component-refactoring/references/hook-extraction.md b/.agents/skills/component-refactoring/references/hook-extraction.md similarity index 100% rename from .claude/skills/component-refactoring/references/hook-extraction.md rename to .agents/skills/component-refactoring/references/hook-extraction.md diff --git a/.claude/skills/frontend-code-review/SKILL.md b/.agents/skills/frontend-code-review/SKILL.md similarity index 100% rename from .claude/skills/frontend-code-review/SKILL.md rename to 
.agents/skills/frontend-code-review/SKILL.md diff --git a/.claude/skills/frontend-code-review/references/business-logic.md b/.agents/skills/frontend-code-review/references/business-logic.md similarity index 100% rename from .claude/skills/frontend-code-review/references/business-logic.md rename to .agents/skills/frontend-code-review/references/business-logic.md diff --git a/.claude/skills/frontend-code-review/references/code-quality.md b/.agents/skills/frontend-code-review/references/code-quality.md similarity index 100% rename from .claude/skills/frontend-code-review/references/code-quality.md rename to .agents/skills/frontend-code-review/references/code-quality.md diff --git a/.claude/skills/frontend-code-review/references/performance.md b/.agents/skills/frontend-code-review/references/performance.md similarity index 100% rename from .claude/skills/frontend-code-review/references/performance.md rename to .agents/skills/frontend-code-review/references/performance.md diff --git a/.claude/skills/frontend-testing/SKILL.md b/.agents/skills/frontend-testing/SKILL.md similarity index 100% rename from .claude/skills/frontend-testing/SKILL.md rename to .agents/skills/frontend-testing/SKILL.md diff --git a/.claude/skills/frontend-testing/assets/component-test.template.tsx b/.agents/skills/frontend-testing/assets/component-test.template.tsx similarity index 100% rename from .claude/skills/frontend-testing/assets/component-test.template.tsx rename to .agents/skills/frontend-testing/assets/component-test.template.tsx diff --git a/.claude/skills/frontend-testing/assets/hook-test.template.ts b/.agents/skills/frontend-testing/assets/hook-test.template.ts similarity index 100% rename from .claude/skills/frontend-testing/assets/hook-test.template.ts rename to .agents/skills/frontend-testing/assets/hook-test.template.ts diff --git a/.claude/skills/frontend-testing/assets/utility-test.template.ts b/.agents/skills/frontend-testing/assets/utility-test.template.ts similarity index 100% rename from .claude/skills/frontend-testing/assets/utility-test.template.ts rename to .agents/skills/frontend-testing/assets/utility-test.template.ts diff --git a/.claude/skills/frontend-testing/references/async-testing.md b/.agents/skills/frontend-testing/references/async-testing.md similarity index 100% rename from .claude/skills/frontend-testing/references/async-testing.md rename to .agents/skills/frontend-testing/references/async-testing.md diff --git a/.claude/skills/frontend-testing/references/checklist.md b/.agents/skills/frontend-testing/references/checklist.md similarity index 100% rename from .claude/skills/frontend-testing/references/checklist.md rename to .agents/skills/frontend-testing/references/checklist.md diff --git a/.claude/skills/frontend-testing/references/common-patterns.md b/.agents/skills/frontend-testing/references/common-patterns.md similarity index 100% rename from .claude/skills/frontend-testing/references/common-patterns.md rename to .agents/skills/frontend-testing/references/common-patterns.md diff --git a/.claude/skills/frontend-testing/references/domain-components.md b/.agents/skills/frontend-testing/references/domain-components.md similarity index 100% rename from .claude/skills/frontend-testing/references/domain-components.md rename to .agents/skills/frontend-testing/references/domain-components.md diff --git a/.claude/skills/frontend-testing/references/mocking.md b/.agents/skills/frontend-testing/references/mocking.md similarity index 100% rename from 
.claude/skills/frontend-testing/references/mocking.md rename to .agents/skills/frontend-testing/references/mocking.md diff --git a/.claude/skills/frontend-testing/references/workflow.md b/.agents/skills/frontend-testing/references/workflow.md similarity index 100% rename from .claude/skills/frontend-testing/references/workflow.md rename to .agents/skills/frontend-testing/references/workflow.md diff --git a/.claude/skills/orpc-contract-first/SKILL.md b/.agents/skills/orpc-contract-first/SKILL.md similarity index 100% rename from .claude/skills/orpc-contract-first/SKILL.md rename to .agents/skills/orpc-contract-first/SKILL.md diff --git a/.claude/skills/skill-creator/SKILL.md b/.agents/skills/skill-creator/SKILL.md similarity index 100% rename from .claude/skills/skill-creator/SKILL.md rename to .agents/skills/skill-creator/SKILL.md diff --git a/.claude/skills/skill-creator/references/output-patterns.md b/.agents/skills/skill-creator/references/output-patterns.md similarity index 100% rename from .claude/skills/skill-creator/references/output-patterns.md rename to .agents/skills/skill-creator/references/output-patterns.md diff --git a/.claude/skills/skill-creator/references/workflows.md b/.agents/skills/skill-creator/references/workflows.md similarity index 100% rename from .claude/skills/skill-creator/references/workflows.md rename to .agents/skills/skill-creator/references/workflows.md diff --git a/.claude/skills/skill-creator/scripts/init_skill.py b/.agents/skills/skill-creator/scripts/init_skill.py similarity index 100% rename from .claude/skills/skill-creator/scripts/init_skill.py rename to .agents/skills/skill-creator/scripts/init_skill.py diff --git a/.claude/skills/skill-creator/scripts/package_skill.py b/.agents/skills/skill-creator/scripts/package_skill.py similarity index 100% rename from .claude/skills/skill-creator/scripts/package_skill.py rename to .agents/skills/skill-creator/scripts/package_skill.py diff --git a/.claude/skills/skill-creator/scripts/quick_validate.py b/.agents/skills/skill-creator/scripts/quick_validate.py similarity index 100% rename from .claude/skills/skill-creator/scripts/quick_validate.py rename to .agents/skills/skill-creator/scripts/quick_validate.py diff --git a/.claude/skills/vercel-react-best-practices/AGENTS.md b/.agents/skills/vercel-react-best-practices/AGENTS.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/AGENTS.md rename to .agents/skills/vercel-react-best-practices/AGENTS.md diff --git a/.claude/skills/vercel-react-best-practices/SKILL.md b/.agents/skills/vercel-react-best-practices/SKILL.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/SKILL.md rename to .agents/skills/vercel-react-best-practices/SKILL.md diff --git a/.claude/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md b/.agents/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md rename to .agents/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md diff --git a/.claude/skills/vercel-react-best-practices/rules/advanced-use-latest.md b/.agents/skills/vercel-react-best-practices/rules/advanced-use-latest.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/advanced-use-latest.md rename to .agents/skills/vercel-react-best-practices/rules/advanced-use-latest.md diff --git 
a/.claude/skills/vercel-react-best-practices/rules/async-api-routes.md b/.agents/skills/vercel-react-best-practices/rules/async-api-routes.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/async-api-routes.md rename to .agents/skills/vercel-react-best-practices/rules/async-api-routes.md diff --git a/.claude/skills/vercel-react-best-practices/rules/async-defer-await.md b/.agents/skills/vercel-react-best-practices/rules/async-defer-await.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/async-defer-await.md rename to .agents/skills/vercel-react-best-practices/rules/async-defer-await.md diff --git a/.claude/skills/vercel-react-best-practices/rules/async-dependencies.md b/.agents/skills/vercel-react-best-practices/rules/async-dependencies.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/async-dependencies.md rename to .agents/skills/vercel-react-best-practices/rules/async-dependencies.md diff --git a/.claude/skills/vercel-react-best-practices/rules/async-parallel.md b/.agents/skills/vercel-react-best-practices/rules/async-parallel.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/async-parallel.md rename to .agents/skills/vercel-react-best-practices/rules/async-parallel.md diff --git a/.claude/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md b/.agents/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md rename to .agents/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md diff --git a/.claude/skills/vercel-react-best-practices/rules/bundle-barrel-imports.md b/.agents/skills/vercel-react-best-practices/rules/bundle-barrel-imports.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/bundle-barrel-imports.md rename to .agents/skills/vercel-react-best-practices/rules/bundle-barrel-imports.md diff --git a/.claude/skills/vercel-react-best-practices/rules/bundle-conditional.md b/.agents/skills/vercel-react-best-practices/rules/bundle-conditional.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/bundle-conditional.md rename to .agents/skills/vercel-react-best-practices/rules/bundle-conditional.md diff --git a/.claude/skills/vercel-react-best-practices/rules/bundle-defer-third-party.md b/.agents/skills/vercel-react-best-practices/rules/bundle-defer-third-party.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/bundle-defer-third-party.md rename to .agents/skills/vercel-react-best-practices/rules/bundle-defer-third-party.md diff --git a/.claude/skills/vercel-react-best-practices/rules/bundle-dynamic-imports.md b/.agents/skills/vercel-react-best-practices/rules/bundle-dynamic-imports.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/bundle-dynamic-imports.md rename to .agents/skills/vercel-react-best-practices/rules/bundle-dynamic-imports.md diff --git a/.claude/skills/vercel-react-best-practices/rules/bundle-preload.md b/.agents/skills/vercel-react-best-practices/rules/bundle-preload.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/bundle-preload.md rename to .agents/skills/vercel-react-best-practices/rules/bundle-preload.md diff --git a/.claude/skills/vercel-react-best-practices/rules/client-event-listeners.md 
b/.agents/skills/vercel-react-best-practices/rules/client-event-listeners.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/client-event-listeners.md rename to .agents/skills/vercel-react-best-practices/rules/client-event-listeners.md diff --git a/.claude/skills/vercel-react-best-practices/rules/client-localstorage-schema.md b/.agents/skills/vercel-react-best-practices/rules/client-localstorage-schema.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/client-localstorage-schema.md rename to .agents/skills/vercel-react-best-practices/rules/client-localstorage-schema.md diff --git a/.claude/skills/vercel-react-best-practices/rules/client-passive-event-listeners.md b/.agents/skills/vercel-react-best-practices/rules/client-passive-event-listeners.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/client-passive-event-listeners.md rename to .agents/skills/vercel-react-best-practices/rules/client-passive-event-listeners.md diff --git a/.claude/skills/vercel-react-best-practices/rules/client-swr-dedup.md b/.agents/skills/vercel-react-best-practices/rules/client-swr-dedup.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/client-swr-dedup.md rename to .agents/skills/vercel-react-best-practices/rules/client-swr-dedup.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-batch-dom-css.md b/.agents/skills/vercel-react-best-practices/rules/js-batch-dom-css.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-batch-dom-css.md rename to .agents/skills/vercel-react-best-practices/rules/js-batch-dom-css.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-cache-function-results.md b/.agents/skills/vercel-react-best-practices/rules/js-cache-function-results.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-cache-function-results.md rename to .agents/skills/vercel-react-best-practices/rules/js-cache-function-results.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-cache-property-access.md b/.agents/skills/vercel-react-best-practices/rules/js-cache-property-access.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-cache-property-access.md rename to .agents/skills/vercel-react-best-practices/rules/js-cache-property-access.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-cache-storage.md b/.agents/skills/vercel-react-best-practices/rules/js-cache-storage.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-cache-storage.md rename to .agents/skills/vercel-react-best-practices/rules/js-cache-storage.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-combine-iterations.md b/.agents/skills/vercel-react-best-practices/rules/js-combine-iterations.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-combine-iterations.md rename to .agents/skills/vercel-react-best-practices/rules/js-combine-iterations.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-early-exit.md b/.agents/skills/vercel-react-best-practices/rules/js-early-exit.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-early-exit.md rename to .agents/skills/vercel-react-best-practices/rules/js-early-exit.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-hoist-regexp.md 
b/.agents/skills/vercel-react-best-practices/rules/js-hoist-regexp.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-hoist-regexp.md rename to .agents/skills/vercel-react-best-practices/rules/js-hoist-regexp.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-index-maps.md b/.agents/skills/vercel-react-best-practices/rules/js-index-maps.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-index-maps.md rename to .agents/skills/vercel-react-best-practices/rules/js-index-maps.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-length-check-first.md b/.agents/skills/vercel-react-best-practices/rules/js-length-check-first.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-length-check-first.md rename to .agents/skills/vercel-react-best-practices/rules/js-length-check-first.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-min-max-loop.md b/.agents/skills/vercel-react-best-practices/rules/js-min-max-loop.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-min-max-loop.md rename to .agents/skills/vercel-react-best-practices/rules/js-min-max-loop.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-set-map-lookups.md b/.agents/skills/vercel-react-best-practices/rules/js-set-map-lookups.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-set-map-lookups.md rename to .agents/skills/vercel-react-best-practices/rules/js-set-map-lookups.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-tosorted-immutable.md b/.agents/skills/vercel-react-best-practices/rules/js-tosorted-immutable.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-tosorted-immutable.md rename to .agents/skills/vercel-react-best-practices/rules/js-tosorted-immutable.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-activity.md b/.agents/skills/vercel-react-best-practices/rules/rendering-activity.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-activity.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-activity.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-animate-svg-wrapper.md b/.agents/skills/vercel-react-best-practices/rules/rendering-animate-svg-wrapper.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-animate-svg-wrapper.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-animate-svg-wrapper.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-conditional-render.md b/.agents/skills/vercel-react-best-practices/rules/rendering-conditional-render.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-conditional-render.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-conditional-render.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-content-visibility.md b/.agents/skills/vercel-react-best-practices/rules/rendering-content-visibility.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-content-visibility.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-content-visibility.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-hoist-jsx.md 
b/.agents/skills/vercel-react-best-practices/rules/rendering-hoist-jsx.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-hoist-jsx.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-hoist-jsx.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md b/.agents/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-svg-precision.md b/.agents/skills/vercel-react-best-practices/rules/rendering-svg-precision.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-svg-precision.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-svg-precision.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-defer-reads.md b/.agents/skills/vercel-react-best-practices/rules/rerender-defer-reads.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-defer-reads.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-defer-reads.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-dependencies.md b/.agents/skills/vercel-react-best-practices/rules/rerender-dependencies.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-dependencies.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-dependencies.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-derived-state.md b/.agents/skills/vercel-react-best-practices/rules/rerender-derived-state.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-derived-state.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-derived-state.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md b/.agents/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md b/.agents/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-memo.md b/.agents/skills/vercel-react-best-practices/rules/rerender-memo.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-memo.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-memo.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-transitions.md b/.agents/skills/vercel-react-best-practices/rules/rerender-transitions.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-transitions.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-transitions.md diff --git 
a/.claude/skills/vercel-react-best-practices/rules/server-after-nonblocking.md b/.agents/skills/vercel-react-best-practices/rules/server-after-nonblocking.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/server-after-nonblocking.md rename to .agents/skills/vercel-react-best-practices/rules/server-after-nonblocking.md diff --git a/.claude/skills/vercel-react-best-practices/rules/server-cache-lru.md b/.agents/skills/vercel-react-best-practices/rules/server-cache-lru.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/server-cache-lru.md rename to .agents/skills/vercel-react-best-practices/rules/server-cache-lru.md diff --git a/.claude/skills/vercel-react-best-practices/rules/server-cache-react.md b/.agents/skills/vercel-react-best-practices/rules/server-cache-react.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/server-cache-react.md rename to .agents/skills/vercel-react-best-practices/rules/server-cache-react.md diff --git a/.claude/skills/vercel-react-best-practices/rules/server-parallel-fetching.md b/.agents/skills/vercel-react-best-practices/rules/server-parallel-fetching.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/server-parallel-fetching.md rename to .agents/skills/vercel-react-best-practices/rules/server-parallel-fetching.md diff --git a/.claude/skills/vercel-react-best-practices/rules/server-serialization.md b/.agents/skills/vercel-react-best-practices/rules/server-serialization.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/server-serialization.md rename to .agents/skills/vercel-react-best-practices/rules/server-serialization.md diff --git a/.agents/skills/web-design-guidelines/SKILL.md b/.agents/skills/web-design-guidelines/SKILL.md new file mode 100644 index 0000000000..ceae92ab31 --- /dev/null +++ b/.agents/skills/web-design-guidelines/SKILL.md @@ -0,0 +1,39 @@ +--- +name: web-design-guidelines +description: Review UI code for Web Interface Guidelines compliance. Use when asked to "review my UI", "check accessibility", "audit design", "review UX", or "check my site against best practices". +metadata: + author: vercel + version: "1.0.0" + argument-hint: +--- + +# Web Interface Guidelines + +Review files for compliance with Web Interface Guidelines. + +## How It Works + +1. Fetch the latest guidelines from the source URL below +2. Read the specified files (or prompt user for files/pattern) +3. Check against all rules in the fetched guidelines +4. Output findings in the terse `file:line` format + +## Guidelines Source + +Fetch fresh guidelines before each review: + +``` +https://raw.githubusercontent.com/vercel-labs/web-interface-guidelines/main/command.md +``` + +Use WebFetch to retrieve the latest rules. The fetched content contains all the rules and output format instructions. + +## Usage + +When a user provides a file or pattern argument: +1. Fetch guidelines from the source URL above +2. Read the specified files +3. Apply all rules from the fetched guidelines +4. Output findings using the format specified in the guidelines + +If no files specified, ask the user which files to review. 
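For reference, a minimal sketch of the "fetch fresh guidelines" step the new SKILL.md describes, using only the Python standard library. The URL is the one listed in the skill; the helper name and timeout are illustrative assumptions, not part of the skill itself.

```python
# Minimal sketch of the guideline-fetch step described in the SKILL.md above.
# The URL comes from the skill; the function name and timeout are assumptions.
from urllib.request import urlopen

GUIDELINES_URL = "https://raw.githubusercontent.com/vercel-labs/web-interface-guidelines/main/command.md"


def fetch_guidelines(url: str = GUIDELINES_URL, timeout: float = 10.0) -> str:
    """Return the latest Web Interface Guidelines as markdown text."""
    with urlopen(url, timeout=timeout) as response:
        return response.read().decode("utf-8")


if __name__ == "__main__":
    # Print the first few lines so a reviewer can confirm the fetch works.
    print("\n".join(fetch_guidelines().splitlines()[:5]))
```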
diff --git a/.claude/skills/component-refactoring b/.claude/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.claude/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.claude/skills/frontend-code-review b/.claude/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.claude/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.claude/skills/frontend-testing b/.claude/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.claude/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.claude/skills/orpc-contract-first b/.claude/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.claude/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.claude/skills/skill-creator b/.claude/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.claude/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.claude/skills/vercel-react-best-practices b/.claude/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.claude/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.claude/skills/web-design-guidelines b/.claude/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.claude/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.codex/skills b/.codex/skills deleted file mode 120000 index 454b8427cd..0000000000 --- a/.codex/skills +++ /dev/null @@ -1 +0,0 @@ -../.claude/skills \ No newline at end of file diff --git a/.codex/skills/component-refactoring b/.codex/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.codex/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.codex/skills/frontend-code-review b/.codex/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.codex/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.codex/skills/frontend-testing b/.codex/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.codex/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.codex/skills/orpc-contract-first b/.codex/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.codex/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.codex/skills/skill-creator b/.codex/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.codex/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.codex/skills/vercel-react-best-practices 
b/.codex/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.codex/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.codex/skills/web-design-guidelines b/.codex/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.codex/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.cursor/skills/component-refactoring b/.cursor/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.cursor/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.cursor/skills/frontend-code-review b/.cursor/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.cursor/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.cursor/skills/frontend-testing b/.cursor/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.cursor/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.cursor/skills/orpc-contract-first b/.cursor/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.cursor/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.cursor/skills/skill-creator b/.cursor/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.cursor/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.cursor/skills/vercel-react-best-practices b/.cursor/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.cursor/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.cursor/skills/web-design-guidelines b/.cursor/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.cursor/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.gemini/skills/component-refactoring b/.gemini/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.gemini/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.gemini/skills/frontend-code-review b/.gemini/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.gemini/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.gemini/skills/frontend-testing b/.gemini/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.gemini/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.gemini/skills/orpc-contract-first b/.gemini/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.gemini/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of 
file diff --git a/.gemini/skills/skill-creator b/.gemini/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.gemini/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.gemini/skills/vercel-react-best-practices b/.gemini/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.gemini/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.gemini/skills/web-design-guidelines b/.gemini/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.gemini/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.github/skills/component-refactoring b/.github/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.github/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.github/skills/frontend-code-review b/.github/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.github/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.github/skills/frontend-testing b/.github/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.github/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.github/skills/orpc-contract-first b/.github/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.github/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.github/skills/skill-creator b/.github/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.github/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.github/skills/vercel-react-best-practices b/.github/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.github/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.github/skills/web-design-guidelines b/.github/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.github/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml index ff006324bb..4571fd1cd1 100644 --- a/.github/workflows/autofix.yml +++ b/.github/workflows/autofix.yml @@ -82,6 +82,6 @@ jobs: # mdformat breaks YAML front matter in markdown files. Add --exclude for directories containing YAML front matter. - name: mdformat run: | - uvx --python 3.13 mdformat . --exclude ".claude/skills/**" + uvx --python 3.13 mdformat . 
--exclude ".agents/skills/**" - uses: autofix-ci/action@635ffb0c9798bd160680f18fd73371e355b85f27 diff --git a/api/agent-notes/controllers/console/datasets/datasets_document.py.md b/api/agent-notes/controllers/console/datasets/datasets_document.py.md deleted file mode 100644 index b100249981..0000000000 --- a/api/agent-notes/controllers/console/datasets/datasets_document.py.md +++ /dev/null @@ -1,52 +0,0 @@ -## Purpose - -`api/controllers/console/datasets/datasets_document.py` contains the console (authenticated) APIs for managing dataset documents (list/create/update/delete, processing controls, estimates, etc.). - -## Storage model (uploaded files) - -- For local file uploads into a knowledge base, the binary is stored via `extensions.ext_storage.storage` under the key: - - `upload_files//.` -- File metadata is stored in the `upload_files` table (`UploadFile` model), keyed by `UploadFile.id`. -- Dataset `Document` records reference the uploaded file via: - - `Document.data_source_info.upload_file_id` - -## Download endpoint - -- `GET /datasets//documents//download` - - - Only supported when `Document.data_source_type == "upload_file"`. - - Performs dataset permission + tenant checks via `DocumentResource.get_document(...)`. - - Delegates `Document -> UploadFile` validation and signed URL generation to `DocumentService.get_document_download_url(...)`. - - Applies `cloud_edition_billing_rate_limit_check("knowledge")` to match other KB operations. - - Response body is **only**: `{ "url": "" }`. - -- `POST /datasets//documents/download-zip` - - - Accepts `{ "document_ids": ["..."] }` (upload-file only). - - Returns `application/zip` as a single attachment download. - - Rationale: browsers often block multiple automatic downloads; a ZIP avoids that limitation. - - Applies `cloud_edition_billing_rate_limit_check("knowledge")`. - - Delegates dataset permission checks, document/upload-file validation, and download-name generation to - `DocumentService.prepare_document_batch_download_zip(...)` before streaming the ZIP. - -## Verification plan - -- Upload a document from a local file into a dataset. -- Call the download endpoint and confirm it returns a signed URL. -- Open the URL and confirm: - - Response headers force download (`Content-Disposition`), and - - Downloaded bytes match the uploaded file. -- Select multiple uploaded-file documents and download as ZIP; confirm all selected files exist in the archive. - -## Shared helper - -- `DocumentService.get_document_download_url(document)` resolves the `UploadFile` and signs a download URL. -- `DocumentService.prepare_document_batch_download_zip(...)` performs dataset permission checks, batches - document + upload file lookups, preserves request order, and generates the client-visible ZIP filename. -- Internal helpers now live in `DocumentService` (`_get_upload_file_id_for_upload_file_document(...)`, - `_get_upload_file_for_upload_file_document(...)`, `_get_upload_files_by_document_id_for_zip_download(...)`). -- ZIP packing is handled by `FileService.build_upload_files_zip_tempfile(...)`, which also: - - sanitizes entry names to avoid path traversal, and - - deduplicates names while preserving extensions (e.g., `doc.txt` → `doc (1).txt`). - Streaming the response and deferring cleanup is handled by the route via `send_file(path, ...)` + `ExitStack` + - `response.call_on_close(...)` (the file is deleted when the response is closed). 
diff --git a/api/agent-notes/services/dataset_service.py.md b/api/agent-notes/services/dataset_service.py.md deleted file mode 100644 index b68ef345f5..0000000000 --- a/api/agent-notes/services/dataset_service.py.md +++ /dev/null @@ -1,18 +0,0 @@ -## Purpose - -`api/services/dataset_service.py` hosts dataset/document service logic used by console and API controllers. - -## Batch document operations - -- Batch document workflows should avoid N+1 database queries by using set-based lookups. -- Tenant checks must be enforced consistently across dataset/document operations. -- `DocumentService.get_documents_by_ids(...)` fetches documents for a dataset using `id.in_(...)`. -- `FileService.get_upload_files_by_ids(...)` performs tenant-scoped batch lookup for `UploadFile` (dedupes ids with `set(...)`). -- `DocumentService.get_document_download_url(...)` and `prepare_document_batch_download_zip(...)` handle - dataset/document permission checks plus `Document -> UploadFile` validation for download endpoints. - -## Verification plan - -- Exercise document list and download endpoints that use the service helpers. -- Confirm batch download uses constant query count for documents + upload files. -- Request a ZIP with a missing document id and confirm a 404 is returned. diff --git a/api/agent-notes/services/file_service.py.md b/api/agent-notes/services/file_service.py.md deleted file mode 100644 index cf394a1c05..0000000000 --- a/api/agent-notes/services/file_service.py.md +++ /dev/null @@ -1,35 +0,0 @@ -## Purpose - -`api/services/file_service.py` owns business logic around `UploadFile` objects: upload validation, storage persistence, -previews/generators, and deletion. - -## Key invariants - -- All storage I/O goes through `extensions.ext_storage.storage`. -- Uploaded file keys follow: `upload_files//.`. -- Upload validation is enforced in `FileService.upload_file(...)` (blocked extensions, size limits, dataset-only types). - -## Batch lookup helpers - -- `FileService.get_upload_files_by_ids(tenant_id, upload_file_ids)` is the canonical tenant-scoped batch loader for - `UploadFile`. - -## Dataset document download helpers - -The dataset document download/ZIP endpoints now delegate “Document → UploadFile” validation and permission checks to -`DocumentService` (`api/services/dataset_service.py`). `FileService` stays focused on generic `UploadFile` operations -(uploading, previews, deletion), plus generic ZIP serving. - -### ZIP serving - -- `FileService.build_upload_files_zip_tempfile(...)` builds a ZIP from `UploadFile` objects and yields a seeked - tempfile **path** so callers can stream it (e.g., `send_file(path, ...)`) without hitting "read of closed file" - issues from file-handle lifecycle during streamed responses. -- Flask `send_file(...)` and the `ExitStack`/`call_on_close(...)` cleanup pattern are handled in the route layer. - -## Verification plan - -- Unit: `api/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py` - - Verify signed URL generation for upload-file documents and ZIP download behavior for multiple documents. -- Unit: `api/tests/unit_tests/services/test_file_service_zip_and_lookup.py` - - Verify ZIP packing produces a valid, openable archive and preserves file content. 
diff --git a/api/agent-notes/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py.md b/api/agent-notes/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py.md deleted file mode 100644 index 8f78dacde8..0000000000 --- a/api/agent-notes/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py.md +++ /dev/null @@ -1,28 +0,0 @@ -## Purpose - -Unit tests for the console dataset document download endpoint: - -- `GET /datasets//documents//download` - -## Testing approach - -- Uses `Flask.test_request_context()` and calls the `Resource.get(...)` method directly. -- Monkeypatches console decorators (`login_required`, `setup_required`, rate limit) to no-ops to keep the test focused. -- Mocks: - - `DatasetService.get_dataset` / `check_dataset_permission` - - `DocumentService.get_document` for single-file download tests - - `DocumentService.get_documents_by_ids` + `FileService.get_upload_files_by_ids` for ZIP download tests - - `FileService.get_upload_files_by_ids` for `UploadFile` lookups in single-file tests - - `services.dataset_service.file_helpers.get_signed_file_url` to return a deterministic URL -- Document mocks include `id` fields so batch lookups can map documents by id. - -## Covered cases - -- Success returns `{ "url": "" }` for upload-file documents. -- 404 when document is not `upload_file`. -- 404 when `upload_file_id` is missing. -- 404 when referenced `UploadFile` row does not exist. -- 403 when document tenant does not match current tenant. -- Batch ZIP download returns `application/zip` for upload-file documents. -- Batch ZIP download rejects non-upload-file documents. -- Batch ZIP download uses a random `.zip` attachment name (`download_name`), so tests only assert the suffix. diff --git a/api/agent-notes/tests/unit_tests/services/test_file_service_zip_and_lookup.py.md b/api/agent-notes/tests/unit_tests/services/test_file_service_zip_and_lookup.py.md deleted file mode 100644 index dbcdf26f10..0000000000 --- a/api/agent-notes/tests/unit_tests/services/test_file_service_zip_and_lookup.py.md +++ /dev/null @@ -1,18 +0,0 @@ -## Purpose - -Unit tests for `api/services/file_service.py` helper methods that are not covered by higher-level controller tests. - -## What’s covered - -- `FileService.build_upload_files_zip_tempfile(...)` - - ZIP entry name sanitization (no directory components / traversal) - - name deduplication while preserving extensions - - writing streamed bytes from `storage.load(...)` into ZIP entries - - yields a tempfile path so callers can open/stream the ZIP without holding a live file handle -- `FileService.get_upload_files_by_ids(...)` - - returns `{}` for empty id lists - - returns an id-keyed mapping for non-empty lists - -## Notes - -- These tests intentionally stub `storage.load` and `db.session.scalars(...).all()` to avoid needing a real DB/storage. diff --git a/api/agent_skills/infra.md b/api/agent_skills/infra.md deleted file mode 100644 index bc36c7bf64..0000000000 --- a/api/agent_skills/infra.md +++ /dev/null @@ -1,96 +0,0 @@ -## Configuration - -- Import `configs.dify_config` for every runtime toggle. Do not read environment variables directly. -- Add new settings to the proper mixin inside `configs/` (deployment, feature, middleware, etc.) so they load through `DifyConfig`. -- Remote overrides come from the optional providers in `configs/remote_settings_sources`; keep defaults in code safe when the value is missing. 
-- Example: logging pulls targets from `extensions/ext_logging.py`, and model provider URLs are assembled in `services/entities/model_provider_entities.py`. - -## Dependencies - -- Runtime dependencies live in `[project].dependencies` inside `pyproject.toml`. Optional clients go into the `storage`, `tools`, or `vdb` groups under `[dependency-groups]`. -- Always pin versions and keep the list alphabetised. Shared tooling (lint, typing, pytest) belongs in the `dev` group. -- When code needs a new package, explain why in the PR and run `uv lock` so the lockfile stays current. - -## Storage & Files - -- Use `extensions.ext_storage.storage` for all blob IO; it already respects the configured backend. -- Convert files for workflows with helpers in `core/file/file_manager.py`; they handle signed URLs and multimodal payloads. -- When writing controller logic, delegate upload quotas and metadata to `services/file_service.py` instead of touching storage directly. -- All outbound HTTP fetches (webhooks, remote files) must go through the SSRF-safe client in `core/helper/ssrf_proxy.py`; it wraps `httpx` with the allow/deny rules configured for the platform. - -## Redis & Shared State - -- Access Redis through `extensions.ext_redis.redis_client`. For locking, reuse `redis_client.lock`. -- Prefer higher-level helpers when available: rate limits use `libs.helper.RateLimiter`, provider metadata uses caches in `core/helper/provider_cache.py`. - -## Models - -- SQLAlchemy models sit in `models/` and inherit from the shared declarative `Base` defined in `models/base.py` (metadata configured via `models/engine.py`). -- `models/__init__.py` exposes grouped aggregates: account/tenant models, app and conversation tables, datasets, providers, workflow runs, triggers, etc. Import from there to avoid deep path churn. -- Follow the DDD boundary: persistence objects live in `models/`, repositories under `repositories/` translate them into domain entities, and services consume those repositories. -- When adding a table, create the model class, register it in `models/__init__.py`, wire a repository if needed, and generate an Alembic migration as described below. - -## Vector Stores - -- Vector client implementations live in `core/rag/datasource/vdb/`, with a common factory in `core/rag/datasource/vdb/vector_factory.py` and enums in `core/rag/datasource/vdb/vector_type.py`. -- Retrieval pipelines call these providers through `core/rag/datasource/retrieval_service.py` and dataset ingestion flows in `services/dataset_service.py`. -- The CLI helper `flask vdb-migrate` orchestrates bulk migrations using routines in `commands.py`; reuse that pattern when adding new backend transitions. -- To add another store, mirror the provider layout, register it with the factory, and include any schema changes in Alembic migrations. - -## Observability & OTEL - -- OpenTelemetry settings live under the observability mixin in `configs/observability`. Toggle exporters and sampling via `dify_config`, not ad-hoc env reads. -- HTTP, Celery, Redis, SQLAlchemy, and httpx instrumentation is initialised in `extensions/ext_app_metrics.py` and `extensions/ext_request_logging.py`; reuse these hooks when adding new workers or entrypoints. -- When creating background tasks or external calls, propagate tracing context with helpers in the existing instrumented clients (e.g. use the shared `httpx` session from `core/helper/http_client_pooling.py`). 
-- If you add a new external integration, ensure spans and metrics are emitted by wiring the appropriate OTEL instrumentation package in `pyproject.toml` and configuring it in `extensions/`. - -## Ops Integrations - -- Langfuse support and other tracing bridges live under `core/ops/opik_trace`. Config toggles sit in `configs/observability`, while exporters are initialised in the OTEL extensions mentioned above. -- External monitoring services should follow this pattern: keep client code in `core/ops`, expose switches via `dify_config`, and hook initialisation in `extensions/ext_app_metrics.py` or sibling modules. -- Before instrumenting new code paths, check whether existing context helpers (e.g. `extensions/ext_request_logging.py`) already capture the necessary metadata. - -## Controllers, Services, Core - -- Controllers only parse HTTP input and call a service method. Keep business rules in `services/`. -- Services enforce tenant rules, quotas, and orchestration, then call into `core/` engines (workflow execution, tools, LLMs). -- When adding a new endpoint, search for an existing service to extend before introducing a new layer. Example: workflow APIs pipe through `services/workflow_service.py` into `core/workflow`. - -## Plugins, Tools, Providers - -- In Dify a plugin is a tenant-installable bundle that declares one or more providers (tool, model, datasource, trigger, endpoint, agent strategy) plus its resource needs and version metadata. The manifest (`core/plugin/entities/plugin.py`) mirrors what you see in the marketplace documentation. -- Installation, upgrades, and migrations are orchestrated by `services/plugin/plugin_service.py` together with helpers such as `services/plugin/plugin_migration.py`. -- Runtime loading happens through the implementations under `core/plugin/impl/*` (tool/model/datasource/trigger/endpoint/agent). These modules normalise plugin providers so that downstream systems (`core/tools/tool_manager.py`, `services/model_provider_service.py`, `services/trigger/*`) can treat builtin and plugin capabilities the same way. -- For remote execution, plugin daemons (`core/plugin/entities/plugin_daemon.py`, `core/plugin/impl/plugin.py`) manage lifecycle hooks, credential forwarding, and background workers that keep plugin processes in sync with the main application. -- Acquire tool implementations through `core/tools/tool_manager.py`; it resolves builtin, plugin, and workflow-as-tool providers uniformly, injecting the right context (tenant, credentials, runtime config). -- To add a new plugin capability, extend the relevant `core/plugin/entities` schema and register the implementation in the matching `core/plugin/impl` module rather than importing the provider directly. - -## Async Workloads - -see `agent_skills/trigger.md` for more detailed documentation. - -- Enqueue background work through `services/async_workflow_service.py`. It routes jobs to the tiered Celery queues defined in `tasks/`. -- Workers boot from `celery_entrypoint.py` and execute functions in `tasks/workflow_execution_tasks.py`, `tasks/trigger_processing_tasks.py`, etc. -- Scheduled workflows poll from `schedule/workflow_schedule_tasks.py`. Follow the same pattern if you need new periodic jobs. - -## Database & Migrations - -- SQLAlchemy models live under `models/` and map directly to migration files in `migrations/versions`. -- Generate migrations with `uv run --project api flask db revision --autogenerate -m ""`, then review the diff; never hand-edit the database outside Alembic. 
-- Apply migrations locally using `uv run --project api flask db upgrade`; production deploys expect the same history. -- If you add tenant-scoped data, confirm the upgrade includes tenant filters or defaults consistent with the service logic touching those tables. - -## CLI Commands - -- Maintenance commands from `commands.py` are registered on the Flask CLI. Run them via `uv run --project api flask `. -- Use the built-in `db` commands from Flask-Migrate for schema operations (`flask db upgrade`, `flask db stamp`, etc.). Only fall back to custom helpers if you need their extra behaviour. -- Custom entries such as `flask reset-password`, `flask reset-email`, and `flask vdb-migrate` handle self-hosted account recovery and vector database migrations. -- Before adding a new command, check whether an existing service can be reused and ensure the command guards edition-specific behaviour (many enforce `SELF_HOSTED`). Document any additions in the PR. -- Ruff helpers are run directly with `uv`: `uv run --project api --dev ruff format ./api` for formatting and `uv run --project api --dev ruff check ./api` (add `--fix` if you want automatic fixes). - -## When You Add Features - -- Check for an existing helper or service before writing a new util. -- Uphold tenancy: every service method should receive the tenant ID from controller wrappers such as `controllers/console/wraps.py`. -- Update or create tests alongside behaviour changes (`tests/unit_tests` for fast coverage, `tests/integration_tests` when touching orchestrations). -- Run `uv run --project api --dev ruff check ./api`, `uv run --directory api --dev basedpyright`, and `uv run --project api --dev dev/pytest/pytest_unit_tests.sh` before submitting changes. diff --git a/api/agent_skills/plugin.md b/api/agent_skills/plugin.md deleted file mode 100644 index 954ddd236b..0000000000 --- a/api/agent_skills/plugin.md +++ /dev/null @@ -1 +0,0 @@ -// TBD diff --git a/api/agent_skills/plugin_oauth.md b/api/agent_skills/plugin_oauth.md deleted file mode 100644 index 954ddd236b..0000000000 --- a/api/agent_skills/plugin_oauth.md +++ /dev/null @@ -1 +0,0 @@ -// TBD diff --git a/api/agent_skills/trigger.md b/api/agent_skills/trigger.md deleted file mode 100644 index f4b076332c..0000000000 --- a/api/agent_skills/trigger.md +++ /dev/null @@ -1,53 +0,0 @@ -## Overview - -Trigger is a collection of nodes that we called `Start` nodes, also, the concept of `Start` is the same as `RootNode` in the workflow engine `core/workflow/graph_engine`, On the other hand, `Start` node is the entry point of workflows, every workflow run always starts from a `Start` node. - -## Trigger nodes - -- `UserInput` -- `Trigger Webhook` -- `Trigger Schedule` -- `Trigger Plugin` - -### UserInput - -Before `Trigger` concept is introduced, it's what we called `Start` node, but now, to avoid confusion, it was renamed to `UserInput` node, has a strong relation with `ServiceAPI` in `controllers/service_api/app` - -1. `UserInput` node introduces a list of arguments that need to be provided by the user, finally it will be converted into variables in the workflow variable pool. -1. `ServiceAPI` accept those arguments, and pass through them into `UserInput` node. -1. 
For its detailed implementation, please refer to `core/workflow/nodes/start`. - -### Trigger Webhook - -Inside the Webhook node, Dify provides a UI panel that lets the user define an HTTP manifest (`core/workflow/nodes/trigger_webhook/entities.py`.`WebhookData`). Dify also generates a random webhook id for each `Trigger Webhook` node; the implementation lives in `core/trigger/utils/endpoint.py`. `webhook-debug` is the debug mode for webhooks and is handled in `controllers/trigger/webhook.py`. - -Finally, requests to the `webhook` endpoint are converted into variables in the workflow variable pool during workflow execution. - -### Trigger Schedule - -The `Trigger Schedule` node lets the user define a schedule that triggers the workflow; the detailed manifest is in `core/workflow/nodes/trigger_schedule/entities.py`. A poller and an executor handle millions of schedules, see `docker/entrypoint.sh` and `schedule/workflow_schedule_task.py`. - -To achieve this, a `WorkflowSchedulePlan` model was introduced in `models/trigger.py`, and `events/event_handlers/sync_workflow_schedule_when_app_published.py` syncs workflow schedule plans when an app is published. - -### Trigger Plugin - -The `Trigger Plugin` node lets users define their own distributed trigger plugin: whenever a request is received, Dify forwards it to the plugin and waits for the parsed variables. - -1. Requests are saved in storage by `services/trigger/trigger_request_service.py` and referenced by `services/trigger/trigger_service.py`.`TriggerService`.`process_endpoint`. -1. Plugins accept those requests and parse variables from them; see `core/plugin/impl/trigger.py` for details. - -Dify also introduces a `subscription` concept: an endpoint address from Dify is bound to a third-party webhook service such as `Github`, `Slack`, `Linear`, `GoogleDrive`, or `Gmail`. Once a subscription is created, Dify continually receives requests from the platforms and handles them one by one. - -## Worker Pool / Async Task - -Every event that triggers a new workflow run is handled asynchronously; the unified entrypoint is `services/async_workflow_service.py`.`AsyncWorkflowService`.`trigger_workflow_async`. - -The infrastructure is `celery`, configured in `docker/entrypoint.sh`; the consumers live in `tasks/async_workflow_tasks.py`. Three queues handle the different user tiers: `PROFESSIONAL_QUEUE`, `TEAM_QUEUE`, and `SANDBOX_QUEUE`. - -## Debug Strategy - -Dify divides users into two groups: builders and end users. - -Builders are the users who create workflows, so debugging a workflow is a critical part of workflow development. As the start nodes of workflows, trigger nodes can `listen` to events from `WebhookDebug`, `Schedule`, and `Plugin`; the debugging process is implemented in `controllers/console/app/workflow.py`.`DraftWorkflowTriggerNodeApi`. - -A polling process is a sequence of single `poll` operations; each `poll` fetches events cached in `Redis` and returns `None` if no event is found. In more detail, `core/trigger/debug/event_bus.py` handles the polling process, and `core/trigger/debug/event_selectors.py` selects the event poller based on the trigger type. 
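To make that debug-polling contract concrete, here is a minimal sketch, assuming one Redis list of JSON-encoded events per draft trigger node. The key layout, helper names, and timeout defaults are illustrative assumptions, not the actual `core/trigger/debug/event_bus.py` API.

```python
import json
import time

from extensions.ext_redis import redis_client

# Hypothetical key layout; the real one lives in core/trigger/debug/event_bus.py.
DEBUG_EVENT_KEY = "trigger:debug:events:{node_id}"


def poll_debug_event(node_id: str) -> dict | None:
    """Single poll: pop one cached debug event for the node, or return None if the list is empty."""
    raw = redis_client.lpop(DEBUG_EVENT_KEY.format(node_id=node_id))
    return json.loads(raw) if raw else None


def wait_for_debug_event(node_id: str, timeout: float = 30.0, interval: float = 1.0) -> dict | None:
    """Polling process: repeat single polls until an event arrives or the timeout expires."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        event = poll_debug_event(node_id)
        if event is not None:
            return event
        time.sleep(interval)
    return None
```

Returning `None` from an empty poll matches the behaviour described above: the console request stays short-lived and the client simply polls again.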
diff --git a/api/context/flask_app_context.py b/api/context/flask_app_context.py index 4b693cd91f..360be16beb 100644 --- a/api/context/flask_app_context.py +++ b/api/context/flask_app_context.py @@ -9,7 +9,7 @@ from typing import Any, final from flask import Flask, current_app, g -from context import register_context_capturer +from core.workflow.context import register_context_capturer from core.workflow.context.execution_context import ( AppContext, IExecutionContext, diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index 8ceb896d4f..979ab63a58 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -146,6 +146,7 @@ class DatasetUpdatePayload(BaseModel): embedding_model: str | None = None embedding_model_provider: str | None = None retrieval_model: dict[str, Any] | None = None + summary_index_setting: dict[str, Any] | None = None partial_member_list: list[dict[str, str]] | None = None external_retrieval_model: dict[str, Any] | None = None external_knowledge_id: str | None = None diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py index 2599e6293a..86cab40ee9 100644 --- a/api/controllers/console/datasets/datasets_document.py +++ b/api/controllers/console/datasets/datasets_document.py @@ -41,10 +41,11 @@ from fields.document_fields import ( from libs.datetime_utils import naive_utc_now from libs.login import current_account_with_tenant, login_required from models import DatasetProcessRule, Document, DocumentSegment, UploadFile -from models.dataset import DocumentPipelineExecutionLog +from models.dataset import DocumentPipelineExecutionLog, DocumentSegmentSummary from services.dataset_service import DatasetService, DocumentService from services.entities.knowledge_entities.knowledge_entities import KnowledgeConfig, ProcessRule, RetrievalModel from services.file_service import FileService +from tasks.generate_summary_index_task import generate_summary_index_task from ..app.error import ( ProviderModelCurrentlyNotSupportError, @@ -110,6 +111,10 @@ class DocumentRenamePayload(BaseModel): name: str +class GenerateSummaryPayload(BaseModel): + document_list: list[str] + + class DocumentBatchDownloadZipPayload(BaseModel): """Request payload for bulk downloading documents as a zip archive.""" @@ -132,6 +137,7 @@ register_schema_models( RetrievalModel, DocumentRetryPayload, DocumentRenamePayload, + GenerateSummaryPayload, DocumentBatchDownloadZipPayload, ) @@ -319,6 +325,89 @@ class DatasetDocumentListApi(Resource): paginated_documents = db.paginate(select=query, page=page, per_page=limit, max_per_page=100, error_out=False) documents = paginated_documents.items + + # Check if dataset has summary index enabled + has_summary_index = dataset.summary_index_setting and dataset.summary_index_setting.get("enable") is True + + # Filter documents that need summary calculation + documents_need_summary = [doc for doc in documents if doc.need_summary is True] + document_ids_need_summary = [str(doc.id) for doc in documents_need_summary] + + # Calculate summary_index_status for documents that need summary (only if dataset summary index is enabled) + summary_status_map = {} + if has_summary_index and document_ids_need_summary: + # Get all segments for these documents (excluding qa_model and re_segment) + segments = ( + db.session.query(DocumentSegment.id, DocumentSegment.document_id) + .where( + 
DocumentSegment.document_id.in_(document_ids_need_summary), + DocumentSegment.status != "re_segment", + DocumentSegment.tenant_id == current_tenant_id, + ) + .all() + ) + + # Group segments by document_id + document_segments_map = {} + for segment in segments: + doc_id = str(segment.document_id) + if doc_id not in document_segments_map: + document_segments_map[doc_id] = [] + document_segments_map[doc_id].append(segment.id) + + # Get all summary records for these segments + all_segment_ids = [seg.id for seg in segments] + summaries = {} + if all_segment_ids: + summary_records = ( + db.session.query(DocumentSegmentSummary) + .where( + DocumentSegmentSummary.chunk_id.in_(all_segment_ids), + DocumentSegmentSummary.dataset_id == dataset_id, + DocumentSegmentSummary.enabled == True, # Only count enabled summaries + ) + .all() + ) + summaries = {summary.chunk_id: summary.status for summary in summary_records} + + # Calculate summary_index_status for each document + for doc_id in document_ids_need_summary: + segment_ids = document_segments_map.get(doc_id, []) + if not segment_ids: + # No segments, status is None (not started) + summary_status_map[doc_id] = None + continue + + # Count summary statuses for this document's segments + status_counts = {"completed": 0, "generating": 0, "error": 0, "not_started": 0} + for segment_id in segment_ids: + status = summaries.get(segment_id, "not_started") + if status in status_counts: + status_counts[status] += 1 + else: + status_counts["not_started"] += 1 + + generating_count = status_counts["generating"] + + # Determine overall status: + # - "SUMMARIZING" only when task is queued and at least one summary is generating + # - None (empty) for all other cases (not queued, all completed/error) + if generating_count > 0: + # Task is queued and at least one summary is still generating + summary_status_map[doc_id] = "SUMMARIZING" + else: + # Task not queued yet, or all summaries are completed/error (task finished) + summary_status_map[doc_id] = None + + # Add summary_index_status to each document + for document in documents: + if has_summary_index and document.need_summary is True: + # Get status from map, default to None (not queued yet) + document.summary_index_status = summary_status_map.get(str(document.id)) + else: + # Return null if summary index is not enabled or document doesn't need summary + document.summary_index_status = None + if fetch: for document in documents: completed_segments = ( @@ -804,6 +893,7 @@ class DocumentApi(DocumentResource): "display_status": document.display_status, "doc_form": document.doc_form, "doc_language": document.doc_language, + "need_summary": document.need_summary if document.need_summary is not None else False, } else: dataset_process_rules = DatasetService.get_process_rules(dataset_id) @@ -839,6 +929,7 @@ class DocumentApi(DocumentResource): "display_status": document.display_status, "doc_form": document.doc_form, "doc_language": document.doc_language, + "need_summary": document.need_summary if document.need_summary is not None else False, } return response, 200 @@ -1262,3 +1353,216 @@ class DocumentPipelineExecutionLogApi(DocumentResource): "input_data": log.input_data, "datasource_node_id": log.datasource_node_id, }, 200 + + +@console_ns.route("/datasets//documents/generate-summary") +class DocumentGenerateSummaryApi(Resource): + @console_ns.doc("generate_summary_for_documents") + @console_ns.doc(description="Generate summary index for documents") + @console_ns.doc(params={"dataset_id": "Dataset ID"}) + 
@console_ns.expect(console_ns.models[GenerateSummaryPayload.__name__]) + @console_ns.response(200, "Summary generation started successfully") + @console_ns.response(400, "Invalid request or dataset configuration") + @console_ns.response(403, "Permission denied") + @console_ns.response(404, "Dataset not found") + @setup_required + @login_required + @account_initialization_required + @cloud_edition_billing_rate_limit_check("knowledge") + def post(self, dataset_id): + """ + Generate summary index for specified documents. + + This endpoint checks if the dataset configuration supports summary generation + (indexing_technique must be 'high_quality' and summary_index_setting.enable must be true), + then asynchronously generates summary indexes for the provided documents. + """ + current_user, _ = current_account_with_tenant() + dataset_id = str(dataset_id) + + # Get dataset + dataset = DatasetService.get_dataset(dataset_id) + if not dataset: + raise NotFound("Dataset not found.") + + # Check permissions + if not current_user.is_dataset_editor: + raise Forbidden() + + try: + DatasetService.check_dataset_permission(dataset, current_user) + except services.errors.account.NoPermissionError as e: + raise Forbidden(str(e)) + + # Validate request payload + payload = GenerateSummaryPayload.model_validate(console_ns.payload or {}) + document_list = payload.document_list + + if not document_list: + raise ValueError("document_list cannot be empty.") + + # Check if dataset configuration supports summary generation + if dataset.indexing_technique != "high_quality": + raise ValueError( + f"Summary generation is only available for 'high_quality' indexing technique. " + f"Current indexing technique: {dataset.indexing_technique}" + ) + + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + raise ValueError("Summary index is not enabled for this dataset. Please enable it in the dataset settings.") + + # Verify all documents exist and belong to the dataset + documents = ( + db.session.query(Document) + .filter( + Document.id.in_(document_list), + Document.dataset_id == dataset_id, + ) + .all() + ) + + if len(documents) != len(document_list): + found_ids = {doc.id for doc in documents} + missing_ids = set(document_list) - found_ids + raise NotFound(f"Some documents not found: {list(missing_ids)}") + + # Dispatch async tasks for each document + for document in documents: + # Skip qa_model documents as they don't generate summaries + if document.doc_form == "qa_model": + logger.info("Skipping summary generation for qa_model document %s", document.id) + continue + + # Dispatch async task + generate_summary_index_task(dataset_id, document.id) + logger.info( + "Dispatched summary generation task for document %s in dataset %s", + document.id, + dataset_id, + ) + + return {"result": "success"}, 200 + + +@console_ns.route("/datasets//documents//summary-status") +class DocumentSummaryStatusApi(DocumentResource): + @console_ns.doc("get_document_summary_status") + @console_ns.doc(description="Get summary index generation status for a document") + @console_ns.doc(params={"dataset_id": "Dataset ID", "document_id": "Document ID"}) + @console_ns.response(200, "Summary status retrieved successfully") + @console_ns.response(404, "Document not found") + @setup_required + @login_required + @account_initialization_required + def get(self, dataset_id, document_id): + """ + Get summary index generation status for a document. 
+ + Returns: + - total_segments: Total number of segments in the document + - summary_status: Dictionary with status counts + - completed: Number of summaries completed + - generating: Number of summaries being generated + - error: Number of summaries with errors + - not_started: Number of segments without summary records + - summaries: List of summary records with status and content preview + """ + current_user, _ = current_account_with_tenant() + dataset_id = str(dataset_id) + document_id = str(document_id) + + # Get document + document = self.get_document(dataset_id, document_id) + + # Get dataset + dataset = DatasetService.get_dataset(dataset_id) + if not dataset: + raise NotFound("Dataset not found.") + + # Check permissions + try: + DatasetService.check_dataset_permission(dataset, current_user) + except services.errors.account.NoPermissionError as e: + raise Forbidden(str(e)) + + # Get all segments for this document + segments = ( + db.session.query(DocumentSegment) + .filter( + DocumentSegment.document_id == document_id, + DocumentSegment.dataset_id == dataset_id, + DocumentSegment.status == "completed", + DocumentSegment.enabled == True, + ) + .all() + ) + + total_segments = len(segments) + + # Get all summary records for these segments + segment_ids = [segment.id for segment in segments] + summaries = [] + if segment_ids: + summaries = ( + db.session.query(DocumentSegmentSummary) + .filter( + DocumentSegmentSummary.document_id == document_id, + DocumentSegmentSummary.dataset_id == dataset_id, + DocumentSegmentSummary.chunk_id.in_(segment_ids), + DocumentSegmentSummary.enabled == True, # Only return enabled summaries + ) + .all() + ) + + # Create a mapping of chunk_id to summary + summary_map = {summary.chunk_id: summary for summary in summaries} + + # Count statuses + status_counts = { + "completed": 0, + "generating": 0, + "error": 0, + "not_started": 0, + } + + summary_list = [] + for segment in segments: + summary = summary_map.get(segment.id) + if summary: + status = summary.status + status_counts[status] = status_counts.get(status, 0) + 1 + summary_list.append( + { + "segment_id": segment.id, + "segment_position": segment.position, + "status": summary.status, + "summary_preview": ( + summary.summary_content[:100] + "..." 
+ if summary.summary_content and len(summary.summary_content) > 100 + else summary.summary_content + ), + "error": summary.error, + "created_at": int(summary.created_at.timestamp()) if summary.created_at else None, + "updated_at": int(summary.updated_at.timestamp()) if summary.updated_at else None, + } + ) + else: + status_counts["not_started"] += 1 + summary_list.append( + { + "segment_id": segment.id, + "segment_position": segment.position, + "status": "not_started", + "summary_preview": None, + "error": None, + "created_at": None, + "updated_at": None, + } + ) + + return { + "total_segments": total_segments, + "summary_status": status_counts, + "summaries": summary_list, + }, 200 diff --git a/api/controllers/console/datasets/datasets_segments.py b/api/controllers/console/datasets/datasets_segments.py index 16fecb41c6..8a9bc6a201 100644 --- a/api/controllers/console/datasets/datasets_segments.py +++ b/api/controllers/console/datasets/datasets_segments.py @@ -32,7 +32,7 @@ from extensions.ext_redis import redis_client from fields.segment_fields import child_chunk_fields, segment_fields from libs.helper import escape_like_pattern from libs.login import current_account_with_tenant, login_required -from models.dataset import ChildChunk, DocumentSegment +from models.dataset import ChildChunk, DocumentSegment, DocumentSegmentSummary from models.model import UploadFile from services.dataset_service import DatasetService, DocumentService, SegmentService from services.entities.knowledge_entities.knowledge_entities import ChildChunkUpdateArgs, SegmentUpdateArgs @@ -41,6 +41,23 @@ from services.errors.chunk import ChildChunkIndexingError as ChildChunkIndexingS from tasks.batch_create_segment_to_index_task import batch_create_segment_to_index_task +def _get_segment_with_summary(segment, dataset_id): + """Helper function to marshal segment and add summary information.""" + segment_dict = marshal(segment, segment_fields) + # Query summary for this segment (only enabled summaries) + summary = ( + db.session.query(DocumentSegmentSummary) + .where( + DocumentSegmentSummary.chunk_id == segment.id, + DocumentSegmentSummary.dataset_id == dataset_id, + DocumentSegmentSummary.enabled == True, # Only return enabled summaries + ) + .first() + ) + segment_dict["summary"] = summary.summary_content if summary else None + return segment_dict + + class SegmentListQuery(BaseModel): limit: int = Field(default=20, ge=1, le=100) status: list[str] = Field(default_factory=list) @@ -63,6 +80,7 @@ class SegmentUpdatePayload(BaseModel): keywords: list[str] | None = None regenerate_child_chunks: bool = False attachment_ids: list[str] | None = None + summary: str | None = None # Summary content for summary index class BatchImportPayload(BaseModel): @@ -180,8 +198,32 @@ class DatasetDocumentSegmentListApi(Resource): segments = db.paginate(select=query, page=page, per_page=limit, max_per_page=100, error_out=False) + # Query summaries for all segments in this page (batch query for efficiency) + segment_ids = [segment.id for segment in segments.items] + summaries = {} + if segment_ids: + summary_records = ( + db.session.query(DocumentSegmentSummary) + .where( + DocumentSegmentSummary.chunk_id.in_(segment_ids), + DocumentSegmentSummary.dataset_id == dataset_id, + ) + .all() + ) + # Only include enabled summaries + summaries = { + summary.chunk_id: summary.summary_content for summary in summary_records if summary.enabled is True + } + + # Add summary to each segment + segments_with_summary = [] + for segment in segments.items: + 
segment_dict = marshal(segment, segment_fields) + segment_dict["summary"] = summaries.get(segment.id) + segments_with_summary.append(segment_dict) + response = { - "data": marshal(segments.items, segment_fields), + "data": segments_with_summary, "limit": limit, "total": segments.total, "total_pages": segments.pages, @@ -327,7 +369,7 @@ class DatasetDocumentSegmentAddApi(Resource): payload_dict = payload.model_dump(exclude_none=True) SegmentService.segment_create_args_validate(payload_dict, document) segment = SegmentService.create_segment(payload_dict, document, dataset) - return {"data": marshal(segment, segment_fields), "doc_form": document.doc_form}, 200 + return {"data": _get_segment_with_summary(segment, dataset_id), "doc_form": document.doc_form}, 200 @console_ns.route("/datasets//documents//segments/") @@ -389,10 +431,12 @@ class DatasetDocumentSegmentUpdateApi(Resource): payload = SegmentUpdatePayload.model_validate(console_ns.payload or {}) payload_dict = payload.model_dump(exclude_none=True) SegmentService.segment_create_args_validate(payload_dict, document) + + # Update segment (summary update with change detection is handled in SegmentService.update_segment) segment = SegmentService.update_segment( SegmentUpdateArgs.model_validate(payload.model_dump(exclude_none=True)), segment, document, dataset ) - return {"data": marshal(segment, segment_fields), "doc_form": document.doc_form}, 200 + return {"data": _get_segment_with_summary(segment, dataset_id), "doc_form": document.doc_form}, 200 @setup_required @login_required diff --git a/api/controllers/console/datasets/hit_testing.py b/api/controllers/console/datasets/hit_testing.py index 932cb4fcce..e62be13c2f 100644 --- a/api/controllers/console/datasets/hit_testing.py +++ b/api/controllers/console/datasets/hit_testing.py @@ -1,6 +1,13 @@ -from flask_restx import Resource +from flask_restx import Resource, fields from controllers.common.schema import register_schema_model +from fields.hit_testing_fields import ( + child_chunk_fields, + document_fields, + files_fields, + hit_testing_record_fields, + segment_fields, +) from libs.login import login_required from .. 
import console_ns @@ -14,13 +21,45 @@ from ..wraps import ( register_schema_model(console_ns, HitTestingPayload) +def _get_or_create_model(model_name: str, field_def): + """Get or create a flask_restx model to avoid dict type issues in Swagger.""" + existing = console_ns.models.get(model_name) + if existing is None: + existing = console_ns.model(model_name, field_def) + return existing + + +# Register models for flask_restx to avoid dict type issues in Swagger +document_model = _get_or_create_model("HitTestingDocument", document_fields) + +segment_fields_copy = segment_fields.copy() +segment_fields_copy["document"] = fields.Nested(document_model) +segment_model = _get_or_create_model("HitTestingSegment", segment_fields_copy) + +child_chunk_model = _get_or_create_model("HitTestingChildChunk", child_chunk_fields) +files_model = _get_or_create_model("HitTestingFile", files_fields) + +hit_testing_record_fields_copy = hit_testing_record_fields.copy() +hit_testing_record_fields_copy["segment"] = fields.Nested(segment_model) +hit_testing_record_fields_copy["child_chunks"] = fields.List(fields.Nested(child_chunk_model)) +hit_testing_record_fields_copy["files"] = fields.List(fields.Nested(files_model)) +hit_testing_record_model = _get_or_create_model("HitTestingRecord", hit_testing_record_fields_copy) + +# Response model for hit testing API +hit_testing_response_fields = { + "query": fields.String, + "records": fields.List(fields.Nested(hit_testing_record_model)), +} +hit_testing_response_model = _get_or_create_model("HitTestingResponse", hit_testing_response_fields) + + @console_ns.route("/datasets//hit-testing") class HitTestingApi(Resource, DatasetsHitTestingBase): @console_ns.doc("test_dataset_retrieval") @console_ns.doc(description="Test dataset knowledge retrieval") @console_ns.doc(params={"dataset_id": "Dataset ID"}) @console_ns.expect(console_ns.models[HitTestingPayload.__name__]) - @console_ns.response(200, "Hit testing completed successfully") + @console_ns.response(200, "Hit testing completed successfully", model=hit_testing_response_model) @console_ns.response(404, "Dataset not found") @console_ns.response(400, "Invalid parameters") @setup_required diff --git a/api/core/entities/knowledge_entities.py b/api/core/entities/knowledge_entities.py index d4093b5245..b1ba3c3e2a 100644 --- a/api/core/entities/knowledge_entities.py +++ b/api/core/entities/knowledge_entities.py @@ -3,6 +3,7 @@ from pydantic import BaseModel, Field, field_validator class PreviewDetail(BaseModel): content: str + summary: str | None = None child_chunks: list[str] | None = None diff --git a/api/core/indexing_runner.py b/api/core/indexing_runner.py index f1b50f360b..e172e88298 100644 --- a/api/core/indexing_runner.py +++ b/api/core/indexing_runner.py @@ -311,14 +311,18 @@ class IndexingRunner: qa_preview_texts: list[QAPreviewDetail] = [] total_segments = 0 + # doc_form represents the segmentation method (general, parent-child, QA) index_type = doc_form index_processor = IndexProcessorFactory(index_type).init_index_processor() + # one extract_setting is one source document for extract_setting in extract_settings: # extract processing_rule = DatasetProcessRule( mode=tmp_processing_rule["mode"], rules=json.dumps(tmp_processing_rule["rules"]) ) + # Extract document content text_docs = index_processor.extract(extract_setting, process_rule_mode=tmp_processing_rule["mode"]) + # Cleaning and segmentation documents = index_processor.transform( text_docs, current_user=None, @@ -361,6 +365,12 @@ class IndexingRunner: if doc_form 
and doc_form == "qa_model": return IndexingEstimate(total_segments=total_segments * 20, qa_preview=qa_preview_texts, preview=[]) + + # Generate summary preview + summary_index_setting = tmp_processing_rule.get("summary_index_setting") + if summary_index_setting and summary_index_setting.get("enable") and preview_texts: + preview_texts = index_processor.generate_summary_preview(tenant_id, preview_texts, summary_index_setting) + return IndexingEstimate(total_segments=total_segments, preview=preview_texts) def _extract( diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py index ec2b7f2d44..1e44d89e2f 100644 --- a/api/core/llm_generator/prompts.py +++ b/api/core/llm_generator/prompts.py @@ -434,3 +434,20 @@ INSTRUCTION_GENERATE_TEMPLATE_PROMPT = """The output of this prompt is not as ex You should edit the prompt according to the IDEAL OUTPUT.""" INSTRUCTION_GENERATE_TEMPLATE_CODE = """Please fix the errors in the {{#error_message#}}.""" + +DEFAULT_GENERATOR_SUMMARY_PROMPT = ( + """Summarize the following content. Extract only the key information and main points. """ + """Remove redundant details. + +Requirements: +1. Write a concise summary in plain text +2. Use the same language as the input content +3. Focus on important facts, concepts, and details +4. If images are included, describe their key information +5. Do not use words like "好的", "ok", "I understand", "This text discusses", "The content mentions" +6. Write directly without extra words + +Output only the summary text. Start summarizing now: + +""" +) \ No newline at end of file diff --git a/api/core/rag/datasource/retrieval_service.py b/api/core/rag/datasource/retrieval_service.py index 8ec1ce6242..372cbe5032 100644 --- a/api/core/rag/datasource/retrieval_service.py +++ b/api/core/rag/datasource/retrieval_service.py @@ -389,15 +389,14 @@ class RetrievalService: .all() } - records = [] - include_segment_ids = set() - segment_child_map = {} - valid_dataset_documents = {} image_doc_ids: list[Any] = [] child_index_node_ids = [] index_node_ids = [] doc_to_document_map = {} + summary_segment_ids = set() # Track segments retrieved via summary + + # First pass: collect all document IDs and identify summary documents for document in documents: document_id = document.metadata.get("document_id") if document_id not in dataset_documents: @@ -408,16 +407,24 @@ class RetrievalService: continue valid_dataset_documents[document_id] = dataset_document + doc_id = document.metadata.get("doc_id") or "" + doc_to_document_map[doc_id] = document + + # Check if this is a summary document + is_summary = document.metadata.get("is_summary", False) + if is_summary: + # For summary documents, find the original chunk via original_chunk_id + original_chunk_id = document.metadata.get("original_chunk_id") + if original_chunk_id: + summary_segment_ids.add(original_chunk_id) + continue # Skip adding to other lists for summary documents + if dataset_document.doc_form == IndexStructureType.PARENT_CHILD_INDEX: - doc_id = document.metadata.get("doc_id") or "" - doc_to_document_map[doc_id] = document if document.metadata.get("doc_type") == DocType.IMAGE: image_doc_ids.append(doc_id) else: child_index_node_ids.append(doc_id) else: - doc_id = document.metadata.get("doc_id") or "" - doc_to_document_map[doc_id] = document if document.metadata.get("doc_type") == DocType.IMAGE: image_doc_ids.append(doc_id) else: @@ -433,6 +440,7 @@ class RetrievalService: attachment_map: dict[str, list[dict[str, Any]]] = {} child_chunk_map: dict[str, 
list[ChildChunk]] = {} doc_segment_map: dict[str, list[str]] = {} + segment_summary_map: dict[str, str] = {} # Map segment_id to summary content with session_factory.create_session() as session: attachments = cls.get_segment_attachment_infos(image_doc_ids, session) @@ -447,6 +455,7 @@ class RetrievalService: doc_segment_map[attachment["segment_id"]].append(attachment["attachment_id"]) else: doc_segment_map[attachment["segment_id"]] = [attachment["attachment_id"]] + child_chunk_stmt = select(ChildChunk).where(ChildChunk.index_node_id.in_(child_index_node_ids)) child_index_nodes = session.execute(child_chunk_stmt).scalars().all() @@ -470,6 +479,7 @@ class RetrievalService: index_node_segments = session.execute(document_segment_stmt).scalars().all() # type: ignore for index_node_segment in index_node_segments: doc_segment_map[index_node_segment.id] = [index_node_segment.index_node_id] + if segment_ids: document_segment_stmt = select(DocumentSegment).where( DocumentSegment.enabled == True, @@ -481,6 +491,42 @@ class RetrievalService: if index_node_segments: segments.extend(index_node_segments) + # Handle summary documents: query segments by original_chunk_id + if summary_segment_ids: + summary_segment_ids_list = list(summary_segment_ids) + summary_segment_stmt = select(DocumentSegment).where( + DocumentSegment.enabled == True, + DocumentSegment.status == "completed", + DocumentSegment.id.in_(summary_segment_ids_list), + ) + summary_segments = session.execute(summary_segment_stmt).scalars().all() # type: ignore + segments.extend(summary_segments) + # Add summary segment IDs to segment_ids for summary query + for seg in summary_segments: + if seg.id not in segment_ids: + segment_ids.append(seg.id) + + # Batch query summaries for segments retrieved via summary (only enabled summaries) + if summary_segment_ids: + from models.dataset import DocumentSegmentSummary + + summaries = ( + session.query(DocumentSegmentSummary) + .filter( + DocumentSegmentSummary.chunk_id.in_(list(summary_segment_ids)), + DocumentSegmentSummary.status == "completed", + DocumentSegmentSummary.enabled == True, # Only retrieve enabled summaries + ) + .all() + ) + for summary in summaries: + if summary.summary_content: + segment_summary_map[summary.chunk_id] = summary.summary_content + + include_segment_ids = set() + segment_child_map: dict[str, dict[str, Any]] = {} + records: list[dict[str, Any]] = [] + for segment in segments: child_chunks: list[ChildChunk] = child_chunk_map.get(segment.id, []) attachment_infos: list[dict[str, Any]] = attachment_map.get(segment.id, []) @@ -493,7 +539,7 @@ class RetrievalService: child_chunk_details = [] max_score = 0.0 for child_chunk in child_chunks: - document = doc_to_document_map[child_chunk.index_node_id] + document = doc_to_document_map.get(child_chunk.index_node_id) child_chunk_detail = { "id": child_chunk.id, "content": child_chunk.content, @@ -503,7 +549,7 @@ class RetrievalService: child_chunk_details.append(child_chunk_detail) max_score = max(max_score, document.metadata.get("score", 0.0) if document else 0.0) for attachment_info in attachment_infos: - file_document = doc_to_document_map[attachment_info["id"]] + file_document = doc_to_document_map.get(attachment_info["id"]) max_score = max( max_score, file_document.metadata.get("score", 0.0) if file_document else 0.0 ) @@ -576,9 +622,16 @@ class RetrievalService: else None ) + # Extract summary if this segment was retrieved via summary + summary_content = segment_summary_map.get(segment.id) + # Create RetrievalSegments object 
retrieval_segment = RetrievalSegments( - segment=segment, child_chunks=child_chunks_list, score=score, files=files + segment=segment, + child_chunks=child_chunks_list, + score=score, + files=files, + summary=summary_content, ) result.append(retrieval_segment) diff --git a/api/core/rag/embedding/retrieval.py b/api/core/rag/embedding/retrieval.py index b54a37b49e..f6834ab87b 100644 --- a/api/core/rag/embedding/retrieval.py +++ b/api/core/rag/embedding/retrieval.py @@ -20,3 +20,4 @@ class RetrievalSegments(BaseModel): child_chunks: list[RetrievalChildChunk] | None = None score: float | None = None files: list[dict[str, str | int]] | None = None + summary: str | None = None # Summary content if retrieved via summary index diff --git a/api/core/rag/index_processor/index_processor_base.py b/api/core/rag/index_processor/index_processor_base.py index e36b54eedd..151a3de7d9 100644 --- a/api/core/rag/index_processor/index_processor_base.py +++ b/api/core/rag/index_processor/index_processor_base.py @@ -13,6 +13,7 @@ from urllib.parse import unquote, urlparse import httpx from configs import dify_config +from core.entities.knowledge_entities import PreviewDetail from core.helper import ssrf_proxy from core.rag.extractor.entity.extract_setting import ExtractSetting from core.rag.index_processor.constant.doc_type import DocType @@ -45,6 +46,17 @@ class BaseIndexProcessor(ABC): def transform(self, documents: list[Document], current_user: Account | None = None, **kwargs) -> list[Document]: raise NotImplementedError + @abstractmethod + def generate_summary_preview( + self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict + ) -> list[PreviewDetail]: + """ + For each segment in preview_texts, generate a summary using LLM and attach it to the segment. + The summary can be stored in a new attribute, e.g., summary. + This method should be implemented by subclasses. 
+ """ + raise NotImplementedError + @abstractmethod def load( self, diff --git a/api/core/rag/index_processor/processor/paragraph_index_processor.py b/api/core/rag/index_processor/processor/paragraph_index_processor.py index cf68cff7dc..0bf1b1e30a 100644 --- a/api/core/rag/index_processor/processor/paragraph_index_processor.py +++ b/api/core/rag/index_processor/processor/paragraph_index_processor.py @@ -1,9 +1,25 @@ """Paragraph index processor.""" +import logging +import re import uuid from collections.abc import Mapping from typing import Any +logger = logging.getLogger(__name__) + +from core.entities.knowledge_entities import PreviewDetail +from core.file import File, FileTransferMethod, FileType, file_manager +from core.llm_generator.prompts import DEFAULT_GENERATOR_SUMMARY_PROMPT +from core.model_manager import ModelInstance +from core.model_runtime.entities.message_entities import ( + ImagePromptMessageContent, + PromptMessageContentUnionTypes, + TextPromptMessageContent, + UserPromptMessage, +) +from core.model_runtime.entities.model_entities import ModelFeature, ModelType +from core.provider_manager import ProviderManager from core.rag.cleaner.clean_processor import CleanProcessor from core.rag.datasource.keyword.keyword_factory import Keyword from core.rag.datasource.retrieval_service import RetrievalService @@ -17,12 +33,16 @@ from core.rag.index_processor.index_processor_base import BaseIndexProcessor from core.rag.models.document import AttachmentDocument, Document, MultimodalGeneralStructureChunk from core.rag.retrieval.retrieval_methods import RetrievalMethod from core.tools.utils.text_processing_utils import remove_leading_symbols +from extensions.ext_database import db +from factories.file_factory import build_from_mapping from libs import helper +from models import UploadFile from models.account import Account -from models.dataset import Dataset, DatasetProcessRule +from models.dataset import Dataset, DatasetProcessRule, DocumentSegment, SegmentAttachmentBinding from models.dataset import Document as DatasetDocument from services.account_service import AccountService from services.entities.knowledge_entities.knowledge_entities import Rule +from services.summary_index_service import SummaryIndexService class ParagraphIndexProcessor(BaseIndexProcessor): @@ -108,6 +128,29 @@ class ParagraphIndexProcessor(BaseIndexProcessor): keyword.add_texts(documents) def clean(self, dataset: Dataset, node_ids: list[str] | None, with_keywords: bool = True, **kwargs): + # Note: Summary indexes are now disabled (not deleted) when segments are disabled. + # This method is called for actual deletion scenarios (e.g., when segment is deleted). + # For disable operations, disable_summaries_for_segments is called directly in the task. 
+ # Only delete summaries if explicitly requested (e.g., when segment is actually deleted) + delete_summaries = kwargs.get("delete_summaries", False) + if delete_summaries: + if node_ids: + # Find segments by index_node_id + segments = ( + db.session.query(DocumentSegment) + .filter( + DocumentSegment.dataset_id == dataset.id, + DocumentSegment.index_node_id.in_(node_ids), + ) + .all() + ) + segment_ids = [segment.id for segment in segments] + if segment_ids: + SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids) + else: + # Delete all summaries for the dataset + SummaryIndexService.delete_summaries_for_segments(dataset, None) + if dataset.indexing_technique == "high_quality": vector = Vector(dataset) if node_ids: @@ -227,3 +270,303 @@ class ParagraphIndexProcessor(BaseIndexProcessor): } else: raise ValueError("Chunks is not a list") + + def generate_summary_preview( + self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict + ) -> list[PreviewDetail]: + """ + For each segment, concurrently call generate_summary to generate a summary + and write it to the summary attribute of PreviewDetail. + In preview mode (indexing-estimate), if any summary generation fails, the method will raise an exception. + """ + import concurrent.futures + + from flask import current_app + + # Capture Flask app context for worker threads + flask_app = None + try: + flask_app = current_app._get_current_object() # type: ignore + except RuntimeError: + logger.warning("No Flask application context available, summary generation may fail") + + def process(preview: PreviewDetail) -> None: + """Generate summary for a single preview item.""" + if flask_app: + # Ensure Flask app context in worker thread + with flask_app.app_context(): + summary = self.generate_summary(tenant_id, preview.content, summary_index_setting) + preview.summary = summary + else: + # Fallback: try without app context (may fail) + summary = self.generate_summary(tenant_id, preview.content, summary_index_setting) + preview.summary = summary + + # Generate summaries concurrently using ThreadPoolExecutor + # Set a reasonable timeout to prevent hanging (60 seconds per chunk, max 5 minutes total) + timeout_seconds = min(300, 60 * len(preview_texts)) + errors: list[Exception] = [] + + with concurrent.futures.ThreadPoolExecutor(max_workers=min(10, len(preview_texts))) as executor: + futures = [ + executor.submit(process, preview) + for preview in preview_texts + ] + # Wait for all tasks to complete with timeout + done, not_done = concurrent.futures.wait(futures, timeout=timeout_seconds) + + # Cancel tasks that didn't complete in time + if not_done: + timeout_error_msg = ( + f"Summary generation timeout: {len(not_done)} chunks did not complete within {timeout_seconds}s" + ) + logger.warning("%s. 
Cancelling remaining tasks...", timeout_error_msg) + # In preview mode, timeout is also an error + errors.append(TimeoutError(timeout_error_msg)) + for future in not_done: + future.cancel() + # Wait a bit for cancellation to take effect + concurrent.futures.wait(not_done, timeout=5) + + # Collect exceptions from completed futures + for future in done: + try: + future.result() # This will raise any exception that occurred + except Exception as e: + logger.exception("Error in summary generation future") + errors.append(e) + + # In preview mode (indexing-estimate), if there are any errors, fail the request + if errors: + error_messages = [str(e) for e in errors] + error_summary = ( + f"Failed to generate summaries for {len(errors)} chunk(s). " + f"Errors: {'; '.join(error_messages[:3])}" # Show first 3 errors + ) + if len(errors) > 3: + error_summary += f" (and {len(errors) - 3} more)" + logger.error("Summary generation failed in preview mode: %s", error_summary) + raise ValueError(error_summary) + + return preview_texts + + @staticmethod + def generate_summary( + tenant_id: str, + text: str, + summary_index_setting: dict | None = None, + segment_id: str | None = None, + ) -> str: + """ + Generate summary for the given text using ModelInstance.invoke_llm and the default or custom summary prompt, + and supports vision models by including images from the segment attachments or text content. + + Args: + tenant_id: Tenant ID + text: Text content to summarize + summary_index_setting: Summary index configuration + segment_id: Optional segment ID to fetch attachments from SegmentAttachmentBinding table + """ + if not summary_index_setting or not summary_index_setting.get("enable"): + raise ValueError("summary_index_setting is required and must be enabled to generate summary.") + + model_name = summary_index_setting.get("model_name") + model_provider_name = summary_index_setting.get("model_provider_name") + summary_prompt = summary_index_setting.get("summary_prompt") + + # Import default summary prompt + if not summary_prompt: + summary_prompt = DEFAULT_GENERATOR_SUMMARY_PROMPT + + provider_manager = ProviderManager() + provider_model_bundle = provider_manager.get_provider_model_bundle( + tenant_id, model_provider_name, ModelType.LLM + ) + model_instance = ModelInstance(provider_model_bundle, model_name) + + # Get model schema to check if vision is supported + model_schema = model_instance.model_type_instance.get_model_schema(model_name, model_instance.credentials) + supports_vision = model_schema and model_schema.features and ModelFeature.VISION in model_schema.features + + # Extract images if model supports vision + image_files = [] + if supports_vision: + # First, try to get images from SegmentAttachmentBinding (preferred method) + if segment_id: + image_files = ParagraphIndexProcessor._extract_images_from_segment_attachments(tenant_id, segment_id) + + # If no images from attachments, fall back to extracting from text + if not image_files: + image_files = ParagraphIndexProcessor._extract_images_from_text(tenant_id, text) + + # Build prompt messages + prompt_messages = [] + + if image_files: + # If we have images, create a UserPromptMessage with both text and images + prompt_message_contents: list[PromptMessageContentUnionTypes] = [] + + # Add images first + for file in image_files: + try: + file_content = file_manager.to_prompt_message_content( + file, image_detail_config=ImagePromptMessageContent.DETAIL.LOW + ) + prompt_message_contents.append(file_content) + except Exception as e: + 
logger.warning("Failed to convert image file to prompt message content: %s", str(e)) + continue + + # Add text content + if prompt_message_contents: # Only add text if we successfully added images + prompt_message_contents.append(TextPromptMessageContent(data=f"{summary_prompt}\n{text}")) + prompt_messages.append(UserPromptMessage(content=prompt_message_contents)) + else: + # If image conversion failed, fall back to text-only + prompt = f"{summary_prompt}\n{text}" + prompt_messages.append(UserPromptMessage(content=prompt)) + else: + # No images, use simple text prompt + prompt = f"{summary_prompt}\n{text}" + prompt_messages.append(UserPromptMessage(content=prompt)) + + result = model_instance.invoke_llm(prompt_messages=prompt_messages, model_parameters={}, stream=False) + + return getattr(result.message, "content", "") + + @staticmethod + def _extract_images_from_text(tenant_id: str, text: str) -> list[File]: + """ + Extract images from markdown text and convert them to File objects. + + Args: + tenant_id: Tenant ID + text: Text content that may contain markdown image links + + Returns: + List of File objects representing images found in the text + """ + # Extract markdown images using regex pattern + pattern = r"!\[.*?\]\((.*?)\)" + images = re.findall(pattern, text) + + if not images: + return [] + + upload_file_id_list = [] + + for image in images: + # For data before v0.10.0 + pattern = r"/files/([a-f0-9\-]+)/image-preview(?:\?.*?)?" + match = re.search(pattern, image) + if match: + upload_file_id = match.group(1) + upload_file_id_list.append(upload_file_id) + continue + + # For data after v0.10.0 + pattern = r"/files/([a-f0-9\-]+)/file-preview(?:\?.*?)?" + match = re.search(pattern, image) + if match: + upload_file_id = match.group(1) + upload_file_id_list.append(upload_file_id) + continue + + # For tools directory - direct file formats (e.g., .png, .jpg, etc.) + pattern = r"/files/tools/([a-f0-9\-]+)\.([a-zA-Z0-9]+)(?:\?[^\s\)\"\']*)?" + match = re.search(pattern, image) + if match: + # Tool files are handled differently, skip for now + continue + + if not upload_file_id_list: + return [] + + # Get unique IDs for database query + unique_upload_file_ids = list(set(upload_file_id_list)) + upload_files = ( + db.session.query(UploadFile) + .where(UploadFile.id.in_(unique_upload_file_ids), UploadFile.tenant_id == tenant_id) + .all() + ) + + # Create File objects from UploadFile records + file_objects = [] + for upload_file in upload_files: + # Only process image files + if not upload_file.mime_type or "image" not in upload_file.mime_type: + continue + + mapping = { + "upload_file_id": upload_file.id, + "transfer_method": FileTransferMethod.LOCAL_FILE.value, + "type": FileType.IMAGE.value, + } + + try: + file_obj = build_from_mapping( + mapping=mapping, + tenant_id=tenant_id, + ) + file_objects.append(file_obj) + except Exception as e: + logger.warning("Failed to create File object from UploadFile %s: %s", upload_file.id, str(e)) + continue + + return file_objects + + @staticmethod + def _extract_images_from_segment_attachments(tenant_id: str, segment_id: str) -> list[File]: + """ + Extract images from SegmentAttachmentBinding table (preferred method). + This matches how DatasetRetrieval gets segment attachments. 
+ + Args: + tenant_id: Tenant ID + segment_id: Segment ID to fetch attachments for + + Returns: + List of File objects representing images found in segment attachments + """ + from sqlalchemy import select + + # Query attachments from SegmentAttachmentBinding table + attachments_with_bindings = db.session.execute( + select(SegmentAttachmentBinding, UploadFile) + .join(UploadFile, UploadFile.id == SegmentAttachmentBinding.attachment_id) + .where( + SegmentAttachmentBinding.segment_id == segment_id, + SegmentAttachmentBinding.tenant_id == tenant_id, + ) + ).all() + + if not attachments_with_bindings: + return [] + + file_objects = [] + for _, upload_file in attachments_with_bindings: + # Only process image files + if not upload_file.mime_type or "image" not in upload_file.mime_type: + continue + + try: + # Create File object directly (similar to DatasetRetrieval) + file_obj = File( + id=upload_file.id, + filename=upload_file.name, + extension="." + upload_file.extension, + mime_type=upload_file.mime_type, + tenant_id=tenant_id, + type=FileType.IMAGE, + transfer_method=FileTransferMethod.LOCAL_FILE, + remote_url=upload_file.source_url, + related_id=upload_file.id, + size=upload_file.size, + storage_key=upload_file.key, + ) + file_objects.append(file_obj) + except Exception as e: + logger.warning("Failed to create File object from UploadFile %s: %s", upload_file.id, str(e)) + continue + + return file_objects diff --git a/api/core/rag/index_processor/processor/parent_child_index_processor.py b/api/core/rag/index_processor/processor/parent_child_index_processor.py index 0366f3259f..8c803621b8 100644 --- a/api/core/rag/index_processor/processor/parent_child_index_processor.py +++ b/api/core/rag/index_processor/processor/parent_child_index_processor.py @@ -1,11 +1,13 @@ """Paragraph index processor.""" import json +import logging import uuid from collections.abc import Mapping from typing import Any from configs import dify_config +from core.entities.knowledge_entities import PreviewDetail from core.model_manager import ModelInstance from core.rag.cleaner.clean_processor import CleanProcessor from core.rag.datasource.retrieval_service import RetrievalService @@ -25,6 +27,9 @@ from models.dataset import ChildChunk, Dataset, DatasetProcessRule, DocumentSegm from models.dataset import Document as DatasetDocument from services.account_service import AccountService from services.entities.knowledge_entities.knowledge_entities import ParentMode, Rule +from services.summary_index_service import SummaryIndexService + +logger = logging.getLogger(__name__) class ParentChildIndexProcessor(BaseIndexProcessor): @@ -135,6 +140,29 @@ class ParentChildIndexProcessor(BaseIndexProcessor): def clean(self, dataset: Dataset, node_ids: list[str] | None, with_keywords: bool = True, **kwargs): # node_ids is segment's node_ids + # Note: Summary indexes are now disabled (not deleted) when segments are disabled. + # This method is called for actual deletion scenarios (e.g., when segment is deleted). + # For disable operations, disable_summaries_for_segments is called directly in the task. 
+ # Only delete summaries if explicitly requested (e.g., when segment is actually deleted) + delete_summaries = kwargs.get("delete_summaries", False) + if delete_summaries: + if node_ids: + # Find segments by index_node_id + segments = ( + db.session.query(DocumentSegment) + .filter( + DocumentSegment.dataset_id == dataset.id, + DocumentSegment.index_node_id.in_(node_ids), + ) + .all() + ) + segment_ids = [segment.id for segment in segments] + if segment_ids: + SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids) + else: + # Delete all summaries for the dataset + SummaryIndexService.delete_summaries_for_segments(dataset, None) + if dataset.indexing_technique == "high_quality": delete_child_chunks = kwargs.get("delete_child_chunks") or False precomputed_child_node_ids = kwargs.get("precomputed_child_node_ids") @@ -326,3 +354,93 @@ class ParentChildIndexProcessor(BaseIndexProcessor): "preview": preview, "total_segments": len(parent_childs.parent_child_chunks), } + + def generate_summary_preview( + self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict + ) -> list[PreviewDetail]: + """ + For each parent chunk in preview_texts, concurrently call generate_summary to generate a summary + and write it to the summary attribute of PreviewDetail. + In preview mode (indexing-estimate), if any summary generation fails, the method will raise an exception. + + Note: For parent-child structure, we only generate summaries for parent chunks. + """ + import concurrent.futures + + from flask import current_app + + # Capture Flask app context for worker threads + flask_app = None + try: + flask_app = current_app._get_current_object() # type: ignore + except RuntimeError: + logger.warning("No Flask application context available, summary generation may fail") + + def process(preview: PreviewDetail) -> None: + """Generate summary for a single preview item (parent chunk).""" + from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor + if flask_app: + # Ensure Flask app context in worker thread + with flask_app.app_context(): + summary = ParagraphIndexProcessor.generate_summary( + tenant_id=tenant_id, + text=preview.content, + summary_index_setting=summary_index_setting, + ) + preview.summary = summary + else: + # Fallback: try without app context (may fail) + summary = ParagraphIndexProcessor.generate_summary( + tenant_id=tenant_id, + text=preview.content, + summary_index_setting=summary_index_setting, + ) + preview.summary = summary + + # Generate summaries concurrently using ThreadPoolExecutor + # Set a reasonable timeout to prevent hanging (60 seconds per chunk, max 5 minutes total) + timeout_seconds = min(300, 60 * len(preview_texts)) + errors: list[Exception] = [] + + with concurrent.futures.ThreadPoolExecutor(max_workers=min(10, len(preview_texts))) as executor: + futures = [ + executor.submit(process, preview) + for preview in preview_texts + ] + # Wait for all tasks to complete with timeout + done, not_done = concurrent.futures.wait(futures, timeout=timeout_seconds) + + # Cancel tasks that didn't complete in time + if not_done: + timeout_error_msg = ( + f"Summary generation timeout: {len(not_done)} chunks did not complete within {timeout_seconds}s" + ) + logger.warning("%s. 
Cancelling remaining tasks...", timeout_error_msg) + # In preview mode, timeout is also an error + errors.append(TimeoutError(timeout_error_msg)) + for future in not_done: + future.cancel() + # Wait a bit for cancellation to take effect + concurrent.futures.wait(not_done, timeout=5) + + # Collect exceptions from completed futures + for future in done: + try: + future.result() # This will raise any exception that occurred + except Exception as e: + logger.exception("Error in summary generation future") + errors.append(e) + + # In preview mode (indexing-estimate), if there are any errors, fail the request + if errors: + error_messages = [str(e) for e in errors] + error_summary = ( + f"Failed to generate summaries for {len(errors)} chunk(s). " + f"Errors: {'; '.join(error_messages[:3])}" # Show first 3 errors + ) + if len(errors) > 3: + error_summary += f" (and {len(errors) - 3} more)" + logger.error("Summary generation failed in preview mode: %s", error_summary) + raise ValueError(error_summary) + + return preview_texts diff --git a/api/core/rag/index_processor/processor/qa_index_processor.py b/api/core/rag/index_processor/processor/qa_index_processor.py index 1183d5fbd7..8f738f07dc 100644 --- a/api/core/rag/index_processor/processor/qa_index_processor.py +++ b/api/core/rag/index_processor/processor/qa_index_processor.py @@ -11,6 +11,7 @@ import pandas as pd from flask import Flask, current_app from werkzeug.datastructures import FileStorage +from core.entities.knowledge_entities import PreviewDetail from core.llm_generator.llm_generator import LLMGenerator from core.rag.cleaner.clean_processor import CleanProcessor from core.rag.datasource.retrieval_service import RetrievalService @@ -25,9 +26,10 @@ from core.rag.retrieval.retrieval_methods import RetrievalMethod from core.tools.utils.text_processing_utils import remove_leading_symbols from libs import helper from models.account import Account -from models.dataset import Dataset +from models.dataset import Dataset, DocumentSegment from models.dataset import Document as DatasetDocument from services.entities.knowledge_entities.knowledge_entities import Rule +from services.summary_index_service import SummaryIndexService logger = logging.getLogger(__name__) @@ -144,6 +146,30 @@ class QAIndexProcessor(BaseIndexProcessor): vector.create_multimodal(multimodal_documents) def clean(self, dataset: Dataset, node_ids: list[str] | None, with_keywords: bool = True, **kwargs): + # Note: Summary indexes are now disabled (not deleted) when segments are disabled. + # This method is called for actual deletion scenarios (e.g., when segment is deleted). + # For disable operations, disable_summaries_for_segments is called directly in the task. 
+ # Note: qa_model doesn't generate summaries, but we clean them for completeness + # Only delete summaries if explicitly requested (e.g., when segment is actually deleted) + delete_summaries = kwargs.get("delete_summaries", False) + if delete_summaries: + if node_ids: + # Find segments by index_node_id + segments = ( + db.session.query(DocumentSegment) + .filter( + DocumentSegment.dataset_id == dataset.id, + DocumentSegment.index_node_id.in_(node_ids), + ) + .all() + ) + segment_ids = [segment.id for segment in segments] + if segment_ids: + SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids) + else: + # Delete all summaries for the dataset + SummaryIndexService.delete_summaries_for_segments(dataset, None) + vector = Vector(dataset) if node_ids: vector.delete_by_ids(node_ids) @@ -212,6 +238,17 @@ class QAIndexProcessor(BaseIndexProcessor): "total_segments": len(qa_chunks.qa_chunks), } + def generate_summary_preview( + self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict + ) -> list[PreviewDetail]: + """ + QA model doesn't generate summaries, so this method returns preview_texts unchanged. + + Note: QA model uses question-answer pairs, which don't require summary generation. + """ + # QA model doesn't generate summaries, return as-is + return preview_texts + def _format_qa_document(self, flask_app: Flask, tenant_id: str, document_node, all_qa_documents, document_language): format_documents = [] if document_node.page_content is None or not document_node.page_content.strip(): diff --git a/api/core/workflow/context/__init__.py b/api/core/workflow/context/__init__.py index 31e1f2c8d9..1237d6a017 100644 --- a/api/core/workflow/context/__init__.py +++ b/api/core/workflow/context/__init__.py @@ -7,16 +7,28 @@ execution in multi-threaded environments. from core.workflow.context.execution_context import ( AppContext, + ContextProviderNotFoundError, ExecutionContext, IExecutionContext, NullAppContext, capture_current_context, + read_context, + register_context, + register_context_capturer, + reset_context_provider, ) +from core.workflow.context.models import SandboxContext __all__ = [ "AppContext", + "ContextProviderNotFoundError", "ExecutionContext", "IExecutionContext", "NullAppContext", + "SandboxContext", "capture_current_context", + "read_context", + "register_context", + "register_context_capturer", + "reset_context_provider", ] diff --git a/api/core/workflow/context/execution_context.py b/api/core/workflow/context/execution_context.py index 5a4203be93..d951c95d68 100644 --- a/api/core/workflow/context/execution_context.py +++ b/api/core/workflow/context/execution_context.py @@ -4,9 +4,11 @@ Execution Context - Abstracted context management for workflow execution. import contextvars from abc import ABC, abstractmethod -from collections.abc import Generator +from collections.abc import Callable, Generator from contextlib import AbstractContextManager, contextmanager -from typing import Any, Protocol, final, runtime_checkable +from typing import Any, Protocol, TypeVar, final, runtime_checkable + +from pydantic import BaseModel class AppContext(ABC): @@ -204,13 +206,75 @@ class ExecutionContextBuilder: ) +_capturer: Callable[[], IExecutionContext] | None = None + +# Tenant-scoped providers using tuple keys for clarity and constant-time lookup. +# Key mapping: +# (name, tenant_id) -> provider +# - name: namespaced identifier (recommend prefixing, e.g. 
"workflow.sandbox") +# - tenant_id: tenant identifier string +# Value: +# provider: Callable[[], BaseModel] returning the typed context value +# Type-safety note: +# - This registry cannot enforce that all providers for a given name return the same BaseModel type. +# - Implementors SHOULD provide typed wrappers around register/read (like Go's context best practice), +# e.g. def register_sandbox_ctx(tenant_id: str, p: Callable[[], SandboxContext]) and +# def read_sandbox_ctx(tenant_id: str) -> SandboxContext. +_tenant_context_providers: dict[tuple[str, str], Callable[[], BaseModel]] = {} + +T = TypeVar("T", bound=BaseModel) + + +class ContextProviderNotFoundError(KeyError): + """Raised when a tenant-scoped context provider is missing for a given (name, tenant_id).""" + + pass + + +def register_context_capturer(capturer: Callable[[], IExecutionContext]) -> None: + """Register a single enterable execution context capturer (e.g., Flask).""" + global _capturer + _capturer = capturer + + +def register_context(name: str, tenant_id: str, provider: Callable[[], BaseModel]) -> None: + """Register a tenant-specific provider for a named context. + + Tip: use a namespaced "name" (e.g., "workflow.sandbox") to avoid key collisions. + Consider adding a typed wrapper for this registration in your feature module. + """ + _tenant_context_providers[(name, tenant_id)] = provider + + +def read_context(name: str, *, tenant_id: str) -> BaseModel: + """ + Read a context value for a specific tenant. + + Raises KeyError if the provider for (name, tenant_id) is not registered. + """ + prov = _tenant_context_providers.get((name, tenant_id)) + if prov is None: + raise ContextProviderNotFoundError(f"Context provider '{name}' not registered for tenant '{tenant_id}'") + return prov() + + def capture_current_context() -> IExecutionContext: """ Capture current execution context from the calling environment. - Returns: - IExecutionContext with captured context + If a capturer is registered (e.g., Flask), use it. Otherwise, return a minimal + context with NullAppContext + copy of current contextvars. """ - from context import capture_current_context + if _capturer is None: + return ExecutionContext( + app_context=NullAppContext(), + context_vars=contextvars.copy_context(), + ) + return _capturer() - return capture_current_context() + +def reset_context_provider() -> None: + """Reset the capturer and all tenant-scoped context providers (primarily for tests).""" + global _capturer + _capturer = None + _tenant_context_providers.clear() diff --git a/api/core/workflow/context/models.py b/api/core/workflow/context/models.py new file mode 100644 index 0000000000..af5a4b2614 --- /dev/null +++ b/api/core/workflow/context/models.py @@ -0,0 +1,13 @@ +from __future__ import annotations + +from pydantic import AnyHttpUrl, BaseModel + + +class SandboxContext(BaseModel): + """Typed context for sandbox integration. 
All fields optional by design.""" + + sandbox_url: AnyHttpUrl | None = None + sandbox_token: str | None = None # optional, if later needed for auth + + +__all__ = ["SandboxContext"] diff --git a/api/core/workflow/nodes/document_extractor/node.py b/api/core/workflow/nodes/document_extractor/node.py index 14ebd1f9ae..25dd98f48a 100644 --- a/api/core/workflow/nodes/document_extractor/node.py +++ b/api/core/workflow/nodes/document_extractor/node.py @@ -62,6 +62,21 @@ class DocumentExtractorNode(Node[DocumentExtractorNodeData]): inputs = {"variable_selector": variable_selector} process_data = {"documents": value if isinstance(value, list) else [value]} + # Ensure storage_key is loaded for File objects + files_to_check = value if isinstance(value, list) else [value] + files_needing_storage_key = [ + f for f in files_to_check if isinstance(f, File) and not f.storage_key and f.related_id + ] + if files_needing_storage_key: + from sqlalchemy.orm import Session + + from extensions.ext_database import db + from factories.file_factory import StorageKeyLoader + + with Session(bind=db.engine) as session: + storage_key_loader = StorageKeyLoader(session, tenant_id=self.tenant_id) + storage_key_loader.load_storage_keys(files_needing_storage_key) + try: if isinstance(value, list): extracted_text_list = list(map(_extract_text_from_file, value)) @@ -415,6 +430,16 @@ def _download_file_content(file: File) -> bytes: response.raise_for_status() return response.content else: + # Check if storage_key is set + if not file.storage_key: + raise FileDownloadError(f"File storage_key is missing for file: {file.filename}") + + # Check if file exists before downloading + from extensions.ext_storage import storage + + if not storage.exists(file.storage_key): + raise FileDownloadError(f"File not found in storage: {file.storage_key}") + return file_manager.download(file) except Exception as e: raise FileDownloadError(f"Error downloading file: {str(e)}") from e diff --git a/api/core/workflow/nodes/knowledge_index/entities.py b/api/core/workflow/nodes/knowledge_index/entities.py index 3daca90b9b..bfeb9b5b79 100644 --- a/api/core/workflow/nodes/knowledge_index/entities.py +++ b/api/core/workflow/nodes/knowledge_index/entities.py @@ -158,3 +158,5 @@ class KnowledgeIndexNodeData(BaseNodeData): type: str = "knowledge-index" chunk_structure: str index_chunk_variable_selector: list[str] + indexing_technique: str | None = None + summary_index_setting: dict | None = None diff --git a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py index 17ca4bef7b..366fe24f60 100644 --- a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py +++ b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py @@ -1,9 +1,11 @@ +import concurrent.futures import datetime import logging import time from collections.abc import Mapping from typing import Any +from flask import current_app from sqlalchemy import func, select from core.app.entities.app_invoke_entities import InvokeFrom @@ -16,7 +18,9 @@ from core.workflow.nodes.base.node import Node from core.workflow.nodes.base.template import Template from core.workflow.runtime import VariablePool from extensions.ext_database import db -from models.dataset import Dataset, Document, DocumentSegment +from models.dataset import Dataset, Document, DocumentSegment, DocumentSegmentSummary +from services.summary_index_service import SummaryIndexService +from tasks.generate_summary_index_task import 
generate_summary_index_task from .entities import KnowledgeIndexNodeData from .exc import ( @@ -67,7 +71,20 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): # index knowledge try: if is_preview: - outputs = self._get_preview_output(node_data.chunk_structure, chunks) + # Preview mode: generate summaries for chunks directly without saving to database + # Format preview and generate summaries on-the-fly + # Get indexing_technique and summary_index_setting from node_data (workflow graph config) + # or fallback to dataset if not available in node_data + indexing_technique = node_data.indexing_technique or dataset.indexing_technique + summary_index_setting = node_data.summary_index_setting or dataset.summary_index_setting + + outputs = self._get_preview_output_with_summaries( + node_data.chunk_structure, + chunks, + dataset=dataset, + indexing_technique=indexing_technique, + summary_index_setting=summary_index_setting, + ) return NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, inputs=variables, @@ -163,6 +180,9 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): db.session.commit() + # Generate summary index if enabled + self._handle_summary_index_generation(dataset, document, variable_pool) + return { "dataset_id": ds_id_value, "dataset_name": dataset_name_value, @@ -173,9 +193,307 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): "display_status": "completed", } - def _get_preview_output(self, chunk_structure: str, chunks: Any) -> Mapping[str, Any]: + def _handle_summary_index_generation( + self, + dataset: Dataset, + document: Document, + variable_pool: VariablePool, + ) -> None: + """ + Handle summary index generation based on mode (debug/preview or production). + + Args: + dataset: Dataset containing the document + document: Document to generate summaries for + variable_pool: Variable pool to check invoke_from + """ + # Only generate summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + return + + # Check if summary index is enabled + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + return + + # Skip qa_model documents + if document.doc_form == "qa_model": + return + + # Determine if in preview/debug mode + invoke_from = variable_pool.get(["sys", SystemVariableKey.INVOKE_FROM]) + is_preview = invoke_from and invoke_from.value == InvokeFrom.DEBUGGER + + # Determine if only parent chunks should be processed + only_parent_chunks = dataset.chunk_structure == "parent_child_index" + + if is_preview: + try: + # Query segments that need summary generation + query = db.session.query(DocumentSegment).filter_by( + dataset_id=dataset.id, + document_id=document.id, + status="completed", + enabled=True, + ) + segments = query.all() + + if not segments: + logger.info("No segments found for document %s", document.id) + return + + # Filter segments based on mode + segments_to_process = [] + for segment in segments: + # Skip if summary already exists + existing_summary = ( + db.session.query(DocumentSegmentSummary) + .filter_by(chunk_id=segment.id, dataset_id=dataset.id, status="completed") + .first() + ) + if existing_summary: + continue + + # For parent-child mode, all segments are parent chunks, so process all + segments_to_process.append(segment) + + if not segments_to_process: + logger.info("No segments need summary generation for document %s", document.id) + return + + # Use ThreadPoolExecutor for concurrent generation + flask_app = 
current_app._get_current_object() # type: ignore + max_workers = min(10, len(segments_to_process)) # Limit to 10 workers + + def process_segment(segment: DocumentSegment) -> None: + """Process a single segment in a thread with Flask app context.""" + with flask_app.app_context(): + try: + SummaryIndexService.generate_and_vectorize_summary(segment, dataset, summary_index_setting) + except Exception: + logger.exception( + "Failed to generate summary for segment %s", + segment.id, + ) + # Continue processing other segments + + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = [executor.submit(process_segment, segment) for segment in segments_to_process] + # Wait for all tasks to complete + concurrent.futures.wait(futures) + + logger.info( + "Successfully generated summary index for %s segments in document %s", + len(segments_to_process), + document.id, + ) + except Exception: + logger.exception("Failed to generate summary index for document %s", document.id) + # Don't fail the entire indexing process if summary generation fails + else: + # Production mode: asynchronous generation + logger.info( + "Queuing summary index generation task for document %s (production mode)", + document.id, + ) + try: + generate_summary_index_task.delay(dataset.id, document.id, None) + logger.info("Summary index generation task queued for document %s", document.id) + except Exception: + logger.exception( + "Failed to queue summary index generation task for document %s", + document.id, + ) + # Don't fail the entire indexing process if task queuing fails + + def _get_preview_output_with_summaries( + self, + chunk_structure: str, + chunks: Any, + dataset: Dataset, + indexing_technique: str | None = None, + summary_index_setting: dict | None = None, + ) -> Mapping[str, Any]: + """ + Generate preview output with summaries for chunks in preview mode. + This method generates summaries on-the-fly without saving to database. 
+ + Args: + chunk_structure: Chunk structure type + chunks: Chunks to generate preview for + dataset: Dataset object (for tenant_id) + indexing_technique: Indexing technique from node config or dataset + summary_index_setting: Summary index setting from node config or dataset + """ index_processor = IndexProcessorFactory(chunk_structure).init_index_processor() - return index_processor.format_preview(chunks) + preview_output = index_processor.format_preview(chunks) + + # Check if summary index is enabled + if indexing_technique != "high_quality": + return preview_output + + if not summary_index_setting or not summary_index_setting.get("enable"): + return preview_output + + # Generate summaries for chunks + if "preview" in preview_output and isinstance(preview_output["preview"], list): + chunk_count = len(preview_output["preview"]) + logger.info( + "Generating summaries for %s chunks in preview mode (dataset: %s)", + chunk_count, + dataset.id, + ) + # Use ParagraphIndexProcessor's generate_summary method + from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor + + # Get Flask app for application context in worker threads + flask_app = None + try: + flask_app = current_app._get_current_object() # type: ignore + except RuntimeError: + logger.warning("No Flask application context available, summary generation may fail") + + def generate_summary_for_chunk(preview_item: dict) -> None: + """Generate summary for a single chunk.""" + if "content" in preview_item: + # Set Flask application context in worker thread + if flask_app: + with flask_app.app_context(): + summary = ParagraphIndexProcessor.generate_summary( + tenant_id=dataset.tenant_id, + text=preview_item["content"], + summary_index_setting=summary_index_setting, + ) + if summary: + preview_item["summary"] = summary + else: + # Fallback: try without app context (may fail) + summary = ParagraphIndexProcessor.generate_summary( + tenant_id=dataset.tenant_id, + text=preview_item["content"], + summary_index_setting=summary_index_setting, + ) + if summary: + preview_item["summary"] = summary + + # Generate summaries concurrently using ThreadPoolExecutor + # Set a reasonable timeout to prevent hanging (60 seconds per chunk, max 5 minutes total) + timeout_seconds = min(300, 60 * len(preview_output["preview"])) + errors: list[Exception] = [] + + with concurrent.futures.ThreadPoolExecutor(max_workers=min(10, len(preview_output["preview"]))) as executor: + futures = [ + executor.submit(generate_summary_for_chunk, preview_item) + for preview_item in preview_output["preview"] + ] + # Wait for all tasks to complete with timeout + done, not_done = concurrent.futures.wait(futures, timeout=timeout_seconds) + + # Cancel tasks that didn't complete in time + if not_done: + timeout_error_msg = ( + f"Summary generation timeout: {len(not_done)} chunks did not complete within {timeout_seconds}s" + ) + logger.warning("%s. 
Cancelling remaining tasks...", timeout_error_msg) + # In preview mode, timeout is also an error + errors.append(TimeoutError(timeout_error_msg)) + for future in not_done: + future.cancel() + # Wait a bit for cancellation to take effect + concurrent.futures.wait(not_done, timeout=5) + + # Collect exceptions from completed futures + for future in done: + try: + future.result() # This will raise any exception that occurred + except Exception as e: + logger.exception("Error in summary generation future") + errors.append(e) + + # In preview mode, if there are any errors, fail the request + if errors: + error_messages = [str(e) for e in errors] + error_summary = ( + f"Failed to generate summaries for {len(errors)} chunk(s). " + f"Errors: {'; '.join(error_messages[:3])}" # Show first 3 errors + ) + if len(errors) > 3: + error_summary += f" (and {len(errors) - 3} more)" + logger.error("Summary generation failed in preview mode: %s", error_summary) + raise KnowledgeIndexNodeError(error_summary) + + completed_count = sum(1 for item in preview_output["preview"] if item.get("summary") is not None) + logger.info( + "Completed summary generation for preview chunks: %s/%s succeeded", + completed_count, + len(preview_output["preview"]), + ) + + return preview_output + + def _get_preview_output( + self, + chunk_structure: str, + chunks: Any, + dataset: Dataset | None = None, + variable_pool: VariablePool | None = None, + ) -> Mapping[str, Any]: + index_processor = IndexProcessorFactory(chunk_structure).init_index_processor() + preview_output = index_processor.format_preview(chunks) + + # If dataset is provided, try to enrich preview with summaries + if dataset and variable_pool: + document_id = variable_pool.get(["sys", SystemVariableKey.DOCUMENT_ID]) + if document_id: + document = db.session.query(Document).filter_by(id=document_id.value).first() + if document: + # Query summaries for this document + summaries = ( + db.session.query(DocumentSegmentSummary) + .filter_by( + dataset_id=dataset.id, + document_id=document.id, + status="completed", + enabled=True, + ) + .all() + ) + + if summaries: + # Create a map of segment content to summary for matching + # Use content matching as chunks in preview might not be indexed yet + summary_by_content = {} + for summary in summaries: + segment = ( + db.session.query(DocumentSegment) + .filter_by(id=summary.chunk_id, dataset_id=dataset.id) + .first() + ) + if segment: + # Normalize content for matching (strip whitespace) + normalized_content = segment.content.strip() + summary_by_content[normalized_content] = summary.summary_content + + # Enrich preview with summaries by content matching + if "preview" in preview_output and isinstance(preview_output["preview"], list): + matched_count = 0 + for preview_item in preview_output["preview"]: + if "content" in preview_item: + # Normalize content for matching + normalized_chunk_content = preview_item["content"].strip() + if normalized_chunk_content in summary_by_content: + preview_item["summary"] = summary_by_content[normalized_chunk_content] + matched_count += 1 + + if matched_count > 0: + logger.info( + "Enriched preview with %s existing summaries (dataset: %s, document: %s)", + matched_count, + dataset.id, + document.id, + ) + + return preview_output @classmethod def version(cls) -> str: diff --git a/api/extensions/ext_celery.py b/api/extensions/ext_celery.py index 08cf96c1c1..af983f6d87 100644 --- a/api/extensions/ext_celery.py +++ b/api/extensions/ext_celery.py @@ -102,6 +102,8 @@ def init_app(app: DifyApp) -> Celery: 
imports = [ "tasks.async_workflow_tasks", # trigger workers "tasks.trigger_processing_tasks", # async trigger processing + "tasks.generate_summary_index_task", # summary index generation + "tasks.regenerate_summary_index_task", # summary index regeneration ] day = dify_config.CELERY_BEAT_SCHEDULER_TIME diff --git a/api/fields/dataset_fields.py b/api/fields/dataset_fields.py index 1e5ec7d200..ff6578098b 100644 --- a/api/fields/dataset_fields.py +++ b/api/fields/dataset_fields.py @@ -39,6 +39,14 @@ dataset_retrieval_model_fields = { "score_threshold_enabled": fields.Boolean, "score_threshold": fields.Float, } + +dataset_summary_index_fields = { + "enable": fields.Boolean, + "model_name": fields.String, + "model_provider_name": fields.String, + "summary_prompt": fields.String, +} + external_retrieval_model_fields = { "top_k": fields.Integer, "score_threshold": fields.Float, @@ -83,6 +91,7 @@ dataset_detail_fields = { "embedding_model_provider": fields.String, "embedding_available": fields.Boolean, "retrieval_model_dict": fields.Nested(dataset_retrieval_model_fields), + "summary_index_setting": fields.Nested(dataset_summary_index_fields), "tags": fields.List(fields.Nested(tag_fields)), "doc_form": fields.String, "external_knowledge_info": fields.Nested(external_knowledge_info_fields), diff --git a/api/fields/document_fields.py b/api/fields/document_fields.py index 9be59f7454..35a2a04f3e 100644 --- a/api/fields/document_fields.py +++ b/api/fields/document_fields.py @@ -33,6 +33,11 @@ document_fields = { "hit_count": fields.Integer, "doc_form": fields.String, "doc_metadata": fields.List(fields.Nested(document_metadata_fields), attribute="doc_metadata_details"), + # Summary index generation status: + # "SUMMARIZING" (when task is queued and generating) + "summary_index_status": fields.String, + # Whether this document needs summary index generation + "need_summary": fields.Boolean, } document_with_segments_fields = { @@ -60,6 +65,10 @@ document_with_segments_fields = { "completed_segments": fields.Integer, "total_segments": fields.Integer, "doc_metadata": fields.List(fields.Nested(document_metadata_fields), attribute="doc_metadata_details"), + # Summary index generation status: + # "SUMMARIZING" (when task is queued and generating) + "summary_index_status": fields.String, + "need_summary": fields.Boolean, # Whether this document needs summary index generation } dataset_and_document_fields = { diff --git a/api/fields/hit_testing_fields.py b/api/fields/hit_testing_fields.py index e70f9fa722..0b54992835 100644 --- a/api/fields/hit_testing_fields.py +++ b/api/fields/hit_testing_fields.py @@ -58,4 +58,5 @@ hit_testing_record_fields = { "score": fields.Float, "tsne_position": fields.Raw, "files": fields.List(fields.Nested(files_fields)), + "summary": fields.String, # Summary content if retrieved via summary index } diff --git a/api/fields/segment_fields.py b/api/fields/segment_fields.py index 56d6b68378..2ce9fb154c 100644 --- a/api/fields/segment_fields.py +++ b/api/fields/segment_fields.py @@ -49,4 +49,5 @@ segment_fields = { "stopped_at": TimestampField, "child_chunks": fields.List(fields.Nested(child_chunk_fields)), "attachments": fields.List(fields.Nested(attachment_fields)), + "summary": fields.String, # Summary content for the segment } diff --git a/api/migrations/versions/2026_01_12_1358-562dcce7d77c_add_summaryindex_feature.py b/api/migrations/versions/2026_01_12_1358-562dcce7d77c_add_summaryindex_feature.py new file mode 100644 index 0000000000..40fe419ef6 --- /dev/null +++ 
b/api/migrations/versions/2026_01_12_1358-562dcce7d77c_add_summaryindex_feature.py @@ -0,0 +1,69 @@ +"""add SummaryIndex feature + +Revision ID: 562dcce7d77c +Revises: 03ea244985ce +Create Date: 2026-01-12 13:58:40.584802 + +""" +from alembic import op +import models as models +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '562dcce7d77c' +down_revision = '03ea244985ce' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table('document_segment_summary', + sa.Column('id', models.types.StringUUID(), nullable=False), + sa.Column('dataset_id', models.types.StringUUID(), nullable=False), + sa.Column('document_id', models.types.StringUUID(), nullable=False), + sa.Column('chunk_id', models.types.StringUUID(), nullable=False), + sa.Column('summary_content', models.types.LongText(), nullable=True), + sa.Column('summary_index_node_id', sa.String(length=255), nullable=True), + sa.Column('summary_index_node_hash', sa.String(length=255), nullable=True), + sa.Column('status', sa.String(length=32), server_default=sa.text("'generating'"), nullable=False), + sa.Column('error', models.types.LongText(), nullable=True), + sa.Column('enabled', sa.Boolean(), server_default=sa.text('true'), nullable=False), + sa.Column('disabled_at', sa.DateTime(), nullable=True), + sa.Column('disabled_by', models.types.StringUUID(), nullable=True), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.PrimaryKeyConstraint('id', name='document_segment_summary_pkey') + ) + with op.batch_alter_table('document_segment_summary', schema=None) as batch_op: + batch_op.create_index('document_segment_summary_chunk_id_idx', ['chunk_id'], unique=False) + batch_op.create_index('document_segment_summary_dataset_id_idx', ['dataset_id'], unique=False) + batch_op.create_index('document_segment_summary_document_id_idx', ['document_id'], unique=False) + batch_op.create_index('document_segment_summary_status_idx', ['status'], unique=False) + + with op.batch_alter_table('datasets', schema=None) as batch_op: + batch_op.add_column(sa.Column('summary_index_setting', models.types.AdjustedJSON(), nullable=True)) + + with op.batch_alter_table('documents', schema=None) as batch_op: + batch_op.add_column(sa.Column('need_summary', sa.Boolean(), server_default=sa.text('false'), nullable=True)) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table('documents', schema=None) as batch_op: + batch_op.drop_column('need_summary') + + with op.batch_alter_table('datasets', schema=None) as batch_op: + batch_op.drop_column('summary_index_setting') + + with op.batch_alter_table('document_segment_summary', schema=None) as batch_op: + batch_op.drop_index('document_segment_summary_status_idx') + batch_op.drop_index('document_segment_summary_document_id_idx') + batch_op.drop_index('document_segment_summary_dataset_id_idx') + batch_op.drop_index('document_segment_summary_chunk_id_idx') + + op.drop_table('document_segment_summary') + # ### end Alembic commands ### diff --git a/api/models/dataset.py b/api/models/dataset.py index 62f11b8c72..d7ccee5829 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -72,6 +72,7 @@ class Dataset(Base): keyword_number = mapped_column(sa.Integer, nullable=True, server_default=sa.text("10")) collection_binding_id = mapped_column(StringUUID, nullable=True) retrieval_model = mapped_column(AdjustedJSON, nullable=True) + summary_index_setting = mapped_column(AdjustedJSON, nullable=True) built_in_field_enabled = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) icon_info = mapped_column(AdjustedJSON, nullable=True) runtime_mode = mapped_column(sa.String(255), nullable=True, server_default=sa.text("'general'")) @@ -419,6 +420,7 @@ class Document(Base): doc_metadata = mapped_column(AdjustedJSON, nullable=True) doc_form = mapped_column(String(255), nullable=False, server_default=sa.text("'text_model'")) doc_language = mapped_column(String(255), nullable=True) + need_summary: Mapped[bool | None] = mapped_column(sa.Boolean, nullable=True, server_default=sa.text("false")) DATA_SOURCES = ["upload_file", "notion_import", "website_crawl"] @@ -1575,3 +1577,35 @@ class SegmentAttachmentBinding(Base): segment_id: Mapped[str] = mapped_column(StringUUID, nullable=False) attachment_id: Mapped[str] = mapped_column(StringUUID, nullable=False) created_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + + +class DocumentSegmentSummary(Base): + __tablename__ = "document_segment_summary" + __table_args__ = ( + sa.PrimaryKeyConstraint("id", name="document_segment_summary_pkey"), + sa.Index("document_segment_summary_dataset_id_idx", "dataset_id"), + sa.Index("document_segment_summary_document_id_idx", "document_id"), + sa.Index("document_segment_summary_chunk_id_idx", "chunk_id"), + sa.Index("document_segment_summary_status_idx", "status"), + ) + + id: Mapped[str] = mapped_column(StringUUID, nullable=False, default=lambda: str(uuid4())) + dataset_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + document_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + # corresponds to DocumentSegment.id or parent chunk id + chunk_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + summary_content: Mapped[str] = mapped_column(LongText, nullable=True) + summary_index_node_id: Mapped[str] = mapped_column(String(255), nullable=True) + summary_index_node_hash: Mapped[str] = mapped_column(String(255), nullable=True) + status: Mapped[str] = mapped_column(String(32), nullable=False, server_default=sa.text("'generating'")) + error: Mapped[str] = mapped_column(LongText, nullable=True) + enabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("true")) + disabled_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True) + disabled_by = mapped_column(StringUUID, 
nullable=True) + created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at: Mapped[datetime] = mapped_column( + DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp() + ) + + def __repr__(self): + return f"" diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index be9a0e9279..4fc340a39a 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -89,6 +89,7 @@ from tasks.disable_segments_from_index_task import disable_segments_from_index_t from tasks.document_indexing_update_task import document_indexing_update_task from tasks.enable_segments_to_index_task import enable_segments_to_index_task from tasks.recover_document_indexing_task import recover_document_indexing_task +from tasks.regenerate_summary_index_task import regenerate_summary_index_task from tasks.remove_document_from_index_task import remove_document_from_index_task from tasks.retry_document_indexing_task import retry_document_indexing_task from tasks.sync_website_document_indexing_task import sync_website_document_indexing_task @@ -476,6 +477,11 @@ class DatasetService: if external_retrieval_model: dataset.retrieval_model = external_retrieval_model + # Update summary index setting if provided + summary_index_setting = data.get("summary_index_setting", None) + if summary_index_setting is not None: + dataset.summary_index_setting = summary_index_setting + # Update basic dataset properties dataset.name = data.get("name", dataset.name) dataset.description = data.get("description", dataset.description) @@ -558,12 +564,18 @@ class DatasetService: # Handle indexing technique changes and embedding model updates action = DatasetService._handle_indexing_technique_change(dataset, data, filtered_data) + # Check if summary_index_setting model changed (before updating database) + summary_model_changed = DatasetService._check_summary_index_setting_model_changed(dataset, data) + # Add metadata fields filtered_data["updated_by"] = user.id filtered_data["updated_at"] = naive_utc_now() # update Retrieval model if data.get("retrieval_model"): filtered_data["retrieval_model"] = data["retrieval_model"] + # update summary index setting + if data.get("summary_index_setting"): + filtered_data["summary_index_setting"] = data.get("summary_index_setting") # update icon info if data.get("icon_info"): filtered_data["icon_info"] = data.get("icon_info") @@ -572,12 +584,30 @@ class DatasetService: db.session.query(Dataset).filter_by(id=dataset.id).update(filtered_data) db.session.commit() + # Reload dataset to get updated values + db.session.refresh(dataset) + # update pipeline knowledge base node data DatasetService._update_pipeline_knowledge_base_node_data(dataset, user.id) # Trigger vector index task if indexing technique changed if action: deal_dataset_vector_index_task.delay(dataset.id, action) + # If embedding_model changed, also regenerate summary vectors + if action == "update": + regenerate_summary_index_task.delay( + dataset.id, + regenerate_reason="embedding_model_changed", + regenerate_vectors_only=True, + ) + + # Trigger summary index regeneration if summary model changed + if summary_model_changed: + regenerate_summary_index_task.delay( + dataset.id, + regenerate_reason="summary_model_changed", + regenerate_vectors_only=False, + ) return dataset @@ -616,6 +646,7 @@ class DatasetService: knowledge_index_node_data["chunk_structure"] = dataset.chunk_structure 
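For reference, the summary_index_setting value being copied onto the dataset and into the pipeline's knowledge-index node data has the shape implied by the dataset_summary_index_fields serializer added earlier in this diff; a hedged example with purely illustrative values:

summary_index_setting = {
    "enable": True,
    "model_provider_name": "openai",  # illustrative provider
    "model_name": "gpt-4o-mini",  # illustrative model
    "summary_prompt": "Summarize this chunk in two or three sentences.",  # optional custom prompt
}

# _check_summary_index_setting_model_changed (defined just below) compares only
# model_name and model_provider_name, and only when both the old and new settings are
# enabled; when either differs, update_dataset queues
# regenerate_summary_index_task.delay(dataset.id,
#     regenerate_reason="summary_model_changed", regenerate_vectors_only=False).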
knowledge_index_node_data["indexing_technique"] = dataset.indexing_technique # pyright: ignore[reportAttributeAccessIssue] knowledge_index_node_data["keyword_number"] = dataset.keyword_number + knowledge_index_node_data["summary_index_setting"] = dataset.summary_index_setting node["data"] = knowledge_index_node_data updated = True except Exception: @@ -854,6 +885,53 @@ class DatasetService: ) filtered_data["collection_binding_id"] = dataset_collection_binding.id + @staticmethod + def _check_summary_index_setting_model_changed(dataset: Dataset, data: dict[str, Any]) -> bool: + """ + Check if summary_index_setting model (model_name or model_provider_name) has changed. + + Args: + dataset: Current dataset object + data: Update data dictionary + + Returns: + bool: True if summary model changed, False otherwise + """ + # Check if summary_index_setting is being updated + if "summary_index_setting" not in data or data.get("summary_index_setting") is None: + return False + + new_summary_setting = data.get("summary_index_setting") + old_summary_setting = dataset.summary_index_setting + + # If old setting doesn't exist or is disabled, no need to regenerate + if not old_summary_setting or not old_summary_setting.get("enable"): + return False + + # If new setting is disabled, no need to regenerate + if not new_summary_setting or not new_summary_setting.get("enable"): + return False + + # Compare model_name and model_provider_name + old_model_name = old_summary_setting.get("model_name") + old_model_provider = old_summary_setting.get("model_provider_name") + new_model_name = new_summary_setting.get("model_name") + new_model_provider = new_summary_setting.get("model_provider_name") + + # Check if model changed + if old_model_name != new_model_name or old_model_provider != new_model_provider: + logger.info( + "Summary index setting model changed for dataset %s: old=%s/%s, new=%s/%s", + dataset.id, + old_model_provider, + old_model_name, + new_model_provider, + new_model_name, + ) + return True + + return False + @staticmethod def update_rag_pipeline_dataset_settings( session: Session, dataset: Dataset, knowledge_configuration: KnowledgeConfiguration, has_published: bool = False @@ -889,6 +967,9 @@ class DatasetService: else: raise ValueError("Invalid index method") dataset.retrieval_model = knowledge_configuration.retrieval_model.model_dump() + # Update summary_index_setting if provided + if knowledge_configuration.summary_index_setting is not None: + dataset.summary_index_setting = knowledge_configuration.summary_index_setting session.add(dataset) else: if dataset.chunk_structure and dataset.chunk_structure != knowledge_configuration.chunk_structure: @@ -994,6 +1075,9 @@ class DatasetService: if dataset.keyword_number != knowledge_configuration.keyword_number: dataset.keyword_number = knowledge_configuration.keyword_number dataset.retrieval_model = knowledge_configuration.retrieval_model.model_dump() + # Update summary_index_setting if provided + if knowledge_configuration.summary_index_setting is not None: + dataset.summary_index_setting = knowledge_configuration.summary_index_setting session.add(dataset) session.commit() if action: @@ -1964,6 +2048,8 @@ class DocumentService: DuplicateDocumentIndexingTaskProxy( dataset.tenant_id, dataset.id, duplicate_document_ids ).delay() + # Note: Summary index generation is triggered in document_indexing_task after indexing completes + # to ensure segments are available. 
See tasks/document_indexing_task.py except LockNotOwnedError: pass @@ -2268,6 +2354,11 @@ class DocumentService: name: str, batch: str, ): + # Set need_summary based on dataset's summary_index_setting + need_summary = False + if dataset.summary_index_setting and dataset.summary_index_setting.get("enable") is True: + need_summary = True + document = Document( tenant_id=dataset.tenant_id, dataset_id=dataset.id, @@ -2281,6 +2372,7 @@ class DocumentService: created_by=account.id, doc_form=document_form, doc_language=document_language, + need_summary=need_summary, ) doc_metadata = {} if dataset.built_in_field_enabled: @@ -2505,6 +2597,7 @@ class DocumentService: embedding_model_provider=knowledge_config.embedding_model_provider, collection_binding_id=dataset_collection_binding_id, retrieval_model=retrieval_model.model_dump() if retrieval_model else None, + summary_index_setting=knowledge_config.summary_index_setting, is_multimodal=knowledge_config.is_multimodal, ) @@ -2686,6 +2779,14 @@ class DocumentService: if not isinstance(args["process_rule"]["rules"]["segmentation"]["max_tokens"], int): raise ValueError("Process rule segmentation max_tokens is invalid") + # valid summary index setting + summary_index_setting = args["process_rule"].get("summary_index_setting") + if summary_index_setting and summary_index_setting.get("enable"): + if "model_name" not in summary_index_setting or not summary_index_setting["model_name"]: + raise ValueError("Summary index model name is required") + if "model_provider_name" not in summary_index_setting or not summary_index_setting["model_provider_name"]: + raise ValueError("Summary index model provider name is required") + @staticmethod def batch_update_document_status( dataset: Dataset, document_ids: list[str], action: Literal["enable", "disable", "archive", "un_archive"], user @@ -3154,6 +3255,39 @@ class SegmentService: if args.enabled or keyword_changed: # update segment vector index VectorService.update_segment_vector(args.keywords, segment, dataset) + # update summary index if summary is provided and has changed + if args.summary is not None: + # Check if summary index is enabled + has_summary_index = ( + dataset.indexing_technique == "high_quality" + and dataset.summary_index_setting + and dataset.summary_index_setting.get("enable") is True + ) + + if has_summary_index: + # Query existing summary from database + from models.dataset import DocumentSegmentSummary + + existing_summary = ( + db.session.query(DocumentSegmentSummary) + .where( + DocumentSegmentSummary.chunk_id == segment.id, + DocumentSegmentSummary.dataset_id == dataset.id, + ) + .first() + ) + + # Check if summary has changed + existing_summary_content = existing_summary.summary_content if existing_summary else None + if existing_summary_content != args.summary: + # Summary has changed, update it + from services.summary_index_service import SummaryIndexService + + try: + SummaryIndexService.update_summary_for_segment(segment, dataset, args.summary) + except Exception: + logger.exception("Failed to update summary for segment %s", segment.id) + # Don't fail the entire update if summary update fails else: segment_hash = helper.generate_text_hash(content) tokens = 0 @@ -3228,6 +3362,15 @@ class SegmentService: elif document.doc_form in (IndexStructureType.PARAGRAPH_INDEX, IndexStructureType.QA_INDEX): # update segment vector index VectorService.update_segment_vector(args.keywords, segment, dataset) + # update summary index if summary is provided + if args.summary is not None: + from 
services.summary_index_service import SummaryIndexService + + try: + SummaryIndexService.update_summary_for_segment(segment, dataset, args.summary) + except Exception: + logger.exception("Failed to update summary for segment %s", segment.id) + # Don't fail the entire update if summary update fails # update multimodel vector index VectorService.update_multimodel_vector(segment, args.attachment_ids or [], dataset) except Exception as e: diff --git a/api/services/entities/knowledge_entities/knowledge_entities.py b/api/services/entities/knowledge_entities/knowledge_entities.py index 7959734e89..8dc5b93501 100644 --- a/api/services/entities/knowledge_entities/knowledge_entities.py +++ b/api/services/entities/knowledge_entities/knowledge_entities.py @@ -119,6 +119,7 @@ class KnowledgeConfig(BaseModel): data_source: DataSource | None = None process_rule: ProcessRule | None = None retrieval_model: RetrievalModel | None = None + summary_index_setting: dict | None = None doc_form: str = "text_model" doc_language: str = "English" embedding_model: str | None = None @@ -141,6 +142,7 @@ class SegmentUpdateArgs(BaseModel): regenerate_child_chunks: bool = False enabled: bool | None = None attachment_ids: list[str] | None = None + summary: str | None = None # Summary content for summary index class ChildChunkUpdateArgs(BaseModel): diff --git a/api/services/entities/knowledge_entities/rag_pipeline_entities.py b/api/services/entities/knowledge_entities/rag_pipeline_entities.py index cbb0efcc2a..041ae4edba 100644 --- a/api/services/entities/knowledge_entities/rag_pipeline_entities.py +++ b/api/services/entities/knowledge_entities/rag_pipeline_entities.py @@ -116,6 +116,8 @@ class KnowledgeConfiguration(BaseModel): embedding_model: str = "" keyword_number: int | None = 10 retrieval_model: RetrievalSetting + # add summary index setting + summary_index_setting: dict | None = None @field_validator("embedding_model_provider", mode="before") @classmethod diff --git a/api/services/summary_index_service.py b/api/services/summary_index_service.py new file mode 100644 index 0000000000..1ab2ac510f --- /dev/null +++ b/api/services/summary_index_service.py @@ -0,0 +1,626 @@ +"""Summary index service for generating and managing document segment summaries.""" + +import logging +import time +import uuid + +from core.rag.datasource.vdb.vector_factory import Vector +from core.rag.index_processor.constant.doc_type import DocType +from core.rag.models.document import Document +from extensions.ext_database import db +from libs import helper +from models.dataset import Dataset, DocumentSegment, DocumentSegmentSummary +from models.dataset import Document as DatasetDocument + +logger = logging.getLogger(__name__) + + +class SummaryIndexService: + """Service for generating and managing summary indexes.""" + + @staticmethod + def generate_summary_for_segment( + segment: DocumentSegment, + dataset: Dataset, + summary_index_setting: dict, + ) -> str: + """ + Generate summary for a single segment. 
+ + Args: + segment: DocumentSegment to generate summary for + dataset: Dataset containing the segment + summary_index_setting: Summary index configuration + + Returns: + Generated summary text + + Raises: + ValueError: If summary_index_setting is invalid or generation fails + """ + # Reuse the existing generate_summary method from ParagraphIndexProcessor + # Use lazy import to avoid circular import + from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor + + summary_content = ParagraphIndexProcessor.generate_summary( + tenant_id=dataset.tenant_id, + text=segment.content, + summary_index_setting=summary_index_setting, + segment_id=segment.id, + ) + + if not summary_content: + raise ValueError("Generated summary is empty") + + return summary_content + + @staticmethod + def create_summary_record( + segment: DocumentSegment, + dataset: Dataset, + summary_content: str, + status: str = "generating", + ) -> DocumentSegmentSummary: + """ + Create or update a DocumentSegmentSummary record. + If a summary record already exists for this segment, it will be updated instead of creating a new one. + + Args: + segment: DocumentSegment to create summary for + dataset: Dataset containing the segment + summary_content: Generated summary content + status: Summary status (default: "generating") + + Returns: + Created or updated DocumentSegmentSummary instance + """ + # Check if summary record already exists + existing_summary = ( + db.session.query(DocumentSegmentSummary).filter_by(chunk_id=segment.id, dataset_id=dataset.id).first() + ) + + if existing_summary: + # Update existing record + existing_summary.summary_content = summary_content + existing_summary.status = status + existing_summary.error = None # Clear any previous errors + # Re-enable if it was disabled + if not existing_summary.enabled: + existing_summary.enabled = True + existing_summary.disabled_at = None + existing_summary.disabled_by = None + db.session.add(existing_summary) + db.session.flush() + return existing_summary + else: + # Create new record (enabled by default) + summary_record = DocumentSegmentSummary( + dataset_id=dataset.id, + document_id=segment.document_id, + chunk_id=segment.id, + summary_content=summary_content, + status=status, + enabled=True, # Explicitly set enabled to True + ) + db.session.add(summary_record) + db.session.flush() + return summary_record + + @staticmethod + def vectorize_summary( + summary_record: DocumentSegmentSummary, + segment: DocumentSegment, + dataset: Dataset, + ) -> None: + """ + Vectorize summary and store in vector database. 
+ + Args: + summary_record: DocumentSegmentSummary record + segment: Original DocumentSegment + dataset: Dataset containing the segment + """ + if dataset.indexing_technique != "high_quality": + logger.warning( + "Summary vectorization skipped for dataset %s: indexing_technique is not high_quality", + dataset.id, + ) + return + + # Reuse existing index_node_id if available (like segment does), otherwise generate new one + old_summary_node_id = summary_record.summary_index_node_id + if old_summary_node_id: + # Reuse existing index_node_id (like segment behavior) + summary_index_node_id = old_summary_node_id + else: + # Generate new index node ID only for new summaries + summary_index_node_id = str(uuid.uuid4()) + + # Always regenerate hash (in case summary content changed) + summary_hash = helper.generate_text_hash(summary_record.summary_content) + + # Delete old vector only if we're reusing the same index_node_id (to overwrite) + # If index_node_id changed, the old vector should have been deleted elsewhere + if old_summary_node_id and old_summary_node_id == summary_index_node_id: + try: + vector = Vector(dataset) + vector.delete_by_ids([old_summary_node_id]) + except Exception as e: + logger.warning( + "Failed to delete old summary vector for segment %s: %s. Continuing with new vectorization.", + segment.id, + str(e), + ) + + # Create document with summary content and metadata + summary_document = Document( + page_content=summary_record.summary_content, + metadata={ + "doc_id": summary_index_node_id, + "doc_hash": summary_hash, + "dataset_id": dataset.id, + "document_id": segment.document_id, + "original_chunk_id": segment.id, # Key: link to original chunk + "doc_type": DocType.TEXT, + "is_summary": True, # Identifier for summary documents + }, + ) + + # Vectorize and store with retry mechanism for connection errors + max_retries = 3 + retry_delay = 2.0 + + for attempt in range(max_retries): + try: + vector = Vector(dataset) + vector.add_texts([summary_document], duplicate_check=True) + + # Success - update summary record with index node info + summary_record.summary_index_node_id = summary_index_node_id + summary_record.summary_index_node_hash = summary_hash + summary_record.status = "completed" + db.session.add(summary_record) + db.session.flush() + return # Success, exit function + + except (ConnectionError, Exception) as e: + error_str = str(e).lower() + # Check if it's a connection-related error that might be transient + is_connection_error = any( + keyword in error_str + for keyword in [ + "connection", + "disconnected", + "timeout", + "network", + "could not connect", + "server disconnected", + "weaviate", + ] + ) + + if is_connection_error and attempt < max_retries - 1: + # Retry for connection errors + wait_time = retry_delay * (2**attempt) # Exponential backoff + logger.warning( + "Vectorization attempt %s/%s failed for segment %s: %s. 
Retrying in %.1f seconds...", + attempt + 1, + max_retries, + segment.id, + str(e), + wait_time, + ) + time.sleep(wait_time) + continue + else: + # Final attempt failed or non-connection error - log and update status + logger.error( + "Failed to vectorize summary for segment %s after %s attempts: %s", + segment.id, + attempt + 1, + str(e), + exc_info=True, + ) + summary_record.status = "error" + summary_record.error = f"Vectorization failed: {str(e)}" + db.session.add(summary_record) + db.session.flush() + raise + + @staticmethod + def generate_and_vectorize_summary( + segment: DocumentSegment, + dataset: Dataset, + summary_index_setting: dict, + ) -> DocumentSegmentSummary: + """ + Generate summary for a segment and vectorize it. + + Args: + segment: DocumentSegment to generate summary for + dataset: Dataset containing the segment + summary_index_setting: Summary index configuration + + Returns: + Created DocumentSegmentSummary instance + + Raises: + ValueError: If summary generation fails + """ + try: + # Generate summary + summary_content = SummaryIndexService.generate_summary_for_segment(segment, dataset, summary_index_setting) + + # Create or update summary record (will handle overwrite internally) + summary_record = SummaryIndexService.create_summary_record( + segment, dataset, summary_content, status="generating" + ) + + # Vectorize summary (will delete old vector if exists before creating new one) + SummaryIndexService.vectorize_summary(summary_record, segment, dataset) + + db.session.commit() + logger.info("Successfully generated and vectorized summary for segment %s", segment.id) + return summary_record + + except Exception as e: + logger.exception("Failed to generate summary for segment %s", segment.id) + # Update summary record with error status if it exists + summary_record = ( + db.session.query(DocumentSegmentSummary).filter_by(chunk_id=segment.id, dataset_id=dataset.id).first() + ) + if summary_record: + summary_record.status = "error" + summary_record.error = str(e) + db.session.add(summary_record) + db.session.commit() + raise + + @staticmethod + def generate_summaries_for_document( + dataset: Dataset, + document: DatasetDocument, + summary_index_setting: dict, + segment_ids: list[str] | None = None, + only_parent_chunks: bool = False, + ) -> list[DocumentSegmentSummary]: + """ + Generate summaries for all segments in a document including vectorization. 
+ + Args: + dataset: Dataset containing the document + document: DatasetDocument to generate summaries for + summary_index_setting: Summary index configuration + segment_ids: Optional list of specific segment IDs to process + only_parent_chunks: If True, only process parent chunks (for parent-child mode) + + Returns: + List of created DocumentSegmentSummary instances + """ + # Only generate summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + logger.info( + "Skipping summary generation for dataset %s: indexing_technique is %s, not 'high_quality'", + dataset.id, + dataset.indexing_technique, + ) + return [] + + if not summary_index_setting or not summary_index_setting.get("enable"): + logger.info("Summary index is disabled for dataset %s", dataset.id) + return [] + + # Skip qa_model documents + if document.doc_form == "qa_model": + logger.info("Skipping summary generation for qa_model document %s", document.id) + return [] + + logger.info( + "Starting summary generation for document %s in dataset %s, segment_ids: %s, only_parent_chunks: %s", + document.id, + dataset.id, + len(segment_ids) if segment_ids else "all", + only_parent_chunks, + ) + + # Query segments (only enabled segments) + query = db.session.query(DocumentSegment).filter_by( + dataset_id=dataset.id, + document_id=document.id, + status="completed", + enabled=True, # Only generate summaries for enabled segments + ) + + if segment_ids: + query = query.filter(DocumentSegment.id.in_(segment_ids)) + + segments = query.all() + + if not segments: + logger.info("No segments found for document %s", document.id) + return [] + + summary_records = [] + + for segment in segments: + # For parent-child mode, only process parent chunks + # In parent-child mode, all DocumentSegments are parent chunks, + # so we process all of them. Child chunks are stored in ChildChunk table + # and are not DocumentSegments, so they won't be in the segments list. + # This check is mainly for clarity and future-proofing. + if only_parent_chunks: + # In parent-child mode, all segments in the query are parent chunks + # Child chunks are not DocumentSegments, so they won't appear here + # We can process all segments + pass + + try: + summary_record = SummaryIndexService.generate_and_vectorize_summary( + segment, dataset, summary_index_setting + ) + summary_records.append(summary_record) + except Exception: + logger.exception("Failed to generate summary for segment %s", segment.id) + # Continue with other segments + continue + + logger.info( + "Completed summary generation for document %s: %s summaries generated and vectorized", + document.id, + len(summary_records), + ) + return summary_records + + @staticmethod + def disable_summaries_for_segments( + dataset: Dataset, + segment_ids: list[str] | None = None, + disabled_by: str | None = None, + ) -> None: + """ + Disable summary records and remove vectors from vector database for segments. + Unlike delete, this preserves the summary records but marks them as disabled. + + Args: + dataset: Dataset containing the segments + segment_ids: List of segment IDs to disable summaries for. If None, disable all. 
+ disabled_by: User ID who disabled the summaries + """ + from libs.datetime_utils import naive_utc_now + + query = db.session.query(DocumentSegmentSummary).filter_by( + dataset_id=dataset.id, + enabled=True, # Only disable enabled summaries + ) + + if segment_ids: + query = query.filter(DocumentSegmentSummary.chunk_id.in_(segment_ids)) + + summaries = query.all() + + if not summaries: + return + + logger.info( + "Disabling %s summary records for dataset %s, segment_ids: %s", + len(summaries), + dataset.id, + len(segment_ids) if segment_ids else "all", + ) + + # Remove from vector database (but keep records) + if dataset.indexing_technique == "high_quality": + summary_node_ids = [s.summary_index_node_id for s in summaries if s.summary_index_node_id] + if summary_node_ids: + try: + vector = Vector(dataset) + vector.delete_by_ids(summary_node_ids) + except Exception as e: + logger.warning("Failed to remove summary vectors: %s", str(e)) + + # Disable summary records (don't delete) + now = naive_utc_now() + for summary in summaries: + summary.enabled = False + summary.disabled_at = now + summary.disabled_by = disabled_by + db.session.add(summary) + + db.session.commit() + logger.info("Disabled %s summary records for dataset %s", len(summaries), dataset.id) + + @staticmethod + def enable_summaries_for_segments( + dataset: Dataset, + segment_ids: list[str] | None = None, + ) -> None: + """ + Enable summary records and re-add vectors to vector database for segments. + + Note: This method enables summaries based on chunk status, not summary_index_setting.enable. + The summary_index_setting.enable flag only controls automatic generation, + not whether existing summaries can be used. + Summary.enabled should always be kept in sync with chunk.enabled. + + Args: + dataset: Dataset containing the segments + segment_ids: List of segment IDs to enable summaries for. If None, enable all. + """ + # Only enable summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + return + + query = db.session.query(DocumentSegmentSummary).filter_by( + dataset_id=dataset.id, + enabled=False, # Only enable disabled summaries + ) + + if segment_ids: + query = query.filter(DocumentSegmentSummary.chunk_id.in_(segment_ids)) + + summaries = query.all() + + if not summaries: + return + + logger.info( + "Enabling %s summary records for dataset %s, segment_ids: %s", + len(summaries), + dataset.id, + len(segment_ids) if segment_ids else "all", + ) + + # Re-vectorize and re-add to vector database + enabled_count = 0 + for summary in summaries: + # Get the original segment + segment = ( + db.session.query(DocumentSegment) + .filter_by( + id=summary.chunk_id, + dataset_id=dataset.id, + ) + .first() + ) + + # Summary.enabled stays in sync with chunk.enabled, only enable summary if the associated chunk is enabled. 
+ if not segment or not segment.enabled or segment.status != "completed": + continue + + if not summary.summary_content: + continue + + try: + # Re-vectorize summary + SummaryIndexService.vectorize_summary(summary, segment, dataset) + + # Enable summary record + summary.enabled = True + summary.disabled_at = None + summary.disabled_by = None + db.session.add(summary) + enabled_count += 1 + except Exception: + logger.exception("Failed to re-vectorize summary %s", summary.id) + # Keep it disabled if vectorization fails + continue + + db.session.commit() + logger.info("Enabled %s summary records for dataset %s", enabled_count, dataset.id) + + @staticmethod + def delete_summaries_for_segments( + dataset: Dataset, + segment_ids: list[str] | None = None, + ) -> None: + """ + Delete summary records and vectors for segments (used only for actual deletion scenarios). + For disable/enable operations, use disable_summaries_for_segments/enable_summaries_for_segments. + + Args: + dataset: Dataset containing the segments + segment_ids: List of segment IDs to delete summaries for. If None, delete all. + """ + query = db.session.query(DocumentSegmentSummary).filter_by(dataset_id=dataset.id) + + if segment_ids: + query = query.filter(DocumentSegmentSummary.chunk_id.in_(segment_ids)) + + summaries = query.all() + + if not summaries: + return + + # Delete from vector database + if dataset.indexing_technique == "high_quality": + summary_node_ids = [s.summary_index_node_id for s in summaries if s.summary_index_node_id] + if summary_node_ids: + vector = Vector(dataset) + vector.delete_by_ids(summary_node_ids) + + # Delete summary records + for summary in summaries: + db.session.delete(summary) + + db.session.commit() + logger.info("Deleted %s summary records for dataset %s", len(summaries), dataset.id) + + @staticmethod + def update_summary_for_segment( + segment: DocumentSegment, + dataset: Dataset, + summary_content: str, + ) -> DocumentSegmentSummary | None: + """ + Update summary for a segment and re-vectorize it. 
+ + Args: + segment: DocumentSegment to update summary for + dataset: Dataset containing the segment + summary_content: New summary content + + Returns: + Updated DocumentSegmentSummary instance, or None if summary index is not enabled + """ + # Only update summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + return None + + # Check if summary index is enabled + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + return None + + # Skip qa_model documents + if segment.document and segment.document.doc_form == "qa_model": + return None + + try: + # Find existing summary record + summary_record = ( + db.session.query(DocumentSegmentSummary).filter_by(chunk_id=segment.id, dataset_id=dataset.id).first() + ) + + if summary_record: + # Update existing summary + old_summary_node_id = summary_record.summary_index_node_id + + # Update summary content + summary_record.summary_content = summary_content + summary_record.status = "generating" + db.session.add(summary_record) + db.session.flush() + + # Delete old vector if exists + if old_summary_node_id: + vector = Vector(dataset) + vector.delete_by_ids([old_summary_node_id]) + + # Re-vectorize summary + SummaryIndexService.vectorize_summary(summary_record, segment, dataset) + + db.session.commit() + logger.info("Successfully updated and re-vectorized summary for segment %s", segment.id) + return summary_record + else: + # Create new summary record if it doesn't exist + summary_record = SummaryIndexService.create_summary_record( + segment, dataset, summary_content, status="generating" + ) + SummaryIndexService.vectorize_summary(summary_record, segment, dataset) + db.session.commit() + logger.info("Successfully created and vectorized summary for segment %s", segment.id) + return summary_record + + except Exception as e: + logger.exception("Failed to update summary for segment %s", segment.id) + # Update summary record with error status if it exists + summary_record = ( + db.session.query(DocumentSegmentSummary).filter_by(chunk_id=segment.id, dataset_id=dataset.id).first() + ) + if summary_record: + summary_record.status = "error" + summary_record.error = str(e) + db.session.add(summary_record) + db.session.commit() + raise diff --git a/api/tasks/add_document_to_index_task.py b/api/tasks/add_document_to_index_task.py index 62e6497e9d..2d3d00cd50 100644 --- a/api/tasks/add_document_to_index_task.py +++ b/api/tasks/add_document_to_index_task.py @@ -118,6 +118,19 @@ def add_document_to_index_task(dataset_document_id: str): ) session.commit() + # Enable summary indexes for all segments in this document + from services.summary_index_service import SummaryIndexService + + segment_ids_list = [segment.id for segment in segments] + if segment_ids_list: + try: + SummaryIndexService.enable_summaries_for_segments( + dataset=dataset, + segment_ids=segment_ids_list, + ) + except Exception as e: + logger.warning("Failed to enable summaries for document %s: %s", dataset_document.id, str(e)) + end_at = time.perf_counter() logger.info( click.style(f"Document added to index: {dataset_document.id} latency: {end_at - start_at}", fg="green") ) diff --git a/api/tasks/delete_segment_from_index_task.py b/api/tasks/delete_segment_from_index_task.py index bfa709502c..764c635d83 100644 --- a/api/tasks/delete_segment_from_index_task.py +++ b/api/tasks/delete_segment_from_index_task.py @@ -47,6 +47,7 @@ def delete_segment_from_index_task( doc_form =
dataset_document.doc_form # Proceed with index cleanup using the index_node_ids directly + # For actual deletion, we should delete summaries (not just disable them) index_processor = IndexProcessorFactory(doc_form).init_index_processor() index_processor.clean( dataset, @@ -54,6 +55,7 @@ def delete_segment_from_index_task( with_keywords=True, delete_child_chunks=True, precomputed_child_node_ids=child_node_ids, + delete_summaries=True, # Actually delete summaries when segment is deleted ) if dataset.is_multimodal: # delete segment attachment binding diff --git a/api/tasks/disable_segment_from_index_task.py b/api/tasks/disable_segment_from_index_task.py index 0ce6429a94..bc45171623 100644 --- a/api/tasks/disable_segment_from_index_task.py +++ b/api/tasks/disable_segment_from_index_task.py @@ -60,6 +60,18 @@ def disable_segment_from_index_task(segment_id: str): index_processor = IndexProcessorFactory(index_type).init_index_processor() index_processor.clean(dataset, [segment.index_node_id]) + # Disable summary index for this segment + from services.summary_index_service import SummaryIndexService + + try: + SummaryIndexService.disable_summaries_for_segments( + dataset=dataset, + segment_ids=[segment.id], + disabled_by=segment.disabled_by, + ) + except Exception as e: + logger.warning("Failed to disable summary for segment %s: %s", segment.id, str(e)) + end_at = time.perf_counter() logger.info( click.style( diff --git a/api/tasks/disable_segments_from_index_task.py b/api/tasks/disable_segments_from_index_task.py index 03635902d1..3cc267e821 100644 --- a/api/tasks/disable_segments_from_index_task.py +++ b/api/tasks/disable_segments_from_index_task.py @@ -68,6 +68,21 @@ def disable_segments_from_index_task(segment_ids: list, dataset_id: str, documen index_node_ids.extend(attachment_ids) index_processor.clean(dataset, index_node_ids, with_keywords=True, delete_child_chunks=False) + # Disable summary indexes for these segments + from services.summary_index_service import SummaryIndexService + + segment_ids_list = [segment.id for segment in segments] + try: + # Get disabled_by from first segment (they should all have the same disabled_by) + disabled_by = segments[0].disabled_by if segments else None + SummaryIndexService.disable_summaries_for_segments( + dataset=dataset, + segment_ids=segment_ids_list, + disabled_by=disabled_by, + ) + except Exception as e: + logger.warning("Failed to disable summaries for segments: %s", str(e)) + end_at = time.perf_counter() logger.info(click.style(f"Segments removed from index latency: {end_at - start_at}", fg="green")) except Exception: diff --git a/api/tasks/document_indexing_task.py b/api/tasks/document_indexing_task.py index 3bdff60196..9f3655789c 100644 --- a/api/tasks/document_indexing_task.py +++ b/api/tasks/document_indexing_task.py @@ -14,6 +14,7 @@ from enums.cloud_plan import CloudPlan from libs.datetime_utils import naive_utc_now from models.dataset import Dataset, Document from services.feature_service import FeatureService +from tasks.generate_summary_index_task import generate_summary_index_task logger = logging.getLogger(__name__) @@ -99,6 +100,69 @@ def _document_indexing(dataset_id: str, document_ids: Sequence[str]): indexing_runner.run(documents) end_at = time.perf_counter() logger.info(click.style(f"Processed dataset: {dataset_id} latency: {end_at - start_at}", fg="green")) + + # Trigger summary index generation for completed documents if enabled + # Only generate for high_quality indexing technique and when summary_index_setting is enabled + # 
Re-query dataset to get latest summary_index_setting (in case it was updated) + dataset = session.query(Dataset).where(Dataset.id == dataset_id).first() + if not dataset: + logger.warning("Dataset %s not found after indexing", dataset_id) + return + + if dataset.indexing_technique == "high_quality": + summary_index_setting = dataset.summary_index_setting + if summary_index_setting and summary_index_setting.get("enable"): + # Check each document's indexing status and trigger summary generation if completed + for document_id in document_ids: + # Re-query document to get latest status (IndexingRunner may have updated it) + document = ( + session.query(Document) + .where(Document.id == document_id, Document.dataset_id == dataset_id) + .first() + ) + if document: + logger.info( + "Checking document %s for summary generation: status=%s, doc_form=%s", + document_id, + document.indexing_status, + document.doc_form, + ) + if document.indexing_status == "completed" and document.doc_form != "qa_model": + try: + generate_summary_index_task.delay(dataset.id, document_id, None) + logger.info( + "Queued summary index generation task for document %s in dataset %s " + "after indexing completed", + document_id, + dataset.id, + ) + except Exception: + logger.exception( + "Failed to queue summary index generation task for document %s", + document_id, + ) + # Don't fail the entire indexing process if summary task queuing fails + else: + logger.info( + "Skipping summary generation for document %s: status=%s, doc_form=%s", + document_id, + document.indexing_status, + document.doc_form, + ) + else: + logger.warning("Document %s not found after indexing", document_id) + else: + logger.info( + "Summary index generation skipped for dataset %s: summary_index_setting.enable=%s", + dataset.id, + summary_index_setting.get("enable") if summary_index_setting else None, + ) + else: + logger.info( + "Summary index generation skipped for dataset %s: indexing_technique=%s (not 'high_quality')", + dataset.id, + dataset.indexing_technique, + ) except DocumentIsPausedError as ex: logger.info(click.style(str(ex), fg="yellow")) except Exception: diff --git a/api/tasks/enable_segment_to_index_task.py b/api/tasks/enable_segment_to_index_task.py index 1f9f21aa7e..41ebb0b076 100644 --- a/api/tasks/enable_segment_to_index_task.py +++ b/api/tasks/enable_segment_to_index_task.py @@ -106,6 +106,17 @@ def enable_segment_to_index_task(segment_id: str): # save vector index index_processor.load(dataset, [document], multimodal_documents=multimodel_documents) + # Enable summary index for this segment + from services.summary_index_service import SummaryIndexService + + try: + SummaryIndexService.enable_summaries_for_segments( + dataset=dataset, + segment_ids=[segment.id], + ) + except Exception as e: + logger.warning("Failed to enable summary for segment %s: %s", segment.id, str(e)) + end_at = time.perf_counter() logger.info(click.style(f"Segment enabled to index: {segment.id} latency: {end_at - start_at}", fg="green")) except Exception as e: diff --git a/api/tasks/enable_segments_to_index_task.py b/api/tasks/enable_segments_to_index_task.py index 48d3c8e178..d90eb4c39f 100644 --- a/api/tasks/enable_segments_to_index_task.py +++ b/api/tasks/enable_segments_to_index_task.py @@ -106,6 +106,18 @@ def enable_segments_to_index_task(segment_ids: list, dataset_id: str, document_i # save vector index index_processor.load(dataset, documents, multimodal_documents=multimodal_documents) + # Enable summary indexes for these segments + from 
services.summary_index_service import SummaryIndexService + + segment_ids_list = [segment.id for segment in segments] + try: + SummaryIndexService.enable_summaries_for_segments( + dataset=dataset, + segment_ids=segment_ids_list, + ) + except Exception as e: + logger.warning("Failed to enable summaries for segments: %s", str(e)) + end_at = time.perf_counter() logger.info(click.style(f"Segments enabled to index latency: {end_at - start_at}", fg="green")) except Exception as e: diff --git a/api/tasks/generate_summary_index_task.py b/api/tasks/generate_summary_index_task.py new file mode 100644 index 0000000000..99f957abaa --- /dev/null +++ b/api/tasks/generate_summary_index_task.py @@ -0,0 +1,112 @@ +"""Async task for generating summary indexes.""" + +import logging +import time + +import click +from celery import shared_task + +from extensions.ext_database import db +from models.dataset import Dataset, DocumentSegment +from models.dataset import Document as DatasetDocument +from services.summary_index_service import SummaryIndexService + +logger = logging.getLogger(__name__) + + +@shared_task(queue="dataset") +def generate_summary_index_task(dataset_id: str, document_id: str, segment_ids: list[str] | None = None): + """ + Async generate summary index for document segments. + + Args: + dataset_id: Dataset ID + document_id: Document ID + segment_ids: Optional list of specific segment IDs to process. If None, process all segments. + + Usage: + generate_summary_index_task.delay(dataset_id, document_id) + generate_summary_index_task.delay(dataset_id, document_id, segment_ids) + """ + logger.info( + click.style( + f"Start generating summary index for document {document_id} in dataset {dataset_id}", + fg="green", + ) + ) + start_at = time.perf_counter() + + try: + dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first() + if not dataset: + logger.error(click.style(f"Dataset not found: {dataset_id}", fg="red")) + db.session.close() + return + + document = db.session.query(DatasetDocument).where(DatasetDocument.id == document_id).first() + if not document: + logger.error(click.style(f"Document not found: {document_id}", fg="red")) + db.session.close() + return + + # Only generate summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + logger.info( + click.style( + f"Skipping summary generation for dataset {dataset_id}: " + f"indexing_technique is {dataset.indexing_technique}, not 'high_quality'", + fg="cyan", + ) + ) + db.session.close() + return + + # Check if summary index is enabled + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + logger.info( + click.style( + f"Summary index is disabled for dataset {dataset_id}", + fg="cyan", + ) + ) + db.session.close() + return + + # Determine if only parent chunks should be processed + only_parent_chunks = dataset.chunk_structure == "parent_child_index" + + # Generate summaries + summary_records = SummaryIndexService.generate_summaries_for_document( + dataset=dataset, + document=document, + summary_index_setting=summary_index_setting, + segment_ids=segment_ids, + only_parent_chunks=only_parent_chunks, + ) + + end_at = time.perf_counter() + logger.info( + click.style( + f"Summary index generation completed for document {document_id}: " + f"{len(summary_records)} summaries generated, latency: {end_at - start_at}", + fg="green", + ) + ) + + except Exception as e: + logger.exception("Failed to generate summary index for 
document %s", document_id) + # Update document segments with error status if needed + if segment_ids: + db.session.query(DocumentSegment).filter( + DocumentSegment.id.in_(segment_ids), + DocumentSegment.dataset_id == dataset_id, + ).update( + { + DocumentSegment.error: f"Summary generation failed: {str(e)}", + }, + synchronize_session=False, + ) + db.session.commit() + finally: + db.session.close() diff --git a/api/tasks/regenerate_summary_index_task.py b/api/tasks/regenerate_summary_index_task.py new file mode 100644 index 0000000000..f24b7bf368 --- /dev/null +++ b/api/tasks/regenerate_summary_index_task.py @@ -0,0 +1,221 @@ +"""Task for regenerating summary indexes when dataset settings change.""" + +import logging +import time + +import click +from celery import shared_task +from sqlalchemy import select + +from extensions.ext_database import db +from models.dataset import Dataset, DocumentSegment, DocumentSegmentSummary +from models.dataset import Document as DatasetDocument +from services.summary_index_service import SummaryIndexService + +logger = logging.getLogger(__name__) + + +@shared_task(queue="dataset") +def regenerate_summary_index_task( + dataset_id: str, + regenerate_reason: str = "summary_model_changed", + regenerate_vectors_only: bool = False, +): + """ + Regenerate summary indexes for all documents in a dataset. + + This task is triggered when: + 1. summary_index_setting model changes (regenerate_reason="summary_model_changed") + - Regenerates summary content and vectors for all existing summaries + 2. embedding_model changes (regenerate_reason="embedding_model_changed") + - Only regenerates vectors for existing summaries (keeps summary content) + + Args: + dataset_id: Dataset ID + regenerate_reason: Reason for regeneration ("summary_model_changed" or "embedding_model_changed") + regenerate_vectors_only: If True, only regenerate vectors without regenerating summary content + """ + logger.info( + click.style( + f"Start regenerate summary index for dataset {dataset_id}, reason: {regenerate_reason}", + fg="green", + ) + ) + start_at = time.perf_counter() + + try: + dataset = db.session.query(Dataset).filter_by(id=dataset_id).first() + if not dataset: + logger.error(click.style(f"Dataset not found: {dataset_id}", fg="red")) + db.session.close() + return + + # Only regenerate summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + logger.info( + click.style( + f"Skipping summary regeneration for dataset {dataset_id}: " + f"indexing_technique is {dataset.indexing_technique}, not 'high_quality'", + fg="cyan", + ) + ) + db.session.close() + return + + # Check if summary index is enabled + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + logger.info( + click.style( + f"Summary index is disabled for dataset {dataset_id}", + fg="cyan", + ) + ) + db.session.close() + return + + # Get all documents with completed indexing status + dataset_documents = db.session.scalars( + select(DatasetDocument).where( + DatasetDocument.dataset_id == dataset_id, + DatasetDocument.indexing_status == "completed", + DatasetDocument.enabled == True, + DatasetDocument.archived == False, + ) + ).all() + + if not dataset_documents: + logger.info( + click.style( + f"No documents found for summary regeneration in dataset {dataset_id}", + fg="cyan", + ) + ) + db.session.close() + return + + logger.info( + "Found %s documents for summary regeneration in dataset %s", + 
len(dataset_documents), + dataset_id, + ) + + total_segments_processed = 0 + total_segments_failed = 0 + + for dataset_document in dataset_documents: + # Skip qa_model documents + if dataset_document.doc_form == "qa_model": + continue + + try: + # Get all segments with existing summaries + segments = ( + db.session.query(DocumentSegment) + .join( + DocumentSegmentSummary, + DocumentSegment.id == DocumentSegmentSummary.chunk_id, + ) + .where( + DocumentSegment.document_id == dataset_document.id, + DocumentSegment.dataset_id == dataset_id, + DocumentSegment.status == "completed", + DocumentSegment.enabled == True, + DocumentSegmentSummary.dataset_id == dataset_id, + ) + .order_by(DocumentSegment.position.asc()) + .all() + ) + + if not segments: + continue + + logger.info( + "Regenerating summaries for %s segments in document %s", + len(segments), + dataset_document.id, + ) + + for segment in segments: + try: + # Get existing summary record + summary_record = ( + db.session.query(DocumentSegmentSummary) + .filter_by( + chunk_id=segment.id, + dataset_id=dataset_id, + ) + .first() + ) + + if not summary_record: + logger.warning("Summary record not found for segment %s, skipping", segment.id) + continue + + if regenerate_vectors_only: + # Only regenerate vectors (for embedding_model change) + # Delete old vector + if summary_record.summary_index_node_id: + try: + from core.rag.datasource.vdb.vector_factory import Vector + + vector = Vector(dataset) + vector.delete_by_ids([summary_record.summary_index_node_id]) + except Exception as e: + logger.warning( + "Failed to delete old summary vector for segment %s: %s", + segment.id, + str(e), + ) + + # Re-vectorize with new embedding model + SummaryIndexService.vectorize_summary(summary_record, segment, dataset) + db.session.commit() + else: + # Regenerate both summary content and vectors (for summary_model change) + SummaryIndexService.generate_and_vectorize_summary(segment, dataset, summary_index_setting) + db.session.commit() + + total_segments_processed += 1 + + except Exception as e: + logger.error( + "Failed to regenerate summary for segment %s: %s", + segment.id, + str(e), + exc_info=True, + ) + total_segments_failed += 1 + # Update summary record with error status + if summary_record: + summary_record.status = "error" + summary_record.error = f"Regeneration failed: {str(e)}" + db.session.add(summary_record) + db.session.commit() + continue + + except Exception as e: + logger.error( + "Failed to process document %s for summary regeneration: %s", + dataset_document.id, + str(e), + exc_info=True, + ) + continue + + end_at = time.perf_counter() + logger.info( + click.style( + f"Summary index regeneration completed for dataset {dataset_id}: " + f"{total_segments_processed} segments processed successfully, " + f"{total_segments_failed} segments failed, " + f"total documents: {len(dataset_documents)}, " + f"latency: {end_at - start_at:.2f}s", + fg="green", + ) + ) + + except Exception: + logger.exception("Regenerate summary index failed for dataset %s", dataset_id) + finally: + db.session.close() diff --git a/api/tasks/remove_document_from_index_task.py b/api/tasks/remove_document_from_index_task.py index c3c255fb17..55259ab527 100644 --- a/api/tasks/remove_document_from_index_task.py +++ b/api/tasks/remove_document_from_index_task.py @@ -46,6 +46,21 @@ def remove_document_from_index_task(document_id: str): index_processor = IndexProcessorFactory(document.doc_form).init_index_processor() segments = 
session.scalars(select(DocumentSegment).where(DocumentSegment.document_id == document.id)).all() + + # Disable summary indexes for all segments in this document + from services.summary_index_service import SummaryIndexService + + segment_ids_list = [segment.id for segment in segments] + if segment_ids_list: + try: + SummaryIndexService.disable_summaries_for_segments( + dataset=dataset, + segment_ids=segment_ids_list, + disabled_by=document.disabled_by, + ) + except Exception as e: + logger.warning("Failed to disable summaries for document %s: %s", document.id, str(e)) + index_node_ids = [segment.index_node_id for segment in segments] if index_node_ids: try: diff --git a/api/tests/unit_tests/core/workflow/context/test_execution_context.py b/api/tests/unit_tests/core/workflow/context/test_execution_context.py index 217c39385c..63466cfb5e 100644 --- a/api/tests/unit_tests/core/workflow/context/test_execution_context.py +++ b/api/tests/unit_tests/core/workflow/context/test_execution_context.py @@ -5,6 +5,7 @@ from typing import Any from unittest.mock import MagicMock import pytest +from pydantic import BaseModel from core.workflow.context.execution_context import ( AppContext, @@ -12,6 +13,8 @@ from core.workflow.context.execution_context import ( ExecutionContextBuilder, IExecutionContext, NullAppContext, + read_context, + register_context, ) @@ -256,3 +259,31 @@ class TestCaptureCurrentContext: # Context variables should be captured assert result.context_vars is not None + + +class TestTenantScopedContextRegistry: + def setup_method(self): + from core.workflow.context import reset_context_provider + + reset_context_provider() + + def teardown_method(self): + from core.workflow.context import reset_context_provider + + reset_context_provider() + + def test_tenant_provider_read_ok(self): + class SandboxContext(BaseModel): + base_url: str | None = None + + register_context("workflow.sandbox", "t1", lambda: SandboxContext(base_url="http://t1")) + register_context("workflow.sandbox", "t2", lambda: SandboxContext(base_url="http://t2")) + + assert read_context("workflow.sandbox", tenant_id="t1").base_url == "http://t1" + assert read_context("workflow.sandbox", tenant_id="t2").base_url == "http://t2" + + def test_missing_provider_raises_keyerror(self): + from core.workflow.context import ContextProviderNotFoundError + + with pytest.raises(ContextProviderNotFoundError): + read_context("missing", tenant_id="unknown") diff --git a/web/i18n/en-US/common.json b/web/i18n/en-US/common.json index d2e5281282..20e5400e56 100644 --- a/web/i18n/en-US/common.json +++ b/web/i18n/en-US/common.json @@ -350,7 +350,7 @@ "modelProvider.card.quota": "QUOTA", "modelProvider.card.quotaExhausted": "Quota exhausted", "modelProvider.card.removeKey": "Remove API Key", - "modelProvider.card.tip": "Message Credits supports models from OpenAI, Anthropic, Gemini, xAI, DeepSeek and Tongyi. Priority will be given to the paid quota. The free quota will be used after the paid quota is exhausted.", + "modelProvider.card.tip": "Message Credits supports models from OpenAI. Priority will be given to the paid quota. 
The free quota will be used after the paid quota is exhausted.", "modelProvider.card.tokens": "Tokens", "modelProvider.collapse": "Collapse", "modelProvider.config": "Config", diff --git a/web/i18n/ja-JP/common.json b/web/i18n/ja-JP/common.json index ffc2d0bd31..8a76021759 100644 --- a/web/i18n/ja-JP/common.json +++ b/web/i18n/ja-JP/common.json @@ -350,7 +350,7 @@ "modelProvider.card.quota": "クォータ", "modelProvider.card.quotaExhausted": "クォータが使い果たされました", "modelProvider.card.removeKey": "API キーを削除", - "modelProvider.card.tip": "メッセージ枠はOpenAI、Anthropic、Gemini、xAI、DeepSeek、Tongyiのモデルを使用することをサポートしています。無料枠は有料枠が使い果たされた後に消費されます。", + "modelProvider.card.tip": "メッセージ枠はOpenAIのモデルを使用することをサポートしています。無料枠は有料枠が使い果たされた後に消費されます。", "modelProvider.card.tokens": "トークン", "modelProvider.collapse": "折り畳み", "modelProvider.config": "設定", diff --git a/web/i18n/zh-Hans/common.json b/web/i18n/zh-Hans/common.json index b5eabfeecc..6f62b53e2d 100644 --- a/web/i18n/zh-Hans/common.json +++ b/web/i18n/zh-Hans/common.json @@ -350,7 +350,7 @@ "modelProvider.card.quota": "额度", "modelProvider.card.quotaExhausted": "配额已用完", "modelProvider.card.removeKey": "删除 API 密钥", - "modelProvider.card.tip": "消息额度支持使用 OpenAI、Anthropic、Gemini、xAI、深度求索、通义 的模型;免费额度会在付费额度用尽后才会消耗。", + "modelProvider.card.tip": "消息额度支持使用 OpenAI 的模型;免费额度会在付费额度用尽后才会消耗。", "modelProvider.card.tokens": "Tokens", "modelProvider.collapse": "收起", "modelProvider.config": "配置", diff --git a/web/next.config.js b/web/next.config.ts similarity index 88% rename from web/next.config.js rename to web/next.config.ts index 1457d638c4..fc4dee3289 100644 --- a/web/next.config.js +++ b/web/next.config.ts @@ -1,3 +1,4 @@ +import type { NextConfig } from 'next' import process from 'node:process' import withBundleAnalyzerInit from '@next/bundle-analyzer' import createMDX from '@next/mdx' @@ -24,10 +25,9 @@ const withBundleAnalyzer = withBundleAnalyzerInit({ const hasSetWebPrefix = process.env.NEXT_PUBLIC_WEB_PREFIX const port = process.env.PORT || 3000 const locImageURLs = !hasSetWebPrefix ? [new URL(`http://localhost:${port}/**`), new URL(`http://127.0.0.1:${port}/**`)] : [] -const remoteImageURLs = [hasSetWebPrefix ? new URL(`${process.env.NEXT_PUBLIC_WEB_PREFIX}/**`) : '', ...locImageURLs].filter(item => !!item) +const remoteImageURLs = ([hasSetWebPrefix ? new URL(`${process.env.NEXT_PUBLIC_WEB_PREFIX}/**`) : '', ...locImageURLs].filter(item => !!item)) as URL[] -/** @type {import('next').NextConfig} */ -const nextConfig = { +const nextConfig: NextConfig = { basePath: process.env.NEXT_PUBLIC_BASE_PATH || '', serverExternalPackages: ['esbuild-wasm'], transpilePackages: ['echarts', 'zrender'], @@ -42,7 +42,7 @@ const nextConfig = { // https://nextjs.org/docs/messages/next-image-unconfigured-host images: { remotePatterns: remoteImageURLs.map(remoteImageURL => ({ - protocol: remoteImageURL.protocol.replace(':', ''), + protocol: remoteImageURL.protocol.replace(':', '') as 'http' | 'https', hostname: remoteImageURL.hostname, port: remoteImageURL.port, pathname: remoteImageURL.pathname,