From bcd33be22a08844f1e20a782c4bf583969e2e616 Mon Sep 17 00:00:00 2001 From: FFXN Date: Mon, 15 Dec 2025 16:32:13 +0800 Subject: [PATCH 01/26] Add "type" field to PipelineRecommendedPlugin model; Add query param "type" to recommended-plugins api. --- .../rag_pipeline/rag_pipeline_workflow.py | 7 +- ...495f0_alter_table_pipeline_recommended_.py | 65 +++++++++++++++++++ api/models/dataset.py | 1 + api/services/rag_pipeline/rag_pipeline.py | 10 +-- 4 files changed, 78 insertions(+), 5 deletions(-) create mode 100644 api/migrations/versions/2025_12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py diff --git a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py index a0dc692c4e..6b86846e3e 100644 --- a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py +++ b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py @@ -1004,6 +1004,11 @@ class RagPipelineRecommendedPluginApi(Resource): @login_required @account_initialization_required def get(self): + parser = reqparse.RequestParser() + parser.add_argument('type', type=str, location='args', required=False, default='all') + args = parser.parse_args() + plugin_type = args["type"] + rag_pipeline_service = RagPipelineService() - recommended_plugins = rag_pipeline_service.get_recommended_plugins() + recommended_plugins = rag_pipeline_service.get_recommended_plugins(plugin_type) return recommended_plugins diff --git a/api/migrations/versions/2025_12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py b/api/migrations/versions/2025_12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py new file mode 100644 index 0000000000..ca3172665b --- /dev/null +++ b/api/migrations/versions/2025_12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py @@ -0,0 +1,65 @@ +"""Alter table pipeline_recommended_plugins add column type + +Revision ID: 6bb0832495f0 +Revises: 7bb281b7a422 +Create Date: 2025-12-15 16:14:38.482072 + +""" +from alembic import op +import models as models +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = '6bb0832495f0' +down_revision = '7bb281b7a422' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('app_triggers', schema=None) as batch_op: + batch_op.alter_column('provider_name', + existing_type=sa.VARCHAR(length=255), + nullable=False, + existing_server_default=sa.text("''::character varying")) + + with op.batch_alter_table('operation_logs', schema=None) as batch_op: + batch_op.alter_column('content', + existing_type=postgresql.JSON(astext_type=sa.Text()), + nullable=False) + + with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: + batch_op.add_column(sa.Column('type', sa.String(length=50), nullable=True)) + + with op.batch_alter_table('providers', schema=None) as batch_op: + batch_op.alter_column('quota_used', + existing_type=sa.BIGINT(), + nullable=False) + + # ### end Alembic commands ### + +# “推荐插件”model添加type字段;查询推荐列表接口添加type参数 +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table('providers', schema=None) as batch_op: + batch_op.alter_column('quota_used', + existing_type=sa.BIGINT(), + nullable=True) + + with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: + batch_op.drop_column('type') + + with op.batch_alter_table('operation_logs', schema=None) as batch_op: + batch_op.alter_column('content', + existing_type=postgresql.JSON(astext_type=sa.Text()), + nullable=True) + + with op.batch_alter_table('app_triggers', schema=None) as batch_op: + batch_op.alter_column('provider_name', + existing_type=sa.VARCHAR(length=255), + nullable=True, + existing_server_default=sa.text("''::character varying")) + + # ### end Alembic commands ### diff --git a/api/models/dataset.py b/api/models/dataset.py index e072711b82..c4b4c6d985 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -1458,6 +1458,7 @@ class PipelineRecommendedPlugin(TypeBase): ) plugin_id: Mapped[str] = mapped_column(LongText, nullable=False) provider_name: Mapped[str] = mapped_column(LongText, nullable=False) + type: Mapped[str] = mapped_column(sa.String(50), nullable=True) position: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=0) active: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, default=True) created_at: Mapped[datetime] = mapped_column( diff --git a/api/services/rag_pipeline/rag_pipeline.py b/api/services/rag_pipeline/rag_pipeline.py index 097d16e2a7..e438eae295 100644 --- a/api/services/rag_pipeline/rag_pipeline.py +++ b/api/services/rag_pipeline/rag_pipeline.py @@ -1248,12 +1248,14 @@ class RagPipelineService: session.commit() return workflow_node_execution_db_model - def get_recommended_plugins(self) -> dict: + def get_recommended_plugins(self, type) -> dict: # Query active recommended plugins + query = db.session.query(PipelineRecommendedPlugin).where(PipelineRecommendedPlugin.active == True) + if type and type != "all": + query = query.where(PipelineRecommendedPlugin.type == type) + pipeline_recommended_plugins = ( - db.session.query(PipelineRecommendedPlugin) - .where(PipelineRecommendedPlugin.active == True) - .order_by(PipelineRecommendedPlugin.position.asc()) + query.order_by(PipelineRecommendedPlugin.position.asc()) .all() ) From ff7344f3d375259c0b49a5202046c7bd40bff43f Mon Sep 17 00:00:00 2001 From: FFXN Date: Mon, 15 Dec 2025 16:38:44 +0800 Subject: [PATCH 02/26] Add "type" field to PipelineRecommendedPlugin model; Add query param "type" to recommended-plugins api. --- ..._12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py | 1 - 1 file changed, 1 deletion(-) diff --git a/api/migrations/versions/2025_12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py b/api/migrations/versions/2025_12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py index ca3172665b..40bbbded1d 100644 --- a/api/migrations/versions/2025_12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py +++ b/api/migrations/versions/2025_12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py @@ -40,7 +40,6 @@ def upgrade(): # ### end Alembic commands ### -# “推荐插件”model添加type字段;查询推荐列表接口添加type参数 def downgrade(): # ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table('providers', schema=None) as batch_op: From a1a3fa02836175f51610a74c5e5d2061301c2be7 Mon Sep 17 00:00:00 2001 From: FFXN Date: Mon, 15 Dec 2025 16:44:32 +0800 Subject: [PATCH 03/26] Add "type" field to PipelineRecommendedPlugin model; Add query param "type" to recommended-plugins api. 
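A note for reviewers on how the new query parameter is meant to be exercised. The sketch below is illustrative only: the endpoint path comes from the @console_ns.route declaration in this series, while the /console/api prefix, the base URL, the bearer-token header, and the example type value are assumptions rather than part of the change.

import httpx

CONSOLE_API = "http://localhost:5001/console/api"  # assumption: local dev console API


def fetch_recommended_plugins(plugin_type: str = "all", token: str = "") -> dict:
    """Call GET /rag/pipelines/recommended-plugins?type=<plugin_type>.

    With the default "all" the service returns every active recommended plugin;
    any other value is matched against the new `type` column added in this series.
    """
    response = httpx.get(
        f"{CONSOLE_API}/rag/pipelines/recommended-plugins",
        params={"type": plugin_type},
        headers={"Authorization": f"Bearer {token}"},  # assumption: console auth scheme
        timeout=10.0,
    )
    response.raise_for_status()
    return response.json()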
--- .../console/datasets/rag_pipeline/rag_pipeline_workflow.py | 4 ++-- api/services/rag_pipeline/rag_pipeline.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py index 6b86846e3e..602c24d3b8 100644 --- a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py +++ b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py @@ -1007,8 +1007,8 @@ class RagPipelineRecommendedPluginApi(Resource): parser = reqparse.RequestParser() parser.add_argument('type', type=str, location='args', required=False, default='all') args = parser.parse_args() - plugin_type = args["type"] + type = args["type"] rag_pipeline_service = RagPipelineService() - recommended_plugins = rag_pipeline_service.get_recommended_plugins(plugin_type) + recommended_plugins = rag_pipeline_service.get_recommended_plugins(type) return recommended_plugins diff --git a/api/services/rag_pipeline/rag_pipeline.py b/api/services/rag_pipeline/rag_pipeline.py index e438eae295..3065651c9d 100644 --- a/api/services/rag_pipeline/rag_pipeline.py +++ b/api/services/rag_pipeline/rag_pipeline.py @@ -1248,7 +1248,7 @@ class RagPipelineService: session.commit() return workflow_node_execution_db_model - def get_recommended_plugins(self, type) -> dict: + def get_recommended_plugins(self, type: str) -> dict: # Query active recommended plugins query = db.session.query(PipelineRecommendedPlugin).where(PipelineRecommendedPlugin.active == True) if type and type != "all": From 2f54965a7286b22dec1d6b2d9f95e74b080a34be Mon Sep 17 00:00:00 2001 From: FFXN Date: Tue, 16 Dec 2025 10:43:45 +0800 Subject: [PATCH 04/26] Add "type" field to PipelineRecommendedPlugin model; Add query param "type" to recommended-plugins api. --- .../console/datasets/rag_pipeline/rag_pipeline_workflow.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py index 602c24d3b8..f0e78488d1 100644 --- a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py +++ b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py @@ -997,7 +997,7 @@ class RagPipelineDatasourceVariableApi(Resource): ) return workflow_node_execution - +from flask_restx import reqparse @console_ns.route("/rag/pipelines/recommended-plugins") class RagPipelineRecommendedPluginApi(Resource): @setup_required From 8dad6b6a6de08285a1e219ebee9d8f5251ff2f50 Mon Sep 17 00:00:00 2001 From: FFXN Date: Tue, 16 Dec 2025 14:34:59 +0800 Subject: [PATCH 05/26] Add "type" field to PipelineRecommendedPlugin model; Add query param "type" to recommended-plugins api. 
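One remark on the migration that follows: adding the column as NOT NULL together with server_default='tool' fills existing rows in a single DDL statement, which is fine for a small table like pipeline_recommended_plugins. For comparison, a hedged sketch of the more conservative add-nullable / backfill / tighten pattern sometimes preferred for large tables is shown here; the revision identifiers are placeholders, not real revisions from this series.

import sqlalchemy as sa
from alembic import op

revision = "example_backfill"    # placeholder
down_revision = "previous_rev"   # placeholder
branch_labels = None
depends_on = None


def upgrade():
    # 1) Add the column as nullable so the DDL stays cheap on large tables.
    with op.batch_alter_table("pipeline_recommended_plugins") as batch_op:
        batch_op.add_column(sa.Column("type", sa.String(length=50), nullable=True))
    # 2) Backfill existing rows with the default value.
    op.execute("UPDATE pipeline_recommended_plugins SET type = 'tool' WHERE type IS NULL")
    # 3) Tighten the constraint once every row has a value.
    with op.batch_alter_table("pipeline_recommended_plugins") as batch_op:
        batch_op.alter_column("type", existing_type=sa.String(length=50), nullable=False)


def downgrade():
    with op.batch_alter_table("pipeline_recommended_plugins") as batch_op:
        batch_op.drop_column("type")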
--- ...8_add_type_column_not_null_default_tool.py | 33 +++++++++++++++++++ api/models/dataset.py | 2 +- 2 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 api/migrations/versions/2025_12_16_1424-2536f83803a8_add_type_column_not_null_default_tool.py diff --git a/api/migrations/versions/2025_12_16_1424-2536f83803a8_add_type_column_not_null_default_tool.py b/api/migrations/versions/2025_12_16_1424-2536f83803a8_add_type_column_not_null_default_tool.py new file mode 100644 index 0000000000..20ca06d200 --- /dev/null +++ b/api/migrations/versions/2025_12_16_1424-2536f83803a8_add_type_column_not_null_default_tool.py @@ -0,0 +1,33 @@ +"""add type column not null default tool + +Revision ID: 2536f83803a8 +Revises: 6bb0832495f0 +Create Date: 2025-12-16 14:24:40.740253 + +""" +from alembic import op +import models as models +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '2536f83803a8' +down_revision = '6bb0832495f0' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: + batch_op.add_column(sa.Column('type', sa.String(length=50), nullable=False, server_default='tool')) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: + batch_op.drop_column('type') + + # ### end Alembic commands ### diff --git a/api/models/dataset.py b/api/models/dataset.py index c4b4c6d985..6ec5d7277a 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -1458,7 +1458,7 @@ class PipelineRecommendedPlugin(TypeBase): ) plugin_id: Mapped[str] = mapped_column(LongText, nullable=False) provider_name: Mapped[str] = mapped_column(LongText, nullable=False) - type: Mapped[str] = mapped_column(sa.String(50), nullable=True) + type: Mapped[str] = mapped_column(sa.String(50), nullable=False, default="tool") position: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=0) active: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, default=True) created_at: Mapped[datetime] = mapped_column( From 407e1c827631e147f33e480cb96aa0c1547f8ae2 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Tue, 16 Dec 2025 08:14:05 +0000 Subject: [PATCH 06/26] [autofix.ci] apply automated fixes --- .../console/datasets/rag_pipeline/rag_pipeline_workflow.py | 5 ++++- api/services/rag_pipeline/rag_pipeline.py | 5 +---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py index 139a46e0f5..81e344250d 100644 --- a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py +++ b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py @@ -968,7 +968,10 @@ class RagPipelineDatasourceVariableApi(Resource): ) return workflow_node_execution + from flask_restx import reqparse + + @console_ns.route("/rag/pipelines/recommended-plugins") class RagPipelineRecommendedPluginApi(Resource): @setup_required @@ -976,7 +979,7 @@ class RagPipelineRecommendedPluginApi(Resource): @account_initialization_required def get(self): parser = reqparse.RequestParser() - parser.add_argument('type', type=str, location='args', required=False, default='all') + 
parser.add_argument("type", type=str, location="args", required=False, default="all") args = parser.parse_args() type = args["type"] diff --git a/api/services/rag_pipeline/rag_pipeline.py b/api/services/rag_pipeline/rag_pipeline.py index 3065651c9d..f53448e7fe 100644 --- a/api/services/rag_pipeline/rag_pipeline.py +++ b/api/services/rag_pipeline/rag_pipeline.py @@ -1254,10 +1254,7 @@ class RagPipelineService: if type and type != "all": query = query.where(PipelineRecommendedPlugin.type == type) - pipeline_recommended_plugins = ( - query.order_by(PipelineRecommendedPlugin.position.asc()) - .all() - ) + pipeline_recommended_plugins = query.order_by(PipelineRecommendedPlugin.position.asc()).all() if not pipeline_recommended_plugins: return { From 15ff8efb15b5ad5acddbbe71feb2ea3b1f380823 Mon Sep 17 00:00:00 2001 From: FFXN Date: Tue, 16 Dec 2025 16:20:04 +0800 Subject: [PATCH 07/26] merge alembic head --- ...025_12_16_1619-5d8b3015e29b_merge_heads.py | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 api/migrations/versions/2025_12_16_1619-5d8b3015e29b_merge_heads.py diff --git a/api/migrations/versions/2025_12_16_1619-5d8b3015e29b_merge_heads.py b/api/migrations/versions/2025_12_16_1619-5d8b3015e29b_merge_heads.py new file mode 100644 index 0000000000..7b9b8a79f9 --- /dev/null +++ b/api/migrations/versions/2025_12_16_1619-5d8b3015e29b_merge_heads.py @@ -0,0 +1,25 @@ +"""merge heads + +Revision ID: 5d8b3015e29b +Revises: d57accd375ae, 2536f83803a8 +Create Date: 2025-12-16 16:19:09.076002 + +""" +from alembic import op +import models as models +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '5d8b3015e29b' +down_revision = ('d57accd375ae', '2536f83803a8') +branch_labels = None +depends_on = None + + +def upgrade(): + pass + + +def downgrade(): + pass From 3782f17dc76ddcb9de9055b38116a3ea520fc2a9 Mon Sep 17 00:00:00 2001 From: FFXN Date: Tue, 16 Dec 2025 16:35:15 +0800 Subject: [PATCH 08/26] Optimize code. 
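Background on the 5d8b3015e29b revision introduced in the previous patch: two Alembic heads existed at the same time (d57accd375ae from the main branch and 2536f83803a8 from this branch), and a merge revision with a tuple down_revision joins them back into a single head. A minimal sketch of how such a file can be generated, assuming a plain alembic.ini in the working directory (Dify normally drives Alembic through Flask-Migrate, which wraps the same command module):

from alembic import command
from alembic.config import Config


def merge_migration_heads(ini_path: str = "alembic.ini") -> None:
    cfg = Config(ini_path)
    # List current heads; more than one entry means a merge (or rebase) is needed.
    command.heads(cfg, verbose=True)
    # Create an empty revision whose down_revision is the tuple of all heads.
    command.merge(cfg, "heads", message="merge heads")

Patch 10 later removes this merge revision by rebasing the type-column migration directly onto d57accd375ae, keeping the history linear.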
--- .../console/datasets/rag_pipeline/rag_pipeline_workflow.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py index 81e344250d..de55f3a48d 100644 --- a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py +++ b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py @@ -5,6 +5,7 @@ from uuid import UUID from flask import abort, request from flask_restx import Resource, marshal_with # type: ignore +from flask_restx import reqparse from pydantic import BaseModel, Field from sqlalchemy.orm import Session from werkzeug.exceptions import Forbidden, InternalServerError, NotFound @@ -968,10 +969,6 @@ class RagPipelineDatasourceVariableApi(Resource): ) return workflow_node_execution - -from flask_restx import reqparse - - @console_ns.route("/rag/pipelines/recommended-plugins") class RagPipelineRecommendedPluginApi(Resource): @setup_required From 3781c2a0257c11a8eb6a1950d1f4ae456645af7c Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Tue, 16 Dec 2025 08:37:32 +0000 Subject: [PATCH 09/26] [autofix.ci] apply automated fixes --- .../console/datasets/rag_pipeline/rag_pipeline_workflow.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py index de55f3a48d..46d67f0581 100644 --- a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py +++ b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py @@ -4,8 +4,7 @@ from typing import Any, Literal, cast from uuid import UUID from flask import abort, request -from flask_restx import Resource, marshal_with # type: ignore -from flask_restx import reqparse +from flask_restx import Resource, marshal_with, reqparse # type: ignore from pydantic import BaseModel, Field from sqlalchemy.orm import Session from werkzeug.exceptions import Forbidden, InternalServerError, NotFound @@ -969,6 +968,7 @@ class RagPipelineDatasourceVariableApi(Resource): ) return workflow_node_execution + @console_ns.route("/rag/pipelines/recommended-plugins") class RagPipelineRecommendedPluginApi(Resource): @setup_required From ec4f885dad061215223d49163b53b511dac68844 Mon Sep 17 00:00:00 2001 From: FFXN Date: Tue, 16 Dec 2025 18:19:24 +0800 Subject: [PATCH 10/26] update migration script. 
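Because this patch rewrites the migration history (6bb0832495f0 and the 5d8b3015e29b merge revision are dropped in favour of a single 03ea244985ce based on d57accd375ae), an upgrade/downgrade round trip against a disposable database is a cheap sanity check. A minimal sketch, assuming a standard alembic.ini pointing at a development database:

from alembic import command
from alembic.config import Config


def check_round_trip(ini_path: str = "alembic.ini") -> None:
    cfg = Config(ini_path)
    command.upgrade(cfg, "head")    # applies 03ea244985ce, adding the type column
    command.downgrade(cfg, "-1")    # reverts it, dropping the type column
    command.upgrade(cfg, "head")    # re-applies to leave the schema at head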
--- ...495f0_alter_table_pipeline_recommended_.py | 64 ------------------- ...025_12_16_1619-5d8b3015e29b_merge_heads.py | 25 -------- ..._add_type_column_not_null_default_tool.py} | 16 +++-- api/models/dataset.py | 2 +- 4 files changed, 10 insertions(+), 97 deletions(-) delete mode 100644 api/migrations/versions/2025_12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py delete mode 100644 api/migrations/versions/2025_12_16_1619-5d8b3015e29b_merge_heads.py rename api/migrations/versions/{2025_12_16_1424-2536f83803a8_add_type_column_not_null_default_tool.py => 2025_12_16_1817-03ea244985ce_add_type_column_not_null_default_tool.py} (74%) diff --git a/api/migrations/versions/2025_12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py b/api/migrations/versions/2025_12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py deleted file mode 100644 index 40bbbded1d..0000000000 --- a/api/migrations/versions/2025_12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py +++ /dev/null @@ -1,64 +0,0 @@ -"""Alter table pipeline_recommended_plugins add column type - -Revision ID: 6bb0832495f0 -Revises: 7bb281b7a422 -Create Date: 2025-12-15 16:14:38.482072 - -""" -from alembic import op -import models as models -import sqlalchemy as sa -from sqlalchemy.dialects import postgresql - -# revision identifiers, used by Alembic. -revision = '6bb0832495f0' -down_revision = '7bb281b7a422' -branch_labels = None -depends_on = None - - -def upgrade(): - # ### commands auto generated by Alembic - please adjust! ### - with op.batch_alter_table('app_triggers', schema=None) as batch_op: - batch_op.alter_column('provider_name', - existing_type=sa.VARCHAR(length=255), - nullable=False, - existing_server_default=sa.text("''::character varying")) - - with op.batch_alter_table('operation_logs', schema=None) as batch_op: - batch_op.alter_column('content', - existing_type=postgresql.JSON(astext_type=sa.Text()), - nullable=False) - - with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: - batch_op.add_column(sa.Column('type', sa.String(length=50), nullable=True)) - - with op.batch_alter_table('providers', schema=None) as batch_op: - batch_op.alter_column('quota_used', - existing_type=sa.BIGINT(), - nullable=False) - - # ### end Alembic commands ### - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! 
### - with op.batch_alter_table('providers', schema=None) as batch_op: - batch_op.alter_column('quota_used', - existing_type=sa.BIGINT(), - nullable=True) - - with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: - batch_op.drop_column('type') - - with op.batch_alter_table('operation_logs', schema=None) as batch_op: - batch_op.alter_column('content', - existing_type=postgresql.JSON(astext_type=sa.Text()), - nullable=True) - - with op.batch_alter_table('app_triggers', schema=None) as batch_op: - batch_op.alter_column('provider_name', - existing_type=sa.VARCHAR(length=255), - nullable=True, - existing_server_default=sa.text("''::character varying")) - - # ### end Alembic commands ### diff --git a/api/migrations/versions/2025_12_16_1619-5d8b3015e29b_merge_heads.py b/api/migrations/versions/2025_12_16_1619-5d8b3015e29b_merge_heads.py deleted file mode 100644 index 7b9b8a79f9..0000000000 --- a/api/migrations/versions/2025_12_16_1619-5d8b3015e29b_merge_heads.py +++ /dev/null @@ -1,25 +0,0 @@ -"""merge heads - -Revision ID: 5d8b3015e29b -Revises: d57accd375ae, 2536f83803a8 -Create Date: 2025-12-16 16:19:09.076002 - -""" -from alembic import op -import models as models -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = '5d8b3015e29b' -down_revision = ('d57accd375ae', '2536f83803a8') -branch_labels = None -depends_on = None - - -def upgrade(): - pass - - -def downgrade(): - pass diff --git a/api/migrations/versions/2025_12_16_1424-2536f83803a8_add_type_column_not_null_default_tool.py b/api/migrations/versions/2025_12_16_1817-03ea244985ce_add_type_column_not_null_default_tool.py similarity index 74% rename from api/migrations/versions/2025_12_16_1424-2536f83803a8_add_type_column_not_null_default_tool.py rename to api/migrations/versions/2025_12_16_1817-03ea244985ce_add_type_column_not_null_default_tool.py index 20ca06d200..6b504acbc9 100644 --- a/api/migrations/versions/2025_12_16_1424-2536f83803a8_add_type_column_not_null_default_tool.py +++ b/api/migrations/versions/2025_12_16_1817-03ea244985ce_add_type_column_not_null_default_tool.py @@ -1,32 +1,34 @@ """add type column not null default tool -Revision ID: 2536f83803a8 -Revises: 6bb0832495f0 -Create Date: 2025-12-16 14:24:40.740253 +Revision ID: 03ea244985ce +Revises: d57accd375ae +Create Date: 2025-12-16 18:17:12.193877 """ from alembic import op import models as models import sqlalchemy as sa - +from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. -revision = '2536f83803a8' -down_revision = '6bb0832495f0' +revision = '03ea244985ce' +down_revision = 'd57accd375ae' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: - batch_op.add_column(sa.Column('type', sa.String(length=50), nullable=False, server_default='tool')) + batch_op.add_column(sa.Column('type', sa.String(length=50), server_default=sa.text("'tool'"), nullable=False)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: batch_op.drop_column('type') diff --git a/api/models/dataset.py b/api/models/dataset.py index ad9c627f17..445ac6086f 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -1532,7 +1532,7 @@ class PipelineRecommendedPlugin(TypeBase): ) plugin_id: Mapped[str] = mapped_column(LongText, nullable=False) provider_name: Mapped[str] = mapped_column(LongText, nullable=False) - type: Mapped[str] = mapped_column(sa.String(50), nullable=False, default="tool") + type: Mapped[str] = mapped_column(sa.String(50), nullable=False, server_default=sa.text("'tool'")) position: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=0) active: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, default=True) created_at: Mapped[datetime] = mapped_column( From f4a7efde3d3dd312566c5bb10b24290bdbdfaaf4 Mon Sep 17 00:00:00 2001 From: FFXN Date: Tue, 16 Dec 2025 18:30:12 +0800 Subject: [PATCH 11/26] update migration script. --- ...e_add_type_column_not_null_default_tool.py | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/api/migrations/versions/2025_12_16_1817-03ea244985ce_add_type_column_not_null_default_tool.py b/api/migrations/versions/2025_12_16_1817-03ea244985ce_add_type_column_not_null_default_tool.py index 6b504acbc9..3f20af7dcd 100644 --- a/api/migrations/versions/2025_12_16_1817-03ea244985ce_add_type_column_not_null_default_tool.py +++ b/api/migrations/versions/2025_12_16_1817-03ea244985ce_add_type_column_not_null_default_tool.py @@ -19,17 +19,47 @@ depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('app_triggers', schema=None) as batch_op: + batch_op.alter_column('provider_name', + existing_type=sa.VARCHAR(length=255), + nullable=False, + existing_server_default=sa.text("''::character varying")) + + with op.batch_alter_table('operation_logs', schema=None) as batch_op: + batch_op.alter_column('content', + existing_type=postgresql.JSON(astext_type=sa.Text()), + nullable=False) with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: batch_op.add_column(sa.Column('type', sa.String(length=50), server_default=sa.text("'tool'"), nullable=False)) + with op.batch_alter_table('providers', schema=None) as batch_op: + batch_op.alter_column('quota_used', + existing_type=sa.BIGINT(), + nullable=False) + # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table('providers', schema=None) as batch_op: + batch_op.alter_column('quota_used', + existing_type=sa.BIGINT(), + nullable=True) with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: batch_op.drop_column('type') + with op.batch_alter_table('operation_logs', schema=None) as batch_op: + batch_op.alter_column('content', + existing_type=postgresql.JSON(astext_type=sa.Text()), + nullable=True) + + with op.batch_alter_table('app_triggers', schema=None) as batch_op: + batch_op.alter_column('provider_name', + existing_type=sa.VARCHAR(length=255), + nullable=True, + existing_server_default=sa.text("''::character varying")) + # ### end Alembic commands ### From 1fcf6e4943f9d4e64af0b7b07e8ebf1df002c14e Mon Sep 17 00:00:00 2001 From: Jyong <76649700+JohnJyong@users.noreply.github.com> Date: Wed, 17 Dec 2025 11:12:59 +0800 Subject: [PATCH 12/26] Update 2025_12_16_1817-03ea244985ce_add_type_column_not_null_default_tool.py --- ...e_add_type_column_not_null_default_tool.py | 34 ------------------- 1 file changed, 34 deletions(-) diff --git a/api/migrations/versions/2025_12_16_1817-03ea244985ce_add_type_column_not_null_default_tool.py b/api/migrations/versions/2025_12_16_1817-03ea244985ce_add_type_column_not_null_default_tool.py index 3f20af7dcd..2bdd430e81 100644 --- a/api/migrations/versions/2025_12_16_1817-03ea244985ce_add_type_column_not_null_default_tool.py +++ b/api/migrations/versions/2025_12_16_1817-03ea244985ce_add_type_column_not_null_default_tool.py @@ -19,47 +19,13 @@ depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### - with op.batch_alter_table('app_triggers', schema=None) as batch_op: - batch_op.alter_column('provider_name', - existing_type=sa.VARCHAR(length=255), - nullable=False, - existing_server_default=sa.text("''::character varying")) - - with op.batch_alter_table('operation_logs', schema=None) as batch_op: - batch_op.alter_column('content', - existing_type=postgresql.JSON(astext_type=sa.Text()), - nullable=False) - with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: batch_op.add_column(sa.Column('type', sa.String(length=50), server_default=sa.text("'tool'"), nullable=False)) - - with op.batch_alter_table('providers', schema=None) as batch_op: - batch_op.alter_column('quota_used', - existing_type=sa.BIGINT(), - nullable=False) - # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### - with op.batch_alter_table('providers', schema=None) as batch_op: - batch_op.alter_column('quota_used', - existing_type=sa.BIGINT(), - nullable=True) - with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: batch_op.drop_column('type') - - with op.batch_alter_table('operation_logs', schema=None) as batch_op: - batch_op.alter_column('content', - existing_type=postgresql.JSON(astext_type=sa.Text()), - nullable=True) - - with op.batch_alter_table('app_triggers', schema=None) as batch_op: - batch_op.alter_column('provider_name', - existing_type=sa.VARCHAR(length=255), - nullable=True, - existing_server_default=sa.text("''::character varying")) - # ### end Alembic commands ### From 25bfc1cc3b388ee1937dec64a31cac383aefd178 Mon Sep 17 00:00:00 2001 From: FFXN Date: Mon, 12 Jan 2026 16:52:21 +0800 Subject: [PATCH 13/26] feat: implement Summary Index feature. 
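One piece of this patch worth calling out for review: the document list endpoint derives a per-document summary_index_status from the per-segment DocumentSegmentSummary rows, and only when the dataset's summary_index_setting.enable is true and the document's need_summary flag is set; otherwise the field stays null. The decision rule, restated as a small standalone sketch (names here are illustrative; the real logic lives inline in DatasetDocumentListApi.get):

from collections import Counter


def aggregate_summary_status(segment_statuses: list[str]) -> str:
    """Collapse per-segment summary states into one document-level status.

    segment_statuses holds one entry per enabled segment: 'completed',
    'generating', 'error', or 'not_started' when no summary row exists yet.
    Only three document-level states are exposed: GENERATING, COMPLETED, ERROR.
    """
    if not segment_statuses:
        # No segments yet: the document is still waiting for summaries.
        return "GENERATING"
    counts = Counter(segment_statuses)
    if counts["completed"] == len(segment_statuses):
        return "COMPLETED"
    if counts["error"] > 0:
        # Any failed segment surfaces as an error, even if others finished.
        return "ERROR"
    # Remaining segments are generating or not started yet.
    return "GENERATING"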
--- api/controllers/console/datasets/datasets.py | 1 + .../console/datasets/datasets_document.py | 310 ++++++++- .../console/datasets/datasets_segments.py | 54 +- .../console/datasets/hit_testing.py | 43 +- api/core/entities/knowledge_entities.py | 1 + api/core/indexing_runner.py | 10 + api/core/llm_generator/prompts.py | 3 + api/core/rag/datasource/retrieval_service.py | 90 ++- api/core/rag/embedding/retrieval.py | 1 + .../index_processor/index_processor_base.py | 10 + .../processor/paragraph_index_processor.py | 103 ++- .../processor/parent_child_index_processor.py | 24 + .../processor/qa_index_processor.py | 27 +- .../workflow/nodes/document_extractor/node.py | 24 + .../nodes/knowledge_index/entities.py | 2 + .../knowledge_index/knowledge_index_node.py | 286 +++++++- api/extensions/ext_celery.py | 2 + api/fields/dataset_fields.py | 9 + api/fields/document_fields.py | 4 + api/fields/hit_testing_fields.py | 1 + api/fields/segment_fields.py | 1 + ...8-562dcce7d77c_add_summaryindex_feature.py | 69 ++ api/models/dataset.py | 33 + api/services/dataset_service.py | 136 ++++ .../knowledge_entities/knowledge_entities.py | 2 + api/services/summary_index_service.py | 612 ++++++++++++++++++ api/tasks/add_document_to_index_task.py | 12 + api/tasks/delete_segment_from_index_task.py | 2 + api/tasks/disable_segment_from_index_task.py | 11 + api/tasks/disable_segments_from_index_task.py | 17 +- api/tasks/document_indexing_task.py | 55 ++ api/tasks/enable_segment_to_index_task.py | 10 + api/tasks/enable_segments_to_index_task.py | 11 + api/tasks/generate_summary_index_task.py | 113 ++++ api/tasks/regenerate_summary_index_task.py | 219 +++++++ api/tasks/remove_document_from_index_task.py | 14 + 36 files changed, 2290 insertions(+), 32 deletions(-) create mode 100644 api/migrations/versions/2026_01_12_1358-562dcce7d77c_add_summaryindex_feature.py create mode 100644 api/services/summary_index_service.py create mode 100644 api/tasks/generate_summary_index_task.py create mode 100644 api/tasks/regenerate_summary_index_task.py diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index 8c4a4467a7..a322dcfd14 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -146,6 +146,7 @@ class DatasetUpdatePayload(BaseModel): embedding_model: str | None = None embedding_model_provider: str | None = None retrieval_model: dict[str, Any] | None = None + summary_index_setting: dict[str, Any] | None = None partial_member_list: list[str] | None = None external_retrieval_model: dict[str, Any] | None = None external_knowledge_id: str | None = None diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py index 6145da31a5..06c895dc79 100644 --- a/api/controllers/console/datasets/datasets_document.py +++ b/api/controllers/console/datasets/datasets_document.py @@ -39,9 +39,10 @@ from fields.document_fields import ( from libs.datetime_utils import naive_utc_now from libs.login import current_account_with_tenant, login_required from models import DatasetProcessRule, Document, DocumentSegment, UploadFile -from models.dataset import DocumentPipelineExecutionLog +from models.dataset import DocumentPipelineExecutionLog, DocumentSegmentSummary from services.dataset_service import DatasetService, DocumentService from services.entities.knowledge_entities.knowledge_entities import KnowledgeConfig, ProcessRule, RetrievalModel +from tasks.generate_summary_index_task import 
generate_summary_index_task from ..app.error import ( ProviderModelCurrentlyNotSupportError, @@ -104,6 +105,10 @@ class DocumentRenamePayload(BaseModel): name: str +class GenerateSummaryPayload(BaseModel): + document_list: list[str] + + register_schema_models( console_ns, KnowledgeConfig, @@ -111,6 +116,7 @@ register_schema_models( RetrievalModel, DocumentRetryPayload, DocumentRenamePayload, + GenerateSummaryPayload, ) @@ -295,6 +301,97 @@ class DatasetDocumentListApi(Resource): paginated_documents = db.paginate(select=query, page=page, per_page=limit, max_per_page=100, error_out=False) documents = paginated_documents.items + + # Check if dataset has summary index enabled + has_summary_index = ( + dataset.summary_index_setting + and dataset.summary_index_setting.get("enable") is True + ) + + # Filter documents that need summary calculation + documents_need_summary = [doc for doc in documents if doc.need_summary is True] + document_ids_need_summary = [str(doc.id) for doc in documents_need_summary] + + # Calculate summary_index_status for documents that need summary (only if dataset summary index is enabled) + summary_status_map = {} + if has_summary_index and document_ids_need_summary: + # Get all segments for these documents (excluding qa_model and re_segment) + segments = ( + db.session.query(DocumentSegment.id, DocumentSegment.document_id) + .where( + DocumentSegment.document_id.in_(document_ids_need_summary), + DocumentSegment.status != "re_segment", + DocumentSegment.tenant_id == current_tenant_id, + ) + .all() + ) + + # Group segments by document_id + document_segments_map = {} + for segment in segments: + doc_id = str(segment.document_id) + if doc_id not in document_segments_map: + document_segments_map[doc_id] = [] + document_segments_map[doc_id].append(segment.id) + + # Get all summary records for these segments + all_segment_ids = [seg.id for seg in segments] + summaries = {} + if all_segment_ids: + summary_records = ( + db.session.query(DocumentSegmentSummary) + .where( + DocumentSegmentSummary.chunk_id.in_(all_segment_ids), + DocumentSegmentSummary.dataset_id == dataset_id, + DocumentSegmentSummary.enabled == True, # Only count enabled summaries + ) + .all() + ) + summaries = {summary.chunk_id: summary.status for summary in summary_records} + + # Calculate summary_index_status for each document + for doc_id in document_ids_need_summary: + segment_ids = document_segments_map.get(doc_id, []) + if not segment_ids: + # No segments, status is "GENERATING" (waiting to generate) + summary_status_map[doc_id] = "GENERATING" + continue + + # Count summary statuses for this document's segments + status_counts = {"completed": 0, "generating": 0, "error": 0, "not_started": 0} + for segment_id in segment_ids: + status = summaries.get(segment_id, "not_started") + if status in status_counts: + status_counts[status] += 1 + else: + status_counts["not_started"] += 1 + + total_segments = len(segment_ids) + completed_count = status_counts["completed"] + generating_count = status_counts["generating"] + error_count = status_counts["error"] + + # Determine overall status (only three states: GENERATING, COMPLETED, ERROR) + if completed_count == total_segments: + summary_status_map[doc_id] = "COMPLETED" + elif error_count > 0: + # Has errors (even if some are completed or generating) + summary_status_map[doc_id] = "ERROR" + elif generating_count > 0 or status_counts["not_started"] > 0: + # Still generating or not started + summary_status_map[doc_id] = "GENERATING" + else: + # Default to generating + 
summary_status_map[doc_id] = "GENERATING" + + # Add summary_index_status to each document + for document in documents: + if has_summary_index and document.need_summary is True: + document.summary_index_status = summary_status_map.get(str(document.id), "GENERATING") + else: + # Return null if summary index is not enabled or document doesn't need summary + document.summary_index_status = None + if fetch: for document in documents: completed_segments = ( @@ -393,6 +490,7 @@ class DatasetDocumentListApi(Resource): return {"result": "success"}, 204 + @console_ns.route("/datasets/init") class DatasetInitApi(Resource): @console_ns.doc("init_dataset") @@ -780,6 +878,7 @@ class DocumentApi(DocumentResource): "display_status": document.display_status, "doc_form": document.doc_form, "doc_language": document.doc_language, + "need_summary": document.need_summary if document.need_summary is not None else False, } else: dataset_process_rules = DatasetService.get_process_rules(dataset_id) @@ -815,6 +914,7 @@ class DocumentApi(DocumentResource): "display_status": document.display_status, "doc_form": document.doc_form, "doc_language": document.doc_language, + "need_summary": document.need_summary if document.need_summary is not None else False, } return response, 200 @@ -1182,3 +1282,211 @@ class DocumentPipelineExecutionLogApi(DocumentResource): "input_data": log.input_data, "datasource_node_id": log.datasource_node_id, }, 200 + + +@console_ns.route("/datasets//documents/generate-summary") +class DocumentGenerateSummaryApi(Resource): + @console_ns.doc("generate_summary_for_documents") + @console_ns.doc(description="Generate summary index for documents") + @console_ns.doc(params={"dataset_id": "Dataset ID"}) + @console_ns.expect(console_ns.models[GenerateSummaryPayload.__name__]) + @console_ns.response(200, "Summary generation started successfully") + @console_ns.response(400, "Invalid request or dataset configuration") + @console_ns.response(403, "Permission denied") + @console_ns.response(404, "Dataset not found") + @setup_required + @login_required + @account_initialization_required + @cloud_edition_billing_rate_limit_check("knowledge") + def post(self, dataset_id): + """ + Generate summary index for specified documents. + + This endpoint checks if the dataset configuration supports summary generation + (indexing_technique must be 'high_quality' and summary_index_setting.enable must be true), + then asynchronously generates summary indexes for the provided documents. + """ + current_user, _ = current_account_with_tenant() + dataset_id = str(dataset_id) + + # Get dataset + dataset = DatasetService.get_dataset(dataset_id) + if not dataset: + raise NotFound("Dataset not found.") + + # Check permissions + if not current_user.is_dataset_editor: + raise Forbidden() + + try: + DatasetService.check_dataset_permission(dataset, current_user) + except services.errors.account.NoPermissionError as e: + raise Forbidden(str(e)) + + # Validate request payload + payload = GenerateSummaryPayload.model_validate(console_ns.payload or {}) + document_list = payload.document_list + + if not document_list: + raise ValueError("document_list cannot be empty.") + + # Check if dataset configuration supports summary generation + if dataset.indexing_technique != "high_quality": + raise ValueError( + f"Summary generation is only available for 'high_quality' indexing technique. 
" + f"Current indexing technique: {dataset.indexing_technique}" + ) + + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + raise ValueError( + "Summary index is not enabled for this dataset. " + "Please enable it in the dataset settings." + ) + + # Verify all documents exist and belong to the dataset + documents = ( + db.session.query(Document) + .filter( + Document.id.in_(document_list), + Document.dataset_id == dataset_id, + ) + .all() + ) + + if len(documents) != len(document_list): + found_ids = {doc.id for doc in documents} + missing_ids = set(document_list) - found_ids + raise NotFound(f"Some documents not found: {list(missing_ids)}") + + # Dispatch async tasks for each document + for document in documents: + # Skip qa_model documents as they don't generate summaries + if document.doc_form == "qa_model": + logger.info( + f"Skipping summary generation for qa_model document {document.id}" + ) + continue + + # Dispatch async task + generate_summary_index_task(dataset_id, document.id) + logger.info( + f"Dispatched summary generation task for document {document.id} in dataset {dataset_id}" + ) + + return {"result": "success"}, 200 + + +@console_ns.route("/datasets//documents//summary-status") +class DocumentSummaryStatusApi(DocumentResource): + @console_ns.doc("get_document_summary_status") + @console_ns.doc(description="Get summary index generation status for a document") + @console_ns.doc(params={"dataset_id": "Dataset ID", "document_id": "Document ID"}) + @console_ns.response(200, "Summary status retrieved successfully") + @console_ns.response(404, "Document not found") + @setup_required + @login_required + @account_initialization_required + def get(self, dataset_id, document_id): + """ + Get summary index generation status for a document. 
+ + Returns: + - total_segments: Total number of segments in the document + - summary_status: Dictionary with status counts + - completed: Number of summaries completed + - generating: Number of summaries being generated + - error: Number of summaries with errors + - not_started: Number of segments without summary records + - summaries: List of summary records with status and content preview + """ + current_user, _ = current_account_with_tenant() + dataset_id = str(dataset_id) + document_id = str(document_id) + + # Get document + document = self.get_document(dataset_id, document_id) + + # Get dataset + dataset = DatasetService.get_dataset(dataset_id) + if not dataset: + raise NotFound("Dataset not found.") + + # Check permissions + try: + DatasetService.check_dataset_permission(dataset, current_user) + except services.errors.account.NoPermissionError as e: + raise Forbidden(str(e)) + + # Get all segments for this document + segments = ( + db.session.query(DocumentSegment) + .filter( + DocumentSegment.document_id == document_id, + DocumentSegment.dataset_id == dataset_id, + DocumentSegment.status == "completed", + DocumentSegment.enabled == True, + ) + .all() + ) + + total_segments = len(segments) + + # Get all summary records for these segments + segment_ids = [segment.id for segment in segments] + summaries = [] + if segment_ids: + summaries = ( + db.session.query(DocumentSegmentSummary) + .filter( + DocumentSegmentSummary.document_id == document_id, + DocumentSegmentSummary.dataset_id == dataset_id, + DocumentSegmentSummary.chunk_id.in_(segment_ids), + DocumentSegmentSummary.enabled == True, # Only return enabled summaries + ) + .all() + ) + + # Create a mapping of chunk_id to summary + summary_map = {summary.chunk_id: summary for summary in summaries} + + # Count statuses + status_counts = { + "completed": 0, + "generating": 0, + "error": 0, + "not_started": 0, + } + + summary_list = [] + for segment in segments: + summary = summary_map.get(segment.id) + if summary: + status = summary.status + status_counts[status] = status_counts.get(status, 0) + 1 + summary_list.append({ + "segment_id": segment.id, + "segment_position": segment.position, + "status": summary.status, + "summary_preview": summary.summary_content[:100] + "..." 
if summary.summary_content and len(summary.summary_content) > 100 else summary.summary_content, + "error": summary.error, + "created_at": int(summary.created_at.timestamp()) if summary.created_at else None, + "updated_at": int(summary.updated_at.timestamp()) if summary.updated_at else None, + }) + else: + status_counts["not_started"] += 1 + summary_list.append({ + "segment_id": segment.id, + "segment_position": segment.position, + "status": "not_started", + "summary_preview": None, + "error": None, + "created_at": None, + "updated_at": None, + }) + + return { + "total_segments": total_segments, + "summary_status": status_counts, + "summaries": summary_list, + }, 200 diff --git a/api/controllers/console/datasets/datasets_segments.py b/api/controllers/console/datasets/datasets_segments.py index e73abc2555..74a60edb07 100644 --- a/api/controllers/console/datasets/datasets_segments.py +++ b/api/controllers/console/datasets/datasets_segments.py @@ -29,7 +29,7 @@ from extensions.ext_database import db from extensions.ext_redis import redis_client from fields.segment_fields import child_chunk_fields, segment_fields from libs.login import current_account_with_tenant, login_required -from models.dataset import ChildChunk, DocumentSegment +from models.dataset import ChildChunk, DocumentSegment, DocumentSegmentSummary from models.model import UploadFile from services.dataset_service import DatasetService, DocumentService, SegmentService from services.entities.knowledge_entities.knowledge_entities import ChildChunkUpdateArgs, SegmentUpdateArgs @@ -38,6 +38,23 @@ from services.errors.chunk import ChildChunkIndexingError as ChildChunkIndexingS from tasks.batch_create_segment_to_index_task import batch_create_segment_to_index_task +def _get_segment_with_summary(segment, dataset_id): + """Helper function to marshal segment and add summary information.""" + segment_dict = marshal(segment, segment_fields) + # Query summary for this segment (only enabled summaries) + summary = ( + db.session.query(DocumentSegmentSummary) + .where( + DocumentSegmentSummary.chunk_id == segment.id, + DocumentSegmentSummary.dataset_id == dataset_id, + DocumentSegmentSummary.enabled == True, # Only return enabled summaries + ) + .first() + ) + segment_dict["summary"] = summary.summary_content if summary else None + return segment_dict + + class SegmentListQuery(BaseModel): limit: int = Field(default=20, ge=1, le=100) status: list[str] = Field(default_factory=list) @@ -60,6 +77,7 @@ class SegmentUpdatePayload(BaseModel): keywords: list[str] | None = None regenerate_child_chunks: bool = False attachment_ids: list[str] | None = None + summary: str | None = None # Summary content for summary index class BatchImportPayload(BaseModel): @@ -153,8 +171,34 @@ class DatasetDocumentSegmentListApi(Resource): segments = db.paginate(select=query, page=page, per_page=limit, max_per_page=100, error_out=False) + # Query summaries for all segments in this page (batch query for efficiency) + segment_ids = [segment.id for segment in segments.items] + summaries = {} + if segment_ids: + summary_records = ( + db.session.query(DocumentSegmentSummary) + .where( + DocumentSegmentSummary.chunk_id.in_(segment_ids), + DocumentSegmentSummary.dataset_id == dataset_id, + ) + .all() + ) + # Only include enabled summaries + summaries = { + summary.chunk_id: summary.summary_content + for summary in summary_records + if summary.enabled is True + } + + # Add summary to each segment + segments_with_summary = [] + for segment in segments.items: + segment_dict = 
marshal(segment, segment_fields) + segment_dict["summary"] = summaries.get(segment.id) + segments_with_summary.append(segment_dict) + response = { - "data": marshal(segments.items, segment_fields), + "data": segments_with_summary, "limit": limit, "total": segments.total, "total_pages": segments.pages, @@ -300,7 +344,7 @@ class DatasetDocumentSegmentAddApi(Resource): payload_dict = payload.model_dump(exclude_none=True) SegmentService.segment_create_args_validate(payload_dict, document) segment = SegmentService.create_segment(payload_dict, document, dataset) - return {"data": marshal(segment, segment_fields), "doc_form": document.doc_form}, 200 + return {"data": _get_segment_with_summary(segment, dataset_id), "doc_form": document.doc_form}, 200 @console_ns.route("/datasets//documents//segments/") @@ -362,10 +406,12 @@ class DatasetDocumentSegmentUpdateApi(Resource): payload = SegmentUpdatePayload.model_validate(console_ns.payload or {}) payload_dict = payload.model_dump(exclude_none=True) SegmentService.segment_create_args_validate(payload_dict, document) + + # Update segment (summary update with change detection is handled in SegmentService.update_segment) segment = SegmentService.update_segment( SegmentUpdateArgs.model_validate(payload.model_dump(exclude_none=True)), segment, document, dataset ) - return {"data": marshal(segment, segment_fields), "doc_form": document.doc_form}, 200 + return {"data": _get_segment_with_summary(segment, dataset_id), "doc_form": document.doc_form}, 200 @setup_required @login_required diff --git a/api/controllers/console/datasets/hit_testing.py b/api/controllers/console/datasets/hit_testing.py index 932cb4fcce..c947132070 100644 --- a/api/controllers/console/datasets/hit_testing.py +++ b/api/controllers/console/datasets/hit_testing.py @@ -1,4 +1,4 @@ -from flask_restx import Resource +from flask_restx import Resource, fields from controllers.common.schema import register_schema_model from libs.login import login_required @@ -10,17 +10,56 @@ from ..wraps import ( cloud_edition_billing_rate_limit_check, setup_required, ) +from fields.hit_testing_fields import ( + child_chunk_fields, + document_fields, + files_fields, + hit_testing_record_fields, + segment_fields, +) register_schema_model(console_ns, HitTestingPayload) +def _get_or_create_model(model_name: str, field_def): + """Get or create a flask_restx model to avoid dict type issues in Swagger.""" + existing = console_ns.models.get(model_name) + if existing is None: + existing = console_ns.model(model_name, field_def) + return existing + + +# Register models for flask_restx to avoid dict type issues in Swagger +document_model = _get_or_create_model("HitTestingDocument", document_fields) + +segment_fields_copy = segment_fields.copy() +segment_fields_copy["document"] = fields.Nested(document_model) +segment_model = _get_or_create_model("HitTestingSegment", segment_fields_copy) + +child_chunk_model = _get_or_create_model("HitTestingChildChunk", child_chunk_fields) +files_model = _get_or_create_model("HitTestingFile", files_fields) + +hit_testing_record_fields_copy = hit_testing_record_fields.copy() +hit_testing_record_fields_copy["segment"] = fields.Nested(segment_model) +hit_testing_record_fields_copy["child_chunks"] = fields.List(fields.Nested(child_chunk_model)) +hit_testing_record_fields_copy["files"] = fields.List(fields.Nested(files_model)) +hit_testing_record_model = _get_or_create_model("HitTestingRecord", hit_testing_record_fields_copy) + +# Response model for hit testing API +hit_testing_response_fields 
= { + "query": fields.String, + "records": fields.List(fields.Nested(hit_testing_record_model)), +} +hit_testing_response_model = _get_or_create_model("HitTestingResponse", hit_testing_response_fields) + + @console_ns.route("/datasets//hit-testing") class HitTestingApi(Resource, DatasetsHitTestingBase): @console_ns.doc("test_dataset_retrieval") @console_ns.doc(description="Test dataset knowledge retrieval") @console_ns.doc(params={"dataset_id": "Dataset ID"}) @console_ns.expect(console_ns.models[HitTestingPayload.__name__]) - @console_ns.response(200, "Hit testing completed successfully") + @console_ns.response(200, "Hit testing completed successfully", model=hit_testing_response_model) @console_ns.response(404, "Dataset not found") @console_ns.response(400, "Invalid parameters") @setup_required diff --git a/api/core/entities/knowledge_entities.py b/api/core/entities/knowledge_entities.py index d4093b5245..b1ba3c3e2a 100644 --- a/api/core/entities/knowledge_entities.py +++ b/api/core/entities/knowledge_entities.py @@ -3,6 +3,7 @@ from pydantic import BaseModel, Field, field_validator class PreviewDetail(BaseModel): content: str + summary: str | None = None child_chunks: list[str] | None = None diff --git a/api/core/indexing_runner.py b/api/core/indexing_runner.py index 59de4f403d..199d22e8f5 100644 --- a/api/core/indexing_runner.py +++ b/api/core/indexing_runner.py @@ -311,14 +311,18 @@ class IndexingRunner: qa_preview_texts: list[QAPreviewDetail] = [] total_segments = 0 + # doc_form represents the segmentation method (general, parent-child, QA) index_type = doc_form index_processor = IndexProcessorFactory(index_type).init_index_processor() + # one extract_setting is one source document for extract_setting in extract_settings: # extract processing_rule = DatasetProcessRule( mode=tmp_processing_rule["mode"], rules=json.dumps(tmp_processing_rule["rules"]) ) + # Extract document content text_docs = index_processor.extract(extract_setting, process_rule_mode=tmp_processing_rule["mode"]) + # Cleaning and segmentation documents = index_processor.transform( text_docs, current_user=None, @@ -361,6 +365,12 @@ class IndexingRunner: if doc_form and doc_form == "qa_model": return IndexingEstimate(total_segments=total_segments * 20, qa_preview=qa_preview_texts, preview=[]) + + # Generate summary preview + summary_index_setting = tmp_processing_rule["summary_index_setting"] if "summary_index_setting" in tmp_processing_rule else None + if summary_index_setting and summary_index_setting.get('enable') and preview_texts: + preview_texts = index_processor.generate_summary_preview(tenant_id, preview_texts, summary_index_setting) + return IndexingEstimate(total_segments=total_segments, preview=preview_texts) def _extract( diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py index ec2b7f2d44..1fbf279309 100644 --- a/api/core/llm_generator/prompts.py +++ b/api/core/llm_generator/prompts.py @@ -434,3 +434,6 @@ INSTRUCTION_GENERATE_TEMPLATE_PROMPT = """The output of this prompt is not as ex You should edit the prompt according to the IDEAL OUTPUT.""" INSTRUCTION_GENERATE_TEMPLATE_CODE = """Please fix the errors in the {{#error_message#}}.""" + +DEFAULT_GENERATOR_SUMMARY_PROMPT = """ +You are a helpful assistant that summarizes long pieces of text into concise summaries. Given the following text, generate a brief summary that captures the main points and key information. The summary should be clear, concise, and written in complete sentences. 
""" diff --git a/api/core/rag/datasource/retrieval_service.py b/api/core/rag/datasource/retrieval_service.py index a139fba4d0..a4b48ab092 100644 --- a/api/core/rag/datasource/retrieval_service.py +++ b/api/core/rag/datasource/retrieval_service.py @@ -371,6 +371,8 @@ class RetrievalService: include_segment_ids = set() segment_child_map = {} segment_file_map = {} + segment_summary_map = {} # Map segment_id to summary content + summary_segment_ids = set() # Track segments retrieved via summary with Session(bind=db.engine, expire_on_commit=False) as session: # Process documents for document in documents: @@ -398,13 +400,25 @@ class RetrievalService: attachment_info = attachment_info_dict["attachment_info"] segment_id = attachment_info_dict["segment_id"] else: - child_index_node_id = document.metadata.get("doc_id") - child_chunk_stmt = select(ChildChunk).where(ChildChunk.index_node_id == child_index_node_id) - child_chunk = session.scalar(child_chunk_stmt) + # Check if this is a summary document + is_summary = document.metadata.get("is_summary", False) + if is_summary: + # For summary documents, find the original chunk via original_chunk_id + original_chunk_id = document.metadata.get("original_chunk_id") + if not original_chunk_id: + continue + segment_id = original_chunk_id + # Track that this segment was retrieved via summary + summary_segment_ids.add(segment_id) + else: + # For normal documents, find by child chunk index_node_id + child_index_node_id = document.metadata.get("doc_id") + child_chunk_stmt = select(ChildChunk).where(ChildChunk.index_node_id == child_index_node_id) + child_chunk = session.scalar(child_chunk_stmt) - if not child_chunk: - continue - segment_id = child_chunk.segment_id + if not child_chunk: + continue + segment_id = child_chunk.segment_id if not segment_id: continue @@ -489,16 +503,34 @@ class RetrievalService: if segment: segment_file_map[segment.id] = [attachment_info] else: - index_node_id = document.metadata.get("doc_id") - if not index_node_id: - continue - document_segment_stmt = select(DocumentSegment).where( - DocumentSegment.dataset_id == dataset_document.dataset_id, - DocumentSegment.enabled == True, - DocumentSegment.status == "completed", - DocumentSegment.index_node_id == index_node_id, - ) - segment = session.scalar(document_segment_stmt) + # Check if this is a summary document + is_summary = document.metadata.get("is_summary", False) + if is_summary: + # For summary documents, find the original chunk via original_chunk_id + original_chunk_id = document.metadata.get("original_chunk_id") + if not original_chunk_id: + continue + # Track that this segment was retrieved via summary + summary_segment_ids.add(original_chunk_id) + document_segment_stmt = select(DocumentSegment).where( + DocumentSegment.dataset_id == dataset_document.dataset_id, + DocumentSegment.enabled == True, + DocumentSegment.status == "completed", + DocumentSegment.id == original_chunk_id, + ) + segment = session.scalar(document_segment_stmt) + else: + # For normal documents, find by index_node_id + index_node_id = document.metadata.get("doc_id") + if not index_node_id: + continue + document_segment_stmt = select(DocumentSegment).where( + DocumentSegment.dataset_id == dataset_document.dataset_id, + DocumentSegment.enabled == True, + DocumentSegment.status == "completed", + DocumentSegment.index_node_id == index_node_id, + ) + segment = session.scalar(document_segment_stmt) if not segment: continue @@ -526,6 +558,23 @@ class RetrievalService: if record["segment"].id in segment_file_map: 
record["files"] = segment_file_map[record["segment"].id] # type: ignore[assignment] + # Batch query summaries for segments retrieved via summary (only enabled summaries) + if summary_segment_ids: + from models.dataset import DocumentSegmentSummary + + summaries = ( + session.query(DocumentSegmentSummary) + .filter( + DocumentSegmentSummary.chunk_id.in_(summary_segment_ids), + DocumentSegmentSummary.status == "completed", + DocumentSegmentSummary.enabled == True, # Only retrieve enabled summaries + ) + .all() + ) + for summary in summaries: + if summary.summary_content: + segment_summary_map[summary.chunk_id] = summary.summary_content + result = [] for record in records: # Extract segment @@ -549,9 +598,16 @@ class RetrievalService: else None ) + # Extract summary if this segment was retrieved via summary + summary_content = segment_summary_map.get(segment.id) + # Create RetrievalSegments object retrieval_segment = RetrievalSegments( - segment=segment, child_chunks=child_chunks, score=score, files=files + segment=segment, + child_chunks=child_chunks, + score=score, + files=files, + summary=summary_content, ) result.append(retrieval_segment) diff --git a/api/core/rag/embedding/retrieval.py b/api/core/rag/embedding/retrieval.py index b54a37b49e..f6834ab87b 100644 --- a/api/core/rag/embedding/retrieval.py +++ b/api/core/rag/embedding/retrieval.py @@ -20,3 +20,4 @@ class RetrievalSegments(BaseModel): child_chunks: list[RetrievalChildChunk] | None = None score: float | None = None files: list[dict[str, str | int]] | None = None + summary: str | None = None # Summary content if retrieved via summary index diff --git a/api/core/rag/index_processor/index_processor_base.py b/api/core/rag/index_processor/index_processor_base.py index 8a28eb477a..5fc149c1a6 100644 --- a/api/core/rag/index_processor/index_processor_base.py +++ b/api/core/rag/index_processor/index_processor_base.py @@ -13,6 +13,7 @@ from urllib.parse import unquote, urlparse import httpx from configs import dify_config +from core.entities.knowledge_entities import PreviewDetail from core.helper import ssrf_proxy from core.rag.extractor.entity.extract_setting import ExtractSetting from core.rag.index_processor.constant.doc_type import DocType @@ -45,6 +46,15 @@ class BaseIndexProcessor(ABC): def transform(self, documents: list[Document], current_user: Account | None = None, **kwargs) -> list[Document]: raise NotImplementedError + @abstractmethod + def generate_summary_preview(self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict) -> list[PreviewDetail]: + """ + For each segment in preview_texts, generate a summary using LLM and attach it to the segment. + The summary can be stored in a new attribute, e.g., summary. + This method should be implemented by subclasses. 
+ """ + raise NotImplementedError + @abstractmethod def load( self, diff --git a/api/core/rag/index_processor/processor/paragraph_index_processor.py b/api/core/rag/index_processor/processor/paragraph_index_processor.py index cf68cff7dc..89a6d80306 100644 --- a/api/core/rag/index_processor/processor/paragraph_index_processor.py +++ b/api/core/rag/index_processor/processor/paragraph_index_processor.py @@ -1,9 +1,13 @@ """Paragraph index processor.""" +import logging import uuid from collections.abc import Mapping from typing import Any +logger = logging.getLogger(__name__) + +from core.entities.knowledge_entities import PreviewDetail from core.rag.cleaner.clean_processor import CleanProcessor from core.rag.datasource.keyword.keyword_factory import Keyword from core.rag.datasource.retrieval_service import RetrievalService @@ -17,12 +21,19 @@ from core.rag.index_processor.index_processor_base import BaseIndexProcessor from core.rag.models.document import AttachmentDocument, Document, MultimodalGeneralStructureChunk from core.rag.retrieval.retrieval_methods import RetrievalMethod from core.tools.utils.text_processing_utils import remove_leading_symbols +from extensions.ext_database import db from libs import helper from models.account import Account -from models.dataset import Dataset, DatasetProcessRule +from models.dataset import Dataset, DatasetProcessRule, DocumentSegment from models.dataset import Document as DatasetDocument from services.account_service import AccountService from services.entities.knowledge_entities.knowledge_entities import Rule +from services.summary_index_service import SummaryIndexService +from core.llm_generator.prompts import DEFAULT_GENERATOR_SUMMARY_PROMPT +from core.model_runtime.entities.message_entities import UserPromptMessage +from core.model_runtime.entities.model_entities import ModelType +from core.provider_manager import ProviderManager +from core.model_manager import ModelInstance class ParagraphIndexProcessor(BaseIndexProcessor): @@ -108,6 +119,29 @@ class ParagraphIndexProcessor(BaseIndexProcessor): keyword.add_texts(documents) def clean(self, dataset: Dataset, node_ids: list[str] | None, with_keywords: bool = True, **kwargs): + # Note: Summary indexes are now disabled (not deleted) when segments are disabled. + # This method is called for actual deletion scenarios (e.g., when segment is deleted). + # For disable operations, disable_summaries_for_segments is called directly in the task. 
+ # Only delete summaries if explicitly requested (e.g., when segment is actually deleted) + delete_summaries = kwargs.get("delete_summaries", False) + if delete_summaries: + if node_ids: + # Find segments by index_node_id + segments = ( + db.session.query(DocumentSegment) + .filter( + DocumentSegment.dataset_id == dataset.id, + DocumentSegment.index_node_id.in_(node_ids), + ) + .all() + ) + segment_ids = [segment.id for segment in segments] + if segment_ids: + SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids) + else: + # Delete all summaries for the dataset + SummaryIndexService.delete_summaries_for_segments(dataset, None) + if dataset.indexing_technique == "high_quality": vector = Vector(dataset) if node_ids: @@ -227,3 +261,70 @@ class ParagraphIndexProcessor(BaseIndexProcessor): } else: raise ValueError("Chunks is not a list") + + def generate_summary_preview(self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict) -> list[PreviewDetail]: + """ + For each segment, concurrently call generate_summary to generate a summary + and write it to the summary attribute of PreviewDetail. + """ + import concurrent.futures + from flask import current_app + + # Capture Flask app context for worker threads + flask_app = None + try: + flask_app = current_app._get_current_object() # type: ignore + except RuntimeError: + logger.warning("No Flask application context available, summary generation may fail") + + def process(preview: PreviewDetail) -> None: + """Generate summary for a single preview item.""" + try: + if flask_app: + # Ensure Flask app context in worker thread + with flask_app.app_context(): + summary = self.generate_summary(tenant_id, preview.content, summary_index_setting) + preview.summary = summary + else: + # Fallback: try without app context (may fail) + summary = self.generate_summary(tenant_id, preview.content, summary_index_setting) + preview.summary = summary + except Exception as e: + logger.error(f"Failed to generate summary for preview: {str(e)}") + # Don't fail the entire preview if summary generation fails + preview.summary = None + + with concurrent.futures.ThreadPoolExecutor() as executor: + list(executor.map(process, preview_texts)) + return preview_texts + + @staticmethod + def generate_summary(tenant_id: str, text: str, summary_index_setting: dict = None) -> str: + """ + Generate summary for the given text using ModelInstance.invoke_llm and the default or custom summary prompt. 
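+
+        Expected summary_index_setting keys (as read below): "enable", "model_name",
+        "model_provider_name", and an optional "summary_prompt"; an empty prompt falls
+        back to DEFAULT_GENERATOR_SUMMARY_PROMPT.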
+ """ + if not summary_index_setting or not summary_index_setting.get("enable"): + raise ValueError("summary_index_setting is required and must be enabled to generate summary.") + + model_name = summary_index_setting.get("model_name") + model_provider_name = summary_index_setting.get("model_provider_name") + summary_prompt = summary_index_setting.get("summary_prompt") + + # Import default summary prompt + if not summary_prompt: + summary_prompt = DEFAULT_GENERATOR_SUMMARY_PROMPT + + prompt = f"{summary_prompt}\n{text}" + + provider_manager = ProviderManager() + provider_model_bundle = provider_manager.get_provider_model_bundle(tenant_id, model_provider_name, ModelType.LLM) + model_instance = ModelInstance(provider_model_bundle, model_name) + prompt_messages = [UserPromptMessage(content=prompt)] + + result = model_instance.invoke_llm( + prompt_messages=prompt_messages, + model_parameters={}, + stream=False + ) + + return getattr(result.message, "content", "") diff --git a/api/core/rag/index_processor/processor/parent_child_index_processor.py b/api/core/rag/index_processor/processor/parent_child_index_processor.py index 0366f3259f..7e33ef9c02 100644 --- a/api/core/rag/index_processor/processor/parent_child_index_processor.py +++ b/api/core/rag/index_processor/processor/parent_child_index_processor.py @@ -25,6 +25,7 @@ from models.dataset import ChildChunk, Dataset, DatasetProcessRule, DocumentSegm from models.dataset import Document as DatasetDocument from services.account_service import AccountService from services.entities.knowledge_entities.knowledge_entities import ParentMode, Rule +from services.summary_index_service import SummaryIndexService class ParentChildIndexProcessor(BaseIndexProcessor): @@ -135,6 +136,29 @@ class ParentChildIndexProcessor(BaseIndexProcessor): def clean(self, dataset: Dataset, node_ids: list[str] | None, with_keywords: bool = True, **kwargs): # node_ids is segment's node_ids + # Note: Summary indexes are now disabled (not deleted) when segments are disabled. + # This method is called for actual deletion scenarios (e.g., when segment is deleted). + # For disable operations, disable_summaries_for_segments is called directly in the task. 
+ # Only delete summaries if explicitly requested (e.g., when segment is actually deleted) + delete_summaries = kwargs.get("delete_summaries", False) + if delete_summaries: + if node_ids: + # Find segments by index_node_id + segments = ( + db.session.query(DocumentSegment) + .filter( + DocumentSegment.dataset_id == dataset.id, + DocumentSegment.index_node_id.in_(node_ids), + ) + .all() + ) + segment_ids = [segment.id for segment in segments] + if segment_ids: + SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids) + else: + # Delete all summaries for the dataset + SummaryIndexService.delete_summaries_for_segments(dataset, None) + if dataset.indexing_technique == "high_quality": delete_child_chunks = kwargs.get("delete_child_chunks") or False precomputed_child_node_ids = kwargs.get("precomputed_child_node_ids") diff --git a/api/core/rag/index_processor/processor/qa_index_processor.py b/api/core/rag/index_processor/processor/qa_index_processor.py index 1183d5fbd7..b38af0cacb 100644 --- a/api/core/rag/index_processor/processor/qa_index_processor.py +++ b/api/core/rag/index_processor/processor/qa_index_processor.py @@ -25,9 +25,10 @@ from core.rag.retrieval.retrieval_methods import RetrievalMethod from core.tools.utils.text_processing_utils import remove_leading_symbols from libs import helper from models.account import Account -from models.dataset import Dataset +from models.dataset import Dataset, DocumentSegment from models.dataset import Document as DatasetDocument from services.entities.knowledge_entities.knowledge_entities import Rule +from services.summary_index_service import SummaryIndexService logger = logging.getLogger(__name__) @@ -144,6 +145,30 @@ class QAIndexProcessor(BaseIndexProcessor): vector.create_multimodal(multimodal_documents) def clean(self, dataset: Dataset, node_ids: list[str] | None, with_keywords: bool = True, **kwargs): + # Note: Summary indexes are now disabled (not deleted) when segments are disabled. + # This method is called for actual deletion scenarios (e.g., when segment is deleted). + # For disable operations, disable_summaries_for_segments is called directly in the task. 
+ # Note: qa_model doesn't generate summaries, but we clean them for completeness + # Only delete summaries if explicitly requested (e.g., when segment is actually deleted) + delete_summaries = kwargs.get("delete_summaries", False) + if delete_summaries: + if node_ids: + # Find segments by index_node_id + segments = ( + db.session.query(DocumentSegment) + .filter( + DocumentSegment.dataset_id == dataset.id, + DocumentSegment.index_node_id.in_(node_ids), + ) + .all() + ) + segment_ids = [segment.id for segment in segments] + if segment_ids: + SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids) + else: + # Delete all summaries for the dataset + SummaryIndexService.delete_summaries_for_segments(dataset, None) + vector = Vector(dataset) if node_ids: vector.delete_by_ids(node_ids) diff --git a/api/core/workflow/nodes/document_extractor/node.py b/api/core/workflow/nodes/document_extractor/node.py index 14ebd1f9ae..2cbd7952ba 100644 --- a/api/core/workflow/nodes/document_extractor/node.py +++ b/api/core/workflow/nodes/document_extractor/node.py @@ -62,6 +62,21 @@ class DocumentExtractorNode(Node[DocumentExtractorNodeData]): inputs = {"variable_selector": variable_selector} process_data = {"documents": value if isinstance(value, list) else [value]} + # Ensure storage_key is loaded for File objects + files_to_check = value if isinstance(value, list) else [value] + files_needing_storage_key = [ + f for f in files_to_check + if isinstance(f, File) and not f.storage_key and f.related_id + ] + if files_needing_storage_key: + from factories.file_factory import StorageKeyLoader + from extensions.ext_database import db + from sqlalchemy.orm import Session + + with Session(bind=db.engine) as session: + storage_key_loader = StorageKeyLoader(session, tenant_id=self.tenant_id) + storage_key_loader.load_storage_keys(files_needing_storage_key) + try: if isinstance(value, list): extracted_text_list = list(map(_extract_text_from_file, value)) @@ -415,6 +430,15 @@ def _download_file_content(file: File) -> bytes: response.raise_for_status() return response.content else: + # Check if storage_key is set + if not file.storage_key: + raise FileDownloadError(f"File storage_key is missing for file: {file.filename}") + + # Check if file exists before downloading + from extensions.ext_storage import storage + if not storage.exists(file.storage_key): + raise FileDownloadError(f"File not found in storage: {file.storage_key}") + return file_manager.download(file) except Exception as e: raise FileDownloadError(f"Error downloading file: {str(e)}") from e diff --git a/api/core/workflow/nodes/knowledge_index/entities.py b/api/core/workflow/nodes/knowledge_index/entities.py index 3daca90b9b..bfeb9b5b79 100644 --- a/api/core/workflow/nodes/knowledge_index/entities.py +++ b/api/core/workflow/nodes/knowledge_index/entities.py @@ -158,3 +158,5 @@ class KnowledgeIndexNodeData(BaseNodeData): type: str = "knowledge-index" chunk_structure: str index_chunk_variable_selector: list[str] + indexing_technique: str | None = None + summary_index_setting: dict | None = None diff --git a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py index 17ca4bef7b..4d264683d0 100644 --- a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py +++ b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py @@ -1,9 +1,11 @@ +import concurrent.futures import datetime import logging import time from collections.abc import Mapping from typing import 
Any +from flask import current_app from sqlalchemy import func, select from core.app.entities.app_invoke_entities import InvokeFrom @@ -16,7 +18,9 @@ from core.workflow.nodes.base.node import Node from core.workflow.nodes.base.template import Template from core.workflow.runtime import VariablePool from extensions.ext_database import db -from models.dataset import Dataset, Document, DocumentSegment +from models.dataset import Dataset, Document, DocumentSegment, DocumentSegmentSummary +from services.summary_index_service import SummaryIndexService +from tasks.generate_summary_index_task import generate_summary_index_task from .entities import KnowledgeIndexNodeData from .exc import ( @@ -67,7 +71,18 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): # index knowledge try: if is_preview: - outputs = self._get_preview_output(node_data.chunk_structure, chunks) + # Preview mode: generate summaries for chunks directly without saving to database + # Format preview and generate summaries on-the-fly + # Get indexing_technique and summary_index_setting from node_data (workflow graph config) + # or fallback to dataset if not available in node_data + indexing_technique = node_data.indexing_technique or dataset.indexing_technique + summary_index_setting = node_data.summary_index_setting or dataset.summary_index_setting + + outputs = self._get_preview_output_with_summaries( + node_data.chunk_structure, chunks, dataset=dataset, + indexing_technique=indexing_technique, + summary_index_setting=summary_index_setting + ) return NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, inputs=variables, @@ -163,6 +178,9 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): db.session.commit() + # Generate summary index if enabled + self._handle_summary_index_generation(dataset, document, variable_pool) + return { "dataset_id": ds_id_value, "dataset_name": dataset_name_value, @@ -173,9 +191,269 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): "display_status": "completed", } - def _get_preview_output(self, chunk_structure: str, chunks: Any) -> Mapping[str, Any]: + def _handle_summary_index_generation( + self, + dataset: Dataset, + document: Document, + variable_pool: VariablePool, + ) -> None: + """ + Handle summary index generation based on mode (debug/preview or production). 
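+        In debug/preview runs, summaries are generated synchronously for the document's
+        completed and enabled segments using a small thread pool; in production the work
+        is queued via generate_summary_index_task so indexing is not blocked.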
+ + Args: + dataset: Dataset containing the document + document: Document to generate summaries for + variable_pool: Variable pool to check invoke_from + """ + # Only generate summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + return + + # Check if summary index is enabled + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + return + + # Skip qa_model documents + if document.doc_form == "qa_model": + return + + # Determine if in preview/debug mode + invoke_from = variable_pool.get(["sys", SystemVariableKey.INVOKE_FROM]) + is_preview = invoke_from and invoke_from.value == InvokeFrom.DEBUGGER + + # Determine if only parent chunks should be processed + only_parent_chunks = dataset.chunk_structure == "parent_child_index" + + if is_preview: + try: + # Query segments that need summary generation + query = db.session.query(DocumentSegment).filter_by( + dataset_id=dataset.id, + document_id=document.id, + status="completed", + enabled=True, + ) + segments = query.all() + + if not segments: + logger.info(f"No segments found for document {document.id}") + return + + # Filter segments based on mode + segments_to_process = [] + for segment in segments: + # Skip if summary already exists + existing_summary = ( + db.session.query(DocumentSegmentSummary) + .filter_by(chunk_id=segment.id, dataset_id=dataset.id, status="completed") + .first() + ) + if existing_summary: + continue + + # For parent-child mode, all segments are parent chunks, so process all + segments_to_process.append(segment) + + if not segments_to_process: + logger.info(f"No segments need summary generation for document {document.id}") + return + + # Use ThreadPoolExecutor for concurrent generation + flask_app = current_app._get_current_object() # type: ignore + max_workers = min(10, len(segments_to_process)) # Limit to 10 workers + + def process_segment(segment: DocumentSegment) -> None: + """Process a single segment in a thread with Flask app context.""" + with flask_app.app_context(): + try: + SummaryIndexService.generate_and_vectorize_summary( + segment, dataset, summary_index_setting + ) + except Exception as e: + logger.error(f"Failed to generate summary for segment {segment.id}: {str(e)}") + # Continue processing other segments + + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = [ + executor.submit(process_segment, segment) for segment in segments_to_process + ] + # Wait for all tasks to complete + concurrent.futures.wait(futures) + + logger.info( + f"Successfully generated summary index for {len(segments_to_process)} segments " + f"in document {document.id}" + ) + except Exception as e: + logger.exception(f"Failed to generate summary index for document {document.id}: {str(e)}") + # Don't fail the entire indexing process if summary generation fails + else: + # Production mode: asynchronous generation + logger.info(f"Queuing summary index generation task for document {document.id} (production mode)") + try: + generate_summary_index_task.delay(dataset.id, document.id, None) + logger.info(f"Summary index generation task queued for document {document.id}") + except Exception as e: + logger.exception(f"Failed to queue summary index generation task for document {document.id}: {str(e)}") + # Don't fail the entire indexing process if task queuing fails + + def _get_preview_output_with_summaries( + self, chunk_structure: str, chunks: Any, dataset: Dataset, + 
indexing_technique: str | None = None, + summary_index_setting: dict | None = None + ) -> Mapping[str, Any]: + """ + Generate preview output with summaries for chunks in preview mode. + This method generates summaries on-the-fly without saving to database. + + Args: + chunk_structure: Chunk structure type + chunks: Chunks to generate preview for + dataset: Dataset object (for tenant_id) + indexing_technique: Indexing technique from node config or dataset + summary_index_setting: Summary index setting from node config or dataset + """ index_processor = IndexProcessorFactory(chunk_structure).init_index_processor() - return index_processor.format_preview(chunks) + preview_output = index_processor.format_preview(chunks) + + # Check if summary index is enabled + if indexing_technique != "high_quality": + return preview_output + + if not summary_index_setting or not summary_index_setting.get("enable"): + return preview_output + + # Generate summaries for chunks + if "preview" in preview_output and isinstance(preview_output["preview"], list): + chunk_count = len(preview_output["preview"]) + logger.info( + f"Generating summaries for {chunk_count} chunks in preview mode " + f"(dataset: {dataset.id})" + ) + # Use ParagraphIndexProcessor's generate_summary method + from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor + + # Get Flask app for application context in worker threads + flask_app = None + try: + flask_app = current_app._get_current_object() # type: ignore + except RuntimeError: + logger.warning("No Flask application context available, summary generation may fail") + + def generate_summary_for_chunk(preview_item: dict) -> None: + """Generate summary for a single chunk.""" + if "content" in preview_item: + try: + # Set Flask application context in worker thread + if flask_app: + with flask_app.app_context(): + summary = ParagraphIndexProcessor.generate_summary( + tenant_id=dataset.tenant_id, + text=preview_item["content"], + summary_index_setting=summary_index_setting, + ) + if summary: + preview_item["summary"] = summary + else: + # Fallback: try without app context (may fail) + summary = ParagraphIndexProcessor.generate_summary( + tenant_id=dataset.tenant_id, + text=preview_item["content"], + summary_index_setting=summary_index_setting, + ) + if summary: + preview_item["summary"] = summary + except Exception as e: + logger.error(f"Failed to generate summary for chunk: {str(e)}") + # Don't fail the entire preview if summary generation fails + + # Generate summaries concurrently using ThreadPoolExecutor + # Set a reasonable timeout to prevent hanging (60 seconds per chunk, max 5 minutes total) + timeout_seconds = min(300, 60 * len(preview_output["preview"])) + with concurrent.futures.ThreadPoolExecutor(max_workers=min(10, len(preview_output["preview"]))) as executor: + futures = [ + executor.submit(generate_summary_for_chunk, preview_item) + for preview_item in preview_output["preview"] + ] + # Wait for all tasks to complete with timeout + done, not_done = concurrent.futures.wait(futures, timeout=timeout_seconds) + + # Cancel tasks that didn't complete in time + if not_done: + logger.warning( + f"Summary generation timeout: {len(not_done)} chunks did not complete within {timeout_seconds}s. " + "Cancelling remaining tasks..." 
+ ) + for future in not_done: + future.cancel() + # Wait a bit for cancellation to take effect + concurrent.futures.wait(not_done, timeout=5) + + completed_count = sum(1 for item in preview_output["preview"] if item.get("summary") is not None) + logger.info( + f"Completed summary generation for preview chunks: {completed_count}/{len(preview_output['preview'])} succeeded" + ) + + return preview_output + + def _get_preview_output( + self, chunk_structure: str, chunks: Any, dataset: Dataset | None = None, variable_pool: VariablePool | None = None + ) -> Mapping[str, Any]: + index_processor = IndexProcessorFactory(chunk_structure).init_index_processor() + preview_output = index_processor.format_preview(chunks) + + # If dataset is provided, try to enrich preview with summaries + if dataset and variable_pool: + document_id = variable_pool.get(["sys", SystemVariableKey.DOCUMENT_ID]) + if document_id: + document = db.session.query(Document).filter_by(id=document_id.value).first() + if document: + # Query summaries for this document + summaries = ( + db.session.query(DocumentSegmentSummary) + .filter_by( + dataset_id=dataset.id, + document_id=document.id, + status="completed", + enabled=True, + ) + .all() + ) + + if summaries: + # Create a map of segment content to summary for matching + # Use content matching as chunks in preview might not be indexed yet + summary_by_content = {} + for summary in summaries: + segment = ( + db.session.query(DocumentSegment) + .filter_by(id=summary.chunk_id, dataset_id=dataset.id) + .first() + ) + if segment: + # Normalize content for matching (strip whitespace) + normalized_content = segment.content.strip() + summary_by_content[normalized_content] = summary.summary_content + + # Enrich preview with summaries by content matching + if "preview" in preview_output and isinstance(preview_output["preview"], list): + matched_count = 0 + for preview_item in preview_output["preview"]: + if "content" in preview_item: + # Normalize content for matching + normalized_chunk_content = preview_item["content"].strip() + if normalized_chunk_content in summary_by_content: + preview_item["summary"] = summary_by_content[normalized_chunk_content] + matched_count += 1 + + if matched_count > 0: + logger.info( + f"Enriched preview with {matched_count} existing summaries " + f"(dataset: {dataset.id}, document: {document.id})" + ) + + return preview_output @classmethod def version(cls) -> str: diff --git a/api/extensions/ext_celery.py b/api/extensions/ext_celery.py index 5cf4984709..ffa0e3d7a0 100644 --- a/api/extensions/ext_celery.py +++ b/api/extensions/ext_celery.py @@ -99,6 +99,8 @@ def init_app(app: DifyApp) -> Celery: imports = [ "tasks.async_workflow_tasks", # trigger workers "tasks.trigger_processing_tasks", # async trigger processing + "tasks.generate_summary_index_task", # summary index generation + "tasks.regenerate_summary_index_task", # summary index regeneration ] day = dify_config.CELERY_BEAT_SCHEDULER_TIME diff --git a/api/fields/dataset_fields.py b/api/fields/dataset_fields.py index 1e5ec7d200..ff6578098b 100644 --- a/api/fields/dataset_fields.py +++ b/api/fields/dataset_fields.py @@ -39,6 +39,14 @@ dataset_retrieval_model_fields = { "score_threshold_enabled": fields.Boolean, "score_threshold": fields.Float, } + +dataset_summary_index_fields = { + "enable": fields.Boolean, + "model_name": fields.String, + "model_provider_name": fields.String, + "summary_prompt": fields.String, +} + external_retrieval_model_fields = { "top_k": fields.Integer, "score_threshold": fields.Float, 
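# Editor's illustration (not part of the patch): dataset_summary_index_fields above is
# assumed to serialize a summary_index_setting payload shaped like this; the model and
# provider names are hypothetical placeholders.
example_summary_index_setting = {
    "enable": True,
    "model_name": "example-llm",
    "model_provider_name": "example-provider",
    "summary_prompt": None,  # empty/None falls back to DEFAULT_GENERATOR_SUMMARY_PROMPT
}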
@@ -83,6 +91,7 @@ dataset_detail_fields = { "embedding_model_provider": fields.String, "embedding_available": fields.Boolean, "retrieval_model_dict": fields.Nested(dataset_retrieval_model_fields), + "summary_index_setting": fields.Nested(dataset_summary_index_fields), "tags": fields.List(fields.Nested(tag_fields)), "doc_form": fields.String, "external_knowledge_info": fields.Nested(external_knowledge_info_fields), diff --git a/api/fields/document_fields.py b/api/fields/document_fields.py index 9be59f7454..62f5e19e25 100644 --- a/api/fields/document_fields.py +++ b/api/fields/document_fields.py @@ -33,6 +33,8 @@ document_fields = { "hit_count": fields.Integer, "doc_form": fields.String, "doc_metadata": fields.List(fields.Nested(document_metadata_fields), attribute="doc_metadata_details"), + "summary_index_status": fields.String, # Summary index generation status: "waiting", "generating", "completed", "partial_error", or null if not enabled + "need_summary": fields.Boolean, # Whether this document needs summary index generation } document_with_segments_fields = { @@ -60,6 +62,8 @@ document_with_segments_fields = { "completed_segments": fields.Integer, "total_segments": fields.Integer, "doc_metadata": fields.List(fields.Nested(document_metadata_fields), attribute="doc_metadata_details"), + "summary_index_status": fields.String, # Summary index generation status: "waiting", "generating", "completed", "partial_error", or null if not enabled + "need_summary": fields.Boolean, # Whether this document needs summary index generation } dataset_and_document_fields = { diff --git a/api/fields/hit_testing_fields.py b/api/fields/hit_testing_fields.py index e70f9fa722..0b54992835 100644 --- a/api/fields/hit_testing_fields.py +++ b/api/fields/hit_testing_fields.py @@ -58,4 +58,5 @@ hit_testing_record_fields = { "score": fields.Float, "tsne_position": fields.Raw, "files": fields.List(fields.Nested(files_fields)), + "summary": fields.String, # Summary content if retrieved via summary index } diff --git a/api/fields/segment_fields.py b/api/fields/segment_fields.py index 56d6b68378..2ce9fb154c 100644 --- a/api/fields/segment_fields.py +++ b/api/fields/segment_fields.py @@ -49,4 +49,5 @@ segment_fields = { "stopped_at": TimestampField, "child_chunks": fields.List(fields.Nested(child_chunk_fields)), "attachments": fields.List(fields.Nested(attachment_fields)), + "summary": fields.String, # Summary content for the segment } diff --git a/api/migrations/versions/2026_01_12_1358-562dcce7d77c_add_summaryindex_feature.py b/api/migrations/versions/2026_01_12_1358-562dcce7d77c_add_summaryindex_feature.py new file mode 100644 index 0000000000..40fe419ef6 --- /dev/null +++ b/api/migrations/versions/2026_01_12_1358-562dcce7d77c_add_summaryindex_feature.py @@ -0,0 +1,69 @@ +"""add SummaryIndex feature + +Revision ID: 562dcce7d77c +Revises: 03ea244985ce +Create Date: 2026-01-12 13:58:40.584802 + +""" +from alembic import op +import models as models +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '562dcce7d77c' +down_revision = '03ea244985ce' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table('document_segment_summary', + sa.Column('id', models.types.StringUUID(), nullable=False), + sa.Column('dataset_id', models.types.StringUUID(), nullable=False), + sa.Column('document_id', models.types.StringUUID(), nullable=False), + sa.Column('chunk_id', models.types.StringUUID(), nullable=False), + sa.Column('summary_content', models.types.LongText(), nullable=True), + sa.Column('summary_index_node_id', sa.String(length=255), nullable=True), + sa.Column('summary_index_node_hash', sa.String(length=255), nullable=True), + sa.Column('status', sa.String(length=32), server_default=sa.text("'generating'"), nullable=False), + sa.Column('error', models.types.LongText(), nullable=True), + sa.Column('enabled', sa.Boolean(), server_default=sa.text('true'), nullable=False), + sa.Column('disabled_at', sa.DateTime(), nullable=True), + sa.Column('disabled_by', models.types.StringUUID(), nullable=True), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.PrimaryKeyConstraint('id', name='document_segment_summary_pkey') + ) + with op.batch_alter_table('document_segment_summary', schema=None) as batch_op: + batch_op.create_index('document_segment_summary_chunk_id_idx', ['chunk_id'], unique=False) + batch_op.create_index('document_segment_summary_dataset_id_idx', ['dataset_id'], unique=False) + batch_op.create_index('document_segment_summary_document_id_idx', ['document_id'], unique=False) + batch_op.create_index('document_segment_summary_status_idx', ['status'], unique=False) + + with op.batch_alter_table('datasets', schema=None) as batch_op: + batch_op.add_column(sa.Column('summary_index_setting', models.types.AdjustedJSON(), nullable=True)) + + with op.batch_alter_table('documents', schema=None) as batch_op: + batch_op.add_column(sa.Column('need_summary', sa.Boolean(), server_default=sa.text('false'), nullable=True)) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table('documents', schema=None) as batch_op: + batch_op.drop_column('need_summary') + + with op.batch_alter_table('datasets', schema=None) as batch_op: + batch_op.drop_column('summary_index_setting') + + with op.batch_alter_table('document_segment_summary', schema=None) as batch_op: + batch_op.drop_index('document_segment_summary_status_idx') + batch_op.drop_index('document_segment_summary_document_id_idx') + batch_op.drop_index('document_segment_summary_dataset_id_idx') + batch_op.drop_index('document_segment_summary_chunk_id_idx') + + op.drop_table('document_segment_summary') + # ### end Alembic commands ### diff --git a/api/models/dataset.py b/api/models/dataset.py index 445ac6086f..6497c0efc0 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -72,6 +72,7 @@ class Dataset(Base): keyword_number = mapped_column(sa.Integer, nullable=True, server_default=sa.text("10")) collection_binding_id = mapped_column(StringUUID, nullable=True) retrieval_model = mapped_column(AdjustedJSON, nullable=True) + summary_index_setting = mapped_column(AdjustedJSON, nullable=True) built_in_field_enabled = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) icon_info = mapped_column(AdjustedJSON, nullable=True) runtime_mode = mapped_column(sa.String(255), nullable=True, server_default=sa.text("'general'")) @@ -419,6 +420,7 @@ class Document(Base): doc_metadata = mapped_column(AdjustedJSON, nullable=True) doc_form = mapped_column(String(255), nullable=False, server_default=sa.text("'text_model'")) doc_language = mapped_column(String(255), nullable=True) + need_summary: Mapped[bool | None] = mapped_column(sa.Boolean, nullable=True, server_default=sa.text("false")) DATA_SOURCES = ["upload_file", "notion_import", "website_crawl"] @@ -1567,3 +1569,34 @@ class SegmentAttachmentBinding(Base): segment_id: Mapped[str] = mapped_column(StringUUID, nullable=False) attachment_id: Mapped[str] = mapped_column(StringUUID, nullable=False) created_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + + +class DocumentSegmentSummary(Base): + __tablename__ = "document_segment_summary" + __table_args__ = ( + sa.PrimaryKeyConstraint("id", name="document_segment_summary_pkey"), + sa.Index("document_segment_summary_dataset_id_idx", "dataset_id"), + sa.Index("document_segment_summary_document_id_idx", "document_id"), + sa.Index("document_segment_summary_chunk_id_idx", "chunk_id"), + sa.Index("document_segment_summary_status_idx", "status"), + ) + + id: Mapped[str] = mapped_column(StringUUID, nullable=False, default=lambda: str(uuid4())) + dataset_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + document_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + # corresponds to DocumentSegment.id or parent chunk id + chunk_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + summary_content: Mapped[str] = mapped_column(LongText, nullable=True) + summary_index_node_id: Mapped[str] = mapped_column(String(255), nullable=True) + summary_index_node_hash: Mapped[str] = mapped_column(String(255), nullable=True) + status: Mapped[str] = mapped_column(String(32), nullable=False, server_default=sa.text("'generating'")) + error: Mapped[str] = mapped_column(LongText, nullable=True) + enabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("true")) + disabled_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True) + disabled_by = mapped_column(StringUUID, 
nullable=True) + created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp()) + + def __repr__(self): + return f"" + diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 970192fde5..56a10a7753 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -89,6 +89,7 @@ from tasks.enable_segments_to_index_task import enable_segments_to_index_task from tasks.recover_document_indexing_task import recover_document_indexing_task from tasks.remove_document_from_index_task import remove_document_from_index_task from tasks.retry_document_indexing_task import retry_document_indexing_task +from tasks.regenerate_summary_index_task import regenerate_summary_index_task from tasks.sync_website_document_indexing_task import sync_website_document_indexing_task logger = logging.getLogger(__name__) @@ -473,6 +474,11 @@ class DatasetService: if external_retrieval_model: dataset.retrieval_model = external_retrieval_model + # Update summary index setting if provided + summary_index_setting = data.get("summary_index_setting", None) + if summary_index_setting is not None: + dataset.summary_index_setting = summary_index_setting + # Update basic dataset properties dataset.name = data.get("name", dataset.name) dataset.description = data.get("description", dataset.description) @@ -555,12 +561,20 @@ class DatasetService: # Handle indexing technique changes and embedding model updates action = DatasetService._handle_indexing_technique_change(dataset, data, filtered_data) + # Check if summary_index_setting model changed (before updating database) + summary_model_changed = DatasetService._check_summary_index_setting_model_changed( + dataset, data + ) + # Add metadata fields filtered_data["updated_by"] = user.id filtered_data["updated_at"] = naive_utc_now() # update Retrieval model if data.get("retrieval_model"): filtered_data["retrieval_model"] = data["retrieval_model"] + # update summary index setting + if data.get("summary_index_setting"): + filtered_data["summary_index_setting"] = data.get("summary_index_setting") # update icon info if data.get("icon_info"): filtered_data["icon_info"] = data.get("icon_info") @@ -569,12 +583,30 @@ class DatasetService: db.session.query(Dataset).filter_by(id=dataset.id).update(filtered_data) db.session.commit() + # Reload dataset to get updated values + db.session.refresh(dataset) + # update pipeline knowledge base node data DatasetService._update_pipeline_knowledge_base_node_data(dataset, user.id) # Trigger vector index task if indexing technique changed if action: deal_dataset_vector_index_task.delay(dataset.id, action) + # If embedding_model changed, also regenerate summary vectors + if action == "update": + regenerate_summary_index_task.delay( + dataset.id, + regenerate_reason="embedding_model_changed", + regenerate_vectors_only=True, + ) + + # Trigger summary index regeneration if summary model changed + if summary_model_changed: + regenerate_summary_index_task.delay( + dataset.id, + regenerate_reason="summary_model_changed", + regenerate_vectors_only=False, + ) return dataset @@ -613,6 +645,7 @@ class DatasetService: knowledge_index_node_data["chunk_structure"] = dataset.chunk_structure knowledge_index_node_data["indexing_technique"] = dataset.indexing_technique # pyright: ignore[reportAttributeAccessIssue] 
knowledge_index_node_data["keyword_number"] = dataset.keyword_number + knowledge_index_node_data["summary_index_setting"] = dataset.summary_index_setting node["data"] = knowledge_index_node_data updated = True except Exception: @@ -851,6 +884,49 @@ class DatasetService: ) filtered_data["collection_binding_id"] = dataset_collection_binding.id + @staticmethod + def _check_summary_index_setting_model_changed(dataset: Dataset, data: dict[str, Any]) -> bool: + """ + Check if summary_index_setting model (model_name or model_provider_name) has changed. + + Args: + dataset: Current dataset object + data: Update data dictionary + + Returns: + bool: True if summary model changed, False otherwise + """ + # Check if summary_index_setting is being updated + if "summary_index_setting" not in data or data.get("summary_index_setting") is None: + return False + + new_summary_setting = data.get("summary_index_setting") + old_summary_setting = dataset.summary_index_setting + + # If old setting doesn't exist or is disabled, no need to regenerate + if not old_summary_setting or not old_summary_setting.get("enable"): + return False + + # If new setting is disabled, no need to regenerate + if not new_summary_setting or not new_summary_setting.get("enable"): + return False + + # Compare model_name and model_provider_name + old_model_name = old_summary_setting.get("model_name") + old_model_provider = old_summary_setting.get("model_provider_name") + new_model_name = new_summary_setting.get("model_name") + new_model_provider = new_summary_setting.get("model_provider_name") + + # Check if model changed + if old_model_name != new_model_name or old_model_provider != new_model_provider: + logger.info( + f"Summary index setting model changed for dataset {dataset.id}: " + f"old={old_model_provider}/{old_model_name}, new={new_model_provider}/{new_model_name}" + ) + return True + + return False + @staticmethod def update_rag_pipeline_dataset_settings( session: Session, dataset: Dataset, knowledge_configuration: KnowledgeConfiguration, has_published: bool = False @@ -1823,6 +1899,8 @@ class DocumentService: DuplicateDocumentIndexingTaskProxy( dataset.tenant_id, dataset.id, duplicate_document_ids ).delay() + # Note: Summary index generation is triggered in document_indexing_task after indexing completes + # to ensure segments are available. 
See tasks/document_indexing_task.py except LockNotOwnedError: pass @@ -2127,6 +2205,14 @@ class DocumentService: name: str, batch: str, ): + # Set need_summary based on dataset's summary_index_setting + need_summary = False + if ( + dataset.summary_index_setting + and dataset.summary_index_setting.get("enable") is True + ): + need_summary = True + document = Document( tenant_id=dataset.tenant_id, dataset_id=dataset.id, @@ -2140,6 +2226,7 @@ class DocumentService: created_by=account.id, doc_form=document_form, doc_language=document_language, + need_summary=need_summary, ) doc_metadata = {} if dataset.built_in_field_enabled: @@ -2364,6 +2451,7 @@ class DocumentService: embedding_model_provider=knowledge_config.embedding_model_provider, collection_binding_id=dataset_collection_binding_id, retrieval_model=retrieval_model.model_dump() if retrieval_model else None, + summary_index_setting=knowledge_config.summary_index_setting, is_multimodal=knowledge_config.is_multimodal, ) @@ -2545,6 +2633,14 @@ class DocumentService: if not isinstance(args["process_rule"]["rules"]["segmentation"]["max_tokens"], int): raise ValueError("Process rule segmentation max_tokens is invalid") + # valid summary index setting + if args["process_rule"]["summary_index_setting"] and args["process_rule"]["summary_index_setting"]["enable"]: + summary_index_setting = args["process_rule"]["summary_index_setting"] + if "model_name" not in summary_index_setting or not summary_index_setting["model_name"]: + raise ValueError("Summary index model name is required") + if "model_provider_name" not in summary_index_setting or not summary_index_setting["model_provider_name"]: + raise ValueError("Summary index model provider name is required") + @staticmethod def batch_update_document_status( dataset: Dataset, document_ids: list[str], action: Literal["enable", "disable", "archive", "un_archive"], user @@ -3013,6 +3109,37 @@ class SegmentService: if args.enabled or keyword_changed: # update segment vector index VectorService.update_segment_vector(args.keywords, segment, dataset) + # update summary index if summary is provided and has changed + if args.summary is not None: + # Check if summary index is enabled + has_summary_index = ( + dataset.indexing_technique == "high_quality" + and dataset.summary_index_setting + and dataset.summary_index_setting.get("enable") is True + ) + + if has_summary_index: + # Query existing summary from database + from models.dataset import DocumentSegmentSummary + existing_summary = ( + db.session.query(DocumentSegmentSummary) + .where( + DocumentSegmentSummary.chunk_id == segment.id, + DocumentSegmentSummary.dataset_id == dataset.id, + ) + .first() + ) + + # Check if summary has changed + existing_summary_content = existing_summary.summary_content if existing_summary else None + if existing_summary_content != args.summary: + # Summary has changed, update it + from services.summary_index_service import SummaryIndexService + try: + SummaryIndexService.update_summary_for_segment(segment, dataset, args.summary) + except Exception as e: + logger.exception(f"Failed to update summary for segment {segment.id}: {str(e)}") + # Don't fail the entire update if summary update fails else: segment_hash = helper.generate_text_hash(content) tokens = 0 @@ -3087,6 +3214,15 @@ class SegmentService: elif document.doc_form in (IndexStructureType.PARAGRAPH_INDEX, IndexStructureType.QA_INDEX): # update segment vector index VectorService.update_segment_vector(args.keywords, segment, dataset) + # update summary index if summary is 
provided + if args.summary is not None: + from services.summary_index_service import SummaryIndexService + + try: + SummaryIndexService.update_summary_for_segment(segment, dataset, args.summary) + except Exception as e: + logger.exception(f"Failed to update summary for segment {segment.id}: {str(e)}") + # Don't fail the entire update if summary update fails # update multimodel vector index VectorService.update_multimodel_vector(segment, args.attachment_ids or [], dataset) except Exception as e: diff --git a/api/services/entities/knowledge_entities/knowledge_entities.py b/api/services/entities/knowledge_entities/knowledge_entities.py index 7959734e89..8dc5b93501 100644 --- a/api/services/entities/knowledge_entities/knowledge_entities.py +++ b/api/services/entities/knowledge_entities/knowledge_entities.py @@ -119,6 +119,7 @@ class KnowledgeConfig(BaseModel): data_source: DataSource | None = None process_rule: ProcessRule | None = None retrieval_model: RetrievalModel | None = None + summary_index_setting: dict | None = None doc_form: str = "text_model" doc_language: str = "English" embedding_model: str | None = None @@ -141,6 +142,7 @@ class SegmentUpdateArgs(BaseModel): regenerate_child_chunks: bool = False enabled: bool | None = None attachment_ids: list[str] | None = None + summary: str | None = None # Summary content for summary index class ChildChunkUpdateArgs(BaseModel): diff --git a/api/services/summary_index_service.py b/api/services/summary_index_service.py new file mode 100644 index 0000000000..1d5c51aad8 --- /dev/null +++ b/api/services/summary_index_service.py @@ -0,0 +1,612 @@ +"""Summary index service for generating and managing document segment summaries.""" + +import logging +import time +import uuid +from typing import Any + +from core.rag.datasource.vdb.vector_factory import Vector +from core.rag.index_processor.constant.doc_type import DocType +from core.rag.models.document import Document +from extensions.ext_database import db +from libs import helper +from models.dataset import Dataset, DocumentSegment, DocumentSegmentSummary +from models.dataset import Document as DatasetDocument + +logger = logging.getLogger(__name__) + + +class SummaryIndexService: + """Service for generating and managing summary indexes.""" + + @staticmethod + def generate_summary_for_segment( + segment: DocumentSegment, + dataset: Dataset, + summary_index_setting: dict, + ) -> str: + """ + Generate summary for a single segment. + + Args: + segment: DocumentSegment to generate summary for + dataset: Dataset containing the segment + summary_index_setting: Summary index configuration + + Returns: + Generated summary text + + Raises: + ValueError: If summary_index_setting is invalid or generation fails + """ + # Reuse the existing generate_summary method from ParagraphIndexProcessor + # Use lazy import to avoid circular import + from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor + + summary_content = ParagraphIndexProcessor.generate_summary( + tenant_id=dataset.tenant_id, + text=segment.content, + summary_index_setting=summary_index_setting, + ) + + if not summary_content: + raise ValueError("Generated summary is empty") + + return summary_content + + @staticmethod + def create_summary_record( + segment: DocumentSegment, + dataset: Dataset, + summary_content: str, + status: str = "generating", + ) -> DocumentSegmentSummary: + """ + Create or update a DocumentSegmentSummary record. 
+ If a summary record already exists for this segment, it will be updated instead of creating a new one. + + Args: + segment: DocumentSegment to create summary for + dataset: Dataset containing the segment + summary_content: Generated summary content + status: Summary status (default: "generating") + + Returns: + Created or updated DocumentSegmentSummary instance + """ + # Check if summary record already exists + existing_summary = ( + db.session.query(DocumentSegmentSummary) + .filter_by(chunk_id=segment.id, dataset_id=dataset.id) + .first() + ) + + if existing_summary: + # Update existing record + existing_summary.summary_content = summary_content + existing_summary.status = status + existing_summary.error = None # Clear any previous errors + # Re-enable if it was disabled + if not existing_summary.enabled: + existing_summary.enabled = True + existing_summary.disabled_at = None + existing_summary.disabled_by = None + db.session.add(existing_summary) + db.session.flush() + return existing_summary + else: + # Create new record (enabled by default) + summary_record = DocumentSegmentSummary( + dataset_id=dataset.id, + document_id=segment.document_id, + chunk_id=segment.id, + summary_content=summary_content, + status=status, + enabled=True, # Explicitly set enabled to True + ) + db.session.add(summary_record) + db.session.flush() + return summary_record + + @staticmethod + def vectorize_summary( + summary_record: DocumentSegmentSummary, + segment: DocumentSegment, + dataset: Dataset, + ) -> None: + """ + Vectorize summary and store in vector database. + + Args: + summary_record: DocumentSegmentSummary record + segment: Original DocumentSegment + dataset: Dataset containing the segment + """ + if dataset.indexing_technique != "high_quality": + logger.warning( + f"Summary vectorization skipped for dataset {dataset.id}: " + "indexing_technique is not high_quality" + ) + return + + # Reuse existing index_node_id if available (like segment does), otherwise generate new one + old_summary_node_id = summary_record.summary_index_node_id + if old_summary_node_id: + # Reuse existing index_node_id (like segment behavior) + summary_index_node_id = old_summary_node_id + else: + # Generate new index node ID only for new summaries + summary_index_node_id = str(uuid.uuid4()) + + # Always regenerate hash (in case summary content changed) + summary_hash = helper.generate_text_hash(summary_record.summary_content) + + # Delete old vector only if we're reusing the same index_node_id (to overwrite) + # If index_node_id changed, the old vector should have been deleted elsewhere + if old_summary_node_id and old_summary_node_id == summary_index_node_id: + try: + vector = Vector(dataset) + vector.delete_by_ids([old_summary_node_id]) + except Exception as e: + logger.warning( + f"Failed to delete old summary vector for segment {segment.id}: {str(e)}. " + "Continuing with new vectorization." 
+ ) + + # Create document with summary content and metadata + summary_document = Document( + page_content=summary_record.summary_content, + metadata={ + "doc_id": summary_index_node_id, + "doc_hash": summary_hash, + "dataset_id": dataset.id, + "document_id": segment.document_id, + "original_chunk_id": segment.id, # Key: link to original chunk + "doc_type": DocType.TEXT, + "is_summary": True, # Identifier for summary documents + }, + ) + + # Vectorize and store with retry mechanism for connection errors + max_retries = 3 + retry_delay = 2.0 + + for attempt in range(max_retries): + try: + vector = Vector(dataset) + vector.add_texts([summary_document], duplicate_check=True) + + # Success - update summary record with index node info + summary_record.summary_index_node_id = summary_index_node_id + summary_record.summary_index_node_hash = summary_hash + summary_record.status = "completed" + db.session.add(summary_record) + db.session.flush() + return # Success, exit function + + except (ConnectionError, Exception) as e: + error_str = str(e).lower() + # Check if it's a connection-related error that might be transient + is_connection_error = any(keyword in error_str for keyword in [ + "connection", "disconnected", "timeout", "network", + "could not connect", "server disconnected", "weaviate" + ]) + + if is_connection_error and attempt < max_retries - 1: + # Retry for connection errors + wait_time = retry_delay * (2 ** attempt) # Exponential backoff + logger.warning( + f"Vectorization attempt {attempt + 1}/{max_retries} failed for segment {segment.id}: {str(e)}. " + f"Retrying in {wait_time:.1f} seconds..." + ) + time.sleep(wait_time) + continue + else: + # Final attempt failed or non-connection error - log and update status + logger.error( + f"Failed to vectorize summary for segment {segment.id} after {attempt + 1} attempts: {str(e)}", + exc_info=True + ) + summary_record.status = "error" + summary_record.error = f"Vectorization failed: {str(e)}" + db.session.add(summary_record) + db.session.flush() + raise + + @staticmethod + def generate_and_vectorize_summary( + segment: DocumentSegment, + dataset: Dataset, + summary_index_setting: dict, + ) -> DocumentSegmentSummary: + """ + Generate summary for a segment and vectorize it. 
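+        The flow is: generate the summary text, create or update the DocumentSegmentSummary
+        record, vectorize it, then commit; on failure the record is marked "error" and the
+        exception is re-raised.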
+ + Args: + segment: DocumentSegment to generate summary for + dataset: Dataset containing the segment + summary_index_setting: Summary index configuration + + Returns: + Created DocumentSegmentSummary instance + + Raises: + ValueError: If summary generation fails + """ + try: + # Generate summary + summary_content = SummaryIndexService.generate_summary_for_segment( + segment, dataset, summary_index_setting + ) + + # Create or update summary record (will handle overwrite internally) + summary_record = SummaryIndexService.create_summary_record( + segment, dataset, summary_content, status="generating" + ) + + # Vectorize summary (will delete old vector if exists before creating new one) + SummaryIndexService.vectorize_summary(summary_record, segment, dataset) + + db.session.commit() + logger.info(f"Successfully generated and vectorized summary for segment {segment.id}") + return summary_record + + except Exception as e: + logger.exception(f"Failed to generate summary for segment {segment.id}: {str(e)}") + # Update summary record with error status if it exists + summary_record = ( + db.session.query(DocumentSegmentSummary) + .filter_by(chunk_id=segment.id, dataset_id=dataset.id) + .first() + ) + if summary_record: + summary_record.status = "error" + summary_record.error = str(e) + db.session.add(summary_record) + db.session.commit() + raise + + @staticmethod + def generate_summaries_for_document( + dataset: Dataset, + document: DatasetDocument, + summary_index_setting: dict, + segment_ids: list[str] | None = None, + only_parent_chunks: bool = False, + ) -> list[DocumentSegmentSummary]: + """ + Generate summaries for all segments in a document including vectorization. + + Args: + dataset: Dataset containing the document + document: DatasetDocument to generate summaries for + summary_index_setting: Summary index configuration + segment_ids: Optional list of specific segment IDs to process + only_parent_chunks: If True, only process parent chunks (for parent-child mode) + + Returns: + List of created DocumentSegmentSummary instances + """ + # Only generate summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + logger.info( + f"Skipping summary generation for dataset {dataset.id}: " + f"indexing_technique is {dataset.indexing_technique}, not 'high_quality'" + ) + return [] + + if not summary_index_setting or not summary_index_setting.get("enable"): + logger.info(f"Summary index is disabled for dataset {dataset.id}") + return [] + + # Skip qa_model documents + if document.doc_form == "qa_model": + logger.info(f"Skipping summary generation for qa_model document {document.id}") + return [] + + logger.info( + f"Starting summary generation for document {document.id} in dataset {dataset.id}, " + f"segment_ids: {len(segment_ids) if segment_ids else 'all'}, " + f"only_parent_chunks: {only_parent_chunks}" + ) + + # Query segments (only enabled segments) + query = db.session.query(DocumentSegment).filter_by( + dataset_id=dataset.id, + document_id=document.id, + status="completed", + enabled=True, # Only generate summaries for enabled segments + ) + + if segment_ids: + query = query.filter(DocumentSegment.id.in_(segment_ids)) + + segments = query.all() + + if not segments: + logger.info(f"No segments found for document {document.id}") + return [] + + summary_records = [] + + for segment in segments: + # For parent-child mode, only process parent chunks + # In parent-child mode, all DocumentSegments are parent chunks, + # so we process all of them. 
Child chunks are stored in ChildChunk table + # and are not DocumentSegments, so they won't be in the segments list. + # This check is mainly for clarity and future-proofing. + if only_parent_chunks: + # In parent-child mode, all segments in the query are parent chunks + # Child chunks are not DocumentSegments, so they won't appear here + # We can process all segments + pass + + try: + summary_record = SummaryIndexService.generate_and_vectorize_summary( + segment, dataset, summary_index_setting + ) + summary_records.append(summary_record) + except Exception as e: + logger.error(f"Failed to generate summary for segment {segment.id}: {str(e)}") + # Continue with other segments + continue + + logger.info( + f"Completed summary generation for document {document.id}: " + f"{len(summary_records)} summaries generated and vectorized" + ) + return summary_records + + @staticmethod + def disable_summaries_for_segments( + dataset: Dataset, + segment_ids: list[str] | None = None, + disabled_by: str | None = None, + ) -> None: + """ + Disable summary records and remove vectors from vector database for segments. + Unlike delete, this preserves the summary records but marks them as disabled. + + Args: + dataset: Dataset containing the segments + segment_ids: List of segment IDs to disable summaries for. If None, disable all. + disabled_by: User ID who disabled the summaries + """ + from libs.datetime_utils import naive_utc_now + + query = db.session.query(DocumentSegmentSummary).filter_by( + dataset_id=dataset.id, + enabled=True, # Only disable enabled summaries + ) + + if segment_ids: + query = query.filter(DocumentSegmentSummary.chunk_id.in_(segment_ids)) + + summaries = query.all() + + if not summaries: + return + + logger.info( + f"Disabling {len(summaries)} summary records for dataset {dataset.id}, " + f"segment_ids: {len(segment_ids) if segment_ids else 'all'}" + ) + + # Remove from vector database (but keep records) + if dataset.indexing_technique == "high_quality": + summary_node_ids = [ + s.summary_index_node_id for s in summaries if s.summary_index_node_id + ] + if summary_node_ids: + try: + vector = Vector(dataset) + vector.delete_by_ids(summary_node_ids) + except Exception as e: + logger.warning(f"Failed to remove summary vectors: {str(e)}") + + # Disable summary records (don't delete) + now = naive_utc_now() + for summary in summaries: + summary.enabled = False + summary.disabled_at = now + summary.disabled_by = disabled_by + db.session.add(summary) + + db.session.commit() + logger.info(f"Disabled {len(summaries)} summary records for dataset {dataset.id}") + + @staticmethod + def enable_summaries_for_segments( + dataset: Dataset, + segment_ids: list[str] | None = None, + ) -> None: + """ + Enable summary records and re-add vectors to vector database for segments. + + Args: + dataset: Dataset containing the segments + segment_ids: List of segment IDs to enable summaries for. If None, enable all. 
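+
+        Illustrative usage (segment IDs are hypothetical):
+            SummaryIndexService.enable_summaries_for_segments(
+                dataset=dataset,
+                segment_ids=["segment-id-1", "segment-id-2"],
+            )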
+ """ + # Only enable summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + return + + # Check if summary index is enabled + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + return + + query = db.session.query(DocumentSegmentSummary).filter_by( + dataset_id=dataset.id, + enabled=False, # Only enable disabled summaries + ) + + if segment_ids: + query = query.filter(DocumentSegmentSummary.chunk_id.in_(segment_ids)) + + summaries = query.all() + + if not summaries: + return + + logger.info( + f"Enabling {len(summaries)} summary records for dataset {dataset.id}, " + f"segment_ids: {len(segment_ids) if segment_ids else 'all'}" + ) + + # Re-vectorize and re-add to vector database + enabled_count = 0 + for summary in summaries: + # Get the original segment + segment = db.session.query(DocumentSegment).filter_by( + id=summary.chunk_id, + dataset_id=dataset.id, + ).first() + + if not segment or not segment.enabled or segment.status != "completed": + continue + + if not summary.summary_content: + continue + + try: + # Re-vectorize summary + SummaryIndexService.vectorize_summary(summary, segment, dataset) + + # Enable summary record + summary.enabled = True + summary.disabled_at = None + summary.disabled_by = None + db.session.add(summary) + enabled_count += 1 + except Exception as e: + logger.error(f"Failed to re-vectorize summary {summary.id}: {str(e)}") + # Keep it disabled if vectorization fails + continue + + db.session.commit() + logger.info(f"Enabled {enabled_count} summary records for dataset {dataset.id}") + + @staticmethod + def delete_summaries_for_segments( + dataset: Dataset, + segment_ids: list[str] | None = None, + ) -> None: + """ + Delete summary records and vectors for segments (used only for actual deletion scenarios). + For disable/enable operations, use disable_summaries_for_segments/enable_summaries_for_segments. + + Args: + dataset: Dataset containing the segments + segment_ids: List of segment IDs to delete summaries for. If None, delete all. + """ + query = db.session.query(DocumentSegmentSummary).filter_by(dataset_id=dataset.id) + + if segment_ids: + query = query.filter(DocumentSegmentSummary.chunk_id.in_(segment_ids)) + + summaries = query.all() + + if not summaries: + return + + # Delete from vector database + if dataset.indexing_technique == "high_quality": + summary_node_ids = [ + s.summary_index_node_id for s in summaries if s.summary_index_node_id + ] + if summary_node_ids: + vector = Vector(dataset) + vector.delete_by_ids(summary_node_ids) + + # Delete summary records + for summary in summaries: + db.session.delete(summary) + + db.session.commit() + logger.info(f"Deleted {len(summaries)} summary records for dataset {dataset.id}") + + @staticmethod + def update_summary_for_segment( + segment: DocumentSegment, + dataset: Dataset, + summary_content: str, + ) -> DocumentSegmentSummary | None: + """ + Update summary for a segment and re-vectorize it. 
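+
+        If a summary record already exists, its previous vector (if any) is removed
+        and the new content is re-vectorized; otherwise a new summary record is
+        created and vectorized.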
+ + Args: + segment: DocumentSegment to update summary for + dataset: Dataset containing the segment + summary_content: New summary content + + Returns: + Updated DocumentSegmentSummary instance, or None if summary index is not enabled + """ + # Only update summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + return None + + # Check if summary index is enabled + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + return None + + # Skip qa_model documents + if segment.document and segment.document.doc_form == "qa_model": + return None + + try: + # Find existing summary record + summary_record = ( + db.session.query(DocumentSegmentSummary) + .filter_by(chunk_id=segment.id, dataset_id=dataset.id) + .first() + ) + + if summary_record: + # Update existing summary + old_summary_node_id = summary_record.summary_index_node_id + + # Update summary content + summary_record.summary_content = summary_content + summary_record.status = "generating" + db.session.add(summary_record) + db.session.flush() + + # Delete old vector if exists + if old_summary_node_id: + vector = Vector(dataset) + vector.delete_by_ids([old_summary_node_id]) + + # Re-vectorize summary + SummaryIndexService.vectorize_summary(summary_record, segment, dataset) + + db.session.commit() + logger.info(f"Successfully updated and re-vectorized summary for segment {segment.id}") + return summary_record + else: + # Create new summary record if doesn't exist + summary_record = SummaryIndexService.create_summary_record( + segment, dataset, summary_content, status="generating" + ) + SummaryIndexService.vectorize_summary(summary_record, segment, dataset) + db.session.commit() + logger.info(f"Successfully created and vectorized summary for segment {segment.id}") + return summary_record + + except Exception as e: + logger.exception(f"Failed to update summary for segment {segment.id}: {str(e)}") + # Update summary record with error status if it exists + summary_record = ( + db.session.query(DocumentSegmentSummary) + .filter_by(chunk_id=segment.id, dataset_id=dataset.id) + .first() + ) + if summary_record: + summary_record.status = "error" + summary_record.error = str(e) + db.session.add(summary_record) + db.session.commit() + raise + diff --git a/api/tasks/add_document_to_index_task.py b/api/tasks/add_document_to_index_task.py index e7dead8a56..da6f468edd 100644 --- a/api/tasks/add_document_to_index_task.py +++ b/api/tasks/add_document_to_index_task.py @@ -117,6 +117,18 @@ def add_document_to_index_task(dataset_document_id: str): ) db.session.commit() + # Enable summary indexes for all segments in this document + from services.summary_index_service import SummaryIndexService + segment_ids_list = [segment.id for segment in segments] + if segment_ids_list: + try: + SummaryIndexService.enable_summaries_for_segments( + dataset=dataset, + segment_ids=segment_ids_list, + ) + except Exception as e: + logger.warning(f"Failed to enable summaries for document {dataset_document.id}: {str(e)}") + end_at = time.perf_counter() logger.info( click.style(f"Document added to index: {dataset_document.id} latency: {end_at - start_at}", fg="green") diff --git a/api/tasks/delete_segment_from_index_task.py b/api/tasks/delete_segment_from_index_task.py index bea5c952cf..14146018f1 100644 --- a/api/tasks/delete_segment_from_index_task.py +++ b/api/tasks/delete_segment_from_index_task.py @@ -42,6 +42,7 @@ def delete_segment_from_index_task( 
doc_form = dataset_document.doc_form # Proceed with index cleanup using the index_node_ids directly + # For actual deletion, we should delete summaries (not just disable them) index_processor = IndexProcessorFactory(doc_form).init_index_processor() index_processor.clean( dataset, @@ -49,6 +50,7 @@ def delete_segment_from_index_task( with_keywords=True, delete_child_chunks=True, precomputed_child_node_ids=child_node_ids, + delete_summaries=True, # Actually delete summaries when segment is deleted ) if dataset.is_multimodal: # delete segment attachment binding diff --git a/api/tasks/disable_segment_from_index_task.py b/api/tasks/disable_segment_from_index_task.py index 6b5f01b416..67c2867edd 100644 --- a/api/tasks/disable_segment_from_index_task.py +++ b/api/tasks/disable_segment_from_index_task.py @@ -53,6 +53,17 @@ def disable_segment_from_index_task(segment_id: str): logger.info(click.style(f"Segment {segment.id} document status is invalid, pass.", fg="cyan")) return + # Disable summary index for this segment + from services.summary_index_service import SummaryIndexService + try: + SummaryIndexService.disable_summaries_for_segments( + dataset=dataset, + segment_ids=[segment.id], + disabled_by=segment.disabled_by, + ) + except Exception as e: + logger.warning(f"Failed to disable summary for segment {segment.id}: {str(e)}") + index_type = dataset_document.doc_form index_processor = IndexProcessorFactory(index_type).init_index_processor() index_processor.clean(dataset, [segment.index_node_id]) diff --git a/api/tasks/disable_segments_from_index_task.py b/api/tasks/disable_segments_from_index_task.py index c2a3de29f4..b6a534bacf 100644 --- a/api/tasks/disable_segments_from_index_task.py +++ b/api/tasks/disable_segments_from_index_task.py @@ -58,12 +58,25 @@ def disable_segments_from_index_task(segment_ids: list, dataset_id: str, documen return try: + # Disable summary indexes for these segments + from services.summary_index_service import SummaryIndexService + segment_ids_list = [segment.id for segment in segments] + try: + # Get disabled_by from first segment (they should all have the same disabled_by) + disabled_by = segments[0].disabled_by if segments else None + SummaryIndexService.disable_summaries_for_segments( + dataset=dataset, + segment_ids=segment_ids_list, + disabled_by=disabled_by, + ) + except Exception as e: + logger.warning(f"Failed to disable summaries for segments: {str(e)}") + index_node_ids = [segment.index_node_id for segment in segments] if dataset.is_multimodal: - segment_ids = [segment.id for segment in segments] segment_attachment_bindings = ( db.session.query(SegmentAttachmentBinding) - .where(SegmentAttachmentBinding.segment_id.in_(segment_ids)) + .where(SegmentAttachmentBinding.segment_id.in_(segment_ids_list)) .all() ) if segment_attachment_bindings: diff --git a/api/tasks/document_indexing_task.py b/api/tasks/document_indexing_task.py index acbdab631b..319837ceaf 100644 --- a/api/tasks/document_indexing_task.py +++ b/api/tasks/document_indexing_task.py @@ -8,6 +8,7 @@ from celery import shared_task from configs import dify_config from core.entities.document_task import DocumentTask from core.indexing_runner import DocumentIsPausedError, IndexingRunner +from tasks.generate_summary_index_task import generate_summary_index_task from core.rag.pipeline.queue import TenantIsolatedTaskQueue from enums.cloud_plan import CloudPlan from extensions.ext_database import db @@ -100,6 +101,60 @@ def _document_indexing(dataset_id: str, document_ids: Sequence[str]): 
indexing_runner.run(documents) end_at = time.perf_counter() logger.info(click.style(f"Processed dataset: {dataset_id} latency: {end_at - start_at}", fg="green")) + + # Trigger summary index generation for completed documents if enabled + # Only generate for high_quality indexing technique and when summary_index_setting is enabled + # Re-query dataset to get latest summary_index_setting (in case it was updated) + dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first() + if not dataset: + logger.warning(f"Dataset {dataset_id} not found after indexing") + return + + if dataset.indexing_technique == "high_quality": + summary_index_setting = dataset.summary_index_setting + if summary_index_setting and summary_index_setting.get("enable"): + # Check each document's indexing status and trigger summary generation if completed + for document_id in document_ids: + # Re-query document to get latest status (IndexingRunner may have updated it) + document = ( + db.session.query(Document) + .where(Document.id == document_id, Document.dataset_id == dataset_id) + .first() + ) + if document: + logger.info( + f"Checking document {document_id} for summary generation: " + f"status={document.indexing_status}, doc_form={document.doc_form}" + ) + if document.indexing_status == "completed" and document.doc_form != "qa_model": + try: + generate_summary_index_task.delay(dataset.id, document_id, None) + logger.info( + f"Queued summary index generation task for document {document_id} " + f"in dataset {dataset.id} after indexing completed" + ) + except Exception as e: + logger.exception( + f"Failed to queue summary index generation task for document {document_id}: {str(e)}" + ) + # Don't fail the entire indexing process if summary task queuing fails + else: + logger.info( + f"Skipping summary generation for document {document_id}: " + f"status={document.indexing_status}, doc_form={document.doc_form}" + ) + else: + logger.warning(f"Document {document_id} not found after indexing") + else: + logger.info( + f"Summary index generation skipped for dataset {dataset.id}: " + f"summary_index_setting.enable={summary_index_setting.get('enable') if summary_index_setting else None}" + ) + else: + logger.info( + f"Summary index generation skipped for dataset {dataset.id}: " + f"indexing_technique={dataset.indexing_technique} (not 'high_quality')" + ) except DocumentIsPausedError as ex: logger.info(click.style(str(ex), fg="yellow")) except Exception: diff --git a/api/tasks/enable_segment_to_index_task.py b/api/tasks/enable_segment_to_index_task.py index 7615469ed0..113e19871e 100644 --- a/api/tasks/enable_segment_to_index_task.py +++ b/api/tasks/enable_segment_to_index_task.py @@ -103,6 +103,16 @@ def enable_segment_to_index_task(segment_id: str): # save vector index index_processor.load(dataset, [document], multimodal_documents=multimodel_documents) + # Enable summary index for this segment + from services.summary_index_service import SummaryIndexService + try: + SummaryIndexService.enable_summaries_for_segments( + dataset=dataset, + segment_ids=[segment.id], + ) + except Exception as e: + logger.warning(f"Failed to enable summary for segment {segment.id}: {str(e)}") + end_at = time.perf_counter() logger.info(click.style(f"Segment enabled to index: {segment.id} latency: {end_at - start_at}", fg="green")) except Exception as e: diff --git a/api/tasks/enable_segments_to_index_task.py b/api/tasks/enable_segments_to_index_task.py index 9f17d09e18..0c419ca2f0 100644 --- a/api/tasks/enable_segments_to_index_task.py +++ 
b/api/tasks/enable_segments_to_index_task.py @@ -108,6 +108,17 @@ def enable_segments_to_index_task(segment_ids: list, dataset_id: str, document_i # save vector index index_processor.load(dataset, documents, multimodal_documents=multimodal_documents) + # Enable summary indexes for these segments + from services.summary_index_service import SummaryIndexService + segment_ids_list = [segment.id for segment in segments] + try: + SummaryIndexService.enable_summaries_for_segments( + dataset=dataset, + segment_ids=segment_ids_list, + ) + except Exception as e: + logger.warning(f"Failed to enable summaries for segments: {str(e)}") + end_at = time.perf_counter() logger.info(click.style(f"Segments enabled to index latency: {end_at - start_at}", fg="green")) except Exception as e: diff --git a/api/tasks/generate_summary_index_task.py b/api/tasks/generate_summary_index_task.py new file mode 100644 index 0000000000..2850658ce4 --- /dev/null +++ b/api/tasks/generate_summary_index_task.py @@ -0,0 +1,113 @@ +"""Async task for generating summary indexes.""" + +import logging +import time + +import click +from celery import shared_task + +from extensions.ext_database import db +from models.dataset import Dataset, DocumentSegment +from models.dataset import Document as DatasetDocument +from services.summary_index_service import SummaryIndexService + +logger = logging.getLogger(__name__) + + +@shared_task(queue="dataset") +def generate_summary_index_task(dataset_id: str, document_id: str, segment_ids: list[str] | None = None): + """ + Async generate summary index for document segments. + + Args: + dataset_id: Dataset ID + document_id: Document ID + segment_ids: Optional list of specific segment IDs to process. If None, process all segments. + + Usage: + generate_summary_index_task.delay(dataset_id, document_id) + generate_summary_index_task.delay(dataset_id, document_id, segment_ids) + """ + logger.info( + click.style( + f"Start generating summary index for document {document_id} in dataset {dataset_id}", + fg="green", + ) + ) + start_at = time.perf_counter() + + try: + dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first() + if not dataset: + logger.error(click.style(f"Dataset not found: {dataset_id}", fg="red")) + db.session.close() + return + + document = db.session.query(DatasetDocument).where(DatasetDocument.id == document_id).first() + if not document: + logger.error(click.style(f"Document not found: {document_id}", fg="red")) + db.session.close() + return + + # Only generate summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + logger.info( + click.style( + f"Skipping summary generation for dataset {dataset_id}: " + f"indexing_technique is {dataset.indexing_technique}, not 'high_quality'", + fg="cyan", + ) + ) + db.session.close() + return + + # Check if summary index is enabled + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + logger.info( + click.style( + f"Summary index is disabled for dataset {dataset_id}", + fg="cyan", + ) + ) + db.session.close() + return + + # Determine if only parent chunks should be processed + only_parent_chunks = dataset.chunk_structure == "parent_child_index" + + # Generate summaries + summary_records = SummaryIndexService.generate_summaries_for_document( + dataset=dataset, + document=document, + summary_index_setting=summary_index_setting, + segment_ids=segment_ids, + only_parent_chunks=only_parent_chunks, + ) + + end_at = 
time.perf_counter() + logger.info( + click.style( + f"Summary index generation completed for document {document_id}: " + f"{len(summary_records)} summaries generated, latency: {end_at - start_at}", + fg="green", + ) + ) + + except Exception as e: + logger.exception(f"Failed to generate summary index for document {document_id}: {str(e)}") + # Update document segments with error status if needed + if segment_ids: + db.session.query(DocumentSegment).filter( + DocumentSegment.id.in_(segment_ids), + DocumentSegment.dataset_id == dataset_id, + ).update( + { + DocumentSegment.error: f"Summary generation failed: {str(e)}", + }, + synchronize_session=False, + ) + db.session.commit() + finally: + db.session.close() + diff --git a/api/tasks/regenerate_summary_index_task.py b/api/tasks/regenerate_summary_index_task.py new file mode 100644 index 0000000000..ddc48f9d99 --- /dev/null +++ b/api/tasks/regenerate_summary_index_task.py @@ -0,0 +1,219 @@ +"""Task for regenerating summary indexes when dataset settings change.""" + +import logging +import time +from typing import Any + +import click +from celery import shared_task +from sqlalchemy import select + +from extensions.ext_database import db +from models.dataset import Dataset, DocumentSegment, DocumentSegmentSummary +from models.dataset import Document as DatasetDocument +from services.summary_index_service import SummaryIndexService + +logger = logging.getLogger(__name__) + + +@shared_task(queue="dataset") +def regenerate_summary_index_task( + dataset_id: str, + regenerate_reason: str = "summary_model_changed", + regenerate_vectors_only: bool = False, +): + """ + Regenerate summary indexes for all documents in a dataset. + + This task is triggered when: + 1. summary_index_setting model changes (regenerate_reason="summary_model_changed") + - Regenerates summary content and vectors for all existing summaries + 2. 
embedding_model changes (regenerate_reason="embedding_model_changed") + - Only regenerates vectors for existing summaries (keeps summary content) + + Args: + dataset_id: Dataset ID + regenerate_reason: Reason for regeneration ("summary_model_changed" or "embedding_model_changed") + regenerate_vectors_only: If True, only regenerate vectors without regenerating summary content + """ + logger.info( + click.style( + f"Start regenerate summary index for dataset {dataset_id}, reason: {regenerate_reason}", + fg="green", + ) + ) + start_at = time.perf_counter() + + try: + dataset = db.session.query(Dataset).filter_by(id=dataset_id).first() + if not dataset: + logger.error(click.style(f"Dataset not found: {dataset_id}", fg="red")) + db.session.close() + return + + # Only regenerate summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + logger.info( + click.style( + f"Skipping summary regeneration for dataset {dataset_id}: " + f"indexing_technique is {dataset.indexing_technique}, not 'high_quality'", + fg="cyan", + ) + ) + db.session.close() + return + + # Check if summary index is enabled + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + logger.info( + click.style( + f"Summary index is disabled for dataset {dataset_id}", + fg="cyan", + ) + ) + db.session.close() + return + + # Get all documents with completed indexing status + dataset_documents = db.session.scalars( + select(DatasetDocument).where( + DatasetDocument.dataset_id == dataset_id, + DatasetDocument.indexing_status == "completed", + DatasetDocument.enabled == True, + DatasetDocument.archived == False, + ) + ).all() + + if not dataset_documents: + logger.info( + click.style( + f"No documents found for summary regeneration in dataset {dataset_id}", + fg="cyan", + ) + ) + db.session.close() + return + + logger.info( + f"Found {len(dataset_documents)} documents for summary regeneration in dataset {dataset_id}" + ) + + total_segments_processed = 0 + total_segments_failed = 0 + + for dataset_document in dataset_documents: + # Skip qa_model documents + if dataset_document.doc_form == "qa_model": + continue + + try: + # Get all segments with existing summaries + segments = ( + db.session.query(DocumentSegment) + .join( + DocumentSegmentSummary, + DocumentSegment.id == DocumentSegmentSummary.chunk_id, + ) + .where( + DocumentSegment.document_id == dataset_document.id, + DocumentSegment.dataset_id == dataset_id, + DocumentSegment.status == "completed", + DocumentSegment.enabled == True, + DocumentSegmentSummary.dataset_id == dataset_id, + ) + .order_by(DocumentSegment.position.asc()) + .all() + ) + + if not segments: + continue + + logger.info( + f"Regenerating summaries for {len(segments)} segments in document {dataset_document.id}" + ) + + for segment in segments: + try: + # Get existing summary record + summary_record = ( + db.session.query(DocumentSegmentSummary) + .filter_by( + chunk_id=segment.id, + dataset_id=dataset_id, + ) + .first() + ) + + if not summary_record: + logger.warning( + f"Summary record not found for segment {segment.id}, skipping" + ) + continue + + if regenerate_vectors_only: + # Only regenerate vectors (for embedding_model change) + # Delete old vector + if summary_record.summary_index_node_id: + try: + from core.rag.datasource.vdb.vector_factory import Vector + + vector = Vector(dataset) + vector.delete_by_ids([summary_record.summary_index_node_id]) + except Exception as e: + logger.warning( + 
f"Failed to delete old summary vector for segment {segment.id}: {str(e)}" + ) + + # Re-vectorize with new embedding model + SummaryIndexService.vectorize_summary( + summary_record, segment, dataset + ) + db.session.commit() + else: + # Regenerate both summary content and vectors (for summary_model change) + SummaryIndexService.generate_and_vectorize_summary( + segment, dataset, summary_index_setting + ) + db.session.commit() + + total_segments_processed += 1 + + except Exception as e: + logger.error( + f"Failed to regenerate summary for segment {segment.id}: {str(e)}", + exc_info=True, + ) + total_segments_failed += 1 + # Update summary record with error status + if summary_record: + summary_record.status = "error" + summary_record.error = f"Regeneration failed: {str(e)}" + db.session.add(summary_record) + db.session.commit() + continue + + except Exception as e: + logger.error( + f"Failed to process document {dataset_document.id} for summary regeneration: {str(e)}", + exc_info=True, + ) + continue + + end_at = time.perf_counter() + logger.info( + click.style( + f"Summary index regeneration completed for dataset {dataset_id}: " + f"{total_segments_processed} segments processed successfully, " + f"{total_segments_failed} segments failed, " + f"total documents: {len(dataset_documents)}, " + f"latency: {end_at - start_at:.2f}s", + fg="green", + ) + ) + + except Exception: + logger.exception(f"Regenerate summary index failed for dataset {dataset_id}") + finally: + db.session.close() + diff --git a/api/tasks/remove_document_from_index_task.py b/api/tasks/remove_document_from_index_task.py index c0ab2d0b41..7d191f00c0 100644 --- a/api/tasks/remove_document_from_index_task.py +++ b/api/tasks/remove_document_from_index_task.py @@ -47,6 +47,20 @@ def remove_document_from_index_task(document_id: str): index_processor = IndexProcessorFactory(document.doc_form).init_index_processor() segments = db.session.scalars(select(DocumentSegment).where(DocumentSegment.document_id == document.id)).all() + + # Disable summary indexes for all segments in this document + from services.summary_index_service import SummaryIndexService + segment_ids_list = [segment.id for segment in segments] + if segment_ids_list: + try: + SummaryIndexService.disable_summaries_for_segments( + dataset=dataset, + segment_ids=segment_ids_list, + disabled_by=document.disabled_by, + ) + except Exception as e: + logger.warning(f"Failed to disable summaries for document {document.id}: {str(e)}") + index_node_ids = [segment.index_node_id for segment in segments] if index_node_ids: try: From 7eb65b07c8535370fea9297e6cb33ce3110093aa Mon Sep 17 00:00:00 2001 From: FFXN Date: Wed, 14 Jan 2026 17:52:27 +0800 Subject: [PATCH 14/26] feat: Make summary index support vision, and make the code more standardized. 
--- .../console/datasets/datasets_document.py | 136 +++++----- .../console/datasets/datasets_segments.py | 6 +- .../console/datasets/hit_testing.py | 14 +- api/core/indexing_runner.py | 4 +- api/core/llm_generator/prompts.py | 4 +- api/core/rag/datasource/retrieval_service.py | 6 +- .../index_processor/index_processor_base.py | 4 +- .../processor/paragraph_index_processor.py | 244 ++++++++++++++++-- .../workflow/nodes/document_extractor/node.py | 15 +- .../knowledge_index/knowledge_index_node.py | 116 +++++---- api/fields/document_fields.py | 6 +- api/models/dataset.py | 5 +- api/services/dataset_service.py | 35 +-- api/services/summary_index_service.py | 171 ++++++------ api/tasks/add_document_to_index_task.py | 3 +- api/tasks/disable_segment_from_index_task.py | 3 +- api/tasks/disable_segments_from_index_task.py | 3 +- api/tasks/document_indexing_task.py | 43 +-- api/tasks/enable_segment_to_index_task.py | 3 +- api/tasks/enable_segments_to_index_task.py | 3 +- api/tasks/generate_summary_index_task.py | 5 +- api/tasks/regenerate_summary_index_task.py | 40 +-- api/tasks/remove_document_from_index_task.py | 7 +- 23 files changed, 569 insertions(+), 307 deletions(-) diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py index 5d3a11d200..1ca9a615e3 100644 --- a/api/controllers/console/datasets/datasets_document.py +++ b/api/controllers/console/datasets/datasets_document.py @@ -107,7 +107,8 @@ class DocumentRenamePayload(BaseModel): class GenerateSummaryPayload(BaseModel): document_list: list[str] - + + class DocumentDatasetListParam(BaseModel): page: int = Field(1, title="Page", description="Page number.") limit: int = Field(20, title="Limit", description="Page size.") @@ -311,17 +312,14 @@ class DatasetDocumentListApi(Resource): paginated_documents = db.paginate(select=query, page=page, per_page=limit, max_per_page=100, error_out=False) documents = paginated_documents.items - + # Check if dataset has summary index enabled - has_summary_index = ( - dataset.summary_index_setting - and dataset.summary_index_setting.get("enable") is True - ) - + has_summary_index = dataset.summary_index_setting and dataset.summary_index_setting.get("enable") is True + # Filter documents that need summary calculation documents_need_summary = [doc for doc in documents if doc.need_summary is True] document_ids_need_summary = [str(doc.id) for doc in documents_need_summary] - + # Calculate summary_index_status for documents that need summary (only if dataset summary index is enabled) summary_status_map = {} if has_summary_index and document_ids_need_summary: @@ -335,7 +333,7 @@ class DatasetDocumentListApi(Resource): ) .all() ) - + # Group segments by document_id document_segments_map = {} for segment in segments: @@ -343,7 +341,7 @@ class DatasetDocumentListApi(Resource): if doc_id not in document_segments_map: document_segments_map[doc_id] = [] document_segments_map[doc_id].append(segment.id) - + # Get all summary records for these segments all_segment_ids = [seg.id for seg in segments] summaries = {} @@ -358,7 +356,7 @@ class DatasetDocumentListApi(Resource): .all() ) summaries = {summary.chunk_id: summary.status for summary in summary_records} - + # Calculate summary_index_status for each document for doc_id in document_ids_need_summary: segment_ids = document_segments_map.get(doc_id, []) @@ -366,7 +364,7 @@ class DatasetDocumentListApi(Resource): # No segments, status is "GENERATING" (waiting to generate) summary_status_map[doc_id] = 
"GENERATING" continue - + # Count summary statuses for this document's segments status_counts = {"completed": 0, "generating": 0, "error": 0, "not_started": 0} for segment_id in segment_ids: @@ -375,12 +373,12 @@ class DatasetDocumentListApi(Resource): status_counts[status] += 1 else: status_counts["not_started"] += 1 - + total_segments = len(segment_ids) completed_count = status_counts["completed"] generating_count = status_counts["generating"] error_count = status_counts["error"] - + # Determine overall status (only three states: GENERATING, COMPLETED, ERROR) if completed_count == total_segments: summary_status_map[doc_id] = "COMPLETED" @@ -393,7 +391,7 @@ class DatasetDocumentListApi(Resource): else: # Default to generating summary_status_map[doc_id] = "GENERATING" - + # Add summary_index_status to each document for document in documents: if has_summary_index and document.need_summary is True: @@ -401,7 +399,7 @@ class DatasetDocumentListApi(Resource): else: # Return null if summary index is not enabled or document doesn't need summary document.summary_index_status = None - + if fetch: for document in documents: completed_segments = ( @@ -500,7 +498,6 @@ class DatasetDocumentListApi(Resource): return {"result": "success"}, 204 - @console_ns.route("/datasets/init") class DatasetInitApi(Resource): @console_ns.doc("init_dataset") @@ -1311,49 +1308,46 @@ class DocumentGenerateSummaryApi(Resource): def post(self, dataset_id): """ Generate summary index for specified documents. - + This endpoint checks if the dataset configuration supports summary generation (indexing_technique must be 'high_quality' and summary_index_setting.enable must be true), then asynchronously generates summary indexes for the provided documents. """ current_user, _ = current_account_with_tenant() dataset_id = str(dataset_id) - + # Get dataset dataset = DatasetService.get_dataset(dataset_id) if not dataset: raise NotFound("Dataset not found.") - + # Check permissions if not current_user.is_dataset_editor: raise Forbidden() - + try: DatasetService.check_dataset_permission(dataset, current_user) except services.errors.account.NoPermissionError as e: raise Forbidden(str(e)) - + # Validate request payload payload = GenerateSummaryPayload.model_validate(console_ns.payload or {}) document_list = payload.document_list - + if not document_list: raise ValueError("document_list cannot be empty.") - + # Check if dataset configuration supports summary generation if dataset.indexing_technique != "high_quality": raise ValueError( f"Summary generation is only available for 'high_quality' indexing technique. " f"Current indexing technique: {dataset.indexing_technique}" ) - + summary_index_setting = dataset.summary_index_setting if not summary_index_setting or not summary_index_setting.get("enable"): - raise ValueError( - "Summary index is not enabled for this dataset. " - "Please enable it in the dataset settings." - ) - + raise ValueError("Summary index is not enabled for this dataset. 
Please enable it in the dataset settings.") + # Verify all documents exist and belong to the dataset documents = ( db.session.query(Document) @@ -1363,27 +1357,27 @@ class DocumentGenerateSummaryApi(Resource): ) .all() ) - + if len(documents) != len(document_list): found_ids = {doc.id for doc in documents} missing_ids = set(document_list) - found_ids raise NotFound(f"Some documents not found: {list(missing_ids)}") - + # Dispatch async tasks for each document for document in documents: # Skip qa_model documents as they don't generate summaries if document.doc_form == "qa_model": - logger.info( - f"Skipping summary generation for qa_model document {document.id}" - ) + logger.info("Skipping summary generation for qa_model document %s", document.id) continue - + # Dispatch async task generate_summary_index_task(dataset_id, document.id) logger.info( - f"Dispatched summary generation task for document {document.id} in dataset {dataset_id}" + "Dispatched summary generation task for document %s in dataset %s", + document.id, + dataset_id, ) - + return {"result": "success"}, 200 @@ -1400,7 +1394,7 @@ class DocumentSummaryStatusApi(DocumentResource): def get(self, dataset_id, document_id): """ Get summary index generation status for a document. - + Returns: - total_segments: Total number of segments in the document - summary_status: Dictionary with status counts @@ -1413,21 +1407,21 @@ class DocumentSummaryStatusApi(DocumentResource): current_user, _ = current_account_with_tenant() dataset_id = str(dataset_id) document_id = str(document_id) - + # Get document document = self.get_document(dataset_id, document_id) - + # Get dataset dataset = DatasetService.get_dataset(dataset_id) if not dataset: raise NotFound("Dataset not found.") - + # Check permissions try: DatasetService.check_dataset_permission(dataset, current_user) except services.errors.account.NoPermissionError as e: raise Forbidden(str(e)) - + # Get all segments for this document segments = ( db.session.query(DocumentSegment) @@ -1439,9 +1433,9 @@ class DocumentSummaryStatusApi(DocumentResource): ) .all() ) - + total_segments = len(segments) - + # Get all summary records for these segments segment_ids = [segment.id for segment in segments] summaries = [] @@ -1456,10 +1450,10 @@ class DocumentSummaryStatusApi(DocumentResource): ) .all() ) - + # Create a mapping of chunk_id to summary summary_map = {summary.chunk_id: summary for summary in summaries} - + # Count statuses status_counts = { "completed": 0, @@ -1467,34 +1461,42 @@ class DocumentSummaryStatusApi(DocumentResource): "error": 0, "not_started": 0, } - + summary_list = [] for segment in segments: summary = summary_map.get(segment.id) if summary: status = summary.status status_counts[status] = status_counts.get(status, 0) + 1 - summary_list.append({ - "segment_id": segment.id, - "segment_position": segment.position, - "status": summary.status, - "summary_preview": summary.summary_content[:100] + "..." if summary.summary_content and len(summary.summary_content) > 100 else summary.summary_content, - "error": summary.error, - "created_at": int(summary.created_at.timestamp()) if summary.created_at else None, - "updated_at": int(summary.updated_at.timestamp()) if summary.updated_at else None, - }) + summary_list.append( + { + "segment_id": segment.id, + "segment_position": segment.position, + "status": summary.status, + "summary_preview": ( + summary.summary_content[:100] + "..." 
+ if summary.summary_content and len(summary.summary_content) > 100 + else summary.summary_content + ), + "error": summary.error, + "created_at": int(summary.created_at.timestamp()) if summary.created_at else None, + "updated_at": int(summary.updated_at.timestamp()) if summary.updated_at else None, + } + ) else: status_counts["not_started"] += 1 - summary_list.append({ - "segment_id": segment.id, - "segment_position": segment.position, - "status": "not_started", - "summary_preview": None, - "error": None, - "created_at": None, - "updated_at": None, - }) - + summary_list.append( + { + "segment_id": segment.id, + "segment_position": segment.position, + "status": "not_started", + "summary_preview": None, + "error": None, + "created_at": None, + "updated_at": None, + } + ) + return { "total_segments": total_segments, "summary_status": status_counts, diff --git a/api/controllers/console/datasets/datasets_segments.py b/api/controllers/console/datasets/datasets_segments.py index 423462f966..8a9bc6a201 100644 --- a/api/controllers/console/datasets/datasets_segments.py +++ b/api/controllers/console/datasets/datasets_segments.py @@ -212,9 +212,7 @@ class DatasetDocumentSegmentListApi(Resource): ) # Only include enabled summaries summaries = { - summary.chunk_id: summary.summary_content - for summary in summary_records - if summary.enabled is True + summary.chunk_id: summary.summary_content for summary in summary_records if summary.enabled is True } # Add summary to each segment @@ -433,7 +431,7 @@ class DatasetDocumentSegmentUpdateApi(Resource): payload = SegmentUpdatePayload.model_validate(console_ns.payload or {}) payload_dict = payload.model_dump(exclude_none=True) SegmentService.segment_create_args_validate(payload_dict, document) - + # Update segment (summary update with change detection is handled in SegmentService.update_segment) segment = SegmentService.update_segment( SegmentUpdateArgs.model_validate(payload.model_dump(exclude_none=True)), segment, document, dataset diff --git a/api/controllers/console/datasets/hit_testing.py b/api/controllers/console/datasets/hit_testing.py index c947132070..e62be13c2f 100644 --- a/api/controllers/console/datasets/hit_testing.py +++ b/api/controllers/console/datasets/hit_testing.py @@ -1,6 +1,13 @@ from flask_restx import Resource, fields from controllers.common.schema import register_schema_model +from fields.hit_testing_fields import ( + child_chunk_fields, + document_fields, + files_fields, + hit_testing_record_fields, + segment_fields, +) from libs.login import login_required from .. 
import console_ns @@ -10,13 +17,6 @@ from ..wraps import ( cloud_edition_billing_rate_limit_check, setup_required, ) -from fields.hit_testing_fields import ( - child_chunk_fields, - document_fields, - files_fields, - hit_testing_record_fields, - segment_fields, -) register_schema_model(console_ns, HitTestingPayload) diff --git a/api/core/indexing_runner.py b/api/core/indexing_runner.py index 599a655ab9..e172e88298 100644 --- a/api/core/indexing_runner.py +++ b/api/core/indexing_runner.py @@ -367,8 +367,8 @@ class IndexingRunner: return IndexingEstimate(total_segments=total_segments * 20, qa_preview=qa_preview_texts, preview=[]) # Generate summary preview - summary_index_setting = tmp_processing_rule["summary_index_setting"] if "summary_index_setting" in tmp_processing_rule else None - if summary_index_setting and summary_index_setting.get('enable') and preview_texts: + summary_index_setting = tmp_processing_rule.get("summary_index_setting") + if summary_index_setting and summary_index_setting.get("enable") and preview_texts: preview_texts = index_processor.generate_summary_preview(tenant_id, preview_texts, summary_index_setting) return IndexingEstimate(total_segments=total_segments, preview=preview_texts) diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py index 1fbf279309..af7995f3bd 100644 --- a/api/core/llm_generator/prompts.py +++ b/api/core/llm_generator/prompts.py @@ -436,4 +436,6 @@ You should edit the prompt according to the IDEAL OUTPUT.""" INSTRUCTION_GENERATE_TEMPLATE_CODE = """Please fix the errors in the {{#error_message#}}.""" DEFAULT_GENERATOR_SUMMARY_PROMPT = """ -You are a helpful assistant that summarizes long pieces of text into concise summaries. Given the following text, generate a brief summary that captures the main points and key information. The summary should be clear, concise, and written in complete sentences. """ +You are a helpful assistant that summarizes long pieces of text into concise summaries. +Given the following text, generate a brief summary that captures the main points and key information. +The summary should be clear, concise, and written in complete sentences. 
""" diff --git a/api/core/rag/datasource/retrieval_service.py b/api/core/rag/datasource/retrieval_service.py index 6deb967e0a..372cbe5032 100644 --- a/api/core/rag/datasource/retrieval_service.py +++ b/api/core/rag/datasource/retrieval_service.py @@ -395,7 +395,7 @@ class RetrievalService: index_node_ids = [] doc_to_document_map = {} summary_segment_ids = set() # Track segments retrieved via summary - + # First pass: collect all document IDs and identify summary documents for document in documents: document_id = document.metadata.get("document_id") @@ -455,7 +455,7 @@ class RetrievalService: doc_segment_map[attachment["segment_id"]].append(attachment["attachment_id"]) else: doc_segment_map[attachment["segment_id"]] = [attachment["attachment_id"]] - + child_chunk_stmt = select(ChildChunk).where(ChildChunk.index_node_id.in_(child_index_node_ids)) child_index_nodes = session.execute(child_chunk_stmt).scalars().all() @@ -479,7 +479,7 @@ class RetrievalService: index_node_segments = session.execute(document_segment_stmt).scalars().all() # type: ignore for index_node_segment in index_node_segments: doc_segment_map[index_node_segment.id] = [index_node_segment.index_node_id] - + if segment_ids: document_segment_stmt = select(DocumentSegment).where( DocumentSegment.enabled == True, diff --git a/api/core/rag/index_processor/index_processor_base.py b/api/core/rag/index_processor/index_processor_base.py index 8bbdf8ba39..151a3de7d9 100644 --- a/api/core/rag/index_processor/index_processor_base.py +++ b/api/core/rag/index_processor/index_processor_base.py @@ -47,7 +47,9 @@ class BaseIndexProcessor(ABC): raise NotImplementedError @abstractmethod - def generate_summary_preview(self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict) -> list[PreviewDetail]: + def generate_summary_preview( + self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict + ) -> list[PreviewDetail]: """ For each segment in preview_texts, generate a summary using LLM and attach it to the segment. The summary can be stored in a new attribute, e.g., summary. 
diff --git a/api/core/rag/index_processor/processor/paragraph_index_processor.py b/api/core/rag/index_processor/processor/paragraph_index_processor.py index 89a6d80306..a6f2f4e820 100644 --- a/api/core/rag/index_processor/processor/paragraph_index_processor.py +++ b/api/core/rag/index_processor/processor/paragraph_index_processor.py @@ -1,6 +1,7 @@ """Paragraph index processor.""" import logging +import re import uuid from collections.abc import Mapping from typing import Any @@ -8,6 +9,17 @@ from typing import Any logger = logging.getLogger(__name__) from core.entities.knowledge_entities import PreviewDetail +from core.file import File, FileTransferMethod, FileType, file_manager +from core.llm_generator.prompts import DEFAULT_GENERATOR_SUMMARY_PROMPT +from core.model_manager import ModelInstance +from core.model_runtime.entities.message_entities import ( + ImagePromptMessageContent, + PromptMessageContentUnionTypes, + TextPromptMessageContent, + UserPromptMessage, +) +from core.model_runtime.entities.model_entities import ModelFeature, ModelType +from core.provider_manager import ProviderManager from core.rag.cleaner.clean_processor import CleanProcessor from core.rag.datasource.keyword.keyword_factory import Keyword from core.rag.datasource.retrieval_service import RetrievalService @@ -22,18 +34,15 @@ from core.rag.models.document import AttachmentDocument, Document, MultimodalGen from core.rag.retrieval.retrieval_methods import RetrievalMethod from core.tools.utils.text_processing_utils import remove_leading_symbols from extensions.ext_database import db +from factories.file_factory import build_from_mapping from libs import helper +from models import UploadFile from models.account import Account -from models.dataset import Dataset, DatasetProcessRule, DocumentSegment +from models.dataset import Dataset, DatasetProcessRule, DocumentSegment, SegmentAttachmentBinding from models.dataset import Document as DatasetDocument from services.account_service import AccountService from services.entities.knowledge_entities.knowledge_entities import Rule from services.summary_index_service import SummaryIndexService -from core.llm_generator.prompts import DEFAULT_GENERATOR_SUMMARY_PROMPT -from core.model_runtime.entities.message_entities import UserPromptMessage -from core.model_runtime.entities.model_entities import ModelType -from core.provider_manager import ProviderManager -from core.model_manager import ModelInstance class ParagraphIndexProcessor(BaseIndexProcessor): @@ -262,12 +271,15 @@ class ParagraphIndexProcessor(BaseIndexProcessor): else: raise ValueError("Chunks is not a list") - def generate_summary_preview(self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict) -> list[PreviewDetail]: + def generate_summary_preview( + self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict + ) -> list[PreviewDetail]: """ For each segment, concurrently call generate_summary to generate a summary and write it to the summary attribute of PreviewDetail. 
""" import concurrent.futures + from flask import current_app # Capture Flask app context for worker threads @@ -289,8 +301,8 @@ class ParagraphIndexProcessor(BaseIndexProcessor): # Fallback: try without app context (may fail) summary = self.generate_summary(tenant_id, preview.content, summary_index_setting) preview.summary = summary - except Exception as e: - logger.error(f"Failed to generate summary for preview: {str(e)}") + except Exception: + logger.exception("Failed to generate summary for preview") # Don't fail the entire preview if summary generation fails preview.summary = None @@ -299,9 +311,21 @@ class ParagraphIndexProcessor(BaseIndexProcessor): return preview_texts @staticmethod - def generate_summary(tenant_id: str, text: str, summary_index_setting: dict = None) -> str: + def generate_summary( + tenant_id: str, + text: str, + summary_index_setting: dict | None = None, + segment_id: str | None = None, + ) -> str: """ - Generate summary for the given text using ModelInstance.invoke_llm and the default or custom summary prompt. + Generate summary for the given text using ModelInstance.invoke_llm and the default or custom summary prompt, + and supports vision models by including images from the segment attachments or text content. + + Args: + tenant_id: Tenant ID + text: Text content to summarize + summary_index_setting: Summary index configuration + segment_id: Optional segment ID to fetch attachments from SegmentAttachmentBinding table """ if not summary_index_setting or not summary_index_setting.get("enable"): raise ValueError("summary_index_setting is required and must be enabled to generate summary.") @@ -314,17 +338,195 @@ class ParagraphIndexProcessor(BaseIndexProcessor): if not summary_prompt: summary_prompt = DEFAULT_GENERATOR_SUMMARY_PROMPT - prompt = f"{summary_prompt}\n{text}" - provider_manager = ProviderManager() - provider_model_bundle = provider_manager.get_provider_model_bundle(tenant_id, model_provider_name, ModelType.LLM) - model_instance = ModelInstance(provider_model_bundle, model_name) - prompt_messages = [UserPromptMessage(content=prompt)] - - result = model_instance.invoke_llm( - prompt_messages=prompt_messages, - model_parameters={}, - stream=False + provider_model_bundle = provider_manager.get_provider_model_bundle( + tenant_id, model_provider_name, ModelType.LLM ) + model_instance = ModelInstance(provider_model_bundle, model_name) + + # Get model schema to check if vision is supported + model_schema = model_instance.get_model_schema(model_name, provider_model_bundle.credentials) + supports_vision = model_schema and model_schema.features and ModelFeature.VISION in model_schema.features + + # Extract images if model supports vision + image_files = [] + if supports_vision: + # First, try to get images from SegmentAttachmentBinding (preferred method) + if segment_id: + image_files = ParagraphIndexProcessor._extract_images_from_segment_attachments(tenant_id, segment_id) + + # If no images from attachments, fall back to extracting from text + if not image_files: + image_files = ParagraphIndexProcessor._extract_images_from_text(tenant_id, text) + + # Build prompt messages + prompt_messages = [] + + if image_files: + # If we have images, create a UserPromptMessage with both text and images + prompt_message_contents: list[PromptMessageContentUnionTypes] = [] + + # Add images first + for file in image_files: + try: + file_content = file_manager.to_prompt_message_content( + file, image_detail_config=ImagePromptMessageContent.DETAIL.LOW + ) + 
prompt_message_contents.append(file_content) + except Exception as e: + logger.warning("Failed to convert image file to prompt message content: %s", str(e)) + continue + + # Add text content + if prompt_message_contents: # Only add text if we successfully added images + prompt_message_contents.append(TextPromptMessageContent(data=f"{summary_prompt}\n{text}")) + prompt_messages.append(UserPromptMessage(content=prompt_message_contents)) + else: + # If image conversion failed, fall back to text-only + prompt = f"{summary_prompt}\n{text}" + prompt_messages.append(UserPromptMessage(content=prompt)) + else: + # No images, use simple text prompt + prompt = f"{summary_prompt}\n{text}" + prompt_messages.append(UserPromptMessage(content=prompt)) + + result = model_instance.invoke_llm(prompt_messages=prompt_messages, model_parameters={}, stream=False) return getattr(result.message, "content", "") + + @staticmethod + def _extract_images_from_text(tenant_id: str, text: str) -> list[File]: + """ + Extract images from markdown text and convert them to File objects. + + Args: + tenant_id: Tenant ID + text: Text content that may contain markdown image links + + Returns: + List of File objects representing images found in the text + """ + # Extract markdown images using regex pattern + pattern = r"!\[.*?\]\((.*?)\)" + images = re.findall(pattern, text) + + if not images: + return [] + + upload_file_id_list = [] + + for image in images: + # For data before v0.10.0 + pattern = r"/files/([a-f0-9\-]+)/image-preview(?:\?.*?)?" + match = re.search(pattern, image) + if match: + upload_file_id = match.group(1) + upload_file_id_list.append(upload_file_id) + continue + + # For data after v0.10.0 + pattern = r"/files/([a-f0-9\-]+)/file-preview(?:\?.*?)?" + match = re.search(pattern, image) + if match: + upload_file_id = match.group(1) + upload_file_id_list.append(upload_file_id) + continue + + # For tools directory - direct file formats (e.g., .png, .jpg, etc.) + pattern = r"/files/tools/([a-f0-9\-]+)\.([a-zA-Z0-9]+)(?:\?[^\s\)\"\']*)?" + match = re.search(pattern, image) + if match: + # Tool files are handled differently, skip for now + continue + + if not upload_file_id_list: + return [] + + # Get unique IDs for database query + unique_upload_file_ids = list(set(upload_file_id_list)) + upload_files = ( + db.session.query(UploadFile) + .where(UploadFile.id.in_(unique_upload_file_ids), UploadFile.tenant_id == tenant_id) + .all() + ) + + # Create File objects from UploadFile records + file_objects = [] + for upload_file in upload_files: + # Only process image files + if not upload_file.mime_type or "image" not in upload_file.mime_type: + continue + + mapping = { + "upload_file_id": upload_file.id, + "transfer_method": FileTransferMethod.LOCAL_FILE.value, + "type": FileType.IMAGE.value, + } + + try: + file_obj = build_from_mapping( + mapping=mapping, + tenant_id=tenant_id, + ) + file_objects.append(file_obj) + except Exception as e: + logger.warning("Failed to create File object from UploadFile %s: %s", upload_file.id, str(e)) + continue + + return file_objects + + @staticmethod + def _extract_images_from_segment_attachments(tenant_id: str, segment_id: str) -> list[File]: + """ + Extract images from SegmentAttachmentBinding table (preferred method). + This matches how DatasetRetrieval gets segment attachments. 
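+        Only attachments whose mime type contains "image" are returned; other
+        attachment types are skipped.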
+ + Args: + tenant_id: Tenant ID + segment_id: Segment ID to fetch attachments for + + Returns: + List of File objects representing images found in segment attachments + """ + from sqlalchemy import select + + # Query attachments from SegmentAttachmentBinding table + attachments_with_bindings = db.session.execute( + select(SegmentAttachmentBinding, UploadFile) + .join(UploadFile, UploadFile.id == SegmentAttachmentBinding.attachment_id) + .where( + SegmentAttachmentBinding.segment_id == segment_id, + SegmentAttachmentBinding.tenant_id == tenant_id, + ) + ).all() + + if not attachments_with_bindings: + return [] + + file_objects = [] + for _, upload_file in attachments_with_bindings: + # Only process image files + if not upload_file.mime_type or "image" not in upload_file.mime_type: + continue + + try: + # Create File object directly (similar to DatasetRetrieval) + file_obj = File( + id=upload_file.id, + filename=upload_file.name, + extension="." + upload_file.extension, + mime_type=upload_file.mime_type, + tenant_id=tenant_id, + type=FileType.IMAGE, + transfer_method=FileTransferMethod.LOCAL_FILE, + remote_url=upload_file.source_url, + related_id=upload_file.id, + size=upload_file.size, + storage_key=upload_file.key, + ) + file_objects.append(file_obj) + except Exception as e: + logger.warning("Failed to create File object from UploadFile %s: %s", upload_file.id, str(e)) + continue + + return file_objects diff --git a/api/core/workflow/nodes/document_extractor/node.py b/api/core/workflow/nodes/document_extractor/node.py index 2cbd7952ba..25dd98f48a 100644 --- a/api/core/workflow/nodes/document_extractor/node.py +++ b/api/core/workflow/nodes/document_extractor/node.py @@ -65,14 +65,14 @@ class DocumentExtractorNode(Node[DocumentExtractorNodeData]): # Ensure storage_key is loaded for File objects files_to_check = value if isinstance(value, list) else [value] files_needing_storage_key = [ - f for f in files_to_check - if isinstance(f, File) and not f.storage_key and f.related_id + f for f in files_to_check if isinstance(f, File) and not f.storage_key and f.related_id ] if files_needing_storage_key: - from factories.file_factory import StorageKeyLoader - from extensions.ext_database import db from sqlalchemy.orm import Session - + + from extensions.ext_database import db + from factories.file_factory import StorageKeyLoader + with Session(bind=db.engine) as session: storage_key_loader = StorageKeyLoader(session, tenant_id=self.tenant_id) storage_key_loader.load_storage_keys(files_needing_storage_key) @@ -433,12 +433,13 @@ def _download_file_content(file: File) -> bytes: # Check if storage_key is set if not file.storage_key: raise FileDownloadError(f"File storage_key is missing for file: {file.filename}") - + # Check if file exists before downloading from extensions.ext_storage import storage + if not storage.exists(file.storage_key): raise FileDownloadError(f"File not found in storage: {file.storage_key}") - + return file_manager.download(file) except Exception as e: raise FileDownloadError(f"Error downloading file: {str(e)}") from e diff --git a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py index 4d264683d0..d14bdee1fd 100644 --- a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py +++ b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py @@ -77,11 +77,13 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): # or fallback to dataset if not available in node_data indexing_technique 
= node_data.indexing_technique or dataset.indexing_technique summary_index_setting = node_data.summary_index_setting or dataset.summary_index_setting - + outputs = self._get_preview_output_with_summaries( - node_data.chunk_structure, chunks, dataset=dataset, + node_data.chunk_structure, + chunks, + dataset=dataset, indexing_technique=indexing_technique, - summary_index_setting=summary_index_setting + summary_index_setting=summary_index_setting, ) return NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, @@ -237,7 +239,7 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): segments = query.all() if not segments: - logger.info(f"No segments found for document {document.id}") + logger.info("No segments found for document %s", document.id) return # Filter segments based on mode @@ -256,7 +258,7 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): segments_to_process.append(segment) if not segments_to_process: - logger.info(f"No segments need summary generation for document {document.id}") + logger.info("No segments need summary generation for document %s", document.id) return # Use ThreadPoolExecutor for concurrent generation @@ -267,46 +269,55 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): """Process a single segment in a thread with Flask app context.""" with flask_app.app_context(): try: - SummaryIndexService.generate_and_vectorize_summary( - segment, dataset, summary_index_setting + SummaryIndexService.generate_and_vectorize_summary(segment, dataset, summary_index_setting) + except Exception: + logger.exception( + "Failed to generate summary for segment %s", + segment.id, ) - except Exception as e: - logger.error(f"Failed to generate summary for segment {segment.id}: {str(e)}") # Continue processing other segments with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: - futures = [ - executor.submit(process_segment, segment) for segment in segments_to_process - ] + futures = [executor.submit(process_segment, segment) for segment in segments_to_process] # Wait for all tasks to complete concurrent.futures.wait(futures) logger.info( - f"Successfully generated summary index for {len(segments_to_process)} segments " - f"in document {document.id}" + "Successfully generated summary index for %s segments in document %s", + len(segments_to_process), + document.id, ) - except Exception as e: - logger.exception(f"Failed to generate summary index for document {document.id}: {str(e)}") + except Exception: + logger.exception("Failed to generate summary index for document %s", document.id) # Don't fail the entire indexing process if summary generation fails else: # Production mode: asynchronous generation - logger.info(f"Queuing summary index generation task for document {document.id} (production mode)") + logger.info( + "Queuing summary index generation task for document %s (production mode)", + document.id, + ) try: generate_summary_index_task.delay(dataset.id, document.id, None) - logger.info(f"Summary index generation task queued for document {document.id}") - except Exception as e: - logger.exception(f"Failed to queue summary index generation task for document {document.id}: {str(e)}") + logger.info("Summary index generation task queued for document %s", document.id) + except Exception: + logger.exception( + "Failed to queue summary index generation task for document %s", + document.id, + ) # Don't fail the entire indexing process if task queuing fails def _get_preview_output_with_summaries( - self, chunk_structure: str, chunks: Any, 
dataset: Dataset, + self, + chunk_structure: str, + chunks: Any, + dataset: Dataset, indexing_technique: str | None = None, - summary_index_setting: dict | None = None + summary_index_setting: dict | None = None, ) -> Mapping[str, Any]: """ Generate preview output with summaries for chunks in preview mode. This method generates summaries on-the-fly without saving to database. - + Args: chunk_structure: Chunk structure type chunks: Chunks to generate preview for @@ -316,31 +327,32 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): """ index_processor = IndexProcessorFactory(chunk_structure).init_index_processor() preview_output = index_processor.format_preview(chunks) - + # Check if summary index is enabled if indexing_technique != "high_quality": return preview_output - + if not summary_index_setting or not summary_index_setting.get("enable"): return preview_output - + # Generate summaries for chunks if "preview" in preview_output and isinstance(preview_output["preview"], list): chunk_count = len(preview_output["preview"]) logger.info( - f"Generating summaries for {chunk_count} chunks in preview mode " - f"(dataset: {dataset.id})" + "Generating summaries for %s chunks in preview mode (dataset: %s)", + chunk_count, + dataset.id, ) # Use ParagraphIndexProcessor's generate_summary method from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor - + # Get Flask app for application context in worker threads flask_app = None try: flask_app = current_app._get_current_object() # type: ignore except RuntimeError: logger.warning("No Flask application context available, summary generation may fail") - + def generate_summary_for_chunk(preview_item: dict) -> None: """Generate summary for a single chunk.""" if "content" in preview_item: @@ -364,10 +376,10 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): ) if summary: preview_item["summary"] = summary - except Exception as e: - logger.error(f"Failed to generate summary for chunk: {str(e)}") + except Exception: + logger.exception("Failed to generate summary for chunk") # Don't fail the entire preview if summary generation fails - + # Generate summaries concurrently using ThreadPoolExecutor # Set a reasonable timeout to prevent hanging (60 seconds per chunk, max 5 minutes total) timeout_seconds = min(300, 60 * len(preview_output["preview"])) @@ -378,31 +390,39 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): ] # Wait for all tasks to complete with timeout done, not_done = concurrent.futures.wait(futures, timeout=timeout_seconds) - + # Cancel tasks that didn't complete in time if not_done: logger.warning( - f"Summary generation timeout: {len(not_done)} chunks did not complete within {timeout_seconds}s. " - "Cancelling remaining tasks..." + "Summary generation timeout: %s chunks did not complete within %ss. 
" + "Cancelling remaining tasks...", + len(not_done), + timeout_seconds, ) for future in not_done: future.cancel() # Wait a bit for cancellation to take effect concurrent.futures.wait(not_done, timeout=5) - + completed_count = sum(1 for item in preview_output["preview"] if item.get("summary") is not None) logger.info( - f"Completed summary generation for preview chunks: {completed_count}/{len(preview_output['preview'])} succeeded" + "Completed summary generation for preview chunks: %s/%s succeeded", + completed_count, + len(preview_output["preview"]), ) - + return preview_output def _get_preview_output( - self, chunk_structure: str, chunks: Any, dataset: Dataset | None = None, variable_pool: VariablePool | None = None + self, + chunk_structure: str, + chunks: Any, + dataset: Dataset | None = None, + variable_pool: VariablePool | None = None, ) -> Mapping[str, Any]: index_processor = IndexProcessorFactory(chunk_structure).init_index_processor() preview_output = index_processor.format_preview(chunks) - + # If dataset is provided, try to enrich preview with summaries if dataset and variable_pool: document_id = variable_pool.get(["sys", SystemVariableKey.DOCUMENT_ID]) @@ -420,7 +440,7 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): ) .all() ) - + if summaries: # Create a map of segment content to summary for matching # Use content matching as chunks in preview might not be indexed yet @@ -435,7 +455,7 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): # Normalize content for matching (strip whitespace) normalized_content = segment.content.strip() summary_by_content[normalized_content] = summary.summary_content - + # Enrich preview with summaries by content matching if "preview" in preview_output and isinstance(preview_output["preview"], list): matched_count = 0 @@ -446,13 +466,15 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): if normalized_chunk_content in summary_by_content: preview_item["summary"] = summary_by_content[normalized_chunk_content] matched_count += 1 - + if matched_count > 0: logger.info( - f"Enriched preview with {matched_count} existing summaries " - f"(dataset: {dataset.id}, document: {document.id})" + "Enriched preview with %s existing summaries (dataset: %s, document: %s)", + matched_count, + dataset.id, + document.id, ) - + return preview_output @classmethod diff --git a/api/fields/document_fields.py b/api/fields/document_fields.py index 62f5e19e25..875726d31d 100644 --- a/api/fields/document_fields.py +++ b/api/fields/document_fields.py @@ -33,7 +33,8 @@ document_fields = { "hit_count": fields.Integer, "doc_form": fields.String, "doc_metadata": fields.List(fields.Nested(document_metadata_fields), attribute="doc_metadata_details"), - "summary_index_status": fields.String, # Summary index generation status: "waiting", "generating", "completed", "partial_error", or null if not enabled + # Summary index generation status: "GENERATING", "COMPLETED", "ERROR", or null if not enabled + "summary_index_status": fields.String, "need_summary": fields.Boolean, # Whether this document needs summary index generation } @@ -62,7 +63,8 @@ document_with_segments_fields = { "completed_segments": fields.Integer, "total_segments": fields.Integer, "doc_metadata": fields.List(fields.Nested(document_metadata_fields), attribute="doc_metadata_details"), - "summary_index_status": fields.String, # Summary index generation status: "waiting", "generating", "completed", "partial_error", or null if not enabled + # Summary index generation status: "GENERATING", "COMPLETED", 
"ERROR", or null if not enabled + "summary_index_status": fields.String, "need_summary": fields.Boolean, # Whether this document needs summary index generation } diff --git a/api/models/dataset.py b/api/models/dataset.py index 6497c0efc0..f207c1d2d8 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -1595,8 +1595,9 @@ class DocumentSegmentSummary(Base): disabled_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True) disabled_by = mapped_column(StringUUID, nullable=True) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp()) + updated_at: Mapped[datetime] = mapped_column( + DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp() + ) def __repr__(self): return f"" - diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 82e9770286..2bff0e1524 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -87,9 +87,9 @@ from tasks.disable_segments_from_index_task import disable_segments_from_index_t from tasks.document_indexing_update_task import document_indexing_update_task from tasks.enable_segments_to_index_task import enable_segments_to_index_task from tasks.recover_document_indexing_task import recover_document_indexing_task +from tasks.regenerate_summary_index_task import regenerate_summary_index_task from tasks.remove_document_from_index_task import remove_document_from_index_task from tasks.retry_document_indexing_task import retry_document_indexing_task -from tasks.regenerate_summary_index_task import regenerate_summary_index_task from tasks.sync_website_document_indexing_task import sync_website_document_indexing_task logger = logging.getLogger(__name__) @@ -563,9 +563,7 @@ class DatasetService: action = DatasetService._handle_indexing_technique_change(dataset, data, filtered_data) # Check if summary_index_setting model changed (before updating database) - summary_model_changed = DatasetService._check_summary_index_setting_model_changed( - dataset, data - ) + summary_model_changed = DatasetService._check_summary_index_setting_model_changed(dataset, data) # Add metadata fields filtered_data["updated_by"] = user.id @@ -921,8 +919,12 @@ class DatasetService: # Check if model changed if old_model_name != new_model_name or old_model_provider != new_model_provider: logger.info( - f"Summary index setting model changed for dataset {dataset.id}: " - f"old={old_model_provider}/{old_model_name}, new={new_model_provider}/{new_model_name}" + "Summary index setting model changed for dataset %s: old=%s/%s, new=%s/%s", + dataset.id, + old_model_provider, + old_model_name, + new_model_provider, + new_model_name, ) return True @@ -2208,12 +2210,9 @@ class DocumentService: ): # Set need_summary based on dataset's summary_index_setting need_summary = False - if ( - dataset.summary_index_setting - and dataset.summary_index_setting.get("enable") is True - ): + if dataset.summary_index_setting and dataset.summary_index_setting.get("enable") is True: need_summary = True - + document = Document( tenant_id=dataset.tenant_id, dataset_id=dataset.id, @@ -3118,10 +3117,11 @@ class SegmentService: and dataset.summary_index_setting and dataset.summary_index_setting.get("enable") is True ) - + if has_summary_index: # Query existing summary from database from models.dataset import 
DocumentSegmentSummary + existing_summary = ( db.session.query(DocumentSegmentSummary) .where( @@ -3130,16 +3130,17 @@ class SegmentService: ) .first() ) - + # Check if summary has changed existing_summary_content = existing_summary.summary_content if existing_summary else None if existing_summary_content != args.summary: # Summary has changed, update it from services.summary_index_service import SummaryIndexService + try: SummaryIndexService.update_summary_for_segment(segment, dataset, args.summary) - except Exception as e: - logger.exception(f"Failed to update summary for segment {segment.id}: {str(e)}") + except Exception: + logger.exception("Failed to update summary for segment %s", segment.id) # Don't fail the entire update if summary update fails else: segment_hash = helper.generate_text_hash(content) @@ -3221,8 +3222,8 @@ class SegmentService: try: SummaryIndexService.update_summary_for_segment(segment, dataset, args.summary) - except Exception as e: - logger.exception(f"Failed to update summary for segment {segment.id}: {str(e)}") + except Exception: + logger.exception("Failed to update summary for segment %s", segment.id) # Don't fail the entire update if summary update fails # update multimodel vector index VectorService.update_multimodel_vector(segment, args.attachment_ids or [], dataset) diff --git a/api/services/summary_index_service.py b/api/services/summary_index_service.py index 1d5c51aad8..d2cf23cb1c 100644 --- a/api/services/summary_index_service.py +++ b/api/services/summary_index_service.py @@ -3,7 +3,6 @@ import logging import time import uuid -from typing import Any from core.rag.datasource.vdb.vector_factory import Vector from core.rag.index_processor.constant.doc_type import DocType @@ -47,6 +46,7 @@ class SummaryIndexService: tenant_id=dataset.tenant_id, text=segment.content, summary_index_setting=summary_index_setting, + segment_id=segment.id, ) if not summary_content: @@ -76,11 +76,9 @@ class SummaryIndexService: """ # Check if summary record already exists existing_summary = ( - db.session.query(DocumentSegmentSummary) - .filter_by(chunk_id=segment.id, dataset_id=dataset.id) - .first() + db.session.query(DocumentSegmentSummary).filter_by(chunk_id=segment.id, dataset_id=dataset.id).first() ) - + if existing_summary: # Update existing record existing_summary.summary_content = summary_content @@ -124,8 +122,8 @@ class SummaryIndexService: """ if dataset.indexing_technique != "high_quality": logger.warning( - f"Summary vectorization skipped for dataset {dataset.id}: " - "indexing_technique is not high_quality" + "Summary vectorization skipped for dataset %s: indexing_technique is not high_quality", + dataset.id, ) return @@ -137,10 +135,10 @@ class SummaryIndexService: else: # Generate new index node ID only for new summaries summary_index_node_id = str(uuid.uuid4()) - + # Always regenerate hash (in case summary content changed) summary_hash = helper.generate_text_hash(summary_record.summary_content) - + # Delete old vector only if we're reusing the same index_node_id (to overwrite) # If index_node_id changed, the old vector should have been deleted elsewhere if old_summary_node_id and old_summary_node_id == summary_index_node_id: @@ -149,8 +147,9 @@ class SummaryIndexService: vector.delete_by_ids([old_summary_node_id]) except Exception as e: logger.warning( - f"Failed to delete old summary vector for segment {segment.id}: {str(e)}. " - "Continuing with new vectorization." + "Failed to delete old summary vector for segment %s: %s. 
Continuing with new vectorization.", + segment.id, + str(e), ) # Create document with summary content and metadata @@ -170,12 +169,12 @@ class SummaryIndexService: # Vectorize and store with retry mechanism for connection errors max_retries = 3 retry_delay = 2.0 - + for attempt in range(max_retries): try: vector = Vector(dataset) vector.add_texts([summary_document], duplicate_check=True) - + # Success - update summary record with index node info summary_record.summary_index_node_id = summary_index_node_id summary_record.summary_index_node_hash = summary_hash @@ -183,29 +182,44 @@ class SummaryIndexService: db.session.add(summary_record) db.session.flush() return # Success, exit function - + except (ConnectionError, Exception) as e: error_str = str(e).lower() # Check if it's a connection-related error that might be transient - is_connection_error = any(keyword in error_str for keyword in [ - "connection", "disconnected", "timeout", "network", - "could not connect", "server disconnected", "weaviate" - ]) - + is_connection_error = any( + keyword in error_str + for keyword in [ + "connection", + "disconnected", + "timeout", + "network", + "could not connect", + "server disconnected", + "weaviate", + ] + ) + if is_connection_error and attempt < max_retries - 1: # Retry for connection errors - wait_time = retry_delay * (2 ** attempt) # Exponential backoff + wait_time = retry_delay * (2**attempt) # Exponential backoff logger.warning( - f"Vectorization attempt {attempt + 1}/{max_retries} failed for segment {segment.id}: {str(e)}. " - f"Retrying in {wait_time:.1f} seconds..." + "Vectorization attempt %s/%s failed for segment %s: %s. Retrying in %.1f seconds...", + attempt + 1, + max_retries, + segment.id, + str(e), + wait_time, ) time.sleep(wait_time) continue else: # Final attempt failed or non-connection error - log and update status logger.error( - f"Failed to vectorize summary for segment {segment.id} after {attempt + 1} attempts: {str(e)}", - exc_info=True + "Failed to vectorize summary for segment %s after %s attempts: %s", + segment.id, + attempt + 1, + str(e), + exc_info=True, ) summary_record.status = "error" summary_record.error = f"Vectorization failed: {str(e)}" @@ -235,9 +249,7 @@ class SummaryIndexService: """ try: # Generate summary - summary_content = SummaryIndexService.generate_summary_for_segment( - segment, dataset, summary_index_setting - ) + summary_content = SummaryIndexService.generate_summary_for_segment(segment, dataset, summary_index_setting) # Create or update summary record (will handle overwrite internally) summary_record = SummaryIndexService.create_summary_record( @@ -248,16 +260,14 @@ class SummaryIndexService: SummaryIndexService.vectorize_summary(summary_record, segment, dataset) db.session.commit() - logger.info(f"Successfully generated and vectorized summary for segment {segment.id}") + logger.info("Successfully generated and vectorized summary for segment %s", segment.id) return summary_record - except Exception as e: - logger.exception(f"Failed to generate summary for segment {segment.id}: {str(e)}") + except Exception: + logger.exception("Failed to generate summary for segment %s", segment.id) # Update summary record with error status if it exists summary_record = ( - db.session.query(DocumentSegmentSummary) - .filter_by(chunk_id=segment.id, dataset_id=dataset.id) - .first() + db.session.query(DocumentSegmentSummary).filter_by(chunk_id=segment.id, dataset_id=dataset.id).first() ) if summary_record: summary_record.status = "error" @@ -290,24 +300,27 @@ class 
SummaryIndexService: # Only generate summary index for high_quality indexing technique if dataset.indexing_technique != "high_quality": logger.info( - f"Skipping summary generation for dataset {dataset.id}: " - f"indexing_technique is {dataset.indexing_technique}, not 'high_quality'" + "Skipping summary generation for dataset %s: indexing_technique is %s, not 'high_quality'", + dataset.id, + dataset.indexing_technique, ) return [] if not summary_index_setting or not summary_index_setting.get("enable"): - logger.info(f"Summary index is disabled for dataset {dataset.id}") + logger.info("Summary index is disabled for dataset %s", dataset.id) return [] # Skip qa_model documents if document.doc_form == "qa_model": - logger.info(f"Skipping summary generation for qa_model document {document.id}") + logger.info("Skipping summary generation for qa_model document %s", document.id) return [] logger.info( - f"Starting summary generation for document {document.id} in dataset {dataset.id}, " - f"segment_ids: {len(segment_ids) if segment_ids else 'all'}, " - f"only_parent_chunks: {only_parent_chunks}" + "Starting summary generation for document %s in dataset %s, segment_ids: %s, only_parent_chunks: %s", + document.id, + dataset.id, + len(segment_ids) if segment_ids else "all", + only_parent_chunks, ) # Query segments (only enabled segments) @@ -324,7 +337,7 @@ class SummaryIndexService: segments = query.all() if not segments: - logger.info(f"No segments found for document {document.id}") + logger.info("No segments found for document %s", document.id) return [] summary_records = [] @@ -346,14 +359,15 @@ class SummaryIndexService: segment, dataset, summary_index_setting ) summary_records.append(summary_record) - except Exception as e: - logger.error(f"Failed to generate summary for segment {segment.id}: {str(e)}") + except Exception: + logger.exception("Failed to generate summary for segment %s", segment.id) # Continue with other segments continue logger.info( - f"Completed summary generation for document {document.id}: " - f"{len(summary_records)} summaries generated and vectorized" + "Completed summary generation for document %s: %s summaries generated and vectorized", + document.id, + len(summary_records), ) return summary_records @@ -373,7 +387,7 @@ class SummaryIndexService: disabled_by: User ID who disabled the summaries """ from libs.datetime_utils import naive_utc_now - + query = db.session.query(DocumentSegmentSummary).filter_by( dataset_id=dataset.id, enabled=True, # Only disable enabled summaries @@ -388,21 +402,21 @@ class SummaryIndexService: return logger.info( - f"Disabling {len(summaries)} summary records for dataset {dataset.id}, " - f"segment_ids: {len(segment_ids) if segment_ids else 'all'}" + "Disabling %s summary records for dataset %s, segment_ids: %s", + len(summaries), + dataset.id, + len(segment_ids) if segment_ids else "all", ) # Remove from vector database (but keep records) if dataset.indexing_technique == "high_quality": - summary_node_ids = [ - s.summary_index_node_id for s in summaries if s.summary_index_node_id - ] + summary_node_ids = [s.summary_index_node_id for s in summaries if s.summary_index_node_id] if summary_node_ids: try: vector = Vector(dataset) vector.delete_by_ids(summary_node_ids) except Exception as e: - logger.warning(f"Failed to remove summary vectors: {str(e)}") + logger.warning("Failed to remove summary vectors: %s", str(e)) # Disable summary records (don't delete) now = naive_utc_now() @@ -413,7 +427,7 @@ class SummaryIndexService: db.session.add(summary) 
db.session.commit() - logger.info(f"Disabled {len(summaries)} summary records for dataset {dataset.id}") + logger.info("Disabled %s summary records for dataset %s", len(summaries), dataset.id) @staticmethod def enable_summaries_for_segments( @@ -450,19 +464,25 @@ class SummaryIndexService: return logger.info( - f"Enabling {len(summaries)} summary records for dataset {dataset.id}, " - f"segment_ids: {len(segment_ids) if segment_ids else 'all'}" + "Enabling %s summary records for dataset %s, segment_ids: %s", + len(summaries), + dataset.id, + len(segment_ids) if segment_ids else "all", ) # Re-vectorize and re-add to vector database enabled_count = 0 for summary in summaries: # Get the original segment - segment = db.session.query(DocumentSegment).filter_by( - id=summary.chunk_id, - dataset_id=dataset.id, - ).first() - + segment = ( + db.session.query(DocumentSegment) + .filter_by( + id=summary.chunk_id, + dataset_id=dataset.id, + ) + .first() + ) + if not segment or not segment.enabled or segment.status != "completed": continue @@ -472,20 +492,20 @@ class SummaryIndexService: try: # Re-vectorize summary SummaryIndexService.vectorize_summary(summary, segment, dataset) - + # Enable summary record summary.enabled = True summary.disabled_at = None summary.disabled_by = None db.session.add(summary) enabled_count += 1 - except Exception as e: - logger.error(f"Failed to re-vectorize summary {summary.id}: {str(e)}") + except Exception: + logger.exception("Failed to re-vectorize summary %s", summary.id) # Keep it disabled if vectorization fails continue db.session.commit() - logger.info(f"Enabled {enabled_count} summary records for dataset {dataset.id}") + logger.info("Enabled %s summary records for dataset %s", enabled_count, dataset.id) @staticmethod def delete_summaries_for_segments( @@ -512,9 +532,7 @@ class SummaryIndexService: # Delete from vector database if dataset.indexing_technique == "high_quality": - summary_node_ids = [ - s.summary_index_node_id for s in summaries if s.summary_index_node_id - ] + summary_node_ids = [s.summary_index_node_id for s in summaries if s.summary_index_node_id] if summary_node_ids: vector = Vector(dataset) vector.delete_by_ids(summary_node_ids) @@ -524,7 +542,7 @@ class SummaryIndexService: db.session.delete(summary) db.session.commit() - logger.info(f"Deleted {len(summaries)} summary records for dataset {dataset.id}") + logger.info("Deleted %s summary records for dataset %s", len(summaries), dataset.id) @staticmethod def update_summary_for_segment( @@ -559,9 +577,7 @@ class SummaryIndexService: try: # Find existing summary record summary_record = ( - db.session.query(DocumentSegmentSummary) - .filter_by(chunk_id=segment.id, dataset_id=dataset.id) - .first() + db.session.query(DocumentSegmentSummary).filter_by(chunk_id=segment.id, dataset_id=dataset.id).first() ) if summary_record: @@ -583,7 +599,7 @@ class SummaryIndexService: SummaryIndexService.vectorize_summary(summary_record, segment, dataset) db.session.commit() - logger.info(f"Successfully updated and re-vectorized summary for segment {segment.id}") + logger.info("Successfully updated and re-vectorized summary for segment %s", segment.id) return summary_record else: # Create new summary record if doesn't exist @@ -592,16 +608,14 @@ class SummaryIndexService: ) SummaryIndexService.vectorize_summary(summary_record, segment, dataset) db.session.commit() - logger.info(f"Successfully created and vectorized summary for segment {segment.id}") + logger.info("Successfully created and vectorized summary for segment 
%s", segment.id) return summary_record - except Exception as e: - logger.exception(f"Failed to update summary for segment {segment.id}: {str(e)}") + except Exception: + logger.exception("Failed to update summary for segment %s", segment.id) # Update summary record with error status if it exists summary_record = ( - db.session.query(DocumentSegmentSummary) - .filter_by(chunk_id=segment.id, dataset_id=dataset.id) - .first() + db.session.query(DocumentSegmentSummary).filter_by(chunk_id=segment.id, dataset_id=dataset.id).first() ) if summary_record: summary_record.status = "error" @@ -609,4 +623,3 @@ class SummaryIndexService: db.session.add(summary_record) db.session.commit() raise - diff --git a/api/tasks/add_document_to_index_task.py b/api/tasks/add_document_to_index_task.py index da6f468edd..c6cf8cc10d 100644 --- a/api/tasks/add_document_to_index_task.py +++ b/api/tasks/add_document_to_index_task.py @@ -119,6 +119,7 @@ def add_document_to_index_task(dataset_document_id: str): # Enable summary indexes for all segments in this document from services.summary_index_service import SummaryIndexService + segment_ids_list = [segment.id for segment in segments] if segment_ids_list: try: @@ -127,7 +128,7 @@ def add_document_to_index_task(dataset_document_id: str): segment_ids=segment_ids_list, ) except Exception as e: - logger.warning(f"Failed to enable summaries for document {dataset_document.id}: {str(e)}") + logger.warning("Failed to enable summaries for document %s: %s", dataset_document.id, str(e)) end_at = time.perf_counter() logger.info( diff --git a/api/tasks/disable_segment_from_index_task.py b/api/tasks/disable_segment_from_index_task.py index 67c2867edd..335de86ec0 100644 --- a/api/tasks/disable_segment_from_index_task.py +++ b/api/tasks/disable_segment_from_index_task.py @@ -55,6 +55,7 @@ def disable_segment_from_index_task(segment_id: str): # Disable summary index for this segment from services.summary_index_service import SummaryIndexService + try: SummaryIndexService.disable_summaries_for_segments( dataset=dataset, @@ -62,7 +63,7 @@ def disable_segment_from_index_task(segment_id: str): disabled_by=segment.disabled_by, ) except Exception as e: - logger.warning(f"Failed to disable summary for segment {segment.id}: {str(e)}") + logger.warning("Failed to disable summary for segment %s: %s", segment.id, str(e)) index_type = dataset_document.doc_form index_processor = IndexProcessorFactory(index_type).init_index_processor() diff --git a/api/tasks/disable_segments_from_index_task.py b/api/tasks/disable_segments_from_index_task.py index b6a534bacf..43cd466e4b 100644 --- a/api/tasks/disable_segments_from_index_task.py +++ b/api/tasks/disable_segments_from_index_task.py @@ -60,6 +60,7 @@ def disable_segments_from_index_task(segment_ids: list, dataset_id: str, documen try: # Disable summary indexes for these segments from services.summary_index_service import SummaryIndexService + segment_ids_list = [segment.id for segment in segments] try: # Get disabled_by from first segment (they should all have the same disabled_by) @@ -70,7 +71,7 @@ def disable_segments_from_index_task(segment_ids: list, dataset_id: str, documen disabled_by=disabled_by, ) except Exception as e: - logger.warning(f"Failed to disable summaries for segments: {str(e)}") + logger.warning("Failed to disable summaries for segments: %s", str(e)) index_node_ids = [segment.index_node_id for segment in segments] if dataset.is_multimodal: diff --git a/api/tasks/document_indexing_task.py b/api/tasks/document_indexing_task.py index 
319837ceaf..4c65ed6ab3 100644 --- a/api/tasks/document_indexing_task.py +++ b/api/tasks/document_indexing_task.py @@ -8,13 +8,13 @@ from celery import shared_task from configs import dify_config from core.entities.document_task import DocumentTask from core.indexing_runner import DocumentIsPausedError, IndexingRunner -from tasks.generate_summary_index_task import generate_summary_index_task from core.rag.pipeline.queue import TenantIsolatedTaskQueue from enums.cloud_plan import CloudPlan from extensions.ext_database import db from libs.datetime_utils import naive_utc_now from models.dataset import Dataset, Document from services.feature_service import FeatureService +from tasks.generate_summary_index_task import generate_summary_index_task logger = logging.getLogger(__name__) @@ -101,15 +101,15 @@ def _document_indexing(dataset_id: str, document_ids: Sequence[str]): indexing_runner.run(documents) end_at = time.perf_counter() logger.info(click.style(f"Processed dataset: {dataset_id} latency: {end_at - start_at}", fg="green")) - + # Trigger summary index generation for completed documents if enabled # Only generate for high_quality indexing technique and when summary_index_setting is enabled # Re-query dataset to get latest summary_index_setting (in case it was updated) dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first() if not dataset: - logger.warning(f"Dataset {dataset_id} not found after indexing") + logger.warning("Dataset %s not found after indexing", dataset_id) return - + if dataset.indexing_technique == "high_quality": summary_index_setting = dataset.summary_index_setting if summary_index_setting and summary_index_setting.get("enable"): @@ -123,37 +123,46 @@ def _document_indexing(dataset_id: str, document_ids: Sequence[str]): ) if document: logger.info( - f"Checking document {document_id} for summary generation: " - f"status={document.indexing_status}, doc_form={document.doc_form}" + "Checking document %s for summary generation: status=%s, doc_form=%s", + document_id, + document.indexing_status, + document.doc_form, ) if document.indexing_status == "completed" and document.doc_form != "qa_model": try: generate_summary_index_task.delay(dataset.id, document_id, None) logger.info( - f"Queued summary index generation task for document {document_id} " - f"in dataset {dataset.id} after indexing completed" + "Queued summary index generation task for document %s in dataset %s " + "after indexing completed", + document_id, + dataset.id, ) - except Exception as e: + except Exception: logger.exception( - f"Failed to queue summary index generation task for document {document_id}: {str(e)}" + "Failed to queue summary index generation task for document %s", + document_id, ) # Don't fail the entire indexing process if summary task queuing fails else: logger.info( - f"Skipping summary generation for document {document_id}: " - f"status={document.indexing_status}, doc_form={document.doc_form}" + "Skipping summary generation for document %s: status=%s, doc_form=%s", + document_id, + document.indexing_status, + document.doc_form, ) else: - logger.warning(f"Document {document_id} not found after indexing") + logger.warning("Document %s not found after indexing", document_id) else: logger.info( - f"Summary index generation skipped for dataset {dataset.id}: " - f"summary_index_setting.enable={summary_index_setting.get('enable') if summary_index_setting else None}" + "Summary index generation skipped for dataset %s: summary_index_setting.enable=%s", + dataset.id, + 
summary_index_setting.get("enable") if summary_index_setting else None, ) else: logger.info( - f"Summary index generation skipped for dataset {dataset.id}: " - f"indexing_technique={dataset.indexing_technique} (not 'high_quality')" + "Summary index generation skipped for dataset %s: indexing_technique=%s (not 'high_quality')", + dataset.id, + dataset.indexing_technique, ) except DocumentIsPausedError as ex: logger.info(click.style(str(ex), fg="yellow")) diff --git a/api/tasks/enable_segment_to_index_task.py b/api/tasks/enable_segment_to_index_task.py index 113e19871e..0b16dfd56e 100644 --- a/api/tasks/enable_segment_to_index_task.py +++ b/api/tasks/enable_segment_to_index_task.py @@ -105,13 +105,14 @@ def enable_segment_to_index_task(segment_id: str): # Enable summary index for this segment from services.summary_index_service import SummaryIndexService + try: SummaryIndexService.enable_summaries_for_segments( dataset=dataset, segment_ids=[segment.id], ) except Exception as e: - logger.warning(f"Failed to enable summary for segment {segment.id}: {str(e)}") + logger.warning("Failed to enable summary for segment %s: %s", segment.id, str(e)) end_at = time.perf_counter() logger.info(click.style(f"Segment enabled to index: {segment.id} latency: {end_at - start_at}", fg="green")) diff --git a/api/tasks/enable_segments_to_index_task.py b/api/tasks/enable_segments_to_index_task.py index 0c419ca2f0..8b9ae5c10b 100644 --- a/api/tasks/enable_segments_to_index_task.py +++ b/api/tasks/enable_segments_to_index_task.py @@ -110,6 +110,7 @@ def enable_segments_to_index_task(segment_ids: list, dataset_id: str, document_i # Enable summary indexes for these segments from services.summary_index_service import SummaryIndexService + segment_ids_list = [segment.id for segment in segments] try: SummaryIndexService.enable_summaries_for_segments( @@ -117,7 +118,7 @@ def enable_segments_to_index_task(segment_ids: list, dataset_id: str, document_i segment_ids=segment_ids_list, ) except Exception as e: - logger.warning(f"Failed to enable summaries for segments: {str(e)}") + logger.warning("Failed to enable summaries for segments: %s", str(e)) end_at = time.perf_counter() logger.info(click.style(f"Segments enabled to index latency: {end_at - start_at}", fg="green")) diff --git a/api/tasks/generate_summary_index_task.py b/api/tasks/generate_summary_index_task.py index 2850658ce4..99f957abaa 100644 --- a/api/tasks/generate_summary_index_task.py +++ b/api/tasks/generate_summary_index_task.py @@ -94,8 +94,8 @@ def generate_summary_index_task(dataset_id: str, document_id: str, segment_ids: ) ) - except Exception as e: - logger.exception(f"Failed to generate summary index for document {document_id}: {str(e)}") + except Exception: + logger.exception("Failed to generate summary index for document %s", document_id) # Update document segments with error status if needed if segment_ids: db.session.query(DocumentSegment).filter( @@ -110,4 +110,3 @@ def generate_summary_index_task(dataset_id: str, document_id: str, segment_ids: db.session.commit() finally: db.session.close() - diff --git a/api/tasks/regenerate_summary_index_task.py b/api/tasks/regenerate_summary_index_task.py index ddc48f9d99..f24b7bf368 100644 --- a/api/tasks/regenerate_summary_index_task.py +++ b/api/tasks/regenerate_summary_index_task.py @@ -2,7 +2,6 @@ import logging import time -from typing import Any import click from celery import shared_task @@ -24,13 +23,13 @@ def regenerate_summary_index_task( ): """ Regenerate summary indexes for all documents in a 
dataset. - + This task is triggered when: 1. summary_index_setting model changes (regenerate_reason="summary_model_changed") - Regenerates summary content and vectors for all existing summaries 2. embedding_model changes (regenerate_reason="embedding_model_changed") - Only regenerates vectors for existing summaries (keeps summary content) - + Args: dataset_id: Dataset ID regenerate_reason: Reason for regeneration ("summary_model_changed" or "embedding_model_changed") @@ -96,7 +95,9 @@ def regenerate_summary_index_task( return logger.info( - f"Found {len(dataset_documents)} documents for summary regeneration in dataset {dataset_id}" + "Found %s documents for summary regeneration in dataset %s", + len(dataset_documents), + dataset_id, ) total_segments_processed = 0 @@ -130,7 +131,9 @@ def regenerate_summary_index_task( continue logger.info( - f"Regenerating summaries for {len(segments)} segments in document {dataset_document.id}" + "Regenerating summaries for %s segments in document %s", + len(segments), + dataset_document.id, ) for segment in segments: @@ -146,9 +149,7 @@ def regenerate_summary_index_task( ) if not summary_record: - logger.warning( - f"Summary record not found for segment {segment.id}, skipping" - ) + logger.warning("Summary record not found for segment %s, skipping", segment.id) continue if regenerate_vectors_only: @@ -162,26 +163,26 @@ def regenerate_summary_index_task( vector.delete_by_ids([summary_record.summary_index_node_id]) except Exception as e: logger.warning( - f"Failed to delete old summary vector for segment {segment.id}: {str(e)}" + "Failed to delete old summary vector for segment %s: %s", + segment.id, + str(e), ) # Re-vectorize with new embedding model - SummaryIndexService.vectorize_summary( - summary_record, segment, dataset - ) + SummaryIndexService.vectorize_summary(summary_record, segment, dataset) db.session.commit() else: # Regenerate both summary content and vectors (for summary_model change) - SummaryIndexService.generate_and_vectorize_summary( - segment, dataset, summary_index_setting - ) + SummaryIndexService.generate_and_vectorize_summary(segment, dataset, summary_index_setting) db.session.commit() total_segments_processed += 1 except Exception as e: logger.error( - f"Failed to regenerate summary for segment {segment.id}: {str(e)}", + "Failed to regenerate summary for segment %s: %s", + segment.id, + str(e), exc_info=True, ) total_segments_failed += 1 @@ -195,7 +196,9 @@ def regenerate_summary_index_task( except Exception as e: logger.error( - f"Failed to process document {dataset_document.id} for summary regeneration: {str(e)}", + "Failed to process document %s for summary regeneration: %s", + dataset_document.id, + str(e), exc_info=True, ) continue @@ -213,7 +216,6 @@ def regenerate_summary_index_task( ) except Exception: - logger.exception(f"Regenerate summary index failed for dataset {dataset_id}") + logger.exception("Regenerate summary index failed for dataset %s", dataset_id) finally: db.session.close() - diff --git a/api/tasks/remove_document_from_index_task.py b/api/tasks/remove_document_from_index_task.py index 7d191f00c0..5c8f1ff993 100644 --- a/api/tasks/remove_document_from_index_task.py +++ b/api/tasks/remove_document_from_index_task.py @@ -47,9 +47,10 @@ def remove_document_from_index_task(document_id: str): index_processor = IndexProcessorFactory(document.doc_form).init_index_processor() segments = db.session.scalars(select(DocumentSegment).where(DocumentSegment.document_id == document.id)).all() - + # Disable summary indexes for 
all segments in this document from services.summary_index_service import SummaryIndexService + segment_ids_list = [segment.id for segment in segments] if segment_ids_list: try: @@ -59,8 +60,8 @@ def remove_document_from_index_task(document_id: str): disabled_by=document.disabled_by, ) except Exception as e: - logger.warning(f"Failed to disable summaries for document {document.id}: {str(e)}") - + logger.warning("Failed to disable summaries for document %s: %s", document.id, str(e)) + index_node_ids = [segment.index_node_id for segment in segments] if index_node_ids: try: From f4d20a02aa51430fabc9cb34e114e8f39437ae6f Mon Sep 17 00:00:00 2001 From: FFXN Date: Thu, 15 Jan 2026 11:06:18 +0800 Subject: [PATCH 15/26] feat: fix summary index bug. --- api/services/dataset_service.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 2bff0e1524..226efcc6d8 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -2634,8 +2634,8 @@ class DocumentService: raise ValueError("Process rule segmentation max_tokens is invalid") # valid summary index setting - if args["process_rule"]["summary_index_setting"] and args["process_rule"]["summary_index_setting"]["enable"]: - summary_index_setting = args["process_rule"]["summary_index_setting"] + summary_index_setting = args["process_rule"].get("summary_index_setting") + if summary_index_setting and summary_index_setting.get("enable"): if "model_name" not in summary_index_setting or not summary_index_setting["model_name"]: raise ValueError("Summary index model name is required") if "model_provider_name" not in summary_index_setting or not summary_index_setting["model_provider_name"]: From 22d0c55363e943412710c50fb7227ba3450540b5 Mon Sep 17 00:00:00 2001 From: FFXN Date: Thu, 15 Jan 2026 15:10:38 +0800 Subject: [PATCH 16/26] fix: fix summary index bug. --- .../rag/index_processor/processor/paragraph_index_processor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/rag/index_processor/processor/paragraph_index_processor.py b/api/core/rag/index_processor/processor/paragraph_index_processor.py index a6f2f4e820..930abd6bc6 100644 --- a/api/core/rag/index_processor/processor/paragraph_index_processor.py +++ b/api/core/rag/index_processor/processor/paragraph_index_processor.py @@ -345,7 +345,7 @@ class ParagraphIndexProcessor(BaseIndexProcessor): model_instance = ModelInstance(provider_model_bundle, model_name) # Get model schema to check if vision is supported - model_schema = model_instance.get_model_schema(model_name, provider_model_bundle.credentials) + model_schema = model_instance.model_type_instance.get_model_schema(model_name, model_instance.credentials) supports_vision = model_schema and model_schema.features and ModelFeature.VISION in model_schema.features # Extract images if model supports vision From 74245fea8ec1ef053ed430c7b22a372f0d9c4563 Mon Sep 17 00:00:00 2001 From: FFXN Date: Thu, 15 Jan 2026 17:57:15 +0800 Subject: [PATCH 17/26] fix: fix summary index bug. 
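A note on the DocumentService change in PATCH 15 above: swapping direct key access for .get() means documents whose process_rule carries no summary_index_setting no longer raise a KeyError during validation. Below is a minimal stand-alone sketch of that defensive pattern, assuming a plain dict input; the helper name and the provider-name error message are illustrative, while the enable/model_name/model_provider_name keys are the ones the patch itself checks.

def validate_summary_index_setting(process_rule: dict) -> dict | None:
    """Return the summary_index_setting when it is enabled and well-formed, else None.

    Raises ValueError when the setting is enabled but incomplete, mirroring the
    checks in DocumentService above (error wording here is paraphrased).
    """
    setting = process_rule.get("summary_index_setting")
    if not setting or not setting.get("enable"):
        # Absent or disabled settings are simply skipped instead of raising KeyError.
        return None
    if not setting.get("model_name"):
        raise ValueError("Summary index model name is required")
    if not setting.get("model_provider_name"):
        raise ValueError("Summary index model provider name is required")
    return setting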
--- .../processor/parent_child_index_processor.py | 58 +++++++++++++++++++ .../processor/qa_index_processor.py | 11 ++++ 2 files changed, 69 insertions(+) diff --git a/api/core/rag/index_processor/processor/parent_child_index_processor.py b/api/core/rag/index_processor/processor/parent_child_index_processor.py index 7e33ef9c02..ccb1c55b72 100644 --- a/api/core/rag/index_processor/processor/parent_child_index_processor.py +++ b/api/core/rag/index_processor/processor/parent_child_index_processor.py @@ -1,11 +1,13 @@ """Paragraph index processor.""" import json +import logging import uuid from collections.abc import Mapping from typing import Any from configs import dify_config +from core.entities.knowledge_entities import PreviewDetail from core.model_manager import ModelInstance from core.rag.cleaner.clean_processor import CleanProcessor from core.rag.datasource.retrieval_service import RetrievalService @@ -27,6 +29,8 @@ from services.account_service import AccountService from services.entities.knowledge_entities.knowledge_entities import ParentMode, Rule from services.summary_index_service import SummaryIndexService +logger = logging.getLogger(__name__) + class ParentChildIndexProcessor(BaseIndexProcessor): def extract(self, extract_setting: ExtractSetting, **kwargs) -> list[Document]: @@ -350,3 +354,57 @@ class ParentChildIndexProcessor(BaseIndexProcessor): "preview": preview, "total_segments": len(parent_childs.parent_child_chunks), } + + def generate_summary_preview( + self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict + ) -> list[PreviewDetail]: + """ + For each parent chunk in preview_texts, concurrently call generate_summary to generate a summary + and write it to the summary attribute of PreviewDetail. + + Note: For parent-child structure, we only generate summaries for parent chunks. 
+ """ + import concurrent.futures + + from flask import current_app + + # Capture Flask app context for worker threads + flask_app = None + try: + flask_app = current_app._get_current_object() # type: ignore + except RuntimeError: + logger.warning("No Flask application context available, summary generation may fail") + + def process(preview: PreviewDetail) -> None: + """Generate summary for a single preview item (parent chunk).""" + try: + if flask_app: + # Ensure Flask app context in worker thread + with flask_app.app_context(): + # Use ParagraphIndexProcessor's generate_summary method + from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor + summary = ParagraphIndexProcessor.generate_summary( + tenant_id=tenant_id, + text=preview.content, + summary_index_setting=summary_index_setting, + ) + if summary: + preview.summary = summary + else: + # Fallback: try without app context (may fail) + from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor + summary = ParagraphIndexProcessor.generate_summary( + tenant_id=tenant_id, + text=preview.content, + summary_index_setting=summary_index_setting, + ) + if summary: + preview.summary = summary + except Exception: + logger.exception("Failed to generate summary for preview") + # Don't fail the entire preview if summary generation fails + preview.summary = None + + with concurrent.futures.ThreadPoolExecutor() as executor: + list(executor.map(process, preview_texts)) + return preview_texts diff --git a/api/core/rag/index_processor/processor/qa_index_processor.py b/api/core/rag/index_processor/processor/qa_index_processor.py index b38af0cacb..1875e8b848 100644 --- a/api/core/rag/index_processor/processor/qa_index_processor.py +++ b/api/core/rag/index_processor/processor/qa_index_processor.py @@ -237,6 +237,17 @@ class QAIndexProcessor(BaseIndexProcessor): "total_segments": len(qa_chunks.qa_chunks), } + def generate_summary_preview( + self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict + ) -> list[PreviewDetail]: + """ + QA model doesn't generate summaries, so this method returns preview_texts unchanged. + + Note: QA model uses question-answer pairs, which don't require summary generation. + """ + # QA model doesn't generate summaries, return as-is + return preview_texts + def _format_qa_document(self, flask_app: Flask, tenant_id: str, document_node, all_qa_documents, document_language): format_documents = [] if document_node.page_content is None or not document_node.page_content.strip(): From 869e70964f5711fabe3b1cc0494fdf5b528ed28c Mon Sep 17 00:00:00 2001 From: FFXN Date: Thu, 15 Jan 2026 18:09:48 +0800 Subject: [PATCH 18/26] fix: fix summary index bug. 
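The generate_summary_preview method added in PATCH 17 fans per-chunk summary generation out over a ThreadPoolExecutor while re-entering the Flask application context inside every worker thread. Below is a minimal sketch of that pattern in isolation, assuming only Flask and the standard library; run_with_app_context and work are illustrative names, not part of the patch.

import concurrent.futures

from flask import current_app


def run_with_app_context(items, work):
    """Run work(item) for every item in a thread pool, pushing the Flask app
    context into each worker so config and db access keep working off the
    request thread."""
    try:
        flask_app = current_app._get_current_object()
    except RuntimeError:
        flask_app = None  # no application context available; workers run bare and may fail

    def wrapper(item):
        if flask_app is not None:
            with flask_app.app_context():
                return work(item)
        return work(item)

    with concurrent.futures.ThreadPoolExecutor() as executor:
        # map() keeps input order; worker exceptions surface when results are iterated
        return list(executor.map(wrapper, items))

In the patch itself, per-item error handling stays inside the worker function, so one failing chunk does not abort the whole preview; the QA processor variant simply returns its preview unchanged.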
--- api/core/rag/index_processor/processor/qa_index_processor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/api/core/rag/index_processor/processor/qa_index_processor.py b/api/core/rag/index_processor/processor/qa_index_processor.py index 1875e8b848..8f738f07dc 100644 --- a/api/core/rag/index_processor/processor/qa_index_processor.py +++ b/api/core/rag/index_processor/processor/qa_index_processor.py @@ -11,6 +11,7 @@ import pandas as pd from flask import Flask, current_app from werkzeug.datastructures import FileStorage +from core.entities.knowledge_entities import PreviewDetail from core.llm_generator.llm_generator import LLMGenerator from core.rag.cleaner.clean_processor import CleanProcessor from core.rag.datasource.retrieval_service import RetrievalService From fcb2fe55e7eb12722306bb46f992ffe8ed3c3069 Mon Sep 17 00:00:00 2001 From: FFXN Date: Fri, 16 Jan 2026 18:55:10 +0800 Subject: [PATCH 19/26] fix: fix summary index bug. --- api/core/llm_generator/prompts.py | 20 +++++++++++++++---- api/services/dataset_service.py | 6 ++++++ .../rag_pipeline_entities.py | 1 + 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py index af7995f3bd..1e44d89e2f 100644 --- a/api/core/llm_generator/prompts.py +++ b/api/core/llm_generator/prompts.py @@ -435,7 +435,19 @@ You should edit the prompt according to the IDEAL OUTPUT.""" INSTRUCTION_GENERATE_TEMPLATE_CODE = """Please fix the errors in the {{#error_message#}}.""" -DEFAULT_GENERATOR_SUMMARY_PROMPT = """ -You are a helpful assistant that summarizes long pieces of text into concise summaries. -Given the following text, generate a brief summary that captures the main points and key information. -The summary should be clear, concise, and written in complete sentences. """ +DEFAULT_GENERATOR_SUMMARY_PROMPT = ( + """Summarize the following content. Extract only the key information and main points. """ + """Remove redundant details. + +Requirements: +1. Write a concise summary in plain text +2. Use the same language as the input content +3. Focus on important facts, concepts, and details +4. If images are included, describe their key information +5. Do not use words like "好的", "ok", "I understand", "This text discusses", "The content mentions" +6. Write directly without extra words + +Output only the summary text. 
Start summarizing now: + +""" +) \ No newline at end of file diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 226efcc6d8..1b710783fe 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -965,6 +965,9 @@ class DatasetService: else: raise ValueError("Invalid index method") dataset.retrieval_model = knowledge_configuration.retrieval_model.model_dump() + # Update summary_index_setting if provided + if knowledge_configuration.summary_index_setting is not None: + dataset.summary_index_setting = knowledge_configuration.summary_index_setting session.add(dataset) else: if dataset.chunk_structure and dataset.chunk_structure != knowledge_configuration.chunk_structure: @@ -1070,6 +1073,9 @@ class DatasetService: if dataset.keyword_number != knowledge_configuration.keyword_number: dataset.keyword_number = knowledge_configuration.keyword_number dataset.retrieval_model = knowledge_configuration.retrieval_model.model_dump() + # Update summary_index_setting if provided + if knowledge_configuration.summary_index_setting is not None: + dataset.summary_index_setting = knowledge_configuration.summary_index_setting session.add(dataset) session.commit() if action: diff --git a/api/services/entities/knowledge_entities/rag_pipeline_entities.py b/api/services/entities/knowledge_entities/rag_pipeline_entities.py index cbb0efcc2a..defd3cdd71 100644 --- a/api/services/entities/knowledge_entities/rag_pipeline_entities.py +++ b/api/services/entities/knowledge_entities/rag_pipeline_entities.py @@ -116,6 +116,7 @@ class KnowledgeConfiguration(BaseModel): embedding_model: str = "" keyword_number: int | None = 10 retrieval_model: RetrievalSetting + summary_index_setting: dict | None = None @field_validator("embedding_model_provider", mode="before") @classmethod From 4fb08ae7d23e9e358d8d5f14f1da42ac990f4647 Mon Sep 17 00:00:00 2001 From: FFXN Date: Fri, 16 Jan 2026 20:24:18 +0800 Subject: [PATCH 20/26] fix: fix summary index bug. --- .../entities/knowledge_entities/rag_pipeline_entities.py | 1 + 1 file changed, 1 insertion(+) diff --git a/api/services/entities/knowledge_entities/rag_pipeline_entities.py b/api/services/entities/knowledge_entities/rag_pipeline_entities.py index defd3cdd71..041ae4edba 100644 --- a/api/services/entities/knowledge_entities/rag_pipeline_entities.py +++ b/api/services/entities/knowledge_entities/rag_pipeline_entities.py @@ -116,6 +116,7 @@ class KnowledgeConfiguration(BaseModel): embedding_model: str = "" keyword_number: int | None = 10 retrieval_model: RetrievalSetting + # add summary index setting summary_index_setting: dict | None = None @field_validator("embedding_model_provider", mode="before") From 008a5f361dba0893cccb6823cd5c85cf6c5f1342 Mon Sep 17 00:00:00 2001 From: FFXN Date: Tue, 20 Jan 2026 11:53:16 +0800 Subject: [PATCH 21/26] fix: fix summary index bug. 
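PATCH 19 above threads an optional summary_index_setting dict through KnowledgeConfiguration and into DatasetService. A hedged sketch of how the new field might be exercised follows; the trimmed-down model, the provider and model values, and the final print are illustrative only, while the enable/model_name/model_provider_name keys mirror the validation used elsewhere in this series.

from pydantic import BaseModel


class KnowledgeConfigurationSketch(BaseModel):
    # trimmed stand-in for the real KnowledgeConfiguration; only the new field is shown
    summary_index_setting: dict | None = None


config = KnowledgeConfigurationSketch.model_validate(
    {
        "summary_index_setting": {
            "enable": True,
            "model_provider_name": "openai",  # hypothetical provider
            "model_name": "gpt-4o-mini",  # hypothetical model
        }
    }
)

# Mirrors the DatasetService change: only overwrite when the field was actually
# provided, so configurations that omit it leave the dataset's existing setting untouched.
if config.summary_index_setting is not None:
    print("dataset.summary_index_setting <-", config.summary_index_setting)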
--- .../console/datasets/datasets_document.py | 29 ++++----- .../knowledge_index/knowledge_index_node.py | 60 ++++++++++++------- api/fields/document_fields.py | 9 ++- api/services/summary_index_service.py | 11 ++-- 4 files changed, 63 insertions(+), 46 deletions(-) diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py index 1ca9a615e3..85c2f33222 100644 --- a/api/controllers/console/datasets/datasets_document.py +++ b/api/controllers/console/datasets/datasets_document.py @@ -361,8 +361,8 @@ class DatasetDocumentListApi(Resource): for doc_id in document_ids_need_summary: segment_ids = document_segments_map.get(doc_id, []) if not segment_ids: - # No segments, status is "GENERATING" (waiting to generate) - summary_status_map[doc_id] = "GENERATING" + # No segments, status is None (not started) + summary_status_map[doc_id] = None continue # Count summary statuses for this document's segments @@ -374,28 +374,23 @@ class DatasetDocumentListApi(Resource): else: status_counts["not_started"] += 1 - total_segments = len(segment_ids) - completed_count = status_counts["completed"] generating_count = status_counts["generating"] - error_count = status_counts["error"] - # Determine overall status (only three states: GENERATING, COMPLETED, ERROR) - if completed_count == total_segments: - summary_status_map[doc_id] = "COMPLETED" - elif error_count > 0: - # Has errors (even if some are completed or generating) - summary_status_map[doc_id] = "ERROR" - elif generating_count > 0 or status_counts["not_started"] > 0: - # Still generating or not started - summary_status_map[doc_id] = "GENERATING" + # Determine overall status: + # - "SUMMARIZING" only when task is queued and at least one summary is generating + # - None (empty) for all other cases (not queued, all completed/error) + if generating_count > 0: + # Task is queued and at least one summary is still generating + summary_status_map[doc_id] = "SUMMARIZING" else: - # Default to generating - summary_status_map[doc_id] = "GENERATING" + # Task not queued yet, or all summaries are completed/error (task finished) + summary_status_map[doc_id] = None # Add summary_index_status to each document for document in documents: if has_summary_index and document.need_summary is True: - document.summary_index_status = summary_status_map.get(str(document.id), "GENERATING") + # Get status from map, default to None (not queued yet) + document.summary_index_status = summary_status_map.get(str(document.id)) else: # Return null if summary index is not enabled or document doesn't need summary document.summary_index_status = None diff --git a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py index d14bdee1fd..366fe24f60 100644 --- a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py +++ b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py @@ -356,19 +356,9 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): def generate_summary_for_chunk(preview_item: dict) -> None: """Generate summary for a single chunk.""" if "content" in preview_item: - try: - # Set Flask application context in worker thread - if flask_app: - with flask_app.app_context(): - summary = ParagraphIndexProcessor.generate_summary( - tenant_id=dataset.tenant_id, - text=preview_item["content"], - summary_index_setting=summary_index_setting, - ) - if summary: - preview_item["summary"] = summary - else: - # Fallback: try without app context 
(may fail) + # Set Flask application context in worker thread + if flask_app: + with flask_app.app_context(): summary = ParagraphIndexProcessor.generate_summary( tenant_id=dataset.tenant_id, text=preview_item["content"], @@ -376,13 +366,21 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): ) if summary: preview_item["summary"] = summary - except Exception: - logger.exception("Failed to generate summary for chunk") - # Don't fail the entire preview if summary generation fails + else: + # Fallback: try without app context (may fail) + summary = ParagraphIndexProcessor.generate_summary( + tenant_id=dataset.tenant_id, + text=preview_item["content"], + summary_index_setting=summary_index_setting, + ) + if summary: + preview_item["summary"] = summary # Generate summaries concurrently using ThreadPoolExecutor # Set a reasonable timeout to prevent hanging (60 seconds per chunk, max 5 minutes total) timeout_seconds = min(300, 60 * len(preview_output["preview"])) + errors: list[Exception] = [] + with concurrent.futures.ThreadPoolExecutor(max_workers=min(10, len(preview_output["preview"]))) as executor: futures = [ executor.submit(generate_summary_for_chunk, preview_item) @@ -393,17 +391,37 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): # Cancel tasks that didn't complete in time if not_done: - logger.warning( - "Summary generation timeout: %s chunks did not complete within %ss. " - "Cancelling remaining tasks...", - len(not_done), - timeout_seconds, + timeout_error_msg = ( + f"Summary generation timeout: {len(not_done)} chunks did not complete within {timeout_seconds}s" ) + logger.warning("%s. Cancelling remaining tasks...", timeout_error_msg) + # In preview mode, timeout is also an error + errors.append(TimeoutError(timeout_error_msg)) for future in not_done: future.cancel() # Wait a bit for cancellation to take effect concurrent.futures.wait(not_done, timeout=5) + # Collect exceptions from completed futures + for future in done: + try: + future.result() # This will raise any exception that occurred + except Exception as e: + logger.exception("Error in summary generation future") + errors.append(e) + + # In preview mode, if there are any errors, fail the request + if errors: + error_messages = [str(e) for e in errors] + error_summary = ( + f"Failed to generate summaries for {len(errors)} chunk(s). 
" + f"Errors: {'; '.join(error_messages[:3])}" # Show first 3 errors + ) + if len(errors) > 3: + error_summary += f" (and {len(errors) - 3} more)" + logger.error("Summary generation failed in preview mode: %s", error_summary) + raise KnowledgeIndexNodeError(error_summary) + completed_count = sum(1 for item in preview_output["preview"] if item.get("summary") is not None) logger.info( "Completed summary generation for preview chunks: %s/%s succeeded", diff --git a/api/fields/document_fields.py b/api/fields/document_fields.py index 875726d31d..35a2a04f3e 100644 --- a/api/fields/document_fields.py +++ b/api/fields/document_fields.py @@ -33,9 +33,11 @@ document_fields = { "hit_count": fields.Integer, "doc_form": fields.String, "doc_metadata": fields.List(fields.Nested(document_metadata_fields), attribute="doc_metadata_details"), - # Summary index generation status: "GENERATING", "COMPLETED", "ERROR", or null if not enabled + # Summary index generation status: + # "SUMMARIZING" (when task is queued and generating) "summary_index_status": fields.String, - "need_summary": fields.Boolean, # Whether this document needs summary index generation + # Whether this document needs summary index generation + "need_summary": fields.Boolean, } document_with_segments_fields = { @@ -63,7 +65,8 @@ document_with_segments_fields = { "completed_segments": fields.Integer, "total_segments": fields.Integer, "doc_metadata": fields.List(fields.Nested(document_metadata_fields), attribute="doc_metadata_details"), - # Summary index generation status: "GENERATING", "COMPLETED", "ERROR", or null if not enabled + # Summary index generation status: + # "SUMMARIZING" (when task is queued and generating) "summary_index_status": fields.String, "need_summary": fields.Boolean, # Whether this document needs summary index generation } diff --git a/api/services/summary_index_service.py b/api/services/summary_index_service.py index d2cf23cb1c..1ab2ac510f 100644 --- a/api/services/summary_index_service.py +++ b/api/services/summary_index_service.py @@ -437,6 +437,11 @@ class SummaryIndexService: """ Enable summary records and re-add vectors to vector database for segments. + Note: This method enables summaries based on chunk status, not summary_index_setting.enable. + The summary_index_setting.enable flag only controls automatic generation, + not whether existing summaries can be used. + Summary.enabled should always be kept in sync with chunk.enabled. + Args: dataset: Dataset containing the segments segment_ids: List of segment IDs to enable summaries for. If None, enable all. @@ -445,11 +450,6 @@ class SummaryIndexService: if dataset.indexing_technique != "high_quality": return - # Check if summary index is enabled - summary_index_setting = dataset.summary_index_setting - if not summary_index_setting or not summary_index_setting.get("enable"): - return - query = db.session.query(DocumentSegmentSummary).filter_by( dataset_id=dataset.id, enabled=False, # Only enable disabled summaries @@ -483,6 +483,7 @@ class SummaryIndexService: .first() ) + # Summary.enabled stays in sync with chunk.enabled, only enable summary if the associated chunk is enabled. if not segment or not segment.enabled or segment.status != "completed": continue From 63d33fe93ff7be865c71e46735eda59b29a17603 Mon Sep 17 00:00:00 2001 From: FFXN Date: Tue, 20 Jan 2026 18:14:43 +0800 Subject: [PATCH 22/26] fix: fix summary index bug. 
--- .../processor/paragraph_index_processor.py | 68 +++++++++++---- .../processor/parent_child_index_processor.py | 84 +++++++++++++------ 2 files changed, 114 insertions(+), 38 deletions(-) diff --git a/api/core/rag/index_processor/processor/paragraph_index_processor.py b/api/core/rag/index_processor/processor/paragraph_index_processor.py index 930abd6bc6..0bf1b1e30a 100644 --- a/api/core/rag/index_processor/processor/paragraph_index_processor.py +++ b/api/core/rag/index_processor/processor/paragraph_index_processor.py @@ -277,6 +277,7 @@ class ParagraphIndexProcessor(BaseIndexProcessor): """ For each segment, concurrently call generate_summary to generate a summary and write it to the summary attribute of PreviewDetail. + In preview mode (indexing-estimate), if any summary generation fails, the method will raise an exception. """ import concurrent.futures @@ -291,23 +292,62 @@ class ParagraphIndexProcessor(BaseIndexProcessor): def process(preview: PreviewDetail) -> None: """Generate summary for a single preview item.""" - try: - if flask_app: - # Ensure Flask app context in worker thread - with flask_app.app_context(): - summary = self.generate_summary(tenant_id, preview.content, summary_index_setting) - preview.summary = summary - else: - # Fallback: try without app context (may fail) + if flask_app: + # Ensure Flask app context in worker thread + with flask_app.app_context(): summary = self.generate_summary(tenant_id, preview.content, summary_index_setting) preview.summary = summary - except Exception: - logger.exception("Failed to generate summary for preview") - # Don't fail the entire preview if summary generation fails - preview.summary = None + else: + # Fallback: try without app context (may fail) + summary = self.generate_summary(tenant_id, preview.content, summary_index_setting) + preview.summary = summary + + # Generate summaries concurrently using ThreadPoolExecutor + # Set a reasonable timeout to prevent hanging (60 seconds per chunk, max 5 minutes total) + timeout_seconds = min(300, 60 * len(preview_texts)) + errors: list[Exception] = [] + + with concurrent.futures.ThreadPoolExecutor(max_workers=min(10, len(preview_texts))) as executor: + futures = [ + executor.submit(process, preview) + for preview in preview_texts + ] + # Wait for all tasks to complete with timeout + done, not_done = concurrent.futures.wait(futures, timeout=timeout_seconds) + + # Cancel tasks that didn't complete in time + if not_done: + timeout_error_msg = ( + f"Summary generation timeout: {len(not_done)} chunks did not complete within {timeout_seconds}s" + ) + logger.warning("%s. Cancelling remaining tasks...", timeout_error_msg) + # In preview mode, timeout is also an error + errors.append(TimeoutError(timeout_error_msg)) + for future in not_done: + future.cancel() + # Wait a bit for cancellation to take effect + concurrent.futures.wait(not_done, timeout=5) + + # Collect exceptions from completed futures + for future in done: + try: + future.result() # This will raise any exception that occurred + except Exception as e: + logger.exception("Error in summary generation future") + errors.append(e) + + # In preview mode (indexing-estimate), if there are any errors, fail the request + if errors: + error_messages = [str(e) for e in errors] + error_summary = ( + f"Failed to generate summaries for {len(errors)} chunk(s). 
" + f"Errors: {'; '.join(error_messages[:3])}" # Show first 3 errors + ) + if len(errors) > 3: + error_summary += f" (and {len(errors) - 3} more)" + logger.error("Summary generation failed in preview mode: %s", error_summary) + raise ValueError(error_summary) - with concurrent.futures.ThreadPoolExecutor() as executor: - list(executor.map(process, preview_texts)) return preview_texts @staticmethod diff --git a/api/core/rag/index_processor/processor/parent_child_index_processor.py b/api/core/rag/index_processor/processor/parent_child_index_processor.py index ccb1c55b72..8c803621b8 100644 --- a/api/core/rag/index_processor/processor/parent_child_index_processor.py +++ b/api/core/rag/index_processor/processor/parent_child_index_processor.py @@ -361,6 +361,7 @@ class ParentChildIndexProcessor(BaseIndexProcessor): """ For each parent chunk in preview_texts, concurrently call generate_summary to generate a summary and write it to the summary attribute of PreviewDetail. + In preview mode (indexing-estimate), if any summary generation fails, the method will raise an exception. Note: For parent-child structure, we only generate summaries for parent chunks. """ @@ -377,34 +378,69 @@ class ParentChildIndexProcessor(BaseIndexProcessor): def process(preview: PreviewDetail) -> None: """Generate summary for a single preview item (parent chunk).""" - try: - if flask_app: - # Ensure Flask app context in worker thread - with flask_app.app_context(): - # Use ParagraphIndexProcessor's generate_summary method - from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor - summary = ParagraphIndexProcessor.generate_summary( - tenant_id=tenant_id, - text=preview.content, - summary_index_setting=summary_index_setting, - ) - if summary: - preview.summary = summary - else: - # Fallback: try without app context (may fail) - from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor + from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor + if flask_app: + # Ensure Flask app context in worker thread + with flask_app.app_context(): summary = ParagraphIndexProcessor.generate_summary( tenant_id=tenant_id, text=preview.content, summary_index_setting=summary_index_setting, ) - if summary: - preview.summary = summary - except Exception: - logger.exception("Failed to generate summary for preview") - # Don't fail the entire preview if summary generation fails - preview.summary = None + preview.summary = summary + else: + # Fallback: try without app context (may fail) + summary = ParagraphIndexProcessor.generate_summary( + tenant_id=tenant_id, + text=preview.content, + summary_index_setting=summary_index_setting, + ) + preview.summary = summary + + # Generate summaries concurrently using ThreadPoolExecutor + # Set a reasonable timeout to prevent hanging (60 seconds per chunk, max 5 minutes total) + timeout_seconds = min(300, 60 * len(preview_texts)) + errors: list[Exception] = [] + + with concurrent.futures.ThreadPoolExecutor(max_workers=min(10, len(preview_texts))) as executor: + futures = [ + executor.submit(process, preview) + for preview in preview_texts + ] + # Wait for all tasks to complete with timeout + done, not_done = concurrent.futures.wait(futures, timeout=timeout_seconds) + + # Cancel tasks that didn't complete in time + if not_done: + timeout_error_msg = ( + f"Summary generation timeout: {len(not_done)} chunks did not complete within {timeout_seconds}s" + ) + logger.warning("%s. 
Cancelling remaining tasks...", timeout_error_msg) + # In preview mode, timeout is also an error + errors.append(TimeoutError(timeout_error_msg)) + for future in not_done: + future.cancel() + # Wait a bit for cancellation to take effect + concurrent.futures.wait(not_done, timeout=5) + + # Collect exceptions from completed futures + for future in done: + try: + future.result() # This will raise any exception that occurred + except Exception as e: + logger.exception("Error in summary generation future") + errors.append(e) + + # In preview mode (indexing-estimate), if there are any errors, fail the request + if errors: + error_messages = [str(e) for e in errors] + error_summary = ( + f"Failed to generate summaries for {len(errors)} chunk(s). " + f"Errors: {'; '.join(error_messages[:3])}" # Show first 3 errors + ) + if len(errors) > 3: + error_summary += f" (and {len(errors) - 3} more)" + logger.error("Summary generation failed in preview mode: %s", error_summary) + raise ValueError(error_summary) - with concurrent.futures.ThreadPoolExecutor() as executor: - list(executor.map(process, preview_texts)) return preview_texts From 34436fc89c8150d72ee0bb85a2235d39182b8714 Mon Sep 17 00:00:00 2001 From: wangxiaolei Date: Wed, 21 Jan 2026 14:31:47 +0800 Subject: [PATCH 23/26] feat: workflow support register context and read context (#31265) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: Maries --- api/context/flask_app_context.py | 2 +- api/core/workflow/context/__init__.py | 12 +++ .../workflow/context/execution_context.py | 76 +++++++++++++++++-- api/core/workflow/context/models.py | 13 ++++ .../context/test_execution_context.py | 31 ++++++++ 5 files changed, 127 insertions(+), 7 deletions(-) create mode 100644 api/core/workflow/context/models.py diff --git a/api/context/flask_app_context.py b/api/context/flask_app_context.py index 4b693cd91f..360be16beb 100644 --- a/api/context/flask_app_context.py +++ b/api/context/flask_app_context.py @@ -9,7 +9,7 @@ from typing import Any, final from flask import Flask, current_app, g -from context import register_context_capturer +from core.workflow.context import register_context_capturer from core.workflow.context.execution_context import ( AppContext, IExecutionContext, diff --git a/api/core/workflow/context/__init__.py b/api/core/workflow/context/__init__.py index 31e1f2c8d9..1237d6a017 100644 --- a/api/core/workflow/context/__init__.py +++ b/api/core/workflow/context/__init__.py @@ -7,16 +7,28 @@ execution in multi-threaded environments. from core.workflow.context.execution_context import ( AppContext, + ContextProviderNotFoundError, ExecutionContext, IExecutionContext, NullAppContext, capture_current_context, + read_context, + register_context, + register_context_capturer, + reset_context_provider, ) +from core.workflow.context.models import SandboxContext __all__ = [ "AppContext", + "ContextProviderNotFoundError", "ExecutionContext", "IExecutionContext", "NullAppContext", + "SandboxContext", "capture_current_context", + "read_context", + "register_context", + "register_context_capturer", + "reset_context_provider", ] diff --git a/api/core/workflow/context/execution_context.py b/api/core/workflow/context/execution_context.py index 5a4203be93..d951c95d68 100644 --- a/api/core/workflow/context/execution_context.py +++ b/api/core/workflow/context/execution_context.py @@ -4,9 +4,11 @@ Execution Context - Abstracted context management for workflow execution. 
import contextvars from abc import ABC, abstractmethod -from collections.abc import Generator +from collections.abc import Callable, Generator from contextlib import AbstractContextManager, contextmanager -from typing import Any, Protocol, final, runtime_checkable +from typing import Any, Protocol, TypeVar, final, runtime_checkable + +from pydantic import BaseModel class AppContext(ABC): @@ -204,13 +206,75 @@ class ExecutionContextBuilder: ) +_capturer: Callable[[], IExecutionContext] | None = None + +# Tenant-scoped providers using tuple keys for clarity and constant-time lookup. +# Key mapping: +# (name, tenant_id) -> provider +# - name: namespaced identifier (recommend prefixing, e.g. "workflow.sandbox") +# - tenant_id: tenant identifier string +# Value: +# provider: Callable[[], BaseModel] returning the typed context value +# Type-safety note: +# - This registry cannot enforce that all providers for a given name return the same BaseModel type. +# - Implementors SHOULD provide typed wrappers around register/read (like Go's context best practice), +# e.g. def register_sandbox_ctx(tenant_id: str, p: Callable[[], SandboxContext]) and +# def read_sandbox_ctx(tenant_id: str) -> SandboxContext. +_tenant_context_providers: dict[tuple[str, str], Callable[[], BaseModel]] = {} + +T = TypeVar("T", bound=BaseModel) + + +class ContextProviderNotFoundError(KeyError): + """Raised when a tenant-scoped context provider is missing for a given (name, tenant_id).""" + + pass + + +def register_context_capturer(capturer: Callable[[], IExecutionContext]) -> None: + """Register a single enterable execution context capturer (e.g., Flask).""" + global _capturer + _capturer = capturer + + +def register_context(name: str, tenant_id: str, provider: Callable[[], BaseModel]) -> None: + """Register a tenant-specific provider for a named context. + + Tip: use a namespaced "name" (e.g., "workflow.sandbox") to avoid key collisions. + Consider adding a typed wrapper for this registration in your feature module. + """ + _tenant_context_providers[(name, tenant_id)] = provider + + +def read_context(name: str, *, tenant_id: str) -> BaseModel: + """ + Read a context value for a specific tenant. + + Raises KeyError if the provider for (name, tenant_id) is not registered. + """ + prov = _tenant_context_providers.get((name, tenant_id)) + if prov is None: + raise ContextProviderNotFoundError(f"Context provider '{name}' not registered for tenant '{tenant_id}'") + return prov() + + def capture_current_context() -> IExecutionContext: """ Capture current execution context from the calling environment. - Returns: - IExecutionContext with captured context + If a capturer is registered (e.g., Flask), use it. Otherwise, return a minimal + context with NullAppContext + copy of current contextvars. 
""" - from context import capture_current_context + if _capturer is None: + return ExecutionContext( + app_context=NullAppContext(), + context_vars=contextvars.copy_context(), + ) + return _capturer() - return capture_current_context() + +def reset_context_provider() -> None: + """Reset the capturer and all tenant-scoped context providers (primarily for tests).""" + global _capturer + _capturer = None + _tenant_context_providers.clear() diff --git a/api/core/workflow/context/models.py b/api/core/workflow/context/models.py new file mode 100644 index 0000000000..af5a4b2614 --- /dev/null +++ b/api/core/workflow/context/models.py @@ -0,0 +1,13 @@ +from __future__ import annotations + +from pydantic import AnyHttpUrl, BaseModel + + +class SandboxContext(BaseModel): + """Typed context for sandbox integration. All fields optional by design.""" + + sandbox_url: AnyHttpUrl | None = None + sandbox_token: str | None = None # optional, if later needed for auth + + +__all__ = ["SandboxContext"] diff --git a/api/tests/unit_tests/core/workflow/context/test_execution_context.py b/api/tests/unit_tests/core/workflow/context/test_execution_context.py index 217c39385c..63466cfb5e 100644 --- a/api/tests/unit_tests/core/workflow/context/test_execution_context.py +++ b/api/tests/unit_tests/core/workflow/context/test_execution_context.py @@ -5,6 +5,7 @@ from typing import Any from unittest.mock import MagicMock import pytest +from pydantic import BaseModel from core.workflow.context.execution_context import ( AppContext, @@ -12,6 +13,8 @@ from core.workflow.context.execution_context import ( ExecutionContextBuilder, IExecutionContext, NullAppContext, + read_context, + register_context, ) @@ -256,3 +259,31 @@ class TestCaptureCurrentContext: # Context variables should be captured assert result.context_vars is not None + + +class TestTenantScopedContextRegistry: + def setup_method(self): + from core.workflow.context import reset_context_provider + + reset_context_provider() + + def teardown_method(self): + from core.workflow.context import reset_context_provider + + reset_context_provider() + + def test_tenant_provider_read_ok(self): + class SandboxContext(BaseModel): + base_url: str | None = None + + register_context("workflow.sandbox", "t1", lambda: SandboxContext(base_url="http://t1")) + register_context("workflow.sandbox", "t2", lambda: SandboxContext(base_url="http://t2")) + + assert read_context("workflow.sandbox", tenant_id="t1").base_url == "http://t1" + assert read_context("workflow.sandbox", tenant_id="t2").base_url == "http://t2" + + def test_missing_provider_raises_keyerror(self): + from core.workflow.context import ContextProviderNotFoundError + + with pytest.raises(ContextProviderNotFoundError): + read_context("missing", tenant_id="unknown") From 4b068022e1c746f87a7f3867fb6e8126458b0fee Mon Sep 17 00:00:00 2001 From: yyh <92089059+lyzno1@users.noreply.github.com> Date: Wed, 21 Jan 2026 14:48:58 +0800 Subject: [PATCH 24/26] chore: reorganize agent skills and add web design skills for all agents (#31334) --- .agent/skills | 1 - .agent/skills/component-refactoring | 1 + .agent/skills/frontend-code-review | 1 + .agent/skills/frontend-testing | 1 + .agent/skills/orpc-contract-first | 1 + .agent/skills/skill-creator | 1 + .agent/skills/vercel-react-best-practices | 1 + .agent/skills/web-design-guidelines | 1 + .../skills/component-refactoring/SKILL.md | 0 .../references/complexity-patterns.md | 0 .../references/component-splitting.md | 0 .../references/hook-extraction.md | 0 
.../skills/frontend-code-review/SKILL.md | 0 .../references/business-logic.md | 0 .../references/code-quality.md | 0 .../references/performance.md | 0 .../skills/frontend-testing/SKILL.md | 0 .../assets/component-test.template.tsx | 0 .../assets/hook-test.template.ts | 0 .../assets/utility-test.template.ts | 0 .../references/async-testing.md | 0 .../frontend-testing/references/checklist.md | 0 .../references/common-patterns.md | 0 .../references/domain-components.md | 0 .../frontend-testing/references/mocking.md | 0 .../frontend-testing/references/workflow.md | 0 .../skills/orpc-contract-first/SKILL.md | 0 .../skills/skill-creator/SKILL.md | 0 .../references/output-patterns.md | 0 .../skill-creator/references/workflows.md | 0 .../skill-creator/scripts/init_skill.py | 0 .../skill-creator/scripts/package_skill.py | 0 .../skill-creator/scripts/quick_validate.py | 0 .../vercel-react-best-practices/AGENTS.md | 0 .../vercel-react-best-practices/SKILL.md | 0 .../rules/advanced-event-handler-refs.md | 0 .../rules/advanced-use-latest.md | 0 .../rules/async-api-routes.md | 0 .../rules/async-defer-await.md | 0 .../rules/async-dependencies.md | 0 .../rules/async-parallel.md | 0 .../rules/async-suspense-boundaries.md | 0 .../rules/bundle-barrel-imports.md | 0 .../rules/bundle-conditional.md | 0 .../rules/bundle-defer-third-party.md | 0 .../rules/bundle-dynamic-imports.md | 0 .../rules/bundle-preload.md | 0 .../rules/client-event-listeners.md | 0 .../rules/client-localstorage-schema.md | 0 .../rules/client-passive-event-listeners.md | 0 .../rules/client-swr-dedup.md | 0 .../rules/js-batch-dom-css.md | 0 .../rules/js-cache-function-results.md | 0 .../rules/js-cache-property-access.md | 0 .../rules/js-cache-storage.md | 0 .../rules/js-combine-iterations.md | 0 .../rules/js-early-exit.md | 0 .../rules/js-hoist-regexp.md | 0 .../rules/js-index-maps.md | 0 .../rules/js-length-check-first.md | 0 .../rules/js-min-max-loop.md | 0 .../rules/js-set-map-lookups.md | 0 .../rules/js-tosorted-immutable.md | 0 .../rules/rendering-activity.md | 0 .../rules/rendering-animate-svg-wrapper.md | 0 .../rules/rendering-conditional-render.md | 0 .../rules/rendering-content-visibility.md | 0 .../rules/rendering-hoist-jsx.md | 0 .../rules/rendering-hydration-no-flicker.md | 0 .../rules/rendering-svg-precision.md | 0 .../rules/rerender-defer-reads.md | 0 .../rules/rerender-dependencies.md | 0 .../rules/rerender-derived-state.md | 0 .../rules/rerender-functional-setstate.md | 0 .../rules/rerender-lazy-state-init.md | 0 .../rules/rerender-memo.md | 0 .../rules/rerender-transitions.md | 0 .../rules/server-after-nonblocking.md | 0 .../rules/server-cache-lru.md | 0 .../rules/server-cache-react.md | 0 .../rules/server-parallel-fetching.md | 0 .../rules/server-serialization.md | 0 .agents/skills/web-design-guidelines/SKILL.md | 39 ++++++++ .claude/skills/component-refactoring | 1 + .claude/skills/frontend-code-review | 1 + .claude/skills/frontend-testing | 1 + .claude/skills/orpc-contract-first | 1 + .claude/skills/skill-creator | 1 + .claude/skills/vercel-react-best-practices | 1 + .claude/skills/web-design-guidelines | 1 + .codex/skills | 1 - .codex/skills/component-refactoring | 1 + .codex/skills/frontend-code-review | 1 + .codex/skills/frontend-testing | 1 + .codex/skills/orpc-contract-first | 1 + .codex/skills/skill-creator | 1 + .codex/skills/vercel-react-best-practices | 1 + .codex/skills/web-design-guidelines | 1 + .cursor/skills/component-refactoring | 1 + .cursor/skills/frontend-code-review | 1 + 
.cursor/skills/frontend-testing | 1 + .cursor/skills/orpc-contract-first | 1 + .cursor/skills/skill-creator | 1 + .cursor/skills/vercel-react-best-practices | 1 + .cursor/skills/web-design-guidelines | 1 + .gemini/skills/component-refactoring | 1 + .gemini/skills/frontend-code-review | 1 + .gemini/skills/frontend-testing | 1 + .gemini/skills/orpc-contract-first | 1 + .gemini/skills/skill-creator | 1 + .gemini/skills/vercel-react-best-practices | 1 + .gemini/skills/web-design-guidelines | 1 + .github/skills/component-refactoring | 1 + .github/skills/frontend-code-review | 1 + .github/skills/frontend-testing | 1 + .github/skills/orpc-contract-first | 1 + .github/skills/skill-creator | 1 + .github/skills/vercel-react-best-practices | 1 + .github/skills/web-design-guidelines | 1 + .github/workflows/autofix.yml | 2 +- .../console/datasets/datasets_document.py.md | 52 ---------- .../services/dataset_service.py.md | 18 ---- api/agent-notes/services/file_service.py.md | 35 ------- .../test_datasets_document_download.py.md | 28 ------ .../test_file_service_zip_and_lookup.py.md | 18 ---- api/agent_skills/infra.md | 96 ------------------- api/agent_skills/plugin.md | 1 - api/agent_skills/plugin_oauth.md | 1 - api/agent_skills/trigger.md | 53 ---------- 129 files changed, 82 insertions(+), 305 deletions(-) delete mode 120000 .agent/skills create mode 120000 .agent/skills/component-refactoring create mode 120000 .agent/skills/frontend-code-review create mode 120000 .agent/skills/frontend-testing create mode 120000 .agent/skills/orpc-contract-first create mode 120000 .agent/skills/skill-creator create mode 120000 .agent/skills/vercel-react-best-practices create mode 120000 .agent/skills/web-design-guidelines rename {.claude => .agents}/skills/component-refactoring/SKILL.md (100%) rename {.claude => .agents}/skills/component-refactoring/references/complexity-patterns.md (100%) rename {.claude => .agents}/skills/component-refactoring/references/component-splitting.md (100%) rename {.claude => .agents}/skills/component-refactoring/references/hook-extraction.md (100%) rename {.claude => .agents}/skills/frontend-code-review/SKILL.md (100%) rename {.claude => .agents}/skills/frontend-code-review/references/business-logic.md (100%) rename {.claude => .agents}/skills/frontend-code-review/references/code-quality.md (100%) rename {.claude => .agents}/skills/frontend-code-review/references/performance.md (100%) rename {.claude => .agents}/skills/frontend-testing/SKILL.md (100%) rename {.claude => .agents}/skills/frontend-testing/assets/component-test.template.tsx (100%) rename {.claude => .agents}/skills/frontend-testing/assets/hook-test.template.ts (100%) rename {.claude => .agents}/skills/frontend-testing/assets/utility-test.template.ts (100%) rename {.claude => .agents}/skills/frontend-testing/references/async-testing.md (100%) rename {.claude => .agents}/skills/frontend-testing/references/checklist.md (100%) rename {.claude => .agents}/skills/frontend-testing/references/common-patterns.md (100%) rename {.claude => .agents}/skills/frontend-testing/references/domain-components.md (100%) rename {.claude => .agents}/skills/frontend-testing/references/mocking.md (100%) rename {.claude => .agents}/skills/frontend-testing/references/workflow.md (100%) rename {.claude => .agents}/skills/orpc-contract-first/SKILL.md (100%) rename {.claude => .agents}/skills/skill-creator/SKILL.md (100%) rename {.claude => .agents}/skills/skill-creator/references/output-patterns.md (100%) rename {.claude => 
.agents}/skills/skill-creator/references/workflows.md (100%) rename {.claude => .agents}/skills/skill-creator/scripts/init_skill.py (100%) rename {.claude => .agents}/skills/skill-creator/scripts/package_skill.py (100%) rename {.claude => .agents}/skills/skill-creator/scripts/quick_validate.py (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/AGENTS.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/SKILL.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/advanced-use-latest.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/async-api-routes.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/async-defer-await.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/async-dependencies.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/async-parallel.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/bundle-barrel-imports.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/bundle-conditional.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/bundle-defer-third-party.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/bundle-dynamic-imports.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/bundle-preload.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/client-event-listeners.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/client-localstorage-schema.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/client-passive-event-listeners.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/client-swr-dedup.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/js-batch-dom-css.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/js-cache-function-results.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/js-cache-property-access.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/js-cache-storage.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/js-combine-iterations.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/js-early-exit.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/js-hoist-regexp.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/js-index-maps.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/js-length-check-first.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/js-min-max-loop.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/js-set-map-lookups.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/js-tosorted-immutable.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/rendering-activity.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/rendering-animate-svg-wrapper.md (100%) rename {.claude => 
.agents}/skills/vercel-react-best-practices/rules/rendering-conditional-render.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/rendering-content-visibility.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/rendering-hoist-jsx.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/rendering-svg-precision.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/rerender-defer-reads.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/rerender-dependencies.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/rerender-derived-state.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/rerender-memo.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/rerender-transitions.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/server-after-nonblocking.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/server-cache-lru.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/server-cache-react.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/server-parallel-fetching.md (100%) rename {.claude => .agents}/skills/vercel-react-best-practices/rules/server-serialization.md (100%) create mode 100644 .agents/skills/web-design-guidelines/SKILL.md create mode 120000 .claude/skills/component-refactoring create mode 120000 .claude/skills/frontend-code-review create mode 120000 .claude/skills/frontend-testing create mode 120000 .claude/skills/orpc-contract-first create mode 120000 .claude/skills/skill-creator create mode 120000 .claude/skills/vercel-react-best-practices create mode 120000 .claude/skills/web-design-guidelines delete mode 120000 .codex/skills create mode 120000 .codex/skills/component-refactoring create mode 120000 .codex/skills/frontend-code-review create mode 120000 .codex/skills/frontend-testing create mode 120000 .codex/skills/orpc-contract-first create mode 120000 .codex/skills/skill-creator create mode 120000 .codex/skills/vercel-react-best-practices create mode 120000 .codex/skills/web-design-guidelines create mode 120000 .cursor/skills/component-refactoring create mode 120000 .cursor/skills/frontend-code-review create mode 120000 .cursor/skills/frontend-testing create mode 120000 .cursor/skills/orpc-contract-first create mode 120000 .cursor/skills/skill-creator create mode 120000 .cursor/skills/vercel-react-best-practices create mode 120000 .cursor/skills/web-design-guidelines create mode 120000 .gemini/skills/component-refactoring create mode 120000 .gemini/skills/frontend-code-review create mode 120000 .gemini/skills/frontend-testing create mode 120000 .gemini/skills/orpc-contract-first create mode 120000 .gemini/skills/skill-creator create mode 120000 .gemini/skills/vercel-react-best-practices create mode 120000 .gemini/skills/web-design-guidelines create mode 120000 .github/skills/component-refactoring create mode 120000 .github/skills/frontend-code-review create mode 120000 .github/skills/frontend-testing create mode 120000 .github/skills/orpc-contract-first 
create mode 120000 .github/skills/skill-creator create mode 120000 .github/skills/vercel-react-best-practices create mode 120000 .github/skills/web-design-guidelines delete mode 100644 api/agent-notes/controllers/console/datasets/datasets_document.py.md delete mode 100644 api/agent-notes/services/dataset_service.py.md delete mode 100644 api/agent-notes/services/file_service.py.md delete mode 100644 api/agent-notes/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py.md delete mode 100644 api/agent-notes/tests/unit_tests/services/test_file_service_zip_and_lookup.py.md delete mode 100644 api/agent_skills/infra.md delete mode 100644 api/agent_skills/plugin.md delete mode 100644 api/agent_skills/plugin_oauth.md delete mode 100644 api/agent_skills/trigger.md diff --git a/.agent/skills b/.agent/skills deleted file mode 120000 index 454b8427cd..0000000000 --- a/.agent/skills +++ /dev/null @@ -1 +0,0 @@ -../.claude/skills \ No newline at end of file diff --git a/.agent/skills/component-refactoring b/.agent/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.agent/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.agent/skills/frontend-code-review b/.agent/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.agent/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.agent/skills/frontend-testing b/.agent/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.agent/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.agent/skills/orpc-contract-first b/.agent/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.agent/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.agent/skills/skill-creator b/.agent/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.agent/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.agent/skills/vercel-react-best-practices b/.agent/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.agent/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.agent/skills/web-design-guidelines b/.agent/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.agent/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.claude/skills/component-refactoring/SKILL.md b/.agents/skills/component-refactoring/SKILL.md similarity index 100% rename from .claude/skills/component-refactoring/SKILL.md rename to .agents/skills/component-refactoring/SKILL.md diff --git a/.claude/skills/component-refactoring/references/complexity-patterns.md b/.agents/skills/component-refactoring/references/complexity-patterns.md similarity index 100% rename from .claude/skills/component-refactoring/references/complexity-patterns.md rename to .agents/skills/component-refactoring/references/complexity-patterns.md diff --git 
a/.claude/skills/component-refactoring/references/component-splitting.md b/.agents/skills/component-refactoring/references/component-splitting.md similarity index 100% rename from .claude/skills/component-refactoring/references/component-splitting.md rename to .agents/skills/component-refactoring/references/component-splitting.md diff --git a/.claude/skills/component-refactoring/references/hook-extraction.md b/.agents/skills/component-refactoring/references/hook-extraction.md similarity index 100% rename from .claude/skills/component-refactoring/references/hook-extraction.md rename to .agents/skills/component-refactoring/references/hook-extraction.md diff --git a/.claude/skills/frontend-code-review/SKILL.md b/.agents/skills/frontend-code-review/SKILL.md similarity index 100% rename from .claude/skills/frontend-code-review/SKILL.md rename to .agents/skills/frontend-code-review/SKILL.md diff --git a/.claude/skills/frontend-code-review/references/business-logic.md b/.agents/skills/frontend-code-review/references/business-logic.md similarity index 100% rename from .claude/skills/frontend-code-review/references/business-logic.md rename to .agents/skills/frontend-code-review/references/business-logic.md diff --git a/.claude/skills/frontend-code-review/references/code-quality.md b/.agents/skills/frontend-code-review/references/code-quality.md similarity index 100% rename from .claude/skills/frontend-code-review/references/code-quality.md rename to .agents/skills/frontend-code-review/references/code-quality.md diff --git a/.claude/skills/frontend-code-review/references/performance.md b/.agents/skills/frontend-code-review/references/performance.md similarity index 100% rename from .claude/skills/frontend-code-review/references/performance.md rename to .agents/skills/frontend-code-review/references/performance.md diff --git a/.claude/skills/frontend-testing/SKILL.md b/.agents/skills/frontend-testing/SKILL.md similarity index 100% rename from .claude/skills/frontend-testing/SKILL.md rename to .agents/skills/frontend-testing/SKILL.md diff --git a/.claude/skills/frontend-testing/assets/component-test.template.tsx b/.agents/skills/frontend-testing/assets/component-test.template.tsx similarity index 100% rename from .claude/skills/frontend-testing/assets/component-test.template.tsx rename to .agents/skills/frontend-testing/assets/component-test.template.tsx diff --git a/.claude/skills/frontend-testing/assets/hook-test.template.ts b/.agents/skills/frontend-testing/assets/hook-test.template.ts similarity index 100% rename from .claude/skills/frontend-testing/assets/hook-test.template.ts rename to .agents/skills/frontend-testing/assets/hook-test.template.ts diff --git a/.claude/skills/frontend-testing/assets/utility-test.template.ts b/.agents/skills/frontend-testing/assets/utility-test.template.ts similarity index 100% rename from .claude/skills/frontend-testing/assets/utility-test.template.ts rename to .agents/skills/frontend-testing/assets/utility-test.template.ts diff --git a/.claude/skills/frontend-testing/references/async-testing.md b/.agents/skills/frontend-testing/references/async-testing.md similarity index 100% rename from .claude/skills/frontend-testing/references/async-testing.md rename to .agents/skills/frontend-testing/references/async-testing.md diff --git a/.claude/skills/frontend-testing/references/checklist.md b/.agents/skills/frontend-testing/references/checklist.md similarity index 100% rename from .claude/skills/frontend-testing/references/checklist.md rename to 
.agents/skills/frontend-testing/references/checklist.md diff --git a/.claude/skills/frontend-testing/references/common-patterns.md b/.agents/skills/frontend-testing/references/common-patterns.md similarity index 100% rename from .claude/skills/frontend-testing/references/common-patterns.md rename to .agents/skills/frontend-testing/references/common-patterns.md diff --git a/.claude/skills/frontend-testing/references/domain-components.md b/.agents/skills/frontend-testing/references/domain-components.md similarity index 100% rename from .claude/skills/frontend-testing/references/domain-components.md rename to .agents/skills/frontend-testing/references/domain-components.md diff --git a/.claude/skills/frontend-testing/references/mocking.md b/.agents/skills/frontend-testing/references/mocking.md similarity index 100% rename from .claude/skills/frontend-testing/references/mocking.md rename to .agents/skills/frontend-testing/references/mocking.md diff --git a/.claude/skills/frontend-testing/references/workflow.md b/.agents/skills/frontend-testing/references/workflow.md similarity index 100% rename from .claude/skills/frontend-testing/references/workflow.md rename to .agents/skills/frontend-testing/references/workflow.md diff --git a/.claude/skills/orpc-contract-first/SKILL.md b/.agents/skills/orpc-contract-first/SKILL.md similarity index 100% rename from .claude/skills/orpc-contract-first/SKILL.md rename to .agents/skills/orpc-contract-first/SKILL.md diff --git a/.claude/skills/skill-creator/SKILL.md b/.agents/skills/skill-creator/SKILL.md similarity index 100% rename from .claude/skills/skill-creator/SKILL.md rename to .agents/skills/skill-creator/SKILL.md diff --git a/.claude/skills/skill-creator/references/output-patterns.md b/.agents/skills/skill-creator/references/output-patterns.md similarity index 100% rename from .claude/skills/skill-creator/references/output-patterns.md rename to .agents/skills/skill-creator/references/output-patterns.md diff --git a/.claude/skills/skill-creator/references/workflows.md b/.agents/skills/skill-creator/references/workflows.md similarity index 100% rename from .claude/skills/skill-creator/references/workflows.md rename to .agents/skills/skill-creator/references/workflows.md diff --git a/.claude/skills/skill-creator/scripts/init_skill.py b/.agents/skills/skill-creator/scripts/init_skill.py similarity index 100% rename from .claude/skills/skill-creator/scripts/init_skill.py rename to .agents/skills/skill-creator/scripts/init_skill.py diff --git a/.claude/skills/skill-creator/scripts/package_skill.py b/.agents/skills/skill-creator/scripts/package_skill.py similarity index 100% rename from .claude/skills/skill-creator/scripts/package_skill.py rename to .agents/skills/skill-creator/scripts/package_skill.py diff --git a/.claude/skills/skill-creator/scripts/quick_validate.py b/.agents/skills/skill-creator/scripts/quick_validate.py similarity index 100% rename from .claude/skills/skill-creator/scripts/quick_validate.py rename to .agents/skills/skill-creator/scripts/quick_validate.py diff --git a/.claude/skills/vercel-react-best-practices/AGENTS.md b/.agents/skills/vercel-react-best-practices/AGENTS.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/AGENTS.md rename to .agents/skills/vercel-react-best-practices/AGENTS.md diff --git a/.claude/skills/vercel-react-best-practices/SKILL.md b/.agents/skills/vercel-react-best-practices/SKILL.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/SKILL.md rename to 
.agents/skills/vercel-react-best-practices/SKILL.md diff --git a/.claude/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md b/.agents/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md rename to .agents/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md diff --git a/.claude/skills/vercel-react-best-practices/rules/advanced-use-latest.md b/.agents/skills/vercel-react-best-practices/rules/advanced-use-latest.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/advanced-use-latest.md rename to .agents/skills/vercel-react-best-practices/rules/advanced-use-latest.md diff --git a/.claude/skills/vercel-react-best-practices/rules/async-api-routes.md b/.agents/skills/vercel-react-best-practices/rules/async-api-routes.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/async-api-routes.md rename to .agents/skills/vercel-react-best-practices/rules/async-api-routes.md diff --git a/.claude/skills/vercel-react-best-practices/rules/async-defer-await.md b/.agents/skills/vercel-react-best-practices/rules/async-defer-await.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/async-defer-await.md rename to .agents/skills/vercel-react-best-practices/rules/async-defer-await.md diff --git a/.claude/skills/vercel-react-best-practices/rules/async-dependencies.md b/.agents/skills/vercel-react-best-practices/rules/async-dependencies.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/async-dependencies.md rename to .agents/skills/vercel-react-best-practices/rules/async-dependencies.md diff --git a/.claude/skills/vercel-react-best-practices/rules/async-parallel.md b/.agents/skills/vercel-react-best-practices/rules/async-parallel.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/async-parallel.md rename to .agents/skills/vercel-react-best-practices/rules/async-parallel.md diff --git a/.claude/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md b/.agents/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md rename to .agents/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md diff --git a/.claude/skills/vercel-react-best-practices/rules/bundle-barrel-imports.md b/.agents/skills/vercel-react-best-practices/rules/bundle-barrel-imports.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/bundle-barrel-imports.md rename to .agents/skills/vercel-react-best-practices/rules/bundle-barrel-imports.md diff --git a/.claude/skills/vercel-react-best-practices/rules/bundle-conditional.md b/.agents/skills/vercel-react-best-practices/rules/bundle-conditional.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/bundle-conditional.md rename to .agents/skills/vercel-react-best-practices/rules/bundle-conditional.md diff --git a/.claude/skills/vercel-react-best-practices/rules/bundle-defer-third-party.md b/.agents/skills/vercel-react-best-practices/rules/bundle-defer-third-party.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/bundle-defer-third-party.md rename to 
.agents/skills/vercel-react-best-practices/rules/bundle-defer-third-party.md diff --git a/.claude/skills/vercel-react-best-practices/rules/bundle-dynamic-imports.md b/.agents/skills/vercel-react-best-practices/rules/bundle-dynamic-imports.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/bundle-dynamic-imports.md rename to .agents/skills/vercel-react-best-practices/rules/bundle-dynamic-imports.md diff --git a/.claude/skills/vercel-react-best-practices/rules/bundle-preload.md b/.agents/skills/vercel-react-best-practices/rules/bundle-preload.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/bundle-preload.md rename to .agents/skills/vercel-react-best-practices/rules/bundle-preload.md diff --git a/.claude/skills/vercel-react-best-practices/rules/client-event-listeners.md b/.agents/skills/vercel-react-best-practices/rules/client-event-listeners.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/client-event-listeners.md rename to .agents/skills/vercel-react-best-practices/rules/client-event-listeners.md diff --git a/.claude/skills/vercel-react-best-practices/rules/client-localstorage-schema.md b/.agents/skills/vercel-react-best-practices/rules/client-localstorage-schema.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/client-localstorage-schema.md rename to .agents/skills/vercel-react-best-practices/rules/client-localstorage-schema.md diff --git a/.claude/skills/vercel-react-best-practices/rules/client-passive-event-listeners.md b/.agents/skills/vercel-react-best-practices/rules/client-passive-event-listeners.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/client-passive-event-listeners.md rename to .agents/skills/vercel-react-best-practices/rules/client-passive-event-listeners.md diff --git a/.claude/skills/vercel-react-best-practices/rules/client-swr-dedup.md b/.agents/skills/vercel-react-best-practices/rules/client-swr-dedup.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/client-swr-dedup.md rename to .agents/skills/vercel-react-best-practices/rules/client-swr-dedup.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-batch-dom-css.md b/.agents/skills/vercel-react-best-practices/rules/js-batch-dom-css.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-batch-dom-css.md rename to .agents/skills/vercel-react-best-practices/rules/js-batch-dom-css.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-cache-function-results.md b/.agents/skills/vercel-react-best-practices/rules/js-cache-function-results.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-cache-function-results.md rename to .agents/skills/vercel-react-best-practices/rules/js-cache-function-results.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-cache-property-access.md b/.agents/skills/vercel-react-best-practices/rules/js-cache-property-access.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-cache-property-access.md rename to .agents/skills/vercel-react-best-practices/rules/js-cache-property-access.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-cache-storage.md b/.agents/skills/vercel-react-best-practices/rules/js-cache-storage.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-cache-storage.md 
rename to .agents/skills/vercel-react-best-practices/rules/js-cache-storage.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-combine-iterations.md b/.agents/skills/vercel-react-best-practices/rules/js-combine-iterations.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-combine-iterations.md rename to .agents/skills/vercel-react-best-practices/rules/js-combine-iterations.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-early-exit.md b/.agents/skills/vercel-react-best-practices/rules/js-early-exit.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-early-exit.md rename to .agents/skills/vercel-react-best-practices/rules/js-early-exit.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-hoist-regexp.md b/.agents/skills/vercel-react-best-practices/rules/js-hoist-regexp.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-hoist-regexp.md rename to .agents/skills/vercel-react-best-practices/rules/js-hoist-regexp.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-index-maps.md b/.agents/skills/vercel-react-best-practices/rules/js-index-maps.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-index-maps.md rename to .agents/skills/vercel-react-best-practices/rules/js-index-maps.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-length-check-first.md b/.agents/skills/vercel-react-best-practices/rules/js-length-check-first.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-length-check-first.md rename to .agents/skills/vercel-react-best-practices/rules/js-length-check-first.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-min-max-loop.md b/.agents/skills/vercel-react-best-practices/rules/js-min-max-loop.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-min-max-loop.md rename to .agents/skills/vercel-react-best-practices/rules/js-min-max-loop.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-set-map-lookups.md b/.agents/skills/vercel-react-best-practices/rules/js-set-map-lookups.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-set-map-lookups.md rename to .agents/skills/vercel-react-best-practices/rules/js-set-map-lookups.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-tosorted-immutable.md b/.agents/skills/vercel-react-best-practices/rules/js-tosorted-immutable.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-tosorted-immutable.md rename to .agents/skills/vercel-react-best-practices/rules/js-tosorted-immutable.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-activity.md b/.agents/skills/vercel-react-best-practices/rules/rendering-activity.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-activity.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-activity.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-animate-svg-wrapper.md b/.agents/skills/vercel-react-best-practices/rules/rendering-animate-svg-wrapper.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-animate-svg-wrapper.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-animate-svg-wrapper.md diff --git 
a/.claude/skills/vercel-react-best-practices/rules/rendering-conditional-render.md b/.agents/skills/vercel-react-best-practices/rules/rendering-conditional-render.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-conditional-render.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-conditional-render.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-content-visibility.md b/.agents/skills/vercel-react-best-practices/rules/rendering-content-visibility.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-content-visibility.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-content-visibility.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-hoist-jsx.md b/.agents/skills/vercel-react-best-practices/rules/rendering-hoist-jsx.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-hoist-jsx.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-hoist-jsx.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md b/.agents/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-svg-precision.md b/.agents/skills/vercel-react-best-practices/rules/rendering-svg-precision.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-svg-precision.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-svg-precision.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-defer-reads.md b/.agents/skills/vercel-react-best-practices/rules/rerender-defer-reads.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-defer-reads.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-defer-reads.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-dependencies.md b/.agents/skills/vercel-react-best-practices/rules/rerender-dependencies.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-dependencies.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-dependencies.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-derived-state.md b/.agents/skills/vercel-react-best-practices/rules/rerender-derived-state.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-derived-state.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-derived-state.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md b/.agents/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md b/.agents/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md similarity index 100% rename from 
.claude/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-memo.md b/.agents/skills/vercel-react-best-practices/rules/rerender-memo.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-memo.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-memo.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-transitions.md b/.agents/skills/vercel-react-best-practices/rules/rerender-transitions.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-transitions.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-transitions.md diff --git a/.claude/skills/vercel-react-best-practices/rules/server-after-nonblocking.md b/.agents/skills/vercel-react-best-practices/rules/server-after-nonblocking.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/server-after-nonblocking.md rename to .agents/skills/vercel-react-best-practices/rules/server-after-nonblocking.md diff --git a/.claude/skills/vercel-react-best-practices/rules/server-cache-lru.md b/.agents/skills/vercel-react-best-practices/rules/server-cache-lru.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/server-cache-lru.md rename to .agents/skills/vercel-react-best-practices/rules/server-cache-lru.md diff --git a/.claude/skills/vercel-react-best-practices/rules/server-cache-react.md b/.agents/skills/vercel-react-best-practices/rules/server-cache-react.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/server-cache-react.md rename to .agents/skills/vercel-react-best-practices/rules/server-cache-react.md diff --git a/.claude/skills/vercel-react-best-practices/rules/server-parallel-fetching.md b/.agents/skills/vercel-react-best-practices/rules/server-parallel-fetching.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/server-parallel-fetching.md rename to .agents/skills/vercel-react-best-practices/rules/server-parallel-fetching.md diff --git a/.claude/skills/vercel-react-best-practices/rules/server-serialization.md b/.agents/skills/vercel-react-best-practices/rules/server-serialization.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/server-serialization.md rename to .agents/skills/vercel-react-best-practices/rules/server-serialization.md diff --git a/.agents/skills/web-design-guidelines/SKILL.md b/.agents/skills/web-design-guidelines/SKILL.md new file mode 100644 index 0000000000..ceae92ab31 --- /dev/null +++ b/.agents/skills/web-design-guidelines/SKILL.md @@ -0,0 +1,39 @@ +--- +name: web-design-guidelines +description: Review UI code for Web Interface Guidelines compliance. Use when asked to "review my UI", "check accessibility", "audit design", "review UX", or "check my site against best practices". +metadata: + author: vercel + version: "1.0.0" + argument-hint: +--- + +# Web Interface Guidelines + +Review files for compliance with Web Interface Guidelines. + +## How It Works + +1. Fetch the latest guidelines from the source URL below +2. Read the specified files (or prompt user for files/pattern) +3. Check against all rules in the fetched guidelines +4. 
Output findings in the terse `file:line` format + +## Guidelines Source + +Fetch fresh guidelines before each review: + +``` +https://raw.githubusercontent.com/vercel-labs/web-interface-guidelines/main/command.md +``` + +Use WebFetch to retrieve the latest rules. The fetched content contains all the rules and output format instructions. + +## Usage + +When a user provides a file or pattern argument: +1. Fetch guidelines from the source URL above +2. Read the specified files +3. Apply all rules from the fetched guidelines +4. Output findings using the format specified in the guidelines + +If no files specified, ask the user which files to review. diff --git a/.claude/skills/component-refactoring b/.claude/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.claude/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.claude/skills/frontend-code-review b/.claude/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.claude/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.claude/skills/frontend-testing b/.claude/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.claude/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.claude/skills/orpc-contract-first b/.claude/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.claude/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.claude/skills/skill-creator b/.claude/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.claude/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.claude/skills/vercel-react-best-practices b/.claude/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.claude/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.claude/skills/web-design-guidelines b/.claude/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.claude/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.codex/skills b/.codex/skills deleted file mode 120000 index 454b8427cd..0000000000 --- a/.codex/skills +++ /dev/null @@ -1 +0,0 @@ -../.claude/skills \ No newline at end of file diff --git a/.codex/skills/component-refactoring b/.codex/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.codex/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.codex/skills/frontend-code-review b/.codex/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.codex/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.codex/skills/frontend-testing b/.codex/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.codex/skills/frontend-testing @@ -0,0 +1 @@ 
+../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.codex/skills/orpc-contract-first b/.codex/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.codex/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.codex/skills/skill-creator b/.codex/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.codex/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.codex/skills/vercel-react-best-practices b/.codex/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.codex/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.codex/skills/web-design-guidelines b/.codex/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.codex/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.cursor/skills/component-refactoring b/.cursor/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.cursor/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.cursor/skills/frontend-code-review b/.cursor/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.cursor/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.cursor/skills/frontend-testing b/.cursor/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.cursor/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.cursor/skills/orpc-contract-first b/.cursor/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.cursor/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.cursor/skills/skill-creator b/.cursor/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.cursor/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.cursor/skills/vercel-react-best-practices b/.cursor/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.cursor/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.cursor/skills/web-design-guidelines b/.cursor/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.cursor/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.gemini/skills/component-refactoring b/.gemini/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.gemini/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.gemini/skills/frontend-code-review b/.gemini/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ 
b/.gemini/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.gemini/skills/frontend-testing b/.gemini/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.gemini/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.gemini/skills/orpc-contract-first b/.gemini/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.gemini/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.gemini/skills/skill-creator b/.gemini/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.gemini/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.gemini/skills/vercel-react-best-practices b/.gemini/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.gemini/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.gemini/skills/web-design-guidelines b/.gemini/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.gemini/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.github/skills/component-refactoring b/.github/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.github/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.github/skills/frontend-code-review b/.github/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.github/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.github/skills/frontend-testing b/.github/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.github/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.github/skills/orpc-contract-first b/.github/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.github/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.github/skills/skill-creator b/.github/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.github/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.github/skills/vercel-react-best-practices b/.github/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.github/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.github/skills/web-design-guidelines b/.github/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.github/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml index ff006324bb..4571fd1cd1 100644 --- 
a/.github/workflows/autofix.yml +++ b/.github/workflows/autofix.yml @@ -82,6 +82,6 @@ jobs: # mdformat breaks YAML front matter in markdown files. Add --exclude for directories containing YAML front matter. - name: mdformat run: | - uvx --python 3.13 mdformat . --exclude ".claude/skills/**" + uvx --python 3.13 mdformat . --exclude ".agents/skills/**" - uses: autofix-ci/action@635ffb0c9798bd160680f18fd73371e355b85f27 diff --git a/api/agent-notes/controllers/console/datasets/datasets_document.py.md b/api/agent-notes/controllers/console/datasets/datasets_document.py.md deleted file mode 100644 index b100249981..0000000000 --- a/api/agent-notes/controllers/console/datasets/datasets_document.py.md +++ /dev/null @@ -1,52 +0,0 @@ -## Purpose - -`api/controllers/console/datasets/datasets_document.py` contains the console (authenticated) APIs for managing dataset documents (list/create/update/delete, processing controls, estimates, etc.). - -## Storage model (uploaded files) - -- For local file uploads into a knowledge base, the binary is stored via `extensions.ext_storage.storage` under the key: - - `upload_files//.` -- File metadata is stored in the `upload_files` table (`UploadFile` model), keyed by `UploadFile.id`. -- Dataset `Document` records reference the uploaded file via: - - `Document.data_source_info.upload_file_id` - -## Download endpoint - -- `GET /datasets//documents//download` - - - Only supported when `Document.data_source_type == "upload_file"`. - - Performs dataset permission + tenant checks via `DocumentResource.get_document(...)`. - - Delegates `Document -> UploadFile` validation and signed URL generation to `DocumentService.get_document_download_url(...)`. - - Applies `cloud_edition_billing_rate_limit_check("knowledge")` to match other KB operations. - - Response body is **only**: `{ "url": "" }`. - -- `POST /datasets//documents/download-zip` - - - Accepts `{ "document_ids": ["..."] }` (upload-file only). - - Returns `application/zip` as a single attachment download. - - Rationale: browsers often block multiple automatic downloads; a ZIP avoids that limitation. - - Applies `cloud_edition_billing_rate_limit_check("knowledge")`. - - Delegates dataset permission checks, document/upload-file validation, and download-name generation to - `DocumentService.prepare_document_batch_download_zip(...)` before streaming the ZIP. - -## Verification plan - -- Upload a document from a local file into a dataset. -- Call the download endpoint and confirm it returns a signed URL. -- Open the URL and confirm: - - Response headers force download (`Content-Disposition`), and - - Downloaded bytes match the uploaded file. -- Select multiple uploaded-file documents and download as ZIP; confirm all selected files exist in the archive. - -## Shared helper - -- `DocumentService.get_document_download_url(document)` resolves the `UploadFile` and signs a download URL. -- `DocumentService.prepare_document_batch_download_zip(...)` performs dataset permission checks, batches - document + upload file lookups, preserves request order, and generates the client-visible ZIP filename. -- Internal helpers now live in `DocumentService` (`_get_upload_file_id_for_upload_file_document(...)`, - `_get_upload_file_for_upload_file_document(...)`, `_get_upload_files_by_document_id_for_zip_download(...)`). 
-- ZIP packing is handled by `FileService.build_upload_files_zip_tempfile(...)`, which also: - - sanitizes entry names to avoid path traversal, and - - deduplicates names while preserving extensions (e.g., `doc.txt` → `doc (1).txt`). - Streaming the response and deferring cleanup is handled by the route via `send_file(path, ...)` + `ExitStack` + - `response.call_on_close(...)` (the file is deleted when the response is closed). diff --git a/api/agent-notes/services/dataset_service.py.md b/api/agent-notes/services/dataset_service.py.md deleted file mode 100644 index b68ef345f5..0000000000 --- a/api/agent-notes/services/dataset_service.py.md +++ /dev/null @@ -1,18 +0,0 @@ -## Purpose - -`api/services/dataset_service.py` hosts dataset/document service logic used by console and API controllers. - -## Batch document operations - -- Batch document workflows should avoid N+1 database queries by using set-based lookups. -- Tenant checks must be enforced consistently across dataset/document operations. -- `DocumentService.get_documents_by_ids(...)` fetches documents for a dataset using `id.in_(...)`. -- `FileService.get_upload_files_by_ids(...)` performs tenant-scoped batch lookup for `UploadFile` (dedupes ids with `set(...)`). -- `DocumentService.get_document_download_url(...)` and `prepare_document_batch_download_zip(...)` handle - dataset/document permission checks plus `Document -> UploadFile` validation for download endpoints. - -## Verification plan - -- Exercise document list and download endpoints that use the service helpers. -- Confirm batch download uses constant query count for documents + upload files. -- Request a ZIP with a missing document id and confirm a 404 is returned. diff --git a/api/agent-notes/services/file_service.py.md b/api/agent-notes/services/file_service.py.md deleted file mode 100644 index cf394a1c05..0000000000 --- a/api/agent-notes/services/file_service.py.md +++ /dev/null @@ -1,35 +0,0 @@ -## Purpose - -`api/services/file_service.py` owns business logic around `UploadFile` objects: upload validation, storage persistence, -previews/generators, and deletion. - -## Key invariants - -- All storage I/O goes through `extensions.ext_storage.storage`. -- Uploaded file keys follow: `upload_files//.`. -- Upload validation is enforced in `FileService.upload_file(...)` (blocked extensions, size limits, dataset-only types). - -## Batch lookup helpers - -- `FileService.get_upload_files_by_ids(tenant_id, upload_file_ids)` is the canonical tenant-scoped batch loader for - `UploadFile`. - -## Dataset document download helpers - -The dataset document download/ZIP endpoints now delegate “Document → UploadFile” validation and permission checks to -`DocumentService` (`api/services/dataset_service.py`). `FileService` stays focused on generic `UploadFile` operations -(uploading, previews, deletion), plus generic ZIP serving. - -### ZIP serving - -- `FileService.build_upload_files_zip_tempfile(...)` builds a ZIP from `UploadFile` objects and yields a seeked - tempfile **path** so callers can stream it (e.g., `send_file(path, ...)`) without hitting "read of closed file" - issues from file-handle lifecycle during streamed responses. -- Flask `send_file(...)` and the `ExitStack`/`call_on_close(...)` cleanup pattern are handled in the route layer. - -## Verification plan - -- Unit: `api/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py` - - Verify signed URL generation for upload-file documents and ZIP download behavior for multiple documents. 
-- Unit: `api/tests/unit_tests/services/test_file_service_zip_and_lookup.py` - - Verify ZIP packing produces a valid, openable archive and preserves file content. diff --git a/api/agent-notes/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py.md b/api/agent-notes/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py.md deleted file mode 100644 index 8f78dacde8..0000000000 --- a/api/agent-notes/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py.md +++ /dev/null @@ -1,28 +0,0 @@ -## Purpose - -Unit tests for the console dataset document download endpoint: - -- `GET /datasets//documents//download` - -## Testing approach - -- Uses `Flask.test_request_context()` and calls the `Resource.get(...)` method directly. -- Monkeypatches console decorators (`login_required`, `setup_required`, rate limit) to no-ops to keep the test focused. -- Mocks: - - `DatasetService.get_dataset` / `check_dataset_permission` - - `DocumentService.get_document` for single-file download tests - - `DocumentService.get_documents_by_ids` + `FileService.get_upload_files_by_ids` for ZIP download tests - - `FileService.get_upload_files_by_ids` for `UploadFile` lookups in single-file tests - - `services.dataset_service.file_helpers.get_signed_file_url` to return a deterministic URL -- Document mocks include `id` fields so batch lookups can map documents by id. - -## Covered cases - -- Success returns `{ "url": "" }` for upload-file documents. -- 404 when document is not `upload_file`. -- 404 when `upload_file_id` is missing. -- 404 when referenced `UploadFile` row does not exist. -- 403 when document tenant does not match current tenant. -- Batch ZIP download returns `application/zip` for upload-file documents. -- Batch ZIP download rejects non-upload-file documents. -- Batch ZIP download uses a random `.zip` attachment name (`download_name`), so tests only assert the suffix. diff --git a/api/agent-notes/tests/unit_tests/services/test_file_service_zip_and_lookup.py.md b/api/agent-notes/tests/unit_tests/services/test_file_service_zip_and_lookup.py.md deleted file mode 100644 index dbcdf26f10..0000000000 --- a/api/agent-notes/tests/unit_tests/services/test_file_service_zip_and_lookup.py.md +++ /dev/null @@ -1,18 +0,0 @@ -## Purpose - -Unit tests for `api/services/file_service.py` helper methods that are not covered by higher-level controller tests. - -## What’s covered - -- `FileService.build_upload_files_zip_tempfile(...)` - - ZIP entry name sanitization (no directory components / traversal) - - name deduplication while preserving extensions - - writing streamed bytes from `storage.load(...)` into ZIP entries - - yields a tempfile path so callers can open/stream the ZIP without holding a live file handle -- `FileService.get_upload_files_by_ids(...)` - - returns `{}` for empty id lists - - returns an id-keyed mapping for non-empty lists - -## Notes - -- These tests intentionally stub `storage.load` and `db.session.scalars(...).all()` to avoid needing a real DB/storage. diff --git a/api/agent_skills/infra.md b/api/agent_skills/infra.md deleted file mode 100644 index bc36c7bf64..0000000000 --- a/api/agent_skills/infra.md +++ /dev/null @@ -1,96 +0,0 @@ -## Configuration - -- Import `configs.dify_config` for every runtime toggle. Do not read environment variables directly. -- Add new settings to the proper mixin inside `configs/` (deployment, feature, middleware, etc.) so they load through `DifyConfig`. 
-- Remote overrides come from the optional providers in `configs/remote_settings_sources`; keep defaults in code safe when the value is missing. -- Example: logging pulls targets from `extensions/ext_logging.py`, and model provider URLs are assembled in `services/entities/model_provider_entities.py`. - -## Dependencies - -- Runtime dependencies live in `[project].dependencies` inside `pyproject.toml`. Optional clients go into the `storage`, `tools`, or `vdb` groups under `[dependency-groups]`. -- Always pin versions and keep the list alphabetised. Shared tooling (lint, typing, pytest) belongs in the `dev` group. -- When code needs a new package, explain why in the PR and run `uv lock` so the lockfile stays current. - -## Storage & Files - -- Use `extensions.ext_storage.storage` for all blob IO; it already respects the configured backend. -- Convert files for workflows with helpers in `core/file/file_manager.py`; they handle signed URLs and multimodal payloads. -- When writing controller logic, delegate upload quotas and metadata to `services/file_service.py` instead of touching storage directly. -- All outbound HTTP fetches (webhooks, remote files) must go through the SSRF-safe client in `core/helper/ssrf_proxy.py`; it wraps `httpx` with the allow/deny rules configured for the platform. - -## Redis & Shared State - -- Access Redis through `extensions.ext_redis.redis_client`. For locking, reuse `redis_client.lock`. -- Prefer higher-level helpers when available: rate limits use `libs.helper.RateLimiter`, provider metadata uses caches in `core/helper/provider_cache.py`. - -## Models - -- SQLAlchemy models sit in `models/` and inherit from the shared declarative `Base` defined in `models/base.py` (metadata configured via `models/engine.py`). -- `models/__init__.py` exposes grouped aggregates: account/tenant models, app and conversation tables, datasets, providers, workflow runs, triggers, etc. Import from there to avoid deep path churn. -- Follow the DDD boundary: persistence objects live in `models/`, repositories under `repositories/` translate them into domain entities, and services consume those repositories. -- When adding a table, create the model class, register it in `models/__init__.py`, wire a repository if needed, and generate an Alembic migration as described below. - -## Vector Stores - -- Vector client implementations live in `core/rag/datasource/vdb/`, with a common factory in `core/rag/datasource/vdb/vector_factory.py` and enums in `core/rag/datasource/vdb/vector_type.py`. -- Retrieval pipelines call these providers through `core/rag/datasource/retrieval_service.py` and dataset ingestion flows in `services/dataset_service.py`. -- The CLI helper `flask vdb-migrate` orchestrates bulk migrations using routines in `commands.py`; reuse that pattern when adding new backend transitions. -- To add another store, mirror the provider layout, register it with the factory, and include any schema changes in Alembic migrations. - -## Observability & OTEL - -- OpenTelemetry settings live under the observability mixin in `configs/observability`. Toggle exporters and sampling via `dify_config`, not ad-hoc env reads. -- HTTP, Celery, Redis, SQLAlchemy, and httpx instrumentation is initialised in `extensions/ext_app_metrics.py` and `extensions/ext_request_logging.py`; reuse these hooks when adding new workers or entrypoints. -- When creating background tasks or external calls, propagate tracing context with helpers in the existing instrumented clients (e.g. 
use the shared `httpx` session from `core/helper/http_client_pooling.py`). -- If you add a new external integration, ensure spans and metrics are emitted by wiring the appropriate OTEL instrumentation package in `pyproject.toml` and configuring it in `extensions/`. - -## Ops Integrations - -- Langfuse support and other tracing bridges live under `core/ops/opik_trace`. Config toggles sit in `configs/observability`, while exporters are initialised in the OTEL extensions mentioned above. -- External monitoring services should follow this pattern: keep client code in `core/ops`, expose switches via `dify_config`, and hook initialisation in `extensions/ext_app_metrics.py` or sibling modules. -- Before instrumenting new code paths, check whether existing context helpers (e.g. `extensions/ext_request_logging.py`) already capture the necessary metadata. - -## Controllers, Services, Core - -- Controllers only parse HTTP input and call a service method. Keep business rules in `services/`. -- Services enforce tenant rules, quotas, and orchestration, then call into `core/` engines (workflow execution, tools, LLMs). -- When adding a new endpoint, search for an existing service to extend before introducing a new layer. Example: workflow APIs pipe through `services/workflow_service.py` into `core/workflow`. - -## Plugins, Tools, Providers - -- In Dify a plugin is a tenant-installable bundle that declares one or more providers (tool, model, datasource, trigger, endpoint, agent strategy) plus its resource needs and version metadata. The manifest (`core/plugin/entities/plugin.py`) mirrors what you see in the marketplace documentation. -- Installation, upgrades, and migrations are orchestrated by `services/plugin/plugin_service.py` together with helpers such as `services/plugin/plugin_migration.py`. -- Runtime loading happens through the implementations under `core/plugin/impl/*` (tool/model/datasource/trigger/endpoint/agent). These modules normalise plugin providers so that downstream systems (`core/tools/tool_manager.py`, `services/model_provider_service.py`, `services/trigger/*`) can treat builtin and plugin capabilities the same way. -- For remote execution, plugin daemons (`core/plugin/entities/plugin_daemon.py`, `core/plugin/impl/plugin.py`) manage lifecycle hooks, credential forwarding, and background workers that keep plugin processes in sync with the main application. -- Acquire tool implementations through `core/tools/tool_manager.py`; it resolves builtin, plugin, and workflow-as-tool providers uniformly, injecting the right context (tenant, credentials, runtime config). -- To add a new plugin capability, extend the relevant `core/plugin/entities` schema and register the implementation in the matching `core/plugin/impl` module rather than importing the provider directly. - -## Async Workloads - -see `agent_skills/trigger.md` for more detailed documentation. - -- Enqueue background work through `services/async_workflow_service.py`. It routes jobs to the tiered Celery queues defined in `tasks/`. -- Workers boot from `celery_entrypoint.py` and execute functions in `tasks/workflow_execution_tasks.py`, `tasks/trigger_processing_tasks.py`, etc. -- Scheduled workflows poll from `schedule/workflow_schedule_tasks.py`. Follow the same pattern if you need new periodic jobs. - -## Database & Migrations - -- SQLAlchemy models live under `models/` and map directly to migration files in `migrations/versions`. 
-- Generate migrations with `uv run --project api flask db revision --autogenerate -m ""`, then review the diff; never hand-edit the database outside Alembic. -- Apply migrations locally using `uv run --project api flask db upgrade`; production deploys expect the same history. -- If you add tenant-scoped data, confirm the upgrade includes tenant filters or defaults consistent with the service logic touching those tables. - -## CLI Commands - -- Maintenance commands from `commands.py` are registered on the Flask CLI. Run them via `uv run --project api flask `. -- Use the built-in `db` commands from Flask-Migrate for schema operations (`flask db upgrade`, `flask db stamp`, etc.). Only fall back to custom helpers if you need their extra behaviour. -- Custom entries such as `flask reset-password`, `flask reset-email`, and `flask vdb-migrate` handle self-hosted account recovery and vector database migrations. -- Before adding a new command, check whether an existing service can be reused and ensure the command guards edition-specific behaviour (many enforce `SELF_HOSTED`). Document any additions in the PR. -- Ruff helpers are run directly with `uv`: `uv run --project api --dev ruff format ./api` for formatting and `uv run --project api --dev ruff check ./api` (add `--fix` if you want automatic fixes). - -## When You Add Features - -- Check for an existing helper or service before writing a new util. -- Uphold tenancy: every service method should receive the tenant ID from controller wrappers such as `controllers/console/wraps.py`. -- Update or create tests alongside behaviour changes (`tests/unit_tests` for fast coverage, `tests/integration_tests` when touching orchestrations). -- Run `uv run --project api --dev ruff check ./api`, `uv run --directory api --dev basedpyright`, and `uv run --project api --dev dev/pytest/pytest_unit_tests.sh` before submitting changes. diff --git a/api/agent_skills/plugin.md b/api/agent_skills/plugin.md deleted file mode 100644 index 954ddd236b..0000000000 --- a/api/agent_skills/plugin.md +++ /dev/null @@ -1 +0,0 @@ -// TBD diff --git a/api/agent_skills/plugin_oauth.md b/api/agent_skills/plugin_oauth.md deleted file mode 100644 index 954ddd236b..0000000000 --- a/api/agent_skills/plugin_oauth.md +++ /dev/null @@ -1 +0,0 @@ -// TBD diff --git a/api/agent_skills/trigger.md b/api/agent_skills/trigger.md deleted file mode 100644 index f4b076332c..0000000000 --- a/api/agent_skills/trigger.md +++ /dev/null @@ -1,53 +0,0 @@ -## Overview - -Trigger is a collection of nodes that we called `Start` nodes, also, the concept of `Start` is the same as `RootNode` in the workflow engine `core/workflow/graph_engine`, On the other hand, `Start` node is the entry point of workflows, every workflow run always starts from a `Start` node. - -## Trigger nodes - -- `UserInput` -- `Trigger Webhook` -- `Trigger Schedule` -- `Trigger Plugin` - -### UserInput - -Before `Trigger` concept is introduced, it's what we called `Start` node, but now, to avoid confusion, it was renamed to `UserInput` node, has a strong relation with `ServiceAPI` in `controllers/service_api/app` - -1. `UserInput` node introduces a list of arguments that need to be provided by the user, finally it will be converted into variables in the workflow variable pool. -1. `ServiceAPI` accept those arguments, and pass through them into `UserInput` node. -1. 
For its detailed implementation, please refer to `core/workflow/nodes/start` - -### Trigger Webhook - -Inside Webhook Node, Dify provided a UI panel that allows user define a HTTP manifest `core/workflow/nodes/trigger_webhook/entities.py`.`WebhookData`, also, Dify generates a random webhook id for each `Trigger Webhook` node, the implementation was implemented in `core/trigger/utils/endpoint.py`, as you can see, `webhook-debug` is a debug mode for webhook, you may find it in `controllers/trigger/webhook.py`. - -Finally, requests to `webhook` endpoint will be converted into variables in workflow variable pool during workflow execution. - -### Trigger Schedule - -`Trigger Schedule` node is a node that allows user define a schedule to trigger the workflow, detailed manifest is here `core/workflow/nodes/trigger_schedule/entities.py`, we have a poller and executor to handle millions of schedules, see `docker/entrypoint.sh` / `schedule/workflow_schedule_task.py` for help. - -To Achieve this, a `WorkflowSchedulePlan` model was introduced in `models/trigger.py`, and a `events/event_handlers/sync_workflow_schedule_when_app_published.py` was used to sync workflow schedule plans when app is published. - -### Trigger Plugin - -`Trigger Plugin` node allows user define there own distributed trigger plugin, whenever a request was received, Dify forwards it to the plugin and wait for parsed variables from it. - -1. Requests were saved in storage by `services/trigger/trigger_request_service.py`, referenced by `services/trigger/trigger_service.py`.`TriggerService`.`process_endpoint` -1. Plugins accept those requests and parse variables from it, see `core/plugin/impl/trigger.py` for details. - -A `subscription` concept was out here by Dify, it means an endpoint address from Dify was bound to thirdparty webhook service like `Github` `Slack` `Linear` `GoogleDrive` `Gmail` etc. Once a subscription was created, Dify continually receives requests from the platforms and handle them one by one. - -## Worker Pool / Async Task - -All the events that triggered a new workflow run is always in async mode, a unified entrypoint can be found here `services/async_workflow_service.py`.`AsyncWorkflowService`.`trigger_workflow_async`. - -The infrastructure we used is `celery`, we've already configured it in `docker/entrypoint.sh`, and the consumers are in `tasks/async_workflow_tasks.py`, 3 queues were used to handle different tiers of users, `PROFESSIONAL_QUEUE` `TEAM_QUEUE` `SANDBOX_QUEUE`. - -## Debug Strategy - -Dify divided users into 2 groups: builders / end users. - -Builders are the users who create workflows, in this stage, debugging a workflow becomes a critical part of the workflow development process, as the start node in workflows, trigger nodes can `listen` to the events from `WebhookDebug` `Schedule` `Plugin`, debugging process was created in `controllers/console/app/workflow.py`.`DraftWorkflowTriggerNodeApi`. - -A polling process can be considered as combine of few single `poll` operations, each `poll` operation fetches events cached in `Redis`, returns `None` if no event was found, more detailed implemented: `core/trigger/debug/event_bus.py` was used to handle the polling process, and `core/trigger/debug/event_selectors.py` was used to select the event poller based on the trigger type. 
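The `trigger.md` notes removed above describe routing every trigger-initiated run through a single async entrypoint (`AsyncWorkflowService.trigger_workflow_async`) and three tiered Celery queues (`PROFESSIONAL_QUEUE`, `TEAM_QUEUE`, `SANDBOX_QUEUE`). A minimal sketch of that dispatch pattern follows; the `enqueue_workflow_run` task, the broker URL, and the plan-to-queue mapping are illustrative assumptions, not Dify's actual service or task signatures.

```python
from celery import Celery

# Illustrative broker URL; the real deployment wires Celery up in docker/entrypoint.sh.
celery_app = Celery("workflow", broker="redis://localhost:6379/1")

# Queue names come from the removed note; the plan-to-queue mapping itself is assumed.
TIER_QUEUES = {
    "professional": "PROFESSIONAL_QUEUE",
    "team": "TEAM_QUEUE",
    "sandbox": "SANDBOX_QUEUE",
}


@celery_app.task(name="workflow.enqueue_workflow_run")  # hypothetical task, not Dify's
def enqueue_workflow_run(tenant_id: str, workflow_id: str, inputs: dict) -> None:
    # A worker booted from the Celery entrypoint would execute the workflow run here.
    ...


def trigger_workflow_async(tenant_id: str, workflow_id: str, inputs: dict, plan: str) -> None:
    """Route a trigger-initiated run to the queue for the tenant's plan tier."""
    queue = TIER_QUEUES.get(plan, "SANDBOX_QUEUE")  # unknown plans fall back to sandbox
    enqueue_workflow_run.apply_async(args=[tenant_id, workflow_id, inputs], queue=queue)
```

Keeping the queue selection in the enqueue helper rather than in the task itself mirrors the note's split between the service-layer entrypoint in `services/async_workflow_service.py` and the consumers under `tasks/`.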
From d4f5a113ed0f2db298ca2d701036d30ff4dbba03 Mon Sep 17 00:00:00 2001 From: Bowen Liang Date: Wed, 21 Jan 2026 15:07:32 +0800 Subject: [PATCH 25/26] chore(web): refactor next.config.js to next.config.ts (#31331) --- web/{next.config.js => next.config.ts} | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) rename web/{next.config.js => next.config.ts} (88%) diff --git a/web/next.config.js b/web/next.config.ts similarity index 88% rename from web/next.config.js rename to web/next.config.ts index 1457d638c4..fc4dee3289 100644 --- a/web/next.config.js +++ b/web/next.config.ts @@ -1,3 +1,4 @@ +import type { NextConfig } from 'next' import process from 'node:process' import withBundleAnalyzerInit from '@next/bundle-analyzer' import createMDX from '@next/mdx' @@ -24,10 +25,9 @@ const withBundleAnalyzer = withBundleAnalyzerInit({ const hasSetWebPrefix = process.env.NEXT_PUBLIC_WEB_PREFIX const port = process.env.PORT || 3000 const locImageURLs = !hasSetWebPrefix ? [new URL(`http://localhost:${port}/**`), new URL(`http://127.0.0.1:${port}/**`)] : [] -const remoteImageURLs = [hasSetWebPrefix ? new URL(`${process.env.NEXT_PUBLIC_WEB_PREFIX}/**`) : '', ...locImageURLs].filter(item => !!item) +const remoteImageURLs = ([hasSetWebPrefix ? new URL(`${process.env.NEXT_PUBLIC_WEB_PREFIX}/**`) : '', ...locImageURLs].filter(item => !!item)) as URL[] -/** @type {import('next').NextConfig} */ -const nextConfig = { +const nextConfig: NextConfig = { basePath: process.env.NEXT_PUBLIC_BASE_PATH || '', serverExternalPackages: ['esbuild-wasm'], transpilePackages: ['echarts', 'zrender'], @@ -42,7 +42,7 @@ const nextConfig = { // https://nextjs.org/docs/messages/next-image-unconfigured-host images: { remotePatterns: remoteImageURLs.map(remoteImageURL => ({ - protocol: remoteImageURL.protocol.replace(':', ''), + protocol: remoteImageURL.protocol.replace(':', '') as 'http' | 'https', hostname: remoteImageURL.hostname, port: remoteImageURL.port, pathname: remoteImageURL.pathname, From ed0e068a47f90c1e5ce8118f1a43307eafa7c253 Mon Sep 17 00:00:00 2001 From: Coding On Star <447357187@qq.com> Date: Wed, 21 Jan 2026 15:47:49 +0800 Subject: [PATCH 26/26] fix(i18n): update model provider tip to only mention OpenAI in English, Japanese, and Simplified Chinese translations (#31339) Co-authored-by: CodingOnStar --- web/i18n/en-US/common.json | 2 +- web/i18n/ja-JP/common.json | 2 +- web/i18n/zh-Hans/common.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/web/i18n/en-US/common.json b/web/i18n/en-US/common.json index d2e5281282..20e5400e56 100644 --- a/web/i18n/en-US/common.json +++ b/web/i18n/en-US/common.json @@ -350,7 +350,7 @@ "modelProvider.card.quota": "QUOTA", "modelProvider.card.quotaExhausted": "Quota exhausted", "modelProvider.card.removeKey": "Remove API Key", - "modelProvider.card.tip": "Message Credits supports models from OpenAI, Anthropic, Gemini, xAI, DeepSeek and Tongyi. Priority will be given to the paid quota. The free quota will be used after the paid quota is exhausted.", + "modelProvider.card.tip": "Message Credits supports models from OpenAI. Priority will be given to the paid quota. 
The free quota will be used after the paid quota is exhausted.", "modelProvider.card.tokens": "Tokens", "modelProvider.collapse": "Collapse", "modelProvider.config": "Config", diff --git a/web/i18n/ja-JP/common.json b/web/i18n/ja-JP/common.json index ffc2d0bd31..8a76021759 100644 --- a/web/i18n/ja-JP/common.json +++ b/web/i18n/ja-JP/common.json @@ -350,7 +350,7 @@ "modelProvider.card.quota": "クォータ", "modelProvider.card.quotaExhausted": "クォータが使い果たされました", "modelProvider.card.removeKey": "API キーを削除", - "modelProvider.card.tip": "メッセージ枠はOpenAI、Anthropic、Gemini、xAI、DeepSeek、Tongyiのモデルを使用することをサポートしています。無料枠は有料枠が使い果たされた後に消費されます。", + "modelProvider.card.tip": "メッセージ枠はOpenAIのモデルを使用することをサポートしています。無料枠は有料枠が使い果たされた後に消費されます。", "modelProvider.card.tokens": "トークン", "modelProvider.collapse": "折り畳み", "modelProvider.config": "設定", diff --git a/web/i18n/zh-Hans/common.json b/web/i18n/zh-Hans/common.json index b5eabfeecc..6f62b53e2d 100644 --- a/web/i18n/zh-Hans/common.json +++ b/web/i18n/zh-Hans/common.json @@ -350,7 +350,7 @@ "modelProvider.card.quota": "额度", "modelProvider.card.quotaExhausted": "配额已用完", "modelProvider.card.removeKey": "删除 API 密钥", - "modelProvider.card.tip": "消息额度支持使用 OpenAI、Anthropic、Gemini、xAI、深度求索、通义 的模型;免费额度会在付费额度用尽后才会消耗。", + "modelProvider.card.tip": "消息额度支持使用 OpenAI 的模型;免费额度会在付费额度用尽后才会消耗。", "modelProvider.card.tokens": "Tokens", "modelProvider.collapse": "收起", "modelProvider.config": "配置",