From 42091b4a79d67e7688341248b27c3bc663389631 Mon Sep 17 00:00:00 2001 From: Stream Date: Wed, 20 Aug 2025 16:51:07 +0800 Subject: [PATCH 01/82] feat: add MEMORY_BLOCK in DraftVariableType --- api/models/enums.py | 1 + 1 file changed, 1 insertion(+) diff --git a/api/models/enums.py b/api/models/enums.py index cc9f28a7bb..bc66b1bb56 100644 --- a/api/models/enums.py +++ b/api/models/enums.py @@ -21,6 +21,7 @@ class DraftVariableType(StrEnum): NODE = "node" SYS = "sys" CONVERSATION = "conversation" + MEMORY_BLOCK = "memory_block" class MessageStatus(StrEnum): From 584b2cefa3743dad11893134ea88831d226bf070 Mon Sep 17 00:00:00 2001 From: Stream Date: Wed, 20 Aug 2025 17:03:15 +0800 Subject: [PATCH 02/82] feat: add pydantic models for memory --- api/core/memory/entities.py | 99 +++++++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 api/core/memory/entities.py diff --git a/api/core/memory/entities.py b/api/core/memory/entities.py new file mode 100644 index 0000000000..84e6d915b9 --- /dev/null +++ b/api/core/memory/entities.py @@ -0,0 +1,99 @@ +from enum import Enum +from typing import Optional, Dict, Any +from uuid import uuid4 + +from pydantic import BaseModel, Field + + +class MemoryScope(str, Enum): + """Memory scope determined by node_id field""" + APP = "app" # node_id is None + NODE = "node" # node_id is not None + + +class MemoryTerm(str, Enum): + """Memory term determined by conversation_id field""" + SESSION = "session" # conversation_id is not None + PERSISTENT = "persistent" # conversation_id is None + + +class MemoryStrategy(str, Enum): + ON_TURNS = "on_turns" + + +class MemoryScheduleMode(str, Enum): + SYNC = "sync" + ASYNC = "async" + + +class MemoryBlockSpec(BaseModel): + """Memory block specification for workflow configuration""" + id: str = Field( + default_factory=lambda: str(uuid4()), + description="Unique identifier for the memory block", + ) + name: str = Field(description="Display name of the memory block") + 
description: str = Field(default="", description="Description of the memory block") + template: str = Field(description="Initial template content for the memory") + instruction: str = Field(description="Instructions for updating the memory") + scope: MemoryScope = Field(description="Scope of the memory (app or node level)") + term: MemoryTerm = Field(description="Term of the memory (session or persistent)") + strategy: MemoryStrategy = Field(description="Update strategy for the memory") + update_turns: int = Field(gt=0, description="Number of turns between updates") + preserved_turns: int = Field(gt=0, description="Number of conversation turns to preserve") + schedule_mode: MemoryScheduleMode = Field(description="Synchronous or asynchronous update mode") + model: Optional[Dict[str, Any]] = Field(default=None, description="Model configuration for memory updates") + end_user_visible: bool = Field(default=False, description="Whether memory is visible to end users") + end_user_editable: bool = Field(default=False, description="Whether memory is editable by end users") + + +class MemoryBlock(BaseModel): + """Runtime memory block instance + + Design Rules: + - app_id = None: Global memory (future feature, not implemented yet) + - app_id = str: App-specific memory + - conversation_id = None: Persistent memory (cross-conversation) + - conversation_id = str: Session memory (conversation-specific) + - node_id = None: App-level scope + - node_id = str: Node-level scope + + These rules implicitly determine scope and term without redundant storage. 
+ """ + id: str + memory_id: str + name: str + value: str + scope: MemoryScope # Derived from node_id: None=APP, str=NODE + term: MemoryTerm # Derived from conversation_id: None=PERSISTENT, str=SESSION + app_id: Optional[str] = None # None=global(future), str=app-specific + conversation_id: Optional[str] = None # None=persistent, str=session + node_id: Optional[str] = None # None=app-scope, str=node-scope + created_at: Optional[str] = None + updated_at: Optional[str] = None + + @property + def is_global(self) -> bool: + """Check if this is global memory (future feature)""" + return self.app_id is None + + @property + def is_persistent(self) -> bool: + """Check if this is persistent memory (cross-conversation)""" + return self.conversation_id is None + + @property + def is_app_scope(self) -> bool: + """Check if this is app-level scope""" + return self.node_id is None + + @property + def is_node_scope(self) -> bool: + """Check if this is node-level scope""" + return self.node_id is not None + + +class ChatflowConversationMetadata(BaseModel): + """Metadata for chatflow conversation with visible message count""" + type: str = "mutable_visible_window" + visible_count: int = Field(gt=0, description="Number of visible messages to keep") From f284c919883ed9ab19e3936a0fed2be4d47075a2 Mon Sep 17 00:00:00 2001 From: Stream Date: Wed, 20 Aug 2025 17:16:54 +0800 Subject: [PATCH 03/82] feat: add data tables for chatflow memory --- api/models/chatflow_memory.py | 55 +++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 api/models/chatflow_memory.py diff --git a/api/models/chatflow_memory.py b/api/models/chatflow_memory.py new file mode 100644 index 0000000000..f0b36d0d69 --- /dev/null +++ b/api/models/chatflow_memory.py @@ -0,0 +1,55 @@ +from datetime import datetime + +import sqlalchemy as sa +from sqlalchemy import DateTime, func +from sqlalchemy.orm import Mapped, mapped_column + +from .base import Base +from .types import StringUUID + + 
+class ChatflowMemoryVariable(Base): + __tablename__ = "chatflow_memory_variables" + + id: Mapped[str] = mapped_column(StringUUID, primary_key=True, server_default=sa.text("uuid_generate_v4()")) + tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + app_id: Mapped[str | None] = mapped_column(StringUUID, nullable=True) + conversation_id: Mapped[str | None] = mapped_column(StringUUID, nullable=True) + node_id: Mapped[str | None] = mapped_column(sa.Text, nullable=True) + memory_id: Mapped[str] = mapped_column(sa.Text, nullable=False) + value: Mapped[str] = mapped_column(sa.Text, nullable=False) + name: Mapped[str] = mapped_column(sa.Text, nullable=False) + scope: Mapped[str] = mapped_column(sa.String(10), nullable=False) # 'app' or 'node' + term: Mapped[str] = mapped_column(sa.String(20), nullable=False) # 'session' or 'persistent' + + created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at: Mapped[datetime] = mapped_column( + DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp() + ) + + +class ChatflowConversation(Base): + __tablename__ = "chatflow_conversations" + + id: Mapped[str] = mapped_column(StringUUID, primary_key=True, server_default=sa.text("uuid_generate_v4()")) + tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + app_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + node_id: Mapped[str | None] = mapped_column(sa.Text, nullable=True) + original_conversation_id: Mapped[str | None] = mapped_column(StringUUID, nullable=True) + conversation_metadata: Mapped[str] = mapped_column(sa.Text, nullable=False) # JSON + + created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at: Mapped[datetime] = mapped_column( + DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp() + ) + + +class 
ChatflowMessage(Base): + __tablename__ = "chatflow_messages" + + id: Mapped[str] = mapped_column(StringUUID, primary_key=True, server_default=sa.text("uuid_generate_v4()")) + conversation_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + index: Mapped[int] = mapped_column(sa.Integer, nullable=False) + version: Mapped[int] = mapped_column(sa.Integer, nullable=False) + data: Mapped[str] = mapped_column(sa.Text, nullable=False) # Serialized PromptMessage JSON + created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) From 38130c85021d828412199d711eccd518695b54da Mon Sep 17 00:00:00 2001 From: Stream Date: Wed, 20 Aug 2025 17:19:48 +0800 Subject: [PATCH 04/82] feat: add memory_blocks property to workflow's graph for memory block configuration --- api/models/workflow.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/api/models/workflow.py b/api/models/workflow.py index 7ff463e08f..f4406b9fdf 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -12,6 +12,7 @@ from sqlalchemy import DateTime, orm from core.file.constants import maybe_file_object from core.file.models import File +from core.memory.entities import MemoryBlockSpec from core.variables import utils as variable_utils from core.variables.variables import FloatVariable, IntegerVariable, StringVariable from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, SYSTEM_VARIABLE_NODE_ID @@ -445,6 +446,16 @@ class Workflow(Base): {var.name: var.model_dump() for var in value}, ensure_ascii=False, ) + @property + def memory_blocks(self) -> Sequence[MemoryBlockSpec]: + """Memory blocks configuration from graph""" + + if not self.graph_dict: + return [] + + memory_blocks_config = self.graph_dict.get('memory_blocks', []) + results = [MemoryBlockSpec.model_validate(config) for config in memory_blocks_config] + return results @staticmethod def version_from_datetime(d: datetime) -> str: From 
fcf4e1f37d088159f13a7636679079eb5f0fa644 Mon Sep 17 00:00:00 2001 From: Stream Date: Wed, 20 Aug 2025 17:41:13 +0800 Subject: [PATCH 05/82] feat: add MEMORY_BLOCK_VARIABLE_NODE_ID --- api/core/workflow/constants.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/api/core/workflow/constants.py b/api/core/workflow/constants.py index e3fe17c284..0fad43d0f6 100644 --- a/api/core/workflow/constants.py +++ b/api/core/workflow/constants.py @@ -1,3 +1,5 @@ SYSTEM_VARIABLE_NODE_ID = "sys" ENVIRONMENT_VARIABLE_NODE_ID = "env" CONVERSATION_VARIABLE_NODE_ID = "conversation" +MEMORY_BLOCK_VARIABLE_NODE_ID = "memory_block" + From d535818505804832ca1ac243bdab8dcc622318c7 Mon Sep 17 00:00:00 2001 From: Stream Date: Wed, 20 Aug 2025 17:41:45 +0800 Subject: [PATCH 06/82] feat: add new_memory_block_variable for WorkflowDraftVariable --- api/models/workflow.py | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/api/models/workflow.py b/api/models/workflow.py index f4406b9fdf..c7b0759b4f 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -1,5 +1,6 @@ import json import logging +import uuid from collections.abc import Mapping, Sequence from datetime import datetime from enum import Enum, StrEnum @@ -15,7 +16,8 @@ from core.file.models import File from core.memory.entities import MemoryBlockSpec from core.variables import utils as variable_utils from core.variables.variables import FloatVariable, IntegerVariable, StringVariable -from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, SYSTEM_VARIABLE_NODE_ID +from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, SYSTEM_VARIABLE_NODE_ID, \ + MEMORY_BLOCK_VARIABLE_NODE_ID from core.workflow.nodes.enums import NodeType from factories.variable_factory import TypeMismatchError, build_segment_with_type from libs.datetime_utils import naive_utc_now @@ -1268,6 +1270,31 @@ class WorkflowDraftVariable(Base): variable.editable = editable return variable + 
class MemorySyncTimeoutError(Exception):
    """Raised when waiting for a synchronous memory update exceeds the allowed time.

    Carries the app and conversation identifiers so callers can log or surface
    exactly which conversation's memory synchronization timed out.
    """

    def __init__(self, app_id: str, conversation_id: str, timeout_seconds: int = 50):
        self.app_id = app_id
        self.conversation_id = conversation_id
        # Parameterized instead of hardcoding "50 seconds" so the message can
        # reflect the actual timeout used by the caller; the default keeps the
        # original wording for backward compatibility.
        self.timeout_seconds = timeout_seconds
        self.message = f"Memory synchronization timeout after {timeout_seconds} seconds"
        super().__init__(self.message)
b/api/services/chatflow_history_service.py new file mode 100644 index 0000000000..828758ee85 --- /dev/null +++ b/api/services/chatflow_history_service.py @@ -0,0 +1,359 @@ +import json +import time +from collections.abc import Sequence +from typing import Literal, Optional, overload + +from sqlalchemy import Row, Select, and_, func, select +from sqlalchemy.orm import Session + +from core.memory.entities import ChatflowConversationMetadata +from core.model_runtime.entities.message_entities import ( + AssistantPromptMessage, + PromptMessage, + UserPromptMessage, +) +from extensions.ext_database import db +from models.chatflow_memory import ChatflowConversation, ChatflowMessage + + +class ChatflowHistoryService: + """ + Service layer for managing chatflow conversation history. + + This unified service handles all chatflow memory operations: + - Reading visible chat history with version control + - Saving messages to append-only table + - Managing visible_count metadata + - Supporting both app-level and node-level scoping + """ + + @staticmethod + def get_visible_chat_history( + conversation_id: str, + app_id: str, + tenant_id: str, + node_id: Optional[str] = None, + max_visible_count: Optional[int] = None + ) -> Sequence[PromptMessage]: + """ + Get visible chat history based on metadata visible_count. 
+ + Args: + conversation_id: Original conversation ID + node_id: None for app-level, specific node_id for node-level + max_visible_count: Override visible_count for memory update operations + + Returns: + Sequence of PromptMessage objects in chronological order (oldest first) + """ + with db.session() as session: + chatflow_conv = ChatflowHistoryService._get_or_create_chatflow_conversation( + session, conversation_id, app_id, tenant_id, node_id, create_if_missing=False + ) + + if not chatflow_conv: + return [] + + # Parse metadata + metadata_dict = json.loads(chatflow_conv.conversation_metadata) + metadata = ChatflowConversationMetadata.model_validate(metadata_dict) + + # Determine the actual number of messages to return + target_visible_count = max_visible_count if max_visible_count is not None else metadata.visible_count + + # Fetch all messages (handle versioning) + msg_stmt = select(ChatflowMessage).where( + ChatflowMessage.conversation_id == chatflow_conv.id + ).order_by(ChatflowMessage.index.asc(), ChatflowMessage.version.desc()) + + all_messages: Sequence[Row[tuple[ChatflowMessage]]] = session.execute(msg_stmt).all() + + # Filter in memory: keep only the latest version for each index + latest_messages_by_index: dict[int, ChatflowMessage] = {} + for msg_row in all_messages: + msg = msg_row[0] + index = msg.index + + if index not in latest_messages_by_index or msg.version > latest_messages_by_index[index].version: + latest_messages_by_index[index] = msg + + # Sort by index and take the latest target_visible_count messages + sorted_messages = sorted(latest_messages_by_index.values(), key=lambda m: m.index, reverse=True) + visible_messages = sorted_messages[:target_visible_count] + + # Convert to PromptMessage and restore correct order (oldest first) + prompt_messages: list[PromptMessage] = [] + for msg in reversed(visible_messages): # Restore chronological order (index ascending) + data = json.loads(msg.data) + role = data.get('role', 'user') + content = 
data.get('content', '') + + if role == 'user': + prompt_messages.append(UserPromptMessage(content=content)) + elif role == 'assistant': + prompt_messages.append(AssistantPromptMessage(content=content)) + + return prompt_messages + + @staticmethod + def get_app_visible_chat_history( + app_id: str, + conversation_id: str, + tenant_id: str, + max_visible_count: Optional[int] = None + ) -> Sequence[PromptMessage]: + """Get visible chat history for app level.""" + return ChatflowHistoryService.get_visible_chat_history( + conversation_id=conversation_id, + app_id=app_id, + tenant_id=tenant_id, + node_id=None, # App level + max_visible_count=max_visible_count + ) + + @staticmethod + def get_node_visible_chat_history( + node_id: str, + conversation_id: str, + app_id: str, + tenant_id: str, + max_visible_count: Optional[int] = None + ) -> Sequence[PromptMessage]: + """Get visible chat history for a specific node.""" + return ChatflowHistoryService.get_visible_chat_history( + conversation_id=conversation_id, + app_id=app_id, + tenant_id=tenant_id, + node_id=node_id, + max_visible_count=max_visible_count + ) + + @staticmethod + def save_message( + prompt_message: PromptMessage, + conversation_id: str, + app_id: str, + tenant_id: str, + node_id: Optional[str] = None + ) -> None: + """ + Save a message to the append-only chatflow_messages table. 
+ + Args: + node_id: None for app-level, specific node_id for node-level + """ + with db.session() as session: + chatflow_conv = ChatflowHistoryService._get_or_create_chatflow_conversation( + session, conversation_id, app_id, tenant_id, node_id, create_if_missing=True + ) + + # Get next index + max_index = session.execute( + select(func.max(ChatflowMessage.index)).where( + ChatflowMessage.conversation_id == chatflow_conv.id + ) + ).scalar() or -1 + next_index = max_index + 1 + + # Save new message to append-only table + message_data = { + 'role': prompt_message.role.value, + 'content': prompt_message.get_text_content(), + 'timestamp': time.time() + } + + new_message = ChatflowMessage( + conversation_id=chatflow_conv.id, + index=next_index, + version=1, + data=json.dumps(message_data) + ) + session.add(new_message) + session.commit() + + @staticmethod + def save_app_message( + prompt_message: PromptMessage, + conversation_id: str, + app_id: str, + tenant_id: str + ) -> None: + """Save PromptMessage to app-level chatflow conversation.""" + ChatflowHistoryService.save_message( + prompt_message=prompt_message, + conversation_id=conversation_id, + app_id=app_id, + tenant_id=tenant_id, + node_id=None + ) + + @staticmethod + def save_node_message( + prompt_message: PromptMessage, + node_id: str, + conversation_id: str, + app_id: str, + tenant_id: str + ) -> None: + """Save PromptMessage to node-specific chatflow conversation.""" + ChatflowHistoryService.save_message( + prompt_message=prompt_message, + conversation_id=conversation_id, + app_id=app_id, + tenant_id=tenant_id, + node_id=node_id + ) + + @staticmethod + def save_message_version( + prompt_message: PromptMessage, + message_index: int, + conversation_id: str, + app_id: str, + tenant_id: str, + node_id: Optional[str] = None + ) -> None: + """ + Save a new version of an existing message (for message editing scenarios). 
+ """ + with db.session() as session: + chatflow_conv = ChatflowHistoryService._get_or_create_chatflow_conversation( + session, conversation_id, app_id, tenant_id, node_id, create_if_missing=True + ) + + # Get the maximum version number for this index + max_version = session.execute( + select(func.max(ChatflowMessage.version)).where( + and_( + ChatflowMessage.conversation_id == chatflow_conv.id, + ChatflowMessage.index == message_index + ) + ) + ).scalar() or 0 + next_version = max_version + 1 + + # Save new version of the message + message_data = { + 'role': prompt_message.role.value, + 'content': prompt_message.get_text_content(), + 'timestamp': time.time() + } + + new_message_version = ChatflowMessage( + conversation_id=chatflow_conv.id, + index=message_index, + version=next_version, + data=json.dumps(message_data) + ) + session.add(new_message_version) + session.commit() + + @staticmethod + def update_visible_count( + conversation_id: str, + node_id: Optional[str], + new_visible_count: int, + app_id: str, + tenant_id: str + ) -> None: + """ + Update visible_count metadata for specific scope. 
+ + Args: + node_id: None for app-level updates, specific node_id for node-level updates + new_visible_count: The new visible_count value (typically preserved_turns) + + Usage Examples: + # Update app-level visible_count + ChatflowHistoryService.update_visible_count(conv_id, None, 10, app_id, tenant_id) + + # Update node-specific visible_count + ChatflowHistoryService.update_visible_count(conv_id, "node-123", 8, app_id, tenant_id) + """ + with db.session() as session: + chatflow_conv = ChatflowHistoryService._get_or_create_chatflow_conversation( + session, conversation_id, app_id, tenant_id, node_id, create_if_missing=True + ) + + # Only update visible_count in metadata, do not delete any data + new_metadata = ChatflowConversationMetadata(visible_count=new_visible_count) + chatflow_conv.conversation_metadata = new_metadata.model_dump_json() + + session.commit() + + @overload + @staticmethod + def _get_or_create_chatflow_conversation( + session: Session, + conversation_id: str, + app_id: str, + tenant_id: str, + node_id: Optional[str] = None, + create_if_missing: Literal[True] = True + ) -> ChatflowConversation: ... + + @overload + @staticmethod + def _get_or_create_chatflow_conversation( + session: Session, + conversation_id: str, + app_id: str, + tenant_id: str, + node_id: Optional[str] = None, + create_if_missing: Literal[False] = False + ) -> Optional[ChatflowConversation]: ... + + @overload + @staticmethod + def _get_or_create_chatflow_conversation( + session: Session, + conversation_id: str, + app_id: str, + tenant_id: str, + node_id: Optional[str] = None, + create_if_missing: bool = False + ) -> Optional[ChatflowConversation]: ... 
+ + @staticmethod + def _get_or_create_chatflow_conversation( + session: Session, + conversation_id: str, + app_id: str, + tenant_id: str, + node_id: Optional[str] = None, + create_if_missing: bool = False + ) -> Optional[ChatflowConversation]: + """Get existing chatflow conversation or optionally create new one""" + stmt: Select[tuple[ChatflowConversation]] = select(ChatflowConversation).where( + and_( + ChatflowConversation.original_conversation_id == conversation_id, + ChatflowConversation.tenant_id == tenant_id, + ChatflowConversation.app_id == app_id + ) + ) + + if node_id: + stmt = stmt.where(ChatflowConversation.node_id == node_id) + else: + stmt = stmt.where(ChatflowConversation.node_id.is_(None)) + + chatflow_conv: Row[tuple[ChatflowConversation]] | None = session.execute(stmt).first() + + if chatflow_conv: + result: ChatflowConversation = chatflow_conv[0] # Extract the ChatflowConversation object + return result + else: + if create_if_missing: + # Create a new chatflow conversation + default_metadata = ChatflowConversationMetadata(visible_count=20) + new_chatflow_conv = ChatflowConversation( + tenant_id=tenant_id, + app_id=app_id, + node_id=node_id, + original_conversation_id=conversation_id, + conversation_metadata=default_metadata.model_dump_json(), + ) + session.add(new_chatflow_conv) + session.flush() # Obtain ID + return new_chatflow_conv + return None diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py new file mode 100644 index 0000000000..781a9ae1ea --- /dev/null +++ b/api/services/chatflow_memory_service.py @@ -0,0 +1,772 @@ +import logging +import threading +import time +from collections.abc import Sequence +from typing import Optional, cast + +from sqlalchemy import and_, select +from sqlalchemy.orm import Session + +from core.memory.entities import ( + MemoryBlock, + MemoryBlockSpec, + MemoryScheduleMode, + MemoryScope, + MemoryStrategy, + MemoryTerm, +) +from core.memory.errors import 
MemorySyncTimeoutError +from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage +from core.workflow.entities.variable_pool import VariablePool +from extensions.ext_database import db +from extensions.ext_redis import redis_client +from models.chatflow_memory import ChatflowMemoryVariable +from services.chatflow_history_service import ChatflowHistoryService + +logger = logging.getLogger(__name__) + +# Important note: Since Dify uses gevent, we don't need an extra task queue (e.g., Celery). +# Threads created via threading.Thread are automatically patched into greenlets in a gevent environment, +# enabling efficient asynchronous execution. + +def _get_memory_sync_lock_key(app_id: str, conversation_id: str) -> str: + """Generate Redis lock key for memory sync updates + + Args: + app_id: Application ID + conversation_id: Conversation ID + + Returns: + Formatted lock key + """ + return f"memory_sync_update:{app_id}:{conversation_id}" + +class ChatflowMemoryService: + """ + Memory service class with only static methods. + All methods are static and do not require instantiation. 
+ """ + + @staticmethod + def get_memory(memory_id: str, tenant_id: str, + app_id: Optional[str] = None, + conversation_id: Optional[str] = None, + node_id: Optional[str] = None) -> Optional[MemoryBlock]: + """Get single memory by ID""" + stmt = select(ChatflowMemoryVariable).where( + and_( + ChatflowMemoryVariable.memory_id == memory_id, + ChatflowMemoryVariable.tenant_id == tenant_id + ) + ) + + if app_id: + stmt = stmt.where(ChatflowMemoryVariable.app_id == app_id) + if conversation_id: + stmt = stmt.where(ChatflowMemoryVariable.conversation_id == conversation_id) + if node_id: + stmt = stmt.where(ChatflowMemoryVariable.node_id == node_id) + + with db.session() as session: + result = session.execute(stmt).first() + if result: + return MemoryBlock.model_validate(result[0].__dict__) + return None + + @staticmethod + def save_memory(memory: MemoryBlock, tenant_id: str, is_draft: bool = False) -> None: + """Save or update memory with draft mode support""" + stmt = select(ChatflowMemoryVariable).where( + and_( + ChatflowMemoryVariable.memory_id == memory.memory_id, + ChatflowMemoryVariable.tenant_id == tenant_id + ) + ) + + with db.session() as session: + existing = session.execute(stmt).first() + if existing: + # Update existing + for key, value in memory.model_dump(exclude_unset=True).items(): + if hasattr(existing[0], key): + setattr(existing[0], key, value) + else: + # Create new + new_memory = ChatflowMemoryVariable( + tenant_id=tenant_id, + **memory.model_dump(exclude={'id'}) + ) + session.add(new_memory) + session.commit() + + # In draft mode, also write to workflow_draft_variables + if is_draft: + from models.workflow import WorkflowDraftVariable + from services.workflow_draft_variable_service import WorkflowDraftVariableService + with Session(bind=db.engine) as session: + draft_var_service = WorkflowDraftVariableService(session) + + # Try to get existing variables + existing_vars = draft_var_service.get_draft_variables_by_selectors( + app_id=memory.app_id, + 
selectors=[['memory_block', memory.memory_id]] + ) + + if existing_vars: + # Update existing draft variable + draft_var = existing_vars[0] + draft_var.value = memory.value + else: + # Create new draft variable + draft_var = WorkflowDraftVariable.new_memory_block_variable( + app_id=memory.app_id, + memory_id=memory.memory_id, + name=memory.name, + value=memory.value, + description=f"Memory block: {memory.name}" + ) + session.add(draft_var) + + session.commit() + + @staticmethod + def get_memories_by_specs(memory_block_specs: Sequence[MemoryBlockSpec], + tenant_id: str, app_id: str, + conversation_id: Optional[str] = None, + node_id: Optional[str] = None, + is_draft: bool = False) -> list[MemoryBlock]: + """Get runtime memory values based on MemoryBlockSpecs with draft mode support""" + from models.enums import DraftVariableType + + if not memory_block_specs: + return [] + + # In draft mode, prefer reading from workflow_draft_variables + if is_draft: + # Try reading from the draft variables table + from services.workflow_draft_variable_service import WorkflowDraftVariableService + with Session(bind=db.engine) as session: + draft_var_service = WorkflowDraftVariableService(session) + + # Build selector list + selectors = [['memory_block', spec.id] for spec in memory_block_specs] + + # Fetch draft variables + draft_vars = draft_var_service.get_draft_variables_by_selectors( + app_id=app_id, + selectors=selectors + ) + + # If draft variables exist, prefer using them + if draft_vars: + spec_by_id = {spec.id: spec for spec in memory_block_specs} + draft_memories = [] + + for draft_var in draft_vars: + if draft_var.node_id == DraftVariableType.MEMORY_BLOCK: + spec = spec_by_id.get(draft_var.name) + if spec: + memory_block = MemoryBlock( + id=draft_var.id, + memory_id=draft_var.name, + name=spec.name, + value=draft_var.value, + scope=spec.scope, + term=spec.term, + app_id=app_id, + conversation_id='draft', + node_id=node_id + ) + draft_memories.append(memory_block) + + if 
draft_memories: + return draft_memories + + memory_ids = [spec.id for spec in memory_block_specs] + + stmt = select(ChatflowMemoryVariable).where( + and_( + ChatflowMemoryVariable.memory_id.in_(memory_ids), + ChatflowMemoryVariable.tenant_id == tenant_id, + ChatflowMemoryVariable.app_id == app_id + ) + ) + + if conversation_id: + stmt = stmt.where(ChatflowMemoryVariable.conversation_id == conversation_id) + if node_id: + stmt = stmt.where(ChatflowMemoryVariable.node_id == node_id) + + with db.session() as session: + results = session.execute(stmt).all() + found_memories = {row[0].memory_id: MemoryBlock.model_validate(row[0].__dict__) for row in results} + + # Create MemoryBlock objects for specs that don't have runtime values yet + all_memories = [] + for spec in memory_block_specs: + if spec.id in found_memories: + all_memories.append(found_memories[spec.id]) + else: + # Create default memory with template value following design rules + default_memory = MemoryBlock( + id="", # Will be assigned when saved + memory_id=spec.id, + name=spec.name, + value=spec.template, + scope=spec.scope, + term=spec.term, + # Design rules: + # - app_id=None for global (future), app_id=str for app-specific + app_id=app_id, # Always app-specific for now + # - conversation_id=None for persistent, conversation_id=str for session + conversation_id=conversation_id if spec.term == MemoryTerm.SESSION else None, + # - node_id=None for app-scope, node_id=str for node-scope + node_id=node_id if spec.scope == MemoryScope.NODE else None + ) + all_memories.append(default_memory) + + return all_memories + + @staticmethod + def get_app_memories_by_workflow(workflow, tenant_id: str, + conversation_id: Optional[str] = None) -> list[MemoryBlock]: + """Get app-scoped memories based on workflow configuration""" + from core.memory.entities import MemoryScope + + app_memory_specs = [spec for spec in workflow.memory_blocks if spec.scope == MemoryScope.APP] + return 
ChatflowMemoryService.get_memories_by_specs( + memory_block_specs=app_memory_specs, + tenant_id=tenant_id, + app_id=workflow.app_id, + conversation_id=conversation_id + ) + + @staticmethod + def get_node_memories_by_workflow(workflow, node_id: str, tenant_id: str) -> list[MemoryBlock]: + """Get node-scoped memories based on workflow configuration""" + from core.memory.entities import MemoryScope + + node_memory_specs = [ + spec for spec in workflow.memory_blocks + if spec.scope == MemoryScope.NODE and spec.id == node_id + ] + return ChatflowMemoryService.get_memories_by_specs( + memory_block_specs=node_memory_specs, + tenant_id=tenant_id, + app_id=workflow.app_id, + node_id=node_id + ) + + # Core Memory Orchestration features + + @staticmethod + def update_memory_if_needed(tenant_id: str, app_id: str, + memory_block_spec: MemoryBlockSpec, + conversation_id: str, + variable_pool: VariablePool, + is_draft: bool = False) -> bool: + """Update app-level memory if conditions are met + + Args: + tenant_id: Tenant ID + app_id: Application ID + memory_block_spec: Memory block specification + conversation_id: Conversation ID + variable_pool: Variable pool for context + is_draft: Whether in draft mode + """ + if not ChatflowMemoryService._should_update_memory( + tenant_id, app_id, memory_block_spec, conversation_id + ): + return False + + if memory_block_spec.schedule_mode == MemoryScheduleMode.SYNC: + # Sync mode: will be processed in batch after the App run completes + # This only marks the need; actual update happens in _update_app_memory_after_run + return True + else: + # Async mode: submit asynchronous update immediately + ChatflowMemoryService._submit_async_memory_update( + tenant_id, app_id, memory_block_spec, conversation_id, variable_pool, is_draft + ) + return True + + @staticmethod + def update_node_memory_if_needed(tenant_id: str, app_id: str, + memory_block_spec: MemoryBlockSpec, + node_id: str, llm_output: str, + variable_pool: VariablePool, + is_draft: bool = 
False) -> bool: + """Update node-level memory after LLM execution + + Args: + tenant_id: Tenant ID + app_id: Application ID + memory_block_spec: Memory block specification + node_id: Node ID + llm_output: LLM output content + variable_pool: Variable pool for context + is_draft: Whether in draft mode + """ + conversation_id_segment = variable_pool.get(('sys', 'conversation_id')) + if not conversation_id_segment: + return False + conversation_id = conversation_id_segment.value + + # Save LLM output to node conversation history + assistant_message = AssistantPromptMessage(content=llm_output) + ChatflowHistoryService.save_node_message( + prompt_message=assistant_message, + node_id=node_id, + conversation_id=str(conversation_id), + app_id=app_id, + tenant_id=tenant_id + ) + + if not ChatflowMemoryService._should_update_memory( + tenant_id, app_id, memory_block_spec, str(conversation_id), node_id + ): + return False + + if memory_block_spec.schedule_mode == MemoryScheduleMode.SYNC: + # Node-level sync: blocking execution + ChatflowMemoryService._update_node_memory_sync( + tenant_id, app_id, memory_block_spec, node_id, + str(conversation_id), variable_pool, is_draft + ) + else: + # Node-level async: execute asynchronously + ChatflowMemoryService._update_node_memory_async( + tenant_id, app_id, memory_block_spec, node_id, + llm_output, str(conversation_id), variable_pool, is_draft + ) + return True + + @staticmethod + def _should_update_memory(tenant_id: str, app_id: str, + memory_block_spec: MemoryBlockSpec, + conversation_id: str, node_id: Optional[str] = None) -> bool: + """Check if memory should be updated based on strategy""" + if memory_block_spec.strategy != MemoryStrategy.ON_TURNS: + return False + + # Check turn count + turn_key = f"memory_turn_count:{tenant_id}:{app_id}:{conversation_id}" + if node_id: + turn_key += f":{node_id}" + + current_turns = redis_client.get(turn_key) + current_turns = int(current_turns) if current_turns else 0 + current_turns += 1 + + # 
Update count + redis_client.set(turn_key, current_turns) + + return current_turns % memory_block_spec.update_turns == 0 + + # App-level async update method + @staticmethod + def _submit_async_memory_update(tenant_id: str, app_id: str, + block: MemoryBlockSpec, + conversation_id: str, + variable_pool: VariablePool, + is_draft: bool = False): + """Submit async memory update task""" + + # Execute update asynchronously using thread + thread = threading.Thread( + target=ChatflowMemoryService._update_single_memory, + kwargs={ + 'tenant_id': tenant_id, + 'app_id': app_id, + 'memory_block_spec': block, + 'conversation_id': conversation_id, + 'variable_pool': variable_pool, + 'is_draft': is_draft + }, + daemon=True + ) + thread.start() + + # Node-level sync update method + @staticmethod + def _update_node_memory_sync(tenant_id: str, app_id: str, + memory_block_spec: MemoryBlockSpec, + node_id: str, conversation_id: str, + variable_pool: VariablePool, + is_draft: bool = False): + """Synchronously update node memory (blocking execution)""" + ChatflowMemoryService._perform_memory_update( + tenant_id=tenant_id, + app_id=app_id, + memory_block_spec=memory_block_spec, + conversation_id=conversation_id, + variable_pool=variable_pool, + node_id=node_id, + is_draft=is_draft + ) + # Wait for update to complete before returning + + # Node-level async update method + @staticmethod + def _update_node_memory_async(tenant_id: str, app_id: str, + memory_block_spec: MemoryBlockSpec, + node_id: str, llm_output: str, + conversation_id: str, + variable_pool: VariablePool, + is_draft: bool = False): + """Asynchronously update node memory (submit task)""" + + # Execute update asynchronously using thread + thread = threading.Thread( + target=ChatflowMemoryService._perform_node_memory_update, + kwargs={ + 'memory_block_spec': memory_block_spec, + 'tenant_id': tenant_id, + 'app_id': app_id, + 'node_id': node_id, + 'llm_output': llm_output, + 'variable_pool': variable_pool, + 'is_draft': is_draft + 
}, + daemon=True + ) + thread.start() + # Return immediately without waiting + + @staticmethod + def _perform_node_memory_update(*, memory_block_spec: MemoryBlockSpec, + tenant_id: str, app_id: str, node_id: str, + llm_output: str, variable_pool: VariablePool, + is_draft: bool = False): + """Execute node memory update""" + try: + # Call existing _perform_memory_update method here + ChatflowMemoryService._perform_memory_update( + tenant_id=tenant_id, + app_id=app_id, + memory_block_spec=memory_block_spec, + conversation_id=str(variable_pool.get(('sys', 'conversation_id'))), + variable_pool=variable_pool, + node_id=node_id, + is_draft=is_draft + ) + except Exception as e: + logger.exception( + "Failed to update node memory %s for node %s", + memory_block_spec.id, + node_id, + exc_info=e + ) + + @staticmethod + def _update_single_memory(*, tenant_id: str, app_id: str, + memory_block_spec: MemoryBlockSpec, + conversation_id: str, + variable_pool: VariablePool, + is_draft: bool = False): + """Update single memory""" + ChatflowMemoryService._perform_memory_update( + tenant_id=tenant_id, + app_id=app_id, + memory_block_spec=memory_block_spec, + conversation_id=conversation_id, + variable_pool=variable_pool, + node_id=None, # App-level memory doesn't have node_id + is_draft=is_draft + ) + + @staticmethod + def _perform_memory_update(tenant_id: str, app_id: str, + memory_block_spec: MemoryBlockSpec, + conversation_id: str, variable_pool: VariablePool, + node_id: Optional[str] = None, + is_draft: bool = False): + """Perform the actual memory update using LLM + + Args: + tenant_id: Tenant ID + app_id: Application ID + memory_block_spec: Memory block specification + conversation_id: Conversation ID + variable_pool: Variable pool for context + node_id: Optional node ID for node-level memory updates + is_draft: Whether in draft mode + """ + # Get conversation history + history = ChatflowHistoryService.get_visible_chat_history( + conversation_id=conversation_id, + app_id=app_id, 
+ tenant_id=tenant_id, + node_id=node_id, # Pass node_id, if None then get app-level history + max_visible_count=memory_block_spec.preserved_turns + ) + + # Get current memory value + current_memory = ChatflowMemoryService.get_memory( + memory_id=memory_block_spec.id, + tenant_id=tenant_id, + app_id=app_id, + conversation_id=conversation_id if memory_block_spec.term == MemoryTerm.SESSION else None, + node_id=node_id + ) + + current_value = current_memory.value if current_memory else memory_block_spec.template + + # Build update prompt - adjust wording based on whether there's a node_id + context_type = "Node conversation history" if node_id else "Conversation history" + memory_update_prompt = f""" + Based on the following {context_type}, update the memory content: + + Current memory: {current_value} + + {context_type}: + {[msg.content for msg in history]} + + Update instruction: {memory_block_spec.instruction} + + Please output the updated memory content: + """ + + # Invoke LLM to update memory - extracted as a separate method + updated_value = ChatflowMemoryService._invoke_llm_for_memory_update( + tenant_id, + memory_block_spec, + memory_update_prompt, + current_value + ) + + if updated_value is None: + return # LLM invocation failed + + # Save updated memory + updated_memory = MemoryBlock( + id=current_memory.id if current_memory else "", + memory_id=memory_block_spec.id, + name=memory_block_spec.name, + value=updated_value, + scope=memory_block_spec.scope, + term=memory_block_spec.term, + app_id=app_id, + conversation_id=conversation_id if memory_block_spec.term == MemoryTerm.SESSION else None, + node_id=node_id + ) + + ChatflowMemoryService.save_memory(updated_memory, tenant_id, is_draft) + + # Not implemented yet: Send success event + # self._send_memory_update_event(memory_block_spec.id, "completed", updated_value) + + @staticmethod + def _invoke_llm_for_memory_update(tenant_id: str, + memory_block_spec: MemoryBlockSpec, + prompt: str, current_value: str) -> 
Optional[str]: + """Invoke LLM to update memory content + + Args: + tenant_id: Tenant ID + memory_block_spec: Memory block specification + prompt: Update prompt + current_value: Current memory value (used for fallback on failure) + + Returns: + Updated value, returns None if failed + """ + from core.model_manager import ModelManager + from core.model_runtime.entities.llm_entities import LLMResult + from core.model_runtime.entities.model_entities import ModelType + + model_manager = ModelManager() + + # Use model configuration defined in memory_block_spec, use default model if not specified + if hasattr(memory_block_spec, 'model') and memory_block_spec.model: + model_instance = model_manager.get_model_instance( + tenant_id=tenant_id, + model_type=ModelType.LLM, + provider=memory_block_spec.model.get("provider", ""), + model=memory_block_spec.model.get("name", "") + ) + model_parameters = memory_block_spec.model.get("completion_params", {}) + else: + # Use default model + model_instance = model_manager.get_default_model_instance( + tenant_id=tenant_id, + model_type=ModelType.LLM + ) + model_parameters = {"temperature": 0.7, "max_tokens": 1000} + + try: + response = cast( + LLMResult, + model_instance.invoke_llm( + prompt_messages=[UserPromptMessage(content=prompt)], + model_parameters=model_parameters, + stream=False + ) + ) + return response.message.get_text_content() + except Exception as e: + logger.exception("Failed to update memory using LLM", exc_info=e) + # Not implemented yet: Send failure event + # ChatflowMemoryService._send_memory_update_event(memory_block_spec.id, "failed", current_value, str(e)) + return None + + + def _send_memory_update_event(self, memory_id: str, status: str, value: str, error: str = ""): + """Send memory update event + + Note: Event system integration not implemented yet, this method is retained as a placeholder + """ + # Not implemented yet: Event system integration will be added in future versions + pass + + # App-level sync batch 
update related methods + @staticmethod + def wait_for_sync_memory_completion(workflow, conversation_id: str): + """Wait for sync memory update to complete, maximum 50 seconds + + Args: + workflow: Workflow object + conversation_id: Conversation ID + + Raises: + MemorySyncTimeoutError: Raised when timeout is reached + """ + from core.memory.entities import MemoryScope + + memory_blocks = workflow.memory_blocks + sync_memory_blocks = [ + block for block in memory_blocks + if block.scope == MemoryScope.APP and block.update_mode == "sync" + ] + + if not sync_memory_blocks: + return + + lock_key = _get_memory_sync_lock_key(workflow.app_id, conversation_id) + + # Retry up to 10 times, wait 5 seconds each time, total 50 seconds + max_retries = 10 + retry_interval = 5 + + for i in range(max_retries): + if not redis_client.exists(lock_key): + # Lock doesn't exist, can continue + return + + if i < max_retries - 1: + # Still have retry attempts, wait + time.sleep(retry_interval) + else: + # Maximum retry attempts reached, raise exception + raise MemorySyncTimeoutError( + app_id=workflow.app_id, + conversation_id=conversation_id + ) + + @staticmethod + def update_app_memory_after_run(workflow, conversation_id: str, variable_pool: VariablePool, + is_draft: bool = False): + """Update app-level memory after run completion + + Args: + workflow: Workflow object + conversation_id: Conversation ID + variable_pool: Variable pool + is_draft: Whether in draft mode + """ + from core.memory.entities import MemoryScope + + memory_blocks = workflow.memory_blocks + + # Separate sync and async memory blocks + sync_blocks = [] + async_blocks = [] + + for block in memory_blocks: + if block.scope == MemoryScope.APP: + if block.update_mode == "sync": + sync_blocks.append(block) + else: + async_blocks.append(block) + + # async mode: submit individual async tasks directly + for block in async_blocks: + ChatflowMemoryService._submit_async_memory_update( + tenant_id=workflow.tenant_id, + 
app_id=workflow.app_id, + block=block, + conversation_id=conversation_id, + variable_pool=variable_pool, + is_draft=is_draft + ) + + # sync mode: submit a batch update task + if sync_blocks: + ChatflowMemoryService._submit_sync_memory_batch_update( + workflow=workflow, + sync_blocks=sync_blocks, + conversation_id=conversation_id, + variable_pool=variable_pool, + is_draft=is_draft + ) + + @staticmethod + def _submit_sync_memory_batch_update(workflow, + sync_blocks: list[MemoryBlockSpec], + conversation_id: str, + variable_pool: VariablePool, + is_draft: bool = False): + """Submit sync memory batch update task""" + + # Execute batch update asynchronously using thread + thread = threading.Thread( + target=ChatflowMemoryService._batch_update_sync_memory, + kwargs={ + 'workflow': workflow, + 'sync_blocks': sync_blocks, + 'conversation_id': conversation_id, + 'variable_pool': variable_pool, + 'is_draft': is_draft + }, + daemon=True + ) + thread.start() + + @staticmethod + def _batch_update_sync_memory(*, workflow, + sync_blocks: list[MemoryBlockSpec], + conversation_id: str, + variable_pool: VariablePool, + is_draft: bool = False): + """Batch update sync memory (with Redis lock)""" + from concurrent.futures import ThreadPoolExecutor + + lock_key = _get_memory_sync_lock_key(workflow.app_id, conversation_id) + + # Use Redis lock context manager (30 seconds timeout) + with redis_client.lock(lock_key, timeout=30): + try: + # Update all sync memory in parallel + with ThreadPoolExecutor(max_workers=5) as executor: + futures = [] + for block in sync_blocks: + future = executor.submit( + ChatflowMemoryService._update_single_memory, + tenant_id=workflow.tenant_id, + app_id=workflow.app_id, + memory_block_spec=block, + conversation_id=conversation_id, + variable_pool=variable_pool, + is_draft=is_draft + ) + futures.append(future) + + # Wait for all updates to complete + for future in futures: + try: + future.result() + except Exception as e: + logger.exception("Failed to update 
memory", exc_info=e) + except Exception as e: + logger.exception("Failed to update sync memory for app %s", workflow.app_id, exc_info=e) From 0c97bbf1372f5302a411a76ed8e8af37ead46f68 Mon Sep 17 00:00:00 2001 From: Stream Date: Wed, 20 Aug 2025 19:12:34 +0800 Subject: [PATCH 09/82] chore: run ruff --- api/core/memory/entities.py | 6 +++--- api/models/workflow.py | 7 +++++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/api/core/memory/entities.py b/api/core/memory/entities.py index 84e6d915b9..26927c7931 100644 --- a/api/core/memory/entities.py +++ b/api/core/memory/entities.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import Optional, Dict, Any +from typing import Any, Optional from uuid import uuid4 from pydantic import BaseModel, Field @@ -42,7 +42,7 @@ class MemoryBlockSpec(BaseModel): update_turns: int = Field(gt=0, description="Number of turns between updates") preserved_turns: int = Field(gt=0, description="Number of conversation turns to preserve") schedule_mode: MemoryScheduleMode = Field(description="Synchronous or asynchronous update mode") - model: Optional[Dict[str, Any]] = Field(default=None, description="Model configuration for memory updates") + model: Optional[dict[str, Any]] = Field(default=None, description="Model configuration for memory updates") end_user_visible: bool = Field(default=False, description="Whether memory is visible to end users") end_user_editable: bool = Field(default=False, description="Whether memory is editable by end users") @@ -66,7 +66,7 @@ class MemoryBlock(BaseModel): value: str scope: MemoryScope # Derived from node_id: None=APP, str=NODE term: MemoryTerm # Derived from conversation_id: None=PERSISTENT, str=SESSION - app_id: Optional[str] = None # None=global(future), str=app-specific + app_id: str # None=global(future), str=app-specific conversation_id: Optional[str] = None # None=persistent, str=session node_id: Optional[str] = None # None=app-scope, str=node-scope created_at: Optional[str] = 
None diff --git a/api/models/workflow.py b/api/models/workflow.py index c7b0759b4f..66c64a0fd5 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -16,8 +16,11 @@ from core.file.models import File from core.memory.entities import MemoryBlockSpec from core.variables import utils as variable_utils from core.variables.variables import FloatVariable, IntegerVariable, StringVariable -from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, SYSTEM_VARIABLE_NODE_ID, \ - MEMORY_BLOCK_VARIABLE_NODE_ID +from core.workflow.constants import ( + CONVERSATION_VARIABLE_NODE_ID, + MEMORY_BLOCK_VARIABLE_NODE_ID, + SYSTEM_VARIABLE_NODE_ID, +) from core.workflow.nodes.enums import NodeType from factories.variable_factory import TypeMismatchError, build_segment_with_type from libs.datetime_utils import naive_utc_now From bbb640c9a2fcecb9d3fc31af616957cfb4c7c26e Mon Sep 17 00:00:00 2001 From: Stream Date: Wed, 20 Aug 2025 19:45:18 +0800 Subject: [PATCH 10/82] feat: add MemoryBlock to VariablePool --- api/core/workflow/entities/variable_pool.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/api/core/workflow/entities/variable_pool.py b/api/core/workflow/entities/variable_pool.py index fb0794844e..67f5551dea 100644 --- a/api/core/workflow/entities/variable_pool.py +++ b/api/core/workflow/entities/variable_pool.py @@ -46,6 +46,10 @@ class VariablePool(BaseModel): description="Conversation variables.", default_factory=list, ) + memory_blocks: Mapping[str, str] = Field( + description="Memory blocks.", + default_factory=dict, + ) def model_post_init(self, context: Any, /) -> None: # Create a mapping from field names to SystemVariableKey enum values @@ -56,6 +60,9 @@ class VariablePool(BaseModel): # Add conversation variables to the variable pool for var in self.conversation_variables: self.add((CONVERSATION_VARIABLE_NODE_ID, var.name), var) + # Add memory blocks to the variable pool + for memory_id, memory_value in self.memory_blocks.items(): + 
self.add(['memory_block', memory_id], memory_value) def add(self, selector: Sequence[str], value: Any, /) -> None: """ From 8341b8b1c1c77dab6e2cc4ca2df3efa60755cb90 Mon Sep 17 00:00:00 2001 From: Stream Date: Wed, 20 Aug 2025 19:53:44 +0800 Subject: [PATCH 11/82] feat: add MemoryBlock config to LLM's memory config --- api/core/prompt/entities/advanced_prompt_entities.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/api/core/prompt/entities/advanced_prompt_entities.py b/api/core/prompt/entities/advanced_prompt_entities.py index c8e7b414df..ba7fee8ada 100644 --- a/api/core/prompt/entities/advanced_prompt_entities.py +++ b/api/core/prompt/entities/advanced_prompt_entities.py @@ -45,6 +45,13 @@ class MemoryConfig(BaseModel): enabled: bool size: Optional[int] = None + mode: Optional[Literal["linear", "block"]] = "linear" + block_id: Optional[list[str]] = None # available only in block mode + role_prefix: Optional[RolePrefix] = None window: WindowConfig query_prompt_template: Optional[str] = None + + @property + def is_block_mode(self) -> bool: + return self.mode == "block" and bool(self.block_id) From 5a26ebec8f0922b5270a517aa4aa69ba3b2b06c6 Mon Sep 17 00:00:00 2001 From: Stream Date: Thu, 21 Aug 2025 11:28:47 +0800 Subject: [PATCH 12/82] feat: add _fetch_memory_blocks for AdvancedChatAppRunner --- api/core/app/apps/advanced_chat/app_runner.py | 35 ++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py index 3de2f5ca9e..89d656dc0a 100644 --- a/api/core/app/apps/advanced_chat/app_runner.py +++ b/api/core/app/apps/advanced_chat/app_runner.py @@ -1,5 +1,5 @@ import logging -from collections.abc import Mapping +from collections.abc import Mapping, MutableMapping from typing import Any, Optional, cast from sqlalchemy import select @@ -20,6 +20,7 @@ from core.app.entities.queue_entities import ( QueueTextChunkEvent, ) from 
core.app.features.annotation_reply.annotation_reply import AnnotationReplyFeature +from core.memory.entities import MemoryScope from core.moderation.base import ModerationError from core.moderation.input_moderation import InputModeration from core.variables.variables import VariableUnion @@ -33,6 +34,7 @@ from models import Workflow from models.enums import UserFrom from models.model import App, Conversation, Message, MessageAnnotation from models.workflow import ConversationVariable, WorkflowType +from services.chatflow_memory_service import ChatflowMemoryService logger = logging.getLogger(__name__) @@ -371,3 +373,34 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): # Return combined list return existing_variables + new_variables + + def _fetch_memory_blocks(self) -> Mapping[str, str]: + """fetch all memory blocks for current app""" + + memory_blocks_dict: MutableMapping[str, str] = {} + is_draft = (self.application_generate_entity.invoke_from == InvokeFrom.DEBUGGER) + conversation_id = self.conversation.id + memory_block_specs = self._workflow.memory_blocks + # Get runtime memory values + memories = ChatflowMemoryService.get_memories_by_specs( + memory_block_specs=memory_block_specs, + tenant_id=self._workflow.tenant_id, + app_id=self._workflow.app_id, + conversation_id=conversation_id, + is_draft=is_draft + ) + + # Build memory_id -> value mapping + for memory in memories: + if memory.scope == MemoryScope.APP: + # App level: use memory_id directly + memory_blocks_dict[memory.memory_id] = memory.value + else: # NODE scope + node_id = memory.node_id + if not node_id: + logger.warning("Memory block %s has no node_id, skip.", memory.memory_id) + continue + key = f"{node_id}.{memory.memory_id}" + memory_blocks_dict[key] = memory.value + + return memory_blocks_dict From 7b602e900321af3bbf613244f0f045b9df212997 Mon Sep 17 00:00:00 2001 From: Stream Date: Thu, 21 Aug 2025 11:32:27 +0800 Subject: [PATCH 13/82] feat: wait for sync memory update in 
AdvancedChatAppRunner.run --- api/core/app/apps/advanced_chat/app_runner.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py index 89d656dc0a..f081101df1 100644 --- a/api/core/app/apps/advanced_chat/app_runner.py +++ b/api/core/app/apps/advanced_chat/app_runner.py @@ -21,6 +21,7 @@ from core.app.entities.queue_entities import ( ) from core.app.features.annotation_reply.annotation_reply import AnnotationReplyFeature from core.memory.entities import MemoryScope +from core.memory.errors import MemorySyncTimeoutError from core.moderation.base import ModerationError from core.moderation.input_moderation import InputModeration from core.variables.variables import VariableUnion @@ -71,6 +72,11 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): self._app = app def run(self) -> None: + ChatflowMemoryService.wait_for_sync_memory_completion( + workflow=self._workflow, + conversation_id=self.conversation.id + ) + app_config = self.application_generate_entity.app_config app_config = cast(AdvancedChatAppConfig, app_config) From a13cb7e1c546f800d9b0130d67e6496b8b369b6d Mon Sep 17 00:00:00 2001 From: Stream Date: Thu, 21 Aug 2025 11:40:30 +0800 Subject: [PATCH 14/82] feat: init memory block for VariablePool in AdvancedChatAppRunner.run --- api/core/app/apps/advanced_chat/app_runner.py | 1 + 1 file changed, 1 insertion(+) diff --git a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py index f081101df1..4ba477682f 100644 --- a/api/core/app/apps/advanced_chat/app_runner.py +++ b/api/core/app/apps/advanced_chat/app_runner.py @@ -149,6 +149,7 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): # Based on the definition of `VariableUnion`, # `list[Variable]` can be safely used as `list[VariableUnion]` since they are compatible. 
conversation_variables=cast(list[VariableUnion], conversation_variables), + memory_blocks=self._fetch_memory_blocks(), ) # init graph From 97cd21d3beac59e6a95d605be7efa69022d66ea4 Mon Sep 17 00:00:00 2001 From: Stream Date: Thu, 21 Aug 2025 13:03:19 +0800 Subject: [PATCH 15/82] feat: sync conversation history with `chatflow_` tables in chatflow --- api/core/app/apps/advanced_chat/app_runner.py | 40 ++++++++++++++++++- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py index 4ba477682f..4a672bdf20 100644 --- a/api/core/app/apps/advanced_chat/app_runner.py +++ b/api/core/app/apps/advanced_chat/app_runner.py @@ -1,6 +1,6 @@ import logging from collections.abc import Mapping, MutableMapping -from typing import Any, Optional, cast +from typing import Any, Optional, cast, override from sqlalchemy import select from sqlalchemy.orm import Session @@ -21,12 +21,13 @@ from core.app.entities.queue_entities import ( ) from core.app.features.annotation_reply.annotation_reply import AnnotationReplyFeature from core.memory.entities import MemoryScope -from core.memory.errors import MemorySyncTimeoutError +from core.model_runtime.entities import AssistantPromptMessage, UserPromptMessage from core.moderation.base import ModerationError from core.moderation.input_moderation import InputModeration from core.variables.variables import VariableUnion from core.workflow.callbacks import WorkflowCallback, WorkflowLoggingCallback from core.workflow.entities.variable_pool import VariablePool +from core.workflow.graph_engine.entities.event import GraphRunSucceededEvent from core.workflow.system_variable import SystemVariable from core.workflow.variable_loader import VariableLoader from core.workflow.workflow_entry import WorkflowEntry @@ -35,6 +36,7 @@ from models import Workflow from models.enums import UserFrom from models.model import App, Conversation, Message, MessageAnnotation from 
models.workflow import ConversationVariable, WorkflowType +from services.chatflow_history_service import ChatflowHistoryService from services.chatflow_memory_service import ChatflowMemoryService logger = logging.getLogger(__name__) @@ -183,6 +185,23 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): for event in generator: self._handle_event(workflow_entry, event) + @override + def _handle_event(self, workflow_entry: WorkflowEntry, event: Any) -> None: + super()._handle_event(workflow_entry, event) + if isinstance(event, GraphRunSucceededEvent): + workflow_outputs = event.outputs + if not workflow_outputs: + logger.warning("Chatflow output is empty.") + return + assistant_message = workflow_outputs.get('answer') + if not assistant_message: + logger.warning("Chatflow output does not contain 'answer'.") + return + try: + self._sync_conversation_to_chatflow_tables(assistant_message) + except Exception as e: + logger.exception("Failed to sync conversation to memory tables", exc_info=e) + def handle_input_moderation( self, app_record: App, @@ -411,3 +430,20 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): memory_blocks_dict[key] = memory.value return memory_blocks_dict + + def _sync_conversation_to_chatflow_tables(self, assistant_message: str): + # Get user input and AI response + user_message = self.application_generate_entity.query + + ChatflowHistoryService.save_app_message( + prompt_message=UserPromptMessage(content=user_message), + conversation_id=self.conversation.id, + app_id=self._workflow.app_id, + tenant_id=self._workflow.tenant_id + ) + ChatflowHistoryService.save_app_message( + prompt_message=AssistantPromptMessage(content=assistant_message), + conversation_id=self.conversation.id, + app_id=self._workflow.app_id, + tenant_id=self._workflow.tenant_id + ) From 7ffcf8dd6f6683f00ee55738ccf25b5008bdd98e Mon Sep 17 00:00:00 2001 From: Stream Date: Thu, 21 Aug 2025 13:27:00 +0800 Subject: [PATCH 16/82] feat: add memory update check in 
AdvancedChatAppRunner --- api/core/app/apps/advanced_chat/app_runner.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py index 4a672bdf20..eddaacc7b3 100644 --- a/api/core/app/apps/advanced_chat/app_runner.py +++ b/api/core/app/apps/advanced_chat/app_runner.py @@ -185,6 +185,11 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): for event in generator: self._handle_event(workflow_entry, event) + try: + self._check_app_memory_updates() + except Exception as e: + logger.exception("Failed to check app memory updates", exc_info=e) + @override def _handle_event(self, workflow_entry: WorkflowEntry, event: Any) -> None: super()._handle_event(workflow_entry, event) @@ -447,3 +452,16 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): app_id=self._workflow.app_id, tenant_id=self._workflow.tenant_id ) + + def _check_app_memory_updates(self): + from core.app.entities.app_invoke_entities import InvokeFrom + from services.chatflow_memory_service import ChatflowMemoryService + + is_draft = (self.application_generate_entity.invoke_from == InvokeFrom.DEBUGGER) + + ChatflowMemoryService.update_app_memory_after_run( + workflow=self._workflow, + conversation_id=self.conversation.id, + variable_pool=VariablePool(), # Make a fake pool to satisfy the signature + is_draft=is_draft + ) From 635c4ed4ce0c5d0eb2286b37d0e61b5098efbefd Mon Sep 17 00:00:00 2001 From: Stream Date: Thu, 21 Aug 2025 14:24:17 +0800 Subject: [PATCH 17/82] feat: add memory update check in AdvancedChatAppRunner --- api/core/workflow/nodes/llm/node.py | 78 ++++++++++++++++++++++++++++- 1 file changed, 77 insertions(+), 1 deletion(-) diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index dfc2a0000b..07b725899d 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -5,11 +5,15 @@ import logging from collections.abc import 
Generator, Mapping, Sequence from typing import TYPE_CHECKING, Any, Optional -from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity +from sqlalchemy import select +from sqlalchemy.orm import Session + +from core.app.entities.app_invoke_entities import InvokeFrom, ModelConfigWithCredentialsEntity from core.file import FileType, file_manager from core.helper.code_executor import CodeExecutor, CodeLanguage from core.llm_generator.output_parser.errors import OutputParserError from core.llm_generator.output_parser.structured_output import invoke_llm_with_structured_output +from core.memory.entities import MemoryScope from core.memory.token_buffer_memory import TokenBufferMemory from core.model_manager import ModelInstance, ModelManager from core.model_runtime.entities import ( @@ -67,6 +71,8 @@ from core.workflow.nodes.event import ( RunStreamChunkEvent, ) from core.workflow.utils.variable_template_parser import VariableTemplateParser +from models import Workflow, db +from services.chatflow_memory_service import ChatflowMemoryService from . 
import llm_utils from .entities import ( @@ -290,6 +296,11 @@ class LLMNode(BaseNode): if self._file_outputs is not None: outputs["files"] = ArrayFileSegment(value=self._file_outputs) + try: + self._handle_chatflow_memory(result_text, variable_pool) + except Exception as e: + logger.warning("Memory orchestration failed for node %s: %s", self.node_id, str(e)) + yield RunCompletedEvent( run_result=NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, @@ -1078,6 +1089,71 @@ class LLMNode(BaseNode): def retry(self) -> bool: return self._node_data.retry_config.retry_enabled + def _handle_chatflow_memory(self, llm_output: str, variable_pool: VariablePool): + if not self._node_data.memory or self._node_data.memory.mode != "block": + return + + conversation_id_segment = variable_pool.get((SYSTEM_VARIABLE_NODE_ID, SystemVariableKey.CONVERSATION_ID)) + if not conversation_id_segment: + raise ValueError("Conversation ID not found in variable pool.") + conversation_id = conversation_id_segment.text + + user_query_segment = variable_pool.get((SYSTEM_VARIABLE_NODE_ID, SystemVariableKey.QUERY)) + if not user_query_segment: + raise ValueError("User query not found in variable pool.") + user_query = user_query_segment.text + + from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage + from services.chatflow_history_service import ChatflowHistoryService + + ChatflowHistoryService.save_node_message( + prompt_message=(UserPromptMessage(content=user_query)), + node_id=self.node_id, + conversation_id=conversation_id, + app_id=self.app_id, + tenant_id=self.tenant_id + ) + ChatflowHistoryService.save_node_message( + prompt_message=(AssistantPromptMessage(content=llm_output)), + node_id=self.node_id, + conversation_id=conversation_id, + app_id=self.app_id, + tenant_id=self.tenant_id + ) + + memory_config = self._node_data.memory + if not memory_config: + return + block_ids = memory_config.block_id + if not block_ids: + return + + # FIXME: 
This is dirty workaround and may cause incorrect resolution for workflow version + with Session(db.engine) as session: + stmt = select(Workflow).where( + Workflow.tenant_id == self.tenant_id, + Workflow.app_id == self.app_id + ) + workflow = session.scalars(stmt).first() + if not workflow: + raise ValueError("Workflow not found.") + memory_blocks = workflow.memory_blocks + + for block_id in block_ids: + memory_block_spec = next((block for block in memory_blocks if block.id == block_id),None) + + if memory_block_spec and memory_block_spec.scope == MemoryScope.NODE: + is_draft = (self.invoke_from == InvokeFrom.DEBUGGER) + ChatflowMemoryService.update_node_memory_if_needed( + tenant_id=self.tenant_id, + app_id=self.app_id, + memory_block_spec=memory_block_spec, + node_id=self.node_id, + llm_output=llm_output, + variable_pool=variable_pool, + is_draft=is_draft + ) + def _combine_message_content_with_role( *, contents: Optional[str | list[PromptMessageContentUnionTypes]] = None, role: PromptMessageRole From 4b085d46f61e5a13996faa2c84e5164095451924 Mon Sep 17 00:00:00 2001 From: Stream Date: Thu, 21 Aug 2025 15:15:23 +0800 Subject: [PATCH 18/82] feat: update variable pool when update memory --- api/services/chatflow_memory_service.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 781a9ae1ea..28b4d2c095 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -17,6 +17,7 @@ from core.memory.entities import ( ) from core.memory.errors import MemorySyncTimeoutError from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage +from core.workflow.constants import MEMORY_BLOCK_VARIABLE_NODE_ID from core.workflow.entities.variable_pool import VariablePool from extensions.ext_database import db from extensions.ext_redis import redis_client @@ -74,8 +75,12 @@ class 
ChatflowMemoryService: return None @staticmethod - def save_memory(memory: MemoryBlock, tenant_id: str, is_draft: bool = False) -> None: + def save_memory(memory: MemoryBlock, tenant_id: str, variable_pool: VariablePool, is_draft: bool = False) -> None: """Save or update memory with draft mode support""" + + key = f"{memory.node_id}:{memory.memory_id}" if memory.node_id else memory.memory_id + variable_pool.add([MEMORY_BLOCK_VARIABLE_NODE_ID, key], memory.value) + stmt = select(ChatflowMemoryVariable).where( and_( ChatflowMemoryVariable.memory_id == memory.memory_id, @@ -552,7 +557,7 @@ class ChatflowMemoryService: node_id=node_id ) - ChatflowMemoryService.save_memory(updated_memory, tenant_id, is_draft) + ChatflowMemoryService.save_memory(updated_memory, tenant_id, variable_pool, is_draft) # Not implemented yet: Send success event # self._send_memory_update_event(memory_block_spec.id, "completed", updated_value) From 1fa8b26e557e7041f35c72f88c74e4da08906a13 Mon Sep 17 00:00:00 2001 From: Stream Date: Thu, 21 Aug 2025 15:17:25 +0800 Subject: [PATCH 19/82] feat: fetch memory block from WorkflowDraftVariable when debugging single node --- api/services/workflow_service.py | 37 ++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index d2715a61fe..05c924d8c0 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -12,6 +12,7 @@ from core.app.app_config.entities import VariableEntityType from core.app.apps.advanced_chat.app_config_manager import AdvancedChatAppConfigManager from core.app.apps.workflow.app_config_manager import WorkflowAppConfigManager from core.file import File +from core.memory.entities import MemoryScope from core.repositories import DifyCoreRepositoryFactory from core.variables import Variable from core.variables.variables import VariableUnion @@ -45,6 +46,7 @@ from models.workflow import ( from 
repositories.factory import DifyAPIRepositoryFactory from services.errors.app import IsDraftWorkflowError, WorkflowHashNotEqualError from services.workflow.workflow_converter import WorkflowConverter +from .chatflow_memory_service import ChatflowMemoryService from .errors.workflow_service import DraftWorkflowDeletionError, WorkflowInUseError from .workflow_draft_variable_service import ( @@ -361,17 +363,10 @@ class WorkflowService: tenant_id=draft_workflow.tenant_id, start_node_data=start_data, user_inputs=user_inputs ) # init variable pool - variable_pool = _setup_variable_pool( - query=query, - files=files or [], - user_id=account.id, - user_inputs=user_inputs, - workflow=draft_workflow, - # NOTE(QuantumGhost): We rely on `DraftVarLoader` to load conversation variables. - conversation_variables=[], - node_type=node_type, - conversation_id=conversation_id, - ) + variable_pool = _setup_variable_pool(query=query, files=files or [], user_id=account.id, + user_inputs=user_inputs, workflow=draft_workflow, + node_type=node_type, conversation_id=conversation_id, + conversation_variables=[], is_draft=True) else: variable_pool = VariablePool( @@ -688,6 +683,7 @@ def _setup_variable_pool( node_type: NodeType, conversation_id: str, conversation_variables: list[Variable], + is_draft: bool ): # Only inject system variables for START node type. if node_type == NodeType.START: @@ -715,6 +711,7 @@ def _setup_variable_pool( # Based on the definition of `VariableUnion`, # `list[Variable]` can be safely used as `list[VariableUnion]` since they are compatible. 
conversation_variables=cast(list[VariableUnion], conversation_variables), # + memory_blocks=_fetch_memory_blocks(workflow, conversation_id, is_draft=is_draft), ) return variable_pool @@ -751,3 +748,21 @@ def _rebuild_single_file(tenant_id: str, value: Any, variable_entity_type: Varia return build_from_mappings(mappings=value, tenant_id=tenant_id) else: raise Exception("unreachable") + +def _fetch_memory_blocks(workflow: Workflow, conversation_id: str, is_draft: bool) -> Mapping[str, str]: + memory_blocks = {} + memory_block_specs = workflow.memory_blocks + memories = ChatflowMemoryService.get_memories_by_specs( + memory_block_specs=memory_block_specs, + tenant_id=workflow.tenant_id, + app_id=workflow.app_id, + conversation_id=conversation_id, + is_draft=is_draft, + ) + for memory in memories: + if memory.scope == MemoryScope.APP: + memory_blocks[memory.memory_id] = memory.value + else: # NODE scope + memory_blocks[f"{memory.node_id}.{memory.memory_id}"] = memory.value + + return memory_blocks From 0d95c2192e9aea5b97d3a9cae497b2b2ebab75c0 Mon Sep 17 00:00:00 2001 From: Stream Date: Thu, 21 Aug 2025 17:17:08 +0800 Subject: [PATCH 20/82] feat: add Web API for memory read and modify --- api/controllers/web/chatflow_memory.py | 57 +++++++++++++++++++++++++ api/core/memory/entities.py | 7 +++ api/services/chatflow_memory_service.py | 56 ++++++++++++++++++++++-- 3 files changed, 116 insertions(+), 4 deletions(-) create mode 100644 api/controllers/web/chatflow_memory.py diff --git a/api/controllers/web/chatflow_memory.py b/api/controllers/web/chatflow_memory.py new file mode 100644 index 0000000000..92b259c498 --- /dev/null +++ b/api/controllers/web/chatflow_memory.py @@ -0,0 +1,57 @@ +from flask_restful import reqparse +from sqlalchemy.orm.session import Session + +from controllers.web import api +from controllers.web.wraps import WebApiResource +from libs.helper import uuid_value +from models import db +from models.chatflow_memory import ChatflowMemoryVariable +from 
services.chatflow_memory_service import ChatflowMemoryService +from services.workflow_service import WorkflowService + + +class MemoryListApi(WebApiResource): + def get(self, app_model): + parser = reqparse.RequestParser() + parser.add_argument("conversation_id", required=False, type=uuid_value, location="args") + args = parser.parse_args() + conversation_id = args.get("conversation_id") + + result = ChatflowMemoryService.get_persistent_memories(app_model) + if conversation_id: + result = [*result, *ChatflowMemoryService.get_session_memories(app_model, conversation_id)] + + return [it for it in result if it.end_user_visible] + +class MemoryEditApi(WebApiResource): + def put(self, app_model): + parser = reqparse.RequestParser() + parser.add_argument('id', type=str, required=True) + parser.add_argument('node_id', type=str, required=False) + parser.add_argument('update', type=str, required=True) + args = parser.parse_args() + workflow = WorkflowService().get_published_workflow(app_model) + if not workflow: + return {'error': 'Workflow not found'}, 404 + memory_spec = next((it for it in workflow.memory_blocks if it.id == args['id']), None) + if not memory_spec: + return {'error': 'Memory not found'}, 404 + if not memory_spec.end_user_editable: + return {'error': 'Memory not editable'}, 403 + with Session(db.engine) as session: + ChatflowMemoryVariable( + tenant_id=app_model.tenant_id, + app_id=app_model.id, + node_id=args['node_id'], + memory_id=args['id'], + name=memory_spec.name, + value=args['update'], + scope=memory_spec.scope, + term=memory_spec.term, + ) + session.add(memory_spec) + session.commit() + return '', 204 + +api.add_resource(MemoryListApi, '/memories') +api.add_resource(MemoryEditApi, '/memory-edit') diff --git a/api/core/memory/entities.py b/api/core/memory/entities.py index 26927c7931..175df321fc 100644 --- a/api/core/memory/entities.py +++ b/api/core/memory/entities.py @@ -92,6 +92,13 @@ class MemoryBlock(BaseModel): """Check if this is node-level 
scope""" return self.node_id is not None +class MemoryBlockWithVisibility(BaseModel): + id: str + name: str + value: str + end_user_visible: bool + end_user_editable: bool + class ChatflowConversationMetadata(BaseModel): """Metadata for chatflow conversation with visible message count""" diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 28b4d2c095..dc06397da1 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -10,6 +10,7 @@ from sqlalchemy.orm import Session from core.memory.entities import ( MemoryBlock, MemoryBlockSpec, + MemoryBlockWithVisibility, MemoryScheduleMode, MemoryScope, MemoryStrategy, @@ -21,15 +22,13 @@ from core.workflow.constants import MEMORY_BLOCK_VARIABLE_NODE_ID from core.workflow.entities.variable_pool import VariablePool from extensions.ext_database import db from extensions.ext_redis import redis_client +from models import App from models.chatflow_memory import ChatflowMemoryVariable from services.chatflow_history_service import ChatflowHistoryService +from services.workflow_service import WorkflowService logger = logging.getLogger(__name__) -# Important note: Since Dify uses gevent, we don't need an extra task queue (e.g., Celery). -# Threads created via threading.Thread are automatically patched into greenlets in a gevent environment, -# enabling efficient asynchronous execution. - def _get_memory_sync_lock_key(app_id: str, conversation_id: str) -> str: """Generate Redis lock key for memory sync updates @@ -48,6 +47,32 @@ class ChatflowMemoryService: All methods are static and do not require instantiation. 
""" + @staticmethod + def get_persistent_memories(app: App) -> Sequence[MemoryBlockWithVisibility]: + stmt = select(ChatflowMemoryVariable).where( + and_( + ChatflowMemoryVariable.tenant_id == app.tenant_id, + ChatflowMemoryVariable.app_id == app.id, + ChatflowMemoryVariable.conversation_id == None + ) + ) + with db.session() as session: + db_results = session.execute(stmt).all() + return ChatflowMemoryService._with_visibility(app, [result[0] for result in db_results]) + + @staticmethod + def get_session_memories(app: App, conversation_id: str) -> Sequence[MemoryBlockWithVisibility]: + stmt = select(ChatflowMemoryVariable).where( + and_( + ChatflowMemoryVariable.tenant_id == app.tenant_id, + ChatflowMemoryVariable.app_id == app.id, + ChatflowMemoryVariable.conversation_id == conversation_id + ) + ) + with db.session() as session: + db_results = session.execute(stmt).all() + return ChatflowMemoryService._with_visibility(app, [result[0] for result in db_results]) + @staticmethod def get_memory(memory_id: str, tenant_id: str, app_id: Optional[str] = None, @@ -347,6 +372,29 @@ class ChatflowMemoryService: ) return True + @staticmethod + def _with_visibility( + app: App, + raw_results: Sequence[ChatflowMemoryVariable] + ) -> Sequence[MemoryBlockWithVisibility]: + workflow = WorkflowService().get_published_workflow(app) + if not workflow: + return [] + results = [] + for db_result in raw_results: + spec = next((spec for spec in workflow.memory_blocks if spec.id == db_result.memory_id), None) + if spec: + results.append( + MemoryBlockWithVisibility( + id=db_result.memory_id, + name=db_result.name, + value=db_result.value, + end_user_editable=spec.end_user_editable, + end_user_visible=spec.end_user_visible, + ) + ) + return results + @staticmethod def _should_update_memory(tenant_id: str, app_id: str, memory_block_spec: MemoryBlockSpec, From e31e4ab67777568563ef5223985d0f8e788530fe Mon Sep 17 00:00:00 2001 From: Stream Date: Thu, 21 Aug 2025 17:22:39 +0800 Subject: [PATCH 
21/82] feat: add Service API for memory read and modify --- .../service_api/app/chatflow_memory.py | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 api/controllers/service_api/app/chatflow_memory.py diff --git a/api/controllers/service_api/app/chatflow_memory.py b/api/controllers/service_api/app/chatflow_memory.py new file mode 100644 index 0000000000..1a7ab6733e --- /dev/null +++ b/api/controllers/service_api/app/chatflow_memory.py @@ -0,0 +1,57 @@ +from flask_restful import Resource, reqparse +from sqlalchemy.orm import Session + +from controllers.service_api import api +from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token +from libs.helper import uuid_value +from models import db +from models.chatflow_memory import ChatflowMemoryVariable +from services.chatflow_memory_service import ChatflowMemoryService +from services.workflow_service import WorkflowService + + +class MemoryListApi(Resource): + @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True)) + def get(self, app_model): + parser = reqparse.RequestParser() + parser.add_argument("conversation_id", required=False, type=uuid_value, location="args") + args = parser.parse_args() + conversation_id = args.get("conversation_id") + + result = ChatflowMemoryService.get_persistent_memories(app_model) + if conversation_id: + result = [*result, *ChatflowMemoryService.get_session_memories(app_model, conversation_id)] + + return result + +class MemoryEditApi(Resource): + @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True)) + def put(self, app_model): + parser = reqparse.RequestParser() + parser.add_argument('id', type=str, required=True) + parser.add_argument('node_id', type=str, required=False) + parser.add_argument('update', type=str, required=True) + args = parser.parse_args() + workflow = WorkflowService().get_published_workflow(app_model) + if not workflow: + return 
{'error': 'Workflow not found'}, 404 + memory_spec = next((it for it in workflow.memory_blocks if it.id == args['id']), None) + if not memory_spec: + return {'error': 'Memory not found'}, 404 + with Session(db.engine) as session: + ChatflowMemoryVariable( + tenant_id=app_model.tenant_id, + app_id=app_model.id, + node_id=args['node_id'], + memory_id=args['id'], + name=memory_spec.name, + value=args['update'], + scope=memory_spec.scope, + term=memory_spec.term, + ) + session.add(memory_spec) + session.commit() + return '', 204 + +api.add_resource(MemoryListApi, '/memories') +api.add_resource(MemoryEditApi, '/memory-edit') From 85a73181cc0fd5eaca22ad8337cbfbc7213fcac3 Mon Sep 17 00:00:00 2001 From: Stream Date: Thu, 21 Aug 2025 17:23:24 +0800 Subject: [PATCH 22/82] chore: run ruff --- api/services/workflow_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index 05c924d8c0..87bbc4577c 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -46,8 +46,8 @@ from models.workflow import ( from repositories.factory import DifyAPIRepositoryFactory from services.errors.app import IsDraftWorkflowError, WorkflowHashNotEqualError from services.workflow.workflow_converter import WorkflowConverter -from .chatflow_memory_service import ChatflowMemoryService +from .chatflow_memory_service import ChatflowMemoryService from .errors.workflow_service import DraftWorkflowDeletionError, WorkflowInUseError from .workflow_draft_variable_service import ( DraftVariableSaver, From f72ed4898cdcef32cd904263a22c3e0043757fa7 Mon Sep 17 00:00:00 2001 From: Stream Date: Fri, 22 Aug 2025 14:57:27 +0800 Subject: [PATCH 23/82] refactor: refactor from ChatflowHistoryService and ChatflowMemoryService --- api/core/llm_generator/llm_generator.py | 38 +- api/core/llm_generator/prompts.py | 15 + api/core/memory/entities.py | 9 +- api/core/workflow/nodes/llm/node.py | 4 +- 
api/services/chatflow_history_service.py | 127 ++----- api/services/chatflow_memory_service.py | 427 +++++++---------------- 6 files changed, 220 insertions(+), 400 deletions(-) diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index 64fc3a3e80..e3dc5f4e56 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -2,7 +2,7 @@ import json import logging import re from collections.abc import Sequence -from typing import Optional, cast +from typing import Optional, cast, Mapping import json_repair @@ -16,8 +16,9 @@ from core.llm_generator.prompts import ( LLM_MODIFY_PROMPT_SYSTEM, PYTHON_CODE_GENERATOR_PROMPT_TEMPLATE, SYSTEM_STRUCTURED_OUTPUT_GENERATE, - WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE, + WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE, MEMORY_UPDATE_PROMPT, ) +from core.memory.entities import MemoryBlock, MemoryBlockSpec from core.model_manager import ModelManager from core.model_runtime.entities.llm_entities import LLMResult from core.model_runtime.entities.message_entities import SystemPromptMessage, UserPromptMessage @@ -572,3 +573,36 @@ class LLMGenerator: except Exception as e: logging.exception("Failed to invoke LLM model, model: " + json.dumps(model_config.get("name")), exc_info=e) return {"error": f"An unexpected error occurred: {str(e)}"} + + @staticmethod + def update_memory_block( + tenant_id: str, + visible_history: Mapping[str, str], + memory_block: MemoryBlock, + memory_spec: MemoryBlockSpec + ) -> str: + model_instance = ModelManager().get_model_instance( + tenant_id=tenant_id, + provider=memory_spec.model.provider, + model=memory_spec.model.name, + model_type=ModelType.LLM, + ) + formatted_history = "" + for sender, message in visible_history.items(): + formatted_history += f"{sender}: {message}\n" + formatted_prompt = PromptTemplateParser(MEMORY_UPDATE_PROMPT).format( + inputs={ + "formatted_history": formatted_history, + "current_value": 
memory_block.value, + "instruction": memory_spec.instruction, + } + ) + llm_result = cast( + LLMResult, + model_instance.invoke_llm( + prompt_messages=[UserPromptMessage(content=formatted_prompt)], + model_parameters={"temperature": 0.01, "max_tokens": 2000}, + stream=False, + ) + ) + return llm_result.message.get_text_content() diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py index e38828578a..710ffe54f2 100644 --- a/api/core/llm_generator/prompts.py +++ b/api/core/llm_generator/prompts.py @@ -422,3 +422,18 @@ INSTRUCTION_GENERATE_TEMPLATE_PROMPT = """The output of this prompt is not as ex You should edit the prompt according to the IDEAL OUTPUT.""" INSTRUCTION_GENERATE_TEMPLATE_CODE = """Please fix the errors in the {{#error_message#}}.""" + +MEMORY_UPDATE_PROMPT = """ +Based on the following conversation history, update the memory content: + +Conversation history: +{{formatted_history}} + +Current memory: +{{current_value}} + +Update instruction: +{{instruction}} + +Please output only the updated memory content, no other text like greeting: +""" # noqa: E501 diff --git a/api/core/memory/entities.py b/api/core/memory/entities.py index 175df321fc..f4faf44160 100644 --- a/api/core/memory/entities.py +++ b/api/core/memory/entities.py @@ -1,9 +1,12 @@ +from datetime import datetime from enum import Enum from typing import Any, Optional from uuid import uuid4 from pydantic import BaseModel, Field +from core.app.app_config.entities import ModelConfig + class MemoryScope(str, Enum): """Memory scope determined by node_id field""" @@ -42,7 +45,7 @@ class MemoryBlockSpec(BaseModel): update_turns: int = Field(gt=0, description="Number of turns between updates") preserved_turns: int = Field(gt=0, description="Number of conversation turns to preserve") schedule_mode: MemoryScheduleMode = Field(description="Synchronous or asynchronous update mode") - model: Optional[dict[str, Any]] = Field(default=None, description="Model configuration for 
memory updates") + model: ModelConfig = Field(description="Model configuration for memory updates") end_user_visible: bool = Field(default=False, description="Whether memory is visible to end users") end_user_editable: bool = Field(default=False, description="Whether memory is editable by end users") @@ -69,8 +72,8 @@ class MemoryBlock(BaseModel): app_id: str # None=global(future), str=app-specific conversation_id: Optional[str] = None # None=persistent, str=session node_id: Optional[str] = None # None=app-scope, str=node-scope - created_at: Optional[str] = None - updated_at: Optional[str] = None + created_at: Optional[datetime] = None + updated_at: Optional[datetime] = None @property def is_global(self) -> bool: diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 07b725899d..1ac66bc0f3 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -1147,9 +1147,9 @@ class LLMNode(BaseNode): ChatflowMemoryService.update_node_memory_if_needed( tenant_id=self.tenant_id, app_id=self.app_id, - memory_block_spec=memory_block_spec, node_id=self.node_id, - llm_output=llm_output, + conversation_id=conversation_id, + memory_block_spec=memory_block_spec, variable_pool=variable_pool, is_draft=is_draft ) diff --git a/api/services/chatflow_history_service.py b/api/services/chatflow_history_service.py index 828758ee85..3612daed3d 100644 --- a/api/services/chatflow_history_service.py +++ b/api/services/chatflow_history_service.py @@ -1,7 +1,7 @@ import json import time from collections.abc import Sequence -from typing import Literal, Optional, overload +from typing import Literal, Optional, overload, MutableMapping from sqlalchemy import Row, Select, and_, func, select from sqlalchemy.orm import Session @@ -17,15 +17,6 @@ from models.chatflow_memory import ChatflowConversation, ChatflowMessage class ChatflowHistoryService: - """ - Service layer for managing chatflow conversation history. 
- - This unified service handles all chatflow memory operations: - - Reading visible chat history with version control - - Saving messages to append-only table - - Managing visible_count metadata - - Supporting both app-level and node-level scoping - """ @staticmethod def get_visible_chat_history( @@ -35,18 +26,7 @@ class ChatflowHistoryService: node_id: Optional[str] = None, max_visible_count: Optional[int] = None ) -> Sequence[PromptMessage]: - """ - Get visible chat history based on metadata visible_count. - - Args: - conversation_id: Original conversation ID - node_id: None for app-level, specific node_id for node-level - max_visible_count: Override visible_count for memory update operations - - Returns: - Sequence of PromptMessage objects in chronological order (oldest first) - """ - with db.session() as session: + with Session(db.engine) as session: chatflow_conv = ChatflowHistoryService._get_or_create_chatflow_conversation( session, conversation_id, app_id, tenant_id, node_id, create_if_missing=False ) @@ -54,79 +34,19 @@ class ChatflowHistoryService: if not chatflow_conv: return [] - # Parse metadata - metadata_dict = json.loads(chatflow_conv.conversation_metadata) - metadata = ChatflowConversationMetadata.model_validate(metadata_dict) + metadata = ChatflowConversationMetadata.model_validate_json(chatflow_conv.conversation_metadata) + visible_count: int = max_visible_count or metadata.visible_count - # Determine the actual number of messages to return - target_visible_count = max_visible_count if max_visible_count is not None else metadata.visible_count - - # Fetch all messages (handle versioning) - msg_stmt = select(ChatflowMessage).where( + stmt = select(ChatflowMessage).where( ChatflowMessage.conversation_id == chatflow_conv.id ).order_by(ChatflowMessage.index.asc(), ChatflowMessage.version.desc()) - - all_messages: Sequence[Row[tuple[ChatflowMessage]]] = session.execute(msg_stmt).all() - - # Filter in memory: keep only the latest version for each index 
- latest_messages_by_index: dict[int, ChatflowMessage] = {} - for msg_row in all_messages: - msg = msg_row[0] - index = msg.index - - if index not in latest_messages_by_index or msg.version > latest_messages_by_index[index].version: - latest_messages_by_index[index] = msg - - # Sort by index and take the latest target_visible_count messages - sorted_messages = sorted(latest_messages_by_index.values(), key=lambda m: m.index, reverse=True) - visible_messages = sorted_messages[:target_visible_count] - - # Convert to PromptMessage and restore correct order (oldest first) - prompt_messages: list[PromptMessage] = [] - for msg in reversed(visible_messages): # Restore chronological order (index ascending) - data = json.loads(msg.data) - role = data.get('role', 'user') - content = data.get('content', '') - - if role == 'user': - prompt_messages.append(UserPromptMessage(content=content)) - elif role == 'assistant': - prompt_messages.append(AssistantPromptMessage(content=content)) - - return prompt_messages - - @staticmethod - def get_app_visible_chat_history( - app_id: str, - conversation_id: str, - tenant_id: str, - max_visible_count: Optional[int] = None - ) -> Sequence[PromptMessage]: - """Get visible chat history for app level.""" - return ChatflowHistoryService.get_visible_chat_history( - conversation_id=conversation_id, - app_id=app_id, - tenant_id=tenant_id, - node_id=None, # App level - max_visible_count=max_visible_count - ) - - @staticmethod - def get_node_visible_chat_history( - node_id: str, - conversation_id: str, - app_id: str, - tenant_id: str, - max_visible_count: Optional[int] = None - ) -> Sequence[PromptMessage]: - """Get visible chat history for a specific node.""" - return ChatflowHistoryService.get_visible_chat_history( - conversation_id=conversation_id, - app_id=app_id, - tenant_id=tenant_id, - node_id=node_id, - max_visible_count=max_visible_count - ) + raw_messages: Sequence[Row[tuple[ChatflowMessage]]] = session.execute(stmt).all() + sorted_messages 
= ChatflowHistoryService._filter_latest_messages( + [it[0] for it in raw_messages] + ) + visible_count = min(visible_count, len(sorted_messages)) + visible_messages = sorted_messages[-visible_count:] + return [PromptMessage.model_validate_json(it.data) for it in visible_messages] @staticmethod def save_message( @@ -136,13 +56,7 @@ class ChatflowHistoryService: tenant_id: str, node_id: Optional[str] = None ) -> None: - """ - Save a message to the append-only chatflow_messages table. - - Args: - node_id: None for app-level, specific node_id for node-level - """ - with db.session() as session: + with Session(db.engine) as session: chatflow_conv = ChatflowHistoryService._get_or_create_chatflow_conversation( session, conversation_id, app_id, tenant_id, node_id, create_if_missing=True ) @@ -216,7 +130,7 @@ class ChatflowHistoryService: """ Save a new version of an existing message (for message editing scenarios). """ - with db.session() as session: + with Session(db.engine) as session: chatflow_conv = ChatflowHistoryService._get_or_create_chatflow_conversation( session, conversation_id, app_id, tenant_id, node_id, create_if_missing=True ) @@ -270,7 +184,7 @@ class ChatflowHistoryService: # Update node-specific visible_count ChatflowHistoryService.update_visible_count(conv_id, "node-123", 8, app_id, tenant_id) """ - with db.session() as session: + with Session(db.engine) as session: chatflow_conv = ChatflowHistoryService._get_or_create_chatflow_conversation( session, conversation_id, app_id, tenant_id, node_id, create_if_missing=True ) @@ -281,6 +195,17 @@ class ChatflowHistoryService: session.commit() + @staticmethod + def _filter_latest_messages(raw_messages: Sequence[ChatflowMessage]) -> Sequence[ChatflowMessage]: + index_to_message: MutableMapping[int, ChatflowMessage] = {} + for msg in raw_messages: + index = msg.index + if index not in index_to_message or msg.version > index_to_message[index].version: + index_to_message[index] = msg + + sorted_messages = 
sorted(index_to_message.values(), key=lambda m: m.index) + return sorted_messages + @overload @staticmethod def _get_or_create_chatflow_conversation( diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index dc06397da1..aefd4f230c 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -13,7 +13,6 @@ from core.memory.entities import ( MemoryBlockWithVisibility, MemoryScheduleMode, MemoryScope, - MemoryStrategy, MemoryTerm, ) from core.memory.errors import MemorySyncTimeoutError @@ -24,7 +23,9 @@ from extensions.ext_database import db from extensions.ext_redis import redis_client from models import App from models.chatflow_memory import ChatflowMemoryVariable +from models.workflow import WorkflowDraftVariable from services.chatflow_history_service import ChatflowHistoryService +from services.workflow_draft_variable_service import WorkflowDraftVariableService from services.workflow_service import WorkflowService logger = logging.getLogger(__name__) @@ -42,11 +43,6 @@ def _get_memory_sync_lock_key(app_id: str, conversation_id: str) -> str: return f"memory_sync_update:{app_id}:{conversation_id}" class ChatflowMemoryService: - """ - Memory service class with only static methods. - All methods are static and do not require instantiation. 
- """ - @staticmethod def get_persistent_memories(app: App) -> Sequence[MemoryBlockWithVisibility]: stmt = select(ChatflowMemoryVariable).where( @@ -56,7 +52,7 @@ class ChatflowMemoryService: ChatflowMemoryVariable.conversation_id == None ) ) - with db.session() as session: + with Session(db.engine) as session: db_results = session.execute(stmt).all() return ChatflowMemoryService._with_visibility(app, [result[0] for result in db_results]) @@ -69,94 +65,38 @@ class ChatflowMemoryService: ChatflowMemoryVariable.conversation_id == conversation_id ) ) - with db.session() as session: + with Session(db.engine) as session: db_results = session.execute(stmt).all() return ChatflowMemoryService._with_visibility(app, [result[0] for result in db_results]) @staticmethod - def get_memory(memory_id: str, tenant_id: str, - app_id: Optional[str] = None, - conversation_id: Optional[str] = None, - node_id: Optional[str] = None) -> Optional[MemoryBlock]: - """Get single memory by ID""" - stmt = select(ChatflowMemoryVariable).where( - and_( - ChatflowMemoryVariable.memory_id == memory_id, - ChatflowMemoryVariable.tenant_id == tenant_id - ) - ) - - if app_id: - stmt = stmt.where(ChatflowMemoryVariable.app_id == app_id) - if conversation_id: - stmt = stmt.where(ChatflowMemoryVariable.conversation_id == conversation_id) - if node_id: - stmt = stmt.where(ChatflowMemoryVariable.node_id == node_id) - - with db.session() as session: - result = session.execute(stmt).first() - if result: - return MemoryBlock.model_validate(result[0].__dict__) - return None - - @staticmethod - def save_memory(memory: MemoryBlock, tenant_id: str, variable_pool: VariablePool, is_draft: bool = False) -> None: - """Save or update memory with draft mode support""" - + def save_memory(memory: MemoryBlock, tenant_id: str, variable_pool: VariablePool, is_draft: bool) -> None: key = f"{memory.node_id}:{memory.memory_id}" if memory.node_id else memory.memory_id variable_pool.add([MEMORY_BLOCK_VARIABLE_NODE_ID, key], 
memory.value) - stmt = select(ChatflowMemoryVariable).where( - and_( - ChatflowMemoryVariable.memory_id == memory.memory_id, - ChatflowMemoryVariable.tenant_id == tenant_id - ) - ) - with db.session() as session: - existing = session.execute(stmt).first() - if existing: - # Update existing - for key, value in memory.model_dump(exclude_unset=True).items(): - if hasattr(existing[0], key): - setattr(existing[0], key, value) - else: - # Create new - new_memory = ChatflowMemoryVariable( - tenant_id=tenant_id, - **memory.model_dump(exclude={'id'}) - ) - session.add(new_memory) + session.merge(ChatflowMemoryService._to_chatflow_memory_variable(memory)) session.commit() - # In draft mode, also write to workflow_draft_variables if is_draft: - from models.workflow import WorkflowDraftVariable - from services.workflow_draft_variable_service import WorkflowDraftVariableService with Session(bind=db.engine) as session: draft_var_service = WorkflowDraftVariableService(session) - - # Try to get existing variables existing_vars = draft_var_service.get_draft_variables_by_selectors( app_id=memory.app_id, selectors=[['memory_block', memory.memory_id]] ) - if existing_vars: - # Update existing draft variable draft_var = existing_vars[0] draft_var.value = memory.value else: - # Create new draft variable draft_var = WorkflowDraftVariable.new_memory_block_variable( app_id=memory.app_id, memory_id=memory.memory_id, name=memory.name, value=memory.value, - description=f"Memory block: {memory.name}" + description="" ) session.add(draft_var) - session.commit() @staticmethod @@ -164,104 +104,66 @@ class ChatflowMemoryService: tenant_id: str, app_id: str, conversation_id: Optional[str] = None, node_id: Optional[str] = None, - is_draft: bool = False) -> list[MemoryBlock]: - """Get runtime memory values based on MemoryBlockSpecs with draft mode support""" - from models.enums import DraftVariableType + is_draft: bool = False) -> Sequence[MemoryBlock]: + return 
[ChatflowMemoryService.get_memory_by_spec( + spec, tenant_id, app_id, conversation_id, node_id, is_draft + ) for spec in memory_block_specs] - if not memory_block_specs: - return [] - - # In draft mode, prefer reading from workflow_draft_variables - if is_draft: - # Try reading from the draft variables table - from services.workflow_draft_variable_service import WorkflowDraftVariableService - with Session(bind=db.engine) as session: + @staticmethod + def get_memory_by_spec(spec: MemoryBlockSpec, + tenant_id: str, app_id: str, + conversation_id: Optional[str] = None, + node_id: Optional[str] = None, + is_draft: bool = False) -> MemoryBlock: + with (Session(bind=db.engine) as session): + if is_draft: draft_var_service = WorkflowDraftVariableService(session) - - # Build selector list - selectors = [['memory_block', spec.id] for spec in memory_block_specs] - - # Fetch draft variables + selector = [MEMORY_BLOCK_VARIABLE_NODE_ID, f"{spec.id}.{node_id}"]\ + if node_id else [MEMORY_BLOCK_VARIABLE_NODE_ID, spec.id] draft_vars = draft_var_service.get_draft_variables_by_selectors( app_id=app_id, - selectors=selectors + selectors=[selector] ) - - # If draft variables exist, prefer using them if draft_vars: - spec_by_id = {spec.id: spec for spec in memory_block_specs} - draft_memories = [] - - for draft_var in draft_vars: - if draft_var.node_id == DraftVariableType.MEMORY_BLOCK: - spec = spec_by_id.get(draft_var.name) - if spec: - memory_block = MemoryBlock( - id=draft_var.id, - memory_id=draft_var.name, - name=spec.name, - value=draft_var.value, - scope=spec.scope, - term=spec.term, - app_id=app_id, - conversation_id='draft', - node_id=node_id - ) - draft_memories.append(memory_block) - - if draft_memories: - return draft_memories - - memory_ids = [spec.id for spec in memory_block_specs] - - stmt = select(ChatflowMemoryVariable).where( - and_( - ChatflowMemoryVariable.memory_id.in_(memory_ids), - ChatflowMemoryVariable.tenant_id == tenant_id, - ChatflowMemoryVariable.app_id == 
app_id - ) - ) - - if conversation_id: - stmt = stmt.where(ChatflowMemoryVariable.conversation_id == conversation_id) - if node_id: - stmt = stmt.where(ChatflowMemoryVariable.node_id == node_id) - - with db.session() as session: - results = session.execute(stmt).all() - found_memories = {row[0].memory_id: MemoryBlock.model_validate(row[0].__dict__) for row in results} - - # Create MemoryBlock objects for specs that don't have runtime values yet - all_memories = [] - for spec in memory_block_specs: - if spec.id in found_memories: - all_memories.append(found_memories[spec.id]) - else: - # Create default memory with template value following design rules - default_memory = MemoryBlock( - id="", # Will be assigned when saved - memory_id=spec.id, + draft_var = draft_vars[0] + return MemoryBlock( + id=draft_var.id, + memory_id=draft_var.name, name=spec.name, - value=spec.template, + value=draft_var.value, scope=spec.scope, term=spec.term, - # Design rules: - # - app_id=None for global (future), app_id=str for app-specific - app_id=app_id, # Always app-specific for now - # - conversation_id=None for persistent, conversation_id=str for session - conversation_id=conversation_id if spec.term == MemoryTerm.SESSION else None, - # - node_id=None for app-scope, node_id=str for node-scope - node_id=node_id if spec.scope == MemoryScope.NODE else None + app_id=app_id, + conversation_id=conversation_id, + node_id=node_id ) - all_memories.append(default_memory) - - return all_memories + stmt = select(ChatflowMemoryVariable).where( + and_( + ChatflowMemoryVariable.memory_id == spec.id, + ChatflowMemoryVariable.tenant_id == tenant_id, + ChatflowMemoryVariable.app_id == app_id, + ChatflowMemoryVariable.node_id == node_id, + ChatflowMemoryVariable.conversation_id == conversation_id + ) + ) + result = session.execute(stmt).scalar() + if result: + return ChatflowMemoryService._to_memory_block(result) + return MemoryBlock( + id="", # Will be assigned when saved + memory_id=spec.id, + 
name=spec.name, + value=spec.template, + scope=spec.scope, + term=spec.term, + app_id=app_id, + conversation_id=conversation_id, + node_id=node_id + ) @staticmethod def get_app_memories_by_workflow(workflow, tenant_id: str, - conversation_id: Optional[str] = None) -> list[MemoryBlock]: - """Get app-scoped memories based on workflow configuration""" - from core.memory.entities import MemoryScope + conversation_id: Optional[str] = None) -> Sequence[MemoryBlock]: app_memory_specs = [spec for spec in workflow.memory_blocks if spec.scope == MemoryScope.APP] return ChatflowMemoryService.get_memories_by_specs( @@ -272,7 +174,7 @@ class ChatflowMemoryService: ) @staticmethod - def get_node_memories_by_workflow(workflow, node_id: str, tenant_id: str) -> list[MemoryBlock]: + def get_node_memories_by_workflow(workflow, node_id: str, tenant_id: str) -> Sequence[MemoryBlock]: """Get node-scoped memories based on workflow configuration""" from core.memory.entities import MemoryScope @@ -287,72 +189,22 @@ class ChatflowMemoryService: node_id=node_id ) - # Core Memory Orchestration features - @staticmethod - def update_memory_if_needed(tenant_id: str, app_id: str, - memory_block_spec: MemoryBlockSpec, - conversation_id: str, - variable_pool: VariablePool, - is_draft: bool = False) -> bool: - """Update app-level memory if conditions are met - - Args: - tenant_id: Tenant ID - app_id: Application ID - memory_block_spec: Memory block specification - conversation_id: Conversation ID - variable_pool: Variable pool for context - is_draft: Whether in draft mode - """ - if not ChatflowMemoryService._should_update_memory( - tenant_id, app_id, memory_block_spec, conversation_id - ): - return False - - if memory_block_spec.schedule_mode == MemoryScheduleMode.SYNC: - # Sync mode: will be processed in batch after the App run completes - # This only marks the need; actual update happens in _update_app_memory_after_run - return True - else: - # Async mode: submit asynchronous update immediately - 
ChatflowMemoryService._submit_async_memory_update( - tenant_id, app_id, memory_block_spec, conversation_id, variable_pool, is_draft - ) - return True - - @staticmethod - def update_node_memory_if_needed(tenant_id: str, app_id: str, - memory_block_spec: MemoryBlockSpec, - node_id: str, llm_output: str, - variable_pool: VariablePool, - is_draft: bool = False) -> bool: - """Update node-level memory after LLM execution - - Args: - tenant_id: Tenant ID - app_id: Application ID - memory_block_spec: Memory block specification - node_id: Node ID - llm_output: LLM output content - variable_pool: Variable pool for context - is_draft: Whether in draft mode - """ + def update_node_memory_if_needed( + tenant_id: str, + app_id: str, + node_id: str, + conversation_id: str, + memory_block_spec: MemoryBlockSpec, + variable_pool: VariablePool, + is_draft: bool + ) -> bool: + """Update node-level memory after LLM execution""" conversation_id_segment = variable_pool.get(('sys', 'conversation_id')) if not conversation_id_segment: return False conversation_id = conversation_id_segment.value - # Save LLM output to node conversation history - assistant_message = AssistantPromptMessage(content=llm_output) - ChatflowHistoryService.save_node_message( - prompt_message=assistant_message, - node_id=node_id, - conversation_id=str(conversation_id), - app_id=app_id, - tenant_id=tenant_id - ) - if not ChatflowMemoryService._should_update_memory( tenant_id, app_id, memory_block_spec, str(conversation_id), node_id ): @@ -372,6 +224,57 @@ class ChatflowMemoryService: ) return True + @staticmethod + def _get_memory_from_chatflow_table(memory_id: str, tenant_id: str, + app_id: Optional[str] = None, + conversation_id: Optional[str] = None, + node_id: Optional[str] = None) -> Optional[MemoryBlock]: + stmt = select(ChatflowMemoryVariable).where( + and_( + ChatflowMemoryVariable.app_id == app_id, + ChatflowMemoryVariable.memory_id == memory_id, + ChatflowMemoryVariable.tenant_id == tenant_id, + 
ChatflowMemoryVariable.conversation_id == conversation_id, + ChatflowMemoryVariable.node_id == node_id + ) + ) + + with db.session() as session: + result = session.execute(stmt).first() + return ChatflowMemoryService._to_memory_block(result[0]) if result else None + + @staticmethod + def _to_memory_block(entity: ChatflowMemoryVariable) -> MemoryBlock: + scope = MemoryScope(entity.scope) if not isinstance(entity.scope, MemoryScope) else entity.scope + term = MemoryTerm(entity.term) if not isinstance(entity.term, MemoryTerm) else entity.term + return MemoryBlock( + id=entity.id, + memory_id=entity.memory_id, + name=entity.name, + value=entity.value, + scope=scope, + term=term, + app_id=cast(str, entity.app_id), # It's supposed to be not nullable for now + conversation_id=entity.conversation_id, + node_id=entity.node_id, + created_at=entity.created_at, + updated_at=entity.updated_at, + ) + + @staticmethod + def _to_chatflow_memory_variable(memory_block: MemoryBlock) -> ChatflowMemoryVariable: + return ChatflowMemoryVariable( + id=memory_block.id, + node_id=memory_block.node_id, + memory_id=memory_block.memory_id, + name=memory_block.name, + value=memory_block.value, + scope=memory_block.scope, + term=memory_block.term, + app_id=memory_block.app_id, + conversation_id=memory_block.conversation_id, + ) + @staticmethod def _with_visibility( app: App, @@ -400,8 +303,7 @@ class ChatflowMemoryService: memory_block_spec: MemoryBlockSpec, conversation_id: str, node_id: Optional[str] = None) -> bool: """Check if memory should be updated based on strategy""" - if memory_block_spec.strategy != MemoryStrategy.ON_TURNS: - return False + # Currently, `memory_block_spec.strategy != MemoryStrategy.ON_TURNS` is not possible, but possible in the future # Check turn count turn_key = f"memory_turn_count:{tenant_id}:{app_id}:{conversation_id}" @@ -428,7 +330,7 @@ class ChatflowMemoryService: # Execute update asynchronously using thread thread = threading.Thread( - 
target=ChatflowMemoryService._update_single_memory, + target=ChatflowMemoryService._update_app_single_memory, kwargs={ 'tenant_id': tenant_id, 'app_id': app_id, @@ -492,28 +394,18 @@ class ChatflowMemoryService: tenant_id: str, app_id: str, node_id: str, llm_output: str, variable_pool: VariablePool, is_draft: bool = False): - """Execute node memory update""" - try: - # Call existing _perform_memory_update method here - ChatflowMemoryService._perform_memory_update( - tenant_id=tenant_id, - app_id=app_id, - memory_block_spec=memory_block_spec, - conversation_id=str(variable_pool.get(('sys', 'conversation_id'))), - variable_pool=variable_pool, - node_id=node_id, - is_draft=is_draft - ) - except Exception as e: - logger.exception( - "Failed to update node memory %s for node %s", - memory_block_spec.id, - node_id, - exc_info=e - ) + ChatflowMemoryService._perform_memory_update( + tenant_id=tenant_id, + app_id=app_id, + memory_block_spec=memory_block_spec, + conversation_id=str(variable_pool.get(('sys', 'conversation_id'))), + variable_pool=variable_pool, + node_id=node_id, + is_draft=is_draft + ) @staticmethod - def _update_single_memory(*, tenant_id: str, app_id: str, + def _update_app_single_memory(*, tenant_id: str, app_id: str, memory_block_spec: MemoryBlockSpec, conversation_id: str, variable_pool: VariablePool, @@ -535,62 +427,26 @@ class ChatflowMemoryService: conversation_id: str, variable_pool: VariablePool, node_id: Optional[str] = None, is_draft: bool = False): - """Perform the actual memory update using LLM - - Args: - tenant_id: Tenant ID - app_id: Application ID - memory_block_spec: Memory block specification - conversation_id: Conversation ID - variable_pool: Variable pool for context - node_id: Optional node ID for node-level memory updates - is_draft: Whether in draft mode - """ - # Get conversation history + """Perform the actual memory update using LLM""" history = ChatflowHistoryService.get_visible_chat_history( conversation_id=conversation_id, 
app_id=app_id, tenant_id=tenant_id, - node_id=node_id, # Pass node_id, if None then get app-level history - max_visible_count=memory_block_spec.preserved_turns + node_id=node_id, ) # Get current memory value - current_memory = ChatflowMemoryService.get_memory( + current_memory = ChatflowMemoryService._get_memory_from_chatflow_table( memory_id=memory_block_spec.id, tenant_id=tenant_id, app_id=app_id, - conversation_id=conversation_id if memory_block_spec.term == MemoryTerm.SESSION else None, + conversation_id=conversation_id, node_id=node_id ) current_value = current_memory.value if current_memory else memory_block_spec.template - # Build update prompt - adjust wording based on whether there's a node_id - context_type = "Node conversation history" if node_id else "Conversation history" - memory_update_prompt = f""" - Based on the following {context_type}, update the memory content: - Current memory: {current_value} - - {context_type}: - {[msg.content for msg in history]} - - Update instruction: {memory_block_spec.instruction} - - Please output the updated memory content: - """ - - # Invoke LLM to update memory - extracted as a separate method - updated_value = ChatflowMemoryService._invoke_llm_for_memory_update( - tenant_id, - memory_block_spec, - memory_update_prompt, - current_value - ) - - if updated_value is None: - return # LLM invocation failed # Save updated memory updated_memory = MemoryBlock( @@ -720,23 +576,10 @@ class ChatflowMemoryService: @staticmethod def update_app_memory_after_run(workflow, conversation_id: str, variable_pool: VariablePool, is_draft: bool = False): - """Update app-level memory after run completion - - Args: - workflow: Workflow object - conversation_id: Conversation ID - variable_pool: Variable pool - is_draft: Whether in draft mode - """ - from core.memory.entities import MemoryScope - - memory_blocks = workflow.memory_blocks - - # Separate sync and async memory blocks + """Update app-level memory after run completion""" sync_blocks 
= [] async_blocks = [] - - for block in memory_blocks: + for block in workflow.memory_blocks: if block.scope == MemoryScope.APP: if block.update_mode == "sync": sync_blocks.append(block) @@ -805,7 +648,7 @@ class ChatflowMemoryService: futures = [] for block in sync_blocks: future = executor.submit( - ChatflowMemoryService._update_single_memory, + ChatflowMemoryService._update_app_single_memory, tenant_id=workflow.tenant_id, app_id=workflow.app_id, memory_block_spec=block, From 4d2fc66a8d6352e321b3e83eb9b657b0dcedd288 Mon Sep 17 00:00:00 2001 From: Stream Date: Fri, 22 Aug 2025 15:33:45 +0800 Subject: [PATCH 24/82] feat: refactor: refactor from ChatflowHistoryService and ChatflowMemoryService --- api/core/app/apps/advanced_chat/app_runner.py | 4 +- api/core/llm_generator/llm_generator.py | 9 +- api/core/llm_generator/prompts.py | 2 +- api/core/memory/entities.py | 2 +- api/services/chatflow_history_service.py | 6 +- api/services/chatflow_memory_service.py | 203 ++++++------------ api/services/workflow_service.py | 1 + 7 files changed, 80 insertions(+), 147 deletions(-) diff --git a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py index eddaacc7b3..23ac82bf72 100644 --- a/api/core/app/apps/advanced_chat/app_runner.py +++ b/api/core/app/apps/advanced_chat/app_runner.py @@ -1,9 +1,10 @@ import logging from collections.abc import Mapping, MutableMapping -from typing import Any, Optional, cast, override +from typing import Any, Optional, cast from sqlalchemy import select from sqlalchemy.orm import Session +from typing_extensions import override from configs import dify_config from core.app.apps.advanced_chat.app_config_manager import AdvancedChatAppConfig @@ -417,6 +418,7 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): memory_block_specs=memory_block_specs, tenant_id=self._workflow.tenant_id, app_id=self._workflow.app_id, + node_id=None, conversation_id=conversation_id, is_draft=is_draft ) diff --git 
a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index e3dc5f4e56..736d4fade8 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -2,7 +2,7 @@ import json import logging import re from collections.abc import Sequence -from typing import Optional, cast, Mapping +from typing import Optional, cast import json_repair @@ -14,9 +14,10 @@ from core.llm_generator.prompts import ( JAVASCRIPT_CODE_GENERATOR_PROMPT_TEMPLATE, LLM_MODIFY_CODE_SYSTEM, LLM_MODIFY_PROMPT_SYSTEM, + MEMORY_UPDATE_PROMPT, PYTHON_CODE_GENERATOR_PROMPT_TEMPLATE, SYSTEM_STRUCTURED_OUTPUT_GENERATE, - WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE, MEMORY_UPDATE_PROMPT, + WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE, ) from core.memory.entities import MemoryBlock, MemoryBlockSpec from core.model_manager import ModelManager @@ -577,7 +578,7 @@ class LLMGenerator: @staticmethod def update_memory_block( tenant_id: str, - visible_history: Mapping[str, str], + visible_history: Sequence[tuple[str, str]], memory_block: MemoryBlock, memory_spec: MemoryBlockSpec ) -> str: @@ -588,7 +589,7 @@ class LLMGenerator: model_type=ModelType.LLM, ) formatted_history = "" - for sender, message in visible_history.items(): + for sender, message in visible_history: formatted_history += f"{sender}: {message}\n" formatted_prompt = PromptTemplateParser(MEMORY_UPDATE_PROMPT).format( inputs={ diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py index 710ffe54f2..4eec0ef0f5 100644 --- a/api/core/llm_generator/prompts.py +++ b/api/core/llm_generator/prompts.py @@ -436,4 +436,4 @@ Update instruction: {{instruction}} Please output only the updated memory content, no other text like greeting: -""" # noqa: E501 +""" diff --git a/api/core/memory/entities.py b/api/core/memory/entities.py index f4faf44160..5bb4c512ea 100644 --- a/api/core/memory/entities.py +++ b/api/core/memory/entities.py @@ -1,6 +1,6 @@ from datetime import 
datetime from enum import Enum -from typing import Any, Optional +from typing import Optional from uuid import uuid4 from pydantic import BaseModel, Field diff --git a/api/services/chatflow_history_service.py b/api/services/chatflow_history_service.py index 3612daed3d..baabb7c071 100644 --- a/api/services/chatflow_history_service.py +++ b/api/services/chatflow_history_service.py @@ -1,16 +1,14 @@ import json import time -from collections.abc import Sequence -from typing import Literal, Optional, overload, MutableMapping +from collections.abc import MutableMapping, Sequence +from typing import Literal, Optional, overload from sqlalchemy import Row, Select, and_, func, select from sqlalchemy.orm import Session from core.memory.entities import ChatflowConversationMetadata from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, PromptMessage, - UserPromptMessage, ) from extensions.ext_database import db from models.chatflow_memory import ChatflowConversation, ChatflowMessage diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index aefd4f230c..d46146e065 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -7,6 +7,7 @@ from typing import Optional, cast from sqlalchemy import and_, select from sqlalchemy.orm import Session +from core.llm_generator.llm_generator import LLMGenerator from core.memory.entities import ( MemoryBlock, MemoryBlockSpec, @@ -16,7 +17,7 @@ from core.memory.entities import ( MemoryTerm, ) from core.memory.errors import MemorySyncTimeoutError -from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage +from core.model_runtime.entities.message_entities import PromptMessage from core.workflow.constants import MEMORY_BLOCK_VARIABLE_NODE_ID from core.workflow.entities.variable_pool import VariablePool from extensions.ext_database import db @@ -102,9 +103,9 @@ class ChatflowMemoryService: 
@staticmethod def get_memories_by_specs(memory_block_specs: Sequence[MemoryBlockSpec], tenant_id: str, app_id: str, - conversation_id: Optional[str] = None, - node_id: Optional[str] = None, - is_draft: bool = False) -> Sequence[MemoryBlock]: + conversation_id: Optional[str], + node_id: Optional[str], + is_draft: bool) -> Sequence[MemoryBlock]: return [ChatflowMemoryService.get_memory_by_spec( spec, tenant_id, app_id, conversation_id, node_id, is_draft ) for spec in memory_block_specs] @@ -112,9 +113,9 @@ class ChatflowMemoryService: @staticmethod def get_memory_by_spec(spec: MemoryBlockSpec, tenant_id: str, app_id: str, - conversation_id: Optional[str] = None, - node_id: Optional[str] = None, - is_draft: bool = False) -> MemoryBlock: + conversation_id: Optional[str], + node_id: Optional[str], + is_draft: bool) -> MemoryBlock: with (Session(bind=db.engine) as session): if is_draft: draft_var_service = WorkflowDraftVariableService(session) @@ -161,34 +162,6 @@ class ChatflowMemoryService: node_id=node_id ) - @staticmethod - def get_app_memories_by_workflow(workflow, tenant_id: str, - conversation_id: Optional[str] = None) -> Sequence[MemoryBlock]: - - app_memory_specs = [spec for spec in workflow.memory_blocks if spec.scope == MemoryScope.APP] - return ChatflowMemoryService.get_memories_by_specs( - memory_block_specs=app_memory_specs, - tenant_id=tenant_id, - app_id=workflow.app_id, - conversation_id=conversation_id - ) - - @staticmethod - def get_node_memories_by_workflow(workflow, node_id: str, tenant_id: str) -> Sequence[MemoryBlock]: - """Get node-scoped memories based on workflow configuration""" - from core.memory.entities import MemoryScope - - node_memory_specs = [ - spec for spec in workflow.memory_blocks - if spec.scope == MemoryScope.NODE and spec.id == node_id - ] - return ChatflowMemoryService.get_memories_by_specs( - memory_block_specs=node_memory_specs, - tenant_id=tenant_id, - app_id=workflow.app_id, - node_id=node_id - ) - @staticmethod def 
update_node_memory_if_needed( tenant_id: str, @@ -199,28 +172,36 @@ class ChatflowMemoryService: variable_pool: VariablePool, is_draft: bool ) -> bool: - """Update node-level memory after LLM execution""" - conversation_id_segment = variable_pool.get(('sys', 'conversation_id')) - if not conversation_id_segment: - return False - conversation_id = conversation_id_segment.value - if not ChatflowMemoryService._should_update_memory( - tenant_id, app_id, memory_block_spec, str(conversation_id), node_id + tenant_id=tenant_id, + app_id=app_id, + memory_block_spec=memory_block_spec, + conversation_id=conversation_id, + node_id=node_id ): return False if memory_block_spec.schedule_mode == MemoryScheduleMode.SYNC: # Node-level sync: blocking execution ChatflowMemoryService._update_node_memory_sync( - tenant_id, app_id, memory_block_spec, node_id, - str(conversation_id), variable_pool, is_draft + tenant_id=tenant_id, + app_id=app_id, + memory_block_spec=memory_block_spec, + node_id=node_id, + conversation_id=conversation_id, + variable_pool=variable_pool, + is_draft=is_draft ) else: # Node-level async: execute asynchronously ChatflowMemoryService._update_node_memory_async( - tenant_id, app_id, memory_block_spec, node_id, - llm_output, str(conversation_id), variable_pool, is_draft + tenant_id=tenant_id, + app_id=app_id, + memory_block_spec=memory_block_spec, + node_id=node_id, + conversation_id=conversation_id, + variable_pool=variable_pool, + is_draft=is_draft ) return True @@ -364,12 +345,14 @@ class ChatflowMemoryService: # Node-level async update method @staticmethod - def _update_node_memory_async(tenant_id: str, app_id: str, - memory_block_spec: MemoryBlockSpec, - node_id: str, llm_output: str, - conversation_id: str, - variable_pool: VariablePool, - is_draft: bool = False): + def _update_node_memory_async( + tenant_id: str, + app_id: str, + memory_block_spec: MemoryBlockSpec, + node_id: str, + conversation_id: str, + variable_pool: VariablePool, + is_draft: bool = 
False): """Asynchronously update node memory (submit task)""" # Execute update asynchronously using thread @@ -380,7 +363,6 @@ class ChatflowMemoryService: 'tenant_id': tenant_id, 'app_id': app_id, 'node_id': node_id, - 'llm_output': llm_output, 'variable_pool': variable_pool, 'is_draft': is_draft }, @@ -390,10 +372,15 @@ class ChatflowMemoryService: # Return immediately without waiting @staticmethod - def _perform_node_memory_update(*, memory_block_spec: MemoryBlockSpec, - tenant_id: str, app_id: str, node_id: str, - llm_output: str, variable_pool: VariablePool, - is_draft: bool = False): + def _perform_node_memory_update( + *, + memory_block_spec: MemoryBlockSpec, + tenant_id: str, + app_id: str, + node_id: str, + variable_pool: VariablePool, + is_draft: bool = False + ): ChatflowMemoryService._perform_memory_update( tenant_id=tenant_id, app_id=app_id, @@ -422,35 +409,36 @@ class ChatflowMemoryService: ) @staticmethod - def _perform_memory_update(tenant_id: str, app_id: str, - memory_block_spec: MemoryBlockSpec, - conversation_id: str, variable_pool: VariablePool, - node_id: Optional[str] = None, - is_draft: bool = False): - """Perform the actual memory update using LLM""" + def _perform_memory_update( + tenant_id: str, app_id: str, + memory_block_spec: MemoryBlockSpec, + conversation_id: str, + variable_pool: VariablePool, + node_id: Optional[str], + is_draft: bool): history = ChatflowHistoryService.get_visible_chat_history( conversation_id=conversation_id, app_id=app_id, tenant_id=tenant_id, node_id=node_id, ) - - # Get current memory value - current_memory = ChatflowMemoryService._get_memory_from_chatflow_table( - memory_id=memory_block_spec.id, + memory_block = ChatflowMemoryService.get_memory_by_spec( tenant_id=tenant_id, + spec=memory_block_spec, app_id=app_id, conversation_id=conversation_id, - node_id=node_id + node_id=node_id, + is_draft=is_draft + ) + updated_value = LLMGenerator.update_memory_block( + tenant_id=tenant_id, + 
visible_history=ChatflowMemoryService._format_chat_history(history), + memory_block=memory_block, + memory_spec=memory_block_spec, ) - - current_value = current_memory.value if current_memory else memory_block_spec.template - - - # Save updated memory updated_memory = MemoryBlock( - id=current_memory.id if current_memory else "", + id=memory_block.id, memory_id=memory_block_spec.id, name=memory_block_spec.name, value=updated_value, @@ -460,74 +448,17 @@ class ChatflowMemoryService: conversation_id=conversation_id if memory_block_spec.term == MemoryTerm.SESSION else None, node_id=node_id ) - ChatflowMemoryService.save_memory(updated_memory, tenant_id, variable_pool, is_draft) # Not implemented yet: Send success event # self._send_memory_update_event(memory_block_spec.id, "completed", updated_value) @staticmethod - def _invoke_llm_for_memory_update(tenant_id: str, - memory_block_spec: MemoryBlockSpec, - prompt: str, current_value: str) -> Optional[str]: - """Invoke LLM to update memory content - - Args: - tenant_id: Tenant ID - memory_block_spec: Memory block specification - prompt: Update prompt - current_value: Current memory value (used for fallback on failure) - - Returns: - Updated value, returns None if failed - """ - from core.model_manager import ModelManager - from core.model_runtime.entities.llm_entities import LLMResult - from core.model_runtime.entities.model_entities import ModelType - - model_manager = ModelManager() - - # Use model configuration defined in memory_block_spec, use default model if not specified - if hasattr(memory_block_spec, 'model') and memory_block_spec.model: - model_instance = model_manager.get_model_instance( - tenant_id=tenant_id, - model_type=ModelType.LLM, - provider=memory_block_spec.model.get("provider", ""), - model=memory_block_spec.model.get("name", "") - ) - model_parameters = memory_block_spec.model.get("completion_params", {}) - else: - # Use default model - model_instance = model_manager.get_default_model_instance( - 
tenant_id=tenant_id, - model_type=ModelType.LLM - ) - model_parameters = {"temperature": 0.7, "max_tokens": 1000} - - try: - response = cast( - LLMResult, - model_instance.invoke_llm( - prompt_messages=[UserPromptMessage(content=prompt)], - model_parameters=model_parameters, - stream=False - ) - ) - return response.message.get_text_content() - except Exception as e: - logger.exception("Failed to update memory using LLM", exc_info=e) - # Not implemented yet: Send failure event - # ChatflowMemoryService._send_memory_update_event(memory_block_spec.id, "failed", current_value, str(e)) - return None - - - def _send_memory_update_event(self, memory_id: str, status: str, value: str, error: str = ""): - """Send memory update event - - Note: Event system integration not implemented yet, this method is retained as a placeholder - """ - # Not implemented yet: Event system integration will be added in future versions - pass + def _format_chat_history(messages: Sequence[PromptMessage]) -> Sequence[tuple[str, str]]: + result = [] + for message in messages: + result.append((str(message.role.value), message.get_text_content())) + return result # App-level sync batch update related methods @staticmethod diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index 87bbc4577c..426637d84e 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -756,6 +756,7 @@ def _fetch_memory_blocks(workflow: Workflow, conversation_id: str, is_draft: boo memory_block_specs=memory_block_specs, tenant_id=workflow.tenant_id, app_id=workflow.app_id, + node_id=None, conversation_id=conversation_id, is_draft=is_draft, ) From 8b680204530a895e8800ea999f6b5dbf1d7e67e7 Mon Sep 17 00:00:00 2001 From: Stream Date: Fri, 22 Aug 2025 17:44:27 +0800 Subject: [PATCH 25/82] refactor: refactor from ChatflowHistoryService and ChatflowMemoryService --- api/core/app/apps/advanced_chat/app_runner.py | 19 +- api/core/memory/entities.py | 36 +- 
api/services/chatflow_history_service.py | 59 -- api/services/chatflow_memory_service.py | 701 ++++++++---------- api/services/workflow_service.py | 6 +- 5 files changed, 310 insertions(+), 511 deletions(-) diff --git a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py index 23ac82bf72..cbb7ad4fd6 100644 --- a/api/core/app/apps/advanced_chat/app_runner.py +++ b/api/core/app/apps/advanced_chat/app_runner.py @@ -425,25 +425,22 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): # Build memory_id -> value mapping for memory in memories: - if memory.scope == MemoryScope.APP: + if memory.spec.scope == MemoryScope.APP: # App level: use memory_id directly - memory_blocks_dict[memory.memory_id] = memory.value + memory_blocks_dict[memory.spec.id] = memory.value else: # NODE scope node_id = memory.node_id if not node_id: - logger.warning("Memory block %s has no node_id, skip.", memory.memory_id) + logger.warning("Memory block %s has no node_id, skip.", memory.spec.id) continue - key = f"{node_id}.{memory.memory_id}" + key = f"{node_id}.{memory.spec.id}" memory_blocks_dict[key] = memory.value return memory_blocks_dict def _sync_conversation_to_chatflow_tables(self, assistant_message: str): - # Get user input and AI response - user_message = self.application_generate_entity.query - ChatflowHistoryService.save_app_message( - prompt_message=UserPromptMessage(content=user_message), + prompt_message=UserPromptMessage(content=(self.application_generate_entity.query)), conversation_id=self.conversation.id, app_id=self._workflow.app_id, tenant_id=self._workflow.tenant_id @@ -456,14 +453,10 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): ) def _check_app_memory_updates(self): - from core.app.entities.app_invoke_entities import InvokeFrom - from services.chatflow_memory_service import ChatflowMemoryService - is_draft = (self.application_generate_entity.invoke_from == InvokeFrom.DEBUGGER) - 
ChatflowMemoryService.update_app_memory_after_run( + ChatflowMemoryService.update_app_memory_if_needed( workflow=self._workflow, conversation_id=self.conversation.id, - variable_pool=VariablePool(), # Make a fake pool to satisfy the signature is_draft=is_draft ) diff --git a/api/core/memory/entities.py b/api/core/memory/entities.py index 5bb4c512ea..654922d154 100644 --- a/api/core/memory/entities.py +++ b/api/core/memory/entities.py @@ -1,4 +1,3 @@ -from datetime import datetime from enum import Enum from typing import Optional from uuid import uuid4 @@ -63,37 +62,12 @@ class MemoryBlock(BaseModel): These rules implicitly determine scope and term without redundant storage. """ - id: str - memory_id: str - name: str + spec: MemoryBlockSpec + tenant_id: str value: str - scope: MemoryScope # Derived from node_id: None=APP, str=NODE - term: MemoryTerm # Derived from conversation_id: None=PERSISTENT, str=SESSION - app_id: str # None=global(future), str=app-specific - conversation_id: Optional[str] = None # None=persistent, str=session - node_id: Optional[str] = None # None=app-scope, str=node-scope - created_at: Optional[datetime] = None - updated_at: Optional[datetime] = None - - @property - def is_global(self) -> bool: - """Check if this is global memory (future feature)""" - return self.app_id is None - - @property - def is_persistent(self) -> bool: - """Check if this is persistent memory (cross-conversation)""" - return self.conversation_id is None - - @property - def is_app_scope(self) -> bool: - """Check if this is app-level scope""" - return self.node_id is None - - @property - def is_node_scope(self) -> bool: - """Check if this is node-level scope""" - return self.node_id is not None + app_id: str + conversation_id: Optional[str] = None + node_id: Optional[str] = None class MemoryBlockWithVisibility(BaseModel): id: str diff --git a/api/services/chatflow_history_service.py b/api/services/chatflow_history_service.py index baabb7c071..915d5ff2c8 100644 --- 
a/api/services/chatflow_history_service.py +++ b/api/services/chatflow_history_service.py @@ -107,7 +107,6 @@ class ChatflowHistoryService: app_id: str, tenant_id: str ) -> None: - """Save PromptMessage to node-specific chatflow conversation.""" ChatflowHistoryService.save_message( prompt_message=prompt_message, conversation_id=conversation_id, @@ -116,50 +115,6 @@ class ChatflowHistoryService: node_id=node_id ) - @staticmethod - def save_message_version( - prompt_message: PromptMessage, - message_index: int, - conversation_id: str, - app_id: str, - tenant_id: str, - node_id: Optional[str] = None - ) -> None: - """ - Save a new version of an existing message (for message editing scenarios). - """ - with Session(db.engine) as session: - chatflow_conv = ChatflowHistoryService._get_or_create_chatflow_conversation( - session, conversation_id, app_id, tenant_id, node_id, create_if_missing=True - ) - - # Get the maximum version number for this index - max_version = session.execute( - select(func.max(ChatflowMessage.version)).where( - and_( - ChatflowMessage.conversation_id == chatflow_conv.id, - ChatflowMessage.index == message_index - ) - ) - ).scalar() or 0 - next_version = max_version + 1 - - # Save new version of the message - message_data = { - 'role': prompt_message.role.value, - 'content': prompt_message.get_text_content(), - 'timestamp': time.time() - } - - new_message_version = ChatflowMessage( - conversation_id=chatflow_conv.id, - index=message_index, - version=next_version, - data=json.dumps(message_data) - ) - session.add(new_message_version) - session.commit() - @staticmethod def update_visible_count( conversation_id: str, @@ -168,20 +123,6 @@ class ChatflowHistoryService: app_id: str, tenant_id: str ) -> None: - """ - Update visible_count metadata for specific scope. 
- - Args: - node_id: None for app-level updates, specific node_id for node-level updates - new_visible_count: The new visible_count value (typically preserved_turns) - - Usage Examples: - # Update app-level visible_count - ChatflowHistoryService.update_visible_count(conv_id, None, 10, app_id, tenant_id) - - # Update node-specific visible_count - ChatflowHistoryService.update_visible_count(conv_id, "node-123", 8, app_id, tenant_id) - """ with Session(db.engine) as session: chatflow_conv = ChatflowHistoryService._get_or_create_chatflow_conversation( session, conversation_id, app_id, tenant_id, node_id, create_if_missing=True diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index d46146e065..9bb0db6e9d 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -2,7 +2,7 @@ import logging import threading import time from collections.abc import Sequence -from typing import Optional, cast +from typing import Optional from sqlalchemy import and_, select from sqlalchemy.orm import Session @@ -24,25 +24,13 @@ from extensions.ext_database import db from extensions.ext_redis import redis_client from models import App from models.chatflow_memory import ChatflowMemoryVariable -from models.workflow import WorkflowDraftVariable +from models.workflow import Workflow, WorkflowDraftVariable from services.chatflow_history_service import ChatflowHistoryService from services.workflow_draft_variable_service import WorkflowDraftVariableService from services.workflow_service import WorkflowService logger = logging.getLogger(__name__) -def _get_memory_sync_lock_key(app_id: str, conversation_id: str) -> str: - """Generate Redis lock key for memory sync updates - - Args: - app_id: Application ID - conversation_id: Conversation ID - - Returns: - Formatted lock key - """ - return f"memory_sync_update:{app_id}:{conversation_id}" - class ChatflowMemoryService: @staticmethod def get_persistent_memories(app: 
App) -> Sequence[MemoryBlockWithVisibility]: @@ -71,12 +59,34 @@ class ChatflowMemoryService: return ChatflowMemoryService._with_visibility(app, [result[0] for result in db_results]) @staticmethod - def save_memory(memory: MemoryBlock, tenant_id: str, variable_pool: VariablePool, is_draft: bool) -> None: - key = f"{memory.node_id}:{memory.memory_id}" if memory.node_id else memory.memory_id + def save_memory(memory: MemoryBlock, variable_pool: VariablePool, is_draft: bool) -> None: + key = f"{memory.node_id}:{memory.spec.id}" if memory.node_id else memory.spec.id variable_pool.add([MEMORY_BLOCK_VARIABLE_NODE_ID, key], memory.value) - with db.session() as session: - session.merge(ChatflowMemoryService._to_chatflow_memory_variable(memory)) + with Session(db.engine) as session: + existing = session.query(ChatflowMemoryVariable).filter_by( + memory_id=memory.spec.id, + tenant_id=memory.tenant_id, + app_id=memory.app_id, + node_id=memory.node_id, + conversation_id=memory.conversation_id + ).first() + if existing: + existing.value = memory.value + else: + session.add( + ChatflowMemoryVariable( + memory_id=memory.spec.id, + tenant_id=memory.tenant_id, + app_id=memory.app_id, + node_id=memory.node_id, + conversation_id=memory.conversation_id, + name=memory.spec.name, + value=memory.value, + term=memory.spec.term, + scope=memory.spec.scope, + ) + ) session.commit() if is_draft: @@ -84,7 +94,7 @@ class ChatflowMemoryService: draft_var_service = WorkflowDraftVariableService(session) existing_vars = draft_var_service.get_draft_variables_by_selectors( app_id=memory.app_id, - selectors=[['memory_block', memory.memory_id]] + selectors=[['memory_block', memory.spec.id]] ) if existing_vars: draft_var = existing_vars[0] @@ -92,8 +102,8 @@ class ChatflowMemoryService: else: draft_var = WorkflowDraftVariable.new_memory_block_variable( app_id=memory.app_id, - memory_id=memory.memory_id, - name=memory.name, + memory_id=memory.spec.id, + name=memory.spec.name, value=memory.value, 
description="" ) @@ -101,25 +111,30 @@ class ChatflowMemoryService: session.commit() @staticmethod - def get_memories_by_specs(memory_block_specs: Sequence[MemoryBlockSpec], - tenant_id: str, app_id: str, - conversation_id: Optional[str], - node_id: Optional[str], - is_draft: bool) -> Sequence[MemoryBlock]: - return [ChatflowMemoryService.get_memory_by_spec( + def get_memories_by_specs( + memory_block_specs: Sequence[MemoryBlockSpec], + tenant_id: str, app_id: str, + conversation_id: Optional[str], + node_id: Optional[str], + is_draft: bool + ) -> Sequence[MemoryBlock]: + return [ChatflowMemoryService.get_memory_by_spec( spec, tenant_id, app_id, conversation_id, node_id, is_draft ) for spec in memory_block_specs] @staticmethod - def get_memory_by_spec(spec: MemoryBlockSpec, - tenant_id: str, app_id: str, - conversation_id: Optional[str], - node_id: Optional[str], - is_draft: bool) -> MemoryBlock: - with (Session(bind=db.engine) as session): + def get_memory_by_spec( + spec: MemoryBlockSpec, + tenant_id: str, + app_id: str, + conversation_id: Optional[str], + node_id: Optional[str], + is_draft: bool + ) -> MemoryBlock: + with Session(db.engine) as session: if is_draft: draft_var_service = WorkflowDraftVariableService(session) - selector = [MEMORY_BLOCK_VARIABLE_NODE_ID, f"{spec.id}.{node_id}"]\ + selector = [MEMORY_BLOCK_VARIABLE_NODE_ID, f"{spec.id}.{node_id}"] \ if node_id else [MEMORY_BLOCK_VARIABLE_NODE_ID, spec.id] draft_vars = draft_var_service.get_draft_variables_by_selectors( app_id=app_id, @@ -128,38 +143,92 @@ class ChatflowMemoryService: if draft_vars: draft_var = draft_vars[0] return MemoryBlock( - id=draft_var.id, - memory_id=draft_var.name, - name=spec.name, value=draft_var.value, - scope=spec.scope, - term=spec.term, + tenant_id=tenant_id, app_id=app_id, conversation_id=conversation_id, - node_id=node_id + node_id=node_id, + spec=spec ) stmt = select(ChatflowMemoryVariable).where( and_( ChatflowMemoryVariable.memory_id == spec.id, 
ChatflowMemoryVariable.tenant_id == tenant_id, ChatflowMemoryVariable.app_id == app_id, - ChatflowMemoryVariable.node_id == node_id, - ChatflowMemoryVariable.conversation_id == conversation_id + ChatflowMemoryVariable.node_id == \ + (node_id if spec.term == MemoryScope.NODE else None), + ChatflowMemoryVariable.conversation_id == \ + (conversation_id if spec.term == MemoryTerm.SESSION else None), ) ) result = session.execute(stmt).scalar() if result: - return ChatflowMemoryService._to_memory_block(result) + return MemoryBlock( + value=result.value, + tenant_id=tenant_id, + app_id=app_id, + conversation_id=conversation_id, + node_id=node_id, + spec=spec + ) return MemoryBlock( - id="", # Will be assigned when saved - memory_id=spec.id, - name=spec.name, + tenant_id=tenant_id, value=spec.template, - scope=spec.scope, - term=spec.term, app_id=app_id, conversation_id=conversation_id, - node_id=node_id + node_id=node_id, + spec=spec + ) + + @staticmethod + def update_app_memory_if_needed( + workflow: Workflow, + conversation_id: str, + is_draft: bool + ): + visible_messages = ChatflowHistoryService.get_visible_chat_history( + conversation_id=conversation_id, + app_id=workflow.app_id, + tenant_id=workflow.tenant_id, + node_id=None, + ) + sync_blocks: list[MemoryBlock] = [] + async_blocks: list[MemoryBlock] = [] + for memory_spec in workflow.memory_blocks: + if memory_spec.scope == MemoryScope.APP: + memory = ChatflowMemoryService.get_memory_by_spec( + spec=memory_spec, + tenant_id=workflow.tenant_id, + app_id=workflow.app_id, + conversation_id=conversation_id, + node_id=None, + is_draft=is_draft + ) + if ChatflowMemoryService._should_update_memory(memory, visible_messages): + if memory.spec.schedule_mode == MemoryScheduleMode.SYNC: + sync_blocks.append(memory) + else: + async_blocks.append(memory) + + if not sync_blocks and not async_blocks: + return + + # async mode: submit individual async tasks directly + for memory_block in async_blocks: + 
ChatflowMemoryService._app_submit_async_memory_update( + block=memory_block, + is_draft=is_draft, + visible_messages=visible_messages + ) + + # sync mode: submit a batch update task + if sync_blocks: + ChatflowMemoryService._app_submit_sync_memory_batch_update( + sync_blocks=sync_blocks, + is_draft=is_draft, + conversation_id=conversation_id, + app_id=workflow.app_id, + visible_messages=visible_messages ) @staticmethod @@ -172,307 +241,47 @@ class ChatflowMemoryService: variable_pool: VariablePool, is_draft: bool ) -> bool: - if not ChatflowMemoryService._should_update_memory( - tenant_id=tenant_id, - app_id=app_id, - memory_block_spec=memory_block_spec, - conversation_id=conversation_id, - node_id=node_id - ): - return False - - if memory_block_spec.schedule_mode == MemoryScheduleMode.SYNC: - # Node-level sync: blocking execution - ChatflowMemoryService._update_node_memory_sync( - tenant_id=tenant_id, - app_id=app_id, - memory_block_spec=memory_block_spec, - node_id=node_id, - conversation_id=conversation_id, - variable_pool=variable_pool, - is_draft=is_draft - ) - else: - # Node-level async: execute asynchronously - ChatflowMemoryService._update_node_memory_async( - tenant_id=tenant_id, - app_id=app_id, - memory_block_spec=memory_block_spec, - node_id=node_id, - conversation_id=conversation_id, - variable_pool=variable_pool, - is_draft=is_draft - ) - return True - - @staticmethod - def _get_memory_from_chatflow_table(memory_id: str, tenant_id: str, - app_id: Optional[str] = None, - conversation_id: Optional[str] = None, - node_id: Optional[str] = None) -> Optional[MemoryBlock]: - stmt = select(ChatflowMemoryVariable).where( - and_( - ChatflowMemoryVariable.app_id == app_id, - ChatflowMemoryVariable.memory_id == memory_id, - ChatflowMemoryVariable.tenant_id == tenant_id, - ChatflowMemoryVariable.conversation_id == conversation_id, - ChatflowMemoryVariable.node_id == node_id - ) - ) - - with db.session() as session: - result = session.execute(stmt).first() - return 
ChatflowMemoryService._to_memory_block(result[0]) if result else None - - @staticmethod - def _to_memory_block(entity: ChatflowMemoryVariable) -> MemoryBlock: - scope = MemoryScope(entity.scope) if not isinstance(entity.scope, MemoryScope) else entity.scope - term = MemoryTerm(entity.term) if not isinstance(entity.term, MemoryTerm) else entity.term - return MemoryBlock( - id=entity.id, - memory_id=entity.memory_id, - name=entity.name, - value=entity.value, - scope=scope, - term=term, - app_id=cast(str, entity.app_id), # It's supposed to be not nullable for now - conversation_id=entity.conversation_id, - node_id=entity.node_id, - created_at=entity.created_at, - updated_at=entity.updated_at, - ) - - @staticmethod - def _to_chatflow_memory_variable(memory_block: MemoryBlock) -> ChatflowMemoryVariable: - return ChatflowMemoryVariable( - id=memory_block.id, - node_id=memory_block.node_id, - memory_id=memory_block.memory_id, - name=memory_block.name, - value=memory_block.value, - scope=memory_block.scope, - term=memory_block.term, - app_id=memory_block.app_id, - conversation_id=memory_block.conversation_id, - ) - - @staticmethod - def _with_visibility( - app: App, - raw_results: Sequence[ChatflowMemoryVariable] - ) -> Sequence[MemoryBlockWithVisibility]: - workflow = WorkflowService().get_published_workflow(app) - if not workflow: - return [] - results = [] - for db_result in raw_results: - spec = next((spec for spec in workflow.memory_blocks if spec.id == db_result.memory_id), None) - if spec: - results.append( - MemoryBlockWithVisibility( - id=db_result.memory_id, - name=db_result.name, - value=db_result.value, - end_user_editable=spec.end_user_editable, - end_user_visible=spec.end_user_visible, - ) - ) - return results - - @staticmethod - def _should_update_memory(tenant_id: str, app_id: str, - memory_block_spec: MemoryBlockSpec, - conversation_id: str, node_id: Optional[str] = None) -> bool: - """Check if memory should be updated based on strategy""" - # Currently, 
`memory_block_spec.strategy != MemoryStrategy.ON_TURNS` is not possible, but possible in the future - - # Check turn count - turn_key = f"memory_turn_count:{tenant_id}:{app_id}:{conversation_id}" - if node_id: - turn_key += f":{node_id}" - - current_turns = redis_client.get(turn_key) - current_turns = int(current_turns) if current_turns else 0 - current_turns += 1 - - # Update count - redis_client.set(turn_key, current_turns) - - return current_turns % memory_block_spec.update_turns == 0 - - # App-level async update method - @staticmethod - def _submit_async_memory_update(tenant_id: str, app_id: str, - block: MemoryBlockSpec, - conversation_id: str, - variable_pool: VariablePool, - is_draft: bool = False): - """Submit async memory update task""" - - # Execute update asynchronously using thread - thread = threading.Thread( - target=ChatflowMemoryService._update_app_single_memory, - kwargs={ - 'tenant_id': tenant_id, - 'app_id': app_id, - 'memory_block_spec': block, - 'conversation_id': conversation_id, - 'variable_pool': variable_pool, - 'is_draft': is_draft - }, - daemon=True - ) - thread.start() - - # Node-level sync update method - @staticmethod - def _update_node_memory_sync(tenant_id: str, app_id: str, - memory_block_spec: MemoryBlockSpec, - node_id: str, conversation_id: str, - variable_pool: VariablePool, - is_draft: bool = False): - """Synchronously update node memory (blocking execution)""" - ChatflowMemoryService._perform_memory_update( - tenant_id=tenant_id, - app_id=app_id, - memory_block_spec=memory_block_spec, - conversation_id=conversation_id, - variable_pool=variable_pool, - node_id=node_id, - is_draft=is_draft - ) - # Wait for update to complete before returning - - # Node-level async update method - @staticmethod - def _update_node_memory_async( - tenant_id: str, - app_id: str, - memory_block_spec: MemoryBlockSpec, - node_id: str, - conversation_id: str, - variable_pool: VariablePool, - is_draft: bool = False): - """Asynchronously update node 
memory (submit task)""" - - # Execute update asynchronously using thread - thread = threading.Thread( - target=ChatflowMemoryService._perform_node_memory_update, - kwargs={ - 'memory_block_spec': memory_block_spec, - 'tenant_id': tenant_id, - 'app_id': app_id, - 'node_id': node_id, - 'variable_pool': variable_pool, - 'is_draft': is_draft - }, - daemon=True - ) - thread.start() - # Return immediately without waiting - - @staticmethod - def _perform_node_memory_update( - *, - memory_block_spec: MemoryBlockSpec, - tenant_id: str, - app_id: str, - node_id: str, - variable_pool: VariablePool, - is_draft: bool = False - ): - ChatflowMemoryService._perform_memory_update( - tenant_id=tenant_id, - app_id=app_id, - memory_block_spec=memory_block_spec, - conversation_id=str(variable_pool.get(('sys', 'conversation_id'))), - variable_pool=variable_pool, - node_id=node_id, - is_draft=is_draft - ) - - @staticmethod - def _update_app_single_memory(*, tenant_id: str, app_id: str, - memory_block_spec: MemoryBlockSpec, - conversation_id: str, - variable_pool: VariablePool, - is_draft: bool = False): - """Update single memory""" - ChatflowMemoryService._perform_memory_update( - tenant_id=tenant_id, - app_id=app_id, - memory_block_spec=memory_block_spec, - conversation_id=conversation_id, - variable_pool=variable_pool, - node_id=None, # App-level memory doesn't have node_id - is_draft=is_draft - ) - - @staticmethod - def _perform_memory_update( - tenant_id: str, app_id: str, - memory_block_spec: MemoryBlockSpec, - conversation_id: str, - variable_pool: VariablePool, - node_id: Optional[str], - is_draft: bool): - history = ChatflowHistoryService.get_visible_chat_history( + visible_messages = ChatflowHistoryService.get_visible_chat_history( conversation_id=conversation_id, app_id=app_id, tenant_id=tenant_id, node_id=node_id, ) memory_block = ChatflowMemoryService.get_memory_by_spec( - tenant_id=tenant_id, spec=memory_block_spec, + tenant_id=tenant_id, app_id=app_id, 
conversation_id=conversation_id, node_id=node_id, is_draft=is_draft ) - updated_value = LLMGenerator.update_memory_block( - tenant_id=tenant_id, - visible_history=ChatflowMemoryService._format_chat_history(history), + if not ChatflowMemoryService._should_update_memory( memory_block=memory_block, - memory_spec=memory_block_spec, - ) - # Save updated memory - updated_memory = MemoryBlock( - id=memory_block.id, - memory_id=memory_block_spec.id, - name=memory_block_spec.name, - value=updated_value, - scope=memory_block_spec.scope, - term=memory_block_spec.term, - app_id=app_id, - conversation_id=conversation_id if memory_block_spec.term == MemoryTerm.SESSION else None, - node_id=node_id - ) - ChatflowMemoryService.save_memory(updated_memory, tenant_id, variable_pool, is_draft) + visible_history=visible_messages + ): + return False - # Not implemented yet: Send success event - # self._send_memory_update_event(memory_block_spec.id, "completed", updated_value) + if memory_block_spec.schedule_mode == MemoryScheduleMode.SYNC: + # Node-level sync: blocking execution + ChatflowMemoryService._update_node_memory_sync( + visible_messages=visible_messages, + memory_block=memory_block, + variable_pool=variable_pool, + is_draft=is_draft + ) + else: + # Node-level async: execute asynchronously + ChatflowMemoryService._update_node_memory_async( + memory_block=memory_block, + visible_messages=visible_messages, + variable_pool=variable_pool, + is_draft=is_draft + ) + return True - @staticmethod - def _format_chat_history(messages: Sequence[PromptMessage]) -> Sequence[tuple[str, str]]: - result = [] - for message in messages: - result.append((str(message.role.value), message.get_text_content())) - return result - - # App-level sync batch update related methods @staticmethod def wait_for_sync_memory_completion(workflow, conversation_id: str): - """Wait for sync memory update to complete, maximum 50 seconds - - Args: - workflow: Workflow object - conversation_id: Conversation ID - - 
Raises: - MemorySyncTimeoutError: Raised when timeout is reached - """ - from core.memory.entities import MemoryScope + """Wait for sync memory update to complete, maximum 50 seconds""" memory_blocks = workflow.memory_blocks sync_memory_blocks = [ @@ -505,54 +314,132 @@ class ChatflowMemoryService: ) @staticmethod - def update_app_memory_after_run(workflow, conversation_id: str, variable_pool: VariablePool, - is_draft: bool = False): - """Update app-level memory after run completion""" - sync_blocks = [] - async_blocks = [] - for block in workflow.memory_blocks: - if block.scope == MemoryScope.APP: - if block.update_mode == "sync": - sync_blocks.append(block) - else: - async_blocks.append(block) - - # async mode: submit individual async tasks directly - for block in async_blocks: - ChatflowMemoryService._submit_async_memory_update( - tenant_id=workflow.tenant_id, - app_id=workflow.app_id, - block=block, - conversation_id=conversation_id, - variable_pool=variable_pool, - is_draft=is_draft - ) - - # sync mode: submit a batch update task - if sync_blocks: - ChatflowMemoryService._submit_sync_memory_batch_update( - workflow=workflow, - sync_blocks=sync_blocks, - conversation_id=conversation_id, - variable_pool=variable_pool, - is_draft=is_draft + def _with_visibility( + app: App, + raw_results: Sequence[ChatflowMemoryVariable] + ) -> Sequence[MemoryBlockWithVisibility]: + workflow = WorkflowService().get_published_workflow(app) + if not workflow: + return [] + results = [] + for chatflow_memory_variable in raw_results: + spec = next( + (spec for spec in workflow.memory_blocks if spec.id == chatflow_memory_variable.memory_id), + None ) + if spec: + results.append( + MemoryBlockWithVisibility( + id=chatflow_memory_variable.memory_id, + name=chatflow_memory_variable.name, + value=chatflow_memory_variable.value, + end_user_editable=spec.end_user_editable, + end_user_visible=spec.end_user_visible, + ) + ) + return results @staticmethod - def 
_submit_sync_memory_batch_update(workflow, - sync_blocks: list[MemoryBlockSpec], - conversation_id: str, - variable_pool: VariablePool, - is_draft: bool = False): - """Submit sync memory batch update task""" + def _should_update_memory( + memory_block: MemoryBlock, + visible_history: Sequence[PromptMessage] + ) -> bool: + return len(visible_history) > memory_block.spec.update_turns - # Execute batch update asynchronously using thread + @staticmethod + def _app_submit_async_memory_update( + block: MemoryBlock, + visible_messages: Sequence[PromptMessage], + is_draft: bool + ): + thread = threading.Thread( + target=ChatflowMemoryService._perform_memory_update, + kwargs={ + 'memory_block': block, + 'visible_messages': visible_messages, + 'variable_pool': VariablePool(), + 'is_draft': is_draft + }, + ) + thread.start() + + @staticmethod + def _app_submit_sync_memory_batch_update( + sync_blocks: Sequence[MemoryBlock], + app_id: str, + conversation_id: str, + visible_messages: Sequence[PromptMessage], + is_draft: bool + ): + """Submit sync memory batch update task""" thread = threading.Thread( target=ChatflowMemoryService._batch_update_sync_memory, kwargs={ - 'workflow': workflow, 'sync_blocks': sync_blocks, + 'app_id': app_id, 'conversation_id': conversation_id, + 'visible_messages': visible_messages, + 'is_draft': is_draft + }, + ) + thread.start() + + @staticmethod + def _batch_update_sync_memory( + sync_blocks: Sequence[MemoryBlock], + app_id: str, + conversation_id: str, + visible_messages: Sequence[PromptMessage], + is_draft: bool + ): + try: + lock_key = _get_memory_sync_lock_key(app_id, conversation_id) + with redis_client.lock(lock_key, timeout=120): + threads = [] + for block in sync_blocks: + thread = threading.Thread( + target=ChatflowMemoryService._perform_memory_update, + kwargs={ + 'memory_block': block, + 'visible_messages': visible_messages, + 'variable_pool': VariablePool(), + 'is_draft': is_draft + }, + ) + threads.append(thread) + for thread in 
threads: + thread.start() + for thread in threads: + thread.join() + except Exception as e: + logger.exception("Error batch updating memory", exc_info=e) + + @staticmethod + def _update_node_memory_sync( + memory_block: MemoryBlock, + visible_messages: Sequence[PromptMessage], + variable_pool: VariablePool, + is_draft: bool + ): + ChatflowMemoryService._perform_memory_update( + memory_block=memory_block, + visible_messages=visible_messages, + variable_pool=variable_pool, + is_draft=is_draft + ) + + @staticmethod + def _update_node_memory_async( + memory_block: MemoryBlock, + visible_messages: Sequence[PromptMessage], + variable_pool: VariablePool, + is_draft: bool = False + ): + thread = threading.Thread( + target=ChatflowMemoryService._perform_memory_update, + kwargs={ + 'memory_block': memory_block, + 'visible_messages': visible_messages, 'variable_pool': variable_pool, 'is_draft': is_draft }, @@ -561,39 +448,43 @@ class ChatflowMemoryService: thread.start() @staticmethod - def _batch_update_sync_memory(*, workflow, - sync_blocks: list[MemoryBlockSpec], - conversation_id: str, - variable_pool: VariablePool, - is_draft: bool = False): - """Batch update sync memory (with Redis lock)""" - from concurrent.futures import ThreadPoolExecutor + def _perform_memory_update( + memory_block: MemoryBlock, + variable_pool: VariablePool, + visible_messages: Sequence[PromptMessage], + is_draft: bool + ): + updated_value = LLMGenerator.update_memory_block( + tenant_id=memory_block.tenant_id, + visible_history=ChatflowMemoryService._format_chat_history(visible_messages), + memory_block=memory_block, + memory_spec=memory_block.spec, + ) + updated_memory = MemoryBlock( + tenant_id=memory_block.tenant_id, + value=updated_value, + spec=memory_block.spec, + app_id=memory_block.app_id, + conversation_id=memory_block.conversation_id, + node_id=memory_block.node_id + ) + ChatflowMemoryService.save_memory(updated_memory, variable_pool, is_draft) - lock_key = 
_get_memory_sync_lock_key(workflow.app_id, conversation_id) + @staticmethod + def _format_chat_history(messages: Sequence[PromptMessage]) -> Sequence[tuple[str, str]]: + result = [] + for message in messages: + result.append((str(message.role.value), message.get_text_content())) + return result - # Use Redis lock context manager (30 seconds timeout) - with redis_client.lock(lock_key, timeout=30): - try: - # Update all sync memory in parallel - with ThreadPoolExecutor(max_workers=5) as executor: - futures = [] - for block in sync_blocks: - future = executor.submit( - ChatflowMemoryService._update_app_single_memory, - tenant_id=workflow.tenant_id, - app_id=workflow.app_id, - memory_block_spec=block, - conversation_id=conversation_id, - variable_pool=variable_pool, - is_draft=is_draft - ) - futures.append(future) +def _get_memory_sync_lock_key(app_id: str, conversation_id: str) -> str: + """Generate Redis lock key for memory sync updates - # Wait for all updates to complete - for future in futures: - try: - future.result() - except Exception as e: - logger.exception("Failed to update memory", exc_info=e) - except Exception as e: - logger.exception("Failed to update sync memory for app %s", workflow.app_id, exc_info=e) + Args: + app_id: Application ID + conversation_id: Conversation ID + + Returns: + Formatted lock key + """ + return f"memory_sync_update:{app_id}:{conversation_id}" diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index 426637d84e..19e6361284 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -761,9 +761,9 @@ def _fetch_memory_blocks(workflow: Workflow, conversation_id: str, is_draft: boo is_draft=is_draft, ) for memory in memories: - if memory.scope == MemoryScope.APP: - memory_blocks[memory.memory_id] = memory.value + if memory.spec.scope == MemoryScope.APP: + memory_blocks[memory.spec.id] = memory.value else: # NODE scope - memory_blocks[f"{memory.node_id}.{memory.memory_id}"] = 
memory.value + memory_blocks[f"{memory.node_id}.{memory.spec.id}"] = memory.value return memory_blocks From 48f3c69c693c5db5f4efefb68dfee5d604e5fffa Mon Sep 17 00:00:00 2001 From: Stream Date: Fri, 22 Aug 2025 17:54:18 +0800 Subject: [PATCH 26/82] fix: fix bugs check by Claude Code --- .../service_api/app/chatflow_memory.py | 21 ++++++++++--------- api/controllers/web/chatflow_memory.py | 21 ++++++++++--------- api/services/chatflow_memory_service.py | 4 ++-- 3 files changed, 24 insertions(+), 22 deletions(-) diff --git a/api/controllers/service_api/app/chatflow_memory.py b/api/controllers/service_api/app/chatflow_memory.py index 1a7ab6733e..d82820d252 100644 --- a/api/controllers/service_api/app/chatflow_memory.py +++ b/api/controllers/service_api/app/chatflow_memory.py @@ -39,17 +39,18 @@ class MemoryEditApi(Resource): if not memory_spec: return {'error': 'Memory not found'}, 404 with Session(db.engine) as session: - ChatflowMemoryVariable( - tenant_id=app_model.tenant_id, - app_id=app_model.id, - node_id=args['node_id'], - memory_id=args['id'], - name=memory_spec.name, - value=args['update'], - scope=memory_spec.scope, - term=memory_spec.term, + session.merge( + ChatflowMemoryVariable( + tenant_id=app_model.tenant_id, + app_id=app_model.id, + node_id=args['node_id'], + memory_id=args['id'], + name=memory_spec.name, + value=args['update'], + scope=memory_spec.scope, + term=memory_spec.term, + ) ) - session.add(memory_spec) session.commit() return '', 204 diff --git a/api/controllers/web/chatflow_memory.py b/api/controllers/web/chatflow_memory.py index 92b259c498..43c330ab50 100644 --- a/api/controllers/web/chatflow_memory.py +++ b/api/controllers/web/chatflow_memory.py @@ -39,17 +39,18 @@ class MemoryEditApi(WebApiResource): if not memory_spec.end_user_editable: return {'error': 'Memory not editable'}, 403 with Session(db.engine) as session: - ChatflowMemoryVariable( - tenant_id=app_model.tenant_id, - app_id=app_model.id, - node_id=args['node_id'], - 
memory_id=args['id'], - name=memory_spec.name, - value=args['update'], - scope=memory_spec.scope, - term=memory_spec.term, + session.merge( + ChatflowMemoryVariable( + tenant_id=app_model.tenant_id, + app_id=app_model.id, + node_id=args['node_id'], + memory_id=args['id'], + name=memory_spec.name, + value=args['update'], + scope=memory_spec.scope, + term=memory_spec.term, + ) ) - session.add(memory_spec) session.commit() return '', 204 diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 9bb0db6e9d..0454544699 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -156,7 +156,7 @@ class ChatflowMemoryService: ChatflowMemoryVariable.tenant_id == tenant_id, ChatflowMemoryVariable.app_id == app_id, ChatflowMemoryVariable.node_id == \ - (node_id if spec.term == MemoryScope.NODE else None), + (node_id if spec.scope == MemoryScope.NODE else None), ChatflowMemoryVariable.conversation_id == \ (conversation_id if spec.term == MemoryTerm.SESSION else None), ) @@ -344,7 +344,7 @@ class ChatflowMemoryService: memory_block: MemoryBlock, visible_history: Sequence[PromptMessage] ) -> bool: - return len(visible_history) > memory_block.spec.update_turns + return len(visible_history) >= memory_block.spec.update_turns @staticmethod def _app_submit_async_memory_update( From 05d231ad3305b373c5ebcd57a3a2cee3040461b5 Mon Sep 17 00:00:00 2001 From: Stream Date: Fri, 22 Aug 2025 19:59:17 +0800 Subject: [PATCH 27/82] fix: fix bugs check by Claude Code --- .../service_api/app/chatflow_memory.py | 33 ++++++++++++------- api/controllers/web/chatflow_memory.py | 33 ++++++++++++------- api/core/workflow/entities/variable_pool.py | 2 +- api/services/chatflow_memory_service.py | 6 ++-- 4 files changed, 46 insertions(+), 28 deletions(-) diff --git a/api/controllers/service_api/app/chatflow_memory.py b/api/controllers/service_api/app/chatflow_memory.py index d82820d252..10db3231f2 100644 --- 
a/api/controllers/service_api/app/chatflow_memory.py +++ b/api/controllers/service_api/app/chatflow_memory.py @@ -29,7 +29,7 @@ class MemoryEditApi(Resource): def put(self, app_model): parser = reqparse.RequestParser() parser.add_argument('id', type=str, required=True) - parser.add_argument('node_id', type=str, required=False) + parser.add_argument('node_id', type=str, required=False, default=None) parser.add_argument('update', type=str, required=True) args = parser.parse_args() workflow = WorkflowService().get_published_workflow(app_model) @@ -39,18 +39,27 @@ class MemoryEditApi(Resource): if not memory_spec: return {'error': 'Memory not found'}, 404 with Session(db.engine) as session: - session.merge( - ChatflowMemoryVariable( - tenant_id=app_model.tenant_id, - app_id=app_model.id, - node_id=args['node_id'], - memory_id=args['id'], - name=memory_spec.name, - value=args['update'], - scope=memory_spec.scope, - term=memory_spec.term, + existing = session.query(ChatflowMemoryVariable).filter_by( + memory_id=args['id'], + tenant_id=app_model.tenant_id, + app_id=app_model.id, + node_id=args['node_id'] + ).first() + if existing: + existing.value = args['update'] + else: + session.add( + ChatflowMemoryVariable( + tenant_id=app_model.tenant_id, + app_id=app_model.id, + node_id=args['node_id'], + memory_id=args['id'], + name=memory_spec.name, + value=args['update'], + scope=memory_spec.scope, + term=memory_spec.term, + ) ) - ) session.commit() return '', 204 diff --git a/api/controllers/web/chatflow_memory.py b/api/controllers/web/chatflow_memory.py index 43c330ab50..078040b204 100644 --- a/api/controllers/web/chatflow_memory.py +++ b/api/controllers/web/chatflow_memory.py @@ -27,7 +27,7 @@ class MemoryEditApi(WebApiResource): def put(self, app_model): parser = reqparse.RequestParser() parser.add_argument('id', type=str, required=True) - parser.add_argument('node_id', type=str, required=False) + parser.add_argument('node_id', type=str, required=False, default=None) 
parser.add_argument('update', type=str, required=True) args = parser.parse_args() workflow = WorkflowService().get_published_workflow(app_model) @@ -39,18 +39,27 @@ class MemoryEditApi(WebApiResource): if not memory_spec.end_user_editable: return {'error': 'Memory not editable'}, 403 with Session(db.engine) as session: - session.merge( - ChatflowMemoryVariable( - tenant_id=app_model.tenant_id, - app_id=app_model.id, - node_id=args['node_id'], - memory_id=args['id'], - name=memory_spec.name, - value=args['update'], - scope=memory_spec.scope, - term=memory_spec.term, + existing = session.query(ChatflowMemoryVariable).filter_by( + memory_id=args['id'], + tenant_id=app_model.tenant_id, + app_id=app_model.id, + node_id=args['node_id'] + ).first() + if existing: + existing.value = args['update'] + else: + session.add( + ChatflowMemoryVariable( + tenant_id=app_model.tenant_id, + app_id=app_model.id, + node_id=args['node_id'], + memory_id=args['id'], + name=memory_spec.name, + value=args['update'], + scope=memory_spec.scope, + term=memory_spec.term, + ) ) - ) session.commit() return '', 204 diff --git a/api/core/workflow/entities/variable_pool.py b/api/core/workflow/entities/variable_pool.py index 67f5551dea..830cefdcd0 100644 --- a/api/core/workflow/entities/variable_pool.py +++ b/api/core/workflow/entities/variable_pool.py @@ -62,7 +62,7 @@ class VariablePool(BaseModel): self.add((CONVERSATION_VARIABLE_NODE_ID, var.name), var) # Add memory blocks to the variable pool for memory_id, memory_value in self.memory_blocks.items(): - self.add(['memory_block', memory_id], memory_value) + self.add([CONVERSATION_VARIABLE_NODE_ID, memory_id], memory_value) def add(self, selector: Sequence[str], value: Any, /) -> None: """ diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 0454544699..f979acf234 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -60,7 +60,7 @@ class 
ChatflowMemoryService: @staticmethod def save_memory(memory: MemoryBlock, variable_pool: VariablePool, is_draft: bool) -> None: - key = f"{memory.node_id}:{memory.spec.id}" if memory.node_id else memory.spec.id + key = f"{memory.node_id}.{memory.spec.id}" if memory.node_id else memory.spec.id variable_pool.add([MEMORY_BLOCK_VARIABLE_NODE_ID, key], memory.value) with Session(db.engine) as session: @@ -280,13 +280,13 @@ class ChatflowMemoryService: return True @staticmethod - def wait_for_sync_memory_completion(workflow, conversation_id: str): + def wait_for_sync_memory_completion(workflow: Workflow, conversation_id: str): """Wait for sync memory update to complete, maximum 50 seconds""" memory_blocks = workflow.memory_blocks sync_memory_blocks = [ block for block in memory_blocks - if block.scope == MemoryScope.APP and block.update_mode == "sync" + if block.scope == MemoryScope.APP and block.schedule_mode == MemoryScheduleMode.SYNC ] if not sync_memory_blocks: From 11b6ea742de2fb1f0501f2412173395cbfc220aa Mon Sep 17 00:00:00 2001 From: Stream Date: Fri, 22 Aug 2025 20:43:49 +0800 Subject: [PATCH 28/82] feat: add index for data tables --- api/models/chatflow_memory.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/api/models/chatflow_memory.py b/api/models/chatflow_memory.py index f0b36d0d69..2c964c4766 100644 --- a/api/models/chatflow_memory.py +++ b/api/models/chatflow_memory.py @@ -10,6 +10,10 @@ from .types import StringUUID class ChatflowMemoryVariable(Base): __tablename__ = "chatflow_memory_variables" + __table_args__ = ( + sa.PrimaryKeyConstraint("id", name="chatflow_memory_variables_pkey"), + sa.Index("chatflow_memory_variables_memory_id_idx", "tenant_id", "app_id", "node_id", "memory_id"), + ) id: Mapped[str] = mapped_column(StringUUID, primary_key=True, server_default=sa.text("uuid_generate_v4()")) tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) @@ -30,6 +34,13 @@ class ChatflowMemoryVariable(Base): class 
ChatflowConversation(Base): __tablename__ = "chatflow_conversations" + __table_args__ = ( + sa.PrimaryKeyConstraint("id", name="chatflow_conversations_pkey"), + sa.Index( + "chatflow_conversations_original_conversation_id_idx", + "tenant_id", "app_id", "node_id", "original_conversation_id" + ), + ) id: Mapped[str] = mapped_column(StringUUID, primary_key=True, server_default=sa.text("uuid_generate_v4()")) tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) @@ -46,6 +57,10 @@ class ChatflowConversation(Base): class ChatflowMessage(Base): __tablename__ = "chatflow_messages" + __table_args__ = ( + sa.PrimaryKeyConstraint("id", name="chatflow_messages_pkey"), + sa.Index("chatflow_messages_version_idx", "conversation_id", "index", "version"), + ) id: Mapped[str] = mapped_column(StringUUID, primary_key=True, server_default=sa.text("uuid_generate_v4()")) conversation_id: Mapped[str] = mapped_column(StringUUID, nullable=False) From 29f56cf0cf39734fb48eb05fe67388e9b71eacbf Mon Sep 17 00:00:00 2001 From: Stream Date: Fri, 22 Aug 2025 21:07:54 +0800 Subject: [PATCH 29/82] chore: add database migration --- .../versions/2025_08_22_2103-f3747f1446a4_.py | 83 +++++++++++++++++++ api/models/__init__.py | 4 + 2 files changed, 87 insertions(+) create mode 100644 api/migrations/versions/2025_08_22_2103-f3747f1446a4_.py diff --git a/api/migrations/versions/2025_08_22_2103-f3747f1446a4_.py b/api/migrations/versions/2025_08_22_2103-f3747f1446a4_.py new file mode 100644 index 0000000000..3425a39417 --- /dev/null +++ b/api/migrations/versions/2025_08_22_2103-f3747f1446a4_.py @@ -0,0 +1,83 @@ +"""empty message + +Revision ID: f3747f1446a4 +Revises: 3803626caa7c +Create Date: 2025-08-22 21:03:32.462487 + +""" +from alembic import op +import models as models +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision = 'f3747f1446a4' +down_revision = '3803626caa7c' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table('chatflow_conversations', + sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('tenant_id', models.types.StringUUID(), nullable=False), + sa.Column('app_id', models.types.StringUUID(), nullable=False), + sa.Column('node_id', sa.Text(), nullable=True), + sa.Column('original_conversation_id', models.types.StringUUID(), nullable=True), + sa.Column('conversation_metadata', sa.Text(), nullable=False), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.PrimaryKeyConstraint('id', name='chatflow_conversations_pkey') + ) + with op.batch_alter_table('chatflow_conversations', schema=None) as batch_op: + batch_op.create_index('chatflow_conversations_original_conversation_id_idx', ['tenant_id', 'app_id', 'node_id', 'original_conversation_id'], unique=False) + + op.create_table('chatflow_memory_variables', + sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('tenant_id', models.types.StringUUID(), nullable=False), + sa.Column('app_id', models.types.StringUUID(), nullable=True), + sa.Column('conversation_id', models.types.StringUUID(), nullable=True), + sa.Column('node_id', sa.Text(), nullable=True), + sa.Column('memory_id', sa.Text(), nullable=False), + sa.Column('value', sa.Text(), nullable=False), + sa.Column('name', sa.Text(), nullable=False), + sa.Column('scope', sa.String(length=10), nullable=False), + sa.Column('term', sa.String(length=20), nullable=False), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.Column('updated_at', 
sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.PrimaryKeyConstraint('id', name='chatflow_memory_variables_pkey') + ) + with op.batch_alter_table('chatflow_memory_variables', schema=None) as batch_op: + batch_op.create_index('chatflow_memory_variables_memory_id_idx', ['tenant_id', 'app_id', 'node_id', 'memory_id'], unique=False) + + op.create_table('chatflow_messages', + sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('conversation_id', models.types.StringUUID(), nullable=False), + sa.Column('index', sa.Integer(), nullable=False), + sa.Column('version', sa.Integer(), nullable=False), + sa.Column('data', sa.Text(), nullable=False), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.PrimaryKeyConstraint('id', name='chatflow_messages_pkey') + ) + with op.batch_alter_table('chatflow_messages', schema=None) as batch_op: + batch_op.create_index('chatflow_messages_version_idx', ['conversation_id', 'index', 'version'], unique=False) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table('chatflow_messages', schema=None) as batch_op: + batch_op.drop_index('chatflow_messages_version_idx') + + op.drop_table('chatflow_messages') + with op.batch_alter_table('chatflow_memory_variables', schema=None) as batch_op: + batch_op.drop_index('chatflow_memory_variables_memory_id_idx') + + op.drop_table('chatflow_memory_variables') + with op.batch_alter_table('chatflow_conversations', schema=None) as batch_op: + batch_op.drop_index('chatflow_conversations_original_conversation_id_idx') + + op.drop_table('chatflow_conversations') + # ### end Alembic commands ### diff --git a/api/models/__init__.py b/api/models/__init__.py index 1b4bdd32e4..a7e76af3f3 100644 --- a/api/models/__init__.py +++ b/api/models/__init__.py @@ -9,6 +9,7 @@ from .account import ( TenantStatus, ) from .api_based_extension import APIBasedExtension, APIBasedExtensionPoint +from .chatflow_memory import ChatflowMemoryVariable, ChatflowConversation, ChatflowMessage from .dataset import ( AppDatasetJoin, Dataset, @@ -177,5 +178,8 @@ __all__ = [ "WorkflowRunTriggeredFrom", "WorkflowToolProvider", "WorkflowType", + "ChatflowMemoryVariable", + "ChatflowConversation", + "ChatflowMessage", "db", ] From 008f778e8f7b87d6cf17d4defad29daf185d1576 Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 25 Aug 2025 15:16:42 +0800 Subject: [PATCH 30/82] fix: fix mypy --- api/controllers/service_api/app/chatflow_memory.py | 2 +- api/controllers/web/chatflow_memory.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/api/controllers/service_api/app/chatflow_memory.py b/api/controllers/service_api/app/chatflow_memory.py index 10db3231f2..c3e9e87c0a 100644 --- a/api/controllers/service_api/app/chatflow_memory.py +++ b/api/controllers/service_api/app/chatflow_memory.py @@ -1,4 +1,4 @@ -from flask_restful import Resource, reqparse +from flask_restx import Resource, reqparse from sqlalchemy.orm import Session from controllers.service_api import api diff --git 
a/api/controllers/web/chatflow_memory.py b/api/controllers/web/chatflow_memory.py index 078040b204..d8fcd49b53 100644 --- a/api/controllers/web/chatflow_memory.py +++ b/api/controllers/web/chatflow_memory.py @@ -1,4 +1,4 @@ -from flask_restful import reqparse +from flask_restx import reqparse from sqlalchemy.orm.session import Session from controllers.web import api From ab389eaa8e7e206aa0a00634a4ef6bf817798602 Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 25 Aug 2025 15:17:01 +0800 Subject: [PATCH 31/82] fix: fix ruff --- api/models/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/api/models/__init__.py b/api/models/__init__.py index a7e76af3f3..2098fa74e0 100644 --- a/api/models/__init__.py +++ b/api/models/__init__.py @@ -9,7 +9,7 @@ from .account import ( TenantStatus, ) from .api_based_extension import APIBasedExtension, APIBasedExtensionPoint -from .chatflow_memory import ChatflowMemoryVariable, ChatflowConversation, ChatflowMessage +from .chatflow_memory import ChatflowConversation, ChatflowMemoryVariable, ChatflowMessage from .dataset import ( AppDatasetJoin, Dataset, @@ -111,6 +111,9 @@ __all__ = [ "BuiltinToolProvider", "CeleryTask", "CeleryTaskSet", + "ChatflowConversation", + "ChatflowMemoryVariable", + "ChatflowMessage", "Conversation", "ConversationVariable", "CreatorUserRole", @@ -178,8 +181,5 @@ __all__ = [ "WorkflowRunTriggeredFrom", "WorkflowToolProvider", "WorkflowType", - "ChatflowMemoryVariable", - "ChatflowConversation", - "ChatflowMessage", "db", ] From 3b868a1ceccd83e477be3c6b822d15adaa6381b3 Mon Sep 17 00:00:00 2001 From: Stream Date: Tue, 26 Aug 2025 11:29:00 +0800 Subject: [PATCH 32/82] feat: integrate VariablePool into memory update process --- api/core/app/apps/advanced_chat/app_runner.py | 5 +++-- api/core/llm_generator/llm_generator.py | 5 ++++- api/services/chatflow_memory_service.py | 14 +++++++++++--- 3 files changed, 18 insertions(+), 6 deletions(-) diff --git 
a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py index cbb7ad4fd6..f2340386cd 100644 --- a/api/core/app/apps/advanced_chat/app_runner.py +++ b/api/core/app/apps/advanced_chat/app_runner.py @@ -187,7 +187,7 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): self._handle_event(workflow_entry, event) try: - self._check_app_memory_updates() + self._check_app_memory_updates(variable_pool) except Exception as e: logger.exception("Failed to check app memory updates", exc_info=e) @@ -452,11 +452,12 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): tenant_id=self._workflow.tenant_id ) - def _check_app_memory_updates(self): + def _check_app_memory_updates(self, variable_pool: VariablePool): is_draft = (self.application_generate_entity.invoke_from == InvokeFrom.DEBUGGER) ChatflowMemoryService.update_app_memory_if_needed( workflow=self._workflow, conversation_id=self.conversation.id, + variable_pool=variable_pool, is_draft=is_draft ) diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index b7c5b0993a..88479a8502 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -29,6 +29,7 @@ from core.ops.entities.trace_entity import TraceTaskName from core.ops.ops_trace_manager import TraceQueueManager, TraceTask from core.ops.utils import measure_time from core.prompt.utils.prompt_template_parser import PromptTemplateParser +from core.workflow.entities.variable_pool import VariablePool from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey from core.workflow.graph_engine.entities.event import AgentLogEvent from models import App, Message, WorkflowNodeExecutionModel, db @@ -579,6 +580,7 @@ class LLMGenerator: def update_memory_block( tenant_id: str, visible_history: Sequence[tuple[str, str]], + variable_pool: VariablePool, memory_block: MemoryBlock, memory_spec: MemoryBlockSpec ) -> str: @@ -591,11 
+593,12 @@ class LLMGenerator: formatted_history = "" for sender, message in visible_history: formatted_history += f"{sender}: {message}\n" + filled_instruction = variable_pool.convert_template(memory_spec.instruction).text formatted_prompt = PromptTemplateParser(MEMORY_UPDATE_PROMPT).format( inputs={ "formatted_history": formatted_history, "current_value": memory_block.value, - "instruction": memory_spec.instruction, + "instruction": filled_instruction, } ) llm_result = cast( diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index f979acf234..eda135a6e1 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -184,6 +184,7 @@ class ChatflowMemoryService: def update_app_memory_if_needed( workflow: Workflow, conversation_id: str, + variable_pool: VariablePool, is_draft: bool ): visible_messages = ChatflowHistoryService.get_visible_chat_history( @@ -218,6 +219,7 @@ class ChatflowMemoryService: ChatflowMemoryService._app_submit_async_memory_update( block=memory_block, is_draft=is_draft, + variable_pool=variable_pool, visible_messages=visible_messages ) @@ -228,7 +230,8 @@ class ChatflowMemoryService: is_draft=is_draft, conversation_id=conversation_id, app_id=workflow.app_id, - visible_messages=visible_messages + visible_messages=visible_messages, + variable_pool=variable_pool ) @staticmethod @@ -350,6 +353,7 @@ class ChatflowMemoryService: def _app_submit_async_memory_update( block: MemoryBlock, visible_messages: Sequence[PromptMessage], + variable_pool: VariablePool, is_draft: bool ): thread = threading.Thread( @@ -357,7 +361,7 @@ class ChatflowMemoryService: kwargs={ 'memory_block': block, 'visible_messages': visible_messages, - 'variable_pool': VariablePool(), + 'variable_pool': variable_pool, 'is_draft': is_draft }, ) @@ -369,6 +373,7 @@ class ChatflowMemoryService: app_id: str, conversation_id: str, visible_messages: Sequence[PromptMessage], + variable_pool: VariablePool, 
is_draft: bool ): """Submit sync memory batch update task""" @@ -379,6 +384,7 @@ class ChatflowMemoryService: 'app_id': app_id, 'conversation_id': conversation_id, 'visible_messages': visible_messages, + 'variable_pool': variable_pool, 'is_draft': is_draft }, ) @@ -390,6 +396,7 @@ class ChatflowMemoryService: app_id: str, conversation_id: str, visible_messages: Sequence[PromptMessage], + variable_pool: VariablePool, is_draft: bool ): try: @@ -402,7 +409,7 @@ class ChatflowMemoryService: kwargs={ 'memory_block': block, 'visible_messages': visible_messages, - 'variable_pool': VariablePool(), + 'variable_pool': variable_pool, 'is_draft': is_draft }, ) @@ -457,6 +464,7 @@ class ChatflowMemoryService: updated_value = LLMGenerator.update_memory_block( tenant_id=memory_block.tenant_id, visible_history=ChatflowMemoryService._format_chat_history(visible_messages), + variable_pool=variable_pool, memory_block=memory_block, memory_spec=memory_block.spec, ) From 8685f055ea930be52e6f174e3a70c85789d15b19 Mon Sep 17 00:00:00 2001 From: Stream Date: Tue, 26 Aug 2025 11:31:59 +0800 Subject: [PATCH 33/82] fix: use model parameters from memory_spec in llm_generator --- api/core/llm_generator/llm_generator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index 88479a8502..e9e3f7c5d8 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -605,7 +605,7 @@ class LLMGenerator: LLMResult, model_instance.invoke_llm( prompt_messages=[UserPromptMessage(content=formatted_prompt)], - model_parameters={"temperature": 0.01, "max_tokens": 2000}, + model_parameters=memory_spec.model.completion_params, stream=False, ) ) From ad07d6399435176d51d1626228424806db040e71 Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 15 Sep 2025 13:47:04 +0800 Subject: [PATCH 34/82] feat: add VersionedMemoryValueModel --- api/core/variables/segments.py | 26 
+++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/api/core/variables/segments.py b/api/core/variables/segments.py index a99f5eece3..03d4127052 100644 --- a/api/core/variables/segments.py +++ b/api/core/variables/segments.py @@ -1,7 +1,7 @@ import json import sys from collections.abc import Mapping, Sequence -from typing import Annotated, Any, TypeAlias +from typing import Annotated, Any, TypeAlias, Self, Optional from pydantic import BaseModel, ConfigDict, Discriminator, Tag, field_validator @@ -197,6 +197,30 @@ class ArrayFileSegment(ArraySegment): def text(self) -> str: return "" +class VersionedMemoryValue(BaseModel): + current_value: str + versions: Mapping[str, str] + + model_config = ConfigDict(frozen=True) + + def add_version( + self, + new_value: str, + version_name: Optional[str] = None + ) -> Self: + if version_name is None: + version_name = str(len(self.versions) + 1) + if version_name in self.versions.keys(): + raise ValueError(f"Version '{version_name}' already exists.") + self.current_value = new_value + return VersionedMemoryValue( + current_value=new_value, + versions={ + version_name: new_value, + **self.versions, + } + ) + def get_segment_discriminator(v: Any) -> SegmentType | None: if isinstance(v, Segment): From 03eef65b253d8dd5c7d189961fda5c501c5d9acb Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 15 Sep 2025 13:57:24 +0800 Subject: [PATCH 35/82] feat: add VersionedMemorySegment and VersionedMemoryVariable --- api/core/variables/segments.py | 17 +++++++++++++++++ api/core/variables/types.py | 2 ++ api/core/variables/variables.py | 5 ++++- 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/api/core/variables/segments.py b/api/core/variables/segments.py index 03d4127052..aec9959dae 100644 --- a/api/core/variables/segments.py +++ b/api/core/variables/segments.py @@ -221,6 +221,22 @@ class VersionedMemoryValue(BaseModel): } ) +class VersionedMemorySegment(Segment): + value_type: SegmentType = 
SegmentType.VERSIONED_MEMORY + value: VersionedMemoryValue + + @property + def text(self) -> str: + return self.value.current_value + + @property + def log(self) -> str: + return self.value.current_value + + @property + def markdown(self) -> str: + return self.value.current_value + def get_segment_discriminator(v: Any) -> SegmentType | None: if isinstance(v, Segment): @@ -260,6 +276,7 @@ SegmentUnion: TypeAlias = Annotated[ | Annotated[ArrayNumberSegment, Tag(SegmentType.ARRAY_NUMBER)] | Annotated[ArrayObjectSegment, Tag(SegmentType.ARRAY_OBJECT)] | Annotated[ArrayFileSegment, Tag(SegmentType.ARRAY_FILE)] + | Annotated[VersionedMemorySegment, Tag(SegmentType.VERSIONED_MEMORY)] ), Discriminator(get_segment_discriminator), ] diff --git a/api/core/variables/types.py b/api/core/variables/types.py index 6629056042..bf8af67b54 100644 --- a/api/core/variables/types.py +++ b/api/core/variables/types.py @@ -34,6 +34,8 @@ class SegmentType(StrEnum): ARRAY_OBJECT = "array[object]" ARRAY_FILE = "array[file]" + VERSIONED_MEMORY = "versioned_memory" + NONE = "none" GROUP = "group" diff --git a/api/core/variables/variables.py b/api/core/variables/variables.py index a31ebc848e..6154431460 100644 --- a/api/core/variables/variables.py +++ b/api/core/variables/variables.py @@ -20,7 +20,7 @@ from .segments import ( ObjectSegment, Segment, StringSegment, - get_segment_discriminator, + get_segment_discriminator, VersionedMemorySegment, ) from .types import SegmentType @@ -99,6 +99,8 @@ class FileVariable(FileSegment, Variable): class ArrayFileVariable(ArrayFileSegment, ArrayVariable): pass +class VersionedMemoryVariable(VersionedMemorySegment, Variable): + pass # The `VariableUnion`` type is used to enable serialization and deserialization with Pydantic. # Use `Variable` for type hinting when serialization is not required. 
@@ -120,6 +122,7 @@ VariableUnion: TypeAlias = Annotated[ | Annotated[ArrayObjectVariable, Tag(SegmentType.ARRAY_OBJECT)] | Annotated[ArrayFileVariable, Tag(SegmentType.ARRAY_FILE)] | Annotated[SecretVariable, Tag(SegmentType.SECRET)] + | Annotated[VersionedMemoryVariable, Tag(SegmentType.VERSIONED_MEMORY)] ), Discriminator(get_segment_discriminator), ] From d6d9554954700e4d63ad973927e98975df649434 Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 15 Sep 2025 14:19:44 +0800 Subject: [PATCH 36/82] fix: fix basedpyright errors --- api/core/variables/segments.py | 8 ++++---- api/models/workflow.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/api/core/variables/segments.py b/api/core/variables/segments.py index 109845b5ef..fa884ad12e 100644 --- a/api/core/variables/segments.py +++ b/api/core/variables/segments.py @@ -1,7 +1,7 @@ import json import sys from collections.abc import Mapping, Sequence -from typing import Annotated, Any, TypeAlias, Self +from typing import Annotated, Any, TypeAlias from pydantic import BaseModel, ConfigDict, Discriminator, Tag, field_validator @@ -211,8 +211,8 @@ class VersionedMemoryValue(BaseModel): def add_version( self, new_value: str, - version_name: Optional[str] = None - ) -> Self: + version_name: str | None = None + ) -> "VersionedMemoryValue": if version_name is None: version_name = str(len(self.versions) + 1) if version_name in self.versions.keys(): @@ -228,7 +228,7 @@ class VersionedMemoryValue(BaseModel): class VersionedMemorySegment(Segment): value_type: SegmentType = SegmentType.VERSIONED_MEMORY - value: VersionedMemoryValue + value: VersionedMemoryValue = None # type: ignore @property def text(self) -> str: diff --git a/api/models/workflow.py b/api/models/workflow.py index 708aa18169..0de16a67d2 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -1277,7 +1277,7 @@ class WorkflowDraftVariable(Base): def new_memory_block_variable( *, app_id: str, - node_id: Optional[str] = None, + node_id: 
str | None = None, memory_id: str, name: str, value: str, From 9e0630f012f110140f46053949b05546dfb75242 Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 15 Sep 2025 15:30:08 +0800 Subject: [PATCH 37/82] fix: use correct description from spec --- api/services/chatflow_memory_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index eda135a6e1..720d26c34b 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -105,7 +105,7 @@ class ChatflowMemoryService: memory_id=memory.spec.id, name=memory.spec.name, value=memory.value, - description="" + description=memory.spec.description ) session.add(draft_var) session.commit() From 613d086f1e352702577488ec0f4854fc866341cd Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 15 Sep 2025 15:38:20 +0800 Subject: [PATCH 38/82] refactor: give VersionedMemoryValue a default value --- api/core/variables/segments.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/core/variables/segments.py b/api/core/variables/segments.py index fa884ad12e..2ab566b7b5 100644 --- a/api/core/variables/segments.py +++ b/api/core/variables/segments.py @@ -203,8 +203,8 @@ class ArrayFileSegment(ArraySegment): return "" class VersionedMemoryValue(BaseModel): - current_value: str - versions: Mapping[str, str] + current_value: str = None # type: ignore + versions: Mapping[str, str] = dict() model_config = ConfigDict(frozen=True) From 516b6b0fa8b10222bd7f95b15998909ef7ccc6ad Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 15 Sep 2025 15:39:38 +0800 Subject: [PATCH 39/82] refactor: use VersionedMemoryVariable in creation of WorkflowDraftVariable instead of StringVariable --- api/models/workflow.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/api/models/workflow.py b/api/models/workflow.py index 0de16a67d2..a6149a9e37 100644 --- a/api/models/workflow.py +++ 
b/api/models/workflow.py @@ -14,6 +14,7 @@ from core.file.constants import maybe_file_object from core.file.models import File from core.memory.entities import MemoryBlockSpec from core.variables import utils as variable_utils +from core.variables.segments import VersionedMemoryValue from core.variables.variables import FloatVariable, IntegerVariable, StringVariable from core.workflow.constants import ( CONVERSATION_VARIABLE_NODE_ID, @@ -1280,7 +1281,7 @@ class WorkflowDraftVariable(Base): node_id: str | None = None, memory_id: str, name: str, - value: str, + value: VersionedMemoryValue, description: str = "", ) -> "WorkflowDraftVariable": """Create a new memory block draft variable.""" @@ -1289,11 +1290,11 @@ class WorkflowDraftVariable(Base): app_id=app_id, node_id=MEMORY_BLOCK_VARIABLE_NODE_ID, name=name, - value=value, + value=value.model_dump_json(), description=description, selector=[MEMORY_BLOCK_VARIABLE_NODE_ID, memory_id] if node_id is None else [MEMORY_BLOCK_VARIABLE_NODE_ID, memory_id, node_id], - value_type=SegmentType.STRING, + value_type=SegmentType.VERSIONED_MEMORY, visible=True, editable=True, ) From 626e7b2211ae9ae7c48af86c221f5af1ebdd8782 Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 15 Sep 2025 15:41:33 +0800 Subject: [PATCH 40/82] refactor: use VersionedMemoryVariable in ChatflowMemoryService.save_memory --- api/services/chatflow_memory_service.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 720d26c34b..51dccd7c84 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -18,6 +18,7 @@ from core.memory.entities import ( ) from core.memory.errors import MemorySyncTimeoutError from core.model_runtime.entities.message_entities import PromptMessage +from core.variables.segments import VersionedMemoryValue from core.workflow.constants import MEMORY_BLOCK_VARIABLE_NODE_ID from 
core.workflow.entities.variable_pool import VariablePool from extensions.ext_database import db @@ -98,13 +99,15 @@ class ChatflowMemoryService: ) if existing_vars: draft_var = existing_vars[0] - draft_var.value = memory.value + draft_var.value = VersionedMemoryValue.model_validate_json(draft_var.value)\ + .add_version(memory.value)\ + .model_dump_json() else: draft_var = WorkflowDraftVariable.new_memory_block_variable( app_id=memory.app_id, memory_id=memory.spec.id, name=memory.spec.name, - value=memory.value, + value=VersionedMemoryValue().add_version(memory.value), description=memory.spec.description ) session.add(draft_var) From 2c765ccfae5beac5c6aa372a6a014c40a63285fc Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 15 Sep 2025 15:47:02 +0800 Subject: [PATCH 41/82] refactor: use VersionedMemoryVariable in ChatflowMemoryService.get_memory_by_spec --- api/services/chatflow_memory_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 51dccd7c84..34b14eba49 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -146,7 +146,7 @@ class ChatflowMemoryService: if draft_vars: draft_var = draft_vars[0] return MemoryBlock( - value=draft_var.value, + value=draft_var.get_value().text, tenant_id=tenant_id, app_id=app_id, conversation_id=conversation_id, From 8563ae5511690190fc0ca7be270c98bec91b6b02 Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 15 Sep 2025 16:13:07 +0800 Subject: [PATCH 42/82] feat: add inference for VersionedMemory type when deserializing --- api/factories/variable_factory.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/api/factories/variable_factory.py b/api/factories/variable_factory.py index 0274b6e89c..875eae5b13 100644 --- a/api/factories/variable_factory.py +++ b/api/factories/variable_factory.py @@ -20,7 +20,7 @@ from core.variables.segments import ( 
NoneSegment, ObjectSegment, Segment, - StringSegment, + StringSegment, VersionedMemorySegment, VersionedMemoryValue, ) from core.variables.types import SegmentType from core.variables.variables import ( @@ -38,7 +38,7 @@ from core.variables.variables import ( ObjectVariable, SecretVariable, StringVariable, - Variable, + Variable, VersionedMemoryVariable, ) from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, ENVIRONMENT_VARIABLE_NODE_ID @@ -66,6 +66,7 @@ SEGMENT_TO_VARIABLE_MAP = { NoneSegment: NoneVariable, ObjectSegment: ObjectVariable, StringSegment: StringVariable, + VersionedMemorySegment: VersionedMemoryVariable } @@ -182,6 +183,7 @@ _segment_factory: Mapping[SegmentType, type[Segment]] = { SegmentType.FILE: FileSegment, SegmentType.BOOLEAN: BooleanSegment, SegmentType.OBJECT: ObjectSegment, + SegmentType.VERSIONED_MEMORY: VersionedMemorySegment, # Array types SegmentType.ARRAY_ANY: ArrayAnySegment, SegmentType.ARRAY_STRING: ArrayStringSegment, @@ -248,6 +250,12 @@ def build_segment_with_type(segment_type: SegmentType, value: Any) -> Segment: else: raise TypeMismatchError(f"Type mismatch: expected {segment_type}, but got empty list") + if segment_type == SegmentType.VERSIONED_MEMORY: + return VersionedMemorySegment( + value_type=segment_type, + value=VersionedMemoryValue.model_validate(value) + ) + inferred_type = SegmentType.infer_segment_type(value) # Type compatibility checking if inferred_type is None: From f4f055fb36bba30816de997c323d8756c0c96bcd Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 15 Sep 2025 19:27:17 +0800 Subject: [PATCH 43/82] refactor: add version field to MemoryBlockWithVisibility --- api/core/memory/entities.py | 1 + 1 file changed, 1 insertion(+) diff --git a/api/core/memory/entities.py b/api/core/memory/entities.py index 654922d154..974d6c8f29 100644 --- a/api/core/memory/entities.py +++ b/api/core/memory/entities.py @@ -73,6 +73,7 @@ class MemoryBlockWithVisibility(BaseModel): id: str name: str value: str + version: 
int end_user_visible: bool end_user_editable: bool From e3903f34e4c427f034de3541b0752f1adc4c3efe Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 15 Sep 2025 19:27:41 +0800 Subject: [PATCH 44/82] refactor: add version field to ChatflowMemoryVariable table --- api/models/chatflow_memory.py | 1 + 1 file changed, 1 insertion(+) diff --git a/api/models/chatflow_memory.py b/api/models/chatflow_memory.py index 2c964c4766..cde48c5860 100644 --- a/api/models/chatflow_memory.py +++ b/api/models/chatflow_memory.py @@ -25,6 +25,7 @@ class ChatflowMemoryVariable(Base): name: Mapped[str] = mapped_column(sa.Text, nullable=False) scope: Mapped[str] = mapped_column(sa.String(10), nullable=False) # 'app' or 'node' term: Mapped[str] = mapped_column(sa.String(20), nullable=False) # 'session' or 'persistent' + version: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=1) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) updated_at: Mapped[datetime] = mapped_column( From 3d761a31892125a1602e83ec7ba9b0a8c4ae5714 Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 15 Sep 2025 19:28:22 +0800 Subject: [PATCH 45/82] refactor: make save_memory and get_memory_by_spec work on latest version --- api/services/chatflow_memory_service.py | 33 ++++++++++++------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 34b14eba49..3bb600ccc9 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -71,23 +71,22 @@ class ChatflowMemoryService: app_id=memory.app_id, node_id=memory.node_id, conversation_id=memory.conversation_id - ).first() - if existing: - existing.value = memory.value - else: - session.add( - ChatflowMemoryVariable( - memory_id=memory.spec.id, - tenant_id=memory.tenant_id, - app_id=memory.app_id, - node_id=memory.node_id, - conversation_id=memory.conversation_id, - 
name=memory.spec.name, - value=memory.value, - term=memory.spec.term, - scope=memory.spec.scope, - ) + ).order_by(ChatflowMemoryVariable.version.desc()).first() + new_version = 1 if not existing else existing.version + 1 + session.add( + ChatflowMemoryVariable( + memory_id=memory.spec.id, + tenant_id=memory.tenant_id, + app_id=memory.app_id, + node_id=memory.node_id, + conversation_id=memory.conversation_id, + name=memory.spec.name, + value=memory.value, + term=memory.spec.term, + scope=memory.spec.scope, + version=new_version, ) + ) session.commit() if is_draft: @@ -163,7 +162,7 @@ class ChatflowMemoryService: ChatflowMemoryVariable.conversation_id == \ (conversation_id if spec.term == MemoryTerm.SESSION else None), ) - ) + ).order_by(ChatflowMemoryVariable.version.desc()).limit(1) result = session.execute(stmt).scalar() if result: return MemoryBlock( From 54b272206ec3c54fb680cb22ec2607b2fcd47765 Mon Sep 17 00:00:00 2001 From: Stream Date: Tue, 16 Sep 2025 18:32:58 +0800 Subject: [PATCH 46/82] refactor: add version param to get_session_memories and get_persistent_memories --- api/services/chatflow_memory_service.py | 58 +++++++++++++++++++------ 1 file changed, 44 insertions(+), 14 deletions(-) diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 3bb600ccc9..7b12c2266d 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -34,27 +34,56 @@ logger = logging.getLogger(__name__) class ChatflowMemoryService: @staticmethod - def get_persistent_memories(app: App) -> Sequence[MemoryBlockWithVisibility]: - stmt = select(ChatflowMemoryVariable).where( - and_( - ChatflowMemoryVariable.tenant_id == app.tenant_id, - ChatflowMemoryVariable.app_id == app.id, - ChatflowMemoryVariable.conversation_id == None + def get_persistent_memories( + app: App, + version: int | None = None + ) -> Sequence[MemoryBlockWithVisibility]: + if version is None: + # If version not specified, get the 
latest version + stmt = select(ChatflowMemoryVariable).distinct(ChatflowMemoryVariable.memory_id).where( + and_( + ChatflowMemoryVariable.tenant_id == app.tenant_id, + ChatflowMemoryVariable.app_id == app.id, + ChatflowMemoryVariable.conversation_id == None + ) + ).order_by(ChatflowMemoryVariable.version.desc()) + else: + stmt = select(ChatflowMemoryVariable).where( + and_( + ChatflowMemoryVariable.tenant_id == app.tenant_id, + ChatflowMemoryVariable.app_id == app.id, + ChatflowMemoryVariable.conversation_id == None, + ChatflowMemoryVariable.version == version + ) ) - ) with Session(db.engine) as session: db_results = session.execute(stmt).all() return ChatflowMemoryService._with_visibility(app, [result[0] for result in db_results]) @staticmethod - def get_session_memories(app: App, conversation_id: str) -> Sequence[MemoryBlockWithVisibility]: - stmt = select(ChatflowMemoryVariable).where( - and_( - ChatflowMemoryVariable.tenant_id == app.tenant_id, - ChatflowMemoryVariable.app_id == app.id, - ChatflowMemoryVariable.conversation_id == conversation_id + def get_session_memories( + app: App, + conversation_id: str, + version: int | None = None + ) -> Sequence[MemoryBlockWithVisibility]: + if version is None: + # If version not specified, get the latest version + stmt = select(ChatflowMemoryVariable).distinct(ChatflowMemoryVariable.memory_id).where( + and_( + ChatflowMemoryVariable.tenant_id == app.tenant_id, + ChatflowMemoryVariable.app_id == app.id, + ChatflowMemoryVariable.conversation_id == conversation_id + ) + ).order_by(ChatflowMemoryVariable.version.desc()) + else: + stmt = select(ChatflowMemoryVariable).where( + and_( + ChatflowMemoryVariable.tenant_id == app.tenant_id, + ChatflowMemoryVariable.app_id == app.id, + ChatflowMemoryVariable.conversation_id == conversation_id, + ChatflowMemoryVariable.version == version + ) ) - ) with Session(db.engine) as session: db_results = session.execute(stmt).all() return ChatflowMemoryService._with_visibility(app, 
[result[0] for result in db_results]) @@ -340,6 +369,7 @@ class ChatflowMemoryService: value=chatflow_memory_variable.value, end_user_editable=spec.end_user_editable, end_user_visible=spec.end_user_visible, + version=chatflow_memory_variable.version ) ) return results From 3005cf3282101f404b9d1e05c93fec97656282b9 Mon Sep 17 00:00:00 2001 From: Stream Date: Tue, 16 Sep 2025 19:12:08 +0800 Subject: [PATCH 47/82] refactor: update MemoryApi(WebApiResource) for version --- api/controllers/web/chatflow_memory.py | 53 +++++++++++++------------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/api/controllers/web/chatflow_memory.py b/api/controllers/web/chatflow_memory.py index d8fcd49b53..c56f50dc35 100644 --- a/api/controllers/web/chatflow_memory.py +++ b/api/controllers/web/chatflow_memory.py @@ -1,8 +1,11 @@ from flask_restx import reqparse from sqlalchemy.orm.session import Session +from sympy import false from controllers.web import api from controllers.web.wraps import WebApiResource +from core.memory.entities import MemoryBlock +from core.workflow.entities.variable_pool import VariablePool from libs.helper import uuid_value from models import db from models.chatflow_memory import ChatflowMemoryVariable @@ -13,24 +16,33 @@ from services.workflow_service import WorkflowService class MemoryListApi(WebApiResource): def get(self, app_model): parser = reqparse.RequestParser() - parser.add_argument("conversation_id", required=False, type=uuid_value, location="args") + parser.add_argument("conversation_id", required=False, type=str | None, default=None) + parser.add_argument("memory_id", required=False, type=str | None, default=None) + parser.add_argument("version", required=False, type=int | None, default=None) args = parser.parse_args() conversation_id = args.get("conversation_id") + memory_id = args.get("memory_id") + version = args.get("version") - result = ChatflowMemoryService.get_persistent_memories(app_model) + result = 
ChatflowMemoryService.get_persistent_memories(app_model, version) if conversation_id: - result = [*result, *ChatflowMemoryService.get_session_memories(app_model, conversation_id)] - + result = [*result, *ChatflowMemoryService.get_session_memories(app_model, conversation_id, version)] + if memory_id: + result = [it for it in result if it.memory_id == memory_id] return [it for it in result if it.end_user_visible] class MemoryEditApi(WebApiResource): def put(self, app_model): parser = reqparse.RequestParser() parser.add_argument('id', type=str, required=True) - parser.add_argument('node_id', type=str, required=False, default=None) + parser.add_argument("conversation_id", type=str | None, required=False, default=None) + parser.add_argument('node_id', type=str | None, required=False, default=None) parser.add_argument('update', type=str, required=True) args = parser.parse_args() workflow = WorkflowService().get_published_workflow(app_model) + update = args.get("update") + conversation_id = args.get("conversation_id") + node_id = args.get("node_id") if not workflow: return {'error': 'Workflow not found'}, 404 memory_spec = next((it for it in workflow.memory_blocks if it.id == args['id']), None) @@ -38,29 +50,18 @@ class MemoryEditApi(WebApiResource): return {'error': 'Memory not found'}, 404 if not memory_spec.end_user_editable: return {'error': 'Memory not editable'}, 403 - with Session(db.engine) as session: - existing = session.query(ChatflowMemoryVariable).filter_by( - memory_id=args['id'], + ChatflowMemoryService.save_memory( + MemoryBlock( + spec=memory_spec, tenant_id=app_model.tenant_id, + value=update, + conversation_id=conversation_id, + node_id=node_id, app_id=app_model.id, - node_id=args['node_id'] - ).first() - if existing: - existing.value = args['update'] - else: - session.add( - ChatflowMemoryVariable( - tenant_id=app_model.tenant_id, - app_id=app_model.id, - node_id=args['node_id'], - memory_id=args['id'], - name=memory_spec.name, - value=args['update'], 
- scope=memory_spec.scope, - term=memory_spec.term, - ) - ) - session.commit() + ), + variable_pool=VariablePool(), + is_draft=False + ) return '', 204 api.add_resource(MemoryListApi, '/memories') From ac5dd1f45a78fe305f91f359da4acef9ce06f5b9 Mon Sep 17 00:00:00 2001 From: Stream Date: Tue, 16 Sep 2025 19:25:17 +0800 Subject: [PATCH 48/82] refactor: update MemoryApi(Resource) for version --- .../service_api/app/chatflow_memory.py | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/api/controllers/service_api/app/chatflow_memory.py b/api/controllers/service_api/app/chatflow_memory.py index c3e9e87c0a..5484e18f3b 100644 --- a/api/controllers/service_api/app/chatflow_memory.py +++ b/api/controllers/service_api/app/chatflow_memory.py @@ -3,6 +3,8 @@ from sqlalchemy.orm import Session from controllers.service_api import api from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token +from core.memory.entities import MemoryBlock +from core.workflow.entities.variable_pool import VariablePool from libs.helper import uuid_value from models import db from models.chatflow_memory import ChatflowMemoryVariable @@ -14,53 +16,51 @@ class MemoryListApi(Resource): @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True)) def get(self, app_model): parser = reqparse.RequestParser() - parser.add_argument("conversation_id", required=False, type=uuid_value, location="args") + parser.add_argument("conversation_id", required=False, type=str | None, default=None) + parser.add_argument("memory_id", required=False, type=str | None, default=None) + parser.add_argument("version", required=False, type=int | None, default=None) args = parser.parse_args() conversation_id = args.get("conversation_id") + memory_id = args.get("memory_id") + version = args.get("version") - result = ChatflowMemoryService.get_persistent_memories(app_model) + result = 
ChatflowMemoryService.get_persistent_memories(app_model, version) if conversation_id: - result = [*result, *ChatflowMemoryService.get_session_memories(app_model, conversation_id)] - - return result + result = [*result, *ChatflowMemoryService.get_session_memories(app_model, conversation_id, version)] + if memory_id: + result = [it for it in result if it.memory_id == memory_id] + return [it for it in result if it.end_user_visible] class MemoryEditApi(Resource): @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True)) def put(self, app_model): parser = reqparse.RequestParser() parser.add_argument('id', type=str, required=True) - parser.add_argument('node_id', type=str, required=False, default=None) + parser.add_argument("conversation_id", type=str | None, required=False, default=None) + parser.add_argument('node_id', type=str | None, required=False, default=None) parser.add_argument('update', type=str, required=True) args = parser.parse_args() workflow = WorkflowService().get_published_workflow(app_model) + update = args.get("update") + conversation_id = args.get("conversation_id") + node_id = args.get("node_id") if not workflow: return {'error': 'Workflow not found'}, 404 memory_spec = next((it for it in workflow.memory_blocks if it.id == args['id']), None) if not memory_spec: return {'error': 'Memory not found'}, 404 - with Session(db.engine) as session: - existing = session.query(ChatflowMemoryVariable).filter_by( - memory_id=args['id'], + ChatflowMemoryService.save_memory( + MemoryBlock( + spec=memory_spec, tenant_id=app_model.tenant_id, + value=update, + conversation_id=conversation_id, + node_id=node_id, app_id=app_model.id, - node_id=args['node_id'] - ).first() - if existing: - existing.value = args['update'] - else: - session.add( - ChatflowMemoryVariable( - tenant_id=app_model.tenant_id, - app_id=app_model.id, - node_id=args['node_id'], - memory_id=args['id'], - name=memory_spec.name, - value=args['update'], - 
scope=memory_spec.scope, - term=memory_spec.term, - ) - ) - session.commit() + ), + variable_pool=VariablePool(), + is_draft=False + ) return '', 204 api.add_resource(MemoryListApi, '/memories') From 394b7d09b8b927abe00c434b9fbcc3f65206b816 Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 22 Sep 2025 15:17:19 +0800 Subject: [PATCH 49/82] refactor: fix basedpyright/ruff errors --- api/controllers/service_api/app/chatflow_memory.py | 8 ++++---- api/controllers/web/chatflow_memory.py | 9 ++++----- api/core/app/apps/advanced_chat/app_runner.py | 5 ++++- api/core/memory/entities.py | 11 ++++++----- api/core/variables/segments.py | 6 ++++-- api/core/variables/variables.py | 5 ++++- api/core/workflow/entities/variable_pool.py | 1 - api/core/workflow/nodes/llm/node.py | 2 +- api/factories/variable_factory.py | 7 +++++-- api/services/chatflow_memory_service.py | 6 ++++-- api/services/workflow_service.py | 1 + 11 files changed, 37 insertions(+), 24 deletions(-) diff --git a/api/controllers/service_api/app/chatflow_memory.py b/api/controllers/service_api/app/chatflow_memory.py index 5484e18f3b..20ffed672e 100644 --- a/api/controllers/service_api/app/chatflow_memory.py +++ b/api/controllers/service_api/app/chatflow_memory.py @@ -1,13 +1,9 @@ from flask_restx import Resource, reqparse -from sqlalchemy.orm import Session from controllers.service_api import api from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token from core.memory.entities import MemoryBlock from core.workflow.entities.variable_pool import VariablePool -from libs.helper import uuid_value -from models import db -from models.chatflow_memory import ChatflowMemoryVariable from services.chatflow_memory_service import ChatflowMemoryService from services.workflow_service import WorkflowService @@ -31,6 +27,7 @@ class MemoryListApi(Resource): result = [it for it in result if it.memory_id == memory_id] return [it for it in result if it.end_user_visible] + class MemoryEditApi(Resource): 
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True)) def put(self, app_model): @@ -44,6 +41,8 @@ class MemoryEditApi(Resource): update = args.get("update") conversation_id = args.get("conversation_id") node_id = args.get("node_id") + if not isinstance(update, str): + return {'error': 'Invalid update'}, 400 if not workflow: return {'error': 'Workflow not found'}, 404 memory_spec = next((it for it in workflow.memory_blocks if it.id == args['id']), None) @@ -63,5 +62,6 @@ class MemoryEditApi(Resource): ) return '', 204 + api.add_resource(MemoryListApi, '/memories') api.add_resource(MemoryEditApi, '/memory-edit') diff --git a/api/controllers/web/chatflow_memory.py b/api/controllers/web/chatflow_memory.py index c56f50dc35..d0952190cb 100644 --- a/api/controllers/web/chatflow_memory.py +++ b/api/controllers/web/chatflow_memory.py @@ -1,14 +1,9 @@ from flask_restx import reqparse -from sqlalchemy.orm.session import Session -from sympy import false from controllers.web import api from controllers.web.wraps import WebApiResource from core.memory.entities import MemoryBlock from core.workflow.entities.variable_pool import VariablePool -from libs.helper import uuid_value -from models import db -from models.chatflow_memory import ChatflowMemoryVariable from services.chatflow_memory_service import ChatflowMemoryService from services.workflow_service import WorkflowService @@ -31,6 +26,7 @@ class MemoryListApi(WebApiResource): result = [it for it in result if it.memory_id == memory_id] return [it for it in result if it.end_user_visible] + class MemoryEditApi(WebApiResource): def put(self, app_model): parser = reqparse.RequestParser() @@ -43,6 +39,8 @@ class MemoryEditApi(WebApiResource): update = args.get("update") conversation_id = args.get("conversation_id") node_id = args.get("node_id") + if not isinstance(update, str): + return {'error': 'Update must be a string'}, 400 if not workflow: return {'error': 'Workflow not found'}, 404 
memory_spec = next((it for it in workflow.memory_blocks if it.id == args['id']), None) @@ -64,5 +62,6 @@ class MemoryEditApi(WebApiResource): ) return '', 204 + api.add_resource(MemoryListApi, '/memories') api.add_resource(MemoryEditApi, '/memory-edit') diff --git a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py index ef9ce7f6ac..c20e4c645e 100644 --- a/api/core/app/apps/advanced_chat/app_runner.py +++ b/api/core/app/apps/advanced_chat/app_runner.py @@ -28,7 +28,7 @@ from core.moderation.input_moderation import InputModeration from core.variables.variables import VariableUnion from core.workflow.entities import GraphRuntimeState, VariablePool from core.workflow.graph_engine.command_channels.redis_channel import RedisChannel -from core.workflow.graph_engine.entities.event import GraphRunSucceededEvent +from core.workflow.graph_events import GraphRunSucceededEvent from core.workflow.system_variable import SystemVariable from core.workflow.variable_loader import VariableLoader from core.workflow.workflow_entry import WorkflowEntry @@ -223,6 +223,9 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): if not assistant_message: logger.warning("Chatflow output does not contain 'answer'.") return + if not isinstance(assistant_message, str): + logger.warning("Chatflow output 'answer' is not a string.") + return try: self._sync_conversation_to_chatflow_tables(assistant_message) except Exception as e: diff --git a/api/core/memory/entities.py b/api/core/memory/entities.py index 974d6c8f29..c42a12c2f3 100644 --- a/api/core/memory/entities.py +++ b/api/core/memory/entities.py @@ -1,4 +1,4 @@ -from enum import Enum +from enum import StrEnum from typing import Optional from uuid import uuid4 @@ -7,23 +7,23 @@ from pydantic import BaseModel, Field from core.app.app_config.entities import ModelConfig -class MemoryScope(str, Enum): +class MemoryScope(StrEnum): """Memory scope determined by node_id field""" APP = "app" # node_id is 
None NODE = "node" # node_id is not None -class MemoryTerm(str, Enum): +class MemoryTerm(StrEnum): """Memory term determined by conversation_id field""" SESSION = "session" # conversation_id is not None PERSISTENT = "persistent" # conversation_id is None -class MemoryStrategy(str, Enum): +class MemoryStrategy(StrEnum): ON_TURNS = "on_turns" -class MemoryScheduleMode(str, Enum): +class MemoryScheduleMode(StrEnum): SYNC = "sync" ASYNC = "async" @@ -69,6 +69,7 @@ class MemoryBlock(BaseModel): conversation_id: Optional[str] = None node_id: Optional[str] = None + class MemoryBlockWithVisibility(BaseModel): id: str name: str diff --git a/api/core/variables/segments.py b/api/core/variables/segments.py index c06fe4d1dd..0a50cccbca 100644 --- a/api/core/variables/segments.py +++ b/api/core/variables/segments.py @@ -202,9 +202,10 @@ class ArrayFileSegment(ArraySegment): def text(self) -> str: return "" + class VersionedMemoryValue(BaseModel): current_value: str = None # type: ignore - versions: Mapping[str, str] = dict() + versions: Mapping[str, str] = {} model_config = ConfigDict(frozen=True) @@ -215,7 +216,7 @@ class VersionedMemoryValue(BaseModel): ) -> "VersionedMemoryValue": if version_name is None: version_name = str(len(self.versions) + 1) - if version_name in self.versions.keys(): + if version_name in self.versions: raise ValueError(f"Version '{version_name}' already exists.") self.current_value = new_value return VersionedMemoryValue( @@ -226,6 +227,7 @@ class VersionedMemoryValue(BaseModel): } ) + class VersionedMemorySegment(Segment): value_type: SegmentType = SegmentType.VERSIONED_MEMORY value: VersionedMemoryValue = None # type: ignore diff --git a/api/core/variables/variables.py b/api/core/variables/variables.py index 95789a68d0..5cda5c35e5 100644 --- a/api/core/variables/variables.py +++ b/api/core/variables/variables.py @@ -22,7 +22,8 @@ from .segments import ( ObjectSegment, Segment, StringSegment, - get_segment_discriminator, VersionedMemorySegment, + 
VersionedMemorySegment, + get_segment_discriminator, ) from .types import SegmentType @@ -105,9 +106,11 @@ class BooleanVariable(BooleanSegment, Variable): class ArrayFileVariable(ArrayFileSegment, ArrayVariable): pass + class VersionedMemoryVariable(VersionedMemorySegment, Variable): pass + class ArrayBooleanVariable(ArrayBooleanSegment, ArrayVariable): pass diff --git a/api/core/workflow/entities/variable_pool.py b/api/core/workflow/entities/variable_pool.py index e550d30476..e1fa3ce627 100644 --- a/api/core/workflow/entities/variable_pool.py +++ b/api/core/workflow/entities/variable_pool.py @@ -83,7 +83,6 @@ class VariablePool(BaseModel): for memory_id, memory_value in self.memory_blocks.items(): self.add([CONVERSATION_VARIABLE_NODE_ID, memory_id], memory_value) - def add(self, selector: Sequence[str], value: Any, /): """ Add a variable to the variable pool. diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 8615d6739e..1c3a3ac264 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -1235,7 +1235,7 @@ class LLMNode(Node): memory_blocks = workflow.memory_blocks for block_id in block_ids: - memory_block_spec = next((block for block in memory_blocks if block.id == block_id),None) + memory_block_spec = next((block for block in memory_blocks if block.id == block_id), None) if memory_block_spec and memory_block_spec.scope == MemoryScope.NODE: is_draft = (self.invoke_from == InvokeFrom.DEBUGGER) diff --git a/api/factories/variable_factory.py b/api/factories/variable_factory.py index 35f83d5799..e510b27b42 100644 --- a/api/factories/variable_factory.py +++ b/api/factories/variable_factory.py @@ -20,7 +20,9 @@ from core.variables.segments import ( NoneSegment, ObjectSegment, Segment, - StringSegment, VersionedMemorySegment, VersionedMemoryValue, + StringSegment, + VersionedMemorySegment, + VersionedMemoryValue, ) from core.variables.types import SegmentType from core.variables.variables 
import ( @@ -38,7 +40,8 @@ from core.variables.variables import ( ObjectVariable, SecretVariable, StringVariable, - Variable, VersionedMemoryVariable, + Variable, + VersionedMemoryVariable, ) from core.workflow.constants import ( CONVERSATION_VARIABLE_NODE_ID, diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 7b12c2266d..079b7c31f8 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -32,6 +32,7 @@ from services.workflow_service import WorkflowService logger = logging.getLogger(__name__) + class ChatflowMemoryService: @staticmethod def get_persistent_memories( @@ -186,9 +187,9 @@ class ChatflowMemoryService: ChatflowMemoryVariable.memory_id == spec.id, ChatflowMemoryVariable.tenant_id == tenant_id, ChatflowMemoryVariable.app_id == app_id, - ChatflowMemoryVariable.node_id == \ + ChatflowMemoryVariable.node_id == (node_id if spec.scope == MemoryScope.NODE else None), - ChatflowMemoryVariable.conversation_id == \ + ChatflowMemoryVariable.conversation_id == (conversation_id if spec.term == MemoryTerm.SESSION else None), ) ).order_by(ChatflowMemoryVariable.version.desc()).limit(1) @@ -517,6 +518,7 @@ class ChatflowMemoryService: result.append((str(message.role.value), message.get_text_content())) return result + def _get_memory_sync_lock_key(app_id: str, conversation_id: str) -> str: """Generate Redis lock key for memory sync updates diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index 76b101d3b3..cc33042950 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -1055,6 +1055,7 @@ def _rebuild_single_file(tenant_id: str, value: Any, variable_entity_type: Varia else: raise Exception("unreachable") + def _fetch_memory_blocks(workflow: Workflow, conversation_id: str, is_draft: bool) -> Mapping[str, str]: memory_blocks = {} memory_block_specs = workflow.memory_blocks From 
d654d9d8b1263e2cc351873a9991d607e3c97bdd Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 22 Sep 2025 16:46:39 +0800 Subject: [PATCH 50/82] refactor: make ChatflowMemoryVariable.value JSON --- api/core/memory/entities.py | 4 ++++ api/services/chatflow_memory_service.py | 11 ++++++----- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/api/core/memory/entities.py b/api/core/memory/entities.py index c42a12c2f3..ccc96de3e4 100644 --- a/api/core/memory/entities.py +++ b/api/core/memory/entities.py @@ -79,6 +79,10 @@ class MemoryBlockWithVisibility(BaseModel): end_user_editable: bool +class MemoryValueData(BaseModel): + value: str + + class ChatflowConversationMetadata(BaseModel): """Metadata for chatflow conversation with visible message count""" type: str = "mutable_visible_window" diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 079b7c31f8..57a1d8df3f 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -15,6 +15,7 @@ from core.memory.entities import ( MemoryScheduleMode, MemoryScope, MemoryTerm, + MemoryValueData, ) from core.memory.errors import MemorySyncTimeoutError from core.model_runtime.entities.message_entities import PromptMessage @@ -111,7 +112,7 @@ class ChatflowMemoryService: node_id=memory.node_id, conversation_id=memory.conversation_id, name=memory.spec.name, - value=memory.value, + value=MemoryValueData(value=memory.value).model_dump_json(), term=memory.spec.term, scope=memory.spec.scope, version=new_version, @@ -187,16 +188,16 @@ class ChatflowMemoryService: ChatflowMemoryVariable.memory_id == spec.id, ChatflowMemoryVariable.tenant_id == tenant_id, ChatflowMemoryVariable.app_id == app_id, - ChatflowMemoryVariable.node_id == + ChatflowMemoryVariable.node_id == (node_id if spec.scope == MemoryScope.NODE else None), - ChatflowMemoryVariable.conversation_id == + ChatflowMemoryVariable.conversation_id == (conversation_id if spec.term == 
MemoryTerm.SESSION else None), ) ).order_by(ChatflowMemoryVariable.version.desc()).limit(1) result = session.execute(stmt).scalar() if result: return MemoryBlock( - value=result.value, + value=MemoryValueData.model_validate_json(result.value).value, tenant_id=tenant_id, app_id=app_id, conversation_id=conversation_id, @@ -367,7 +368,7 @@ class ChatflowMemoryService: MemoryBlockWithVisibility( id=chatflow_memory_variable.memory_id, name=chatflow_memory_variable.name, - value=chatflow_memory_variable.value, + value=MemoryValueData.model_validate_json(chatflow_memory_variable.value).value, end_user_editable=spec.end_user_editable, end_user_visible=spec.end_user_visible, version=chatflow_memory_variable.version From a8c2a300f643004a6b820984f794fa6739e700f8 Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 22 Sep 2025 17:14:07 +0800 Subject: [PATCH 51/82] refactor: make memories API return MemoryBlock --- .../service_api/app/chatflow_memory.py | 4 +-- api/controllers/web/chatflow_memory.py | 4 +-- api/services/chatflow_memory_service.py | 35 ++++++++++++++++--- 3 files changed, 35 insertions(+), 8 deletions(-) diff --git a/api/controllers/service_api/app/chatflow_memory.py b/api/controllers/service_api/app/chatflow_memory.py index 20ffed672e..c321814392 100644 --- a/api/controllers/service_api/app/chatflow_memory.py +++ b/api/controllers/service_api/app/chatflow_memory.py @@ -24,8 +24,8 @@ class MemoryListApi(Resource): if conversation_id: result = [*result, *ChatflowMemoryService.get_session_memories(app_model, conversation_id, version)] if memory_id: - result = [it for it in result if it.memory_id == memory_id] - return [it for it in result if it.end_user_visible] + result = [it for it in result if it.spec.id == memory_id] + return [it for it in result if it.spec.end_user_visible] class MemoryEditApi(Resource): diff --git a/api/controllers/web/chatflow_memory.py b/api/controllers/web/chatflow_memory.py index d0952190cb..6883760552 100644 --- 
a/api/controllers/web/chatflow_memory.py +++ b/api/controllers/web/chatflow_memory.py @@ -23,8 +23,8 @@ class MemoryListApi(WebApiResource): if conversation_id: result = [*result, *ChatflowMemoryService.get_session_memories(app_model, conversation_id, version)] if memory_id: - result = [it for it in result if it.memory_id == memory_id] - return [it for it in result if it.end_user_visible] + result = [it for it in result if it.spec.id == memory_id] + return [it for it in result if it.spec.end_user_visible] class MemoryEditApi(WebApiResource): diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 57a1d8df3f..ac2da9be8f 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -39,7 +39,7 @@ class ChatflowMemoryService: def get_persistent_memories( app: App, version: int | None = None - ) -> Sequence[MemoryBlockWithVisibility]: + ) -> Sequence[MemoryBlock]: if version is None: # If version not specified, get the latest version stmt = select(ChatflowMemoryVariable).distinct(ChatflowMemoryVariable.memory_id).where( @@ -60,14 +60,14 @@ class ChatflowMemoryService: ) with Session(db.engine) as session: db_results = session.execute(stmt).all() - return ChatflowMemoryService._with_visibility(app, [result[0] for result in db_results]) + return ChatflowMemoryService._convert_to_memory_blocks(app, [result[0] for result in db_results]) @staticmethod def get_session_memories( app: App, conversation_id: str, version: int | None = None - ) -> Sequence[MemoryBlockWithVisibility]: + ) -> Sequence[MemoryBlock]: if version is None: # If version not specified, get the latest version stmt = select(ChatflowMemoryVariable).distinct(ChatflowMemoryVariable.memory_id).where( @@ -88,7 +88,7 @@ class ChatflowMemoryService: ) with Session(db.engine) as session: db_results = session.execute(stmt).all() - return ChatflowMemoryService._with_visibility(app, [result[0] for result in db_results]) + return 
ChatflowMemoryService._convert_to_memory_blocks(app, [result[0] for result in db_results]) @staticmethod def save_memory(memory: MemoryBlock, variable_pool: VariablePool, is_draft: bool) -> None: @@ -349,6 +349,33 @@ class ChatflowMemoryService: conversation_id=conversation_id ) + @staticmethod + def _convert_to_memory_blocks( + app: App, + raw_results: Sequence[ChatflowMemoryVariable] + ) -> Sequence[MemoryBlock]: + workflow = WorkflowService().get_published_workflow(app) + if not workflow: + return [] + results = [] + for chatflow_memory_variable in raw_results: + spec = next( + (spec for spec in workflow.memory_blocks if spec.id == chatflow_memory_variable.memory_id), + None + ) + if spec and chatflow_memory_variable.app_id: + results.append( + MemoryBlock( + spec=spec, + tenant_id=chatflow_memory_variable.tenant_id, + value=MemoryValueData.model_validate_json(chatflow_memory_variable.value).value, + app_id=chatflow_memory_variable.app_id, + conversation_id=chatflow_memory_variable.conversation_id, + node_id=chatflow_memory_variable.node_id + ) + ) + return results + @staticmethod def _with_visibility( app: App, From 7c35aaa99d476b3ed96189988888c3d8856ed3a1 Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 22 Sep 2025 18:16:37 +0800 Subject: [PATCH 52/82] refactor: remove MemoryBlockWithVisibility --- api/core/memory/entities.py | 9 -------- api/services/chatflow_memory_service.py | 28 ------------------------- 2 files changed, 37 deletions(-) diff --git a/api/core/memory/entities.py b/api/core/memory/entities.py index ccc96de3e4..516ca55c8b 100644 --- a/api/core/memory/entities.py +++ b/api/core/memory/entities.py @@ -70,15 +70,6 @@ class MemoryBlock(BaseModel): node_id: Optional[str] = None -class MemoryBlockWithVisibility(BaseModel): - id: str - name: str - value: str - version: int - end_user_visible: bool - end_user_editable: bool - - class MemoryValueData(BaseModel): value: str diff --git a/api/services/chatflow_memory_service.py 
b/api/services/chatflow_memory_service.py index ac2da9be8f..0f5d7ae19e 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -11,7 +11,6 @@ from core.llm_generator.llm_generator import LLMGenerator from core.memory.entities import ( MemoryBlock, MemoryBlockSpec, - MemoryBlockWithVisibility, MemoryScheduleMode, MemoryScope, MemoryTerm, @@ -376,33 +375,6 @@ class ChatflowMemoryService: ) return results - @staticmethod - def _with_visibility( - app: App, - raw_results: Sequence[ChatflowMemoryVariable] - ) -> Sequence[MemoryBlockWithVisibility]: - workflow = WorkflowService().get_published_workflow(app) - if not workflow: - return [] - results = [] - for chatflow_memory_variable in raw_results: - spec = next( - (spec for spec in workflow.memory_blocks if spec.id == chatflow_memory_variable.memory_id), - None - ) - if spec: - results.append( - MemoryBlockWithVisibility( - id=chatflow_memory_variable.memory_id, - name=chatflow_memory_variable.name, - value=MemoryValueData.model_validate_json(chatflow_memory_variable.value).value, - end_user_editable=spec.end_user_editable, - end_user_visible=spec.end_user_visible, - version=chatflow_memory_variable.version - ) - ) - return results - @staticmethod def _should_update_memory( memory_block: MemoryBlock, From 28acb70118722693ab24bb071867c9608ae2b8cf Mon Sep 17 00:00:00 2001 From: Stream Date: Mon, 22 Sep 2025 18:37:54 +0800 Subject: [PATCH 53/82] feat: add edited_by_user field --- .../service_api/app/chatflow_memory.py | 1 + api/controllers/web/chatflow_memory.py | 1 + api/core/memory/entities.py | 2 ++ api/services/chatflow_memory_service.py | 20 +++++++++++++------ 4 files changed, 18 insertions(+), 6 deletions(-) diff --git a/api/controllers/service_api/app/chatflow_memory.py b/api/controllers/service_api/app/chatflow_memory.py index c321814392..ff890eb326 100644 --- a/api/controllers/service_api/app/chatflow_memory.py +++ b/api/controllers/service_api/app/chatflow_memory.py 
@@ -56,6 +56,7 @@ class MemoryEditApi(Resource): conversation_id=conversation_id, node_id=node_id, app_id=app_model.id, + edited_by_user=True ), variable_pool=VariablePool(), is_draft=False diff --git a/api/controllers/web/chatflow_memory.py b/api/controllers/web/chatflow_memory.py index 6883760552..97d6e28c98 100644 --- a/api/controllers/web/chatflow_memory.py +++ b/api/controllers/web/chatflow_memory.py @@ -56,6 +56,7 @@ class MemoryEditApi(WebApiResource): conversation_id=conversation_id, node_id=node_id, app_id=app_model.id, + edited_by_user=True ), variable_pool=VariablePool(), is_draft=False diff --git a/api/core/memory/entities.py b/api/core/memory/entities.py index 516ca55c8b..c80cd88a8c 100644 --- a/api/core/memory/entities.py +++ b/api/core/memory/entities.py @@ -68,10 +68,12 @@ class MemoryBlock(BaseModel): app_id: str conversation_id: Optional[str] = None node_id: Optional[str] = None + edited_by_user: bool = False class MemoryValueData(BaseModel): value: str + edited_by_user: bool = False class ChatflowConversationMetadata(BaseModel): diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 0f5d7ae19e..45b602ad39 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -111,7 +111,10 @@ class ChatflowMemoryService: node_id=memory.node_id, conversation_id=memory.conversation_id, name=memory.spec.name, - value=MemoryValueData(value=memory.value).model_dump_json(), + value=MemoryValueData( + value=memory.value, + edited_by_user=memory.edited_by_user + ).model_dump_json(), term=memory.spec.term, scope=memory.spec.scope, version=new_version, @@ -195,13 +198,15 @@ class ChatflowMemoryService: ).order_by(ChatflowMemoryVariable.version.desc()).limit(1) result = session.execute(stmt).scalar() if result: + memory_value_data = MemoryValueData.model_validate_json(result.value) return MemoryBlock( - value=MemoryValueData.model_validate_json(result.value).value, + 
value=memory_value_data.value, tenant_id=tenant_id, app_id=app_id, conversation_id=conversation_id, node_id=node_id, - spec=spec + spec=spec, + edited_by_user=memory_value_data.edited_by_user ) return MemoryBlock( tenant_id=tenant_id, @@ -363,14 +368,16 @@ class ChatflowMemoryService: None ) if spec and chatflow_memory_variable.app_id: + memory_value_data = MemoryValueData.model_validate_json(chatflow_memory_variable.value) results.append( MemoryBlock( spec=spec, tenant_id=chatflow_memory_variable.tenant_id, - value=MemoryValueData.model_validate_json(chatflow_memory_variable.value).value, + value=memory_value_data.value, app_id=chatflow_memory_variable.app_id, conversation_id=chatflow_memory_variable.conversation_id, - node_id=chatflow_memory_variable.node_id + node_id=chatflow_memory_variable.node_id, + edited_by_user=memory_value_data.edited_by_user ) ) return results @@ -507,7 +514,8 @@ class ChatflowMemoryService: spec=memory_block.spec, app_id=memory_block.app_id, conversation_id=memory_block.conversation_id, - node_id=memory_block.node_id + node_id=memory_block.node_id, + edited_by_user=False ) ChatflowMemoryService.save_memory(updated_memory, variable_pool, is_draft) From d94e598a89c852c35bd3fc4af587a0227b03f60c Mon Sep 17 00:00:00 2001 From: Stream Date: Tue, 23 Sep 2025 14:19:40 +0800 Subject: [PATCH 54/82] revert: remove memory database migration --- .../versions/2025_08_22_2103-f3747f1446a4_.py | 83 ------------------- 1 file changed, 83 deletions(-) delete mode 100644 api/migrations/versions/2025_08_22_2103-f3747f1446a4_.py diff --git a/api/migrations/versions/2025_08_22_2103-f3747f1446a4_.py b/api/migrations/versions/2025_08_22_2103-f3747f1446a4_.py deleted file mode 100644 index 3425a39417..0000000000 --- a/api/migrations/versions/2025_08_22_2103-f3747f1446a4_.py +++ /dev/null @@ -1,83 +0,0 @@ -"""empty message - -Revision ID: f3747f1446a4 -Revises: 3803626caa7c -Create Date: 2025-08-22 21:03:32.462487 - -""" -from alembic import op -import models as 
models -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = 'f3747f1446a4' -down_revision = '3803626caa7c' -branch_labels = None -depends_on = None - - -def upgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.create_table('chatflow_conversations', - sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), - sa.Column('tenant_id', models.types.StringUUID(), nullable=False), - sa.Column('app_id', models.types.StringUUID(), nullable=False), - sa.Column('node_id', sa.Text(), nullable=True), - sa.Column('original_conversation_id', models.types.StringUUID(), nullable=True), - sa.Column('conversation_metadata', sa.Text(), nullable=False), - sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), - sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), - sa.PrimaryKeyConstraint('id', name='chatflow_conversations_pkey') - ) - with op.batch_alter_table('chatflow_conversations', schema=None) as batch_op: - batch_op.create_index('chatflow_conversations_original_conversation_id_idx', ['tenant_id', 'app_id', 'node_id', 'original_conversation_id'], unique=False) - - op.create_table('chatflow_memory_variables', - sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), - sa.Column('tenant_id', models.types.StringUUID(), nullable=False), - sa.Column('app_id', models.types.StringUUID(), nullable=True), - sa.Column('conversation_id', models.types.StringUUID(), nullable=True), - sa.Column('node_id', sa.Text(), nullable=True), - sa.Column('memory_id', sa.Text(), nullable=False), - sa.Column('value', sa.Text(), nullable=False), - sa.Column('name', sa.Text(), nullable=False), - sa.Column('scope', sa.String(length=10), nullable=False), - sa.Column('term', sa.String(length=20), nullable=False), - sa.Column('created_at', sa.DateTime(), 
server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), - sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), - sa.PrimaryKeyConstraint('id', name='chatflow_memory_variables_pkey') - ) - with op.batch_alter_table('chatflow_memory_variables', schema=None) as batch_op: - batch_op.create_index('chatflow_memory_variables_memory_id_idx', ['tenant_id', 'app_id', 'node_id', 'memory_id'], unique=False) - - op.create_table('chatflow_messages', - sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), - sa.Column('conversation_id', models.types.StringUUID(), nullable=False), - sa.Column('index', sa.Integer(), nullable=False), - sa.Column('version', sa.Integer(), nullable=False), - sa.Column('data', sa.Text(), nullable=False), - sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), - sa.PrimaryKeyConstraint('id', name='chatflow_messages_pkey') - ) - with op.batch_alter_table('chatflow_messages', schema=None) as batch_op: - batch_op.create_index('chatflow_messages_version_idx', ['conversation_id', 'index', 'version'], unique=False) - - # ### end Alembic commands ### - - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! 
### - with op.batch_alter_table('chatflow_messages', schema=None) as batch_op: - batch_op.drop_index('chatflow_messages_version_idx') - - op.drop_table('chatflow_messages') - with op.batch_alter_table('chatflow_memory_variables', schema=None) as batch_op: - batch_op.drop_index('chatflow_memory_variables_memory_id_idx') - - op.drop_table('chatflow_memory_variables') - with op.batch_alter_table('chatflow_conversations', schema=None) as batch_op: - batch_op.drop_index('chatflow_conversations_original_conversation_id_idx') - - op.drop_table('chatflow_conversations') - # ### end Alembic commands ### From 6eab6a675c41cf0e82038a17d2a6a679d7146c81 Mon Sep 17 00:00:00 2001 From: Stream Date: Tue, 23 Sep 2025 16:56:07 +0800 Subject: [PATCH 55/82] feat: add created_by to memory blocks --- .../service_api/app/chatflow_memory.py | 28 ++++++--- api/controllers/web/chatflow_memory.py | 28 ++++++--- api/core/app/apps/advanced_chat/app_runner.py | 14 ++++- api/core/memory/entities.py | 6 ++ api/core/workflow/nodes/llm/node.py | 16 +++-- api/models/chatflow_memory.py | 2 + api/services/chatflow_memory_service.py | 61 +++++++++++++++---- api/services/workflow_service.py | 18 ++++-- 8 files changed, 135 insertions(+), 38 deletions(-) diff --git a/api/controllers/service_api/app/chatflow_memory.py b/api/controllers/service_api/app/chatflow_memory.py index ff890eb326..9742193645 100644 --- a/api/controllers/service_api/app/chatflow_memory.py +++ b/api/controllers/service_api/app/chatflow_memory.py @@ -2,27 +2,40 @@ from flask_restx import Resource, reqparse from controllers.service_api import api from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token -from core.memory.entities import MemoryBlock +from core.memory.entities import MemoryBlock, MemoryCreatedBy from core.workflow.entities.variable_pool import VariablePool +from models import App, EndUser from services.chatflow_memory_service import ChatflowMemoryService from services.workflow_service 
import WorkflowService class MemoryListApi(Resource): @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True)) - def get(self, app_model): + def get(self, app_model: App, end_user: EndUser): parser = reqparse.RequestParser() parser.add_argument("conversation_id", required=False, type=str | None, default=None) parser.add_argument("memory_id", required=False, type=str | None, default=None) parser.add_argument("version", required=False, type=int | None, default=None) args = parser.parse_args() - conversation_id = args.get("conversation_id") + conversation_id: str | None = args.get("conversation_id") memory_id = args.get("memory_id") version = args.get("version") - result = ChatflowMemoryService.get_persistent_memories(app_model, version) + result = ChatflowMemoryService.get_persistent_memories( + app_model, + MemoryCreatedBy(end_user_id=end_user.id), + version + ) if conversation_id: - result = [*result, *ChatflowMemoryService.get_session_memories(app_model, conversation_id, version)] + result = [ + *result, + *ChatflowMemoryService.get_session_memories( + app_model, + MemoryCreatedBy(end_user_id=end_user.id), + conversation_id, + version + ) + ] if memory_id: result = [it for it in result if it.spec.id == memory_id] return [it for it in result if it.spec.end_user_visible] @@ -30,7 +43,7 @@ class MemoryListApi(Resource): class MemoryEditApi(Resource): @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True)) - def put(self, app_model): + def put(self, app_model: App, end_user: EndUser): parser = reqparse.RequestParser() parser.add_argument('id', type=str, required=True) parser.add_argument("conversation_id", type=str | None, required=False, default=None) @@ -56,7 +69,8 @@ class MemoryEditApi(Resource): conversation_id=conversation_id, node_id=node_id, app_id=app_model.id, - edited_by_user=True + edited_by_user=True, + created_by=MemoryCreatedBy(end_user_id=end_user.id), ), 
variable_pool=VariablePool(), is_draft=False diff --git a/api/controllers/web/chatflow_memory.py b/api/controllers/web/chatflow_memory.py index 97d6e28c98..d41925cc63 100644 --- a/api/controllers/web/chatflow_memory.py +++ b/api/controllers/web/chatflow_memory.py @@ -2,33 +2,46 @@ from flask_restx import reqparse from controllers.web import api from controllers.web.wraps import WebApiResource -from core.memory.entities import MemoryBlock +from core.memory.entities import MemoryBlock, MemoryCreatedBy from core.workflow.entities.variable_pool import VariablePool +from models import App, EndUser from services.chatflow_memory_service import ChatflowMemoryService from services.workflow_service import WorkflowService class MemoryListApi(WebApiResource): - def get(self, app_model): + def get(self, app_model: App, end_user: EndUser): parser = reqparse.RequestParser() parser.add_argument("conversation_id", required=False, type=str | None, default=None) parser.add_argument("memory_id", required=False, type=str | None, default=None) parser.add_argument("version", required=False, type=int | None, default=None) args = parser.parse_args() - conversation_id = args.get("conversation_id") + conversation_id: str | None = args.get("conversation_id") memory_id = args.get("memory_id") version = args.get("version") - result = ChatflowMemoryService.get_persistent_memories(app_model, version) + result = ChatflowMemoryService.get_persistent_memories( + app_model, + MemoryCreatedBy(end_user_id=end_user.id), + version + ) if conversation_id: - result = [*result, *ChatflowMemoryService.get_session_memories(app_model, conversation_id, version)] + result = [ + *result, + *ChatflowMemoryService.get_session_memories( + app_model, + MemoryCreatedBy(end_user_id=end_user.id), + conversation_id, + version + ) + ] if memory_id: result = [it for it in result if it.spec.id == memory_id] return [it for it in result if it.spec.end_user_visible] class MemoryEditApi(WebApiResource): - def put(self, 
app_model): + def put(self, app_model: App, end_user: EndUser): parser = reqparse.RequestParser() parser.add_argument('id', type=str, required=True) parser.add_argument("conversation_id", type=str | None, required=False, default=None) @@ -56,7 +69,8 @@ class MemoryEditApi(WebApiResource): conversation_id=conversation_id, node_id=node_id, app_id=app_model.id, - edited_by_user=True + edited_by_user=True, + created_by=MemoryCreatedBy(end_user_id=end_user.id) ), variable_pool=VariablePool(), is_draft=False diff --git a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py index c20e4c645e..009c878919 100644 --- a/api/core/app/apps/advanced_chat/app_runner.py +++ b/api/core/app/apps/advanced_chat/app_runner.py @@ -21,7 +21,7 @@ from core.app.entities.queue_entities import ( QueueTextChunkEvent, ) from core.app.features.annotation_reply.annotation_reply import AnnotationReplyFeature -from core.memory.entities import MemoryScope +from core.memory.entities import MemoryCreatedBy, MemoryScope from core.model_runtime.entities import AssistantPromptMessage, UserPromptMessage from core.moderation.base import ModerationError from core.moderation.input_moderation import InputModeration @@ -443,7 +443,8 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): app_id=self._workflow.app_id, node_id=None, conversation_id=conversation_id, - is_draft=is_draft + is_draft=is_draft, + created_by=self._get_created_by(), ) # Build memory_id -> value mapping @@ -482,5 +483,12 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): workflow=self._workflow, conversation_id=self.conversation.id, variable_pool=variable_pool, - is_draft=is_draft + is_draft=is_draft, + created_by=self._get_created_by() ) + + def _get_created_by(self) -> MemoryCreatedBy: + if self.application_generate_entity.invoke_from in {InvokeFrom.DEBUGGER, InvokeFrom.EXPLORE}: + return MemoryCreatedBy(account_id=self.application_generate_entity.user_id) + else: + return 
MemoryCreatedBy(end_user_id=self.application_generate_entity.user_id) diff --git a/api/core/memory/entities.py b/api/core/memory/entities.py index c80cd88a8c..8ab985ad45 100644 --- a/api/core/memory/entities.py +++ b/api/core/memory/entities.py @@ -49,6 +49,11 @@ class MemoryBlockSpec(BaseModel): end_user_editable: bool = Field(default=False, description="Whether memory is editable by end users") +class MemoryCreatedBy(BaseModel): + end_user_id: str | None = None + account_id: str | None = None + + class MemoryBlock(BaseModel): """Runtime memory block instance @@ -69,6 +74,7 @@ class MemoryBlock(BaseModel): conversation_id: Optional[str] = None node_id: Optional[str] = None edited_by_user: bool = False + created_by: MemoryCreatedBy class MemoryValueData(BaseModel): diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 1c3a3ac264..f89ae6f6c9 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -14,7 +14,7 @@ from core.file import FileType, file_manager from core.helper.code_executor import CodeExecutor, CodeLanguage from core.llm_generator.output_parser.errors import OutputParserError from core.llm_generator.output_parser.structured_output import invoke_llm_with_structured_output -from core.memory.entities import MemoryScope +from core.memory.entities import MemoryCreatedBy, MemoryScope from core.memory.token_buffer_memory import TokenBufferMemory from core.model_manager import ModelInstance, ModelManager from core.model_runtime.entities import ( @@ -74,7 +74,8 @@ from core.workflow.node_events import ( from core.workflow.nodes.base.entities import BaseNodeData, RetryConfig, VariableSelector from core.workflow.nodes.base.node import Node from core.workflow.nodes.base.variable_template_parser import VariableTemplateParser -from models import Workflow, db +from models import UserFrom, Workflow +from models.engine import db from services.chatflow_memory_service import ChatflowMemoryService 
from . import llm_utils @@ -1242,13 +1243,20 @@ class LLMNode(Node): ChatflowMemoryService.update_node_memory_if_needed( tenant_id=self.tenant_id, app_id=self.app_id, - node_id=self.node_id, + node_id=self.id, conversation_id=conversation_id, memory_block_spec=memory_block_spec, variable_pool=variable_pool, - is_draft=is_draft + is_draft=is_draft, + created_by=self._get_user_from_context() ) + def _get_user_from_context(self) -> MemoryCreatedBy: + if self.user_from == UserFrom.ACCOUNT: + return MemoryCreatedBy(account_id=self.user_id) + else: + return MemoryCreatedBy(end_user_id=self.user_id) + def _combine_message_content_with_role( *, contents: str | list[PromptMessageContentUnionTypes] | None = None, role: PromptMessageRole diff --git a/api/models/chatflow_memory.py b/api/models/chatflow_memory.py index cde48c5860..773c69405d 100644 --- a/api/models/chatflow_memory.py +++ b/api/models/chatflow_memory.py @@ -26,6 +26,8 @@ class ChatflowMemoryVariable(Base): scope: Mapped[str] = mapped_column(sa.String(10), nullable=False) # 'app' or 'node' term: Mapped[str] = mapped_column(sa.String(20), nullable=False) # 'session' or 'persistent' version: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=1) + created_by_role: Mapped[str] = mapped_column(sa.String(20)) # 'end_user' or 'account' + created_by: Mapped[str] = mapped_column(StringUUID) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) updated_at: Mapped[datetime] = mapped_column( diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 45b602ad39..f67b058015 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -11,6 +11,7 @@ from core.llm_generator.llm_generator import LLMGenerator from core.memory.entities import ( MemoryBlock, MemoryBlockSpec, + MemoryCreatedBy, MemoryScheduleMode, MemoryScope, MemoryTerm, @@ -23,7 +24,7 @@ from
core.workflow.constants import MEMORY_BLOCK_VARIABLE_NODE_ID from core.workflow.entities.variable_pool import VariablePool from extensions.ext_database import db from extensions.ext_redis import redis_client -from models import App +from models import App, CreatorUserRole from models.chatflow_memory import ChatflowMemoryVariable from models.workflow import Workflow, WorkflowDraftVariable from services.chatflow_history_service import ChatflowHistoryService @@ -37,15 +38,24 @@ class ChatflowMemoryService: @staticmethod def get_persistent_memories( app: App, + created_by: MemoryCreatedBy, version: int | None = None ) -> Sequence[MemoryBlock]: + if created_by.account_id: + created_by_role = CreatorUserRole.ACCOUNT + created_by_id = created_by.account_id + else: + created_by_role = CreatorUserRole.END_USER + created_by_id = created_by.end_user_id if version is None: # If version not specified, get the latest version stmt = select(ChatflowMemoryVariable).distinct(ChatflowMemoryVariable.memory_id).where( and_( ChatflowMemoryVariable.tenant_id == app.tenant_id, ChatflowMemoryVariable.app_id == app.id, - ChatflowMemoryVariable.conversation_id == None + ChatflowMemoryVariable.conversation_id == None, + ChatflowMemoryVariable.created_by_role == created_by_role, + ChatflowMemoryVariable.created_by == created_by_id, ) ).order_by(ChatflowMemoryVariable.version.desc()) else: @@ -54,16 +64,19 @@ class ChatflowMemoryService: ChatflowMemoryVariable.tenant_id == app.tenant_id, ChatflowMemoryVariable.app_id == app.id, ChatflowMemoryVariable.conversation_id == None, + ChatflowMemoryVariable.created_by_role == created_by_role, + ChatflowMemoryVariable.created_by == created_by_id, ChatflowMemoryVariable.version == version ) ) with Session(db.engine) as session: db_results = session.execute(stmt).all() - return ChatflowMemoryService._convert_to_memory_blocks(app, [result[0] for result in db_results]) + return ChatflowMemoryService._convert_to_memory_blocks(app, created_by, [result[0] for result in
db_results]) @staticmethod def get_session_memories( app: App, + created_by: MemoryCreatedBy, conversation_id: str, version: int | None = None ) -> Sequence[MemoryBlock]: @@ -87,12 +100,18 @@ ) with Session(db.engine) as session: db_results = session.execute(stmt).all() - return ChatflowMemoryService._convert_to_memory_blocks(app, [result[0] for result in db_results]) + return ChatflowMemoryService._convert_to_memory_blocks(app, created_by, [result[0] for result in db_results]) @staticmethod def save_memory(memory: MemoryBlock, variable_pool: VariablePool, is_draft: bool) -> None: key = f"{memory.node_id}.{memory.spec.id}" if memory.node_id else memory.spec.id variable_pool.add([MEMORY_BLOCK_VARIABLE_NODE_ID, key], memory.value) + if memory.created_by.account_id: + created_by_role = CreatorUserRole.ACCOUNT + created_by = memory.created_by.account_id + else: + created_by_role = CreatorUserRole.END_USER + created_by = memory.created_by.end_user_id with Session(db.engine) as session: existing = session.query(ChatflowMemoryVariable).filter_by( @@ -100,7 +119,9 @@ tenant_id=memory.tenant_id, app_id=memory.app_id, node_id=memory.node_id, - conversation_id=memory.conversation_id + conversation_id=memory.conversation_id, + created_by_role=created_by_role, + created_by=created_by, ).order_by(ChatflowMemoryVariable.version.desc()).first() new_version = 1 if not existing else existing.version + 1 session.add( @@ -118,6 +139,8 @@ term=memory.spec.term, scope=memory.spec.scope, version=new_version, + created_by_role=created_by_role, + created_by=created_by, ) ) session.commit() @@ -149,12 +172,13 @@ def get_memories_by_specs( memory_block_specs: Sequence[MemoryBlockSpec], tenant_id: str, app_id: str, + created_by: MemoryCreatedBy, conversation_id: Optional[str], node_id: Optional[str], is_draft: bool ) -> Sequence[MemoryBlock]: return
[ChatflowMemoryService.get_memory_by_spec( - spec, tenant_id, app_id, conversation_id, node_id, is_draft + spec, tenant_id, app_id, created_by, conversation_id, node_id, is_draft ) for spec in memory_block_specs] @staticmethod @@ -162,6 +186,7 @@ class ChatflowMemoryService: spec: MemoryBlockSpec, tenant_id: str, app_id: str, + created_by: MemoryCreatedBy, conversation_id: Optional[str], node_id: Optional[str], is_draft: bool @@ -183,7 +208,8 @@ class ChatflowMemoryService: app_id=app_id, conversation_id=conversation_id, node_id=node_id, - spec=spec + spec=spec, + created_by=created_by, ) stmt = select(ChatflowMemoryVariable).where( and_( @@ -206,7 +232,8 @@ class ChatflowMemoryService: conversation_id=conversation_id, node_id=node_id, spec=spec, - edited_by_user=memory_value_data.edited_by_user + edited_by_user=memory_value_data.edited_by_user, + created_by=created_by, ) return MemoryBlock( tenant_id=tenant_id, @@ -214,7 +241,8 @@ class ChatflowMemoryService: app_id=app_id, conversation_id=conversation_id, node_id=node_id, - spec=spec + spec=spec, + created_by=created_by, ) @staticmethod @@ -222,6 +250,7 @@ class ChatflowMemoryService: workflow: Workflow, conversation_id: str, variable_pool: VariablePool, + created_by: MemoryCreatedBy, is_draft: bool ): visible_messages = ChatflowHistoryService.get_visible_chat_history( @@ -240,7 +269,8 @@ class ChatflowMemoryService: app_id=workflow.app_id, conversation_id=conversation_id, node_id=None, - is_draft=is_draft + is_draft=is_draft, + created_by=created_by, ) if ChatflowMemoryService._should_update_memory(memory, visible_messages): if memory.spec.schedule_mode == MemoryScheduleMode.SYNC: @@ -276,6 +306,7 @@ class ChatflowMemoryService: tenant_id: str, app_id: str, node_id: str, + created_by: MemoryCreatedBy, conversation_id: str, memory_block_spec: MemoryBlockSpec, variable_pool: VariablePool, @@ -293,7 +324,8 @@ class ChatflowMemoryService: app_id=app_id, conversation_id=conversation_id, node_id=node_id, - 
is_draft=is_draft + is_draft=is_draft, + created_by=created_by, ) if not ChatflowMemoryService._should_update_memory( memory_block=memory_block, @@ -356,6 +388,7 @@ class ChatflowMemoryService: @staticmethod def _convert_to_memory_blocks( app: App, + created_by: MemoryCreatedBy, raw_results: Sequence[ChatflowMemoryVariable] ) -> Sequence[MemoryBlock]: workflow = WorkflowService().get_published_workflow(app) @@ -377,7 +410,8 @@ class ChatflowMemoryService: app_id=chatflow_memory_variable.app_id, conversation_id=chatflow_memory_variable.conversation_id, node_id=chatflow_memory_variable.node_id, - edited_by_user=memory_value_data.edited_by_user + edited_by_user=memory_value_data.edited_by_user, + created_by=created_by, ) ) return results @@ -515,7 +549,8 @@ class ChatflowMemoryService: app_id=memory_block.app_id, conversation_id=memory_block.conversation_id, node_id=memory_block.node_id, - edited_by_user=False + edited_by_user=False, + created_by=memory_block.created_by, ) ChatflowMemoryService.save_memory(updated_memory, variable_pool, is_draft) diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index cc33042950..4137f2df91 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -11,7 +11,7 @@ from core.app.app_config.entities import VariableEntityType from core.app.apps.advanced_chat.app_config_manager import AdvancedChatAppConfigManager from core.app.apps.workflow.app_config_manager import WorkflowAppConfigManager from core.file import File -from core.memory.entities import MemoryScope +from core.memory.entities import MemoryCreatedBy, MemoryScope from core.repositories import DifyCoreRepositoryFactory from core.variables import Variable from core.variables.variables import VariableUnion @@ -1008,7 +1008,6 @@ def _setup_variable_pool( system_variable.dialogue_count = 1 else: system_variable = SystemVariable.empty() - # init variable pool variable_pool = VariablePool( system_variables=system_variable, @@ 
-1017,7 +1016,12 @@ def _setup_variable_pool( # Based on the definition of `VariableUnion`, # `list[Variable]` can be safely used as `list[VariableUnion]` since they are compatible. conversation_variables=cast(list[VariableUnion], conversation_variables), # - memory_blocks=_fetch_memory_blocks(workflow, conversation_id, is_draft=is_draft), + memory_blocks=_fetch_memory_blocks( + workflow, + MemoryCreatedBy(account_id=user_id), + conversation_id, + is_draft=is_draft + ), ) return variable_pool @@ -1056,7 +1060,12 @@ def _rebuild_single_file(tenant_id: str, value: Any, variable_entity_type: Varia raise Exception("unreachable") -def _fetch_memory_blocks(workflow: Workflow, conversation_id: str, is_draft: bool) -> Mapping[str, str]: +def _fetch_memory_blocks( + workflow: Workflow, + created_by: MemoryCreatedBy, + conversation_id: str, + is_draft: bool +) -> Mapping[str, str]: memory_blocks = {} memory_block_specs = workflow.memory_blocks memories = ChatflowMemoryService.get_memories_by_specs( @@ -1066,6 +1075,7 @@ def _fetch_memory_blocks(workflow: Workflow, conversation_id: str, is_draft: boo node_id=None, conversation_id=conversation_id, is_draft=is_draft, + created_by=created_by, ) for memory in memories: if memory.spec.scope == MemoryScope.APP: From 75c221038dfa7e76c2ce7b0ffb78ab7a2810b58b Mon Sep 17 00:00:00 2001 From: Stream Date: Tue, 23 Sep 2025 18:35:11 +0800 Subject: [PATCH 56/82] feat: add endpoints to __init__.py --- api/controllers/service_api/__init__.py | 2 ++ api/controllers/web/__init__.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/api/controllers/service_api/__init__.py b/api/controllers/service_api/__init__.py index 9032733e2c..cbad75c7b3 100644 --- a/api/controllers/service_api/__init__.py +++ b/api/controllers/service_api/__init__.py @@ -19,6 +19,7 @@ from .app import ( annotation, app, audio, + chatflow_memory, completion, conversation, file, @@ -40,6 +41,7 @@ __all__ = [ "annotation", "app", "audio", + "chatflow_memory", "completion", 
"conversation", "dataset", diff --git a/api/controllers/web/__init__.py b/api/controllers/web/__init__.py index 1d22954308..16d868d4dc 100644 --- a/api/controllers/web/__init__.py +++ b/api/controllers/web/__init__.py @@ -18,6 +18,7 @@ web_ns = Namespace("web", description="Web application API operations", path="/" from . import ( app, audio, + chatflow_memory, completion, conversation, feature, @@ -39,6 +40,7 @@ __all__ = [ "app", "audio", "bp", + "chatflow_memory", "completion", "conversation", "feature", From 3d7d4182a67786d0a536de58c814b491b96d5931 Mon Sep 17 00:00:00 2001 From: Stream Date: Tue, 23 Sep 2025 19:07:37 +0800 Subject: [PATCH 57/82] feat: add endpoints to delete memory --- .../service_api/app/chatflow_memory.py | 24 +++++++++ api/controllers/web/chatflow_memory.py | 23 ++++++++ api/services/chatflow_memory_service.py | 53 ++++++++++++++++++- 3 files changed, 99 insertions(+), 1 deletion(-) diff --git a/api/controllers/service_api/app/chatflow_memory.py b/api/controllers/service_api/app/chatflow_memory.py index 9742193645..08a855fd80 100644 --- a/api/controllers/service_api/app/chatflow_memory.py +++ b/api/controllers/service_api/app/chatflow_memory.py @@ -78,5 +78,29 @@ class MemoryEditApi(Resource): return '', 204 +class MemoryDeleteApi(Resource): + @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True)) + def delete(self, app_model: App, end_user: EndUser): + parser = reqparse.RequestParser() + parser.add_argument('id', type=str, required=False, default=None) + args = parser.parse_args() + memory_id = args.get('id') + + if memory_id: + ChatflowMemoryService.delete_memory( + app_model, + memory_id, + MemoryCreatedBy(end_user_id=end_user.id) + ) + return '', 204 + else: + ChatflowMemoryService.delete_all_user_memories( + app_model, + MemoryCreatedBy(end_user_id=end_user.id) + ) + return '', 200 + + api.add_resource(MemoryListApi, '/memories') api.add_resource(MemoryEditApi, '/memory-edit') 
+api.add_resource(MemoryDeleteApi, '/memories') diff --git a/api/controllers/web/chatflow_memory.py b/api/controllers/web/chatflow_memory.py index d41925cc63..7835a4f04d 100644 --- a/api/controllers/web/chatflow_memory.py +++ b/api/controllers/web/chatflow_memory.py @@ -78,5 +78,28 @@ class MemoryEditApi(WebApiResource): return '', 204 +class MemoryDeleteApi(WebApiResource): + def delete(self, app_model: App, end_user: EndUser): + parser = reqparse.RequestParser() + parser.add_argument('id', type=str, required=False, default=None) + args = parser.parse_args() + memory_id = args.get('id') + + if memory_id: + ChatflowMemoryService.delete_memory( + app_model, + memory_id, + MemoryCreatedBy(end_user_id=end_user.id) + ) + return '', 204 + else: + ChatflowMemoryService.delete_all_user_memories( + app_model, + MemoryCreatedBy(end_user_id=end_user.id) + ) + return '', 200 + + api.add_resource(MemoryListApi, '/memories') api.add_resource(MemoryEditApi, '/memory-edit') +api.add_resource(MemoryDeleteApi, '/memories') diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index f67b058015..3bcfa201e6 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -4,7 +4,7 @@ import time from collections.abc import Sequence from typing import Optional -from sqlalchemy import and_, select +from sqlalchemy import and_, delete, select from sqlalchemy.orm import Session from core.llm_generator.llm_generator import LLMGenerator @@ -554,6 +554,57 @@ class ChatflowMemoryService: ) ChatflowMemoryService.save_memory(updated_memory, variable_pool, is_draft) + @staticmethod + def delete_memory(app: App, memory_id: str, created_by: MemoryCreatedBy): + workflow = WorkflowService().get_published_workflow(app) + if not workflow: + raise ValueError("Workflow not found") + + memory_spec = next((it for it in workflow.memory_blocks if it.id == memory_id), None) + if not memory_spec or not memory_spec.end_user_editable: 
+ raise ValueError("Memory not found or not deletable") + + if created_by.account_id: + created_by_role = CreatorUserRole.ACCOUNT + created_by_id = created_by.account_id + else: + created_by_role = CreatorUserRole.END_USER + created_by_id = created_by.end_user_id + + with Session(db.engine) as session: + stmt = delete(ChatflowMemoryVariable).where( + and_( + ChatflowMemoryVariable.tenant_id == app.tenant_id, + ChatflowMemoryVariable.app_id == app.id, + ChatflowMemoryVariable.memory_id == memory_id, + ChatflowMemoryVariable.created_by_role == created_by_role, + ChatflowMemoryVariable.created_by == created_by_id + ) + ) + session.execute(stmt) + session.commit() + + @staticmethod + def delete_all_user_memories(app: App, created_by: MemoryCreatedBy): + if created_by.account_id: + created_by_role = CreatorUserRole.ACCOUNT + created_by_id = created_by.account_id + else: + created_by_role = CreatorUserRole.END_USER + created_by_id = created_by.end_user_id + + with Session(db.engine) as session: + stmt = delete(ChatflowMemoryVariable).where( + and_( + ChatflowMemoryVariable.tenant_id == app.tenant_id, + ChatflowMemoryVariable.app_id == app.id, + ChatflowMemoryVariable.created_by_role == created_by_role, + ChatflowMemoryVariable.created_by == created_by_id + ) + ) + session.execute(stmt) + session.commit() + @staticmethod def _format_chat_history(messages: Sequence[PromptMessage]) -> Sequence[tuple[str, str]]: result = [] From 5bf642c3f98a4c1e327856ee32a9afc20ca315d9 Mon Sep 17 00:00:00 2001 From: Stream Date: Tue, 23 Sep 2025 23:09:45 +0800 Subject: [PATCH 58/82] feat: expose version to MemoryBlock --- .../service_api/app/chatflow_memory.py | 36 ++++++++++++------- api/controllers/web/chatflow_memory.py | 36 ++++++++++++------- api/core/memory/entities.py | 8 +++-- api/services/chatflow_memory_service.py | 5 +++ 4 files changed, 59 insertions(+), 26 deletions(-) diff --git a/api/controllers/service_api/app/chatflow_memory.py b/api/controllers/service_api/app/chatflow_memory.py index
08a855fd80..69e5590cc2 100644 --- a/api/controllers/service_api/app/chatflow_memory.py +++ b/api/controllers/service_api/app/chatflow_memory.py @@ -61,20 +61,32 @@ class MemoryEditApi(Resource): memory_spec = next((it for it in workflow.memory_blocks if it.id == args['id']), None) if not memory_spec: return {'error': 'Memory not found'}, 404 - ChatflowMemoryService.save_memory( - MemoryBlock( - spec=memory_spec, - tenant_id=app_model.tenant_id, - value=update, - conversation_id=conversation_id, - node_id=node_id, - app_id=app_model.id, - edited_by_user=True, - created_by=MemoryCreatedBy(end_user_id=end_user.id), - ), - variable_pool=VariablePool(), + + # First get existing memory + existing_memory = ChatflowMemoryService.get_memory_by_spec( + spec=memory_spec, + tenant_id=app_model.tenant_id, + app_id=app_model.id, + created_by=MemoryCreatedBy(end_user_id=end_user.id), + conversation_id=conversation_id, + node_id=node_id, is_draft=False ) + + # Create updated memory instance + updated_memory = MemoryBlock( + spec=existing_memory.spec, + tenant_id=existing_memory.tenant_id, + app_id=existing_memory.app_id, + conversation_id=existing_memory.conversation_id, + node_id=existing_memory.node_id, + value=update, # New value + version=existing_memory.version, # Keep current version (save_memory will handle version increment) + edited_by_user=True, + created_by=existing_memory.created_by, + ) + + ChatflowMemoryService.save_memory(updated_memory, VariablePool(), False) return '', 204 diff --git a/api/controllers/web/chatflow_memory.py b/api/controllers/web/chatflow_memory.py index 7835a4f04d..60829b972b 100644 --- a/api/controllers/web/chatflow_memory.py +++ b/api/controllers/web/chatflow_memory.py @@ -61,20 +61,32 @@ class MemoryEditApi(WebApiResource): return {'error': 'Memory not found'}, 404 if not memory_spec.end_user_editable: return {'error': 'Memory not editable'}, 403 - ChatflowMemoryService.save_memory( - MemoryBlock( - spec=memory_spec, - 
tenant_id=app_model.tenant_id, - value=update, - conversation_id=conversation_id, - node_id=node_id, - app_id=app_model.id, - edited_by_user=True, - created_by=MemoryCreatedBy(end_user_id=end_user.id) - ), - variable_pool=VariablePool(), + + # First get existing memory + existing_memory = ChatflowMemoryService.get_memory_by_spec( + spec=memory_spec, + tenant_id=app_model.tenant_id, + app_id=app_model.id, + created_by=MemoryCreatedBy(end_user_id=end_user.id), + conversation_id=conversation_id, + node_id=node_id, is_draft=False ) + + # Create updated memory instance + updated_memory = MemoryBlock( + spec=existing_memory.spec, + tenant_id=existing_memory.tenant_id, + app_id=existing_memory.app_id, + conversation_id=existing_memory.conversation_id, + node_id=existing_memory.node_id, + value=update, # New value + version=existing_memory.version, # Keep current version (save_memory will handle version increment) + edited_by_user=True, + created_by=existing_memory.created_by, + ) + + ChatflowMemoryService.save_memory(updated_memory, VariablePool(), False) return '', 204 diff --git a/api/core/memory/entities.py b/api/core/memory/entities.py index 8ab985ad45..0cee9ad0e7 100644 --- a/api/core/memory/entities.py +++ b/api/core/memory/entities.py @@ -1,10 +1,13 @@ +from __future__ import annotations + from enum import StrEnum -from typing import Optional +from typing import TYPE_CHECKING, Optional from uuid import uuid4 from pydantic import BaseModel, Field -from core.app.app_config.entities import ModelConfig +if TYPE_CHECKING: + from core.app.app_config.entities import ModelConfig class MemoryScope(StrEnum): @@ -75,6 +78,7 @@ class MemoryBlock(BaseModel): node_id: Optional[str] = None edited_by_user: bool = False created_by: MemoryCreatedBy + version: int = Field(description="Memory block version number") class MemoryValueData(BaseModel): diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 3bcfa201e6..f82d451a2c 100644 --- 
a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -210,6 +210,7 @@ class ChatflowMemoryService: node_id=node_id, spec=spec, created_by=created_by, + version=1, ) stmt = select(ChatflowMemoryVariable).where( and_( @@ -234,6 +235,7 @@ class ChatflowMemoryService: spec=spec, edited_by_user=memory_value_data.edited_by_user, created_by=created_by, + version=result.version, ) return MemoryBlock( tenant_id=tenant_id, @@ -243,6 +245,7 @@ class ChatflowMemoryService: node_id=node_id, spec=spec, created_by=created_by, + version=1, ) @staticmethod @@ -412,6 +415,7 @@ class ChatflowMemoryService: node_id=chatflow_memory_variable.node_id, edited_by_user=memory_value_data.edited_by_user, created_by=created_by, + version=chatflow_memory_variable.version, ) ) return results @@ -551,6 +555,7 @@ class ChatflowMemoryService: node_id=memory_block.node_id, edited_by_user=False, created_by=memory_block.created_by, + version=memory_block.version, ) ChatflowMemoryService.save_memory(updated_memory, variable_pool, is_draft) From 8833fee23286691d6d7746351799dacbd4a9ad76 Mon Sep 17 00:00:00 2001 From: Stream Date: Tue, 23 Sep 2025 23:17:34 +0800 Subject: [PATCH 59/82] feat: move version update logic out of save_memory --- api/controllers/service_api/app/chatflow_memory.py | 4 ++-- api/controllers/web/chatflow_memory.py | 4 ++-- api/services/chatflow_memory_service.py | 14 ++------------ 3 files changed, 6 insertions(+), 16 deletions(-) diff --git a/api/controllers/service_api/app/chatflow_memory.py b/api/controllers/service_api/app/chatflow_memory.py index 69e5590cc2..e9bd55a4f3 100644 --- a/api/controllers/service_api/app/chatflow_memory.py +++ b/api/controllers/service_api/app/chatflow_memory.py @@ -73,7 +73,7 @@ class MemoryEditApi(Resource): is_draft=False ) - # Create updated memory instance + # Create updated memory instance with incremented version updated_memory = MemoryBlock( spec=existing_memory.spec, tenant_id=existing_memory.tenant_id, @@ 
-81,7 +81,7 @@ class MemoryEditApi(Resource): conversation_id=existing_memory.conversation_id, node_id=existing_memory.node_id, value=update, # New value - version=existing_memory.version, # Keep current version (save_memory will handle version increment) + version=existing_memory.version + 1, # Increment version for update edited_by_user=True, created_by=existing_memory.created_by, ) diff --git a/api/controllers/web/chatflow_memory.py b/api/controllers/web/chatflow_memory.py index 60829b972b..1387807c6c 100644 --- a/api/controllers/web/chatflow_memory.py +++ b/api/controllers/web/chatflow_memory.py @@ -73,7 +73,7 @@ class MemoryEditApi(WebApiResource): is_draft=False ) - # Create updated memory instance + # Create updated memory instance with incremented version updated_memory = MemoryBlock( spec=existing_memory.spec, tenant_id=existing_memory.tenant_id, @@ -81,7 +81,7 @@ class MemoryEditApi(WebApiResource): conversation_id=existing_memory.conversation_id, node_id=existing_memory.node_id, value=update, # New value - version=existing_memory.version, # Keep current version (save_memory will handle version increment) + version=existing_memory.version + 1, # Increment version for update edited_by_user=True, created_by=existing_memory.created_by, ) diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index f82d451a2c..23e9f4174d 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -114,16 +114,6 @@ class ChatflowMemoryService: created_by = memory.created_by.id with Session(db.engine) as session: - existing = session.query(ChatflowMemoryVariable).filter_by( - memory_id=memory.spec.id, - tenant_id=memory.tenant_id, - app_id=memory.app_id, - node_id=memory.node_id, - conversation_id=memory.conversation_id, - created_by_role=created_by_role, - created_by=created_by, - ).order_by(ChatflowMemoryVariable.version.desc()).first() - new_version = 1 if not existing else existing.version + 1 
session.add( ChatflowMemoryVariable( memory_id=memory.spec.id, @@ -138,7 +128,7 @@ class ChatflowMemoryService: ).model_dump_json(), term=memory.spec.term, scope=memory.spec.scope, - version=new_version, + version=memory.version, # Use version from MemoryBlock directly created_by_role=created_by_role, created_by=created_by, ) @@ -555,7 +545,7 @@ class ChatflowMemoryService: node_id=memory_block.node_id, edited_by_user=False, created_by=memory_block.created_by, - version=memory_block.version, + version=memory_block.version + 1, # Increment version for business logic update ) ChatflowMemoryService.save_memory(updated_memory, variable_pool, is_draft) From 15be85514d0725538891a3b9e7d475940fa09da0 Mon Sep 17 00:00:00 2001 From: Stream Date: Sun, 28 Sep 2025 21:20:37 +0800 Subject: [PATCH 60/82] fix: chatflow message visibility from index --- api/models/chatflow_memory.py | 5 ++++- api/services/chatflow_history_service.py | 16 ++++++++-------- api/services/chatflow_memory_service.py | 9 +++++++++ 3 files changed, 21 insertions(+), 9 deletions(-) diff --git a/api/models/chatflow_memory.py b/api/models/chatflow_memory.py index 773c69405d..b02b809cd7 100644 --- a/api/models/chatflow_memory.py +++ b/api/models/chatflow_memory.py @@ -67,7 +67,10 @@ class ChatflowMessage(Base): id: Mapped[str] = mapped_column(StringUUID, primary_key=True, server_default=sa.text("uuid_generate_v4()")) conversation_id: Mapped[str] = mapped_column(StringUUID, nullable=False) - index: Mapped[int] = mapped_column(sa.Integer, nullable=False) + index: Mapped[int] = mapped_column(sa.Integer, nullable=False) # This index starts from 0 version: Mapped[int] = mapped_column(sa.Integer, nullable=False) data: Mapped[str] = mapped_column(sa.Text, nullable=False) # Serialized PromptMessage JSON created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at: Mapped[datetime] = mapped_column( + DateTime, nullable=False, 
server_default=func.current_timestamp(), onupdate=func.current_timestamp() + ) diff --git a/api/services/chatflow_history_service.py b/api/services/chatflow_history_service.py index 915d5ff2c8..521cf22218 100644 --- a/api/services/chatflow_history_service.py +++ b/api/services/chatflow_history_service.py @@ -68,21 +68,21 @@ class ChatflowHistoryService: next_index = max_index + 1 # Save new message to append-only table - message_data = { - 'role': prompt_message.role.value, - 'content': prompt_message.get_text_content(), - 'timestamp': time.time() - } - new_message = ChatflowMessage( conversation_id=chatflow_conv.id, index=next_index, version=1, - data=json.dumps(message_data) + data=prompt_message.model_dump_json() ) session.add(new_message) session.commit() + # Bump visible_count each time a message is saved + current_metadata = ChatflowConversationMetadata.model_validate_json(chatflow_conv.conversation_metadata) + new_visible_count = current_metadata.visible_count + 1 + new_metadata = ChatflowConversationMetadata(visible_count=new_visible_count) + chatflow_conv.conversation_metadata = new_metadata.model_dump_json() + @staticmethod def save_app_message( prompt_message: PromptMessage, @@ -209,7 +209,7 @@ class ChatflowHistoryService: else: if create_if_missing: # Create a new chatflow conversation - default_metadata = ChatflowConversationMetadata(visible_count=20) + default_metadata = ChatflowConversationMetadata(visible_count=0) # NOTE(review): visible_count is declared Field(gt=0), so 0 fails validation — relax to ge=0 or keep >=1 new_chatflow_conv = ChatflowConversation( tenant_id=tenant_id, app_id=app_id, diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 23e9f4174d..0e6e064b89 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -549,6 +549,15 @@ class ChatflowMemoryService: ) ChatflowMemoryService.save_memory(updated_memory, variable_pool, is_draft) + # Reset visible_count to preserved_turns after the memory update + ChatflowHistoryService.update_visible_count( + conversation_id=memory_block.conversation_id, +
node_id=memory_block.node_id, + new_visible_count=memory_block.spec.preserved_turns, + app_id=memory_block.app_id, + tenant_id=memory_block.tenant_id + ) + @staticmethod def delete_memory(app: App, memory_id: str, created_by: MemoryCreatedBy): workflow = WorkflowService().get_published_workflow(app) From bb1f1a56a5d94e561ae8fc5a7d98a8dd4ed23f69 Mon Sep 17 00:00:00 2001 From: Stream Date: Sun, 28 Sep 2025 22:36:10 +0800 Subject: [PATCH 61/82] feat: update MemoryListApi response format with ChatflowConversationMetadata --- .../service_api/app/chatflow_memory.py | 34 +++++---- api/controllers/web/chatflow_memory.py | 35 ++++++---- api/core/memory/entities.py | 27 ++++++++ api/services/chatflow_history_service.py | 16 ++++- api/services/chatflow_memory_service.py | 69 ++++++++++++++++--- 5 files changed, 142 insertions(+), 39 deletions(-) diff --git a/api/controllers/service_api/app/chatflow_memory.py b/api/controllers/service_api/app/chatflow_memory.py index e9bd55a4f3..1f56dfba43 100644 --- a/api/controllers/service_api/app/chatflow_memory.py +++ b/api/controllers/service_api/app/chatflow_memory.py @@ -21,21 +21,27 @@ class MemoryListApi(Resource): memory_id = args.get("memory_id") version = args.get("version") - result = ChatflowMemoryService.get_persistent_memories( - app_model, - MemoryCreatedBy(end_user_id=end_user.id), - version - ) if conversation_id: - result = [ - *result, - *ChatflowMemoryService.get_session_memories( - app_model, - MemoryCreatedBy(end_user_id=end_user.id), - conversation_id, - version - ) - ] + result = ChatflowMemoryService.get_persistent_memories_with_conversation( + app_model, + MemoryCreatedBy(end_user_id=end_user.id), + conversation_id, + version + ) + session_memories = ChatflowMemoryService.get_session_memories_with_conversation( + app_model, + MemoryCreatedBy(end_user_id=end_user.id), + conversation_id, + version + ) + result = [*result, *session_memories] + else: + result = ChatflowMemoryService.get_persistent_memories( + 
app_model, + MemoryCreatedBy(end_user_id=end_user.id), + version + ) + if memory_id: result = [it for it in result if it.spec.id == memory_id] return [it for it in result if it.spec.end_user_visible] diff --git a/api/controllers/web/chatflow_memory.py b/api/controllers/web/chatflow_memory.py index 1387807c6c..5969d66bcf 100644 --- a/api/controllers/web/chatflow_memory.py +++ b/api/controllers/web/chatflow_memory.py @@ -20,21 +20,28 @@ class MemoryListApi(WebApiResource): memory_id = args.get("memory_id") version = args.get("version") - result = ChatflowMemoryService.get_persistent_memories( - app_model, - MemoryCreatedBy(end_user_id=end_user.id), - version - ) + if conversation_id: - result = [ - *result, - *ChatflowMemoryService.get_session_memories( - app_model, - MemoryCreatedBy(end_user_id=end_user.id), - conversation_id, - version - ) - ] + result = ChatflowMemoryService.get_persistent_memories_with_conversation( + app_model, + MemoryCreatedBy(end_user_id=end_user.id), + conversation_id, + version + ) + session_memories = ChatflowMemoryService.get_session_memories_with_conversation( + app_model, + MemoryCreatedBy(end_user_id=end_user.id), + conversation_id, + version + ) + result = [*result, *session_memories] + else: + result = ChatflowMemoryService.get_persistent_memories( + app_model, + MemoryCreatedBy(end_user_id=end_user.id), + version + ) + if memory_id: result = [it for it in result if it.spec.id == memory_id] return [it for it in result if it.spec.end_user_visible] diff --git a/api/core/memory/entities.py b/api/core/memory/entities.py index 0cee9ad0e7..88602142b3 100644 --- a/api/core/memory/entities.py +++ b/api/core/memory/entities.py @@ -90,3 +90,30 @@ class ChatflowConversationMetadata(BaseModel): """Metadata for chatflow conversation with visible message count""" type: str = "mutable_visible_window" visible_count: int = Field(gt=0, description="Number of visible messages to keep") + + +class MemoryBlockWithConversation(MemoryBlock): + 
"""MemoryBlock with optional conversation metadata for session memories""" + conversation_metadata: ChatflowConversationMetadata = Field( + description="Conversation metadata, only present for session memories" + ) + + @classmethod + def from_memory_block( + cls, + memory_block: MemoryBlock, + conversation_metadata: ChatflowConversationMetadata + ) -> "MemoryBlockWithConversation": + """Create MemoryBlockWithConversation from MemoryBlock""" + return cls( + spec=memory_block.spec, + tenant_id=memory_block.tenant_id, + value=memory_block.value, + app_id=memory_block.app_id, + conversation_id=memory_block.conversation_id, + node_id=memory_block.node_id, + edited_by_user=memory_block.edited_by_user, + created_by=memory_block.created_by, + version=memory_block.version, + conversation_metadata=conversation_metadata + ) diff --git a/api/services/chatflow_history_service.py b/api/services/chatflow_history_service.py index 521cf22218..8ea3240f4f 100644 --- a/api/services/chatflow_history_service.py +++ b/api/services/chatflow_history_service.py @@ -1,5 +1,4 @@ import json -import time from collections.abc import MutableMapping, Sequence from typing import Literal, Optional, overload @@ -134,6 +133,21 @@ class ChatflowHistoryService: session.commit() + @staticmethod + def get_conversation_metadata( + tenant_id: str, + app_id: str, + conversation_id: str, + node_id: Optional[str] + ) -> ChatflowConversationMetadata: + with Session(db.engine) as session: + chatflow_conv = ChatflowHistoryService._get_or_create_chatflow_conversation( + session, conversation_id, app_id, tenant_id, node_id, create_if_missing=False + ) + if not chatflow_conv: + raise ValueError(f"Conversation not found: {conversation_id}") + return ChatflowConversationMetadata.model_validate_json(chatflow_conv.conversation_metadata) + @staticmethod def _filter_latest_messages(raw_messages: Sequence[ChatflowMessage]) -> Sequence[ChatflowMessage]: index_to_message: MutableMapping[int, ChatflowMessage] = {} diff --git 
a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py index 0e6e064b89..13b2662640 100644 --- a/api/services/chatflow_memory_service.py +++ b/api/services/chatflow_memory_service.py @@ -11,6 +11,7 @@ from core.llm_generator.llm_generator import LLMGenerator from core.memory.entities import ( MemoryBlock, MemoryBlockSpec, + MemoryBlockWithConversation, MemoryCreatedBy, MemoryScheduleMode, MemoryScope, @@ -280,7 +281,8 @@ class ChatflowMemoryService: block=memory_block, is_draft=is_draft, variable_pool=variable_pool, - visible_messages=visible_messages + visible_messages=visible_messages, + conversation_id=conversation_id, ) # sync mode: submit a batch update task @@ -332,7 +334,8 @@ class ChatflowMemoryService: visible_messages=visible_messages, memory_block=memory_block, variable_pool=variable_pool, - is_draft=is_draft + is_draft=is_draft, + conversation_id=conversation_id ) else: # Node-level async: execute asynchronously @@ -340,7 +343,8 @@ class ChatflowMemoryService: memory_block=memory_block, visible_messages=visible_messages, variable_pool=variable_pool, - is_draft=is_draft + is_draft=is_draft, + conversation_id=conversation_id ) return True @@ -422,6 +426,7 @@ class ChatflowMemoryService: block: MemoryBlock, visible_messages: Sequence[PromptMessage], variable_pool: VariablePool, + conversation_id: str, is_draft: bool ): thread = threading.Thread( @@ -430,7 +435,8 @@ class ChatflowMemoryService: 'memory_block': block, 'visible_messages': visible_messages, 'variable_pool': variable_pool, - 'is_draft': is_draft + 'is_draft': is_draft, + 'conversation_id': conversation_id }, ) thread.start() @@ -478,7 +484,8 @@ class ChatflowMemoryService: 'memory_block': block, 'visible_messages': visible_messages, 'variable_pool': variable_pool, - 'is_draft': is_draft + 'is_draft': is_draft, + 'conversation_id': conversation_id, }, ) threads.append(thread) @@ -494,13 +501,15 @@ class ChatflowMemoryService: memory_block: MemoryBlock, 
visible_messages: Sequence[PromptMessage], variable_pool: VariablePool, + conversation_id: str, is_draft: bool ): ChatflowMemoryService._perform_memory_update( memory_block=memory_block, visible_messages=visible_messages, variable_pool=variable_pool, - is_draft=is_draft + is_draft=is_draft, + conversation_id=conversation_id ) @staticmethod @@ -508,6 +517,7 @@ class ChatflowMemoryService: memory_block: MemoryBlock, visible_messages: Sequence[PromptMessage], variable_pool: VariablePool, + conversation_id: str, is_draft: bool = False ): thread = threading.Thread( @@ -516,7 +526,8 @@ class ChatflowMemoryService: 'memory_block': memory_block, 'visible_messages': visible_messages, 'variable_pool': variable_pool, - 'is_draft': is_draft + 'is_draft': is_draft, + 'conversation_id': conversation_id, }, daemon=True ) @@ -526,6 +537,7 @@ class ChatflowMemoryService: def _perform_memory_update( memory_block: MemoryBlock, variable_pool: VariablePool, + conversation_id: str, visible_messages: Sequence[PromptMessage], is_draft: bool ): @@ -541,7 +553,7 @@ class ChatflowMemoryService: value=updated_value, spec=memory_block.spec, app_id=memory_block.app_id, - conversation_id=memory_block.conversation_id, + conversation_id=conversation_id, node_id=memory_block.node_id, edited_by_user=False, created_by=memory_block.created_by, @@ -549,9 +561,8 @@ class ChatflowMemoryService: ) ChatflowMemoryService.save_memory(updated_memory, variable_pool, is_draft) - # 添加以下代码:重置 visible_count 为 preserved_turns ChatflowHistoryService.update_visible_count( - conversation_id=memory_block.conversation_id, + conversation_id=conversation_id, node_id=memory_block.node_id, new_visible_count=memory_block.spec.preserved_turns, app_id=memory_block.app_id, @@ -609,6 +620,44 @@ class ChatflowMemoryService: session.execute(stmt) session.commit() + @staticmethod + def get_persistent_memories_with_conversation( + app: App, + created_by: MemoryCreatedBy, + conversation_id: str, + version: int | None = None + ) -> 
Sequence[MemoryBlockWithConversation]: + """Get persistent memories with conversation metadata (always None for persistent)""" + memory_blocks = ChatflowMemoryService.get_persistent_memories(app, created_by, version) + return [ + MemoryBlockWithConversation.from_memory_block( + block, + ChatflowHistoryService.get_conversation_metadata( + app.tenant_id, app.id, conversation_id, block.node_id + ) + ) + for block in memory_blocks + ] + + @staticmethod + def get_session_memories_with_conversation( + app: App, + created_by: MemoryCreatedBy, + conversation_id: str, + version: int | None = None + ) -> Sequence[MemoryBlockWithConversation]: + """Get session memories with conversation metadata""" + memory_blocks = ChatflowMemoryService.get_session_memories(app, created_by, conversation_id, version) + return [ + MemoryBlockWithConversation.from_memory_block( + block, + ChatflowHistoryService.get_conversation_metadata( + app.tenant_id, app.id, conversation_id, block.node_id + ) + ) + for block in memory_blocks + ] + @staticmethod def _format_chat_history(messages: Sequence[PromptMessage]) -> Sequence[tuple[str, str]]: result = [] From 1e0a3b163eb76bff28d2da946fdc40b553d86d8b Mon Sep 17 00:00:00 2001 From: Stream Date: Sun, 28 Sep 2025 22:41:07 +0800 Subject: [PATCH 62/82] refactor: fix ruff --- api/controllers/web/chatflow_memory.py | 1 - api/core/memory/entities.py | 2 +- api/models/chatflow_memory.py | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/api/controllers/web/chatflow_memory.py b/api/controllers/web/chatflow_memory.py index 5969d66bcf..276d6d3b83 100644 --- a/api/controllers/web/chatflow_memory.py +++ b/api/controllers/web/chatflow_memory.py @@ -20,7 +20,6 @@ class MemoryListApi(WebApiResource): memory_id = args.get("memory_id") version = args.get("version") - if conversation_id: result = ChatflowMemoryService.get_persistent_memories_with_conversation( app_model, diff --git a/api/core/memory/entities.py b/api/core/memory/entities.py index 
88602142b3..3914be218d 100644 --- a/api/core/memory/entities.py +++ b/api/core/memory/entities.py @@ -103,7 +103,7 @@ class MemoryBlockWithConversation(MemoryBlock): cls, memory_block: MemoryBlock, conversation_metadata: ChatflowConversationMetadata - ) -> "MemoryBlockWithConversation": + ) -> MemoryBlockWithConversation: """Create MemoryBlockWithConversation from MemoryBlock""" return cls( spec=memory_block.spec, diff --git a/api/models/chatflow_memory.py b/api/models/chatflow_memory.py index b02b809cd7..92010aeba4 100644 --- a/api/models/chatflow_memory.py +++ b/api/models/chatflow_memory.py @@ -67,7 +67,7 @@ class ChatflowMessage(Base): id: Mapped[str] = mapped_column(StringUUID, primary_key=True, server_default=sa.text("uuid_generate_v4()")) conversation_id: Mapped[str] = mapped_column(StringUUID, nullable=False) - index: Mapped[int] = mapped_column(sa.Integer, nullable=False) # This index starts from 0 + index: Mapped[int] = mapped_column(sa.Integer, nullable=False) # This index starts from 0 version: Mapped[int] = mapped_column(sa.Integer, nullable=False) data: Mapped[str] = mapped_column(sa.Text, nullable=False) # Serialized PromptMessage JSON created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) From 5d3e8a31d0e88fbe6f6d3c232694065c29ccecfc Mon Sep 17 00:00:00 2001 From: Novice Date: Fri, 10 Oct 2025 10:54:32 +0800 Subject: [PATCH 63/82] fix: restore array flattening behavior in iteration node (#26695) --- .../nodes/iteration/iteration_node.py | 38 +++++++++++++++++-- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/api/core/workflow/nodes/iteration/iteration_node.py b/api/core/workflow/nodes/iteration/iteration_node.py index a05a6b1b96..965e22b74c 100644 --- a/api/core/workflow/nodes/iteration/iteration_node.py +++ b/api/core/workflow/nodes/iteration/iteration_node.py @@ -342,10 +342,13 @@ class IterationNode(Node): iterator_list_value: Sequence[object], iter_run_map: dict[str, float], 
) -> Generator[NodeEventBase, None, None]: + # Flatten the list of lists if all outputs are lists + flattened_outputs = self._flatten_outputs_if_needed(outputs) + yield IterationSucceededEvent( start_at=started_at, inputs=inputs, - outputs={"output": outputs}, + outputs={"output": flattened_outputs}, steps=len(iterator_list_value), metadata={ WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens, @@ -357,13 +360,39 @@ class IterationNode(Node): yield StreamCompletedEvent( node_run_result=NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, - outputs={"output": outputs}, + outputs={"output": flattened_outputs}, metadata={ WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens, }, ) ) + def _flatten_outputs_if_needed(self, outputs: list[object]) -> list[object]: + """ + Flatten the outputs list if all elements are lists. + This maintains backward compatibility with version 1.8.1 behavior. + """ + if not outputs: + return outputs + + # Check if all non-None outputs are lists + non_none_outputs = [output for output in outputs if output is not None] + if not non_none_outputs: + return outputs + + if all(isinstance(output, list) for output in non_none_outputs): + # Flatten the list of lists + flattened: list[Any] = [] + for output in outputs: + if isinstance(output, list): + flattened.extend(output) + elif output is not None: + # This shouldn't happen based on our check, but handle it gracefully + flattened.append(output) + return flattened + + return outputs + def _handle_iteration_failure( self, started_at: datetime, @@ -373,10 +402,13 @@ class IterationNode(Node): iter_run_map: dict[str, float], error: IterationNodeError, ) -> Generator[NodeEventBase, None, None]: + # Flatten the list of lists if all outputs are lists (even in failure case) + flattened_outputs = self._flatten_outputs_if_needed(outputs) + yield IterationFailedEvent( start_at=started_at, inputs=inputs, - outputs={"output": outputs}, + 
outputs={"output": flattened_outputs}, steps=len(iterator_list_value), metadata={ WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens, From 54db4c176a0a12b35d4b1f63e26ea3acb09bb69a Mon Sep 17 00:00:00 2001 From: yihong Date: Fri, 10 Oct 2025 12:59:28 +0800 Subject: [PATCH 64/82] fix: drop useless logic (#26678) Signed-off-by: yihong0618 --- api/core/app/apps/base_app_runner.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/api/core/app/apps/base_app_runner.py b/api/core/app/apps/base_app_runner.py index e7db3bc41b..61ac040c05 100644 --- a/api/core/app/apps/base_app_runner.py +++ b/api/core/app/apps/base_app_runner.py @@ -61,9 +61,6 @@ class AppRunner: if model_context_tokens is None: return -1 - if max_tokens is None: - max_tokens = 0 - prompt_tokens = model_instance.get_llm_num_tokens(prompt_messages) if prompt_tokens + max_tokens > model_context_tokens: From cf1778e696ceb9b79eacfb467e4ce03e95d5e817 Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Fri, 10 Oct 2025 13:17:33 +0800 Subject: [PATCH 65/82] fix: issue w/ timepicker (#26696) Co-authored-by: lyzno1 Co-authored-by: lyzno1 <92089059+lyzno1@users.noreply.github.com> --- .../time-picker/index.spec.tsx | 95 +++++++++++++ .../time-picker/index.tsx | 131 +++++++++++++---- .../base/date-and-time-picker/types.ts | 2 +- .../date-and-time-picker/utils/dayjs.spec.ts | 67 +++++++++ .../base/date-and-time-picker/utils/dayjs.ts | 134 ++++++++++++++++-- 5 files changed, 388 insertions(+), 41 deletions(-) create mode 100644 web/app/components/base/date-and-time-picker/time-picker/index.spec.tsx create mode 100644 web/app/components/base/date-and-time-picker/utils/dayjs.spec.ts diff --git a/web/app/components/base/date-and-time-picker/time-picker/index.spec.tsx b/web/app/components/base/date-and-time-picker/time-picker/index.spec.tsx new file mode 100644 index 0000000000..40bc2928c8 --- /dev/null +++ 
b/web/app/components/base/date-and-time-picker/time-picker/index.spec.tsx @@ -0,0 +1,95 @@ +import React from 'react' +import { fireEvent, render, screen } from '@testing-library/react' +import TimePicker from './index' +import dayjs from '../utils/dayjs' +import { isDayjsObject } from '../utils/dayjs' + +jest.mock('react-i18next', () => ({ + useTranslation: () => ({ + t: (key: string) => { + if (key === 'time.defaultPlaceholder') return 'Pick a time...' + if (key === 'time.operation.now') return 'Now' + if (key === 'time.operation.ok') return 'OK' + if (key === 'common.operation.clear') return 'Clear' + return key + }, + }), +})) + +jest.mock('@/app/components/base/portal-to-follow-elem', () => ({ + PortalToFollowElem: ({ children }: { children: React.ReactNode }) =>
{children}
, + PortalToFollowElemTrigger: ({ children, onClick }: { children: React.ReactNode, onClick: (e: React.MouseEvent) => void }) => ( +
{children}
+ ), + PortalToFollowElemContent: ({ children }: { children: React.ReactNode }) => ( +
{children}
+ ), +})) + +jest.mock('./options', () => () =>
) +jest.mock('./header', () => () =>
) + +describe('TimePicker', () => { + const baseProps = { + onChange: jest.fn(), + onClear: jest.fn(), + } + + beforeEach(() => { + jest.clearAllMocks() + }) + + test('renders formatted value for string input (Issue #26692 regression)', () => { + render( + , + ) + + expect(screen.getByDisplayValue('06:45 PM')).toBeInTheDocument() + }) + + test('confirms cleared value when confirming without selection', () => { + render( + , + ) + + const input = screen.getByRole('textbox') + fireEvent.click(input) + + const clearButton = screen.getByRole('button', { name: /clear/i }) + fireEvent.click(clearButton) + + const confirmButton = screen.getByRole('button', { name: 'OK' }) + fireEvent.click(confirmButton) + + expect(baseProps.onChange).toHaveBeenCalledTimes(1) + expect(baseProps.onChange).toHaveBeenCalledWith(undefined) + expect(baseProps.onClear).not.toHaveBeenCalled() + }) + + test('selecting current time emits timezone-aware value', () => { + const onChange = jest.fn() + render( + , + ) + + const nowButton = screen.getByRole('button', { name: 'Now' }) + fireEvent.click(nowButton) + + expect(onChange).toHaveBeenCalledTimes(1) + const emitted = onChange.mock.calls[0][0] + expect(isDayjsObject(emitted)).toBe(true) + expect(emitted?.utcOffset()).toBe(dayjs().tz('America/New_York').utcOffset()) + }) +}) diff --git a/web/app/components/base/date-and-time-picker/time-picker/index.tsx b/web/app/components/base/date-and-time-picker/time-picker/index.tsx index 1fb2cfed11..f23fcf8f4e 100644 --- a/web/app/components/base/date-and-time-picker/time-picker/index.tsx +++ b/web/app/components/base/date-and-time-picker/time-picker/index.tsx @@ -1,6 +1,13 @@ import React, { useCallback, useEffect, useRef, useState } from 'react' -import type { Period, TimePickerProps } from '../types' -import dayjs, { cloneTime, getDateWithTimezone, getHourIn12Hour } from '../utils/dayjs' +import type { Dayjs } from 'dayjs' +import { Period } from '../types' +import type { TimePickerProps } from 
'../types' +import dayjs, { + getDateWithTimezone, + getHourIn12Hour, + isDayjsObject, + toDayjs, +} from '../utils/dayjs' import { PortalToFollowElem, PortalToFollowElemContent, @@ -13,6 +20,11 @@ import { useTranslation } from 'react-i18next' import { RiCloseCircleFill, RiTimeLine } from '@remixicon/react' import cn from '@/utils/classnames' +const to24Hour = (hour12: string, period: Period) => { + const normalized = Number.parseInt(hour12, 10) % 12 + return period === Period.PM ? normalized + 12 : normalized +} + const TimePicker = ({ value, timezone, @@ -28,7 +40,11 @@ const TimePicker = ({ const [isOpen, setIsOpen] = useState(false) const containerRef = useRef(null) const isInitial = useRef(true) - const [selectedTime, setSelectedTime] = useState(() => value ? getDateWithTimezone({ timezone, date: value }) : undefined) + + // Initialize selectedTime + const [selectedTime, setSelectedTime] = useState(() => { + return toDayjs(value, { timezone }) + }) useEffect(() => { const handleClickOutside = (event: MouseEvent) => { @@ -39,20 +55,47 @@ const TimePicker = ({ return () => document.removeEventListener('mousedown', handleClickOutside) }, []) + // Track previous values to avoid unnecessary updates + const prevValueRef = useRef(value) + const prevTimezoneRef = useRef(timezone) + useEffect(() => { if (isInitial.current) { isInitial.current = false + // Save initial values on first render + prevValueRef.current = value + prevTimezoneRef.current = timezone return } - if (value) { - const newValue = getDateWithTimezone({ date: value, timezone }) - setSelectedTime(newValue) - onChange(newValue) + + // Only update when timezone changes but value doesn't + const valueChanged = prevValueRef.current !== value + const timezoneChanged = prevTimezoneRef.current !== timezone + + // Update reference values + prevValueRef.current = value + prevTimezoneRef.current = timezone + + // Skip if neither timezone changed nor value changed + if (!timezoneChanged && !valueChanged) return 
+ + if (value !== undefined && value !== null) { + const dayjsValue = toDayjs(value, { timezone }) + if (!dayjsValue) return + + setSelectedTime(dayjsValue) + + if (timezoneChanged && !valueChanged) + onChange(dayjsValue) + return } - else { - setSelectedTime(prev => prev ? getDateWithTimezone({ date: prev, timezone }) : undefined) - } - }, [timezone]) + + setSelectedTime((prev) => { + if (!isDayjsObject(prev)) + return undefined + return timezone ? getDateWithTimezone({ date: prev, timezone }) : prev + }) + }, [timezone, value, onChange]) const handleClickTrigger = (e: React.MouseEvent) => { e.stopPropagation() @@ -61,8 +104,16 @@ const TimePicker = ({ return } setIsOpen(true) - if (value) - setSelectedTime(value) + + if (value) { + const dayjsValue = toDayjs(value, { timezone }) + const needsUpdate = dayjsValue && ( + !selectedTime + || !isDayjsObject(selectedTime) + || !dayjsValue.isSame(selectedTime, 'minute') + ) + if (needsUpdate) setSelectedTime(dayjsValue) + } } const handleClear = (e: React.MouseEvent) => { @@ -73,42 +124,68 @@ const TimePicker = ({ } const handleTimeSelect = (hour: string, minute: string, period: Period) => { - const newTime = cloneTime(dayjs(), dayjs(`1/1/2000 ${hour}:${minute} ${period}`)) + const periodAdjustedHour = to24Hour(hour, period) + const nextMinute = Number.parseInt(minute, 10) setSelectedTime((prev) => { - return prev ? cloneTime(prev, newTime) : newTime + const reference = isDayjsObject(prev) + ? prev + : (timezone ? getDateWithTimezone({ timezone }) : dayjs()).startOf('minute') + return reference + .set('hour', periodAdjustedHour) + .set('minute', nextMinute) + .set('second', 0) + .set('millisecond', 0) }) } + const getSafeTimeObject = useCallback(() => { + if (isDayjsObject(selectedTime)) + return selectedTime + return (timezone ? 
getDateWithTimezone({ timezone }) : dayjs()).startOf('day') + }, [selectedTime, timezone]) + const handleSelectHour = useCallback((hour: string) => { - const time = selectedTime || dayjs().startOf('day') + const time = getSafeTimeObject() handleTimeSelect(hour, time.minute().toString().padStart(2, '0'), time.format('A') as Period) - }, [selectedTime]) + }, [getSafeTimeObject]) const handleSelectMinute = useCallback((minute: string) => { - const time = selectedTime || dayjs().startOf('day') + const time = getSafeTimeObject() handleTimeSelect(getHourIn12Hour(time).toString().padStart(2, '0'), minute, time.format('A') as Period) - }, [selectedTime]) + }, [getSafeTimeObject]) const handleSelectPeriod = useCallback((period: Period) => { - const time = selectedTime || dayjs().startOf('day') + const time = getSafeTimeObject() handleTimeSelect(getHourIn12Hour(time).toString().padStart(2, '0'), time.minute().toString().padStart(2, '0'), period) - }, [selectedTime]) + }, [getSafeTimeObject]) const handleSelectCurrentTime = useCallback(() => { const newDate = getDateWithTimezone({ timezone }) setSelectedTime(newDate) onChange(newDate) setIsOpen(false) - }, [onChange, timezone]) + }, [timezone, onChange]) const handleConfirm = useCallback(() => { - onChange(selectedTime) + const valueToEmit = isDayjsObject(selectedTime) ? selectedTime : undefined + onChange(valueToEmit) setIsOpen(false) - }, [onChange, selectedTime]) + }, [selectedTime, onChange]) const timeFormat = 'hh:mm A' - const displayValue = value?.format(timeFormat) || '' - const placeholderDate = isOpen && selectedTime ? 
selectedTime.format(timeFormat) : (placeholder || t('time.defaultPlaceholder')) + + const formatTimeValue = useCallback((timeValue: string | Dayjs | undefined): string => { + if (!timeValue) return '' + + const dayjsValue = toDayjs(timeValue, { timezone }) + return dayjsValue?.format(timeFormat) || '' + }, [timezone]) + + const displayValue = formatTimeValue(value) + + const placeholderDate = isOpen && isDayjsObject(selectedTime) + ? selectedTime.format(timeFormat) + : (placeholder || t('time.defaultPlaceholder')) const inputElem = (
diff --git a/web/app/components/base/date-and-time-picker/types.ts b/web/app/components/base/date-and-time-picker/types.ts index 4ac01c142a..b51c2ebb01 100644 --- a/web/app/components/base/date-and-time-picker/types.ts +++ b/web/app/components/base/date-and-time-picker/types.ts @@ -54,7 +54,7 @@ export type TriggerParams = { onClick: (e: React.MouseEvent) => void } export type TimePickerProps = { - value: Dayjs | undefined + value: Dayjs | string | undefined timezone?: string placeholder?: string onChange: (date: Dayjs | undefined) => void diff --git a/web/app/components/base/date-and-time-picker/utils/dayjs.spec.ts b/web/app/components/base/date-and-time-picker/utils/dayjs.spec.ts new file mode 100644 index 0000000000..549ab01029 --- /dev/null +++ b/web/app/components/base/date-and-time-picker/utils/dayjs.spec.ts @@ -0,0 +1,67 @@ +import dayjs from './dayjs' +import { + getDateWithTimezone, + isDayjsObject, + toDayjs, +} from './dayjs' + +describe('dayjs utilities', () => { + const timezone = 'UTC' + + test('toDayjs parses time-only strings with timezone support', () => { + const result = toDayjs('18:45', { timezone }) + expect(result).toBeDefined() + expect(result?.format('HH:mm')).toBe('18:45') + expect(result?.utcOffset()).toBe(getDateWithTimezone({ timezone }).utcOffset()) + }) + + test('toDayjs parses 12-hour time strings', () => { + const tz = 'America/New_York' + const result = toDayjs('07:15 PM', { timezone: tz }) + expect(result).toBeDefined() + expect(result?.format('HH:mm')).toBe('19:15') + expect(result?.utcOffset()).toBe(getDateWithTimezone({ timezone: tz }).utcOffset()) + }) + + test('isDayjsObject detects dayjs instances', () => { + const date = dayjs() + expect(isDayjsObject(date)).toBe(true) + expect(isDayjsObject(getDateWithTimezone({ timezone }))).toBe(true) + expect(isDayjsObject('2024-01-01')).toBe(false) + expect(isDayjsObject({})).toBe(false) + }) + + test('toDayjs parses datetime strings in target timezone', () => { + const value = 
'2024-05-01 12:00:00' + const tz = 'America/New_York' + + const result = toDayjs(value, { timezone: tz }) + + expect(result).toBeDefined() + expect(result?.hour()).toBe(12) + expect(result?.format('YYYY-MM-DD HH:mm')).toBe('2024-05-01 12:00') + }) + + test('toDayjs parses ISO datetime strings in target timezone', () => { + const value = '2024-05-01T14:30:00' + const tz = 'Europe/London' + + const result = toDayjs(value, { timezone: tz }) + + expect(result).toBeDefined() + expect(result?.hour()).toBe(14) + expect(result?.minute()).toBe(30) + }) + + test('toDayjs handles dates without time component', () => { + const value = '2024-05-01' + const tz = 'America/Los_Angeles' + + const result = toDayjs(value, { timezone: tz }) + + expect(result).toBeDefined() + expect(result?.format('YYYY-MM-DD')).toBe('2024-05-01') + expect(result?.hour()).toBe(0) + expect(result?.minute()).toBe(0) + }) +}) diff --git a/web/app/components/base/date-and-time-picker/utils/dayjs.ts b/web/app/components/base/date-and-time-picker/utils/dayjs.ts index fef35bf6ca..808b50247a 100644 --- a/web/app/components/base/date-and-time-picker/utils/dayjs.ts +++ b/web/app/components/base/date-and-time-picker/utils/dayjs.ts @@ -10,6 +10,25 @@ dayjs.extend(timezone) export default dayjs const monthMaps: Record = {} +const DEFAULT_OFFSET_STR = 'UTC+0' +const TIME_ONLY_REGEX = /^(\d{1,2}):(\d{2})(?::(\d{2})(?:\.(\d{1,3}))?)?$/ +const TIME_ONLY_12H_REGEX = /^(\d{1,2}):(\d{2})(?::(\d{2}))?\s?(AM|PM)$/i + +const COMMON_PARSE_FORMATS = [ + 'YYYY-MM-DD', + 'YYYY/MM/DD', + 'DD-MM-YYYY', + 'DD/MM/YYYY', + 'MM-DD-YYYY', + 'MM/DD/YYYY', + 'YYYY-MM-DDTHH:mm:ss.SSSZ', + 'YYYY-MM-DDTHH:mm:ssZ', + 'YYYY-MM-DD HH:mm:ss', + 'YYYY-MM-DDTHH:mm', + 'YYYY-MM-DDTHH:mmZ', + 'YYYY-MM-DDTHH:mm:ss', + 'YYYY-MM-DDTHH:mm:ss.SSS', +] export const cloneTime = (targetDate: Dayjs, sourceDate: Dayjs) => { return targetDate.clone() @@ -76,21 +95,116 @@ export const getHourIn12Hour = (date: Dayjs) => { return hour === 0 ? 12 : hour >= 12 ? 
hour - 12 : hour } -export const getDateWithTimezone = (props: { date?: Dayjs, timezone?: string }) => { - return props.date ? dayjs.tz(props.date, props.timezone) : dayjs().tz(props.timezone) +export const getDateWithTimezone = ({ date, timezone }: { date?: Dayjs, timezone?: string }) => { + if (!timezone) + return (date ?? dayjs()).clone() + return date ? dayjs.tz(date, timezone) : dayjs().tz(timezone) } -// Asia/Shanghai -> UTC+8 -const DEFAULT_OFFSET_STR = 'UTC+0' export const convertTimezoneToOffsetStr = (timezone?: string) => { if (!timezone) return DEFAULT_OFFSET_STR const tzItem = tz.find(item => item.value === timezone) - if(!tzItem) + if (!tzItem) return DEFAULT_OFFSET_STR return `UTC${tzItem.name.charAt(0)}${tzItem.name.charAt(2)}` } +export const isDayjsObject = (value: unknown): value is Dayjs => dayjs.isDayjs(value) + +export type ToDayjsOptions = { + timezone?: string + format?: string + formats?: string[] +} + +const warnParseFailure = (value: string) => { + if (process.env.NODE_ENV !== 'production') + console.warn('[TimePicker] Failed to parse time value', value) +} + +const normalizeMillisecond = (value: string | undefined) => { + if (!value) return 0 + if (value.length === 3) return Number(value) + if (value.length > 3) return Number(value.slice(0, 3)) + return Number(value.padEnd(3, '0')) +} + +const applyTimezone = (date: Dayjs, timezone?: string) => { + return timezone ? getDateWithTimezone({ date, timezone }) : date +} + +export const toDayjs = (value: string | Dayjs | undefined, options: ToDayjsOptions = {}): Dayjs | undefined => { + if (!value) + return undefined + + const { timezone: tzName, format, formats } = options + + if (isDayjsObject(value)) + return applyTimezone(value, tzName) + + if (typeof value !== 'string') + return undefined + + const trimmed = value.trim() + + if (format) { + const parsedWithFormat = tzName + ? 
dayjs.tz(trimmed, format, tzName, true) + : dayjs(trimmed, format, true) + if (parsedWithFormat.isValid()) + return parsedWithFormat + } + + const timeMatch = TIME_ONLY_REGEX.exec(trimmed) + if (timeMatch) { + const base = applyTimezone(dayjs(), tzName).startOf('day') + const rawHour = Number(timeMatch[1]) + const minute = Number(timeMatch[2]) + const second = timeMatch[3] ? Number(timeMatch[3]) : 0 + const millisecond = normalizeMillisecond(timeMatch[4]) + + return base + .set('hour', rawHour) + .set('minute', minute) + .set('second', second) + .set('millisecond', millisecond) + } + + const timeMatch12h = TIME_ONLY_12H_REGEX.exec(trimmed) + if (timeMatch12h) { + const base = applyTimezone(dayjs(), tzName).startOf('day') + let hour = Number(timeMatch12h[1]) % 12 + const isPM = timeMatch12h[4]?.toUpperCase() === 'PM' + if (isPM) + hour += 12 + const minute = Number(timeMatch12h[2]) + const second = timeMatch12h[3] ? Number(timeMatch12h[3]) : 0 + + return base + .set('hour', hour) + .set('minute', minute) + .set('second', second) + .set('millisecond', 0) + } + + const candidateFormats = formats ?? COMMON_PARSE_FORMATS + for (const fmt of candidateFormats) { + const parsed = tzName + ? dayjs.tz(trimmed, fmt, tzName, true) + : dayjs(trimmed, fmt, true) + if (parsed.isValid()) + return parsed + } + + const fallbackParsed = tzName ? 
dayjs.tz(trimmed, tzName) : dayjs(trimmed) + if (fallbackParsed.isValid()) + return fallbackParsed + + warnParseFailure(value) + return undefined +} + // Parse date with multiple format support export const parseDateWithFormat = (dateString: string, format?: string): Dayjs | null => { if (!dateString) return null @@ -103,15 +217,7 @@ export const parseDateWithFormat = (dateString: string, format?: string): Dayjs // Try common date formats const formats = [ - 'YYYY-MM-DD', // Standard format - 'YYYY/MM/DD', // Slash format - 'DD-MM-YYYY', // European format - 'DD/MM/YYYY', // European slash format - 'MM-DD-YYYY', // US format - 'MM/DD/YYYY', // US slash format - 'YYYY-MM-DDTHH:mm:ss.SSSZ', // ISO format - 'YYYY-MM-DDTHH:mm:ssZ', // ISO format (no milliseconds) - 'YYYY-MM-DD HH:mm:ss', // Standard datetime format + ...COMMON_PARSE_FORMATS, ] for (const fmt of formats) { From 3a5aa4587c45530e678fd3f7bc1de6774ed68e38 Mon Sep 17 00:00:00 2001 From: Coding On Star <447357187@qq.com> Date: Fri, 10 Oct 2025 15:34:56 +0800 Subject: [PATCH 66/82] feat(billing): add tax information tooltips in pricing footer (#26705) Co-authored-by: CodingOnStar --- web/app/components/billing/pricing/footer.tsx | 12 +++++++++++- web/app/components/billing/pricing/index.tsx | 11 ++++++++--- web/i18n/en-US/billing.ts | 2 ++ web/i18n/ja-JP/billing.ts | 2 ++ web/i18n/zh-Hans/billing.ts | 2 ++ 5 files changed, 25 insertions(+), 4 deletions(-) diff --git a/web/app/components/billing/pricing/footer.tsx b/web/app/components/billing/pricing/footer.tsx index 4e3cdfee3d..fd713eb3da 100644 --- a/web/app/components/billing/pricing/footer.tsx +++ b/web/app/components/billing/pricing/footer.tsx @@ -2,19 +2,29 @@ import React from 'react' import Link from 'next/link' import { useTranslation } from 'react-i18next' import { RiArrowRightUpLine } from '@remixicon/react' +import { type Category, CategoryEnum } from '.' 
+import cn from '@/utils/classnames' type FooterProps = { pricingPageURL: string + currentCategory: Category } const Footer = ({ pricingPageURL, + currentCategory, }: FooterProps) => { const { t } = useTranslation() return (
-
+
+ {currentCategory === CategoryEnum.CLOUD && ( +
+ {t('billing.plansCommon.taxTip')} + {t('billing.plansCommon.taxTipSecond')} +
+ )} void @@ -25,7 +30,7 @@ const Pricing: FC = ({ const { plan } = useProviderContext() const { isCurrentWorkspaceManager } = useAppContext() const [planRange, setPlanRange] = React.useState(PlanRange.monthly) - const [currentCategory, setCurrentCategory] = useState('cloud') + const [currentCategory, setCurrentCategory] = useState(CategoryEnum.CLOUD) const canPay = isCurrentWorkspaceManager useKeyPress(['esc'], onCancel) @@ -57,7 +62,7 @@ const Pricing: FC = ({ planRange={planRange} canPay={canPay} /> -
+
diff --git a/web/i18n/en-US/billing.ts b/web/i18n/en-US/billing.ts index 72cf9a3fca..9169631281 100644 --- a/web/i18n/en-US/billing.ts +++ b/web/i18n/en-US/billing.ts @@ -37,6 +37,8 @@ const translation = { save: 'Save ', free: 'Free', annualBilling: 'Bill Annually Save {{percent}}%', + taxTip: 'All subscription prices (monthly/annual) exclude applicable taxes (e.g., VAT, sales tax).', + taxTipSecond: 'If your region has no applicable tax requirements, no tax will appear in your checkout, and you won’t be charged any additional fees for the entire subscription term.', comparePlanAndFeatures: 'Compare plans & features', priceTip: 'per workspace/', currentPlan: 'Current Plan', diff --git a/web/i18n/ja-JP/billing.ts b/web/i18n/ja-JP/billing.ts index 426687da6c..6dbff60d5a 100644 --- a/web/i18n/ja-JP/billing.ts +++ b/web/i18n/ja-JP/billing.ts @@ -36,6 +36,8 @@ const translation = { save: '節約 ', free: '無料', annualBilling: '年次請求', + taxTip: 'すべてのサブスクリプション料金(月額/年額)は、適用される税金(例:消費税、付加価値税)を含みません。', + taxTipSecond: 'お客様の地域に適用税がない場合、チェックアウト時に税金は表示されず、サブスクリプション期間中に追加料金が請求されることもありません。', comparePlanAndFeatures: 'プランと機能を比較する', priceTip: 'ワークスペース/', currentPlan: '現在のプラン', diff --git a/web/i18n/zh-Hans/billing.ts b/web/i18n/zh-Hans/billing.ts index 96ba7970c8..00a7dd909a 100644 --- a/web/i18n/zh-Hans/billing.ts +++ b/web/i18n/zh-Hans/billing.ts @@ -36,6 +36,8 @@ const translation = { save: '节省', free: '免费', annualBilling: '按年计费节省 {{percent}}%', + taxTip: '所有订阅价格(按月/按年)均不含适用税费(如增值税、销售税)。', + taxTipSecond: '如果您所在地区无适用税费要求,结账时将不会显示税费,且在整个订阅周期内您都无需支付任何额外费用。', comparePlanAndFeatures: '对比套餐 & 功能特性', priceTip: '每个团队空间/', currentPlan: '当前计划', From 294e01a8c120b4ac4038b8e01890746097a04e91 Mon Sep 17 00:00:00 2001 From: fenglin <790872612@qq.com> Date: Fri, 10 Oct 2025 15:52:09 +0800 Subject: [PATCH 67/82] Fix/tool provider tag internationalization (#26710) Co-authored-by: qiaofenglin --- web/app/components/plugins/hooks.ts | 107 ++++++++++++--------- 
web/app/components/tools/provider-list.tsx | 4 +- 2 files changed, 64 insertions(+), 47 deletions(-) diff --git a/web/app/components/plugins/hooks.ts b/web/app/components/plugins/hooks.ts index 0af7c1a170..f22b2c4d69 100644 --- a/web/app/components/plugins/hooks.ts +++ b/web/app/components/plugins/hooks.ts @@ -1,3 +1,4 @@ +import { useMemo } from 'react' import { useTranslation } from 'react-i18next' import type { TFunction } from 'i18next' import { @@ -14,23 +15,29 @@ export const useTags = (translateFromOut?: TFunction) => { const { t: translation } = useTranslation() const t = translateFromOut || translation - const tags = tagKeys.map((tag) => { - return { - name: tag, - label: t(`pluginTags.tags.${tag}`), + const tags = useMemo(() => { + return tagKeys.map((tag) => { + return { + name: tag, + label: t(`pluginTags.tags.${tag}`), + } + }) + }, [t]) + + const tagsMap = useMemo(() => { + return tags.reduce((acc, tag) => { + acc[tag.name] = tag + return acc + }, {} as Record) + }, [tags]) + + const getTagLabel = useMemo(() => { + return (name: string) => { + if (!tagsMap[name]) + return name + return tagsMap[name].label } - }) - - const tagsMap = tags.reduce((acc, tag) => { - acc[tag.name] = tag - return acc - }, {} as Record) - - const getTagLabel = (name: string) => { - if (!tagsMap[name]) - return name - return tagsMap[name].label - } + }, [tagsMap]) return { tags, @@ -48,23 +55,27 @@ export const useCategories = (translateFromOut?: TFunction) => { const { t: translation } = useTranslation() const t = translateFromOut || translation - const categories = categoryKeys.map((category) => { - if (category === 'agent-strategy') { - return { - name: 'agent-strategy', - label: t('plugin.category.agents'), + const categories = useMemo(() => { + return categoryKeys.map((category) => { + if (category === 'agent-strategy') { + return { + name: 'agent-strategy', + label: t('plugin.category.agents'), + } } - } - return { - name: category, - label: 
t(`plugin.category.${category}s`), - } - }) + return { + name: category, + label: t(`plugin.category.${category}s`), + } + }) + }, [t]) - const categoriesMap = categories.reduce((acc, category) => { - acc[category.name] = category - return acc - }, {} as Record) + const categoriesMap = useMemo(() => { + return categories.reduce((acc, category) => { + acc[category.name] = category + return acc + }, {} as Record) + }, [categories]) return { categories, @@ -76,23 +87,27 @@ export const useSingleCategories = (translateFromOut?: TFunction) => { const { t: translation } = useTranslation() const t = translateFromOut || translation - const categories = categoryKeys.map((category) => { - if (category === 'agent-strategy') { - return { - name: 'agent-strategy', - label: t('plugin.categorySingle.agent'), + const categories = useMemo(() => { + return categoryKeys.map((category) => { + if (category === 'agent-strategy') { + return { + name: 'agent-strategy', + label: t('plugin.categorySingle.agent'), + } } - } - return { - name: category, - label: t(`plugin.categorySingle.${category}`), - } - }) + return { + name: category, + label: t(`plugin.categorySingle.${category}`), + } + }) + }, [t]) - const categoriesMap = categories.reduce((acc, category) => { - acc[category.name] = category - return acc - }, {} as Record) + const categoriesMap = useMemo(() => { + return categories.reduce((acc, category) => { + acc[category.name] = category + return acc + }, {} as Record) + }, [categories]) return { categories, diff --git a/web/app/components/tools/provider-list.tsx b/web/app/components/tools/provider-list.tsx index 08a4aa0b5d..1679b4469b 100644 --- a/web/app/components/tools/provider-list.tsx +++ b/web/app/components/tools/provider-list.tsx @@ -21,6 +21,7 @@ import { useCheckInstalled, useInvalidateInstalledPluginList } from '@/service/u import { useGlobalPublicStore } from '@/context/global-public-context' import { ToolTypeEnum } from '../workflow/block-selector/types' import { 
useMarketplace } from './marketplace/hooks' +import { useTags } from '@/app/components/plugins/hooks' const getToolType = (type: string) => { switch (type) { @@ -40,6 +41,7 @@ const ProviderList = () => { // const searchParams = useSearchParams() // searchParams.get('category') === 'workflow' const { t } = useTranslation() + const { getTagLabel } = useTags() const { enable_marketplace } = useGlobalPublicStore(s => s.systemFeatures) const containerRef = useRef(null) @@ -180,7 +182,7 @@ const ProviderList = () => { } as any} footer={ getTagLabel(label)) || []} /> } /> From 298d8c2d881a3407152a1bd82cb83c55de77493f Mon Sep 17 00:00:00 2001 From: Jyong <76649700+JohnJyong@users.noreply.github.com> Date: Fri, 10 Oct 2025 15:54:33 +0800 Subject: [PATCH 68/82] Update deploy-dev.yml (#26712) --- .github/workflows/deploy-dev.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/deploy-dev.yml b/.github/workflows/deploy-dev.yml index de732c3134..cd1c86e668 100644 --- a/.github/workflows/deploy-dev.yml +++ b/.github/workflows/deploy-dev.yml @@ -18,7 +18,7 @@ jobs: - name: Deploy to server uses: appleboy/ssh-action@v0.1.8 with: - host: ${{ secrets.RAG_SSH_HOST }} + host: ${{ secrets.SSH_HOST }} username: ${{ secrets.SSH_USER }} key: ${{ secrets.SSH_PRIVATE_KEY }} script: | From 3068526797385de0d5a8808597a577722d0f7287 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 10 Oct 2025 15:55:24 +0800 Subject: [PATCH 69/82] chore: translate i18n files and update type definitions (#26709) Co-authored-by: iamjoel <2120155+iamjoel@users.noreply.github.com> --- web/i18n/de-DE/billing.ts | 2 ++ web/i18n/es-ES/billing.ts | 2 ++ web/i18n/fa-IR/billing.ts | 2 ++ web/i18n/fr-FR/billing.ts | 2 ++ web/i18n/hi-IN/billing.ts | 2 ++ web/i18n/id-ID/billing.ts | 2 ++ web/i18n/it-IT/billing.ts | 2 ++ web/i18n/ko-KR/billing.ts | 2 ++ web/i18n/pl-PL/billing.ts | 2 ++ web/i18n/pt-BR/billing.ts | 2 ++ 
web/i18n/ro-RO/billing.ts | 2 ++ web/i18n/ru-RU/billing.ts | 2 ++ web/i18n/sl-SI/billing.ts | 2 ++ web/i18n/th-TH/billing.ts | 2 ++ web/i18n/tr-TR/billing.ts | 2 ++ web/i18n/uk-UA/billing.ts | 2 ++ web/i18n/vi-VN/billing.ts | 2 ++ web/i18n/zh-Hant/billing.ts | 2 ++ 18 files changed, 36 insertions(+) diff --git a/web/i18n/de-DE/billing.ts b/web/i18n/de-DE/billing.ts index 98d4488fab..fc45f3889c 100644 --- a/web/i18n/de-DE/billing.ts +++ b/web/i18n/de-DE/billing.ts @@ -94,6 +94,8 @@ const translation = { teamMember_one: '{{count,number}} Teammitglied', documentsRequestQuotaTooltip: 'Gibt die Gesamtzahl der Aktionen an, die ein Arbeitsbereich pro Minute innerhalb der Wissensbasis ausführen kann, einschließlich der Erstellung, Löschung, Aktualisierung von Datensätzen, des Hochladens von Dokumenten, von Änderungen, der Archivierung und von Abfragen in der Wissensbasis. Diese Kennzahl wird verwendet, um die Leistung von Anfragen an die Wissensbasis zu bewerten. Wenn ein Sandbox-Nutzer beispielsweise in einer Minute 10 aufeinanderfolgende Testdurchläufe durchführt, wird sein Arbeitsbereich für die nächste Minute vorübergehend daran gehindert, die folgenden Aktionen auszuführen: Erstellung, Löschung, Aktualisierung von Datensätzen sowie das Hochladen oder Ändern von Dokumenten.', startBuilding: 'Beginnen Sie mit der Entwicklung', + taxTipSecond: 'Wenn in Ihrer Region keine relevanten Steuervorschriften gelten, wird an der Kasse keine Steuer angezeigt und Ihnen werden während der gesamten Abonnementlaufzeit keine zusätzlichen Gebühren berechnet.', + taxTip: 'Alle Abonnementspreise (monatlich/jährlich) verstehen sich zuzüglich der geltenden Steuern (z. B. 
MwSt., Umsatzsteuer).', }, plans: { sandbox: { diff --git a/web/i18n/es-ES/billing.ts b/web/i18n/es-ES/billing.ts index c5d4ef95b9..a8180e2d07 100644 --- a/web/i18n/es-ES/billing.ts +++ b/web/i18n/es-ES/billing.ts @@ -94,6 +94,8 @@ const translation = { apiRateLimitTooltip: 'El límite de tasa de la API se aplica a todas las solicitudes realizadas a través de la API de Dify, incluidos la generación de texto, las conversaciones de chat, las ejecuciones de flujo de trabajo y el procesamiento de documentos.', documentsRequestQuotaTooltip: 'Especifica el número total de acciones que un espacio de trabajo puede realizar por minuto dentro de la base de conocimientos, incluyendo la creación, eliminación, actualización de conjuntos de datos, carga de documentos, modificaciones, archivo y consultas a la base de conocimientos. Esta métrica se utiliza para evaluar el rendimiento de las solicitudes a la base de conocimientos. Por ejemplo, si un usuario de Sandbox realiza 10 pruebas consecutivas en un minuto, su espacio de trabajo será temporalmente restringido de realizar las siguientes acciones durante el siguiente minuto: creación de conjuntos de datos, eliminación, actualizaciones y carga o modificaciones de documentos.', startBuilding: 'Empezar a construir', + taxTip: 'Todos los precios de suscripción (mensuales/anuales) excluyen los impuestos aplicables (por ejemplo, IVA, impuesto sobre ventas).', + taxTipSecond: 'Si su región no tiene requisitos fiscales aplicables, no se mostrará ningún impuesto en su pago y no se le cobrará ninguna tarifa adicional durante todo el período de suscripción.', }, plans: { sandbox: { diff --git a/web/i18n/fa-IR/billing.ts b/web/i18n/fa-IR/billing.ts index 5634692dc2..3749036f3c 100644 --- a/web/i18n/fa-IR/billing.ts +++ b/web/i18n/fa-IR/billing.ts @@ -94,6 +94,8 @@ const translation = { apiRateLimitTooltip: 'محدودیت نرخ API برای همه درخواست‌های انجام شده از طریق API Dify اعمال می‌شود، از جمله تولید متن، محاوره‌های چت، اجرای گردش‌های کار و 
پردازش اسناد.', documentsRequestQuotaTooltip: 'تعیین می‌کند که تعداد کلی اقداماتی که یک فضای کاری می‌تواند در هر دقیقه در داخل پایگاه دانش انجام دهد، شامل ایجاد مجموعه داده، حذف، به‌روزرسانی، بارگذاری مستندات، تغییرات، بایگانی و پرسش از پایگاه دانش است. این معیار برای ارزیابی عملکرد درخواست‌های پایگاه دانش استفاده می‌شود. به عنوان مثال، اگر یک کاربر Sandbox در طی یک دقیقه 10 آزمایش متوالی انجام دهد، فضای کاری او به طور موقت از انجام اقدامات زیر در دقیقه بعدی محدود خواهد شد: ایجاد مجموعه داده، حذف، به‌روزرسانی و بارگذاری یا تغییر مستندات.', startBuilding: 'شروع به ساخت کنید', + taxTip: 'تمام قیمت‌های اشتراک (ماهانه/سالانه) شامل مالیات‌های مربوطه (مثلاً مالیات بر ارزش افزوده، مالیات فروش) نمی‌شوند.', + taxTipSecond: 'اگر منطقه شما هیچ الزامات مالیاتی قابل اجرا نداشته باشد، هیچ مالیاتی در هنگام پرداخت نشان داده نمی‌شود و برای کل مدت اشتراک هیچ هزینه اضافی از شما دریافت نخواهد شد.', }, plans: { sandbox: { diff --git a/web/i18n/fr-FR/billing.ts b/web/i18n/fr-FR/billing.ts index 117d1c6654..a41eed7e23 100644 --- a/web/i18n/fr-FR/billing.ts +++ b/web/i18n/fr-FR/billing.ts @@ -94,6 +94,8 @@ const translation = { documents: '{{count,number}} Documents de connaissance', documentsRequestQuotaTooltip: 'Spécifie le nombre total d\'actions qu\'un espace de travail peut effectuer par minute dans la base de connaissances, y compris la création, la suppression, les mises à jour de jeux de données, le téléchargement de documents, les modifications, l\'archivage et les requêtes de la base de connaissances. Ce paramètre est utilisé pour évaluer les performances des requêtes de la base de connaissances. 
Par exemple, si un utilisateur de Sandbox effectue 10 tests de validité consécutifs en une minute, son espace de travail sera temporairement restreint dans l\'exécution des actions suivantes pendant la minute suivante : création, suppression, mises à jour de jeux de données, et téléchargements ou modifications de documents.', startBuilding: 'Commencez à construire', + taxTip: 'Tous les prix des abonnements (mensuels/annuels) s\'entendent hors taxes applicables (par exemple, TVA, taxe de vente).', + taxTipSecond: 'Si votre région n\'a pas de exigences fiscales applicables, aucune taxe n\'apparaîtra lors de votre paiement et vous ne serez pas facturé de frais supplémentaires pendant toute la durée de l\'abonnement.', }, plans: { sandbox: { diff --git a/web/i18n/hi-IN/billing.ts b/web/i18n/hi-IN/billing.ts index 749ab804ab..fbc6dffc7c 100644 --- a/web/i18n/hi-IN/billing.ts +++ b/web/i18n/hi-IN/billing.ts @@ -102,6 +102,8 @@ const translation = { teamMember_one: '{{count,number}} टीम सदस्य', documentsRequestQuotaTooltip: 'यह ज्ञान आधार में एक कार्यक्षेत्र द्वारा प्रति मिनट किए जा सकने वाले कुल कार्यों की संख्या को निर्दिष्ट करता है, जिसमें डेटासेट बनाना, हटाना, अपडेट करना, दस्तावेज़ अपलोड करना, संशोधन करना, संग्रहित करना और ज्ञान आधार अनुरोध शामिल हैं। इस मीट्रिक का उपयोग ज्ञान आधार अनुरोधों के प्रदर्शन का मूल्यांकन करने के लिए किया जाता है। उदाहरण के लिए, यदि एक सैंडबॉक्स उपयोगकर्ता एक मिनट के भीतर 10 लगातार हिट परीक्षण करता है, तो उनके कार्यक्षेत्र को अगले मिनट के लिए निम्नलिखित कार्यों को करने से अस्थायी रूप से प्रतिबंधित किया जाएगा: डेटासेट बनाना, हटाना, अपडेट करना और दस्तावेज़ अपलोड या संशोधन करना।', startBuilding: 'बनाना शुरू करें', + taxTip: 'सभी सदस्यता मूल्य (मासिक/वार्षिक) लागू करों (जैसे, VAT, बिक्री कर) को शामिल नहीं करते हैं।', + taxTipSecond: 'यदि आपके क्षेत्र में कोई लागू कर आवश्यकताएँ नहीं हैं, तो आपकी चेकआउट में कोई कर नहीं दिखाई देगा, और पूरे सदस्यता अवधि के लिए आपसे कोई अतिरिक्त शुल्क नहीं लिया जाएगा।', }, plans: { sandbox: { diff --git 
a/web/i18n/id-ID/billing.ts b/web/i18n/id-ID/billing.ts index 11419c3b16..c6c718d15b 100644 --- a/web/i18n/id-ID/billing.ts +++ b/web/i18n/id-ID/billing.ts @@ -87,6 +87,8 @@ const translation = { modelProviders: 'Mendukung OpenAI/Anthropic/Llama2/Azure OpenAI/Hugging Face/Replite', member: 'Anggota', startBuilding: 'Mulai Membangun', + taxTip: 'Semua harga langganan (bulanan/tahunan) belum termasuk pajak yang berlaku (misalnya, PPN, pajak penjualan).', + taxTipSecond: 'Jika wilayah Anda tidak memiliki persyaratan pajak yang berlaku, tidak akan ada pajak yang muncul saat checkout, dan Anda tidak akan dikenakan biaya tambahan apa pun selama masa langganan.', }, plans: { sandbox: { diff --git a/web/i18n/it-IT/billing.ts b/web/i18n/it-IT/billing.ts index f89502ee5b..ef6b1943e3 100644 --- a/web/i18n/it-IT/billing.ts +++ b/web/i18n/it-IT/billing.ts @@ -102,6 +102,8 @@ const translation = { annualBilling: 'Fatturazione annuale', documentsRequestQuotaTooltip: 'Specifica il numero totale di azioni che un\'area di lavoro può eseguire al minuto all\'interno della base di conoscenza, compresi la creazione, l\'eliminazione, gli aggiornamenti dei dataset, il caricamento di documenti, le modifiche, l\'archiviazione e le query sulla base di conoscenza. Questa metrica viene utilizzata per valutare le prestazioni delle richieste alla base di conoscenza. 
Ad esempio, se un utente di Sandbox esegue 10 test consecutivi in un minuto, la sua area di lavoro sarà temporaneamente limitata dall\'eseguire le seguenti azioni per il minuto successivo: creazione, eliminazione, aggiornamenti dei dataset e caricamento o modifica di documenti.', startBuilding: 'Inizia a costruire', + taxTip: 'Tutti i prezzi degli abbonamenti (mensili/annuali) non includono le tasse applicabili (ad esempio, IVA, imposta sulle vendite).', + taxTipSecond: 'Se nella tua regione non ci sono requisiti fiscali applicabili, nessuna tassa apparirà al momento del pagamento e non ti verranno addebitate spese aggiuntive per l\'intera durata dell\'abbonamento.', }, plans: { sandbox: { diff --git a/web/i18n/ko-KR/billing.ts b/web/i18n/ko-KR/billing.ts index ff0dd189e4..c5f081d41b 100644 --- a/web/i18n/ko-KR/billing.ts +++ b/web/i18n/ko-KR/billing.ts @@ -103,6 +103,8 @@ const translation = { documentsRequestQuotaTooltip: '지식 기반 내에서 작업 공간이 분당 수행할 수 있는 총 작업 수를 지정합니다. 여기에는 데이터 세트 생성, 삭제, 업데이트, 문서 업로드, 수정, 보관 및 지식 기반 쿼리가 포함됩니다. 이 지표는 지식 기반 요청의 성능을 평가하는 데 사용됩니다. 예를 들어, 샌드박스 사용자가 1 분 이내에 10 회의 연속 히트 테스트를 수행하면, 해당 작업 공간은 다음 1 분 동안 데이터 세트 생성, 삭제, 업데이트 및 문서 업로드 또는 수정과 같은 작업을 수행하는 것이 일시적으로 제한됩니다.', startBuilding: '구축 시작', + taxTip: '모든 구독 요금(월간/연간)에는 해당 세금(예: 부가가치세, 판매세)이 포함되어 있지 않습니다.', + taxTipSecond: '귀하의 지역에 적용 가능한 세금 요구 사항이 없는 경우, 결제 시 세금이 표시되지 않으며 전체 구독 기간 동안 추가 요금이 부과되지 않습니다.', }, plans: { sandbox: { diff --git a/web/i18n/pl-PL/billing.ts b/web/i18n/pl-PL/billing.ts index 3bf0867877..cf0859468b 100644 --- a/web/i18n/pl-PL/billing.ts +++ b/web/i18n/pl-PL/billing.ts @@ -101,6 +101,8 @@ const translation = { documentsRequestQuota: '{{count,number}}/min Limit wiedzy na żądanie', documentsRequestQuotaTooltip: 'Określa całkowitą liczbę działań, jakie przestrzeń robocza może wykonać na minutę w ramach bazy wiedzy, w tym tworzenie zbiorów danych, usuwanie, aktualizacje, przesyłanie dokumentów, modyfikacje, archiwizowanie i zapytania do bazy wiedzy. 
Ta metryka jest używana do oceny wydajności zapytań do bazy wiedzy. Na przykład, jeśli użytkownik Sandbox wykona 10 kolejnych testów w ciągu jednej minuty, jego przestrzeń robocza zostanie tymczasowo ograniczona w wykonywaniu następujących działań przez następną minutę: tworzenie zbiorów danych, usuwanie, aktualizacje oraz przesyłanie lub modyfikacje dokumentów.', startBuilding: 'Zacznij budować', + taxTip: 'Wszystkie ceny subskrypcji (miesięczne/roczne) nie obejmują obowiązujących podatków (np. VAT, podatek od sprzedaży).', + taxTipSecond: 'Jeśli w Twoim regionie nie ma obowiązujących przepisów podatkowych, podatek nie pojawi się podczas realizacji zamówienia i nie zostaną naliczone żadne dodatkowe opłaty przez cały okres subskrypcji.', }, plans: { sandbox: { diff --git a/web/i18n/pt-BR/billing.ts b/web/i18n/pt-BR/billing.ts index 91ccaa7794..e4ca0a064a 100644 --- a/web/i18n/pt-BR/billing.ts +++ b/web/i18n/pt-BR/billing.ts @@ -94,6 +94,8 @@ const translation = { apiRateLimitTooltip: 'O limite da taxa da API se aplica a todas as solicitações feitas através da API Dify, incluindo geração de texto, conversas de chat, execuções de fluxo de trabalho e processamento de documentos.', documentsRequestQuotaTooltip: 'Especifica o número total de ações que um espaço de trabalho pode realizar por minuto dentro da base de conhecimento, incluindo criação, exclusão, atualizações de conjuntos de dados, uploads de documentos, modificações, arquivamento e consultas à base de conhecimento. Esse métrica é utilizada para avaliar o desempenho das solicitações à base de conhecimento. 
Por exemplo, se um usuário do Sandbox realizar 10 testes de impacto consecutivos dentro de um minuto, seu espaço de trabalho ficará temporariamente restrito de realizar as seguintes ações no minuto seguinte: criação, exclusão, atualizações de conjuntos de dados e uploads ou modificações de documentos.', startBuilding: 'Comece a construir', + taxTip: 'Todos os preços de assinatura (mensal/anual) não incluem os impostos aplicáveis (por exemplo, IVA, imposto sobre vendas).', + taxTipSecond: 'Se a sua região não tiver requisitos fiscais aplicáveis, nenhum imposto aparecerá no seu checkout e você não será cobrado por taxas adicionais durante todo o período da assinatura.', }, plans: { sandbox: { diff --git a/web/i18n/ro-RO/billing.ts b/web/i18n/ro-RO/billing.ts index 550ff3e677..3f5577dc32 100644 --- a/web/i18n/ro-RO/billing.ts +++ b/web/i18n/ro-RO/billing.ts @@ -94,6 +94,8 @@ const translation = { documentsRequestQuotaTooltip: 'Specificați numărul total de acțiuni pe care un spațiu de lucru le poate efectua pe minut în cadrul bazei de cunoștințe, inclusiv crearea, ștergerea, actualizările setului de date, încărcările de documente, modificările, arhivarea și interogările bazei de cunoștințe. Acest metric este utilizat pentru a evalua performanța cererilor din baza de cunoștințe. 
De exemplu, dacă un utilizator Sandbox efectuează 10 teste consecutive de hituri într-un minut, spațiul său de lucru va fi restricționat temporar de la efectuarea următoarelor acțiuni pentru minutul următor: crearea setului de date, ștergerea, actualizările și încărcările sau modificările documentelor.', apiRateLimitTooltip: 'Limita de rată API se aplică tuturor cererilor efectuate prin API-ul Dify, inclusiv generarea de texte, conversațiile de chat, execuțiile fluxului de lucru și procesarea documentelor.', startBuilding: 'Începeți să construiți', + taxTip: 'Toate prețurile abonamentelor (lunare/anuale) nu includ taxele aplicabile (de exemplu, TVA, taxa pe vânzări).', + taxTipSecond: 'Dacă regiunea dumneavoastră nu are cerințe fiscale aplicabile, niciun impozit nu va apărea la finalizarea comenzii și nu vi se vor percepe taxe suplimentare pe întreaga durată a abonamentului.', }, plans: { sandbox: { diff --git a/web/i18n/ru-RU/billing.ts b/web/i18n/ru-RU/billing.ts index 27f5c71685..7017f90cc2 100644 --- a/web/i18n/ru-RU/billing.ts +++ b/web/i18n/ru-RU/billing.ts @@ -94,6 +94,8 @@ const translation = { priceTip: 'по рабочему месту/', documentsTooltip: 'Квота на количество документов, импортируемых из источника знаний.', startBuilding: 'Начать строительство', + taxTip: 'Все цены на подписку (ежемесячную/годовую) не включают применимые налоги (например, НДС, налог с продаж).', + taxTipSecond: 'Если в вашем регионе нет применимых налоговых требований, налоги не будут отображаться при оформлении заказа, и с вас не будут взиматься дополнительные сборы за весь срок подписки.', }, plans: { sandbox: { diff --git a/web/i18n/sl-SI/billing.ts b/web/i18n/sl-SI/billing.ts index 4481100dd8..fb9d9ec435 100644 --- a/web/i18n/sl-SI/billing.ts +++ b/web/i18n/sl-SI/billing.ts @@ -94,6 +94,8 @@ const translation = { getStarted: 'Začnite', documentsRequestQuotaTooltip: 'Določa skupno število dejanj, ki jih lahko delovno mesto opravi na minuto znotraj znanja baze, vključno s kreiranjem, 
brisanjem, posodobitvami, nalaganjem dokumentov, spremembami, arhiviranjem in poizvedbami po znanju bazi. Ta meritev se uporablja za ocenjevanje uspešnosti poizvedb v bazi znanja. Na primer, če uporabnik Sandbox izvede 10 zaporednih testov udarca v eni minuti, bo njegovo delovno mesto začasno omejeno pri izvajanju naslednjih dejanj v naslednji minuti: kreiranje podatkovnih nizov, brisanje, posodobitve in nalaganje ali spremembe dokumentov.', startBuilding: 'Začnite graditi', + taxTip: 'Vse cene naročnin (mesečne/letne) ne vključujejo veljavnih davkov (npr. DDV, davek na promet).', + taxTipSecond: 'Če vaša regija nima veljavnih davčnih zahtev, se v vaši košarici ne bo prikazal noben davek in za celotno obdobje naročnine vam ne bodo zaračunani nobeni dodatni stroški.', }, plans: { sandbox: { diff --git a/web/i18n/th-TH/billing.ts b/web/i18n/th-TH/billing.ts index 55a01449eb..461e4a8240 100644 --- a/web/i18n/th-TH/billing.ts +++ b/web/i18n/th-TH/billing.ts @@ -94,6 +94,8 @@ const translation = { annualBilling: 'การเรียกเก็บเงินประจำปี', documentsRequestQuotaTooltip: 'ระบุจำนวนรวมของการกระทำที่เวิร์กสเปซสามารถดำเนินการต่อหนึ่งนาทีภายในฐานความรู้ รวมถึงการสร้างชุดข้อมูล การลบ การอัปเดต การอัปโหลดเอกสาร การปรับเปลี่ยน การเก็บถาวร และการสอบถามฐานความรู้ เมตริกนี้ถูกใช้ในการประเมินประสิทธิภาพของคำขอฐานความรู้ ตัวอย่างเช่น หากผู้ใช้ Sandbox ทำการทดสอบการตี 10 ครั้งต่อเนื่องภายในหนึ่งนาที เวิร์กสเปซของพวกเขาจะถูกจำกัดชั่วคราวในการดำเนินการต่อไปนี้ในนาทีถัดไป: การสร้างชุดข้อมูล การลบ การอัปเดต หรือการอัปโหลดหรือปรับเปลี่ยนเอกสาร.', startBuilding: 'เริ่มสร้าง', + taxTip: 'ราคาการสมัครสมาชิกทั้งหมด (รายเดือน/รายปี) ไม่รวมภาษีที่ใช้บังคับ (เช่น ภาษีมูลค่าเพิ่ม, ภาษีการขาย)', + taxTipSecond: 'หากภูมิภาคของคุณไม่มีข้อกำหนดเกี่ยวกับภาษีที่ใช้ได้ จะไม่มีการคิดภาษีในขั้นตอนการชำระเงินของคุณ และคุณจะไม่ถูกเรียกเก็บค่าธรรมเนียมเพิ่มเติมใด ๆ ตลอดระยะเวลาสมาชิกทั้งหมด', }, plans: { sandbox: { diff --git a/web/i18n/tr-TR/billing.ts b/web/i18n/tr-TR/billing.ts index 62d6e0a07e..6d01d9dd32 
100644 --- a/web/i18n/tr-TR/billing.ts +++ b/web/i18n/tr-TR/billing.ts @@ -94,6 +94,8 @@ const translation = { teamWorkspace: '{{count,number}} Takım Çalışma Alanı', documentsRequestQuotaTooltip: 'Bir çalışma alanının bilgi tabanında, veri seti oluşturma, silme, güncellemeler, belge yüklemeleri, değişiklikler, arşivleme ve bilgi tabanı sorguları dahil olmak üzere, dakikada gerçekleştirebileceği toplam işlem sayısını belirtir. Bu ölçüt, bilgi tabanı taleplerinin performansını değerlendirmek için kullanılır. Örneğin, bir Sandbox kullanıcısı bir dakika içinde ardışık 10 vurma testi gerçekleştirirse, çalışma alanı bir sonraki dakika için aşağıdaki işlemleri gerçekleştirmesi geçici olarak kısıtlanacaktır: veri seti oluşturma, silme, güncellemeler ve belge yüklemeleri veya değişiklikler.', startBuilding: 'İnşa Etmeye Başlayın', + taxTip: 'Tüm abonelik fiyatları (aylık/yıllık) geçerli vergiler (ör. KDV, satış vergisi) hariçtir.', + taxTipSecond: 'Bölgenizde geçerli vergi gereksinimleri yoksa, ödeme sayfanızda herhangi bir vergi görünmeyecek ve tüm abonelik süresi boyunca ek bir ücret tahsil edilmeyecektir.', }, plans: { sandbox: { diff --git a/web/i18n/uk-UA/billing.ts b/web/i18n/uk-UA/billing.ts index 10dafedb24..03b743e4fe 100644 --- a/web/i18n/uk-UA/billing.ts +++ b/web/i18n/uk-UA/billing.ts @@ -94,6 +94,8 @@ const translation = { apiRateLimitTooltip: 'Обмеження частоти запитів застосовується до всіх запитів, зроблених через API Dify, включаючи генерацію тексту, чат-розмови, виконання робочих процесів та обробку документів.', documentsRequestQuotaTooltip: 'Вказує загальну кількість дій, які робоча область може виконувати за хвилину в межах бази знань, включаючи створення, видалення, оновлення наборів даних, завантаження документів, модифікації, архівування та запити до бази знань. Цей показник використовується для оцінки ефективності запитів до бази знань. 
Наприклад, якщо користувач Sandbox виконує 10 послідовних тестів за один хвилину, його робочій області буде тимчасово заборонено виконувати наступні дії протягом наступної хвилини: створення наборів даних, видалення, оновлення, а також завантаження чи модифікацію документів.', startBuilding: 'Почніть будувати', + taxTip: 'Всі ціни на підписку (щомісячна/щорічна) не включають відповідні податки (наприклад, ПДВ, податок з продажу).', + taxTipSecond: 'Якщо для вашого регіону немає відповідних податкових вимог, податок не відображатиметься на вашому чек-ауті, і з вас не стягуватимуть додаткові збори протягом усього терміну підписки.', }, plans: { sandbox: { diff --git a/web/i18n/vi-VN/billing.ts b/web/i18n/vi-VN/billing.ts index 68e662425f..0166185e45 100644 --- a/web/i18n/vi-VN/billing.ts +++ b/web/i18n/vi-VN/billing.ts @@ -94,6 +94,8 @@ const translation = { freeTrialTipSuffix: 'Không cần thẻ tín dụng', documentsRequestQuotaTooltip: 'Chỉ định tổng số hành động mà một không gian làm việc có thể thực hiện mỗi phút trong cơ sở tri thức, bao gồm tạo mới tập dữ liệu, xóa, cập nhật, tải tài liệu lên, thay đổi, lưu trữ và truy vấn cơ sở tri thức. Chỉ số này được sử dụng để đánh giá hiệu suất của các yêu cầu cơ sở tri thức. 
Ví dụ, nếu một người dùng Sandbox thực hiện 10 lần kiểm tra liên tiếp trong một phút, không gian làm việc của họ sẽ bị hạn chế tạm thời không thực hiện các hành động sau trong phút tiếp theo: tạo mới tập dữ liệu, xóa, cập nhật và tải tài liệu lên hoặc thay đổi.', startBuilding: 'Bắt đầu xây dựng', + taxTipSecond: 'Nếu khu vực của bạn không có yêu cầu thuế áp dụng, sẽ không có thuế xuất hiện trong quá trình thanh toán của bạn và bạn sẽ không bị tính bất kỳ khoản phí bổ sung nào trong suốt thời gian đăng ký.', + taxTip: 'Tất cả giá đăng ký (hàng tháng/hàng năm) chưa bao gồm các loại thuế áp dụng (ví dụ: VAT, thuế bán hàng).', }, plans: { sandbox: { diff --git a/web/i18n/zh-Hant/billing.ts b/web/i18n/zh-Hant/billing.ts index f99b1ef2cf..1b0b1f5e1f 100644 --- a/web/i18n/zh-Hant/billing.ts +++ b/web/i18n/zh-Hant/billing.ts @@ -94,6 +94,8 @@ const translation = { documentsTooltip: '從知識數據來源導入的文件數量配額。', documentsRequestQuotaTooltip: '指定工作區在知識基礎中每分鐘可以執行的總操作次數,包括數據集的創建、刪除、更新、文檔上傳、修改、歸檔和知識基礎查詢。這個指標用於評估知識基礎請求的性能。例如,如果一個沙箱用戶在一分鐘內連續執行 10 次命中測試,他們的工作區將在接下來的一分鐘內暫時禁止執行以下操作:數據集的創建、刪除、更新以及文檔上傳或修改。', startBuilding: '開始建造', + taxTip: '所有訂閱價格(月費/年費)不包含適用的稅費(例如增值稅、銷售稅)。', + taxTipSecond: '如果您的地區沒有適用的稅務要求,結帳時將不會顯示任何稅款,且在整個訂閱期間您也不會被收取任何額外費用。', }, plans: { sandbox: { From aa51662d98ddfdee55691eb1a4062ce2c367f4ed Mon Sep 17 00:00:00 2001 From: Guangdong Liu Date: Fri, 10 Oct 2025 15:59:14 +0800 Subject: [PATCH 70/82] refactor(api): add new endpoints for workspace management and update routing (#26465) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .../console/workspace/workspace.py | 22 ++++++++----------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/api/controllers/console/workspace/workspace.py b/api/controllers/console/workspace/workspace.py index 6bec70b5da..13a61052ae 100644 --- a/api/controllers/console/workspace/workspace.py +++ b/api/controllers/console/workspace/workspace.py @@ -14,7 +14,7 @@ from controllers.common.errors 
import ( TooManyFilesError, UnsupportedFileTypeError, ) -from controllers.console import api +from controllers.console import console_ns from controllers.console.admin import admin_required from controllers.console.error import AccountNotLinkTenantError from controllers.console.wraps import ( @@ -65,6 +65,7 @@ tenants_fields = { workspace_fields = {"id": fields.String, "name": fields.String, "status": fields.String, "created_at": TimestampField} +@console_ns.route("/workspaces") class TenantListApi(Resource): @setup_required @login_required @@ -93,6 +94,7 @@ class TenantListApi(Resource): return {"workspaces": marshal(tenant_dicts, tenants_fields)}, 200 +@console_ns.route("/all-workspaces") class WorkspaceListApi(Resource): @setup_required @admin_required @@ -118,6 +120,8 @@ class WorkspaceListApi(Resource): }, 200 +@console_ns.route("/workspaces/current") +@console_ns.route("/info") # Deprecated class TenantApi(Resource): @setup_required @login_required @@ -143,11 +147,10 @@ class TenantApi(Resource): else: raise Unauthorized("workspace is archived") - if not tenant: - raise ValueError("No tenant available") return WorkspaceService.get_tenant_info(tenant), 200 +@console_ns.route("/workspaces/switch") class SwitchWorkspaceApi(Resource): @setup_required @login_required @@ -172,6 +175,7 @@ class SwitchWorkspaceApi(Resource): return {"result": "success", "new_tenant": marshal(WorkspaceService.get_tenant_info(new_tenant), tenant_fields)} +@console_ns.route("/workspaces/custom-config") class CustomConfigWorkspaceApi(Resource): @setup_required @login_required @@ -202,6 +206,7 @@ class CustomConfigWorkspaceApi(Resource): return {"result": "success", "tenant": marshal(WorkspaceService.get_tenant_info(tenant), tenant_fields)} +@console_ns.route("/workspaces/custom-config/webapp-logo/upload") class WebappLogoWorkspaceApi(Resource): @setup_required @login_required @@ -242,6 +247,7 @@ class WebappLogoWorkspaceApi(Resource): return {"id": upload_file.id}, 201 
+@console_ns.route("/workspaces/info") class WorkspaceInfoApi(Resource): @setup_required @login_required @@ -261,13 +267,3 @@ class WorkspaceInfoApi(Resource): db.session.commit() return {"result": "success", "tenant": marshal(WorkspaceService.get_tenant_info(tenant), tenant_fields)} - - -api.add_resource(TenantListApi, "/workspaces") # GET for getting all tenants -api.add_resource(WorkspaceListApi, "/all-workspaces") # GET for getting all tenants -api.add_resource(TenantApi, "/workspaces/current", endpoint="workspaces_current") # GET for getting current tenant info -api.add_resource(TenantApi, "/info", endpoint="info") # Deprecated -api.add_resource(SwitchWorkspaceApi, "/workspaces/switch") # POST for switching tenant -api.add_resource(CustomConfigWorkspaceApi, "/workspaces/custom-config") -api.add_resource(WebappLogoWorkspaceApi, "/workspaces/custom-config/webapp-logo/upload") -api.add_resource(WorkspaceInfoApi, "/workspaces/info") # POST for changing workspace info From 2b6882bd978255852cf0af2588199fe3645bafe8 Mon Sep 17 00:00:00 2001 From: znn Date: Fri, 10 Oct 2025 13:31:33 +0530 Subject: [PATCH 71/82] fix chunks 2 (#26623) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/core/rag/splitter/fixed_text_splitter.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/api/core/rag/splitter/fixed_text_splitter.py b/api/core/rag/splitter/fixed_text_splitter.py index 8356861242..801d2a2a52 100644 --- a/api/core/rag/splitter/fixed_text_splitter.py +++ b/api/core/rag/splitter/fixed_text_splitter.py @@ -2,6 +2,7 @@ from __future__ import annotations +import re from typing import Any from core.model_manager import ModelInstance @@ -52,7 +53,7 @@ class FixedRecursiveCharacterTextSplitter(EnhanceRecursiveCharacterTextSplitter) """Create a new TextSplitter.""" super().__init__(**kwargs) self._fixed_separator = fixed_separator - self._separators = separators or ["\n\n", "\n", " ", ""] + self._separators = 
separators or ["\n\n", "\n", "。", ". ", " ", ""] def split_text(self, text: str) -> list[str]: """Split incoming text and return chunks.""" @@ -90,16 +91,19 @@ class FixedRecursiveCharacterTextSplitter(EnhanceRecursiveCharacterTextSplitter) # Now that we have the separator, split the text if separator: if separator == " ": - splits = text.split() + splits = re.split(r" +", text) else: splits = text.split(separator) splits = [item + separator if i < len(splits) else item for i, item in enumerate(splits)] else: splits = list(text) - splits = [s for s in splits if (s not in {"", "\n"})] + if separator == "\n": + splits = [s for s in splits if s != ""] + else: + splits = [s for s in splits if (s not in {"", "\n"})] _good_splits = [] _good_splits_lengths = [] # cache the lengths of the splits - _separator = "" if self._keep_separator else separator + _separator = separator if self._keep_separator else "" s_lens = self._length_function(splits) if separator != "": for s, s_len in zip(splits, s_lens): From 8a2b2082992a49597cb4d9b9832b22d468c0092f Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Fri, 10 Oct 2025 17:12:12 +0900 Subject: [PATCH 72/82] Refactor account models to use SQLAlchemy 2.0 dataclass mapping (#26415) Co-authored-by: google-labs-jules[bot] <161369871+google-labs-jules[bot]@users.noreply.github.com> Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- api/models/account.py | 159 +++++++++++------- api/services/account_service.py | 24 +-- .../app/test_chat_message_permissions.py | 14 +- .../app/test_model_config_permissions.py | 9 +- .../services/test_account_service.py | 3 +- .../services/test_workflow_service.py | 83 ++++----- .../test_delete_segment_from_index_task.py | 21 +-- .../test_disable_segments_from_index_task.py | 54 +++--- 
.../tasks/test_mail_invite_member_task.py | 17 +- api/tests/unit_tests/libs/test_helper.py | 4 +- .../test_sqlalchemy_repository.py | 5 +- .../test_workflow_draft_variable_service.py | 3 +- 12 files changed, 219 insertions(+), 177 deletions(-) diff --git a/api/models/account.py b/api/models/account.py index 8c1f990aa2..86cd9e41b5 100644 --- a/api/models/account.py +++ b/api/models/account.py @@ -1,15 +1,16 @@ import enum import json +from dataclasses import field from datetime import datetime from typing import Any, Optional import sqlalchemy as sa from flask_login import UserMixin # type: ignore[import-untyped] from sqlalchemy import DateTime, String, func, select -from sqlalchemy.orm import Mapped, Session, mapped_column, reconstructor +from sqlalchemy.orm import Mapped, Session, mapped_column from typing_extensions import deprecated -from models.base import Base +from models.base import TypeBase from .engine import db from .types import StringUUID @@ -83,31 +84,37 @@ class AccountStatus(enum.StrEnum): CLOSED = "closed" -class Account(UserMixin, Base): +class Account(UserMixin, TypeBase): __tablename__ = "accounts" __table_args__ = (sa.PrimaryKeyConstraint("id", name="account_pkey"), sa.Index("account_email_idx", "email")) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) name: Mapped[str] = mapped_column(String(255)) email: Mapped[str] = mapped_column(String(255)) - password: Mapped[str | None] = mapped_column(String(255)) - password_salt: Mapped[str | None] = mapped_column(String(255)) - avatar: Mapped[str | None] = mapped_column(String(255), nullable=True) - interface_language: Mapped[str | None] = mapped_column(String(255)) - interface_theme: Mapped[str | None] = mapped_column(String(255), nullable=True) - timezone: Mapped[str | None] = mapped_column(String(255)) - last_login_at: Mapped[datetime | None] = 
mapped_column(DateTime, nullable=True) - last_login_ip: Mapped[str | None] = mapped_column(String(255), nullable=True) - last_active_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp(), nullable=False) - status: Mapped[str] = mapped_column(String(16), server_default=sa.text("'active'::character varying")) - initialized_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True) - created_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp(), nullable=False) - updated_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp(), nullable=False) + password: Mapped[str | None] = mapped_column(String(255), default=None) + password_salt: Mapped[str | None] = mapped_column(String(255), default=None) + avatar: Mapped[str | None] = mapped_column(String(255), nullable=True, default=None) + interface_language: Mapped[str | None] = mapped_column(String(255), default=None) + interface_theme: Mapped[str | None] = mapped_column(String(255), nullable=True, default=None) + timezone: Mapped[str | None] = mapped_column(String(255), default=None) + last_login_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=None) + last_login_ip: Mapped[str | None] = mapped_column(String(255), nullable=True, default=None) + last_active_at: Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) + status: Mapped[str] = mapped_column( + String(16), server_default=sa.text("'active'::character varying"), default="active" + ) + initialized_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=None) + created_at: Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) - @reconstructor - def 
init_on_load(self): - self.role: TenantAccountRole | None = None - self._current_tenant: Tenant | None = None + role: TenantAccountRole | None = field(default=None, init=False) + _current_tenant: "Tenant | None" = field(default=None, init=False) @property def is_password_set(self): @@ -226,18 +233,24 @@ class TenantStatus(enum.StrEnum): ARCHIVE = "archive" -class Tenant(Base): +class Tenant(TypeBase): __tablename__ = "tenants" __table_args__ = (sa.PrimaryKeyConstraint("id", name="tenant_pkey"),) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) name: Mapped[str] = mapped_column(String(255)) - encrypt_public_key: Mapped[str | None] = mapped_column(sa.Text) - plan: Mapped[str] = mapped_column(String(255), server_default=sa.text("'basic'::character varying")) - status: Mapped[str] = mapped_column(String(255), server_default=sa.text("'normal'::character varying")) - custom_config: Mapped[str | None] = mapped_column(sa.Text) - created_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp(), nullable=False) - updated_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp()) + encrypt_public_key: Mapped[str | None] = mapped_column(sa.Text, default=None) + plan: Mapped[str] = mapped_column( + String(255), server_default=sa.text("'basic'::character varying"), default="basic" + ) + status: Mapped[str] = mapped_column( + String(255), server_default=sa.text("'normal'::character varying"), default="normal" + ) + custom_config: Mapped[str | None] = mapped_column(sa.Text, default=None) + created_at: Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) + updated_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp(), init=False) def get_accounts(self) -> list[Account]: return list( 
@@ -257,7 +270,7 @@ class Tenant(Base): self.custom_config = json.dumps(value) -class TenantAccountJoin(Base): +class TenantAccountJoin(TypeBase): __tablename__ = "tenant_account_joins" __table_args__ = ( sa.PrimaryKeyConstraint("id", name="tenant_account_join_pkey"), @@ -266,17 +279,21 @@ class TenantAccountJoin(Base): sa.UniqueConstraint("tenant_id", "account_id", name="unique_tenant_account_join"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) tenant_id: Mapped[str] = mapped_column(StringUUID) account_id: Mapped[str] = mapped_column(StringUUID) - current: Mapped[bool] = mapped_column(sa.Boolean, server_default=sa.text("false")) - role: Mapped[str] = mapped_column(String(16), server_default="normal") - invited_by: Mapped[str | None] = mapped_column(StringUUID) - created_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp()) - updated_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp()) + current: Mapped[bool] = mapped_column(sa.Boolean, server_default=sa.text("false"), default=False) + role: Mapped[str] = mapped_column(String(16), server_default="normal", default="normal") + invited_by: Mapped[str | None] = mapped_column(StringUUID, nullable=True, default=None) + created_at: Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) -class AccountIntegrate(Base): +class AccountIntegrate(TypeBase): __tablename__ = "account_integrates" __table_args__ = ( sa.PrimaryKeyConstraint("id", name="account_integrate_pkey"), @@ -284,16 +301,20 @@ class AccountIntegrate(Base): sa.UniqueConstraint("provider", "open_id", name="unique_provider_open_id"), ) - id: Mapped[str] = 
mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) account_id: Mapped[str] = mapped_column(StringUUID) provider: Mapped[str] = mapped_column(String(16)) open_id: Mapped[str] = mapped_column(String(255)) encrypted_token: Mapped[str] = mapped_column(String(255)) - created_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp()) - updated_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp()) + created_at: Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) -class InvitationCode(Base): +class InvitationCode(TypeBase): __tablename__ = "invitation_codes" __table_args__ = ( sa.PrimaryKeyConstraint("id", name="invitation_code_pkey"), @@ -301,18 +322,22 @@ class InvitationCode(Base): sa.Index("invitation_codes_code_idx", "code", "status"), ) - id: Mapped[int] = mapped_column(sa.Integer) + id: Mapped[int] = mapped_column(sa.Integer, init=False) batch: Mapped[str] = mapped_column(String(255)) code: Mapped[str] = mapped_column(String(32)) - status: Mapped[str] = mapped_column(String(16), server_default=sa.text("'unused'::character varying")) - used_at: Mapped[datetime | None] = mapped_column(DateTime) - used_by_tenant_id: Mapped[str | None] = mapped_column(StringUUID) - used_by_account_id: Mapped[str | None] = mapped_column(StringUUID) - deprecated_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True) - created_at: Mapped[datetime] = mapped_column(DateTime, server_default=sa.text("CURRENT_TIMESTAMP(0)")) + status: Mapped[str] = mapped_column( + String(16), server_default=sa.text("'unused'::character varying"), default="unused" + ) + used_at: Mapped[datetime | None] = 
mapped_column(DateTime, default=None) + used_by_tenant_id: Mapped[str | None] = mapped_column(StringUUID, default=None) + used_by_account_id: Mapped[str | None] = mapped_column(StringUUID, default=None) + deprecated_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=None) + created_at: Mapped[datetime] = mapped_column( + DateTime, server_default=sa.text("CURRENT_TIMESTAMP(0)"), nullable=False, init=False + ) -class TenantPluginPermission(Base): +class TenantPluginPermission(TypeBase): class InstallPermission(enum.StrEnum): EVERYONE = "everyone" ADMINS = "admins" @@ -329,13 +354,17 @@ class TenantPluginPermission(Base): sa.UniqueConstraint("tenant_id", name="unique_tenant_plugin"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) - install_permission: Mapped[InstallPermission] = mapped_column(String(16), nullable=False, server_default="everyone") - debug_permission: Mapped[DebugPermission] = mapped_column(String(16), nullable=False, server_default="noone") + install_permission: Mapped[InstallPermission] = mapped_column( + String(16), nullable=False, server_default="everyone", default=InstallPermission.EVERYONE + ) + debug_permission: Mapped[DebugPermission] = mapped_column( + String(16), nullable=False, server_default="noone", default=DebugPermission.NOBODY + ) -class TenantPluginAutoUpgradeStrategy(Base): +class TenantPluginAutoUpgradeStrategy(TypeBase): class StrategySetting(enum.StrEnum): DISABLED = "disabled" FIX_ONLY = "fix_only" @@ -352,12 +381,20 @@ class TenantPluginAutoUpgradeStrategy(Base): sa.UniqueConstraint("tenant_id", name="unique_tenant_plugin_auto_upgrade_strategy"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, 
server_default=sa.text("uuid_generate_v4()"), init=False) tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) - strategy_setting: Mapped[StrategySetting] = mapped_column(String(16), nullable=False, server_default="fix_only") - upgrade_time_of_day: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=0) # seconds of the day - upgrade_mode: Mapped[UpgradeMode] = mapped_column(String(16), nullable=False, server_default="exclude") - exclude_plugins: Mapped[list[str]] = mapped_column(sa.ARRAY(String(255)), nullable=False) # plugin_id (author/name) - include_plugins: Mapped[list[str]] = mapped_column(sa.ARRAY(String(255)), nullable=False) # plugin_id (author/name) - created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) + strategy_setting: Mapped[StrategySetting] = mapped_column( + String(16), nullable=False, server_default="fix_only", default=StrategySetting.FIX_ONLY + ) + upgrade_mode: Mapped[UpgradeMode] = mapped_column( + String(16), nullable=False, server_default="exclude", default=UpgradeMode.EXCLUDE + ) + exclude_plugins: Mapped[list[str]] = mapped_column(sa.ARRAY(String(255)), nullable=False, default_factory=list) + include_plugins: Mapped[list[str]] = mapped_column(sa.ARRAY(String(255)), nullable=False, default_factory=list) + upgrade_time_of_day: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=0) + created_at: Mapped[datetime] = mapped_column( + DateTime, nullable=False, server_default=func.current_timestamp(), init=False + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime, nullable=False, server_default=func.current_timestamp(), init=False + ) diff --git a/api/services/account_service.py b/api/services/account_service.py index 0e699d16da..77b8744020 100644 --- a/api/services/account_service.py +++ b/api/services/account_service.py @@ 
-246,10 +246,8 @@ class AccountService: ) ) - account = Account() - account.email = email - account.name = name - + password_to_set = None + salt_to_set = None if password: valid_password(password) @@ -261,14 +259,18 @@ class AccountService: password_hashed = hash_password(password, salt) base64_password_hashed = base64.b64encode(password_hashed).decode() - account.password = base64_password_hashed - account.password_salt = base64_salt + password_to_set = base64_password_hashed + salt_to_set = base64_salt - account.interface_language = interface_language - account.interface_theme = interface_theme - - # Set timezone based on language - account.timezone = language_timezone_mapping.get(interface_language, "UTC") + account = Account( + name=name, + email=email, + password=password_to_set, + password_salt=salt_to_set, + interface_language=interface_language, + interface_theme=interface_theme, + timezone=language_timezone_mapping.get(interface_language, "UTC"), + ) db.session.add(account) db.session.commit() diff --git a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py index da1524ff2e..4d1c1227bd 100644 --- a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py @@ -33,17 +33,19 @@ class TestChatMessageApiPermissions: @pytest.fixture def mock_account(self, monkeypatch: pytest.MonkeyPatch): """Create a mock Account for testing.""" - account = Account() - account.id = str(uuid.uuid4()) - account.name = "Test User" - account.email = "test@example.com" + + account = Account( + name="Test User", + email="test@example.com", + ) account.last_active_at = naive_utc_now() account.created_at = naive_utc_now() account.updated_at = naive_utc_now() + account.id = str(uuid.uuid4()) - tenant = Tenant() + # Create mock tenant + tenant = 
Tenant(name="Test Tenant") tenant.id = str(uuid.uuid4()) - tenant.name = "Test Tenant" mock_session_instance = mock.Mock() diff --git a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py index c0fd56ef63..e158f26f3a 100644 --- a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py @@ -32,17 +32,16 @@ class TestModelConfigResourcePermissions: @pytest.fixture def mock_account(self, monkeypatch: pytest.MonkeyPatch): """Create a mock Account for testing.""" - account = Account() + + account = Account(name="Test User", email="test@example.com") account.id = str(uuid.uuid4()) - account.name = "Test User" - account.email = "test@example.com" account.last_active_at = naive_utc_now() account.created_at = naive_utc_now() account.updated_at = naive_utc_now() - tenant = Tenant() + # Create mock tenant + tenant = Tenant(name="Test Tenant") tenant.id = str(uuid.uuid4()) - tenant.name = "Test Tenant" mock_session_instance = mock.Mock() diff --git a/api/tests/test_containers_integration_tests/services/test_account_service.py b/api/tests/test_containers_integration_tests/services/test_account_service.py index c98406d845..0a2fb955ae 100644 --- a/api/tests/test_containers_integration_tests/services/test_account_service.py +++ b/api/tests/test_containers_integration_tests/services/test_account_service.py @@ -16,6 +16,7 @@ from services.errors.account import ( AccountPasswordError, AccountRegisterError, CurrentPasswordIncorrectError, + TenantNotFoundError, ) from services.errors.workspace import WorkSpaceNotAllowedCreateError, WorkspacesLimitExceededError @@ -1414,7 +1415,7 @@ class TestTenantService: ) # Try to get current tenant (should fail) - with pytest.raises(AttributeError): + with pytest.raises((AttributeError, TenantNotFoundError)): 
TenantService.get_current_tenant_by_account(account) def test_switch_tenant_success(self, db_session_with_containers, mock_external_service_dependencies): diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_service.py b/api/tests/test_containers_integration_tests/services/test_workflow_service.py index 60150667ed..0dd3909ba7 100644 --- a/api/tests/test_containers_integration_tests/services/test_workflow_service.py +++ b/api/tests/test_containers_integration_tests/services/test_workflow_service.py @@ -44,27 +44,26 @@ class TestWorkflowService: Account: Created test account instance """ fake = fake or Faker() - account = Account() - account.id = fake.uuid4() - account.email = fake.email() - account.name = fake.name() - account.avatar_url = fake.url() - account.tenant_id = fake.uuid4() - account.status = "active" - account.type = "normal" - account.role = "owner" - account.interface_language = "en-US" # Set interface language for Site creation + account = Account( + email=fake.email(), + name=fake.name(), + avatar=fake.url(), + status="active", + interface_language="en-US", # Set interface language for Site creation + ) account.created_at = fake.date_time_this_year() + account.id = fake.uuid4() account.updated_at = account.created_at # Create a tenant for the account from models.account import Tenant - tenant = Tenant() - tenant.id = account.tenant_id - tenant.name = f"Test Tenant {fake.company()}" - tenant.plan = "basic" - tenant.status = "active" + tenant = Tenant( + name=f"Test Tenant {fake.company()}", + plan="basic", + status="active", + ) + tenant.id = account.current_tenant_id tenant.created_at = fake.date_time_this_year() tenant.updated_at = tenant.created_at @@ -91,20 +90,21 @@ class TestWorkflowService: App: Created test app instance """ fake = fake or Faker() - app = App() - app.id = fake.uuid4() - app.tenant_id = fake.uuid4() - app.name = fake.company() - app.description = fake.text() - app.mode = AppMode.WORKFLOW - 
app.icon_type = "emoji" - app.icon = "🤖" - app.icon_background = "#FFEAD5" - app.enable_site = True - app.enable_api = True - app.created_by = fake.uuid4() + app = App( + id=fake.uuid4(), + tenant_id=fake.uuid4(), + name=fake.company(), + description=fake.text(), + mode=AppMode.WORKFLOW, + icon_type="emoji", + icon="🤖", + icon_background="#FFEAD5", + enable_site=True, + enable_api=True, + created_by=fake.uuid4(), + workflow_id=None, # Will be set when workflow is created + ) app.updated_by = app.created_by - app.workflow_id = None # Will be set when workflow is created from extensions.ext_database import db @@ -126,19 +126,20 @@ class TestWorkflowService: Workflow: Created test workflow instance """ fake = fake or Faker() - workflow = Workflow() - workflow.id = fake.uuid4() - workflow.tenant_id = app.tenant_id - workflow.app_id = app.id - workflow.type = WorkflowType.WORKFLOW.value - workflow.version = Workflow.VERSION_DRAFT - workflow.graph = json.dumps({"nodes": [], "edges": []}) - workflow.features = json.dumps({"features": []}) - # unique_hash is a computed property based on graph and features - workflow.created_by = account.id - workflow.updated_by = account.id - workflow.environment_variables = [] - workflow.conversation_variables = [] + workflow = Workflow( + id=fake.uuid4(), + tenant_id=app.tenant_id, + app_id=app.id, + type=WorkflowType.WORKFLOW.value, + version=Workflow.VERSION_DRAFT, + graph=json.dumps({"nodes": [], "edges": []}), + features=json.dumps({"features": []}), + # unique_hash is a computed property based on graph and features + created_by=account.id, + updated_by=account.id, + environment_variables=[], + conversation_variables=[], + ) from extensions.ext_database import db diff --git a/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py index 7af4f238be..94e9b76965 100644 --- 
a/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py @@ -48,11 +48,8 @@ class TestDeleteSegmentFromIndexTask: Tenant: Created test tenant instance """ fake = fake or Faker() - tenant = Tenant() + tenant = Tenant(name=f"Test Tenant {fake.company()}", plan="basic", status="active") tenant.id = fake.uuid4() - tenant.name = f"Test Tenant {fake.company()}" - tenant.plan = "basic" - tenant.status = "active" tenant.created_at = fake.date_time_this_year() tenant.updated_at = tenant.created_at @@ -73,16 +70,14 @@ class TestDeleteSegmentFromIndexTask: Account: Created test account instance """ fake = fake or Faker() - account = Account() + account = Account( + name=fake.name(), + email=fake.email(), + avatar=fake.url(), + status="active", + interface_language="en-US", + ) account.id = fake.uuid4() - account.email = fake.email() - account.name = fake.name() - account.avatar_url = fake.url() - account.tenant_id = tenant.id - account.status = "active" - account.type = "normal" - account.role = "owner" - account.interface_language = "en-US" account.created_at = fake.date_time_this_year() account.updated_at = account.created_at diff --git a/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py index 5fdb8c617c..0b36e0914a 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py @@ -43,27 +43,30 @@ class TestDisableSegmentsFromIndexTask: Account: Created test account instance """ fake = fake or Faker() - account = Account() + account = Account( + email=fake.email(), + name=fake.name(), + avatar=fake.url(), + status="active", + interface_language="en-US", + ) account.id = 
fake.uuid4() - account.email = fake.email() - account.name = fake.name() - account.avatar_url = fake.url() + # monkey-patch attributes for test setup account.tenant_id = fake.uuid4() - account.status = "active" account.type = "normal" account.role = "owner" - account.interface_language = "en-US" account.created_at = fake.date_time_this_year() account.updated_at = account.created_at # Create a tenant for the account from models.account import Tenant - tenant = Tenant() + tenant = Tenant( + name=f"Test Tenant {fake.company()}", + plan="basic", + status="active", + ) tenant.id = account.tenant_id - tenant.name = f"Test Tenant {fake.company()}" - tenant.plan = "basic" - tenant.status = "active" tenant.created_at = fake.date_time_this_year() tenant.updated_at = tenant.created_at @@ -91,20 +94,21 @@ class TestDisableSegmentsFromIndexTask: Dataset: Created test dataset instance """ fake = fake or Faker() - dataset = Dataset() - dataset.id = fake.uuid4() - dataset.tenant_id = account.tenant_id - dataset.name = f"Test Dataset {fake.word()}" - dataset.description = fake.text(max_nb_chars=200) - dataset.provider = "vendor" - dataset.permission = "only_me" - dataset.data_source_type = "upload_file" - dataset.indexing_technique = "high_quality" - dataset.created_by = account.id - dataset.updated_by = account.id - dataset.embedding_model = "text-embedding-ada-002" - dataset.embedding_model_provider = "openai" - dataset.built_in_field_enabled = False + dataset = Dataset( + id=fake.uuid4(), + tenant_id=account.tenant_id, + name=f"Test Dataset {fake.word()}", + description=fake.text(max_nb_chars=200), + provider="vendor", + permission="only_me", + data_source_type="upload_file", + indexing_technique="high_quality", + created_by=account.id, + updated_by=account.id, + embedding_model="text-embedding-ada-002", + embedding_model_provider="openai", + built_in_field_enabled=False, + ) from extensions.ext_database import db @@ -128,6 +132,7 @@ class TestDisableSegmentsFromIndexTask: """ 
fake = fake or Faker() document = DatasetDocument() + document.id = fake.uuid4() document.tenant_id = dataset.tenant_id document.dataset_id = dataset.id @@ -153,7 +158,6 @@ class TestDisableSegmentsFromIndexTask: document.archived = False document.doc_form = "text_model" # Use text_model form for testing document.doc_language = "en" - from extensions.ext_database import db db.session.add(document) diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py index 8fef87b317..ead7757c13 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py @@ -96,9 +96,9 @@ class TestMailInviteMemberTask: password=fake.password(), interface_language="en-US", status=AccountStatus.ACTIVE.value, - created_at=datetime.now(UTC), - updated_at=datetime.now(UTC), ) + account.created_at = datetime.now(UTC) + account.updated_at = datetime.now(UTC) db_session_with_containers.add(account) db_session_with_containers.commit() db_session_with_containers.refresh(account) @@ -106,9 +106,9 @@ class TestMailInviteMemberTask: # Create tenant tenant = Tenant( name=fake.company(), - created_at=datetime.now(UTC), - updated_at=datetime.now(UTC), ) + tenant.created_at = datetime.now(UTC) + tenant.updated_at = datetime.now(UTC) db_session_with_containers.add(tenant) db_session_with_containers.commit() db_session_with_containers.refresh(tenant) @@ -118,8 +118,8 @@ class TestMailInviteMemberTask: tenant_id=tenant.id, account_id=account.id, role=TenantAccountRole.OWNER.value, - created_at=datetime.now(UTC), ) + tenant_join.created_at = datetime.now(UTC) db_session_with_containers.add(tenant_join) db_session_with_containers.commit() @@ -164,9 +164,10 @@ class TestMailInviteMemberTask: password="", interface_language="en-US", status=AccountStatus.PENDING.value, - 
created_at=datetime.now(UTC), - updated_at=datetime.now(UTC), ) + + account.created_at = datetime.now(UTC) + account.updated_at = datetime.now(UTC) db_session_with_containers.add(account) db_session_with_containers.commit() db_session_with_containers.refresh(account) @@ -176,8 +177,8 @@ class TestMailInviteMemberTask: tenant_id=tenant.id, account_id=account.id, role=TenantAccountRole.NORMAL.value, - created_at=datetime.now(UTC), ) + tenant_join.created_at = datetime.now(UTC) db_session_with_containers.add(tenant_join) db_session_with_containers.commit() diff --git a/api/tests/unit_tests/libs/test_helper.py b/api/tests/unit_tests/libs/test_helper.py index b7701055f5..85789bfa7e 100644 --- a/api/tests/unit_tests/libs/test_helper.py +++ b/api/tests/unit_tests/libs/test_helper.py @@ -11,7 +11,7 @@ class TestExtractTenantId: def test_extract_tenant_id_from_account_with_tenant(self): """Test extracting tenant_id from Account with current_tenant_id.""" # Create a mock Account object - account = Account() + account = Account(name="test", email="test@example.com") # Mock the current_tenant_id property account._current_tenant = type("MockTenant", (), {"id": "account-tenant-123"})() @@ -21,7 +21,7 @@ class TestExtractTenantId: def test_extract_tenant_id_from_account_without_tenant(self): """Test extracting tenant_id from Account without current_tenant_id.""" # Create a mock Account object - account = Account() + account = Account(name="test", email="test@example.com") account._current_tenant = None tenant_id = extract_tenant_id(account) diff --git a/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py b/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py index fadd1ee88f..28b339fe85 100644 --- a/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py +++ b/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py @@ -59,12 +59,11 @@ def 
session(): @pytest.fixture def mock_user(): """Create a user instance for testing.""" - user = Account() + user = Account(name="test", email="test@example.com") user.id = "test-user-id" - tenant = Tenant() + tenant = Tenant(name="Test Workspace") tenant.id = "test-tenant" - tenant.name = "Test Workspace" user._current_tenant = MagicMock() user._current_tenant.id = "test-tenant" diff --git a/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py b/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py index 7e324ca4db..66361f26e0 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py @@ -47,7 +47,8 @@ class TestDraftVariableSaver: def test__should_variable_be_visible(self): mock_session = MagicMock(spec=Session) - mock_user = Account(id=str(uuid.uuid4())) + mock_user = Account(name="test", email="test@example.com") + mock_user.id = str(uuid.uuid4()) test_app_id = self._get_test_app_id() saver = DraftVariableSaver( session=mock_session, From c1e8584b9760a42cd2436f763b97cb4fb5546365 Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Fri, 10 Oct 2025 17:23:39 +0900 Subject: [PATCH 73/82] feat: Refactor api.add_resource to @console_ns.route decorator (#26386) Co-authored-by: google-labs-jules[bot] <161369871+google-labs-jules[bot]@users.noreply.github.com> Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- .../console/explore/installed_app.py | 8 +- api/controllers/console/explore/parameter.py | 10 +- .../console/explore/recommended_app.py | 8 +- .../console/explore/saved_message.py | 18 +-- api/controllers/console/workspace/account.py | 46 ++++---- .../workspace/load_balancing_config.py | 20 
++-- api/controllers/console/workspace/members.py | 21 ++-- .../console/workspace/model_providers.py | 27 ++--- api/controllers/console/workspace/models.py | 44 +++----- api/controllers/console/workspace/plugin.py | 61 +++++----- .../console/workspace/tool_providers.py | 104 +++++++----------- .../console/workspace/workspace.py | 4 +- 12 files changed, 141 insertions(+), 230 deletions(-) diff --git a/api/controllers/console/explore/installed_app.py b/api/controllers/console/explore/installed_app.py index bdc3fb0dbd..c86c243c9b 100644 --- a/api/controllers/console/explore/installed_app.py +++ b/api/controllers/console/explore/installed_app.py @@ -6,7 +6,7 @@ from flask_restx import Resource, inputs, marshal_with, reqparse from sqlalchemy import and_, select from werkzeug.exceptions import BadRequest, Forbidden, NotFound -from controllers.console import api +from controllers.console import console_ns from controllers.console.explore.wraps import InstalledAppResource from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check from extensions.ext_database import db @@ -22,6 +22,7 @@ from services.feature_service import FeatureService logger = logging.getLogger(__name__) +@console_ns.route("/installed-apps") class InstalledAppsListApi(Resource): @login_required @account_initialization_required @@ -154,6 +155,7 @@ class InstalledAppsListApi(Resource): return {"message": "App installed successfully"} +@console_ns.route("/installed-apps/") class InstalledAppApi(InstalledAppResource): """ update and delete an installed app @@ -185,7 +187,3 @@ class InstalledAppApi(InstalledAppResource): db.session.commit() return {"result": "success", "message": "App info updated successfully"} - - -api.add_resource(InstalledAppsListApi, "/installed-apps") -api.add_resource(InstalledAppApi, "/installed-apps/") diff --git a/api/controllers/console/explore/parameter.py b/api/controllers/console/explore/parameter.py index 7742ea24a9..9c6b2aedfb 
100644 --- a/api/controllers/console/explore/parameter.py +++ b/api/controllers/console/explore/parameter.py @@ -1,7 +1,7 @@ from flask_restx import marshal_with from controllers.common import fields -from controllers.console import api +from controllers.console import console_ns from controllers.console.app.error import AppUnavailableError from controllers.console.explore.wraps import InstalledAppResource from core.app.app_config.common.parameters_mapping import get_parameters_from_feature_dict @@ -9,6 +9,7 @@ from models.model import AppMode, InstalledApp from services.app_service import AppService +@console_ns.route("/installed-apps//parameters", endpoint="installed_app_parameters") class AppParameterApi(InstalledAppResource): """Resource for app variables.""" @@ -39,6 +40,7 @@ class AppParameterApi(InstalledAppResource): return get_parameters_from_feature_dict(features_dict=features_dict, user_input_form=user_input_form) +@console_ns.route("/installed-apps//meta", endpoint="installed_app_meta") class ExploreAppMetaApi(InstalledAppResource): def get(self, installed_app: InstalledApp): """Get app meta""" @@ -46,9 +48,3 @@ class ExploreAppMetaApi(InstalledAppResource): if not app_model: raise ValueError("App not found") return AppService().get_app_meta(app_model) - - -api.add_resource( - AppParameterApi, "/installed-apps//parameters", endpoint="installed_app_parameters" -) -api.add_resource(ExploreAppMetaApi, "/installed-apps//meta", endpoint="installed_app_meta") diff --git a/api/controllers/console/explore/recommended_app.py b/api/controllers/console/explore/recommended_app.py index 974222ddf7..6d627a929a 100644 --- a/api/controllers/console/explore/recommended_app.py +++ b/api/controllers/console/explore/recommended_app.py @@ -1,7 +1,7 @@ from flask_restx import Resource, fields, marshal_with, reqparse from constants.languages import languages -from controllers.console import api +from controllers.console import console_ns from controllers.console.wraps import 
account_initialization_required from libs.helper import AppIconUrlField from libs.login import current_user, login_required @@ -35,6 +35,7 @@ recommended_app_list_fields = { } +@console_ns.route("/explore/apps") class RecommendedAppListApi(Resource): @login_required @account_initialization_required @@ -56,13 +57,10 @@ class RecommendedAppListApi(Resource): return RecommendedAppService.get_recommended_apps_and_categories(language_prefix) +@console_ns.route("/explore/apps/") class RecommendedAppApi(Resource): @login_required @account_initialization_required def get(self, app_id): app_id = str(app_id) return RecommendedAppService.get_recommend_app_detail(app_id) - - -api.add_resource(RecommendedAppListApi, "/explore/apps") -api.add_resource(RecommendedAppApi, "/explore/apps/") diff --git a/api/controllers/console/explore/saved_message.py b/api/controllers/console/explore/saved_message.py index 6f05f898f9..79e4a4339e 100644 --- a/api/controllers/console/explore/saved_message.py +++ b/api/controllers/console/explore/saved_message.py @@ -2,7 +2,7 @@ from flask_restx import fields, marshal_with, reqparse from flask_restx.inputs import int_range from werkzeug.exceptions import NotFound -from controllers.console import api +from controllers.console import console_ns from controllers.console.explore.error import NotCompletionAppError from controllers.console.explore.wraps import InstalledAppResource from fields.conversation_fields import message_file_fields @@ -25,6 +25,7 @@ message_fields = { } +@console_ns.route("/installed-apps//saved-messages", endpoint="installed_app_saved_messages") class SavedMessageListApi(InstalledAppResource): saved_message_infinite_scroll_pagination_fields = { "limit": fields.Integer, @@ -66,6 +67,9 @@ class SavedMessageListApi(InstalledAppResource): return {"result": "success"} +@console_ns.route( + "/installed-apps//saved-messages/", endpoint="installed_app_saved_message" +) class SavedMessageApi(InstalledAppResource): def delete(self, 
installed_app, message_id): app_model = installed_app.app @@ -80,15 +84,3 @@ class SavedMessageApi(InstalledAppResource): SavedMessageService.delete(app_model, current_user, message_id) return {"result": "success"}, 204 - - -api.add_resource( - SavedMessageListApi, - "/installed-apps//saved-messages", - endpoint="installed_app_saved_messages", -) -api.add_resource( - SavedMessageApi, - "/installed-apps//saved-messages/", - endpoint="installed_app_saved_message", -) diff --git a/api/controllers/console/workspace/account.py b/api/controllers/console/workspace/account.py index 7a41a8a5cc..e2b0e3f84d 100644 --- a/api/controllers/console/workspace/account.py +++ b/api/controllers/console/workspace/account.py @@ -9,7 +9,7 @@ from sqlalchemy.orm import Session from configs import dify_config from constants.languages import supported_language -from controllers.console import api +from controllers.console import console_ns from controllers.console.auth.error import ( EmailAlreadyInUseError, EmailChangeLimitError, @@ -45,6 +45,7 @@ from services.billing_service import BillingService from services.errors.account import CurrentPasswordIncorrectError as ServiceCurrentPasswordIncorrectError +@console_ns.route("/account/init") class AccountInitApi(Resource): @setup_required @login_required @@ -97,6 +98,7 @@ class AccountInitApi(Resource): return {"result": "success"} +@console_ns.route("/account/profile") class AccountProfileApi(Resource): @setup_required @login_required @@ -109,6 +111,7 @@ class AccountProfileApi(Resource): return current_user +@console_ns.route("/account/name") class AccountNameApi(Resource): @setup_required @login_required @@ -130,6 +133,7 @@ class AccountNameApi(Resource): return updated_account +@console_ns.route("/account/avatar") class AccountAvatarApi(Resource): @setup_required @login_required @@ -147,6 +151,7 @@ class AccountAvatarApi(Resource): return updated_account +@console_ns.route("/account/interface-language") class 
AccountInterfaceLanguageApi(Resource): @setup_required @login_required @@ -164,6 +169,7 @@ class AccountInterfaceLanguageApi(Resource): return updated_account +@console_ns.route("/account/interface-theme") class AccountInterfaceThemeApi(Resource): @setup_required @login_required @@ -181,6 +187,7 @@ class AccountInterfaceThemeApi(Resource): return updated_account +@console_ns.route("/account/timezone") class AccountTimezoneApi(Resource): @setup_required @login_required @@ -202,6 +209,7 @@ class AccountTimezoneApi(Resource): return updated_account +@console_ns.route("/account/password") class AccountPasswordApi(Resource): @setup_required @login_required @@ -227,6 +235,7 @@ class AccountPasswordApi(Resource): return {"result": "success"} +@console_ns.route("/account/integrates") class AccountIntegrateApi(Resource): integrate_fields = { "provider": fields.String, @@ -283,6 +292,7 @@ class AccountIntegrateApi(Resource): return {"data": integrate_data} +@console_ns.route("/account/delete/verify") class AccountDeleteVerifyApi(Resource): @setup_required @login_required @@ -298,6 +308,7 @@ class AccountDeleteVerifyApi(Resource): return {"result": "success", "data": token} +@console_ns.route("/account/delete") class AccountDeleteApi(Resource): @setup_required @login_required @@ -320,6 +331,7 @@ class AccountDeleteApi(Resource): return {"result": "success"} +@console_ns.route("/account/delete/feedback") class AccountDeleteUpdateFeedbackApi(Resource): @setup_required def post(self): @@ -333,6 +345,7 @@ class AccountDeleteUpdateFeedbackApi(Resource): return {"result": "success"} +@console_ns.route("/account/education/verify") class EducationVerifyApi(Resource): verify_fields = { "token": fields.String, @@ -352,6 +365,7 @@ class EducationVerifyApi(Resource): return BillingService.EducationIdentity.verify(account.id, account.email) +@console_ns.route("/account/education") class EducationApi(Resource): status_fields = { "result": fields.Boolean, @@ -396,6 +410,7 @@ class 
EducationApi(Resource): return res +@console_ns.route("/account/education/autocomplete") class EducationAutoCompleteApi(Resource): data_fields = { "data": fields.List(fields.String), @@ -419,6 +434,7 @@ class EducationAutoCompleteApi(Resource): return BillingService.EducationIdentity.autocomplete(args["keywords"], args["page"], args["limit"]) +@console_ns.route("/account/change-email") class ChangeEmailSendEmailApi(Resource): @enable_change_email @setup_required @@ -467,6 +483,7 @@ class ChangeEmailSendEmailApi(Resource): return {"result": "success", "data": token} +@console_ns.route("/account/change-email/validity") class ChangeEmailCheckApi(Resource): @enable_change_email @setup_required @@ -508,6 +525,7 @@ class ChangeEmailCheckApi(Resource): return {"is_valid": True, "email": token_data.get("email"), "token": new_token} +@console_ns.route("/account/change-email/reset") class ChangeEmailResetApi(Resource): @enable_change_email @setup_required @@ -547,6 +565,7 @@ class ChangeEmailResetApi(Resource): return updated_account +@console_ns.route("/account/change-email/check-email-unique") class CheckEmailUnique(Resource): @setup_required def post(self): @@ -558,28 +577,3 @@ class CheckEmailUnique(Resource): if not AccountService.check_email_unique(args["email"]): raise EmailAlreadyInUseError() return {"result": "success"} - - -# Register API resources -api.add_resource(AccountInitApi, "/account/init") -api.add_resource(AccountProfileApi, "/account/profile") -api.add_resource(AccountNameApi, "/account/name") -api.add_resource(AccountAvatarApi, "/account/avatar") -api.add_resource(AccountInterfaceLanguageApi, "/account/interface-language") -api.add_resource(AccountInterfaceThemeApi, "/account/interface-theme") -api.add_resource(AccountTimezoneApi, "/account/timezone") -api.add_resource(AccountPasswordApi, "/account/password") -api.add_resource(AccountIntegrateApi, "/account/integrates") -api.add_resource(AccountDeleteVerifyApi, "/account/delete/verify") 
-api.add_resource(AccountDeleteApi, "/account/delete") -api.add_resource(AccountDeleteUpdateFeedbackApi, "/account/delete/feedback") -api.add_resource(EducationVerifyApi, "/account/education/verify") -api.add_resource(EducationApi, "/account/education") -api.add_resource(EducationAutoCompleteApi, "/account/education/autocomplete") -# Change email -api.add_resource(ChangeEmailSendEmailApi, "/account/change-email") -api.add_resource(ChangeEmailCheckApi, "/account/change-email/validity") -api.add_resource(ChangeEmailResetApi, "/account/change-email/reset") -api.add_resource(CheckEmailUnique, "/account/change-email/check-email-unique") -# api.add_resource(AccountEmailApi, '/account/email') -# api.add_resource(AccountEmailVerifyApi, '/account/email-verify') diff --git a/api/controllers/console/workspace/load_balancing_config.py b/api/controllers/console/workspace/load_balancing_config.py index 7c1bc7c075..99a1c1f032 100644 --- a/api/controllers/console/workspace/load_balancing_config.py +++ b/api/controllers/console/workspace/load_balancing_config.py @@ -1,7 +1,7 @@ from flask_restx import Resource, reqparse from werkzeug.exceptions import Forbidden -from controllers.console import api +from controllers.console import console_ns from controllers.console.wraps import account_initialization_required, setup_required from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.errors.validate import CredentialsValidateFailedError @@ -10,6 +10,9 @@ from models.account import Account, TenantAccountRole from services.model_load_balancing_service import ModelLoadBalancingService +@console_ns.route( + "/workspaces/current/model-providers//models/load-balancing-configs/credentials-validate" +) class LoadBalancingCredentialsValidateApi(Resource): @setup_required @login_required @@ -61,6 +64,9 @@ class LoadBalancingCredentialsValidateApi(Resource): return response +@console_ns.route( + 
"/workspaces/current/model-providers//models/load-balancing-configs//credentials-validate" +) class LoadBalancingConfigCredentialsValidateApi(Resource): @setup_required @login_required @@ -111,15 +117,3 @@ class LoadBalancingConfigCredentialsValidateApi(Resource): response["error"] = error return response - - -# Load Balancing Config -api.add_resource( - LoadBalancingCredentialsValidateApi, - "/workspaces/current/model-providers//models/load-balancing-configs/credentials-validate", -) - -api.add_resource( - LoadBalancingConfigCredentialsValidateApi, - "/workspaces/current/model-providers//models/load-balancing-configs//credentials-validate", -) diff --git a/api/controllers/console/workspace/members.py b/api/controllers/console/workspace/members.py index 77f0c9a735..8b89853bd9 100644 --- a/api/controllers/console/workspace/members.py +++ b/api/controllers/console/workspace/members.py @@ -6,7 +6,7 @@ from flask_restx import Resource, marshal_with, reqparse import services from configs import dify_config -from controllers.console import api +from controllers.console import console_ns from controllers.console.auth.error import ( CannotTransferOwnerToSelfError, EmailCodeError, @@ -33,6 +33,7 @@ from services.errors.account import AccountAlreadyInTenantError from services.feature_service import FeatureService +@console_ns.route("/workspaces/current/members") class MemberListApi(Resource): """List all members of current tenant.""" @@ -49,6 +50,7 @@ class MemberListApi(Resource): return {"result": "success", "accounts": members}, 200 +@console_ns.route("/workspaces/current/members/invite-email") class MemberInviteEmailApi(Resource): """Invite a new member by email.""" @@ -111,6 +113,7 @@ class MemberInviteEmailApi(Resource): }, 201 +@console_ns.route("/workspaces/current/members/") class MemberCancelInviteApi(Resource): """Cancel an invitation by member id.""" @@ -143,6 +146,7 @@ class MemberCancelInviteApi(Resource): }, 200 
+@console_ns.route("/workspaces/current/members//update-role") class MemberUpdateRoleApi(Resource): """Update member role.""" @@ -177,6 +181,7 @@ class MemberUpdateRoleApi(Resource): return {"result": "success"} +@console_ns.route("/workspaces/current/dataset-operators") class DatasetOperatorMemberListApi(Resource): """List all members of current tenant.""" @@ -193,6 +198,7 @@ class DatasetOperatorMemberListApi(Resource): return {"result": "success", "accounts": members}, 200 +@console_ns.route("/workspaces/current/members/send-owner-transfer-confirm-email") class SendOwnerTransferEmailApi(Resource): """Send owner transfer email.""" @@ -233,6 +239,7 @@ class SendOwnerTransferEmailApi(Resource): return {"result": "success", "data": token} +@console_ns.route("/workspaces/current/members/owner-transfer-check") class OwnerTransferCheckApi(Resource): @setup_required @login_required @@ -278,6 +285,7 @@ class OwnerTransferCheckApi(Resource): return {"is_valid": True, "email": token_data.get("email"), "token": new_token} +@console_ns.route("/workspaces/current/members//owner-transfer") class OwnerTransfer(Resource): @setup_required @login_required @@ -339,14 +347,3 @@ class OwnerTransfer(Resource): raise ValueError(str(e)) return {"result": "success"} - - -api.add_resource(MemberListApi, "/workspaces/current/members") -api.add_resource(MemberInviteEmailApi, "/workspaces/current/members/invite-email") -api.add_resource(MemberCancelInviteApi, "/workspaces/current/members/") -api.add_resource(MemberUpdateRoleApi, "/workspaces/current/members//update-role") -api.add_resource(DatasetOperatorMemberListApi, "/workspaces/current/dataset-operators") -# owner transfer -api.add_resource(SendOwnerTransferEmailApi, "/workspaces/current/members/send-owner-transfer-confirm-email") -api.add_resource(OwnerTransferCheckApi, "/workspaces/current/members/owner-transfer-check") -api.add_resource(OwnerTransfer, "/workspaces/current/members//owner-transfer") diff --git 
a/api/controllers/console/workspace/model_providers.py b/api/controllers/console/workspace/model_providers.py index 0c9db660aa..7012580362 100644 --- a/api/controllers/console/workspace/model_providers.py +++ b/api/controllers/console/workspace/model_providers.py @@ -5,7 +5,7 @@ from flask_login import current_user from flask_restx import Resource, reqparse from werkzeug.exceptions import Forbidden -from controllers.console import api +from controllers.console import console_ns from controllers.console.wraps import account_initialization_required, setup_required from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.errors.validate import CredentialsValidateFailedError @@ -17,6 +17,7 @@ from services.billing_service import BillingService from services.model_provider_service import ModelProviderService +@console_ns.route("/workspaces/current/model-providers") class ModelProviderListApi(Resource): @setup_required @login_required @@ -45,6 +46,7 @@ class ModelProviderListApi(Resource): return jsonable_encoder({"data": provider_list}) +@console_ns.route("/workspaces/current/model-providers//credentials") class ModelProviderCredentialApi(Resource): @setup_required @login_required @@ -151,6 +153,7 @@ class ModelProviderCredentialApi(Resource): return {"result": "success"}, 204 +@console_ns.route("/workspaces/current/model-providers//credentials/switch") class ModelProviderCredentialSwitchApi(Resource): @setup_required @login_required @@ -175,6 +178,7 @@ class ModelProviderCredentialSwitchApi(Resource): return {"result": "success"} +@console_ns.route("/workspaces/current/model-providers//credentials/validate") class ModelProviderValidateApi(Resource): @setup_required @login_required @@ -211,6 +215,7 @@ class ModelProviderValidateApi(Resource): return response +@console_ns.route("/workspaces//model-providers///") class ModelProviderIconApi(Resource): """ Get model provider icon @@ -229,6 +234,7 @@ class ModelProviderIconApi(Resource): 
return send_file(io.BytesIO(icon), mimetype=mimetype) +@console_ns.route("/workspaces/current/model-providers//preferred-provider-type") class PreferredProviderTypeUpdateApi(Resource): @setup_required @login_required @@ -262,6 +268,7 @@ class PreferredProviderTypeUpdateApi(Resource): return {"result": "success"} +@console_ns.route("/workspaces/current/model-providers//checkout-url") class ModelProviderPaymentCheckoutUrlApi(Resource): @setup_required @login_required @@ -281,21 +288,3 @@ class ModelProviderPaymentCheckoutUrlApi(Resource): prefilled_email=current_user.email, ) return data - - -api.add_resource(ModelProviderListApi, "/workspaces/current/model-providers") - -api.add_resource(ModelProviderCredentialApi, "/workspaces/current/model-providers//credentials") -api.add_resource( - ModelProviderCredentialSwitchApi, "/workspaces/current/model-providers//credentials/switch" -) -api.add_resource(ModelProviderValidateApi, "/workspaces/current/model-providers//credentials/validate") - -api.add_resource( - PreferredProviderTypeUpdateApi, "/workspaces/current/model-providers//preferred-provider-type" -) -api.add_resource(ModelProviderPaymentCheckoutUrlApi, "/workspaces/current/model-providers//checkout-url") -api.add_resource( - ModelProviderIconApi, - "/workspaces//model-providers///", -) diff --git a/api/controllers/console/workspace/models.py b/api/controllers/console/workspace/models.py index f174fcc5d3..d38bb16ea7 100644 --- a/api/controllers/console/workspace/models.py +++ b/api/controllers/console/workspace/models.py @@ -4,7 +4,7 @@ from flask_login import current_user from flask_restx import Resource, reqparse from werkzeug.exceptions import Forbidden -from controllers.console import api +from controllers.console import console_ns from controllers.console.wraps import account_initialization_required, setup_required from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.errors.validate import CredentialsValidateFailedError @@ 
-17,6 +17,7 @@ from services.model_provider_service import ModelProviderService logger = logging.getLogger(__name__) +@console_ns.route("/workspaces/current/default-model") class DefaultModelApi(Resource): @setup_required @login_required @@ -85,6 +86,7 @@ class DefaultModelApi(Resource): return {"result": "success"} +@console_ns.route("/workspaces/current/model-providers//models") class ModelProviderModelApi(Resource): @setup_required @login_required @@ -187,6 +189,7 @@ class ModelProviderModelApi(Resource): return {"result": "success"}, 204 +@console_ns.route("/workspaces/current/model-providers//models/credentials") class ModelProviderModelCredentialApi(Resource): @setup_required @login_required @@ -364,6 +367,7 @@ class ModelProviderModelCredentialApi(Resource): return {"result": "success"}, 204 +@console_ns.route("/workspaces/current/model-providers//models/credentials/switch") class ModelProviderModelCredentialSwitchApi(Resource): @setup_required @login_required @@ -395,6 +399,9 @@ class ModelProviderModelCredentialSwitchApi(Resource): return {"result": "success"} +@console_ns.route( + "/workspaces/current/model-providers//models/enable", endpoint="model-provider-model-enable" +) class ModelProviderModelEnableApi(Resource): @setup_required @login_required @@ -422,6 +429,9 @@ class ModelProviderModelEnableApi(Resource): return {"result": "success"} +@console_ns.route( + "/workspaces/current/model-providers//models/disable", endpoint="model-provider-model-disable" +) class ModelProviderModelDisableApi(Resource): @setup_required @login_required @@ -449,6 +459,7 @@ class ModelProviderModelDisableApi(Resource): return {"result": "success"} +@console_ns.route("/workspaces/current/model-providers//models/credentials/validate") class ModelProviderModelValidateApi(Resource): @setup_required @login_required @@ -494,6 +505,7 @@ class ModelProviderModelValidateApi(Resource): return response +@console_ns.route("/workspaces/current/model-providers//models/parameter-rules") 
class ModelProviderModelParameterRuleApi(Resource): @setup_required @login_required @@ -513,6 +525,7 @@ class ModelProviderModelParameterRuleApi(Resource): return jsonable_encoder({"data": parameter_rules}) +@console_ns.route("/workspaces/current/models/model-types/") class ModelProviderAvailableModelApi(Resource): @setup_required @login_required @@ -524,32 +537,3 @@ class ModelProviderAvailableModelApi(Resource): models = model_provider_service.get_models_by_model_type(tenant_id=tenant_id, model_type=model_type) return jsonable_encoder({"data": models}) - - -api.add_resource(ModelProviderModelApi, "/workspaces/current/model-providers//models") -api.add_resource( - ModelProviderModelEnableApi, - "/workspaces/current/model-providers//models/enable", - endpoint="model-provider-model-enable", -) -api.add_resource( - ModelProviderModelDisableApi, - "/workspaces/current/model-providers//models/disable", - endpoint="model-provider-model-disable", -) -api.add_resource( - ModelProviderModelCredentialApi, "/workspaces/current/model-providers//models/credentials" -) -api.add_resource( - ModelProviderModelCredentialSwitchApi, - "/workspaces/current/model-providers//models/credentials/switch", -) -api.add_resource( - ModelProviderModelValidateApi, "/workspaces/current/model-providers//models/credentials/validate" -) - -api.add_resource( - ModelProviderModelParameterRuleApi, "/workspaces/current/model-providers//models/parameter-rules" -) -api.add_resource(ModelProviderAvailableModelApi, "/workspaces/current/models/model-types/") -api.add_resource(DefaultModelApi, "/workspaces/current/default-model") diff --git a/api/controllers/console/workspace/plugin.py b/api/controllers/console/workspace/plugin.py index fd5421fa64..7c70fb8aa0 100644 --- a/api/controllers/console/workspace/plugin.py +++ b/api/controllers/console/workspace/plugin.py @@ -6,7 +6,7 @@ from flask_restx import Resource, reqparse from werkzeug.exceptions import Forbidden from configs import dify_config -from 
controllers.console import api +from controllers.console import console_ns from controllers.console.workspace import plugin_permission_required from controllers.console.wraps import account_initialization_required, setup_required from core.model_runtime.utils.encoders import jsonable_encoder @@ -19,6 +19,7 @@ from services.plugin.plugin_permission_service import PluginPermissionService from services.plugin.plugin_service import PluginService +@console_ns.route("/workspaces/current/plugin/debugging-key") class PluginDebuggingKeyApi(Resource): @setup_required @login_required @@ -37,6 +38,7 @@ class PluginDebuggingKeyApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/list") class PluginListApi(Resource): @setup_required @login_required @@ -55,6 +57,7 @@ class PluginListApi(Resource): return jsonable_encoder({"plugins": plugins_with_total.list, "total": plugins_with_total.total}) +@console_ns.route("/workspaces/current/plugin/list/latest-versions") class PluginListLatestVersionsApi(Resource): @setup_required @login_required @@ -72,6 +75,7 @@ class PluginListLatestVersionsApi(Resource): return jsonable_encoder({"versions": versions}) +@console_ns.route("/workspaces/current/plugin/list/installations/ids") class PluginListInstallationsFromIdsApi(Resource): @setup_required @login_required @@ -91,6 +95,7 @@ class PluginListInstallationsFromIdsApi(Resource): return jsonable_encoder({"plugins": plugins}) +@console_ns.route("/workspaces/current/plugin/icon") class PluginIconApi(Resource): @setup_required def get(self): @@ -108,6 +113,7 @@ class PluginIconApi(Resource): return send_file(io.BytesIO(icon_bytes), mimetype=mimetype, max_age=icon_cache_max_age) +@console_ns.route("/workspaces/current/plugin/upload/pkg") class PluginUploadFromPkgApi(Resource): @setup_required @login_required @@ -131,6 +137,7 @@ class PluginUploadFromPkgApi(Resource): return jsonable_encoder(response) +@console_ns.route("/workspaces/current/plugin/upload/github") class 
PluginUploadFromGithubApi(Resource): @setup_required @login_required @@ -153,6 +160,7 @@ class PluginUploadFromGithubApi(Resource): return jsonable_encoder(response) +@console_ns.route("/workspaces/current/plugin/upload/bundle") class PluginUploadFromBundleApi(Resource): @setup_required @login_required @@ -176,6 +184,7 @@ class PluginUploadFromBundleApi(Resource): return jsonable_encoder(response) +@console_ns.route("/workspaces/current/plugin/install/pkg") class PluginInstallFromPkgApi(Resource): @setup_required @login_required @@ -201,6 +210,7 @@ class PluginInstallFromPkgApi(Resource): return jsonable_encoder(response) +@console_ns.route("/workspaces/current/plugin/install/github") class PluginInstallFromGithubApi(Resource): @setup_required @login_required @@ -230,6 +240,7 @@ class PluginInstallFromGithubApi(Resource): return jsonable_encoder(response) +@console_ns.route("/workspaces/current/plugin/install/marketplace") class PluginInstallFromMarketplaceApi(Resource): @setup_required @login_required @@ -255,6 +266,7 @@ class PluginInstallFromMarketplaceApi(Resource): return jsonable_encoder(response) +@console_ns.route("/workspaces/current/plugin/marketplace/pkg") class PluginFetchMarketplacePkgApi(Resource): @setup_required @login_required @@ -280,6 +292,7 @@ class PluginFetchMarketplacePkgApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/fetch-manifest") class PluginFetchManifestApi(Resource): @setup_required @login_required @@ -304,6 +317,7 @@ class PluginFetchManifestApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/tasks") class PluginFetchInstallTasksApi(Resource): @setup_required @login_required @@ -325,6 +339,7 @@ class PluginFetchInstallTasksApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/tasks/") class PluginFetchInstallTaskApi(Resource): @setup_required @login_required @@ -339,6 +354,7 @@ class PluginFetchInstallTaskApi(Resource): raise ValueError(e) 
+@console_ns.route("/workspaces/current/plugin/tasks//delete") class PluginDeleteInstallTaskApi(Resource): @setup_required @login_required @@ -353,6 +369,7 @@ class PluginDeleteInstallTaskApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/tasks/delete_all") class PluginDeleteAllInstallTaskItemsApi(Resource): @setup_required @login_required @@ -367,6 +384,7 @@ class PluginDeleteAllInstallTaskItemsApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/tasks//delete/") class PluginDeleteInstallTaskItemApi(Resource): @setup_required @login_required @@ -381,6 +399,7 @@ class PluginDeleteInstallTaskItemApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/upgrade/marketplace") class PluginUpgradeFromMarketplaceApi(Resource): @setup_required @login_required @@ -404,6 +423,7 @@ class PluginUpgradeFromMarketplaceApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/upgrade/github") class PluginUpgradeFromGithubApi(Resource): @setup_required @login_required @@ -435,6 +455,7 @@ class PluginUpgradeFromGithubApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/uninstall") class PluginUninstallApi(Resource): @setup_required @login_required @@ -453,6 +474,7 @@ class PluginUninstallApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/permission/change") class PluginChangePermissionApi(Resource): @setup_required @login_required @@ -475,6 +497,7 @@ class PluginChangePermissionApi(Resource): return {"success": PluginPermissionService.change_permission(tenant_id, install_permission, debug_permission)} +@console_ns.route("/workspaces/current/plugin/permission/fetch") class PluginFetchPermissionApi(Resource): @setup_required @login_required @@ -499,6 +522,7 @@ class PluginFetchPermissionApi(Resource): ) +@console_ns.route("/workspaces/current/plugin/parameters/dynamic-options") class 
PluginFetchDynamicSelectOptionsApi(Resource): @setup_required @login_required @@ -535,6 +559,7 @@ class PluginFetchDynamicSelectOptionsApi(Resource): return jsonable_encoder({"options": options}) +@console_ns.route("/workspaces/current/plugin/preferences/change") class PluginChangePreferencesApi(Resource): @setup_required @login_required @@ -590,6 +615,7 @@ class PluginChangePreferencesApi(Resource): return jsonable_encoder({"success": True}) +@console_ns.route("/workspaces/current/plugin/preferences/fetch") class PluginFetchPreferencesApi(Resource): @setup_required @login_required @@ -628,6 +654,7 @@ class PluginFetchPreferencesApi(Resource): return jsonable_encoder({"permission": permission_dict, "auto_upgrade": auto_upgrade_dict}) +@console_ns.route("/workspaces/current/plugin/preferences/autoupgrade/exclude") class PluginAutoUpgradeExcludePluginApi(Resource): @setup_required @login_required @@ -641,35 +668,3 @@ class PluginAutoUpgradeExcludePluginApi(Resource): args = req.parse_args() return jsonable_encoder({"success": PluginAutoUpgradeService.exclude_plugin(tenant_id, args["plugin_id"])}) - - -api.add_resource(PluginDebuggingKeyApi, "/workspaces/current/plugin/debugging-key") -api.add_resource(PluginListApi, "/workspaces/current/plugin/list") -api.add_resource(PluginListLatestVersionsApi, "/workspaces/current/plugin/list/latest-versions") -api.add_resource(PluginListInstallationsFromIdsApi, "/workspaces/current/plugin/list/installations/ids") -api.add_resource(PluginIconApi, "/workspaces/current/plugin/icon") -api.add_resource(PluginUploadFromPkgApi, "/workspaces/current/plugin/upload/pkg") -api.add_resource(PluginUploadFromGithubApi, "/workspaces/current/plugin/upload/github") -api.add_resource(PluginUploadFromBundleApi, "/workspaces/current/plugin/upload/bundle") -api.add_resource(PluginInstallFromPkgApi, "/workspaces/current/plugin/install/pkg") -api.add_resource(PluginInstallFromGithubApi, "/workspaces/current/plugin/install/github") 
-api.add_resource(PluginUpgradeFromMarketplaceApi, "/workspaces/current/plugin/upgrade/marketplace") -api.add_resource(PluginUpgradeFromGithubApi, "/workspaces/current/plugin/upgrade/github") -api.add_resource(PluginInstallFromMarketplaceApi, "/workspaces/current/plugin/install/marketplace") -api.add_resource(PluginFetchManifestApi, "/workspaces/current/plugin/fetch-manifest") -api.add_resource(PluginFetchInstallTasksApi, "/workspaces/current/plugin/tasks") -api.add_resource(PluginFetchInstallTaskApi, "/workspaces/current/plugin/tasks/") -api.add_resource(PluginDeleteInstallTaskApi, "/workspaces/current/plugin/tasks//delete") -api.add_resource(PluginDeleteAllInstallTaskItemsApi, "/workspaces/current/plugin/tasks/delete_all") -api.add_resource(PluginDeleteInstallTaskItemApi, "/workspaces/current/plugin/tasks//delete/") -api.add_resource(PluginUninstallApi, "/workspaces/current/plugin/uninstall") -api.add_resource(PluginFetchMarketplacePkgApi, "/workspaces/current/plugin/marketplace/pkg") - -api.add_resource(PluginChangePermissionApi, "/workspaces/current/plugin/permission/change") -api.add_resource(PluginFetchPermissionApi, "/workspaces/current/plugin/permission/fetch") - -api.add_resource(PluginFetchDynamicSelectOptionsApi, "/workspaces/current/plugin/parameters/dynamic-options") - -api.add_resource(PluginFetchPreferencesApi, "/workspaces/current/plugin/preferences/fetch") -api.add_resource(PluginChangePreferencesApi, "/workspaces/current/plugin/preferences/change") -api.add_resource(PluginAutoUpgradeExcludePluginApi, "/workspaces/current/plugin/preferences/autoupgrade/exclude") diff --git a/api/controllers/console/workspace/tool_providers.py b/api/controllers/console/workspace/tool_providers.py index 8693d99e23..9285577f72 100644 --- a/api/controllers/console/workspace/tool_providers.py +++ b/api/controllers/console/workspace/tool_providers.py @@ -10,7 +10,7 @@ from flask_restx import ( from werkzeug.exceptions import Forbidden from configs import dify_config 
-from controllers.console import api +from controllers.console import console_ns from controllers.console.wraps import ( account_initialization_required, enterprise_license_required, @@ -47,6 +47,7 @@ def is_valid_url(url: str) -> bool: return False +@console_ns.route("/workspaces/current/tool-providers") class ToolProviderListApi(Resource): @setup_required @login_required @@ -71,6 +72,7 @@ class ToolProviderListApi(Resource): return ToolCommonService.list_tool_providers(user_id, tenant_id, args.get("type", None)) +@console_ns.route("/workspaces/current/tool-provider/builtin//tools") class ToolBuiltinProviderListToolsApi(Resource): @setup_required @login_required @@ -88,6 +90,7 @@ class ToolBuiltinProviderListToolsApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/builtin//info") class ToolBuiltinProviderInfoApi(Resource): @setup_required @login_required @@ -100,6 +103,7 @@ class ToolBuiltinProviderInfoApi(Resource): return jsonable_encoder(BuiltinToolManageService.get_builtin_tool_provider_info(tenant_id, provider)) +@console_ns.route("/workspaces/current/tool-provider/builtin//delete") class ToolBuiltinProviderDeleteApi(Resource): @setup_required @login_required @@ -121,6 +125,7 @@ class ToolBuiltinProviderDeleteApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/builtin//add") class ToolBuiltinProviderAddApi(Resource): @setup_required @login_required @@ -150,6 +155,7 @@ class ToolBuiltinProviderAddApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/builtin//update") class ToolBuiltinProviderUpdateApi(Resource): @setup_required @login_required @@ -181,6 +187,7 @@ class ToolBuiltinProviderUpdateApi(Resource): return result +@console_ns.route("/workspaces/current/tool-provider/builtin//credentials") class ToolBuiltinProviderGetCredentialsApi(Resource): @setup_required @login_required @@ -196,6 +203,7 @@ class ToolBuiltinProviderGetCredentialsApi(Resource): ) 
+@console_ns.route("/workspaces/current/tool-provider/builtin//icon") class ToolBuiltinProviderIconApi(Resource): @setup_required def get(self, provider): @@ -204,6 +212,7 @@ class ToolBuiltinProviderIconApi(Resource): return send_file(io.BytesIO(icon_bytes), mimetype=mimetype, max_age=icon_cache_max_age) +@console_ns.route("/workspaces/current/tool-provider/api/add") class ToolApiProviderAddApi(Resource): @setup_required @login_required @@ -243,6 +252,7 @@ class ToolApiProviderAddApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/api/remote") class ToolApiProviderGetRemoteSchemaApi(Resource): @setup_required @login_required @@ -266,6 +276,7 @@ class ToolApiProviderGetRemoteSchemaApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/api/tools") class ToolApiProviderListToolsApi(Resource): @setup_required @login_required @@ -291,6 +302,7 @@ class ToolApiProviderListToolsApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/api/update") class ToolApiProviderUpdateApi(Resource): @setup_required @login_required @@ -332,6 +344,7 @@ class ToolApiProviderUpdateApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/api/delete") class ToolApiProviderDeleteApi(Resource): @setup_required @login_required @@ -358,6 +371,7 @@ class ToolApiProviderDeleteApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/api/get") class ToolApiProviderGetApi(Resource): @setup_required @login_required @@ -381,6 +395,7 @@ class ToolApiProviderGetApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/builtin//credential/schema/") class ToolBuiltinProviderCredentialsSchemaApi(Resource): @setup_required @login_required @@ -396,6 +411,7 @@ class ToolBuiltinProviderCredentialsSchemaApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/api/schema") class ToolApiProviderSchemaApi(Resource): @setup_required @login_required @@ -412,6 +428,7 @@ class ToolApiProviderSchemaApi(Resource): ) 
+@console_ns.route("/workspaces/current/tool-provider/api/test/pre") class ToolApiProviderPreviousTestApi(Resource): @setup_required @login_required @@ -439,6 +456,7 @@ class ToolApiProviderPreviousTestApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/workflow/create") class ToolWorkflowProviderCreateApi(Resource): @setup_required @login_required @@ -478,6 +496,7 @@ class ToolWorkflowProviderCreateApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/workflow/update") class ToolWorkflowProviderUpdateApi(Resource): @setup_required @login_required @@ -520,6 +539,7 @@ class ToolWorkflowProviderUpdateApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/workflow/delete") class ToolWorkflowProviderDeleteApi(Resource): @setup_required @login_required @@ -545,6 +565,7 @@ class ToolWorkflowProviderDeleteApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/workflow/get") class ToolWorkflowProviderGetApi(Resource): @setup_required @login_required @@ -579,6 +600,7 @@ class ToolWorkflowProviderGetApi(Resource): return jsonable_encoder(tool) +@console_ns.route("/workspaces/current/tool-provider/workflow/tools") class ToolWorkflowProviderListToolApi(Resource): @setup_required @login_required @@ -603,6 +625,7 @@ class ToolWorkflowProviderListToolApi(Resource): ) +@console_ns.route("/workspaces/current/tools/builtin") class ToolBuiltinListApi(Resource): @setup_required @login_required @@ -624,6 +647,7 @@ class ToolBuiltinListApi(Resource): ) +@console_ns.route("/workspaces/current/tools/api") class ToolApiListApi(Resource): @setup_required @login_required @@ -642,6 +666,7 @@ class ToolApiListApi(Resource): ) +@console_ns.route("/workspaces/current/tools/workflow") class ToolWorkflowListApi(Resource): @setup_required @login_required @@ -663,6 +688,7 @@ class ToolWorkflowListApi(Resource): ) +@console_ns.route("/workspaces/current/tool-labels") class ToolLabelsApi(Resource): @setup_required @login_required @@ 
-672,6 +698,7 @@ class ToolLabelsApi(Resource): return jsonable_encoder(ToolLabelsService.list_tool_labels()) +@console_ns.route("/oauth/plugin//tool/authorization-url") class ToolPluginOAuthApi(Resource): @setup_required @login_required @@ -716,6 +743,7 @@ class ToolPluginOAuthApi(Resource): return response +@console_ns.route("/oauth/plugin//tool/callback") class ToolOAuthCallback(Resource): @setup_required def get(self, provider): @@ -766,6 +794,7 @@ class ToolOAuthCallback(Resource): return redirect(f"{dify_config.CONSOLE_WEB_URL}/oauth-callback") +@console_ns.route("/workspaces/current/tool-provider/builtin//default-credential") class ToolBuiltinProviderSetDefaultApi(Resource): @setup_required @login_required @@ -779,6 +808,7 @@ class ToolBuiltinProviderSetDefaultApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/builtin//oauth/custom-client") class ToolOAuthCustomClient(Resource): @setup_required @login_required @@ -822,6 +852,7 @@ class ToolOAuthCustomClient(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/builtin//oauth/client-schema") class ToolBuiltinProviderGetOauthClientSchemaApi(Resource): @setup_required @login_required @@ -834,6 +865,7 @@ class ToolBuiltinProviderGetOauthClientSchemaApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/builtin//credential/info") class ToolBuiltinProviderGetCredentialInfoApi(Resource): @setup_required @login_required @@ -849,6 +881,7 @@ class ToolBuiltinProviderGetCredentialInfoApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/mcp") class ToolProviderMCPApi(Resource): @setup_required @login_required @@ -933,6 +966,7 @@ class ToolProviderMCPApi(Resource): return {"result": "success"} +@console_ns.route("/workspaces/current/tool-provider/mcp/auth") class ToolMCPAuthApi(Resource): @setup_required @login_required @@ -978,6 +1012,7 @@ class ToolMCPAuthApi(Resource): raise ValueError(f"Failed to connect to MCP server: {e}") from e 
+@console_ns.route("/workspaces/current/tool-provider/mcp/tools/") class ToolMCPDetailApi(Resource): @setup_required @login_required @@ -988,6 +1023,7 @@ class ToolMCPDetailApi(Resource): return jsonable_encoder(ToolTransformService.mcp_provider_to_user_provider(provider, for_list=True)) +@console_ns.route("/workspaces/current/tools/mcp") class ToolMCPListAllApi(Resource): @setup_required @login_required @@ -1001,6 +1037,7 @@ class ToolMCPListAllApi(Resource): return [tool.to_dict() for tool in tools] +@console_ns.route("/workspaces/current/tool-provider/mcp/update/") class ToolMCPUpdateApi(Resource): @setup_required @login_required @@ -1014,6 +1051,7 @@ class ToolMCPUpdateApi(Resource): return jsonable_encoder(tools) +@console_ns.route("/mcp/oauth/callback") class ToolMCPCallbackApi(Resource): def get(self): parser = reqparse.RequestParser() @@ -1024,67 +1062,3 @@ class ToolMCPCallbackApi(Resource): authorization_code = args["code"] handle_callback(state_key, authorization_code) return redirect(f"{dify_config.CONSOLE_WEB_URL}/oauth-callback") - - -# tool provider -api.add_resource(ToolProviderListApi, "/workspaces/current/tool-providers") - -# tool oauth -api.add_resource(ToolPluginOAuthApi, "/oauth/plugin//tool/authorization-url") -api.add_resource(ToolOAuthCallback, "/oauth/plugin//tool/callback") -api.add_resource(ToolOAuthCustomClient, "/workspaces/current/tool-provider/builtin//oauth/custom-client") - -# builtin tool provider -api.add_resource(ToolBuiltinProviderListToolsApi, "/workspaces/current/tool-provider/builtin//tools") -api.add_resource(ToolBuiltinProviderInfoApi, "/workspaces/current/tool-provider/builtin//info") -api.add_resource(ToolBuiltinProviderAddApi, "/workspaces/current/tool-provider/builtin//add") -api.add_resource(ToolBuiltinProviderDeleteApi, "/workspaces/current/tool-provider/builtin//delete") -api.add_resource(ToolBuiltinProviderUpdateApi, "/workspaces/current/tool-provider/builtin//update") -api.add_resource( - 
ToolBuiltinProviderSetDefaultApi, "/workspaces/current/tool-provider/builtin//default-credential" -) -api.add_resource( - ToolBuiltinProviderGetCredentialInfoApi, "/workspaces/current/tool-provider/builtin//credential/info" -) -api.add_resource( - ToolBuiltinProviderGetCredentialsApi, "/workspaces/current/tool-provider/builtin//credentials" -) -api.add_resource( - ToolBuiltinProviderCredentialsSchemaApi, - "/workspaces/current/tool-provider/builtin//credential/schema/", -) -api.add_resource( - ToolBuiltinProviderGetOauthClientSchemaApi, - "/workspaces/current/tool-provider/builtin//oauth/client-schema", -) -api.add_resource(ToolBuiltinProviderIconApi, "/workspaces/current/tool-provider/builtin//icon") - -# api tool provider -api.add_resource(ToolApiProviderAddApi, "/workspaces/current/tool-provider/api/add") -api.add_resource(ToolApiProviderGetRemoteSchemaApi, "/workspaces/current/tool-provider/api/remote") -api.add_resource(ToolApiProviderListToolsApi, "/workspaces/current/tool-provider/api/tools") -api.add_resource(ToolApiProviderUpdateApi, "/workspaces/current/tool-provider/api/update") -api.add_resource(ToolApiProviderDeleteApi, "/workspaces/current/tool-provider/api/delete") -api.add_resource(ToolApiProviderGetApi, "/workspaces/current/tool-provider/api/get") -api.add_resource(ToolApiProviderSchemaApi, "/workspaces/current/tool-provider/api/schema") -api.add_resource(ToolApiProviderPreviousTestApi, "/workspaces/current/tool-provider/api/test/pre") - -# workflow tool provider -api.add_resource(ToolWorkflowProviderCreateApi, "/workspaces/current/tool-provider/workflow/create") -api.add_resource(ToolWorkflowProviderUpdateApi, "/workspaces/current/tool-provider/workflow/update") -api.add_resource(ToolWorkflowProviderDeleteApi, "/workspaces/current/tool-provider/workflow/delete") -api.add_resource(ToolWorkflowProviderGetApi, "/workspaces/current/tool-provider/workflow/get") -api.add_resource(ToolWorkflowProviderListToolApi, 
"/workspaces/current/tool-provider/workflow/tools") - -# mcp tool provider -api.add_resource(ToolMCPDetailApi, "/workspaces/current/tool-provider/mcp/tools/") -api.add_resource(ToolProviderMCPApi, "/workspaces/current/tool-provider/mcp") -api.add_resource(ToolMCPUpdateApi, "/workspaces/current/tool-provider/mcp/update/") -api.add_resource(ToolMCPAuthApi, "/workspaces/current/tool-provider/mcp/auth") -api.add_resource(ToolMCPCallbackApi, "/mcp/oauth/callback") - -api.add_resource(ToolBuiltinListApi, "/workspaces/current/tools/builtin") -api.add_resource(ToolApiListApi, "/workspaces/current/tools/api") -api.add_resource(ToolMCPListAllApi, "/workspaces/current/tools/mcp") -api.add_resource(ToolWorkflowListApi, "/workspaces/current/tools/workflow") -api.add_resource(ToolLabelsApi, "/workspaces/current/tool-labels") diff --git a/api/controllers/console/workspace/workspace.py b/api/controllers/console/workspace/workspace.py index 13a61052ae..bc748ac3d2 100644 --- a/api/controllers/console/workspace/workspace.py +++ b/api/controllers/console/workspace/workspace.py @@ -120,8 +120,8 @@ class WorkspaceListApi(Resource): }, 200 -@console_ns.route("/workspaces/current") -@console_ns.route("/info") # Deprecated +@console_ns.route("/workspaces/current", endpoint="workspaces_current") +@console_ns.route("/info", endpoint="info") # Deprecated class TenantApi(Resource): @setup_required @login_required From aead192743a43392a62c9c082694d6c37ce35267 Mon Sep 17 00:00:00 2001 From: Xiyuan Chen <52963600+GareArc@users.noreply.github.com> Date: Fri, 10 Oct 2025 01:24:36 -0700 Subject: [PATCH 74/82] Fix/token exp when exchange main (#26708) --- api/controllers/web/passport.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/api/controllers/web/passport.py b/api/controllers/web/passport.py index 6f7105a724..7190f06426 100644 --- a/api/controllers/web/passport.py +++ b/api/controllers/web/passport.py @@ -126,6 +126,8 @@ def 
exchange_token_for_existing_web_user(app_code: str, enterprise_user_decoded: end_user_id = enterprise_user_decoded.get("end_user_id") session_id = enterprise_user_decoded.get("session_id") user_auth_type = enterprise_user_decoded.get("auth_type") + exchanged_token_expires_unix = enterprise_user_decoded.get("exp") + if not user_auth_type: raise Unauthorized("Missing auth_type in the token.") @@ -169,8 +171,11 @@ def exchange_token_for_existing_web_user(app_code: str, enterprise_user_decoded: ) db.session.add(end_user) db.session.commit() - exp_dt = datetime.now(UTC) + timedelta(minutes=dify_config.ACCESS_TOKEN_EXPIRE_MINUTES) - exp = int(exp_dt.timestamp()) + + exp = int((datetime.now(UTC) + timedelta(minutes=dify_config.ACCESS_TOKEN_EXPIRE_MINUTES)).timestamp()) + if exchanged_token_expires_unix: + exp = int(exchanged_token_expires_unix) + payload = { "iss": site.id, "sub": "Web API Passport", From ab2eacb6c1a18d329213ecbe9ff780c39646594a Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Fri, 10 Oct 2025 17:30:13 +0900 Subject: [PATCH 75/82] use model_validate (#26182) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- api/controllers/console/app/model_config.py | 4 +- .../console/datasets/data_source.py | 18 +++++---- api/controllers/console/datasets/datasets.py | 36 +++++++++-------- .../console/datasets/datasets_document.py | 40 ++++++++++--------- .../console/datasets/datasets_segments.py | 4 +- api/controllers/console/datasets/metadata.py | 4 +- .../datasets/rag_pipeline/rag_pipeline.py | 2 +- api/controllers/inner_api/plugin/wraps.py | 2 +- .../service_api/dataset/dataset.py | 2 +- .../service_api/dataset/document.py | 8 ++-- .../service_api/dataset/metadata.py | 4 +- 
.../rag_pipeline/rag_pipeline_workflow.py | 2 +- .../service_api/dataset/segment.py | 2 +- .../easy_ui_based_app/agent/manager.py | 2 +- api/core/app/apps/pipeline/pipeline_runner.py | 2 +- .../datasource/entities/common_entities.py | 7 ++-- api/core/entities/provider_configuration.py | 8 ++-- .../helper/code_executor/code_executor.py | 2 +- api/core/helper/marketplace.py | 4 +- api/core/indexing_runner.py | 38 ++++++++++-------- api/core/mcp/session/client_session.py | 2 +- .../model_runtime/entities/common_entities.py | 7 ++-- .../entities/provider_entities.py | 7 ++-- .../model_providers/model_provider_factory.py | 12 +++--- api/core/moderation/api/api.py | 4 +- api/core/plugin/entities/request.py | 10 ++--- api/core/plugin/impl/base.py | 28 ++++++------- api/core/plugin/impl/datasource.py | 6 ++- api/core/plugin/impl/model.py | 18 ++++----- api/core/rag/datasource/retrieval_service.py | 2 +- .../rag/extractor/entity/extract_setting.py | 6 --- .../processor/paragraph_index_processor.py | 4 +- .../processor/parent_child_index_processor.py | 8 ++-- .../processor/qa_index_processor.py | 6 +-- api/core/tools/builtin_tool/provider.py | 2 +- api/core/tools/entities/common_entities.py | 7 ++-- api/core/tools/mcp_tool/provider.py | 2 +- api/core/tools/tool_manager.py | 2 +- .../command_channels/redis_channel.py | 4 +- api/core/workflow/nodes/end/end_node.py | 2 +- .../nodes/iteration/iteration_start_node.py | 2 +- api/core/workflow/nodes/list_operator/node.py | 2 +- api/core/workflow/nodes/loop/loop_end_node.py | 2 +- .../workflow/nodes/loop/loop_start_node.py | 2 +- api/core/workflow/nodes/start/start_node.py | 2 +- .../variable_aggregator_node.py | 2 +- ...rameters_cache_when_sync_draft_workflow.py | 2 +- ...oin_when_app_published_workflow_updated.py | 2 +- api/models/dataset.py | 4 +- api/models/tools.py | 11 +++-- api/services/app_dsl_service.py | 12 +++--- api/services/enterprise/enterprise_service.py | 4 +- .../entities/model_provider_entities.py | 23 ++++++----- 
api/services/hit_testing_service.py | 2 +- api/services/ops_service.py | 2 +- api/services/plugin/plugin_migration.py | 2 +- api/services/rag_pipeline/rag_pipeline.py | 2 +- .../rag_pipeline/rag_pipeline_dsl_service.py | 20 +++++----- .../rag_pipeline_transform_service.py | 4 +- api/services/tools/tools_transform_service.py | 3 +- api/tasks/ops_trace_task.py | 2 +- .../priority_rag_pipeline_run_task.py | 4 +- .../rag_pipeline/rag_pipeline_run_task.py | 4 +- .../tools/api_tool/test_api_tool.py | 2 +- .../rag/datasource/vdb/milvus/test_milvus.py | 4 +- .../core/workflow/nodes/test_list_operator.py | 2 +- .../nodes/test_question_classifier_node.py | 4 +- .../core/workflow/test_system_variable.py | 38 +++++++++--------- .../services/test_metadata_bug_complete.py | 4 +- .../services/test_metadata_nullable_bug.py | 2 +- 70 files changed, 260 insertions(+), 241 deletions(-) diff --git a/api/controllers/console/app/model_config.py b/api/controllers/console/app/model_config.py index 11df511840..e71b774d3e 100644 --- a/api/controllers/console/app/model_config.py +++ b/api/controllers/console/app/model_config.py @@ -90,7 +90,7 @@ class ModelConfigResource(Resource): if not isinstance(tool, dict) or len(tool.keys()) <= 3: continue - agent_tool_entity = AgentToolEntity(**tool) + agent_tool_entity = AgentToolEntity.model_validate(tool) # get tool try: tool_runtime = ToolManager.get_agent_tool_runtime( @@ -124,7 +124,7 @@ class ModelConfigResource(Resource): # encrypt agent tool parameters if it's secret-input agent_mode = new_app_model_config.agent_mode_dict for tool in agent_mode.get("tools") or []: - agent_tool_entity = AgentToolEntity(**tool) + agent_tool_entity = AgentToolEntity.model_validate(tool) # get tool key = f"{agent_tool_entity.provider_id}.{agent_tool_entity.provider_type}.{agent_tool_entity.tool_name}" diff --git a/api/controllers/console/datasets/data_source.py b/api/controllers/console/datasets/data_source.py index 370e0c0d14..b0f18c11d4 100644 --- 
a/api/controllers/console/datasets/data_source.py +++ b/api/controllers/console/datasets/data_source.py @@ -15,7 +15,7 @@ from core.datasource.entities.datasource_entities import DatasourceProviderType, from core.datasource.online_document.online_document_plugin import OnlineDocumentDatasourcePlugin from core.indexing_runner import IndexingRunner from core.rag.extractor.entity.datasource_type import DatasourceType -from core.rag.extractor.entity.extract_setting import ExtractSetting +from core.rag.extractor.entity.extract_setting import ExtractSetting, NotionInfo from core.rag.extractor.notion_extractor import NotionExtractor from extensions.ext_database import db from fields.data_source_fields import integrate_list_fields, integrate_notion_info_list_fields @@ -257,13 +257,15 @@ class DataSourceNotionApi(Resource): for page in notion_info["pages"]: extract_setting = ExtractSetting( datasource_type=DatasourceType.NOTION.value, - notion_info={ - "credential_id": credential_id, - "notion_workspace_id": workspace_id, - "notion_obj_id": page["page_id"], - "notion_page_type": page["type"], - "tenant_id": current_user.current_tenant_id, - }, + notion_info=NotionInfo.model_validate( + { + "credential_id": credential_id, + "notion_workspace_id": workspace_id, + "notion_obj_id": page["page_id"], + "notion_page_type": page["type"], + "tenant_id": current_user.current_tenant_id, + } + ), document_model=args["doc_form"], ) extract_settings.append(extract_setting) diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index ac088b790e..284f88ff1e 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -24,7 +24,7 @@ from core.model_runtime.entities.model_entities import ModelType from core.provider_manager import ProviderManager from core.rag.datasource.vdb.vector_type import VectorType from core.rag.extractor.entity.datasource_type import DatasourceType -from 
core.rag.extractor.entity.extract_setting import ExtractSetting +from core.rag.extractor.entity.extract_setting import ExtractSetting, NotionInfo, WebsiteInfo from core.rag.retrieval.retrieval_methods import RetrievalMethod from extensions.ext_database import db from fields.app_fields import related_app_list @@ -513,13 +513,15 @@ class DatasetIndexingEstimateApi(Resource): for page in notion_info["pages"]: extract_setting = ExtractSetting( datasource_type=DatasourceType.NOTION.value, - notion_info={ - "credential_id": credential_id, - "notion_workspace_id": workspace_id, - "notion_obj_id": page["page_id"], - "notion_page_type": page["type"], - "tenant_id": current_user.current_tenant_id, - }, + notion_info=NotionInfo.model_validate( + { + "credential_id": credential_id, + "notion_workspace_id": workspace_id, + "notion_obj_id": page["page_id"], + "notion_page_type": page["type"], + "tenant_id": current_user.current_tenant_id, + } + ), document_model=args["doc_form"], ) extract_settings.append(extract_setting) @@ -528,14 +530,16 @@ class DatasetIndexingEstimateApi(Resource): for url in website_info_list["urls"]: extract_setting = ExtractSetting( datasource_type=DatasourceType.WEBSITE.value, - website_info={ - "provider": website_info_list["provider"], - "job_id": website_info_list["job_id"], - "url": url, - "tenant_id": current_user.current_tenant_id, - "mode": "crawl", - "only_main_content": website_info_list["only_main_content"], - }, + website_info=WebsiteInfo.model_validate( + { + "provider": website_info_list["provider"], + "job_id": website_info_list["job_id"], + "url": url, + "tenant_id": current_user.current_tenant_id, + "mode": "crawl", + "only_main_content": website_info_list["only_main_content"], + } + ), document_model=args["doc_form"], ) extract_settings.append(extract_setting) diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py index c5fa2061bf..a90730e997 100644 --- 
a/api/controllers/console/datasets/datasets_document.py +++ b/api/controllers/console/datasets/datasets_document.py @@ -44,7 +44,7 @@ from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.errors.invoke import InvokeAuthorizationError from core.plugin.impl.exc import PluginDaemonClientSideError from core.rag.extractor.entity.datasource_type import DatasourceType -from core.rag.extractor.entity.extract_setting import ExtractSetting +from core.rag.extractor.entity.extract_setting import ExtractSetting, NotionInfo, WebsiteInfo from extensions.ext_database import db from fields.document_fields import ( dataset_and_document_fields, @@ -305,7 +305,7 @@ class DatasetDocumentListApi(Resource): "doc_language", type=str, default="English", required=False, nullable=False, location="json" ) args = parser.parse_args() - knowledge_config = KnowledgeConfig(**args) + knowledge_config = KnowledgeConfig.model_validate(args) if not dataset.indexing_technique and not knowledge_config.indexing_technique: raise ValueError("indexing_technique is required.") @@ -395,7 +395,7 @@ class DatasetInitApi(Resource): parser.add_argument("embedding_model_provider", type=str, required=False, nullable=True, location="json") args = parser.parse_args() - knowledge_config = KnowledgeConfig(**args) + knowledge_config = KnowledgeConfig.model_validate(args) if knowledge_config.indexing_technique == "high_quality": if knowledge_config.embedding_model is None or knowledge_config.embedding_model_provider is None: raise ValueError("embedding model and embedding model provider are required for high quality indexing.") @@ -547,13 +547,15 @@ class DocumentBatchIndexingEstimateApi(DocumentResource): continue extract_setting = ExtractSetting( datasource_type=DatasourceType.NOTION.value, - notion_info={ - "credential_id": data_source_info["credential_id"], - "notion_workspace_id": data_source_info["notion_workspace_id"], - "notion_obj_id": data_source_info["notion_page_id"], - 
"notion_page_type": data_source_info["type"], - "tenant_id": current_user.current_tenant_id, - }, + notion_info=NotionInfo.model_validate( + { + "credential_id": data_source_info["credential_id"], + "notion_workspace_id": data_source_info["notion_workspace_id"], + "notion_obj_id": data_source_info["notion_page_id"], + "notion_page_type": data_source_info["type"], + "tenant_id": current_user.current_tenant_id, + } + ), document_model=document.doc_form, ) extract_settings.append(extract_setting) @@ -562,14 +564,16 @@ class DocumentBatchIndexingEstimateApi(DocumentResource): continue extract_setting = ExtractSetting( datasource_type=DatasourceType.WEBSITE.value, - website_info={ - "provider": data_source_info["provider"], - "job_id": data_source_info["job_id"], - "url": data_source_info["url"], - "tenant_id": current_user.current_tenant_id, - "mode": data_source_info["mode"], - "only_main_content": data_source_info["only_main_content"], - }, + website_info=WebsiteInfo.model_validate( + { + "provider": data_source_info["provider"], + "job_id": data_source_info["job_id"], + "url": data_source_info["url"], + "tenant_id": current_user.current_tenant_id, + "mode": data_source_info["mode"], + "only_main_content": data_source_info["only_main_content"], + } + ), document_model=document.doc_form, ) extract_settings.append(extract_setting) diff --git a/api/controllers/console/datasets/datasets_segments.py b/api/controllers/console/datasets/datasets_segments.py index 9f2805e2c6..d6bd02483d 100644 --- a/api/controllers/console/datasets/datasets_segments.py +++ b/api/controllers/console/datasets/datasets_segments.py @@ -309,7 +309,7 @@ class DatasetDocumentSegmentUpdateApi(Resource): ) args = parser.parse_args() SegmentService.segment_create_args_validate(args, document) - segment = SegmentService.update_segment(SegmentUpdateArgs(**args), segment, document, dataset) + segment = SegmentService.update_segment(SegmentUpdateArgs.model_validate(args), segment, document, dataset) return 
{"data": marshal(segment, segment_fields), "doc_form": document.doc_form}, 200 @setup_required @@ -564,7 +564,7 @@ class ChildChunkAddApi(Resource): args = parser.parse_args() try: chunks_data = args["chunks"] - chunks = [ChildChunkUpdateArgs(**chunk) for chunk in chunks_data] + chunks = [ChildChunkUpdateArgs.model_validate(chunk) for chunk in chunks_data] child_chunks = SegmentService.update_child_chunks(chunks, segment, document, dataset) except ChildChunkIndexingServiceError as e: raise ChildChunkIndexingError(str(e)) diff --git a/api/controllers/console/datasets/metadata.py b/api/controllers/console/datasets/metadata.py index dc3cd3fce9..8438458617 100644 --- a/api/controllers/console/datasets/metadata.py +++ b/api/controllers/console/datasets/metadata.py @@ -28,7 +28,7 @@ class DatasetMetadataCreateApi(Resource): parser.add_argument("type", type=str, required=True, nullable=False, location="json") parser.add_argument("name", type=str, required=True, nullable=False, location="json") args = parser.parse_args() - metadata_args = MetadataArgs(**args) + metadata_args = MetadataArgs.model_validate(args) dataset_id_str = str(dataset_id) dataset = DatasetService.get_dataset(dataset_id_str) @@ -137,7 +137,7 @@ class DocumentMetadataEditApi(Resource): parser = reqparse.RequestParser() parser.add_argument("operation_data", type=list, required=True, nullable=False, location="json") args = parser.parse_args() - metadata_args = MetadataOperationData(**args) + metadata_args = MetadataOperationData.model_validate(args) MetadataService.update_documents_metadata(dataset, metadata_args) diff --git a/api/controllers/console/datasets/rag_pipeline/rag_pipeline.py b/api/controllers/console/datasets/rag_pipeline/rag_pipeline.py index 3af590afc8..e021f95283 100644 --- a/api/controllers/console/datasets/rag_pipeline/rag_pipeline.py +++ b/api/controllers/console/datasets/rag_pipeline/rag_pipeline.py @@ -88,7 +88,7 @@ class CustomizedPipelineTemplateApi(Resource): nullable=True, ) args = 
parser.parse_args() - pipeline_template_info = PipelineTemplateInfoEntity(**args) + pipeline_template_info = PipelineTemplateInfoEntity.model_validate(args) RagPipelineService.update_customized_pipeline_template(template_id, pipeline_template_info) return 200 diff --git a/api/controllers/inner_api/plugin/wraps.py b/api/controllers/inner_api/plugin/wraps.py index b683aa3160..a36d6b0745 100644 --- a/api/controllers/inner_api/plugin/wraps.py +++ b/api/controllers/inner_api/plugin/wraps.py @@ -128,7 +128,7 @@ def plugin_data(view: Callable[P, R] | None = None, *, payload_type: type[BaseMo raise ValueError("invalid json") try: - payload = payload_type(**data) + payload = payload_type.model_validate(data) except Exception as e: raise ValueError(f"invalid payload: {str(e)}") diff --git a/api/controllers/service_api/dataset/dataset.py b/api/controllers/service_api/dataset/dataset.py index 961b96db91..92bbb76f0f 100644 --- a/api/controllers/service_api/dataset/dataset.py +++ b/api/controllers/service_api/dataset/dataset.py @@ -280,7 +280,7 @@ class DatasetListApi(DatasetApiResource): external_knowledge_id=args["external_knowledge_id"], embedding_model_provider=args["embedding_model_provider"], embedding_model_name=args["embedding_model"], - retrieval_model=RetrievalModel(**args["retrieval_model"]) + retrieval_model=RetrievalModel.model_validate(args["retrieval_model"]) if args["retrieval_model"] is not None else None, ) diff --git a/api/controllers/service_api/dataset/document.py b/api/controllers/service_api/dataset/document.py index c1122acd7b..961a338bc5 100644 --- a/api/controllers/service_api/dataset/document.py +++ b/api/controllers/service_api/dataset/document.py @@ -136,7 +136,7 @@ class DocumentAddByTextApi(DatasetApiResource): "info_list": {"data_source_type": "upload_file", "file_info_list": {"file_ids": [upload_file.id]}}, } args["data_source"] = data_source - knowledge_config = KnowledgeConfig(**args) + knowledge_config = KnowledgeConfig.model_validate(args) # 
validate args DocumentService.document_create_args_validate(knowledge_config) @@ -221,7 +221,7 @@ class DocumentUpdateByTextApi(DatasetApiResource): args["data_source"] = data_source # validate args args["original_document_id"] = str(document_id) - knowledge_config = KnowledgeConfig(**args) + knowledge_config = KnowledgeConfig.model_validate(args) DocumentService.document_create_args_validate(knowledge_config) try: @@ -328,7 +328,7 @@ class DocumentAddByFileApi(DatasetApiResource): } args["data_source"] = data_source # validate args - knowledge_config = KnowledgeConfig(**args) + knowledge_config = KnowledgeConfig.model_validate(args) DocumentService.document_create_args_validate(knowledge_config) dataset_process_rule = dataset.latest_process_rule if "process_rule" not in args else None @@ -426,7 +426,7 @@ class DocumentUpdateByFileApi(DatasetApiResource): # validate args args["original_document_id"] = str(document_id) - knowledge_config = KnowledgeConfig(**args) + knowledge_config = KnowledgeConfig.model_validate(args) DocumentService.document_create_args_validate(knowledge_config) try: diff --git a/api/controllers/service_api/dataset/metadata.py b/api/controllers/service_api/dataset/metadata.py index e01659dc68..51420fdd5f 100644 --- a/api/controllers/service_api/dataset/metadata.py +++ b/api/controllers/service_api/dataset/metadata.py @@ -51,7 +51,7 @@ class DatasetMetadataCreateServiceApi(DatasetApiResource): def post(self, tenant_id, dataset_id): """Create metadata for a dataset.""" args = metadata_create_parser.parse_args() - metadata_args = MetadataArgs(**args) + metadata_args = MetadataArgs.model_validate(args) dataset_id_str = str(dataset_id) dataset = DatasetService.get_dataset(dataset_id_str) @@ -200,7 +200,7 @@ class DocumentMetadataEditServiceApi(DatasetApiResource): DatasetService.check_dataset_permission(dataset, current_user) args = document_metadata_parser.parse_args() - metadata_args = MetadataOperationData(**args) + metadata_args = 
MetadataOperationData.model_validate(args) MetadataService.update_documents_metadata(dataset, metadata_args) diff --git a/api/controllers/service_api/dataset/rag_pipeline/rag_pipeline_workflow.py b/api/controllers/service_api/dataset/rag_pipeline/rag_pipeline_workflow.py index f05325d711..13ef8abc2d 100644 --- a/api/controllers/service_api/dataset/rag_pipeline/rag_pipeline_workflow.py +++ b/api/controllers/service_api/dataset/rag_pipeline/rag_pipeline_workflow.py @@ -98,7 +98,7 @@ class DatasourceNodeRunApi(DatasetApiResource): parser.add_argument("is_published", type=bool, required=True, location="json") args: ParseResult = parser.parse_args() - datasource_node_run_api_entity: DatasourceNodeRunApiEntity = DatasourceNodeRunApiEntity(**args) + datasource_node_run_api_entity = DatasourceNodeRunApiEntity.model_validate(args) assert isinstance(current_user, Account) rag_pipeline_service: RagPipelineService = RagPipelineService() pipeline: Pipeline = rag_pipeline_service.get_pipeline(tenant_id=tenant_id, dataset_id=dataset_id) diff --git a/api/controllers/service_api/dataset/segment.py b/api/controllers/service_api/dataset/segment.py index a22155b07a..d674c7467d 100644 --- a/api/controllers/service_api/dataset/segment.py +++ b/api/controllers/service_api/dataset/segment.py @@ -252,7 +252,7 @@ class DatasetSegmentApi(DatasetApiResource): args = segment_update_parser.parse_args() updated_segment = SegmentService.update_segment( - SegmentUpdateArgs(**args["segment"]), segment, document, dataset + SegmentUpdateArgs.model_validate(args["segment"]), segment, document, dataset ) return {"data": marshal(updated_segment, segment_fields), "doc_form": document.doc_form}, 200 diff --git a/api/core/app/app_config/easy_ui_based_app/agent/manager.py b/api/core/app/app_config/easy_ui_based_app/agent/manager.py index eab26e5af9..c1f336fdde 100644 --- a/api/core/app/app_config/easy_ui_based_app/agent/manager.py +++ b/api/core/app/app_config/easy_ui_based_app/agent/manager.py @@ -40,7 
+40,7 @@ class AgentConfigManager: "credential_id": tool.get("credential_id", None), } - agent_tools.append(AgentToolEntity(**agent_tool_properties)) + agent_tools.append(AgentToolEntity.model_validate(agent_tool_properties)) if "strategy" in config["agent_mode"] and config["agent_mode"]["strategy"] not in { "react_router", diff --git a/api/core/app/apps/pipeline/pipeline_runner.py b/api/core/app/apps/pipeline/pipeline_runner.py index 145f629c4d..866c46d963 100644 --- a/api/core/app/apps/pipeline/pipeline_runner.py +++ b/api/core/app/apps/pipeline/pipeline_runner.py @@ -116,7 +116,7 @@ class PipelineRunner(WorkflowBasedAppRunner): rag_pipeline_variables = [] if workflow.rag_pipeline_variables: for v in workflow.rag_pipeline_variables: - rag_pipeline_variable = RAGPipelineVariable(**v) + rag_pipeline_variable = RAGPipelineVariable.model_validate(v) if ( rag_pipeline_variable.belong_to_node_id in (self.application_generate_entity.start_node_id, "shared") diff --git a/api/core/datasource/entities/common_entities.py b/api/core/datasource/entities/common_entities.py index ac36d83ae3..3c64632dbb 100644 --- a/api/core/datasource/entities/common_entities.py +++ b/api/core/datasource/entities/common_entities.py @@ -1,4 +1,4 @@ -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, model_validator class I18nObject(BaseModel): @@ -11,11 +11,12 @@ class I18nObject(BaseModel): pt_BR: str | None = Field(default=None) ja_JP: str | None = Field(default=None) - def __init__(self, **data): - super().__init__(**data) + @model_validator(mode="after") + def _(self): self.zh_Hans = self.zh_Hans or self.en_US self.pt_BR = self.pt_BR or self.en_US self.ja_JP = self.ja_JP or self.en_US + return self def to_dict(self) -> dict: return {"zh_Hans": self.zh_Hans, "en_US": self.en_US, "pt_BR": self.pt_BR, "ja_JP": self.ja_JP} diff --git a/api/core/entities/provider_configuration.py b/api/core/entities/provider_configuration.py index 111de89178..2857729a81 100644 --- 
a/api/core/entities/provider_configuration.py +++ b/api/core/entities/provider_configuration.py @@ -5,7 +5,7 @@ from collections import defaultdict from collections.abc import Iterator, Sequence from json import JSONDecodeError -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict, Field, model_validator from sqlalchemy import func, select from sqlalchemy.orm import Session @@ -73,9 +73,8 @@ class ProviderConfiguration(BaseModel): # pydantic configs model_config = ConfigDict(protected_namespaces=()) - def __init__(self, **data): - super().__init__(**data) - + @model_validator(mode="after") + def _(self): if self.provider.provider not in original_provider_configurate_methods: original_provider_configurate_methods[self.provider.provider] = [] for configurate_method in self.provider.configurate_methods: @@ -90,6 +89,7 @@ class ProviderConfiguration(BaseModel): and ConfigurateMethod.PREDEFINED_MODEL not in self.provider.configurate_methods ): self.provider.configurate_methods.append(ConfigurateMethod.PREDEFINED_MODEL) + return self def get_current_credentials(self, model_type: ModelType, model: str) -> dict | None: """ diff --git a/api/core/helper/code_executor/code_executor.py b/api/core/helper/code_executor/code_executor.py index 0c1d03dc13..f92278f9e2 100644 --- a/api/core/helper/code_executor/code_executor.py +++ b/api/core/helper/code_executor/code_executor.py @@ -131,7 +131,7 @@ class CodeExecutor: if (code := response_data.get("code")) != 0: raise CodeExecutionError(f"Got error code: {code}. 
Got error msg: {response_data.get('message')}") - response_code = CodeExecutionResponse(**response_data) + response_code = CodeExecutionResponse.model_validate(response_data) if response_code.data.error: raise CodeExecutionError(response_code.data.error) diff --git a/api/core/helper/marketplace.py b/api/core/helper/marketplace.py index 10f304c087..bddb864a95 100644 --- a/api/core/helper/marketplace.py +++ b/api/core/helper/marketplace.py @@ -26,7 +26,7 @@ def batch_fetch_plugin_manifests(plugin_ids: list[str]) -> Sequence[MarketplaceP response = httpx.post(url, json={"plugin_ids": plugin_ids}, headers={"X-Dify-Version": dify_config.project.version}) response.raise_for_status() - return [MarketplacePluginDeclaration(**plugin) for plugin in response.json()["data"]["plugins"]] + return [MarketplacePluginDeclaration.model_validate(plugin) for plugin in response.json()["data"]["plugins"]] def batch_fetch_plugin_manifests_ignore_deserialization_error( @@ -41,7 +41,7 @@ def batch_fetch_plugin_manifests_ignore_deserialization_error( result: list[MarketplacePluginDeclaration] = [] for plugin in response.json()["data"]["plugins"]: try: - result.append(MarketplacePluginDeclaration(**plugin)) + result.append(MarketplacePluginDeclaration.model_validate(plugin)) except Exception: pass diff --git a/api/core/indexing_runner.py b/api/core/indexing_runner.py index ee37024260..3682fdb667 100644 --- a/api/core/indexing_runner.py +++ b/api/core/indexing_runner.py @@ -20,7 +20,7 @@ from core.rag.cleaner.clean_processor import CleanProcessor from core.rag.datasource.keyword.keyword_factory import Keyword from core.rag.docstore.dataset_docstore import DatasetDocumentStore from core.rag.extractor.entity.datasource_type import DatasourceType -from core.rag.extractor.entity.extract_setting import ExtractSetting +from core.rag.extractor.entity.extract_setting import ExtractSetting, NotionInfo, WebsiteInfo from core.rag.index_processor.constant.index_type import IndexType from 
core.rag.index_processor.index_processor_base import BaseIndexProcessor from core.rag.index_processor.index_processor_factory import IndexProcessorFactory @@ -357,14 +357,16 @@ class IndexingRunner: raise ValueError("no notion import info found") extract_setting = ExtractSetting( datasource_type=DatasourceType.NOTION.value, - notion_info={ - "credential_id": data_source_info["credential_id"], - "notion_workspace_id": data_source_info["notion_workspace_id"], - "notion_obj_id": data_source_info["notion_page_id"], - "notion_page_type": data_source_info["type"], - "document": dataset_document, - "tenant_id": dataset_document.tenant_id, - }, + notion_info=NotionInfo.model_validate( + { + "credential_id": data_source_info["credential_id"], + "notion_workspace_id": data_source_info["notion_workspace_id"], + "notion_obj_id": data_source_info["notion_page_id"], + "notion_page_type": data_source_info["type"], + "document": dataset_document, + "tenant_id": dataset_document.tenant_id, + } + ), document_model=dataset_document.doc_form, ) text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule["mode"]) @@ -378,14 +380,16 @@ class IndexingRunner: raise ValueError("no website import info found") extract_setting = ExtractSetting( datasource_type=DatasourceType.WEBSITE.value, - website_info={ - "provider": data_source_info["provider"], - "job_id": data_source_info["job_id"], - "tenant_id": dataset_document.tenant_id, - "url": data_source_info["url"], - "mode": data_source_info["mode"], - "only_main_content": data_source_info["only_main_content"], - }, + website_info=WebsiteInfo.model_validate( + { + "provider": data_source_info["provider"], + "job_id": data_source_info["job_id"], + "tenant_id": dataset_document.tenant_id, + "url": data_source_info["url"], + "mode": data_source_info["mode"], + "only_main_content": data_source_info["only_main_content"], + } + ), document_model=dataset_document.doc_form, ) text_docs = index_processor.extract(extract_setting, 
process_rule_mode=process_rule["mode"]) diff --git a/api/core/mcp/session/client_session.py b/api/core/mcp/session/client_session.py index 5817416ba4..fa1d309134 100644 --- a/api/core/mcp/session/client_session.py +++ b/api/core/mcp/session/client_session.py @@ -294,7 +294,7 @@ class ClientSession( method="completion/complete", params=types.CompleteRequestParams( ref=ref, - argument=types.CompletionArgument(**argument), + argument=types.CompletionArgument.model_validate(argument), ), ) ), diff --git a/api/core/model_runtime/entities/common_entities.py b/api/core/model_runtime/entities/common_entities.py index c7353de5af..b673efae22 100644 --- a/api/core/model_runtime/entities/common_entities.py +++ b/api/core/model_runtime/entities/common_entities.py @@ -1,4 +1,4 @@ -from pydantic import BaseModel +from pydantic import BaseModel, model_validator class I18nObject(BaseModel): @@ -9,7 +9,8 @@ class I18nObject(BaseModel): zh_Hans: str | None = None en_US: str - def __init__(self, **data): - super().__init__(**data) + @model_validator(mode="after") + def _(self): if not self.zh_Hans: self.zh_Hans = self.en_US + return self diff --git a/api/core/model_runtime/entities/provider_entities.py b/api/core/model_runtime/entities/provider_entities.py index 2ccc9e0eae..831fb9d4db 100644 --- a/api/core/model_runtime/entities/provider_entities.py +++ b/api/core/model_runtime/entities/provider_entities.py @@ -1,7 +1,7 @@ from collections.abc import Sequence from enum import Enum, StrEnum, auto -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import AIModelEntity, ModelType @@ -46,10 +46,11 @@ class FormOption(BaseModel): value: str show_on: list[FormShowOnObject] = [] - def __init__(self, **data): - super().__init__(**data) + @model_validator(mode="after") + def 
_(self): if not self.label: self.label = I18nObject(en_US=self.value) + return self class CredentialFormSchema(BaseModel): diff --git a/api/core/model_runtime/model_providers/model_provider_factory.py b/api/core/model_runtime/model_providers/model_provider_factory.py index e070c17abd..e1afc41bee 100644 --- a/api/core/model_runtime/model_providers/model_provider_factory.py +++ b/api/core/model_runtime/model_providers/model_provider_factory.py @@ -269,17 +269,17 @@ class ModelProviderFactory: } if model_type == ModelType.LLM: - return LargeLanguageModel(**init_params) # type: ignore + return LargeLanguageModel.model_validate(init_params) elif model_type == ModelType.TEXT_EMBEDDING: - return TextEmbeddingModel(**init_params) # type: ignore + return TextEmbeddingModel.model_validate(init_params) elif model_type == ModelType.RERANK: - return RerankModel(**init_params) # type: ignore + return RerankModel.model_validate(init_params) elif model_type == ModelType.SPEECH2TEXT: - return Speech2TextModel(**init_params) # type: ignore + return Speech2TextModel.model_validate(init_params) elif model_type == ModelType.MODERATION: - return ModerationModel(**init_params) # type: ignore + return ModerationModel.model_validate(init_params) elif model_type == ModelType.TTS: - return TTSModel(**init_params) # type: ignore + return TTSModel.model_validate(init_params) def get_provider_icon(self, provider: str, icon_type: str, lang: str) -> tuple[bytes, str]: """ diff --git a/api/core/moderation/api/api.py b/api/core/moderation/api/api.py index 573f4ec2a7..2d72b17a04 100644 --- a/api/core/moderation/api/api.py +++ b/api/core/moderation/api/api.py @@ -51,7 +51,7 @@ class ApiModeration(Moderation): params = ModerationInputParams(app_id=self.app_id, inputs=inputs, query=query) result = self._get_config_by_requestor(APIBasedExtensionPoint.APP_MODERATION_INPUT, params.model_dump()) - return ModerationInputsResult(**result) + return ModerationInputsResult.model_validate(result) return 
ModerationInputsResult( flagged=flagged, action=ModerationAction.DIRECT_OUTPUT, preset_response=preset_response @@ -67,7 +67,7 @@ class ApiModeration(Moderation): params = ModerationOutputParams(app_id=self.app_id, text=text) result = self._get_config_by_requestor(APIBasedExtensionPoint.APP_MODERATION_OUTPUT, params.model_dump()) - return ModerationOutputsResult(**result) + return ModerationOutputsResult.model_validate(result) return ModerationOutputsResult( flagged=flagged, action=ModerationAction.DIRECT_OUTPUT, preset_response=preset_response diff --git a/api/core/plugin/entities/request.py b/api/core/plugin/entities/request.py index 10f37f75f8..7b789d8ac9 100644 --- a/api/core/plugin/entities/request.py +++ b/api/core/plugin/entities/request.py @@ -84,15 +84,15 @@ class RequestInvokeLLM(BaseRequestInvokeModel): for i in range(len(v)): if v[i]["role"] == PromptMessageRole.USER.value: - v[i] = UserPromptMessage(**v[i]) + v[i] = UserPromptMessage.model_validate(v[i]) elif v[i]["role"] == PromptMessageRole.ASSISTANT.value: - v[i] = AssistantPromptMessage(**v[i]) + v[i] = AssistantPromptMessage.model_validate(v[i]) elif v[i]["role"] == PromptMessageRole.SYSTEM.value: - v[i] = SystemPromptMessage(**v[i]) + v[i] = SystemPromptMessage.model_validate(v[i]) elif v[i]["role"] == PromptMessageRole.TOOL.value: - v[i] = ToolPromptMessage(**v[i]) + v[i] = ToolPromptMessage.model_validate(v[i]) else: - v[i] = PromptMessage(**v[i]) + v[i] = PromptMessage.model_validate(v[i]) return v diff --git a/api/core/plugin/impl/base.py b/api/core/plugin/impl/base.py index 8e3df4da2c..62a5cc535a 100644 --- a/api/core/plugin/impl/base.py +++ b/api/core/plugin/impl/base.py @@ -94,7 +94,7 @@ class BasePluginClient: self, method: str, path: str, - type: type[T], + type_: type[T], headers: dict | None = None, data: bytes | dict | None = None, params: dict | None = None, @@ -104,13 +104,13 @@ class BasePluginClient: Make a stream request to the plugin daemon inner API and yield the response as a 
model. """ for line in self._stream_request(method, path, params, headers, data, files): - yield type(**json.loads(line)) # type: ignore + yield type_(**json.loads(line)) # type: ignore def _request_with_model( self, method: str, path: str, - type: type[T], + type_: type[T], headers: dict | None = None, data: bytes | None = None, params: dict | None = None, @@ -120,13 +120,13 @@ class BasePluginClient: Make a request to the plugin daemon inner API and return the response as a model. """ response = self._request(method, path, headers, data, params, files) - return type(**response.json()) # type: ignore + return type_(**response.json()) # type: ignore def _request_with_plugin_daemon_response( self, method: str, path: str, - type: type[T], + type_: type[T], headers: dict | None = None, data: bytes | dict | None = None, params: dict | None = None, @@ -140,22 +140,22 @@ class BasePluginClient: response = self._request(method, path, headers, data, params, files) response.raise_for_status() except HTTPError as e: - msg = f"Failed to request plugin daemon, status: {e.response.status_code}, url: {path}" - logger.exception(msg) + logger.exception("Failed to request plugin daemon, status: %s, url: %s", e.response.status_code, path) raise e except Exception as e: msg = f"Failed to request plugin daemon, url: {path}" - logger.exception(msg) + logger.exception("Failed to request plugin daemon, url: %s", path) raise ValueError(msg) from e try: json_response = response.json() if transformer: json_response = transformer(json_response) - rep = PluginDaemonBasicResponse[type](**json_response) # type: ignore + # https://stackoverflow.com/questions/59634937/variable-foo-class-is-not-valid-as-type-but-why + rep = PluginDaemonBasicResponse[type_].model_validate(json_response) # type: ignore except Exception: msg = ( - f"Failed to parse response from plugin daemon to PluginDaemonBasicResponse [{str(type.__name__)}]," + f"Failed to parse response from plugin daemon to 
PluginDaemonBasicResponse [{str(type_.__name__)}]," f" url: {path}" ) logger.exception(msg) @@ -163,7 +163,7 @@ class BasePluginClient: if rep.code != 0: try: - error = PluginDaemonError(**json.loads(rep.message)) + error = PluginDaemonError.model_validate(json.loads(rep.message)) except Exception: raise ValueError(f"{rep.message}, code: {rep.code}") @@ -178,7 +178,7 @@ class BasePluginClient: self, method: str, path: str, - type: type[T], + type_: type[T], headers: dict | None = None, data: bytes | dict | None = None, params: dict | None = None, @@ -189,7 +189,7 @@ class BasePluginClient: """ for line in self._stream_request(method, path, params, headers, data, files): try: - rep = PluginDaemonBasicResponse[type].model_validate_json(line) # type: ignore + rep = PluginDaemonBasicResponse[type_].model_validate_json(line) # type: ignore except (ValueError, TypeError): # TODO modify this when line_data has code and message try: @@ -204,7 +204,7 @@ class BasePluginClient: if rep.code != 0: if rep.code == -500: try: - error = PluginDaemonError(**json.loads(rep.message)) + error = PluginDaemonError.model_validate(json.loads(rep.message)) except Exception: raise PluginDaemonInnerError(code=rep.code, message=rep.message) diff --git a/api/core/plugin/impl/datasource.py b/api/core/plugin/impl/datasource.py index 84087f8104..ce1ef71494 100644 --- a/api/core/plugin/impl/datasource.py +++ b/api/core/plugin/impl/datasource.py @@ -46,7 +46,9 @@ class PluginDatasourceManager(BasePluginClient): params={"page": 1, "page_size": 256}, transformer=transformer, ) - local_file_datasource_provider = PluginDatasourceProviderEntity(**self._get_local_file_datasource_provider()) + local_file_datasource_provider = PluginDatasourceProviderEntity.model_validate( + self._get_local_file_datasource_provider() + ) for provider in response: ToolTransformService.repack_provider(tenant_id=tenant_id, provider=provider) @@ -104,7 +106,7 @@ class PluginDatasourceManager(BasePluginClient): Fetch datasource 
provider for the given tenant and plugin. """ if provider_id == "langgenius/file/file": - return PluginDatasourceProviderEntity(**self._get_local_file_datasource_provider()) + return PluginDatasourceProviderEntity.model_validate(self._get_local_file_datasource_provider()) tool_provider_id = DatasourceProviderID(provider_id) diff --git a/api/core/plugin/impl/model.py b/api/core/plugin/impl/model.py index 153da142f4..5dfc3c212e 100644 --- a/api/core/plugin/impl/model.py +++ b/api/core/plugin/impl/model.py @@ -162,7 +162,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/llm/invoke", - type=LLMResultChunk, + type_=LLMResultChunk, data=jsonable_encoder( { "user_id": user_id, @@ -208,7 +208,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/llm/num_tokens", - type=PluginLLMNumTokensResponse, + type_=PluginLLMNumTokensResponse, data=jsonable_encoder( { "user_id": user_id, @@ -250,7 +250,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/text_embedding/invoke", - type=TextEmbeddingResult, + type_=TextEmbeddingResult, data=jsonable_encoder( { "user_id": user_id, @@ -291,7 +291,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/text_embedding/num_tokens", - type=PluginTextEmbeddingNumTokensResponse, + type_=PluginTextEmbeddingNumTokensResponse, data=jsonable_encoder( { "user_id": user_id, @@ -334,7 +334,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/rerank/invoke", - type=RerankResult, + type_=RerankResult, data=jsonable_encoder( { 
"user_id": user_id, @@ -378,7 +378,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/tts/invoke", - type=PluginStringResultResponse, + type_=PluginStringResultResponse, data=jsonable_encoder( { "user_id": user_id, @@ -422,7 +422,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/tts/model/voices", - type=PluginVoicesResponse, + type_=PluginVoicesResponse, data=jsonable_encoder( { "user_id": user_id, @@ -466,7 +466,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/speech2text/invoke", - type=PluginStringResultResponse, + type_=PluginStringResultResponse, data=jsonable_encoder( { "user_id": user_id, @@ -506,7 +506,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/moderation/invoke", - type=PluginBasicBooleanResponse, + type_=PluginBasicBooleanResponse, data=jsonable_encoder( { "user_id": user_id, diff --git a/api/core/rag/datasource/retrieval_service.py b/api/core/rag/datasource/retrieval_service.py index 63a1d911ca..38358ccd6d 100644 --- a/api/core/rag/datasource/retrieval_service.py +++ b/api/core/rag/datasource/retrieval_service.py @@ -134,7 +134,7 @@ class RetrievalService: if not dataset: return [] metadata_condition = ( - MetadataCondition(**metadata_filtering_conditions) if metadata_filtering_conditions else None + MetadataCondition.model_validate(metadata_filtering_conditions) if metadata_filtering_conditions else None ) all_documents = ExternalDatasetService.fetch_external_knowledge_retrieval( dataset.tenant_id, diff --git a/api/core/rag/extractor/entity/extract_setting.py b/api/core/rag/extractor/entity/extract_setting.py index 
b9bf9d0d8c..c3bfbce98f 100644 --- a/api/core/rag/extractor/entity/extract_setting.py +++ b/api/core/rag/extractor/entity/extract_setting.py @@ -17,9 +17,6 @@ class NotionInfo(BaseModel): tenant_id: str model_config = ConfigDict(arbitrary_types_allowed=True) - def __init__(self, **data): - super().__init__(**data) - class WebsiteInfo(BaseModel): """ @@ -47,6 +44,3 @@ class ExtractSetting(BaseModel): website_info: WebsiteInfo | None = None document_model: str | None = None model_config = ConfigDict(arbitrary_types_allowed=True) - - def __init__(self, **data): - super().__init__(**data) diff --git a/api/core/rag/index_processor/processor/paragraph_index_processor.py b/api/core/rag/index_processor/processor/paragraph_index_processor.py index 755aa88d08..4fcffbcc77 100644 --- a/api/core/rag/index_processor/processor/paragraph_index_processor.py +++ b/api/core/rag/index_processor/processor/paragraph_index_processor.py @@ -38,11 +38,11 @@ class ParagraphIndexProcessor(BaseIndexProcessor): raise ValueError("No process rule found.") if process_rule.get("mode") == "automatic": automatic_rule = DatasetProcessRule.AUTOMATIC_RULES - rules = Rule(**automatic_rule) + rules = Rule.model_validate(automatic_rule) else: if not process_rule.get("rules"): raise ValueError("No rules found in process rule.") - rules = Rule(**process_rule.get("rules")) + rules = Rule.model_validate(process_rule.get("rules")) # Split the text documents into nodes. 
if not rules.segmentation: raise ValueError("No segmentation found in rules.") diff --git a/api/core/rag/index_processor/processor/parent_child_index_processor.py b/api/core/rag/index_processor/processor/parent_child_index_processor.py index e0ccd8b567..7bdde286f5 100644 --- a/api/core/rag/index_processor/processor/parent_child_index_processor.py +++ b/api/core/rag/index_processor/processor/parent_child_index_processor.py @@ -40,7 +40,7 @@ class ParentChildIndexProcessor(BaseIndexProcessor): raise ValueError("No process rule found.") if not process_rule.get("rules"): raise ValueError("No rules found in process rule.") - rules = Rule(**process_rule.get("rules")) + rules = Rule.model_validate(process_rule.get("rules")) all_documents: list[Document] = [] if rules.parent_mode == ParentMode.PARAGRAPH: # Split the text documents into nodes. @@ -110,7 +110,7 @@ class ParentChildIndexProcessor(BaseIndexProcessor): child_documents = document.children if child_documents: formatted_child_documents = [ - Document(**child_document.model_dump()) for child_document in child_documents + Document.model_validate(child_document.model_dump()) for child_document in child_documents ] vector.create(formatted_child_documents) @@ -224,7 +224,7 @@ class ParentChildIndexProcessor(BaseIndexProcessor): return child_nodes def index(self, dataset: Dataset, document: DatasetDocument, chunks: Any): - parent_childs = ParentChildStructureChunk(**chunks) + parent_childs = ParentChildStructureChunk.model_validate(chunks) documents = [] for parent_child in parent_childs.parent_child_chunks: metadata = { @@ -274,7 +274,7 @@ class ParentChildIndexProcessor(BaseIndexProcessor): vector.create(all_child_documents) def format_preview(self, chunks: Any) -> Mapping[str, Any]: - parent_childs = ParentChildStructureChunk(**chunks) + parent_childs = ParentChildStructureChunk.model_validate(chunks) preview = [] for parent_child in parent_childs.parent_child_chunks: preview.append({"content": 
parent_child.parent_content, "child_chunks": parent_child.child_contents}) diff --git a/api/core/rag/index_processor/processor/qa_index_processor.py b/api/core/rag/index_processor/processor/qa_index_processor.py index 2054031643..9c8f70dba8 100644 --- a/api/core/rag/index_processor/processor/qa_index_processor.py +++ b/api/core/rag/index_processor/processor/qa_index_processor.py @@ -47,7 +47,7 @@ class QAIndexProcessor(BaseIndexProcessor): raise ValueError("No process rule found.") if not process_rule.get("rules"): raise ValueError("No rules found in process rule.") - rules = Rule(**process_rule.get("rules")) + rules = Rule.model_validate(process_rule.get("rules")) splitter = self._get_splitter( processing_rule_mode=process_rule.get("mode"), max_tokens=rules.segmentation.max_tokens if rules.segmentation else 0, @@ -168,7 +168,7 @@ class QAIndexProcessor(BaseIndexProcessor): return docs def index(self, dataset: Dataset, document: DatasetDocument, chunks: Any): - qa_chunks = QAStructureChunk(**chunks) + qa_chunks = QAStructureChunk.model_validate(chunks) documents = [] for qa_chunk in qa_chunks.qa_chunks: metadata = { @@ -191,7 +191,7 @@ class QAIndexProcessor(BaseIndexProcessor): raise ValueError("Indexing technique must be high quality.") def format_preview(self, chunks: Any) -> Mapping[str, Any]: - qa_chunks = QAStructureChunk(**chunks) + qa_chunks = QAStructureChunk.model_validate(chunks) preview = [] for qa_chunk in qa_chunks.qa_chunks: preview.append({"question": qa_chunk.question, "answer": qa_chunk.answer}) diff --git a/api/core/tools/builtin_tool/provider.py b/api/core/tools/builtin_tool/provider.py index 45fd16d684..29d34e722a 100644 --- a/api/core/tools/builtin_tool/provider.py +++ b/api/core/tools/builtin_tool/provider.py @@ -90,7 +90,7 @@ class BuiltinToolProviderController(ToolProviderController): tools.append( assistant_tool_class( provider=provider, - entity=ToolEntity(**tool), + entity=ToolEntity.model_validate(tool), 
runtime=ToolRuntime(tenant_id=""), ) ) diff --git a/api/core/tools/entities/common_entities.py b/api/core/tools/entities/common_entities.py index 2c6d9c1964..21d310bbb9 100644 --- a/api/core/tools/entities/common_entities.py +++ b/api/core/tools/entities/common_entities.py @@ -1,4 +1,4 @@ -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, model_validator class I18nObject(BaseModel): @@ -11,11 +11,12 @@ class I18nObject(BaseModel): pt_BR: str | None = Field(default=None) ja_JP: str | None = Field(default=None) - def __init__(self, **data): - super().__init__(**data) + @model_validator(mode="after") + def _populate_missing_locales(self): self.zh_Hans = self.zh_Hans or self.en_US self.pt_BR = self.pt_BR or self.en_US self.ja_JP = self.ja_JP or self.en_US + return self def to_dict(self): return {"zh_Hans": self.zh_Hans, "en_US": self.en_US, "pt_BR": self.pt_BR, "ja_JP": self.ja_JP} diff --git a/api/core/tools/mcp_tool/provider.py b/api/core/tools/mcp_tool/provider.py index 5b04f0edbe..f269b8db9b 100644 --- a/api/core/tools/mcp_tool/provider.py +++ b/api/core/tools/mcp_tool/provider.py @@ -54,7 +54,7 @@ class MCPToolProviderController(ToolProviderController): """ tools = [] tools_data = json.loads(db_provider.tools) - remote_mcp_tools = [RemoteMCPTool(**tool) for tool in tools_data] + remote_mcp_tools = [RemoteMCPTool.model_validate(tool) for tool in tools_data] user = db_provider.load_user() tools = [ ToolEntity( diff --git a/api/core/tools/tool_manager.py b/api/core/tools/tool_manager.py index 9e5f5a7c23..af68971ca7 100644 --- a/api/core/tools/tool_manager.py +++ b/api/core/tools/tool_manager.py @@ -1008,7 +1008,7 @@ class ToolManager: config = tool_configurations.get(parameter.name, {}) if not (config and isinstance(config, dict) and config.get("value") is not None): continue - tool_input = ToolNodeData.ToolInput(**tool_configurations.get(parameter.name, {})) + tool_input = 
ToolNodeData.ToolInput.model_validate(tool_configurations.get(parameter.name, {})) if tool_input.type == "variable": variable = variable_pool.get(tool_input.value) if variable is None: diff --git a/api/core/workflow/graph_engine/command_channels/redis_channel.py b/api/core/workflow/graph_engine/command_channels/redis_channel.py index 056e17bf5d..c841459170 100644 --- a/api/core/workflow/graph_engine/command_channels/redis_channel.py +++ b/api/core/workflow/graph_engine/command_channels/redis_channel.py @@ -105,10 +105,10 @@ class RedisChannel: command_type = CommandType(command_type_value) if command_type == CommandType.ABORT: - return AbortCommand(**data) + return AbortCommand.model_validate(data) else: # For other command types, use base class - return GraphEngineCommand(**data) + return GraphEngineCommand.model_validate(data) except (ValueError, TypeError): return None diff --git a/api/core/workflow/nodes/end/end_node.py b/api/core/workflow/nodes/end/end_node.py index 2bdfe4efce..7ec74084d0 100644 --- a/api/core/workflow/nodes/end/end_node.py +++ b/api/core/workflow/nodes/end/end_node.py @@ -16,7 +16,7 @@ class EndNode(Node): _node_data: EndNodeData def init_node_data(self, data: Mapping[str, Any]): - self._node_data = EndNodeData(**data) + self._node_data = EndNodeData.model_validate(data) def _get_error_strategy(self) -> ErrorStrategy | None: return self._node_data.error_strategy diff --git a/api/core/workflow/nodes/iteration/iteration_start_node.py b/api/core/workflow/nodes/iteration/iteration_start_node.py index 80f39ccebc..90b7f4539b 100644 --- a/api/core/workflow/nodes/iteration/iteration_start_node.py +++ b/api/core/workflow/nodes/iteration/iteration_start_node.py @@ -18,7 +18,7 @@ class IterationStartNode(Node): _node_data: IterationStartNodeData def init_node_data(self, data: Mapping[str, Any]): - self._node_data = IterationStartNodeData(**data) + self._node_data = IterationStartNodeData.model_validate(data) def _get_error_strategy(self) -> 
ErrorStrategy | None: return self._node_data.error_strategy diff --git a/api/core/workflow/nodes/list_operator/node.py b/api/core/workflow/nodes/list_operator/node.py index 3243b22d44..180eb2ad90 100644 --- a/api/core/workflow/nodes/list_operator/node.py +++ b/api/core/workflow/nodes/list_operator/node.py @@ -41,7 +41,7 @@ class ListOperatorNode(Node): _node_data: ListOperatorNodeData def init_node_data(self, data: Mapping[str, Any]): - self._node_data = ListOperatorNodeData(**data) + self._node_data = ListOperatorNodeData.model_validate(data) def _get_error_strategy(self) -> ErrorStrategy | None: return self._node_data.error_strategy diff --git a/api/core/workflow/nodes/loop/loop_end_node.py b/api/core/workflow/nodes/loop/loop_end_node.py index 38aef06d24..e5bce1230c 100644 --- a/api/core/workflow/nodes/loop/loop_end_node.py +++ b/api/core/workflow/nodes/loop/loop_end_node.py @@ -18,7 +18,7 @@ class LoopEndNode(Node): _node_data: LoopEndNodeData def init_node_data(self, data: Mapping[str, Any]): - self._node_data = LoopEndNodeData(**data) + self._node_data = LoopEndNodeData.model_validate(data) def _get_error_strategy(self) -> ErrorStrategy | None: return self._node_data.error_strategy diff --git a/api/core/workflow/nodes/loop/loop_start_node.py b/api/core/workflow/nodes/loop/loop_start_node.py index e777a8cbe9..e065dc90a0 100644 --- a/api/core/workflow/nodes/loop/loop_start_node.py +++ b/api/core/workflow/nodes/loop/loop_start_node.py @@ -18,7 +18,7 @@ class LoopStartNode(Node): _node_data: LoopStartNodeData def init_node_data(self, data: Mapping[str, Any]): - self._node_data = LoopStartNodeData(**data) + self._node_data = LoopStartNodeData.model_validate(data) def _get_error_strategy(self) -> ErrorStrategy | None: return self._node_data.error_strategy diff --git a/api/core/workflow/nodes/start/start_node.py b/api/core/workflow/nodes/start/start_node.py index 2f33c54128..3b134be1a1 100644 --- a/api/core/workflow/nodes/start/start_node.py +++ 
b/api/core/workflow/nodes/start/start_node.py @@ -16,7 +16,7 @@ class StartNode(Node): _node_data: StartNodeData def init_node_data(self, data: Mapping[str, Any]): - self._node_data = StartNodeData(**data) + self._node_data = StartNodeData.model_validate(data) def _get_error_strategy(self) -> ErrorStrategy | None: return self._node_data.error_strategy diff --git a/api/core/workflow/nodes/variable_aggregator/variable_aggregator_node.py b/api/core/workflow/nodes/variable_aggregator/variable_aggregator_node.py index be00d55937..0ac0d3d858 100644 --- a/api/core/workflow/nodes/variable_aggregator/variable_aggregator_node.py +++ b/api/core/workflow/nodes/variable_aggregator/variable_aggregator_node.py @@ -15,7 +15,7 @@ class VariableAggregatorNode(Node): _node_data: VariableAssignerNodeData def init_node_data(self, data: Mapping[str, Any]): - self._node_data = VariableAssignerNodeData(**data) + self._node_data = VariableAssignerNodeData.model_validate(data) def _get_error_strategy(self) -> ErrorStrategy | None: return self._node_data.error_strategy diff --git a/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py b/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py index 6c9fc0bf1d..21b73b76b5 100644 --- a/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py +++ b/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py @@ -14,7 +14,7 @@ def handle(sender, **kwargs): for node_data in synced_draft_workflow.graph_dict.get("nodes", []): if node_data.get("data", {}).get("type") == NodeType.TOOL.value: try: - tool_entity = ToolEntity(**node_data["data"]) + tool_entity = ToolEntity.model_validate(node_data["data"]) tool_runtime = ToolManager.get_tool_runtime( provider_type=tool_entity.provider_type, provider_id=tool_entity.provider_id, diff --git a/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py 
b/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py index 898ec1f153..7605d4082c 100644 --- a/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py +++ b/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py @@ -61,7 +61,7 @@ def get_dataset_ids_from_workflow(published_workflow: Workflow) -> set[str]: for node in knowledge_retrieval_nodes: try: - node_data = KnowledgeRetrievalNodeData(**node.get("data", {})) + node_data = KnowledgeRetrievalNodeData.model_validate(node.get("data", {})) dataset_ids.update(dataset_id for dataset_id in node_data.dataset_ids) except Exception: continue diff --git a/api/models/dataset.py b/api/models/dataset.py index 25ebe14738..6263c04365 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -754,7 +754,7 @@ class DocumentSegment(Base): if process_rule and process_rule.mode == "hierarchical": rules_dict = process_rule.rules_dict if rules_dict: - rules = Rule(**rules_dict) + rules = Rule.model_validate(rules_dict) if rules.parent_mode and rules.parent_mode != ParentMode.FULL_DOC: child_chunks = ( db.session.query(ChildChunk) @@ -772,7 +772,7 @@ class DocumentSegment(Base): if process_rule and process_rule.mode == "hierarchical": rules_dict = process_rule.rules_dict if rules_dict: - rules = Rule(**rules_dict) + rules = Rule.model_validate(rules_dict) if rules.parent_mode: child_chunks = ( db.session.query(ChildChunk) diff --git a/api/models/tools.py b/api/models/tools.py index 7211d7aa3a..d581d588a4 100644 --- a/api/models/tools.py +++ b/api/models/tools.py @@ -152,7 +152,7 @@ class ApiToolProvider(Base): def tools(self) -> list["ApiToolBundle"]: from core.tools.entities.tool_bundle import ApiToolBundle - return [ApiToolBundle(**tool) for tool in json.loads(self.tools_str)] + return [ApiToolBundle.model_validate(tool) for tool in json.loads(self.tools_str)] @property def credentials(self) -> dict[str, Any]: @@ -242,7 
+242,10 @@ class WorkflowToolProvider(Base): def parameter_configurations(self) -> list["WorkflowToolParameterConfiguration"]: from core.tools.entities.tool_entities import WorkflowToolParameterConfiguration - return [WorkflowToolParameterConfiguration(**config) for config in json.loads(self.parameter_configuration)] + return [ + WorkflowToolParameterConfiguration.model_validate(config) + for config in json.loads(self.parameter_configuration) + ] @property def app(self) -> App | None: @@ -312,7 +315,7 @@ class MCPToolProvider(Base): def mcp_tools(self) -> list["MCPTool"]: from core.mcp.types import Tool as MCPTool - return [MCPTool(**tool) for tool in json.loads(self.tools)] + return [MCPTool.model_validate(tool) for tool in json.loads(self.tools)] @property def provider_icon(self) -> Mapping[str, str] | str: @@ -552,4 +555,4 @@ class DeprecatedPublishedAppTool(Base): def description_i18n(self) -> "I18nObject": from core.tools.entities.common_entities import I18nObject - return I18nObject(**json.loads(self.description)) + return I18nObject.model_validate(json.loads(self.description)) diff --git a/api/services/app_dsl_service.py b/api/services/app_dsl_service.py index 8701fe4f4e..129e3b0492 100644 --- a/api/services/app_dsl_service.py +++ b/api/services/app_dsl_service.py @@ -659,31 +659,31 @@ class AppDslService: typ = node.get("data", {}).get("type") match typ: case NodeType.TOOL.value: - tool_entity = ToolNodeData(**node["data"]) + tool_entity = ToolNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_tool_dependency(tool_entity.provider_id), ) case NodeType.LLM.value: - llm_entity = LLMNodeData(**node["data"]) + llm_entity = LLMNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency(llm_entity.model.provider), ) case NodeType.QUESTION_CLASSIFIER.value: - question_classifier_entity = QuestionClassifierNodeData(**node["data"]) + question_classifier_entity = 
QuestionClassifierNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency( question_classifier_entity.model.provider ), ) case NodeType.PARAMETER_EXTRACTOR.value: - parameter_extractor_entity = ParameterExtractorNodeData(**node["data"]) + parameter_extractor_entity = ParameterExtractorNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency( parameter_extractor_entity.model.provider ), ) case NodeType.KNOWLEDGE_RETRIEVAL.value: - knowledge_retrieval_entity = KnowledgeRetrievalNodeData(**node["data"]) + knowledge_retrieval_entity = KnowledgeRetrievalNodeData.model_validate(node["data"]) if knowledge_retrieval_entity.retrieval_mode == "multiple": if knowledge_retrieval_entity.multiple_retrieval_config: if ( @@ -773,7 +773,7 @@ class AppDslService: """ Returns the leaked dependencies in current workspace """ - dependencies = [PluginDependency(**dep) for dep in dsl_dependencies] + dependencies = [PluginDependency.model_validate(dep) for dep in dsl_dependencies] if not dependencies: return [] diff --git a/api/services/enterprise/enterprise_service.py b/api/services/enterprise/enterprise_service.py index f8612456d6..4fbf33fd6f 100644 --- a/api/services/enterprise/enterprise_service.py +++ b/api/services/enterprise/enterprise_service.py @@ -70,7 +70,7 @@ class EnterpriseService: data = EnterpriseRequest.send_request("GET", "/webapp/access-mode/id", params=params) if not data: raise ValueError("No data found.") - return WebAppSettings(**data) + return WebAppSettings.model_validate(data) @classmethod def batch_get_app_access_mode_by_id(cls, app_ids: list[str]) -> dict[str, WebAppSettings]: @@ -100,7 +100,7 @@ class EnterpriseService: data = EnterpriseRequest.send_request("GET", "/webapp/access-mode/code", params=params) if not data: raise ValueError("No data found.") - return WebAppSettings(**data) + return WebAppSettings.model_validate(data) 
@classmethod def update_app_access_mode(cls, app_id: str, access_mode: str): diff --git a/api/services/entities/model_provider_entities.py b/api/services/entities/model_provider_entities.py index 49d48f044c..0f5151919f 100644 --- a/api/services/entities/model_provider_entities.py +++ b/api/services/entities/model_provider_entities.py @@ -1,6 +1,7 @@ +from collections.abc import Sequence from enum import Enum -from pydantic import BaseModel, ConfigDict +from pydantic import BaseModel, ConfigDict, model_validator from configs import dify_config from core.entities.model_entities import ( @@ -71,7 +72,7 @@ class ProviderResponse(BaseModel): icon_large: I18nObject | None = None background: str | None = None help: ProviderHelpEntity | None = None - supported_model_types: list[ModelType] + supported_model_types: Sequence[ModelType] configurate_methods: list[ConfigurateMethod] provider_credential_schema: ProviderCredentialSchema | None = None model_credential_schema: ModelCredentialSchema | None = None @@ -82,9 +83,8 @@ class ProviderResponse(BaseModel): # pydantic configs model_config = ConfigDict(protected_namespaces=()) - def __init__(self, **data): - super().__init__(**data) - + @model_validator(mode="after") + def _(self): url_prefix = ( dify_config.CONSOLE_API_URL + f"/console/api/workspaces/{self.tenant_id}/model-providers/{self.provider}" ) @@ -97,6 +97,7 @@ class ProviderResponse(BaseModel): self.icon_large = I18nObject( en_US=f"{url_prefix}/icon_large/en_US", zh_Hans=f"{url_prefix}/icon_large/zh_Hans" ) + return self class ProviderWithModelsResponse(BaseModel): @@ -112,9 +113,8 @@ class ProviderWithModelsResponse(BaseModel): status: CustomConfigurationStatus models: list[ProviderModelWithStatusEntity] - def __init__(self, **data): - super().__init__(**data) - + @model_validator(mode="after") + def _(self): url_prefix = ( dify_config.CONSOLE_API_URL + f"/console/api/workspaces/{self.tenant_id}/model-providers/{self.provider}" ) @@ -127,6 +127,7 @@ class 
ProviderWithModelsResponse(BaseModel): self.icon_large = I18nObject( en_US=f"{url_prefix}/icon_large/en_US", zh_Hans=f"{url_prefix}/icon_large/zh_Hans" ) + return self class SimpleProviderEntityResponse(SimpleProviderEntity): @@ -136,9 +137,8 @@ class SimpleProviderEntityResponse(SimpleProviderEntity): tenant_id: str - def __init__(self, **data): - super().__init__(**data) - + @model_validator(mode="after") + def _(self): url_prefix = ( dify_config.CONSOLE_API_URL + f"/console/api/workspaces/{self.tenant_id}/model-providers/{self.provider}" ) @@ -151,6 +151,7 @@ class SimpleProviderEntityResponse(SimpleProviderEntity): self.icon_large = I18nObject( en_US=f"{url_prefix}/icon_large/en_US", zh_Hans=f"{url_prefix}/icon_large/zh_Hans" ) + return self class DefaultModelResponse(BaseModel): diff --git a/api/services/hit_testing_service.py b/api/services/hit_testing_service.py index 00ec3babf3..6174ce8b3b 100644 --- a/api/services/hit_testing_service.py +++ b/api/services/hit_testing_service.py @@ -46,7 +46,7 @@ class HitTestingService: from core.app.app_config.entities import MetadataFilteringCondition - metadata_filtering_conditions = MetadataFilteringCondition(**metadata_filtering_conditions) + metadata_filtering_conditions = MetadataFilteringCondition.model_validate(metadata_filtering_conditions) metadata_filter_document_ids, metadata_condition = dataset_retrieval.get_metadata_filter_condition( dataset_ids=[dataset.id], diff --git a/api/services/ops_service.py b/api/services/ops_service.py index c214640653..b4b23b8360 100644 --- a/api/services/ops_service.py +++ b/api/services/ops_service.py @@ -123,7 +123,7 @@ class OpsService: config_class: type[BaseTracingConfig] = provider_config["config_class"] other_keys: list[str] = provider_config["other_keys"] - default_config_instance: BaseTracingConfig = config_class(**tracing_config) + default_config_instance = config_class.model_validate(tracing_config) for key in other_keys: if key in tracing_config and 
tracing_config[key] == "": tracing_config[key] = getattr(default_config_instance, key, None) diff --git a/api/services/plugin/plugin_migration.py b/api/services/plugin/plugin_migration.py index 99946d8fa9..76bb9a57f9 100644 --- a/api/services/plugin/plugin_migration.py +++ b/api/services/plugin/plugin_migration.py @@ -269,7 +269,7 @@ class PluginMigration: for tool in agent_config["tools"]: if isinstance(tool, dict): try: - tool_entity = AgentToolEntity(**tool) + tool_entity = AgentToolEntity.model_validate(tool) if ( tool_entity.provider_type == ToolProviderType.BUILT_IN.value and tool_entity.provider_id not in excluded_providers diff --git a/api/services/rag_pipeline/rag_pipeline.py b/api/services/rag_pipeline/rag_pipeline.py index fdaaa73bcc..3ced0fd9ec 100644 --- a/api/services/rag_pipeline/rag_pipeline.py +++ b/api/services/rag_pipeline/rag_pipeline.py @@ -358,7 +358,7 @@ class RagPipelineService: for node in nodes: if node.get("data", {}).get("type") == "knowledge-index": knowledge_configuration = node.get("data", {}) - knowledge_configuration = KnowledgeConfiguration(**knowledge_configuration) + knowledge_configuration = KnowledgeConfiguration.model_validate(knowledge_configuration) # update dataset dataset = pipeline.retrieve_dataset(session=session) diff --git a/api/services/rag_pipeline/rag_pipeline_dsl_service.py b/api/services/rag_pipeline/rag_pipeline_dsl_service.py index f74de1bcab..9dede31ab4 100644 --- a/api/services/rag_pipeline/rag_pipeline_dsl_service.py +++ b/api/services/rag_pipeline/rag_pipeline_dsl_service.py @@ -288,7 +288,7 @@ class RagPipelineDslService: dataset_id = None for node in nodes: if node.get("data", {}).get("type") == "knowledge-index": - knowledge_configuration = KnowledgeConfiguration(**node.get("data", {})) + knowledge_configuration = KnowledgeConfiguration.model_validate(node.get("data", {})) if ( dataset and pipeline.is_published @@ -426,7 +426,7 @@ class RagPipelineDslService: dataset_id = None for node in nodes: if 
node.get("data", {}).get("type") == "knowledge-index": - knowledge_configuration = KnowledgeConfiguration(**node.get("data", {})) + knowledge_configuration = KnowledgeConfiguration.model_validate(node.get("data", {})) if not dataset: dataset = Dataset( tenant_id=account.current_tenant_id, @@ -734,35 +734,35 @@ class RagPipelineDslService: typ = node.get("data", {}).get("type") match typ: case NodeType.TOOL.value: - tool_entity = ToolNodeData(**node["data"]) + tool_entity = ToolNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_tool_dependency(tool_entity.provider_id), ) case NodeType.DATASOURCE.value: - datasource_entity = DatasourceNodeData(**node["data"]) + datasource_entity = DatasourceNodeData.model_validate(node["data"]) if datasource_entity.provider_type != "local_file": dependencies.append(datasource_entity.plugin_id) case NodeType.LLM.value: - llm_entity = LLMNodeData(**node["data"]) + llm_entity = LLMNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency(llm_entity.model.provider), ) case NodeType.QUESTION_CLASSIFIER.value: - question_classifier_entity = QuestionClassifierNodeData(**node["data"]) + question_classifier_entity = QuestionClassifierNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency( question_classifier_entity.model.provider ), ) case NodeType.PARAMETER_EXTRACTOR.value: - parameter_extractor_entity = ParameterExtractorNodeData(**node["data"]) + parameter_extractor_entity = ParameterExtractorNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency( parameter_extractor_entity.model.provider ), ) case NodeType.KNOWLEDGE_INDEX.value: - knowledge_index_entity = KnowledgeConfiguration(**node["data"]) + knowledge_index_entity = KnowledgeConfiguration.model_validate(node["data"]) if 
knowledge_index_entity.indexing_technique == "high_quality": if knowledge_index_entity.embedding_model_provider: dependencies.append( @@ -783,7 +783,7 @@ class RagPipelineDslService: ), ) case NodeType.KNOWLEDGE_RETRIEVAL.value: - knowledge_retrieval_entity = KnowledgeRetrievalNodeData(**node["data"]) + knowledge_retrieval_entity = KnowledgeRetrievalNodeData.model_validate(node["data"]) if knowledge_retrieval_entity.retrieval_mode == "multiple": if knowledge_retrieval_entity.multiple_retrieval_config: if ( @@ -873,7 +873,7 @@ class RagPipelineDslService: """ Returns the leaked dependencies in current workspace """ - dependencies = [PluginDependency(**dep) for dep in dsl_dependencies] + dependencies = [PluginDependency.model_validate(dep) for dep in dsl_dependencies] if not dependencies: return [] diff --git a/api/services/rag_pipeline/rag_pipeline_transform_service.py b/api/services/rag_pipeline/rag_pipeline_transform_service.py index 3d5a85b57f..b4425d85a6 100644 --- a/api/services/rag_pipeline/rag_pipeline_transform_service.py +++ b/api/services/rag_pipeline/rag_pipeline_transform_service.py @@ -156,13 +156,13 @@ class RagPipelineTransformService: self, dataset: Dataset, doc_form: str, indexing_technique: str | None, retrieval_model: dict, node: dict ): knowledge_configuration_dict = node.get("data", {}) - knowledge_configuration = KnowledgeConfiguration(**knowledge_configuration_dict) + knowledge_configuration = KnowledgeConfiguration.model_validate(knowledge_configuration_dict) if indexing_technique == "high_quality": knowledge_configuration.embedding_model = dataset.embedding_model knowledge_configuration.embedding_model_provider = dataset.embedding_model_provider if retrieval_model: - retrieval_setting = RetrievalSetting(**retrieval_model) + retrieval_setting = RetrievalSetting.model_validate(retrieval_model) if indexing_technique == "economy": retrieval_setting.search_method = "keyword_search" knowledge_configuration.retrieval_model = retrieval_setting diff 
--git a/api/services/tools/tools_transform_service.py b/api/services/tools/tools_transform_service.py index 6b36ed0eb7..7ae1b97b30 100644 --- a/api/services/tools/tools_transform_service.py +++ b/api/services/tools/tools_transform_service.py @@ -242,7 +242,7 @@ class ToolTransformService: is_team_authorization=db_provider.authed, server_url=db_provider.masked_server_url, tools=ToolTransformService.mcp_tool_to_user_tool( - db_provider, [MCPTool(**tool) for tool in json.loads(db_provider.tools)] + db_provider, [MCPTool.model_validate(tool) for tool in json.loads(db_provider.tools)] ), updated_at=int(db_provider.updated_at.timestamp()), label=I18nObject(en_US=db_provider.name, zh_Hans=db_provider.name), @@ -387,6 +387,7 @@ class ToolTransformService: labels=labels or [], ) else: + assert tool.operation_id return ToolApiEntity( author=tool.author, name=tool.operation_id or "", diff --git a/api/tasks/ops_trace_task.py b/api/tasks/ops_trace_task.py index 7b254ac3b5..72e3b42ca7 100644 --- a/api/tasks/ops_trace_task.py +++ b/api/tasks/ops_trace_task.py @@ -36,7 +36,7 @@ def process_trace_tasks(file_info): if trace_info.get("workflow_data"): trace_info["workflow_data"] = WorkflowRun.from_dict(data=trace_info["workflow_data"]) if trace_info.get("documents"): - trace_info["documents"] = [Document(**doc) for doc in trace_info["documents"]] + trace_info["documents"] = [Document.model_validate(doc) for doc in trace_info["documents"]] try: if trace_instance: diff --git a/api/tasks/rag_pipeline/priority_rag_pipeline_run_task.py b/api/tasks/rag_pipeline/priority_rag_pipeline_run_task.py index a2c99554f1..4171656131 100644 --- a/api/tasks/rag_pipeline/priority_rag_pipeline_run_task.py +++ b/api/tasks/rag_pipeline/priority_rag_pipeline_run_task.py @@ -79,7 +79,7 @@ def run_single_rag_pipeline_task(rag_pipeline_invoke_entity: Mapping[str, Any], # Create Flask application context for this thread with flask_app.app_context(): try: - rag_pipeline_invoke_entity_model = 
RagPipelineInvokeEntity(**rag_pipeline_invoke_entity) + rag_pipeline_invoke_entity_model = RagPipelineInvokeEntity.model_validate(rag_pipeline_invoke_entity) user_id = rag_pipeline_invoke_entity_model.user_id tenant_id = rag_pipeline_invoke_entity_model.tenant_id pipeline_id = rag_pipeline_invoke_entity_model.pipeline_id @@ -112,7 +112,7 @@ def run_single_rag_pipeline_task(rag_pipeline_invoke_entity: Mapping[str, Any], workflow_execution_id = str(uuid.uuid4()) # Create application generate entity from dict - entity = RagPipelineGenerateEntity(**application_generate_entity) + entity = RagPipelineGenerateEntity.model_validate(application_generate_entity) # Create workflow repositories session_factory = sessionmaker(bind=db.engine, expire_on_commit=False) diff --git a/api/tasks/rag_pipeline/rag_pipeline_run_task.py b/api/tasks/rag_pipeline/rag_pipeline_run_task.py index 4e00f072bf..90ebe80daf 100644 --- a/api/tasks/rag_pipeline/rag_pipeline_run_task.py +++ b/api/tasks/rag_pipeline/rag_pipeline_run_task.py @@ -100,7 +100,7 @@ def run_single_rag_pipeline_task(rag_pipeline_invoke_entity: Mapping[str, Any], # Create Flask application context for this thread with flask_app.app_context(): try: - rag_pipeline_invoke_entity_model = RagPipelineInvokeEntity(**rag_pipeline_invoke_entity) + rag_pipeline_invoke_entity_model = RagPipelineInvokeEntity.model_validate(rag_pipeline_invoke_entity) user_id = rag_pipeline_invoke_entity_model.user_id tenant_id = rag_pipeline_invoke_entity_model.tenant_id pipeline_id = rag_pipeline_invoke_entity_model.pipeline_id @@ -133,7 +133,7 @@ def run_single_rag_pipeline_task(rag_pipeline_invoke_entity: Mapping[str, Any], workflow_execution_id = str(uuid.uuid4()) # Create application generate entity from dict - entity = RagPipelineGenerateEntity(**application_generate_entity) + entity = RagPipelineGenerateEntity.model_validate(application_generate_entity) # Create workflow repositories session_factory = sessionmaker(bind=db.engine, 
expire_on_commit=False) diff --git a/api/tests/integration_tests/tools/api_tool/test_api_tool.py b/api/tests/integration_tests/tools/api_tool/test_api_tool.py index 7c1a200c8f..e637530265 100644 --- a/api/tests/integration_tests/tools/api_tool/test_api_tool.py +++ b/api/tests/integration_tests/tools/api_tool/test_api_tool.py @@ -36,7 +36,7 @@ def test_api_tool(setup_http_mock): entity=ToolEntity( identity=ToolIdentity(provider="", author="", name="", label=I18nObject(en_US="test tool")), ), - api_bundle=ApiToolBundle(**tool_bundle), + api_bundle=ApiToolBundle.model_validate(tool_bundle), runtime=ToolRuntime(tenant_id="", credentials={"auth_type": "none"}), provider_id="test_tool", ) diff --git a/api/tests/unit_tests/core/rag/datasource/vdb/milvus/test_milvus.py b/api/tests/unit_tests/core/rag/datasource/vdb/milvus/test_milvus.py index 48cc8a7e1c..fb2ddfe162 100644 --- a/api/tests/unit_tests/core/rag/datasource/vdb/milvus/test_milvus.py +++ b/api/tests/unit_tests/core/rag/datasource/vdb/milvus/test_milvus.py @@ -11,8 +11,8 @@ def test_default_value(): config = valid_config.copy() del config[key] with pytest.raises(ValidationError) as e: - MilvusConfig(**config) + MilvusConfig.model_validate(config) assert e.value.errors()[0]["msg"] == f"Value error, config MILVUS_{key.upper()} is required" - config = MilvusConfig(**valid_config) + config = MilvusConfig.model_validate(valid_config) assert config.database == "default" diff --git a/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py b/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py index b942614232..55fe62ca43 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py @@ -35,7 +35,7 @@ def list_operator_node(): "extract_by": ExtractConfig(enabled=False, serial="1"), "title": "Test Title", } - node_data = ListOperatorNodeData(**config) + node_data = ListOperatorNodeData.model_validate(config) node_config = { 
"id": "test_node_id", "data": node_data.model_dump(), diff --git a/api/tests/unit_tests/core/workflow/nodes/test_question_classifier_node.py b/api/tests/unit_tests/core/workflow/nodes/test_question_classifier_node.py index f990280c5f..47ef289ef3 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_question_classifier_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_question_classifier_node.py @@ -17,7 +17,7 @@ def test_init_question_classifier_node_data(): "vision": {"enabled": True, "configs": {"variable_selector": ["image"], "detail": "low"}}, } - node_data = QuestionClassifierNodeData(**data) + node_data = QuestionClassifierNodeData.model_validate(data) assert node_data.query_variable_selector == ["id", "name"] assert node_data.model.provider == "openai" @@ -49,7 +49,7 @@ def test_init_question_classifier_node_data_without_vision_config(): }, } - node_data = QuestionClassifierNodeData(**data) + node_data = QuestionClassifierNodeData.model_validate(data) assert node_data.query_variable_selector == ["id", "name"] assert node_data.model.provider == "openai" diff --git a/api/tests/unit_tests/core/workflow/test_system_variable.py b/api/tests/unit_tests/core/workflow/test_system_variable.py index 11d788ed79..3ae5edb383 100644 --- a/api/tests/unit_tests/core/workflow/test_system_variable.py +++ b/api/tests/unit_tests/core/workflow/test_system_variable.py @@ -46,7 +46,7 @@ class TestSystemVariableSerialization: def test_basic_deserialization(self): """Test successful deserialization from JSON structure with all fields correctly mapped.""" # Test with complete data - system_var = SystemVariable(**COMPLETE_VALID_DATA) + system_var = SystemVariable.model_validate(COMPLETE_VALID_DATA) # Verify all fields are correctly mapped assert system_var.user_id == COMPLETE_VALID_DATA["user_id"] @@ -59,7 +59,7 @@ class TestSystemVariableSerialization: assert system_var.files == [] # Test with minimal data (only required fields) - minimal_var = 
SystemVariable(**VALID_BASE_DATA) + minimal_var = SystemVariable.model_validate(VALID_BASE_DATA) assert minimal_var.user_id == VALID_BASE_DATA["user_id"] assert minimal_var.app_id == VALID_BASE_DATA["app_id"] assert minimal_var.workflow_id == VALID_BASE_DATA["workflow_id"] @@ -75,12 +75,12 @@ class TestSystemVariableSerialization: # Test workflow_run_id only (preferred alias) data_run_id = {**VALID_BASE_DATA, "workflow_run_id": workflow_id} - system_var1 = SystemVariable(**data_run_id) + system_var1 = SystemVariable.model_validate(data_run_id) assert system_var1.workflow_execution_id == workflow_id # Test workflow_execution_id only (direct field name) data_execution_id = {**VALID_BASE_DATA, "workflow_execution_id": workflow_id} - system_var2 = SystemVariable(**data_execution_id) + system_var2 = SystemVariable.model_validate(data_execution_id) assert system_var2.workflow_execution_id == workflow_id # Test both present - workflow_run_id should take precedence @@ -89,17 +89,17 @@ class TestSystemVariableSerialization: "workflow_execution_id": "should-be-ignored", "workflow_run_id": workflow_id, } - system_var3 = SystemVariable(**data_both) + system_var3 = SystemVariable.model_validate(data_both) assert system_var3.workflow_execution_id == workflow_id # Test neither present - should be None - system_var4 = SystemVariable(**VALID_BASE_DATA) + system_var4 = SystemVariable.model_validate(VALID_BASE_DATA) assert system_var4.workflow_execution_id is None def test_serialization_round_trip(self): """Test that serialize → deserialize produces the same result with alias handling.""" # Create original SystemVariable - original = SystemVariable(**COMPLETE_VALID_DATA) + original = SystemVariable.model_validate(COMPLETE_VALID_DATA) # Serialize to dict serialized = original.model_dump(mode="json") @@ -110,7 +110,7 @@ class TestSystemVariableSerialization: assert serialized["workflow_run_id"] == COMPLETE_VALID_DATA["workflow_run_id"] # Deserialize back - deserialized = 
SystemVariable(**serialized) + deserialized = SystemVariable.model_validate(serialized) # Verify all fields match after round-trip assert deserialized.user_id == original.user_id @@ -125,7 +125,7 @@ class TestSystemVariableSerialization: def test_json_round_trip(self): """Test JSON serialization/deserialization consistency with proper structure.""" # Create original SystemVariable - original = SystemVariable(**COMPLETE_VALID_DATA) + original = SystemVariable.model_validate(COMPLETE_VALID_DATA) # Serialize to JSON string json_str = original.model_dump_json() @@ -137,7 +137,7 @@ class TestSystemVariableSerialization: assert json_data["workflow_run_id"] == COMPLETE_VALID_DATA["workflow_run_id"] # Deserialize from JSON data - deserialized = SystemVariable(**json_data) + deserialized = SystemVariable.model_validate(json_data) # Verify key fields match after JSON round-trip assert deserialized.workflow_execution_id == original.workflow_execution_id @@ -149,13 +149,13 @@ class TestSystemVariableSerialization: """Test deserialization with File objects in the files field - SystemVariable specific logic.""" # Test with empty files list data_empty = {**VALID_BASE_DATA, "files": []} - system_var_empty = SystemVariable(**data_empty) + system_var_empty = SystemVariable.model_validate(data_empty) assert system_var_empty.files == [] # Test with single File object test_file = create_test_file() data_single = {**VALID_BASE_DATA, "files": [test_file]} - system_var_single = SystemVariable(**data_single) + system_var_single = SystemVariable.model_validate(data_single) assert len(system_var_single.files) == 1 assert system_var_single.files[0].filename == "test.txt" assert system_var_single.files[0].tenant_id == "test-tenant-id" @@ -179,14 +179,14 @@ class TestSystemVariableSerialization: ) data_multiple = {**VALID_BASE_DATA, "files": [file1, file2]} - system_var_multiple = SystemVariable(**data_multiple) + system_var_multiple = SystemVariable.model_validate(data_multiple) assert 
len(system_var_multiple.files) == 2 assert system_var_multiple.files[0].filename == "doc1.txt" assert system_var_multiple.files[1].filename == "image.jpg" # Verify files field serialization/deserialization serialized = system_var_multiple.model_dump(mode="json") - deserialized = SystemVariable(**serialized) + deserialized = SystemVariable.model_validate(serialized) assert len(deserialized.files) == 2 assert deserialized.files[0].filename == "doc1.txt" assert deserialized.files[1].filename == "image.jpg" @@ -197,7 +197,7 @@ class TestSystemVariableSerialization: # Create with workflow_run_id (alias) data_with_alias = {**VALID_BASE_DATA, "workflow_run_id": workflow_id} - system_var = SystemVariable(**data_with_alias) + system_var = SystemVariable.model_validate(data_with_alias) # Serialize and verify alias is used serialized = system_var.model_dump() @@ -205,7 +205,7 @@ class TestSystemVariableSerialization: assert "workflow_execution_id" not in serialized # Deserialize and verify field mapping - deserialized = SystemVariable(**serialized) + deserialized = SystemVariable.model_validate(serialized) assert deserialized.workflow_execution_id == workflow_id # Test JSON serialization path @@ -213,7 +213,7 @@ class TestSystemVariableSerialization: assert json_serialized["workflow_run_id"] == workflow_id assert "workflow_execution_id" not in json_serialized - json_deserialized = SystemVariable(**json_serialized) + json_deserialized = SystemVariable.model_validate(json_serialized) assert json_deserialized.workflow_execution_id == workflow_id def test_model_validator_serialization_logic(self): @@ -222,7 +222,7 @@ class TestSystemVariableSerialization: # Test direct instantiation with workflow_execution_id (should work) data1 = {**VALID_BASE_DATA, "workflow_execution_id": workflow_id} - system_var1 = SystemVariable(**data1) + system_var1 = SystemVariable.model_validate(data1) assert system_var1.workflow_execution_id == workflow_id # Test serialization of the above (should use 
alias) @@ -236,7 +236,7 @@ class TestSystemVariableSerialization: "workflow_execution_id": "should-be-removed", "workflow_run_id": workflow_id, } - system_var2 = SystemVariable(**data2) + system_var2 = SystemVariable.model_validate(data2) assert system_var2.workflow_execution_id == workflow_id # Verify serialization consistency diff --git a/api/tests/unit_tests/services/test_metadata_bug_complete.py b/api/tests/unit_tests/services/test_metadata_bug_complete.py index 0ff1edc950..31fe9b2868 100644 --- a/api/tests/unit_tests/services/test_metadata_bug_complete.py +++ b/api/tests/unit_tests/services/test_metadata_bug_complete.py @@ -118,7 +118,7 @@ class TestMetadataBugCompleteValidation: # But would crash when trying to create MetadataArgs with pytest.raises((ValueError, TypeError)): - MetadataArgs(**args) + MetadataArgs.model_validate(args) def test_7_end_to_end_validation_layers(self): """Test all validation layers work together correctly.""" @@ -131,7 +131,7 @@ class TestMetadataBugCompleteValidation: valid_data = {"type": "string", "name": "test_metadata"} # Should create valid Pydantic object - metadata_args = MetadataArgs(**valid_data) + metadata_args = MetadataArgs.model_validate(valid_data) assert metadata_args.type == "string" assert metadata_args.name == "test_metadata" diff --git a/api/tests/unit_tests/services/test_metadata_nullable_bug.py b/api/tests/unit_tests/services/test_metadata_nullable_bug.py index d151100cf3..c8cd7025c2 100644 --- a/api/tests/unit_tests/services/test_metadata_nullable_bug.py +++ b/api/tests/unit_tests/services/test_metadata_nullable_bug.py @@ -76,7 +76,7 @@ class TestMetadataNullableBug: # Step 2: Try to create MetadataArgs with None values # This should fail at Pydantic validation level with pytest.raises((ValueError, TypeError)): - metadata_args = MetadataArgs(**args) + metadata_args = MetadataArgs.model_validate(args) # Step 3: If we bypass Pydantic (simulating the bug scenario) # Move this outside the request context to avoid 
Flask-Login issues From 94a07706ec8e2114cedf2258e8f851afbe79fd3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=AF=97=E6=B5=93?= <844670992@qq.com> Date: Fri, 10 Oct 2025 16:32:09 +0800 Subject: [PATCH 76/82] fix: restore None guards for _environment_variables/_conversation_variables getters (#25633) --- api/models/workflow.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/api/models/workflow.py b/api/models/workflow.py index e61005953e..877f571f25 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -360,7 +360,9 @@ class Workflow(Base): @property def environment_variables(self) -> Sequence[StringVariable | IntegerVariable | FloatVariable | SecretVariable]: - # _environment_variables is guaranteed to be non-None due to server_default="{}" + # TODO: find some way to init `self._environment_variables` when instance created. + if self._environment_variables is None: + self._environment_variables = "{}" # Use workflow.tenant_id to avoid relying on request user in background threads tenant_id = self.tenant_id @@ -444,7 +446,9 @@ class Workflow(Base): @property def conversation_variables(self) -> Sequence[Variable]: - # _conversation_variables is guaranteed to be non-None due to server_default="{}" + # TODO: find some way to init `self._conversation_variables` when instance created. 
+ if self._conversation_variables is None: + self._conversation_variables = "{}" variables_dict: dict[str, Any] = json.loads(self._conversation_variables) results = [variable_factory.build_conversation_variable_from_mapping(v) for v in variables_dict.values()] From a90b60c36fe257bd0ced58c98dcf8a960234cc64 Mon Sep 17 00:00:00 2001 From: znn Date: Fri, 10 Oct 2025 14:30:03 +0530 Subject: [PATCH 77/82] removing horus eye and adding mcp icon (#25323) Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> Co-authored-by: crazywoola <427733928@qq.com> --- .../plugins/card/base/card-icon.tsx | 3 +++ web/app/components/tools/mcp/modal.tsx | 5 ++++- web/utils/mcp.ts | 22 +++++++++++++++++++ 3 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 web/utils/mcp.ts diff --git a/web/app/components/plugins/card/base/card-icon.tsx b/web/app/components/plugins/card/base/card-icon.tsx index 7f7468ece2..b4c052c13c 100644 --- a/web/app/components/plugins/card/base/card-icon.tsx +++ b/web/app/components/plugins/card/base/card-icon.tsx @@ -1,6 +1,8 @@ import { RiCheckLine, RiCloseLine } from '@remixicon/react' +import { Mcp } from '@/app/components/base/icons/src/vender/other' import AppIcon from '@/app/components/base/app-icon' import cn from '@/utils/classnames' +import { shouldUseMcpIcon } from '@/utils/mcp' const iconSizeMap = { xs: 'w-4 h-4 text-base', @@ -35,6 +37,7 @@ const Icon = ({ icon={src.content} background={src.background} className='rounded-md' + innerIcon={shouldUseMcpIcon(src) ? : undefined} />
) diff --git a/web/app/components/tools/mcp/modal.tsx b/web/app/components/tools/mcp/modal.tsx index 1a12b3b3e9..1d888c57e8 100644 --- a/web/app/components/tools/mcp/modal.tsx +++ b/web/app/components/tools/mcp/modal.tsx @@ -3,6 +3,7 @@ import React, { useRef, useState } from 'react' import { useTranslation } from 'react-i18next' import { getDomain } from 'tldts' import { RiCloseLine, RiEditLine } from '@remixicon/react' +import { Mcp } from '@/app/components/base/icons/src/vender/other' import AppIconPicker from '@/app/components/base/app-icon-picker' import type { AppIconSelection } from '@/app/components/base/app-icon-picker' import AppIcon from '@/app/components/base/app-icon' @@ -17,6 +18,7 @@ import Toast from '@/app/components/base/toast' import { uploadRemoteFileInfo } from '@/service/common' import cn from '@/utils/classnames' import { useHover } from 'ahooks' +import { shouldUseMcpIconForAppIcon } from '@/utils/mcp' export type DuplicateAppModalProps = { data?: ToolWithProvider @@ -35,7 +37,7 @@ export type DuplicateAppModalProps = { onHide: () => void } -const DEFAULT_ICON = { type: 'emoji', icon: '🧿', background: '#EFF1F5' } +const DEFAULT_ICON = { type: 'emoji', icon: '🔗', background: '#6366F1' } const extractFileId = (url: string) => { const match = url.match(/files\/(.+?)\/file-preview/) return match ? match[1] : null @@ -208,6 +210,7 @@ const MCPModal = ({ icon={appIcon.type === 'emoji' ? appIcon.icon : appIcon.fileId} background={appIcon.type === 'emoji' ? appIcon.background : undefined} imageUrl={appIcon.type === 'image' ? appIcon.url : undefined} + innerIcon={shouldUseMcpIconForAppIcon(appIcon.type, appIcon.type === 'emoji' ? appIcon.icon : '') ? 
: undefined} size='xxl' className='relative cursor-pointer rounded-2xl' coverElement={ diff --git a/web/utils/mcp.ts b/web/utils/mcp.ts new file mode 100644 index 0000000000..dcbb63ee8a --- /dev/null +++ b/web/utils/mcp.ts @@ -0,0 +1,22 @@ +/** + * MCP (Model Context Protocol) utility functions + */ + +/** + * Determines if the MCP icon should be used based on the icon source + * @param src - The icon source, can be a string URL or an object with content and background + * @returns true if the MCP icon should be used (when it's an emoji object with 🔗 content) + */ +export const shouldUseMcpIcon = (src: any): boolean => { + return typeof src === 'object' && src?.content === '🔗' +} + +/** + * Checks if an app icon should use the MCP icon + * @param iconType - The type of icon ('emoji' | 'image') + * @param icon - The icon content (emoji or file ID) + * @returns true if the MCP icon should be used + */ +export const shouldUseMcpIconForAppIcon = (iconType: string, icon: string): boolean => { + return iconType === 'emoji' && icon === '🔗' +} From 65b832c46c5227ab89ceaba86fade66b71998dd4 Mon Sep 17 00:00:00 2001 From: znn Date: Fri, 10 Oct 2025 14:37:25 +0530 Subject: [PATCH 78/82] pan and zoom during workflow execution (#24254) --- web/app/components/workflow/index.tsx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/web/app/components/workflow/index.tsx b/web/app/components/workflow/index.tsx index 75c4d51390..b289cafefd 100644 --- a/web/app/components/workflow/index.tsx +++ b/web/app/components/workflow/index.tsx @@ -412,10 +412,10 @@ export const Workflow: FC = memo(({ nodesFocusable={!nodesReadOnly} edgesFocusable={!nodesReadOnly} panOnScroll={false} - panOnDrag={controlMode === ControlMode.Hand && !workflowReadOnly} - zoomOnPinch={!workflowReadOnly} - zoomOnScroll={!workflowReadOnly} - zoomOnDoubleClick={!workflowReadOnly} + panOnDrag={controlMode === ControlMode.Hand} + zoomOnPinch={true} + zoomOnScroll={true} + zoomOnDoubleClick={true} 
isValidConnection={isValidConnection} selectionKeyCode={null} selectionMode={SelectionMode.Partial} From d0dd81cf84289fb4df6893deba86a8ff8f73423a Mon Sep 17 00:00:00 2001 From: Bowen Liang Date: Fri, 10 Oct 2025 18:10:23 +0800 Subject: [PATCH 79/82] chore: bump ruff to 0.14 (#26063) --- api/.ruff.toml | 1 - .../dataset_multi_retriever_tool.py | 2 +- api/pyproject.toml | 2 +- api/uv.lock | 42 +++++++++---------- 4 files changed, 23 insertions(+), 24 deletions(-) diff --git a/api/.ruff.toml b/api/.ruff.toml index 643bc063a1..5a29e1d8fa 100644 --- a/api/.ruff.toml +++ b/api/.ruff.toml @@ -81,7 +81,6 @@ ignore = [ "SIM113", # enumerate-for-loop "SIM117", # multiple-with-statements "SIM210", # if-expr-with-true-false - "UP038", # deprecated and not recommended by Ruff, https://docs.astral.sh/ruff/rules/non-pep604-isinstance/ ] [lint.per-file-ignores] diff --git a/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py b/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py index 75c0c6738e..cce5ec6b1b 100644 --- a/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py +++ b/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py @@ -126,7 +126,7 @@ class DatasetMultiRetrieverTool(DatasetRetrieverBaseTool): data_source_type=document.data_source_type, segment_id=segment.id, retriever_from=self.retriever_from, - score=document_score_list.get(segment.index_node_id, None), + score=document_score_list.get(segment.index_node_id), doc_metadata=document.doc_metadata, ) diff --git a/api/pyproject.toml b/api/pyproject.toml index 1f51d60098..e2a50a43f6 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -110,7 +110,7 @@ dev = [ "lxml-stubs~=0.5.1", "ty~=0.0.1a19", "basedpyright~=1.31.0", - "ruff~=0.12.3", + "ruff~=0.14.0", "pytest~=8.3.2", "pytest-benchmark~=4.0.0", "pytest-cov~=4.1.0", diff --git a/api/uv.lock b/api/uv.lock index 21d1f17bad..43db17b06f 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1571,7 
+1571,7 @@ dev = [ { name = "pytest-cov", specifier = "~=4.1.0" }, { name = "pytest-env", specifier = "~=1.1.3" }, { name = "pytest-mock", specifier = "~=3.14.0" }, - { name = "ruff", specifier = "~=0.12.3" }, + { name = "ruff", specifier = "~=0.14.0" }, { name = "scipy-stubs", specifier = ">=1.15.3.0" }, { name = "sseclient-py", specifier = ">=1.8.0" }, { name = "testcontainers", specifier = "~=4.10.0" }, @@ -5461,28 +5461,28 @@ wheels = [ [[package]] name = "ruff" -version = "0.12.12" +version = "0.14.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a8/f0/e0965dd709b8cabe6356811c0ee8c096806bb57d20b5019eb4e48a117410/ruff-0.12.12.tar.gz", hash = "sha256:b86cd3415dbe31b3b46a71c598f4c4b2f550346d1ccf6326b347cc0c8fd063d6", size = 5359915, upload-time = "2025-09-04T16:50:18.273Z" } +sdist = { url = "https://files.pythonhosted.org/packages/41/b9/9bd84453ed6dd04688de9b3f3a4146a1698e8faae2ceeccce4e14c67ae17/ruff-0.14.0.tar.gz", hash = "sha256:62ec8969b7510f77945df916de15da55311fade8d6050995ff7f680afe582c57", size = 5452071, upload-time = "2025-10-07T18:21:55.763Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/09/79/8d3d687224d88367b51c7974cec1040c4b015772bfbeffac95face14c04a/ruff-0.12.12-py3-none-linux_armv6l.whl", hash = "sha256:de1c4b916d98ab289818e55ce481e2cacfaad7710b01d1f990c497edf217dafc", size = 12116602, upload-time = "2025-09-04T16:49:18.892Z" }, - { url = "https://files.pythonhosted.org/packages/c3/c3/6e599657fe192462f94861a09aae935b869aea8a1da07f47d6eae471397c/ruff-0.12.12-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:7acd6045e87fac75a0b0cdedacf9ab3e1ad9d929d149785903cff9bb69ad9727", size = 12868393, upload-time = "2025-09-04T16:49:23.043Z" }, - { url = "https://files.pythonhosted.org/packages/e8/d2/9e3e40d399abc95336b1843f52fc0daaceb672d0e3c9290a28ff1a96f79d/ruff-0.12.12-py3-none-macosx_11_0_arm64.whl", hash = 
"sha256:abf4073688d7d6da16611f2f126be86523a8ec4343d15d276c614bda8ec44edb", size = 12036967, upload-time = "2025-09-04T16:49:26.04Z" }, - { url = "https://files.pythonhosted.org/packages/e9/03/6816b2ed08836be272e87107d905f0908be5b4a40c14bfc91043e76631b8/ruff-0.12.12-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:968e77094b1d7a576992ac078557d1439df678a34c6fe02fd979f973af167577", size = 12276038, upload-time = "2025-09-04T16:49:29.056Z" }, - { url = "https://files.pythonhosted.org/packages/9f/d5/707b92a61310edf358a389477eabd8af68f375c0ef858194be97ca5b6069/ruff-0.12.12-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42a67d16e5b1ffc6d21c5f67851e0e769517fb57a8ebad1d0781b30888aa704e", size = 11901110, upload-time = "2025-09-04T16:49:32.07Z" }, - { url = "https://files.pythonhosted.org/packages/9d/3d/f8b1038f4b9822e26ec3d5b49cf2bc313e3c1564cceb4c1a42820bf74853/ruff-0.12.12-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b216ec0a0674e4b1214dcc998a5088e54eaf39417327b19ffefba1c4a1e4971e", size = 13668352, upload-time = "2025-09-04T16:49:35.148Z" }, - { url = "https://files.pythonhosted.org/packages/98/0e/91421368ae6c4f3765dd41a150f760c5f725516028a6be30e58255e3c668/ruff-0.12.12-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:59f909c0fdd8f1dcdbfed0b9569b8bf428cf144bec87d9de298dcd4723f5bee8", size = 14638365, upload-time = "2025-09-04T16:49:38.892Z" }, - { url = "https://files.pythonhosted.org/packages/74/5d/88f3f06a142f58ecc8ecb0c2fe0b82343e2a2b04dcd098809f717cf74b6c/ruff-0.12.12-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ac93d87047e765336f0c18eacad51dad0c1c33c9df7484c40f98e1d773876f5", size = 14060812, upload-time = "2025-09-04T16:49:42.732Z" }, - { url = "https://files.pythonhosted.org/packages/13/fc/8962e7ddd2e81863d5c92400820f650b86f97ff919c59836fbc4c1a6d84c/ruff-0.12.12-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:01543c137fd3650d322922e8b14cc133b8ea734617c4891c5a9fccf4bfc9aa92", size = 13050208, upload-time = "2025-09-04T16:49:46.434Z" }, - { url = "https://files.pythonhosted.org/packages/53/06/8deb52d48a9a624fd37390555d9589e719eac568c020b27e96eed671f25f/ruff-0.12.12-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2afc2fa864197634e549d87fb1e7b6feb01df0a80fd510d6489e1ce8c0b1cc45", size = 13311444, upload-time = "2025-09-04T16:49:49.931Z" }, - { url = "https://files.pythonhosted.org/packages/2a/81/de5a29af7eb8f341f8140867ffb93f82e4fde7256dadee79016ac87c2716/ruff-0.12.12-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:0c0945246f5ad776cb8925e36af2438e66188d2b57d9cf2eed2c382c58b371e5", size = 13279474, upload-time = "2025-09-04T16:49:53.465Z" }, - { url = "https://files.pythonhosted.org/packages/7f/14/d9577fdeaf791737ada1b4f5c6b59c21c3326f3f683229096cccd7674e0c/ruff-0.12.12-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:a0fbafe8c58e37aae28b84a80ba1817f2ea552e9450156018a478bf1fa80f4e4", size = 12070204, upload-time = "2025-09-04T16:49:56.882Z" }, - { url = "https://files.pythonhosted.org/packages/77/04/a910078284b47fad54506dc0af13839c418ff704e341c176f64e1127e461/ruff-0.12.12-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:b9c456fb2fc8e1282affa932c9e40f5ec31ec9cbb66751a316bd131273b57c23", size = 11880347, upload-time = "2025-09-04T16:49:59.729Z" }, - { url = "https://files.pythonhosted.org/packages/df/58/30185fcb0e89f05e7ea82e5817b47798f7fa7179863f9d9ba6fd4fe1b098/ruff-0.12.12-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5f12856123b0ad0147d90b3961f5c90e7427f9acd4b40050705499c98983f489", size = 12891844, upload-time = "2025-09-04T16:50:02.591Z" }, - { url = "https://files.pythonhosted.org/packages/21/9c/28a8dacce4855e6703dcb8cdf6c1705d0b23dd01d60150786cd55aa93b16/ruff-0.12.12-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:26a1b5a2bf7dd2c47e3b46d077cd9c0fc3b93e6c6cc9ed750bd312ae9dc302ee", size = 13360687, upload-time = 
"2025-09-04T16:50:05.8Z" }, - { url = "https://files.pythonhosted.org/packages/c8/fa/05b6428a008e60f79546c943e54068316f32ec8ab5c4f73e4563934fbdc7/ruff-0.12.12-py3-none-win32.whl", hash = "sha256:173be2bfc142af07a01e3a759aba6f7791aa47acf3604f610b1c36db888df7b1", size = 12052870, upload-time = "2025-09-04T16:50:09.121Z" }, - { url = "https://files.pythonhosted.org/packages/85/60/d1e335417804df452589271818749d061b22772b87efda88354cf35cdb7a/ruff-0.12.12-py3-none-win_amd64.whl", hash = "sha256:e99620bf01884e5f38611934c09dd194eb665b0109104acae3ba6102b600fd0d", size = 13178016, upload-time = "2025-09-04T16:50:12.559Z" }, - { url = "https://files.pythonhosted.org/packages/28/7e/61c42657f6e4614a4258f1c3b0c5b93adc4d1f8575f5229d1906b483099b/ruff-0.12.12-py3-none-win_arm64.whl", hash = "sha256:2a8199cab4ce4d72d158319b63370abf60991495fb733db96cd923a34c52d093", size = 12256762, upload-time = "2025-09-04T16:50:15.737Z" }, + { url = "https://files.pythonhosted.org/packages/3a/4e/79d463a5f80654e93fa653ebfb98e0becc3f0e7cf6219c9ddedf1e197072/ruff-0.14.0-py3-none-linux_armv6l.whl", hash = "sha256:58e15bffa7054299becf4bab8a1187062c6f8cafbe9f6e39e0d5aface455d6b3", size = 12494532, upload-time = "2025-10-07T18:21:00.373Z" }, + { url = "https://files.pythonhosted.org/packages/ee/40/e2392f445ed8e02aa6105d49db4bfff01957379064c30f4811c3bf38aece/ruff-0.14.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:838d1b065f4df676b7c9957992f2304e41ead7a50a568185efd404297d5701e8", size = 13160768, upload-time = "2025-10-07T18:21:04.73Z" }, + { url = "https://files.pythonhosted.org/packages/75/da/2a656ea7c6b9bd14c7209918268dd40e1e6cea65f4bb9880eaaa43b055cd/ruff-0.14.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:703799d059ba50f745605b04638fa7e9682cc3da084b2092feee63500ff3d9b8", size = 12363376, upload-time = "2025-10-07T18:21:07.833Z" }, + { url = 
"https://files.pythonhosted.org/packages/42/e2/1ffef5a1875add82416ff388fcb7ea8b22a53be67a638487937aea81af27/ruff-0.14.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ba9a8925e90f861502f7d974cc60e18ca29c72bb0ee8bfeabb6ade35a3abde7", size = 12608055, upload-time = "2025-10-07T18:21:10.72Z" }, + { url = "https://files.pythonhosted.org/packages/4a/32/986725199d7cee510d9f1dfdf95bf1efc5fa9dd714d0d85c1fb1f6be3bc3/ruff-0.14.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e41f785498bd200ffc276eb9e1570c019c1d907b07cfb081092c8ad51975bbe7", size = 12318544, upload-time = "2025-10-07T18:21:13.741Z" }, + { url = "https://files.pythonhosted.org/packages/9a/ed/4969cefd53315164c94eaf4da7cfba1f267dc275b0abdd593d11c90829a3/ruff-0.14.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30a58c087aef4584c193aebf2700f0fbcfc1e77b89c7385e3139956fa90434e2", size = 14001280, upload-time = "2025-10-07T18:21:16.411Z" }, + { url = "https://files.pythonhosted.org/packages/ab/ad/96c1fc9f8854c37681c9613d825925c7f24ca1acfc62a4eb3896b50bacd2/ruff-0.14.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:f8d07350bc7af0a5ce8812b7d5c1a7293cf02476752f23fdfc500d24b79b783c", size = 15027286, upload-time = "2025-10-07T18:21:19.577Z" }, + { url = "https://files.pythonhosted.org/packages/b3/00/1426978f97df4fe331074baf69615f579dc4e7c37bb4c6f57c2aad80c87f/ruff-0.14.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eec3bbbf3a7d5482b5c1f42d5fc972774d71d107d447919fca620b0be3e3b75e", size = 14451506, upload-time = "2025-10-07T18:21:22.779Z" }, + { url = "https://files.pythonhosted.org/packages/58/d5/9c1cea6e493c0cf0647674cca26b579ea9d2a213b74b5c195fbeb9678e15/ruff-0.14.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16b68e183a0e28e5c176d51004aaa40559e8f90065a10a559176713fcf435206", size = 13437384, upload-time = "2025-10-07T18:21:25.758Z" }, + { url = 
"https://files.pythonhosted.org/packages/29/b4/4cd6a4331e999fc05d9d77729c95503f99eae3ba1160469f2b64866964e3/ruff-0.14.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb732d17db2e945cfcbbc52af0143eda1da36ca8ae25083dd4f66f1542fdf82e", size = 13447976, upload-time = "2025-10-07T18:21:28.83Z" }, + { url = "https://files.pythonhosted.org/packages/3b/c0/ac42f546d07e4f49f62332576cb845d45c67cf5610d1851254e341d563b6/ruff-0.14.0-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:c958f66ab884b7873e72df38dcabee03d556a8f2ee1b8538ee1c2bbd619883dd", size = 13682850, upload-time = "2025-10-07T18:21:31.842Z" }, + { url = "https://files.pythonhosted.org/packages/5f/c4/4b0c9bcadd45b4c29fe1af9c5d1dc0ca87b4021665dfbe1c4688d407aa20/ruff-0.14.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:7eb0499a2e01f6e0c285afc5bac43ab380cbfc17cd43a2e1dd10ec97d6f2c42d", size = 12449825, upload-time = "2025-10-07T18:21:35.074Z" }, + { url = "https://files.pythonhosted.org/packages/4b/a8/e2e76288e6c16540fa820d148d83e55f15e994d852485f221b9524514730/ruff-0.14.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:4c63b2d99fafa05efca0ab198fd48fa6030d57e4423df3f18e03aa62518c565f", size = 12272599, upload-time = "2025-10-07T18:21:38.08Z" }, + { url = "https://files.pythonhosted.org/packages/18/14/e2815d8eff847391af632b22422b8207704222ff575dec8d044f9ab779b2/ruff-0.14.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:668fce701b7a222f3f5327f86909db2bbe99c30877c8001ff934c5413812ac02", size = 13193828, upload-time = "2025-10-07T18:21:41.216Z" }, + { url = "https://files.pythonhosted.org/packages/44/c6/61ccc2987cf0aecc588ff8f3212dea64840770e60d78f5606cd7dc34de32/ruff-0.14.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a86bf575e05cb68dcb34e4c7dfe1064d44d3f0c04bbc0491949092192b515296", size = 13628617, upload-time = "2025-10-07T18:21:44.04Z" }, + { url = 
"https://files.pythonhosted.org/packages/73/e6/03b882225a1b0627e75339b420883dc3c90707a8917d2284abef7a58d317/ruff-0.14.0-py3-none-win32.whl", hash = "sha256:7450a243d7125d1c032cb4b93d9625dea46c8c42b4f06c6b709baac168e10543", size = 12367872, upload-time = "2025-10-07T18:21:46.67Z" }, + { url = "https://files.pythonhosted.org/packages/41/77/56cf9cf01ea0bfcc662de72540812e5ba8e9563f33ef3d37ab2174892c47/ruff-0.14.0-py3-none-win_amd64.whl", hash = "sha256:ea95da28cd874c4d9c922b39381cbd69cb7e7b49c21b8152b014bd4f52acddc2", size = 13464628, upload-time = "2025-10-07T18:21:50.318Z" }, + { url = "https://files.pythonhosted.org/packages/c6/2a/65880dfd0e13f7f13a775998f34703674a4554906167dce02daf7865b954/ruff-0.14.0-py3-none-win_arm64.whl", hash = "sha256:f42c9495f5c13ff841b1da4cb3c2a42075409592825dada7c5885c2c844ac730", size = 12565142, upload-time = "2025-10-07T18:21:53.577Z" }, ] [[package]] From 61d9428064fbc037f5ce1b6baba977748f170936 Mon Sep 17 00:00:00 2001 From: Stream Date: Fri, 10 Oct 2025 18:47:16 +0800 Subject: [PATCH 80/82] refactor: fix basedpyright error --- api/core/llm_generator/llm_generator.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index ec6c537af0..605767bda7 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -589,12 +589,9 @@ class LLMGenerator: "instruction": filled_instruction, } ) - llm_result = cast( - LLMResult, - model_instance.invoke_llm( - prompt_messages=[UserPromptMessage(content=formatted_prompt)], - model_parameters=memory_spec.model.completion_params, - stream=False, - ) + llm_result = model_instance.invoke_llm( + prompt_messages=[UserPromptMessage(content=formatted_prompt)], + model_parameters=memory_spec.model.completion_params, + stream=False, ) return llm_result.message.get_text_content() From a1e3a72274fc87bde93687d9e2d040b883503755 Mon Sep 17 00:00:00 2001 From: Stream 
Date: Sat, 11 Oct 2025 15:38:05 +0800 Subject: [PATCH 81/82] chore: add database migration file --- ...d00b2b40ea3e_add_chatflow_memory_tables.py | 104 ++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 api/migrations/versions/2025_10_11_1529-d00b2b40ea3e_add_chatflow_memory_tables.py diff --git a/api/migrations/versions/2025_10_11_1529-d00b2b40ea3e_add_chatflow_memory_tables.py b/api/migrations/versions/2025_10_11_1529-d00b2b40ea3e_add_chatflow_memory_tables.py new file mode 100644 index 0000000000..d1c39943e7 --- /dev/null +++ b/api/migrations/versions/2025_10_11_1529-d00b2b40ea3e_add_chatflow_memory_tables.py @@ -0,0 +1,104 @@ +"""add_chatflow_memory_tables + +Revision ID: d00b2b40ea3e +Revises: 68519ad5cd18 +Create Date: 2025-10-11 15:29:20.244675 + +""" +from alembic import op +import models as models +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = 'd00b2b40ea3e' +down_revision = '68519ad5cd18' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table('chatflow_conversations', + sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('tenant_id', models.types.StringUUID(), nullable=False), + sa.Column('app_id', models.types.StringUUID(), nullable=False), + sa.Column('node_id', sa.Text(), nullable=True), + sa.Column('original_conversation_id', models.types.StringUUID(), nullable=True), + sa.Column('conversation_metadata', sa.Text(), nullable=False), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.PrimaryKeyConstraint('id', name='chatflow_conversations_pkey') + ) + with op.batch_alter_table('chatflow_conversations', schema=None) as batch_op: + batch_op.create_index('chatflow_conversations_original_conversation_id_idx', ['tenant_id', 'app_id', 'node_id', 'original_conversation_id'], unique=False) + + op.create_table('chatflow_memory_variables', + sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('tenant_id', models.types.StringUUID(), nullable=False), + sa.Column('app_id', models.types.StringUUID(), nullable=True), + sa.Column('conversation_id', models.types.StringUUID(), nullable=True), + sa.Column('node_id', sa.Text(), nullable=True), + sa.Column('memory_id', sa.Text(), nullable=False), + sa.Column('value', sa.Text(), nullable=False), + sa.Column('name', sa.Text(), nullable=False), + sa.Column('scope', sa.String(length=10), nullable=False), + sa.Column('term', sa.String(length=20), nullable=False), + sa.Column('version', sa.Integer(), nullable=False), + sa.Column('created_by_role', sa.String(length=20), nullable=False), + sa.Column('created_by', models.types.StringUUID(), nullable=False), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + 
sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.PrimaryKeyConstraint('id', name='chatflow_memory_variables_pkey') + ) + with op.batch_alter_table('chatflow_memory_variables', schema=None) as batch_op: + batch_op.create_index('chatflow_memory_variables_memory_id_idx', ['tenant_id', 'app_id', 'node_id', 'memory_id'], unique=False) + + op.create_table('chatflow_messages', + sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('conversation_id', models.types.StringUUID(), nullable=False), + sa.Column('index', sa.Integer(), nullable=False), + sa.Column('version', sa.Integer(), nullable=False), + sa.Column('data', sa.Text(), nullable=False), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.PrimaryKeyConstraint('id', name='chatflow_messages_pkey') + ) + with op.batch_alter_table('chatflow_messages', schema=None) as batch_op: + batch_op.create_index('chatflow_messages_version_idx', ['conversation_id', 'index', 'version'], unique=False) + + with op.batch_alter_table('datasource_providers', schema=None) as batch_op: + batch_op.alter_column('avatar_url', + existing_type=sa.TEXT(), + type_=sa.String(length=255), + existing_nullable=True) + + with op.batch_alter_table('providers', schema=None) as batch_op: + batch_op.drop_column('credential_status') + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table('providers', schema=None) as batch_op: + batch_op.add_column(sa.Column('credential_status', sa.VARCHAR(length=20), server_default=sa.text("'active'::character varying"), autoincrement=False, nullable=True)) + + with op.batch_alter_table('datasource_providers', schema=None) as batch_op: + batch_op.alter_column('avatar_url', + existing_type=sa.String(length=255), + type_=sa.TEXT(), + existing_nullable=True) + + with op.batch_alter_table('chatflow_messages', schema=None) as batch_op: + batch_op.drop_index('chatflow_messages_version_idx') + + op.drop_table('chatflow_messages') + with op.batch_alter_table('chatflow_memory_variables', schema=None) as batch_op: + batch_op.drop_index('chatflow_memory_variables_memory_id_idx') + + op.drop_table('chatflow_memory_variables') + with op.batch_alter_table('chatflow_conversations', schema=None) as batch_op: + batch_op.drop_index('chatflow_conversations_original_conversation_id_idx') + + op.drop_table('chatflow_conversations') + # ### end Alembic commands ### From e7d63a9fa31503710e873a98a213fe5e39f3d62e Mon Sep 17 00:00:00 2001 From: Stream Date: Sat, 11 Oct 2025 16:15:32 +0800 Subject: [PATCH 82/82] fix: fix circular ref --- api/core/workflow/nodes/llm/node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 11d592b24e..6483a5c593 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -77,7 +77,6 @@ from core.workflow.nodes.base.node import Node from core.workflow.nodes.base.variable_template_parser import VariableTemplateParser from models import UserFrom, Workflow from models.engine import db -from services.chatflow_memory_service import ChatflowMemoryService from . 
import llm_utils from .entities import ( @@ -1251,6 +1250,7 @@ class LLMNode(Node): if memory_block_spec and memory_block_spec.scope == MemoryScope.NODE: is_draft = (self.invoke_from == InvokeFrom.DEBUGGER) + from services.chatflow_memory_service import ChatflowMemoryService ChatflowMemoryService.update_node_memory_if_needed( tenant_id=self.tenant_id, app_id=self.app_id,