From 3b868a1ceccd83e477be3c6b822d15adaa6381b3 Mon Sep 17 00:00:00 2001
From: Stream
Date: Tue, 26 Aug 2025 11:29:00 +0800
Subject: [PATCH] feat: integrate VariablePool into memory update process

---
 api/core/app/apps/advanced_chat/app_runner.py |  5 +++--
 api/core/llm_generator/llm_generator.py       |  5 ++++-
 api/services/chatflow_memory_service.py       | 14 +++++++++++---
 3 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py
index cbb7ad4fd6..f2340386cd 100644
--- a/api/core/app/apps/advanced_chat/app_runner.py
+++ b/api/core/app/apps/advanced_chat/app_runner.py
@@ -187,7 +187,7 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner):
             self._handle_event(workflow_entry, event)
 
         try:
-            self._check_app_memory_updates()
+            self._check_app_memory_updates(variable_pool)
         except Exception as e:
             logger.exception("Failed to check app memory updates", exc_info=e)
 
@@ -452,11 +452,12 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner):
             tenant_id=self._workflow.tenant_id
         )
 
-    def _check_app_memory_updates(self):
+    def _check_app_memory_updates(self, variable_pool: VariablePool):
         is_draft = (self.application_generate_entity.invoke_from == InvokeFrom.DEBUGGER)
         ChatflowMemoryService.update_app_memory_if_needed(
             workflow=self._workflow,
             conversation_id=self.conversation.id,
+            variable_pool=variable_pool,
             is_draft=is_draft
         )
 
diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py
index b7c5b0993a..88479a8502 100644
--- a/api/core/llm_generator/llm_generator.py
+++ b/api/core/llm_generator/llm_generator.py
@@ -29,6 +29,7 @@ from core.ops.entities.trace_entity import TraceTaskName
 from core.ops.ops_trace_manager import TraceQueueManager, TraceTask
 from core.ops.utils import measure_time
 from core.prompt.utils.prompt_template_parser import PromptTemplateParser
+from core.workflow.entities.variable_pool import VariablePool
 from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey
 from core.workflow.graph_engine.entities.event import AgentLogEvent
 from models import App, Message, WorkflowNodeExecutionModel, db
@@ -579,6 +580,7 @@ class LLMGenerator:
     def update_memory_block(
         tenant_id: str,
         visible_history: Sequence[tuple[str, str]],
+        variable_pool: VariablePool,
         memory_block: MemoryBlock,
         memory_spec: MemoryBlockSpec
     ) -> str:
@@ -591,11 +593,12 @@ class LLMGenerator:
         formatted_history = ""
         for sender, message in visible_history:
             formatted_history += f"{sender}: {message}\n"
+        filled_instruction = variable_pool.convert_template(memory_spec.instruction).text
         formatted_prompt = PromptTemplateParser(MEMORY_UPDATE_PROMPT).format(
             inputs={
                 "formatted_history": formatted_history,
                 "current_value": memory_block.value,
-                "instruction": memory_spec.instruction,
+                "instruction": filled_instruction,
             }
         )
         llm_result = cast(
diff --git a/api/services/chatflow_memory_service.py b/api/services/chatflow_memory_service.py
index f979acf234..eda135a6e1 100644
--- a/api/services/chatflow_memory_service.py
+++ b/api/services/chatflow_memory_service.py
@@ -184,6 +184,7 @@ class ChatflowMemoryService:
     def update_app_memory_if_needed(
         workflow: Workflow,
         conversation_id: str,
+        variable_pool: VariablePool,
         is_draft: bool
     ):
         visible_messages = ChatflowHistoryService.get_visible_chat_history(
@@ -218,6 +219,7 @@ class ChatflowMemoryService:
                 ChatflowMemoryService._app_submit_async_memory_update(
                     block=memory_block,
                     is_draft=is_draft,
+                    variable_pool=variable_pool,
                     visible_messages=visible_messages
                 )
 
@@ -228,7 +230,8 @@ class ChatflowMemoryService:
                 is_draft=is_draft,
                 conversation_id=conversation_id,
                 app_id=workflow.app_id,
-                visible_messages=visible_messages
+                visible_messages=visible_messages,
+                variable_pool=variable_pool
             )
 
     @staticmethod
@@ -350,6 +353,7 @@ class ChatflowMemoryService:
     def _app_submit_async_memory_update(
         block: MemoryBlock,
         visible_messages: Sequence[PromptMessage],
+        variable_pool: VariablePool,
         is_draft: bool
     ):
         thread = threading.Thread(
@@ -357,7 +361,7 @@ class ChatflowMemoryService:
             kwargs={
                 'memory_block': block,
                 'visible_messages': visible_messages,
-                'variable_pool': VariablePool(),
+                'variable_pool': variable_pool,
                 'is_draft': is_draft
             },
         )
@@ -369,6 +373,7 @@ class ChatflowMemoryService:
         app_id: str,
         conversation_id: str,
         visible_messages: Sequence[PromptMessage],
+        variable_pool: VariablePool,
         is_draft: bool
     ):
         """Submit sync memory batch update task"""
@@ -379,6 +384,7 @@ class ChatflowMemoryService:
                 'app_id': app_id,
                 'conversation_id': conversation_id,
                 'visible_messages': visible_messages,
+                'variable_pool': variable_pool,
                 'is_draft': is_draft
             },
         )
@@ -390,6 +396,7 @@ class ChatflowMemoryService:
         app_id: str,
         conversation_id: str,
         visible_messages: Sequence[PromptMessage],
+        variable_pool: VariablePool,
         is_draft: bool
     ):
         try:
@@ -402,7 +409,7 @@ class ChatflowMemoryService:
             kwargs={
                 'memory_block': block,
                 'visible_messages': visible_messages,
-                'variable_pool': VariablePool(),
+                'variable_pool': variable_pool,
                 'is_draft': is_draft
             },
         )
@@ -457,6 +464,7 @@ class ChatflowMemoryService:
         updated_value = LLMGenerator.update_memory_block(
             tenant_id=memory_block.tenant_id,
             visible_history=ChatflowMemoryService._format_chat_history(visible_messages),
+            variable_pool=variable_pool,
             memory_block=memory_block,
             memory_spec=memory_block.spec,
         )
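
Note: the behavioural change above is that memory-update prompts now render memory_spec.instruction against the workflow run's populated VariablePool instead of a freshly constructed empty one. The sketch below (not part of the patch) illustrates that difference under stated assumptions: the import path, VariablePool(), and convert_template(...).text appear in the diff itself, while the add() call, the selector syntax, and the start_node / user_goal names are hypothetical and used only for illustration.

    from core.workflow.entities.variable_pool import VariablePool

    # Memory-block instruction containing a workflow-variable selector
    # (selector syntax assumed for illustration).
    instruction = "Track the user's current goal: {{#start_node.user_goal#}}"

    # Old behaviour: each update thread built an empty pool, so there is no
    # value available for the selector when the prompt is rendered.
    empty_pool = VariablePool()
    print(empty_pool.convert_template(instruction).text)

    # New behaviour: the runner's pool, already populated during the workflow
    # run, is passed through ChatflowMemoryService into update_memory_block,
    # so the rendered instruction can carry the real value.
    run_pool = VariablePool()
    run_pool.add(("start_node", "user_goal"), "plan a trip to Kyoto")  # add() assumed
    print(run_pool.convert_template(instruction).text)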