From 72818e946d7f45599f5c1c6aff77253006726b4c Mon Sep 17 00:00:00 2001
From: takatost
Date: Thu, 21 Mar 2024 15:36:25 +0800
Subject: [PATCH] fix llm memory

---
 api/core/workflow/nodes/llm/llm_node.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/api/core/workflow/nodes/llm/llm_node.py b/api/core/workflow/nodes/llm/llm_node.py
index cbb6d954b9..cc49a22020 100644
--- a/api/core/workflow/nodes/llm/llm_node.py
+++ b/api/core/workflow/nodes/llm/llm_node.py
@@ -73,6 +73,8 @@ class LLMNode(BaseNode):
         # fetch prompt messages
         prompt_messages, stop = self._fetch_prompt_messages(
             node_data=node_data,
+            query=variable_pool.get_variable_value(['sys', SystemVariable.QUERY.value])
+            if node_data.memory else None,
             inputs=inputs,
             files=files,
             context=context,
@@ -391,6 +393,7 @@ class LLMNode(BaseNode):
         return memory

     def _fetch_prompt_messages(self, node_data: LLMNodeData,
+                               query: Optional[str],
                                inputs: dict[str, str],
                                files: list[FileVar],
                                context: Optional[str],
@@ -400,6 +403,7 @@ class LLMNode(BaseNode):
         """
         Fetch prompt messages
         :param node_data: node data
+        :param query: query
         :param inputs: inputs
         :param files: files
         :param context: context
@@ -411,7 +415,7 @@ class LLMNode(BaseNode):
         prompt_messages = prompt_transform.get_prompt(
             prompt_template=node_data.prompt_template,
             inputs=inputs,
-            query='',
+            query=query if query else '',
             files=files,
             context=context,
             memory_config=node_data.memory,