From cc1d437dc17e620e419629fb4acc7bfacfaed329 Mon Sep 17 00:00:00 2001
From: -LAN-
Date: Sun, 7 Sep 2025 12:48:50 +0800
Subject: [PATCH] fix: correct indentation in TokenBufferMemory
 get_history_prompt_messages method

---
 api/core/memory/token_buffer_memory.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/api/core/memory/token_buffer_memory.py b/api/core/memory/token_buffer_memory.py
index 20a120840e..f2178b0270 100644
--- a/api/core/memory/token_buffer_memory.py
+++ b/api/core/memory/token_buffer_memory.py
@@ -167,11 +167,11 @@ class TokenBufferMemory:
             else:
                 prompt_messages.append(AssistantPromptMessage(content=message.answer))
 
-            if not prompt_messages:
-                return []
+        if not prompt_messages:
+            return []
 
-            # prune the chat message if it exceeds the max token limit
-            curr_message_tokens = self.model_instance.get_llm_num_tokens(prompt_messages)
+        # prune the chat message if it exceeds the max token limit
+        curr_message_tokens = self.model_instance.get_llm_num_tokens(prompt_messages)
 
         if curr_message_tokens > max_token_limit:
             while curr_message_tokens > max_token_limit and len(prompt_messages) > 1:
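
Note: for reference, the pruning pattern these lines belong to looks roughly like the sketch below once the fix is applied. This is a minimal standalone illustration, not Dify's actual class: AssistantPromptMessage is reduced to a bare content field, FakeModelInstance and its word-count tokenizer are hypothetical stand-ins for the real model_instance.get_llm_num_tokens, and the while-loop body (drop the oldest message, recount) is an assumption consistent with the loop condition, since the hunk ends at the while header.

    from dataclasses import dataclass


    @dataclass
    class AssistantPromptMessage:
        # Stand-in for Dify's AssistantPromptMessage; only `content` is modeled.
        content: str


    class FakeModelInstance:
        # Hypothetical stub: counts whitespace-separated words as "tokens".
        def get_llm_num_tokens(self, prompt_messages):
            return sum(len(m.content.split()) for m in prompt_messages)


    def prune_to_token_limit(prompt_messages, model_instance, max_token_limit):
        # Mirrors the post-fix control flow: the emptiness check and the
        # initial token count run once, after the history list is fully built.
        if not prompt_messages:
            return []

        curr_message_tokens = model_instance.get_llm_num_tokens(prompt_messages)
        if curr_message_tokens > max_token_limit:
            # Assumed loop body: drop the oldest message and recount until the
            # history fits, always keeping at least the most recent message.
            while curr_message_tokens > max_token_limit and len(prompt_messages) > 1:
                prompt_messages.pop(0)
                curr_message_tokens = model_instance.get_llm_num_tokens(prompt_messages)
        return prompt_messages


    if __name__ == "__main__":
        history = [AssistantPromptMessage(content=f"answer {i} " * 10) for i in range(20)]
        pruned = prune_to_token_limit(history, FakeModelInstance(), max_token_limit=50)
        print(len(pruned))  # 2: only the newest messages fit under the limit

Assuming the removed lines sat inside the per-message loop (their leading whitespace was lost in transit), the practical effect of the fix is that get_llm_num_tokens runs once after the full history is assembled rather than once per message, and the empty-history early return applies to the whole conversation instead of being re-evaluated on every iteration.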