From 110b6a0863d21010ca4af4e47b1d0b5592823f3e Mon Sep 17 00:00:00 2001
From: Will
Date: Fri, 5 Sep 2025 14:01:07 +0800
Subject: [PATCH] fix incorrect indent in TokenBufferMemory (#25215)

---
 api/core/memory/token_buffer_memory.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/api/core/memory/token_buffer_memory.py b/api/core/memory/token_buffer_memory.py
index cb768e2036..17050fcadf 100644
--- a/api/core/memory/token_buffer_memory.py
+++ b/api/core/memory/token_buffer_memory.py
@@ -167,11 +167,11 @@ class TokenBufferMemory:
             else:
                 prompt_messages.append(AssistantPromptMessage(content=message.answer))
 
-            if not prompt_messages:
-                return []
+        if not prompt_messages:
+            return []
 
-            # prune the chat message if it exceeds the max token limit
-            curr_message_tokens = self.model_instance.get_llm_num_tokens(prompt_messages)
+        # prune the chat message if it exceeds the max token limit
+        curr_message_tokens = self.model_instance.get_llm_num_tokens(prompt_messages)
 
         if curr_message_tokens > max_token_limit:
             while curr_message_tokens > max_token_limit and len(prompt_messages) > 1: