fix: correct indentation in TokenBufferMemory get_history_prompt_messages method

This commit is contained in:
-LAN- 2025-09-07 12:48:50 +08:00
parent 7aef0b54e5
commit cc1d437dc1
No known key found for this signature in database
GPG Key ID: 6BA0D108DED011FF
1 changed file with 4 additions and 4 deletions

View File

@@ -167,11 +167,11 @@ class TokenBufferMemory:
else:
prompt_messages.append(AssistantPromptMessage(content=message.answer))
if not prompt_messages:
return []
if not prompt_messages:
return []
# prune the chat message if it exceeds the max token limit
curr_message_tokens = self.model_instance.get_llm_num_tokens(prompt_messages)
# prune the chat message if it exceeds the max token limit
curr_message_tokens = self.model_instance.get_llm_num_tokens(prompt_messages)
if curr_message_tokens > max_token_limit:
while curr_message_tokens > max_token_limit and len(prompt_messages) > 1: