Mirror of https://github.com/langgenius/dify.git (synced 2026-04-27 19:27:23 +08:00)
fix: correct indentation in TokenBufferMemory get_history_prompt_messages method
parent 7aef0b54e5
commit cc1d437dc1
@@ -167,11 +167,11 @@ class TokenBufferMemory:
Affected lines (indentation-only change; the diff view preserved the text of each line but not its leading whitespace):
else:
prompt_messages.append(AssistantPromptMessage(content=message.answer))

if not prompt_messages:
return []

# prune the chat message if it exceeds the max token limit
curr_message_tokens = self.model_instance.get_llm_num_tokens(prompt_messages)

if curr_message_tokens > max_token_limit:
while curr_message_tokens > max_token_limit and len(prompt_messages) > 1:
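For orientation, below is a minimal, self-contained sketch of what the corrected control flow in get_history_prompt_messages plausibly looks like, assuming the fix dedents the emptiness check and the token-pruning loop so they run once after the per-message loop. The stand-in message classes, FakeModelInstance token counter, and (query, answer) message format are illustrative assumptions and not Dify's real API; only the line content shown in the hunk above comes from the commit.

    # Sketch only -- not the actual Dify implementation. It illustrates where the
    # indentation fix places the post-loop logic relative to the per-message loop.
    from dataclasses import dataclass


    @dataclass
    class PromptMessage:              # stand-in for Dify's prompt message classes
        content: str


    class UserPromptMessage(PromptMessage):
        pass


    class AssistantPromptMessage(PromptMessage):
        pass


    class FakeModelInstance:
        """Stand-in for model_instance; counts whitespace-separated words, not real tokens."""

        def get_llm_num_tokens(self, prompt_messages):
            return sum(len(m.content.split()) for m in prompt_messages)


    class TokenBufferMemorySketch:
        def __init__(self, messages, model_instance):
            self.messages = messages          # list of (query, answer) pairs (assumed shape)
            self.model_instance = model_instance

        def get_history_prompt_messages(self, max_token_limit=2000):
            prompt_messages = []
            for query, answer in self.messages:
                prompt_messages.append(UserPromptMessage(content=query))
                # The real method branches on attached files here; the sketch keeps
                # only the else path that appears in the hunk above.
                prompt_messages.append(AssistantPromptMessage(content=answer))

            # With the corrected indentation, everything below runs once, after the loop.
            if not prompt_messages:
                return []

            # prune the chat message if it exceeds the max token limit
            curr_message_tokens = self.model_instance.get_llm_num_tokens(prompt_messages)

            if curr_message_tokens > max_token_limit:
                while curr_message_tokens > max_token_limit and len(prompt_messages) > 1:
                    prompt_messages.pop(0)
                    curr_message_tokens = self.model_instance.get_llm_num_tokens(prompt_messages)

            return prompt_messages


    if __name__ == "__main__":
        memory = TokenBufferMemorySketch(
            messages=[
                ("hello there", "hi, how can I help?"),
                ("tell me a joke", "why did the chicken..."),
            ],
            model_instance=FakeModelInstance(),
        )
        print(memory.get_history_prompt_messages(max_token_limit=8))

Popping from the front of the list drops the oldest turns first, so the most recent exchanges are the ones that survive the token-limit pruning.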