From c3eac450ce2d0ffd96279c08da293ed94194dfe2 Mon Sep 17 00:00:00 2001
From: takatost
Date: Mon, 4 Mar 2024 14:15:17 +0800
Subject: [PATCH] fix typo

---
 api/core/agent/cot_agent_runner.py | 2 +-
 api/core/agent/fc_agent_runner.py  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py
index ad1e6e610d..8b444ef3be 100644
--- a/api/core/agent/cot_agent_runner.py
+++ b/api/core/agent/cot_agent_runner.py
@@ -134,7 +134,7 @@ class CotAgentRunner(BaseAgentRunner):
                 input=query
             )
 
-            # recale llm max tokens
+            # recalc llm max tokens
             self.recalc_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
             chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py
index 3c7e55e293..30e5cdd694 100644
--- a/api/core/agent/fc_agent_runner.py
+++ b/api/core/agent/fc_agent_runner.py
@@ -107,7 +107,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
             messages_ids=message_file_ids
         )
 
-            # recale llm max tokens
+            # recalc llm max tokens
             self.recalc_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
             chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
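
--
Note (not part of the patch; git am ignores text after the diff): the corrected
comment refers to self.recalc_llm_max_tokens(self.model_config, prompt_messages),
which recalculates the completion token budget before invoke_llm is called. As a
minimal sketch only, a recalculation step like this typically clamps max_tokens
so prompt tokens plus completion tokens fit the model's context window; the
function and parameter names below are assumptions for illustration, not the
repository's API:

    def recalc_max_tokens(prompt_tokens: int, requested_max_tokens: int,
                          context_size: int) -> int:
        """Clamp max_tokens to the context budget left after the prompt.

        Sketch only: names and behavior are assumptions, not the actual
        implementation behind recalc_llm_max_tokens.
        """
        remaining = context_size - prompt_tokens
        if remaining <= 0:
            raise ValueError("prompt already fills the context window")
        return min(requested_max_tokens, remaining)

    # Example: a 4096-token context with a 3900-token prompt leaves at most
    # 196 completion tokens, even if 512 were requested.
    assert recalc_max_tokens(3900, 512, 4096) == 196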