From 7b738e045e16fa896b9df8ea4981c688788adc5b Mon Sep 17 00:00:00 2001
From: takatost
Date: Mon, 4 Mar 2024 13:32:59 +0800
Subject: [PATCH] fix typo

---
 api/core/agent/cot_agent_runner.py         | 2 +-
 api/core/agent/fc_agent_runner.py          | 2 +-
 api/core/app/apps/base_app_runner.py       | 2 +-
 api/core/app/apps/chat/app_runner.py       | 2 +-
 api/core/app/apps/completion/app_runner.py | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py
index 8b444ef3be..ad1e6e610d 100644
--- a/api/core/agent/cot_agent_runner.py
+++ b/api/core/agent/cot_agent_runner.py
@@ -134,7 +134,7 @@ class CotAgentRunner(BaseAgentRunner):
                 input=query
             )
 
-            # recale llm max tokens
+            # recalc llm max tokens
             self.recalc_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
             chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py
index 30e5cdd694..3c7e55e293 100644
--- a/api/core/agent/fc_agent_runner.py
+++ b/api/core/agent/fc_agent_runner.py
@@ -107,7 +107,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
                 messages_ids=message_file_ids
             )
 
-            # recale llm max tokens
+            # recalc llm max tokens
             self.recalc_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
             chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
diff --git a/api/core/app/apps/base_app_runner.py b/api/core/app/apps/base_app_runner.py
index 4e099c9ae1..dda240d778 100644
--- a/api/core/app/apps/base_app_runner.py
+++ b/api/core/app/apps/base_app_runner.py
@@ -84,7 +84,7 @@ class AppRunner:
 
         return rest_tokens
 
-    def recale_llm_max_tokens(self, model_config: ModelConfigWithCredentialsEntity,
+    def recalc_llm_max_tokens(self, model_config: ModelConfigWithCredentialsEntity,
                               prompt_messages: list[PromptMessage]):
         # recalc max_tokens if sum(prompt_token + max_tokens) over model token limit
         model_type_instance = model_config.provider_model_bundle.model_type_instance
diff --git a/api/core/app/apps/chat/app_runner.py b/api/core/app/apps/chat/app_runner.py
index 57aca9d3e6..bce4606f21 100644
--- a/api/core/app/apps/chat/app_runner.py
+++ b/api/core/app/apps/chat/app_runner.py
@@ -189,7 +189,7 @@ class ChatAppRunner(AppRunner):
             return
 
         # Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
-        self.recale_llm_max_tokens(
+        self.recalc_llm_max_tokens(
             model_config=application_generate_entity.model_config,
             prompt_messages=prompt_messages
         )
diff --git a/api/core/app/apps/completion/app_runner.py b/api/core/app/apps/completion/app_runner.py
index c5b8ca6c9a..d67d485e1d 100644
--- a/api/core/app/apps/completion/app_runner.py
+++ b/api/core/app/apps/completion/app_runner.py
@@ -149,7 +149,7 @@ class CompletionAppRunner(AppRunner):
             return
 
        # Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
-        self.recale_llm_max_tokens(
+        self.recalc_llm_max_tokens(
             model_config=application_generate_entity.model_config,
             prompt_messages=prompt_messages
        )
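
Note: for readers unfamiliar with this helper, below is a minimal sketch of the logic the renamed method's own comment describes ("recalc max_tokens if sum(prompt_token + max_tokens) over model token limit"). It is illustrative only, not Dify's actual implementation: ModelConfig, context_size, and the floor of 16 tokens are hypothetical stand-ins.

# Hypothetical sketch of the clamping behind recalc_llm_max_tokens;
# names and the 16-token floor are illustrative, not Dify's real API.
from dataclasses import dataclass


@dataclass
class ModelConfig:
    context_size: int  # total token window of the model
    max_tokens: int    # requested completion budget


def recalc_llm_max_tokens(config: ModelConfig, prompt_token_count: int) -> int:
    """Shrink max_tokens so prompt + completion fit in the context window."""
    if prompt_token_count + config.max_tokens > config.context_size:
        # clamp to what remains, keeping a small positive floor
        config.max_tokens = max(config.context_size - prompt_token_count, 16)
    return config.max_tokens


# usage: a 4096-token model, a 4000-token prompt, 512 tokens requested
cfg = ModelConfig(context_size=4096, max_tokens=512)
print(recalc_llm_max_tokens(cfg, prompt_token_count=4000))  # -> 96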