This commit is contained in:
takatost 2024-03-04 13:32:59 +08:00
parent 3f6c17247f
commit 7b738e045e
5 changed files with 5 additions and 5 deletions

View File

@@ -134,7 +134,7 @@ class CotAgentRunner(BaseAgentRunner):
input=query
)
# recalc llm max tokens
# recale llm max tokens
self.recalc_llm_max_tokens(self.model_config, prompt_messages)
# invoke model
chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(

View File

@@ -107,7 +107,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
messages_ids=message_file_ids
)
# recalc llm max tokens
# recale llm max tokens
self.recalc_llm_max_tokens(self.model_config, prompt_messages)
# invoke model
chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(

View File

@@ -84,7 +84,7 @@ class AppRunner:
return rest_tokens
def recale_llm_max_tokens(self, model_config: ModelConfigWithCredentialsEntity,
def recalc_llm_max_tokens(self, model_config: ModelConfigWithCredentialsEntity,
prompt_messages: list[PromptMessage]):
# recalc max_tokens if sum(prompt_token + max_tokens) over model token limit
model_type_instance = model_config.provider_model_bundle.model_type_instance

View File

@@ -189,7 +189,7 @@ class ChatAppRunner(AppRunner):
return
# Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
self.recale_llm_max_tokens(
self.recalc_llm_max_tokens(
model_config=application_generate_entity.model_config,
prompt_messages=prompt_messages
)

View File

@@ -149,7 +149,7 @@ class CompletionAppRunner(AppRunner):
return
# Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
self.recale_llm_max_tokens(
self.recalc_llm_max_tokens(
model_config=application_generate_entity.model_config,
prompt_messages=prompt_messages
)