fix: use model parameters from memory_spec in llm_generator

Stream committed 2025-08-26 11:31:59 +08:00
parent 3b868a1cec
commit 8685f055ea
1 changed file with 1 addition and 1 deletion


@@ -605,7 +605,7 @@ class LLMGenerator:
             LLMResult,
             model_instance.invoke_llm(
                 prompt_messages=[UserPromptMessage(content=formatted_prompt)],
-                model_parameters={"temperature": 0.01, "max_tokens": 2000},
+                model_parameters=memory_spec.model.completion_params,
                 stream=False,
             )
         )
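For context: the change threads the caller-configured completion parameters through to invoke_llm instead of hardcoding them. Below is a minimal runnable sketch of the pattern, using hypothetical MemorySpec/ModelSpec dataclasses as stand-ins for the actual entities behind memory_spec; the real classes and field names in the codebase may differ.

from dataclasses import dataclass, field


@dataclass
class ModelSpec:
    # Hypothetical stand-in for memory_spec.model; completion_params
    # carries the caller-configured sampling settings.
    provider: str
    name: str
    completion_params: dict = field(default_factory=dict)


@dataclass
class MemorySpec:
    # Hypothetical stand-in for the memory_spec consumed by LLMGenerator.
    model: ModelSpec


memory_spec = MemorySpec(
    model=ModelSpec(
        provider="openai",
        name="gpt-4o-mini",
        completion_params={"temperature": 0.3, "max_tokens": 1024},
    )
)

# Before the fix, invoke_llm always received the hardcoded
# {"temperature": 0.01, "max_tokens": 2000}, silently ignoring whatever
# the user configured. After the fix, the configured values flow through:
assert memory_spec.model.completion_params == {"temperature": 0.3, "max_tokens": 1024}

Passing the spec's params through keeps a single source of truth for model settings rather than duplicating defaults at each call site.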