From 8685f055ea930be52e6f174e3a70c85789d15b19 Mon Sep 17 00:00:00 2001
From: Stream
Date: Tue, 26 Aug 2025 11:31:59 +0800
Subject: [PATCH] fix: use model parameters from memory_spec in llm_generator

---
 api/core/llm_generator/llm_generator.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py
index 88479a8502..e9e3f7c5d8 100644
--- a/api/core/llm_generator/llm_generator.py
+++ b/api/core/llm_generator/llm_generator.py
@@ -605,7 +605,7 @@ class LLMGenerator:
             LLMResult,
             model_instance.invoke_llm(
                 prompt_messages=[UserPromptMessage(content=formatted_prompt)],
-                model_parameters={"temperature": 0.01, "max_tokens": 2000},
+                model_parameters=memory_spec.model.completion_params,
                 stream=False,
             )
         )
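
For context, a minimal sketch of how the patched call site reads in full. The enclosing
function name and signature are illustrative, not the actual code in llm_generator.py;
invoke_llm, UserPromptMessage, LLMResult, and memory_spec.model.completion_params come
from the diff above, and completion_params is assumed to be a dict of provider-specific
sampling settings (e.g. temperature, max_tokens) configured on the memory spec, replacing
the previously hardcoded values. Import paths assume Dify's core.model_runtime layout.

    from typing import cast

    from core.model_runtime.entities.llm_entities import LLMResult
    from core.model_runtime.entities.message_entities import UserPromptMessage

    # Hypothetical wrapper shown only to frame the patched invocation.
    def _invoke_with_memory_params(model_instance, memory_spec, formatted_prompt: str) -> LLMResult:
        # Sampling parameters now follow the memory spec's model configuration
        # instead of the hardcoded {"temperature": 0.01, "max_tokens": 2000}.
        return cast(
            LLMResult,
            model_instance.invoke_llm(
                prompt_messages=[UserPromptMessage(content=formatted_prompt)],
                model_parameters=memory_spec.model.completion_params,
                stream=False,
            )
        )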