mirror of https://github.com/langgenius/dify.git
fix: use model parameters from memory_spec in llm_generator
parent 3b868a1cec
commit 8685f055ea
@@ -605,7 +605,7 @@ class LLMGenerator:
             LLMResult,
             model_instance.invoke_llm(
                 prompt_messages=[UserPromptMessage(content=formatted_prompt)],
-                model_parameters={"temperature": 0.01, "max_tokens": 2000},
+                model_parameters=memory_spec.model.completion_params,
                 stream=False,
             )
         )
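For context, a minimal sketch of the call site after this change. It assumes memory_spec carries a model config whose completion_params dict holds the user-configured sampling parameters (temperature, max_tokens, and so on); the wrapper function name, import paths, and return handling are illustrative assumptions, not taken from the diff.

from typing import cast

from core.model_runtime.entities.llm_entities import LLMResult
from core.model_runtime.entities.message_entities import UserPromptMessage


def generate_from_memory(model_instance, memory_spec, formatted_prompt: str) -> str:
    # Before this commit the parameters were hard-coded as
    # {"temperature": 0.01, "max_tokens": 2000}; reading them from the
    # memory spec's model config honors whatever the user configured.
    result = cast(
        LLMResult,
        model_instance.invoke_llm(
            prompt_messages=[UserPromptMessage(content=formatted_prompt)],
            model_parameters=memory_spec.model.completion_params,
            stream=False,
        ),
    )
    return result.message.content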