From b3bee58e74d03e785c00f4147a1b7bf90c154527 Mon Sep 17 00:00:00 2001 From: huangjilong Date: Thu, 18 Dec 2025 19:58:59 +0800 Subject: [PATCH 1/2] fix(agent): fix model type mismatch error in agent node with multiple model selectors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Problem When using a custom agent strategy with multiple models (e.g., LLM + Embedding model), the second query fails with error: "Model type instance is not LargeLanguageModel" ## Root Cause 1. `_fetch_model()` method hardcoded `ModelType.LLM` when calling `get_provider_model_bundle()` and `get_current_credentials()`, but used the actual model type from config when calling `get_model_instance()`. This caused a type mismatch for non-LLM models. 2. `_generate_agent_parameters()` attempted to create memory for ALL MODEL_SELECTOR parameters, but memory requires LLM model to calculate tokens. When processing embedding model parameters, it failed because embedding models don't support `get_llm_num_tokens()`. ## Solution 1. Use the actual model type from config consistently in `_fetch_model()` method 2. 
Only create memory when the model type is LLM in `_generate_agent_parameters()` 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- api/core/workflow/nodes/agent/agent_node.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/api/core/workflow/nodes/agent/agent_node.py b/api/core/workflow/nodes/agent/agent_node.py index 4be006de11..c718367dea 100644 --- a/api/core/workflow/nodes/agent/agent_node.py +++ b/api/core/workflow/nodes/agent/agent_node.py @@ -303,9 +303,10 @@ class AgentNode(Node[AgentNodeData]): if parameter.type == AgentStrategyParameter.AgentStrategyParameterType.MODEL_SELECTOR: value = cast(dict[str, Any], value) model_instance, model_schema = self._fetch_model(value) - # memory config + # memory config - only for LLM models history_prompt_messages = [] - if node_data.memory: + model_type_str = value.get("model_type", "") + if node_data.memory and model_type_str == ModelType.LLM.value: memory = self._fetch_memory(model_instance) if memory: prompt_messages = memory.get_history_prompt_messages( @@ -415,12 +416,13 @@ class AgentNode(Node[AgentNodeData]): def _fetch_model(self, value: dict[str, Any]) -> tuple[ModelInstance, AIModelEntity | None]: provider_manager = ProviderManager() + model_type = ModelType(value.get("model_type", ModelType.LLM.value)) provider_model_bundle = provider_manager.get_provider_model_bundle( - tenant_id=self.tenant_id, provider=value.get("provider", ""), model_type=ModelType.LLM + tenant_id=self.tenant_id, provider=value.get("provider", ""), model_type=model_type ) model_name = value.get("model", "") model_credentials = provider_model_bundle.configuration.get_current_credentials( - model_type=ModelType.LLM, model=model_name + model_type=model_type, model=model_name ) provider_name = provider_model_bundle.configuration.provider.provider model_type_instance = provider_model_bundle.model_type_instance From c74af1e0656436dec9cf5e5128f358c743dda395 Mon Sep 17 
00:00:00 2001 From: "leon.Huang" Date: Thu, 18 Dec 2025 20:18:57 +0800 Subject: [PATCH 2/2] Update api/core/workflow/nodes/agent/agent_node.py: accept review suggestion Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- api/core/workflow/nodes/agent/agent_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/workflow/nodes/agent/agent_node.py b/api/core/workflow/nodes/agent/agent_node.py index c718367dea..5090cccb5d 100644 --- a/api/core/workflow/nodes/agent/agent_node.py +++ b/api/core/workflow/nodes/agent/agent_node.py @@ -305,7 +305,7 @@ class AgentNode(Node[AgentNodeData]): model_instance, model_schema = self._fetch_model(value) # memory config - only for LLM models history_prompt_messages = [] - model_type_str = value.get("model_type", "") + model_type_str = value.get("model_type", ModelType.LLM.value) if node_data.memory and model_type_str == ModelType.LLM.value: memory = self._fetch_memory(model_instance) if memory: