diff --git a/api/configs/middleware/__init__.py b/api/configs/middleware/__init__.py
index 4e228ab932..ba8bbc7135 100644
--- a/api/configs/middleware/__init__.py
+++ b/api/configs/middleware/__init__.py
@@ -144,7 +144,8 @@ class DatabaseConfig(BaseSettings):
default="postgresql",
)
- @computed_field
+ @computed_field # type: ignore[misc]
+ @property
def SQLALCHEMY_DATABASE_URI(self) -> str:
db_extras = (
f"{self.DB_EXTRAS}&client_encoding={self.DB_CHARSET}" if self.DB_CHARSET else self.DB_EXTRAS
diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py
index abb8db34de..5db7539926 100644
--- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py
+++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py
@@ -568,7 +568,7 @@ class AdvancedChatAppGenerateTaskPipeline:
)
yield workflow_finish_resp
- self._base_task_pipeline._queue_manager.publish(QueueAdvancedChatMessageEndEvent(), PublishFrom.TASK_PIPELINE)
+ self._base_task_pipeline.queue_manager.publish(QueueAdvancedChatMessageEndEvent(), PublishFrom.TASK_PIPELINE)
def _handle_workflow_partial_success_event(
self,
@@ -600,7 +600,7 @@ class AdvancedChatAppGenerateTaskPipeline:
)
yield workflow_finish_resp
- self._base_task_pipeline._queue_manager.publish(QueueAdvancedChatMessageEndEvent(), PublishFrom.TASK_PIPELINE)
+ self._base_task_pipeline.queue_manager.publish(QueueAdvancedChatMessageEndEvent(), PublishFrom.TASK_PIPELINE)
def _handle_workflow_failed_event(
self,
@@ -845,7 +845,7 @@ class AdvancedChatAppGenerateTaskPipeline:
# Initialize graph runtime state
graph_runtime_state: Optional[GraphRuntimeState] = None
- for queue_message in self._base_task_pipeline._queue_manager.listen():
+ for queue_message in self._base_task_pipeline.queue_manager.listen():
event = queue_message.event
match event:
@@ -959,11 +959,11 @@ class AdvancedChatAppGenerateTaskPipeline:
if self._base_task_pipeline._output_moderation_handler:
if self._base_task_pipeline._output_moderation_handler.should_direct_output():
self._task_state.answer = self._base_task_pipeline._output_moderation_handler.get_final_output()
- self._base_task_pipeline._queue_manager.publish(
+ self._base_task_pipeline.queue_manager.publish(
QueueTextChunkEvent(text=self._task_state.answer), PublishFrom.TASK_PIPELINE
)
- self._base_task_pipeline._queue_manager.publish(
+ self._base_task_pipeline.queue_manager.publish(
QueueStopEvent(stopped_by=QueueStopEvent.StopBy.OUTPUT_MODERATION), PublishFrom.TASK_PIPELINE
)
return True
diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py
index b1e9a340bd..537c070adf 100644
--- a/api/core/app/apps/workflow/generate_task_pipeline.py
+++ b/api/core/app/apps/workflow/generate_task_pipeline.py
@@ -711,7 +711,7 @@ class WorkflowAppGenerateTaskPipeline:
# Initialize graph runtime state
graph_runtime_state = None
- for queue_message in self._base_task_pipeline._queue_manager.listen():
+ for queue_message in self._base_task_pipeline.queue_manager.listen():
event = queue_message.event
match event:
diff --git a/api/core/app/entities/app_invoke_entities.py b/api/core/app/entities/app_invoke_entities.py
index 65ed267959..11f37c4baa 100644
--- a/api/core/app/entities/app_invoke_entities.py
+++ b/api/core/app/entities/app_invoke_entities.py
@@ -9,7 +9,6 @@ from core.app.app_config.entities import EasyUIBasedAppConfig, WorkflowUIBasedAp
from core.entities.provider_configuration import ProviderModelBundle
from core.file import File, FileUploadConfig
from core.model_runtime.entities.model_entities import AIModelEntity
-from core.ops.ops_trace_manager import TraceQueueManager
class InvokeFrom(Enum):
@@ -114,7 +113,8 @@ class AppGenerateEntity(BaseModel):
extras: dict[str, Any] = Field(default_factory=dict)
# tracing instance
- trace_manager: Optional[TraceQueueManager] = None
+ # Using Any to avoid circular import with TraceQueueManager
+ trace_manager: Optional[Any] = None
class EasyUIBasedAppGenerateEntity(AppGenerateEntity):
diff --git a/api/core/app/task_pipeline/based_generate_task_pipeline.py b/api/core/app/task_pipeline/based_generate_task_pipeline.py
index 3ed0c3352f..014c7fd4f5 100644
--- a/api/core/app/task_pipeline/based_generate_task_pipeline.py
+++ b/api/core/app/task_pipeline/based_generate_task_pipeline.py
@@ -37,7 +37,7 @@ class BasedGenerateTaskPipeline:
stream: bool,
) -> None:
self._application_generate_entity = application_generate_entity
- self._queue_manager = queue_manager
+ self.queue_manager = queue_manager
self._start_at = time.perf_counter()
self._output_moderation_handler = self._init_output_moderation()
self._stream = stream
@@ -113,7 +113,7 @@ class BasedGenerateTaskPipeline:
tenant_id=app_config.tenant_id,
app_id=app_config.app_id,
rule=ModerationRule(type=sensitive_word_avoidance.type, config=sensitive_word_avoidance.config),
- queue_manager=self._queue_manager,
+ queue_manager=self.queue_manager,
)
return None
diff --git a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py
index 888434798a..56131d99c9 100644
--- a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py
+++ b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py
@@ -257,7 +257,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline):
Process stream response.
:return:
"""
- for message in self._queue_manager.listen():
+ for message in self.queue_manager.listen():
if publisher:
publisher.publish(message)
event = message.event
@@ -499,7 +499,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline):
if self._output_moderation_handler.should_direct_output():
# stop subscribe new token when output moderation should direct output
self._task_state.llm_result.message.content = self._output_moderation_handler.get_final_output()
- self._queue_manager.publish(
+ self.queue_manager.publish(
QueueLLMChunkEvent(
chunk=LLMResultChunk(
model=self._task_state.llm_result.model,
@@ -513,7 +513,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline):
PublishFrom.TASK_PIPELINE,
)
- self._queue_manager.publish(
+ self.queue_manager.publish(
QueueStopEvent(stopped_by=QueueStopEvent.StopBy.OUTPUT_MODERATION), PublishFrom.TASK_PIPELINE
)
return True
diff --git a/api/core/memory/token_buffer_memory.py b/api/core/memory/token_buffer_memory.py
index 91f17568b6..2a76b1f41a 100644
--- a/api/core/memory/token_buffer_memory.py
+++ b/api/core/memory/token_buffer_memory.py
@@ -99,13 +99,13 @@ class TokenBufferMemory:
prompt_messages.append(UserPromptMessage(content=message.query))
else:
prompt_message_contents: list[PromptMessageContentUnionTypes] = []
- prompt_message_contents.append(TextPromptMessageContent(data=message.query))
for file in file_objs:
prompt_message = file_manager.to_prompt_message_content(
file,
image_detail_config=detail,
)
prompt_message_contents.append(prompt_message)
+ prompt_message_contents.append(TextPromptMessageContent(data=message.query))
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
diff --git a/api/core/model_runtime/model_providers/model_provider_factory.py b/api/core/model_runtime/model_providers/model_provider_factory.py
index ad46f64ec3..f8590b38f8 100644
--- a/api/core/model_runtime/model_providers/model_provider_factory.py
+++ b/api/core/model_runtime/model_providers/model_provider_factory.py
@@ -257,11 +257,6 @@ class ModelProviderFactory:
# scan all providers
plugin_model_provider_entities = self.get_plugin_model_providers()
- # convert provider_configs to dict
- provider_credentials_dict = {}
- for provider_config in provider_configs:
- provider_credentials_dict[provider_config.provider] = provider_config.credentials
-
# traverse all model_provider_extensions
providers = []
for plugin_model_provider_entity in plugin_model_provider_entities:
diff --git a/api/core/model_runtime/schema_validators/common_validator.py b/api/core/model_runtime/schema_validators/common_validator.py
index 810a7c4c44..b689007401 100644
--- a/api/core/model_runtime/schema_validators/common_validator.py
+++ b/api/core/model_runtime/schema_validators/common_validator.py
@@ -68,7 +68,7 @@ class CommonValidator:
if credential_form_schema.max_length:
if len(value) > credential_form_schema.max_length:
raise ValueError(
- f"Variable {credential_form_schema.variable} length should not"
+ f"Variable {credential_form_schema.variable} length should not be"
f" greater than {credential_form_schema.max_length}"
)
diff --git a/api/core/model_runtime/utils/helper.py b/api/core/model_runtime/utils/helper.py
deleted file mode 100644
index 5e8a723ec7..0000000000
--- a/api/core/model_runtime/utils/helper.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import pydantic
-from pydantic import BaseModel
-
-
-def dump_model(model: BaseModel) -> dict:
- if hasattr(pydantic, "model_dump"):
- # FIXME mypy error, try to fix it instead of using type: ignore
- return pydantic.model_dump(model) # type: ignore
- else:
- return model.model_dump()
diff --git a/api/core/rag/datasource/vdb/oracle/oraclevector.py b/api/core/rag/datasource/vdb/oracle/oraclevector.py
index d1c8142b3d..303c3fe31c 100644
--- a/api/core/rag/datasource/vdb/oracle/oraclevector.py
+++ b/api/core/rag/datasource/vdb/oracle/oraclevector.py
@@ -109,8 +109,19 @@ class OracleVector(BaseVector):
)
def _get_connection(self) -> Connection:
- connection = oracledb.connect(user=self.config.user, password=self.config.password, dsn=self.config.dsn)
- return connection
+ if self.config.is_autonomous:
+ connection = oracledb.connect(
+ user=self.config.user,
+ password=self.config.password,
+ dsn=self.config.dsn,
+ config_dir=self.config.config_dir,
+ wallet_location=self.config.wallet_location,
+ wallet_password=self.config.wallet_password,
+ )
+ return connection
+ else:
+ connection = oracledb.connect(user=self.config.user, password=self.config.password, dsn=self.config.dsn)
+ return connection
def _create_connection_pool(self, config: OracleVectorConfig):
pool_params = {
diff --git a/api/core/variables/consts.py b/api/core/variables/consts.py
index 03b277d619..8f3f78f740 100644
--- a/api/core/variables/consts.py
+++ b/api/core/variables/consts.py
@@ -4,4 +4,4 @@
#
# If the selector length is more than 2, the remaining parts are the keys / indexes paths used
# to extract part of the variable value.
-MIN_SELECTORS_LENGTH = 2
+SELECTORS_LENGTH = 2
diff --git a/api/core/workflow/entities/variable_pool.py b/api/core/workflow/entities/variable_pool.py
index fbb8df6b01..fb0794844e 100644
--- a/api/core/workflow/entities/variable_pool.py
+++ b/api/core/workflow/entities/variable_pool.py
@@ -7,8 +7,8 @@ from pydantic import BaseModel, Field
from core.file import File, FileAttribute, file_manager
from core.variables import Segment, SegmentGroup, Variable
-from core.variables.consts import MIN_SELECTORS_LENGTH
-from core.variables.segments import FileSegment, NoneSegment
+from core.variables.consts import SELECTORS_LENGTH
+from core.variables.segments import FileSegment, ObjectSegment
from core.variables.variables import VariableUnion
from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, ENVIRONMENT_VARIABLE_NODE_ID, SYSTEM_VARIABLE_NODE_ID
from core.workflow.system_variable import SystemVariable
@@ -24,7 +24,7 @@ class VariablePool(BaseModel):
# The first element of the selector is the node id, it's the first-level key in the dictionary.
# Other elements of the selector are the keys in the second-level dictionary. To get the key, we hash the
# elements of the selector except the first one.
- variable_dictionary: defaultdict[str, Annotated[dict[int, VariableUnion], Field(default_factory=dict)]] = Field(
+ variable_dictionary: defaultdict[str, Annotated[dict[str, VariableUnion], Field(default_factory=dict)]] = Field(
description="Variables mapping",
default=defaultdict(dict),
)
@@ -36,6 +36,7 @@ class VariablePool(BaseModel):
)
system_variables: SystemVariable = Field(
description="System variables",
+ default_factory=SystemVariable.empty,
)
environment_variables: Sequence[VariableUnion] = Field(
description="Environment variables.",
@@ -58,23 +59,29 @@ class VariablePool(BaseModel):
def add(self, selector: Sequence[str], value: Any, /) -> None:
"""
- Adds a variable to the variable pool.
+ Add a variable to the variable pool.
- NOTE: You should not add a non-Segment value to the variable pool
- even if it is allowed now.
+ This method accepts a selector path and a value, converting the value
+ to a Variable object if necessary before storing it in the pool.
Args:
- selector (Sequence[str]): The selector for the variable.
- value (VariableValue): The value of the variable.
+ selector: A two-element sequence containing [node_id, variable_name].
+ The selector must have exactly 2 elements to be valid.
+ value: The value to store. Can be a Variable, Segment, or any value
+ that can be converted to a Segment (str, int, float, dict, list, File).
Raises:
- ValueError: If the selector is invalid.
+ ValueError: If selector length is not exactly 2 elements.
- Returns:
- None
+ Note:
+ While non-Segment values are currently accepted and automatically
+ converted, it's recommended to pass Segment or Variable objects directly.
"""
- if len(selector) < MIN_SELECTORS_LENGTH:
- raise ValueError("Invalid selector")
+ if len(selector) != SELECTORS_LENGTH:
+ raise ValueError(
+ f"Invalid selector: expected {SELECTORS_LENGTH} elements (node_id, variable_name), "
+ f"got {len(selector)} elements"
+ )
if isinstance(value, Variable):
variable = value
@@ -84,57 +91,85 @@ class VariablePool(BaseModel):
segment = variable_factory.build_segment(value)
variable = variable_factory.segment_to_variable(segment=segment, selector=selector)
- key, hash_key = self._selector_to_keys(selector)
+ node_id, name = self._selector_to_keys(selector)
# Based on the definition of `VariableUnion`,
# `list[Variable]` can be safely used as `list[VariableUnion]` since they are compatible.
- self.variable_dictionary[key][hash_key] = cast(VariableUnion, variable)
+ self.variable_dictionary[node_id][name] = cast(VariableUnion, variable)
@classmethod
- def _selector_to_keys(cls, selector: Sequence[str]) -> tuple[str, int]:
- return selector[0], hash(tuple(selector[1:]))
+ def _selector_to_keys(cls, selector: Sequence[str]) -> tuple[str, str]:
+ return selector[0], selector[1]
def _has(self, selector: Sequence[str]) -> bool:
- key, hash_key = self._selector_to_keys(selector)
- if key not in self.variable_dictionary:
+ node_id, name = self._selector_to_keys(selector)
+ if node_id not in self.variable_dictionary:
return False
- if hash_key not in self.variable_dictionary[key]:
+ if name not in self.variable_dictionary[node_id]:
return False
return True
def get(self, selector: Sequence[str], /) -> Segment | None:
"""
- Retrieves the value from the variable pool based on the given selector.
+ Retrieve a variable's value from the pool as a Segment.
+
+ This method supports both simple selectors [node_id, variable_name] and
+ extended selectors that include attribute access for FileSegment and
+ ObjectSegment types.
Args:
- selector (Sequence[str]): The selector used to identify the variable.
+ selector: A sequence with at least 2 elements:
+ - [node_id, variable_name]: Returns the full segment
+ - [node_id, variable_name, attr, ...]: Returns a nested value
+ from FileSegment (e.g., 'url', 'name') or ObjectSegment
Returns:
- Any: The value associated with the given selector.
+ The Segment associated with the selector, or None if not found.
+ Returns None if selector has fewer than 2 elements.
Raises:
- ValueError: If the selector is invalid.
+ ValueError: If attempting to access an invalid FileAttribute.
"""
- if len(selector) < MIN_SELECTORS_LENGTH:
+ if len(selector) < SELECTORS_LENGTH:
return None
- key, hash_key = self._selector_to_keys(selector)
- value: Segment | None = self.variable_dictionary[key].get(hash_key)
+ node_id, name = self._selector_to_keys(selector)
+ segment: Segment | None = self.variable_dictionary[node_id].get(name)
- if value is None:
- selector, attr = selector[:-1], selector[-1]
+ if segment is None:
+ return None
+
+ if len(selector) == 2:
+ return segment
+
+ if isinstance(segment, FileSegment):
+ attr = selector[2]
# Python support `attr in FileAttribute` after 3.12
if attr not in {item.value for item in FileAttribute}:
return None
- value = self.get(selector)
- if not isinstance(value, FileSegment | NoneSegment):
- return None
- if isinstance(value, FileSegment):
- attr = FileAttribute(attr)
- attr_value = file_manager.get_attr(file=value.value, attr=attr)
- return variable_factory.build_segment(attr_value)
- return value
+ attr = FileAttribute(attr)
+ attr_value = file_manager.get_attr(file=segment.value, attr=attr)
+ return variable_factory.build_segment(attr_value)
- return value
+ # Navigate through nested attributes
+ result: Any = segment
+ for attr in selector[2:]:
+ result = self._extract_value(result)
+ result = self._get_nested_attribute(result, attr)
+ if result is None:
+ return None
+
+ # Return result as Segment
+ return result if isinstance(result, Segment) else variable_factory.build_segment(result)
+
+ def _extract_value(self, obj: Any) -> Any:
+ """Extract the actual value from an ObjectSegment."""
+ return obj.value if isinstance(obj, ObjectSegment) else obj
+
+ def _get_nested_attribute(self, obj: Mapping[str, Any], attr: str) -> Any:
+ """Get a nested attribute from a dictionary-like object."""
+ if not isinstance(obj, dict):
+ return None
+ return obj.get(attr)
def remove(self, selector: Sequence[str], /):
"""
diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py
index ef13277e0c..b9663d32f7 100644
--- a/api/core/workflow/graph_engine/graph_engine.py
+++ b/api/core/workflow/graph_engine/graph_engine.py
@@ -15,7 +15,7 @@ from configs import dify_config
from core.app.apps.exc import GenerateTaskStoppedError
from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.node_entities import AgentNodeStrategyInit, NodeRunResult
-from core.workflow.entities.variable_pool import VariablePool, VariableValue
+from core.workflow.entities.variable_pool import VariablePool
from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus
from core.workflow.graph_engine.condition_handlers.condition_manager import ConditionManager
from core.workflow.graph_engine.entities.event import (
@@ -51,7 +51,6 @@ from core.workflow.nodes.base import BaseNode
from core.workflow.nodes.end.end_stream_processor import EndStreamProcessor
from core.workflow.nodes.enums import ErrorStrategy, FailBranchSourceHandle
from core.workflow.nodes.event import RunCompletedEvent, RunRetrieverResourceEvent, RunStreamChunkEvent
-from core.workflow.utils import variable_utils
from libs.flask_utils import preserve_flask_contexts
from models.enums import UserFrom
from models.workflow import WorkflowType
@@ -701,11 +700,9 @@ class GraphEngine:
route_node_state.status = RouteNodeState.Status.EXCEPTION
if run_result.outputs:
for variable_key, variable_value in run_result.outputs.items():
- # append variables to variable pool recursively
- self._append_variables_recursively(
- node_id=node.node_id,
- variable_key_list=[variable_key],
- variable_value=variable_value,
+ # Add variables to variable pool
+ self.graph_runtime_state.variable_pool.add(
+ [node.node_id, variable_key], variable_value
)
yield NodeRunExceptionEvent(
error=run_result.error or "System Error",
@@ -758,11 +755,9 @@ class GraphEngine:
# append node output variables to variable pool
if run_result.outputs:
for variable_key, variable_value in run_result.outputs.items():
- # append variables to variable pool recursively
- self._append_variables_recursively(
- node_id=node.node_id,
- variable_key_list=[variable_key],
- variable_value=variable_value,
+ # Add variables to variable pool
+ self.graph_runtime_state.variable_pool.add(
+ [node.node_id, variable_key], variable_value
)
# When setting metadata, convert to dict first
@@ -851,21 +846,6 @@ class GraphEngine:
logger.exception("Node %s run failed", node.title)
raise e
- def _append_variables_recursively(self, node_id: str, variable_key_list: list[str], variable_value: VariableValue):
- """
- Append variables recursively
- :param node_id: node id
- :param variable_key_list: variable key list
- :param variable_value: variable value
- :return:
- """
- variable_utils.append_variables_recursively(
- self.graph_runtime_state.variable_pool,
- node_id,
- variable_key_list,
- variable_value,
- )
-
def _is_timed_out(self, start_at: float, max_execution_time: int) -> bool:
"""
Check timeout
diff --git a/api/core/workflow/nodes/variable_assigner/common/helpers.py b/api/core/workflow/nodes/variable_assigner/common/helpers.py
index 0d2822233e..48deda724a 100644
--- a/api/core/workflow/nodes/variable_assigner/common/helpers.py
+++ b/api/core/workflow/nodes/variable_assigner/common/helpers.py
@@ -4,7 +4,7 @@ from typing import Any, TypeVar
from pydantic import BaseModel
from core.variables import Segment
-from core.variables.consts import MIN_SELECTORS_LENGTH
+from core.variables.consts import SELECTORS_LENGTH
from core.variables.types import SegmentType
# Use double underscore (`__`) prefix for internal variables
@@ -23,7 +23,7 @@ _T = TypeVar("_T", bound=MutableMapping[str, Any])
def variable_to_processed_data(selector: Sequence[str], seg: Segment) -> UpdatedVariable:
- if len(selector) < MIN_SELECTORS_LENGTH:
+ if len(selector) < SELECTORS_LENGTH:
raise Exception("selector too short")
node_id, var_name = selector[:2]
return UpdatedVariable(
diff --git a/api/core/workflow/nodes/variable_assigner/v2/node.py b/api/core/workflow/nodes/variable_assigner/v2/node.py
index c0215cae71..00ee921cee 100644
--- a/api/core/workflow/nodes/variable_assigner/v2/node.py
+++ b/api/core/workflow/nodes/variable_assigner/v2/node.py
@@ -4,7 +4,7 @@ from typing import Any, Optional, cast
from core.app.entities.app_invoke_entities import InvokeFrom
from core.variables import SegmentType, Variable
-from core.variables.consts import MIN_SELECTORS_LENGTH
+from core.variables.consts import SELECTORS_LENGTH
from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID
from core.workflow.conversation_variable_updater import ConversationVariableUpdater
from core.workflow.entities.node_entities import NodeRunResult
@@ -46,7 +46,7 @@ def _source_mapping_from_item(mapping: MutableMapping[str, Sequence[str]], node_
selector = item.value
if not isinstance(selector, list):
raise InvalidDataError(f"selector is not a list, {node_id=}, {item=}")
- if len(selector) < MIN_SELECTORS_LENGTH:
+ if len(selector) < SELECTORS_LENGTH:
raise InvalidDataError(f"selector too short, {node_id=}, {item=}")
selector_str = ".".join(selector)
key = f"{node_id}.#{selector_str}#"
diff --git a/api/core/workflow/utils/variable_utils.py b/api/core/workflow/utils/variable_utils.py
deleted file mode 100644
index 868868315b..0000000000
--- a/api/core/workflow/utils/variable_utils.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from core.variables.segments import ObjectSegment, Segment
-from core.workflow.entities.variable_pool import VariablePool, VariableValue
-
-
-def append_variables_recursively(
- pool: VariablePool, node_id: str, variable_key_list: list[str], variable_value: VariableValue | Segment
-):
- """
- Append variables recursively
- :param pool: variable pool to append variables to
- :param node_id: node id
- :param variable_key_list: variable key list
- :param variable_value: variable value
- :return:
- """
- pool.add([node_id] + variable_key_list, variable_value)
-
- # if variable_value is a dict, then recursively append variables
- if isinstance(variable_value, ObjectSegment):
- variable_dict = variable_value.value
- elif isinstance(variable_value, dict):
- variable_dict = variable_value
- else:
- return
-
- for key, value in variable_dict.items():
- # construct new key list
- new_key_list = variable_key_list + [key]
- append_variables_recursively(pool, node_id=node_id, variable_key_list=new_key_list, variable_value=value)
diff --git a/api/core/workflow/variable_loader.py b/api/core/workflow/variable_loader.py
index 1e13871d0a..a35215855e 100644
--- a/api/core/workflow/variable_loader.py
+++ b/api/core/workflow/variable_loader.py
@@ -3,9 +3,8 @@ from collections.abc import Mapping, Sequence
from typing import Any, Protocol
from core.variables import Variable
-from core.variables.consts import MIN_SELECTORS_LENGTH
+from core.variables.consts import SELECTORS_LENGTH
from core.workflow.entities.variable_pool import VariablePool
-from core.workflow.utils import variable_utils
class VariableLoader(Protocol):
@@ -78,7 +77,7 @@ def load_into_variable_pool(
variables_to_load.append(list(selector))
loaded = variable_loader.load_variables(variables_to_load)
for var in loaded:
- assert len(var.selector) >= MIN_SELECTORS_LENGTH, f"Invalid variable {var}"
- variable_utils.append_variables_recursively(
- variable_pool, node_id=var.selector[0], variable_key_list=list(var.selector[1:]), variable_value=var
- )
+ assert len(var.selector) >= SELECTORS_LENGTH, f"Invalid variable {var}"
+ # Add variable directly to the pool
+ # The variable pool expects 2-element selectors [node_id, variable_name]
+ variable_pool.add([var.selector[0], var.selector[1]], var)
diff --git a/api/pyproject.toml b/api/pyproject.toml
index a86ec7ee6b..4b395276ef 100644
--- a/api/pyproject.toml
+++ b/api/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "dify-api"
-version = "1.7.1"
+version = "1.7.2"
requires-python = ">=3.11,<3.13"
dependencies = [
diff --git a/api/repositories/factory.py b/api/repositories/factory.py
index 070cdd46dd..1f0320054c 100644
--- a/api/repositories/factory.py
+++ b/api/repositories/factory.py
@@ -48,7 +48,6 @@ class DifyAPIRepositoryFactory(DifyCoreRepositoryFactory):
RepositoryImportError: If the configured repository cannot be imported or instantiated
"""
class_path = dify_config.API_WORKFLOW_NODE_EXECUTION_REPOSITORY
- logger.debug("Creating DifyAPIWorkflowNodeExecutionRepository from: %s", class_path)
try:
repository_class = cls._import_class(class_path)
@@ -86,7 +85,6 @@ class DifyAPIRepositoryFactory(DifyCoreRepositoryFactory):
RepositoryImportError: If the configured repository cannot be imported or instantiated
"""
class_path = dify_config.API_WORKFLOW_RUN_REPOSITORY
- logger.debug("Creating APIWorkflowRunRepository from: %s", class_path)
try:
repository_class = cls._import_class(class_path)
diff --git a/api/services/workflow_draft_variable_service.py b/api/services/workflow_draft_variable_service.py
index 6bbb3bca04..b52f4924ba 100644
--- a/api/services/workflow_draft_variable_service.py
+++ b/api/services/workflow_draft_variable_service.py
@@ -13,7 +13,7 @@ from sqlalchemy.sql.expression import and_, or_
from core.app.entities.app_invoke_entities import InvokeFrom
from core.file.models import File
from core.variables import Segment, StringSegment, Variable
-from core.variables.consts import MIN_SELECTORS_LENGTH
+from core.variables.consts import SELECTORS_LENGTH
from core.variables.segments import ArrayFileSegment, FileSegment
from core.variables.types import SegmentType
from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, ENVIRONMENT_VARIABLE_NODE_ID, SYSTEM_VARIABLE_NODE_ID
@@ -147,7 +147,7 @@ class WorkflowDraftVariableService:
) -> list[WorkflowDraftVariable]:
ors = []
for selector in selectors:
- assert len(selector) >= MIN_SELECTORS_LENGTH, f"Invalid selector to get: {selector}"
+ assert len(selector) >= SELECTORS_LENGTH, f"Invalid selector to get: {selector}"
node_id, name = selector[:2]
ors.append(and_(WorkflowDraftVariable.node_id == node_id, WorkflowDraftVariable.name == name))
@@ -608,7 +608,7 @@ class DraftVariableSaver:
for item in updated_variables:
selector = item.selector
- if len(selector) < MIN_SELECTORS_LENGTH:
+ if len(selector) < SELECTORS_LENGTH:
raise Exception("selector too short")
# NOTE(QuantumGhost): only the following two kinds of variable could be updated by
# VariableAssigner: ConversationVariable and iteration variable.
diff --git a/api/tasks/clean_dataset_task.py b/api/tasks/clean_dataset_task.py
index 69e5df0253..9a45115b05 100644
--- a/api/tasks/clean_dataset_task.py
+++ b/api/tasks/clean_dataset_task.py
@@ -56,19 +56,29 @@ def clean_dataset_task(
documents = db.session.query(Document).where(Document.dataset_id == dataset_id).all()
segments = db.session.query(DocumentSegment).where(DocumentSegment.dataset_id == dataset_id).all()
- # Fix: Always clean vector database resources regardless of document existence
- # This ensures all 33 vector databases properly drop tables/collections/indices
- if doc_form is None:
- # Use default paragraph index type for empty datasets to enable vector database cleanup
+    # Validate doc_form: treat None, empty strings, and whitespace-only strings as invalid
+    # so that every invalid value falls back to the default index type below
+ if doc_form is None or (isinstance(doc_form, str) and not doc_form.strip()):
+ # Use default paragraph index type for empty/invalid datasets to enable vector database cleanup
from core.rag.index_processor.constant.index_type import IndexType
doc_form = IndexType.PARAGRAPH_INDEX
logging.info(
- click.style(f"No documents found, using default index type for cleanup: {doc_form}", fg="yellow")
+ click.style(f"Invalid doc_form detected, using default index type for cleanup: {doc_form}", fg="yellow")
)
- index_processor = IndexProcessorFactory(doc_form).init_index_processor()
- index_processor.clean(dataset, None, with_keywords=True, delete_child_chunks=True)
+    # Wrap IndexProcessorFactory.clean() in exception handling so that a vector database
+    # cleanup failure does not block the Document/Segment deletion that follows
+ try:
+ index_processor = IndexProcessorFactory(doc_form).init_index_processor()
+ index_processor.clean(dataset, None, with_keywords=True, delete_child_chunks=True)
+ logging.info(click.style(f"Successfully cleaned vector database for dataset: {dataset_id}", fg="green"))
+    except Exception:
+ logging.exception(click.style(f"Failed to clean vector database for dataset {dataset_id}", fg="red"))
+ # Continue with document and segment deletion even if vector cleanup fails
+ logging.info(
+ click.style(f"Continuing with document and segment deletion for dataset: {dataset_id}", fg="yellow")
+ )
if documents is None or len(documents) == 0:
logging.info(click.style(f"No documents found for dataset: {dataset_id}", fg="green"))
@@ -128,6 +138,14 @@ def clean_dataset_task(
click.style(f"Cleaned dataset when dataset deleted: {dataset_id} latency: {end_at - start_at}", fg="green")
)
except Exception:
+        # Roll back the session so the exception does not leave it in a dirty state
+        # before the cleanup in the finally block closes it
+ try:
+ db.session.rollback()
+ logging.info(click.style(f"Rolled back database session for dataset: {dataset_id}", fg="yellow"))
+        except Exception:
+ logging.exception("Failed to rollback database session")
+
logging.exception("Cleaned dataset when dataset deleted failed")
finally:
db.session.close()
diff --git a/api/tests/integration_tests/workflow/nodes/test_code.py b/api/tests/integration_tests/workflow/nodes/test_code.py
index 707b28e6d8..4f659c5e13 100644
--- a/api/tests/integration_tests/workflow/nodes/test_code.py
+++ b/api/tests/integration_tests/workflow/nodes/test_code.py
@@ -55,8 +55,8 @@ def init_code_node(code_config: dict):
environment_variables=[],
conversation_variables=[],
)
- variable_pool.add(["code", "123", "args1"], 1)
- variable_pool.add(["code", "123", "args2"], 2)
+ variable_pool.add(["code", "args1"], 1)
+ variable_pool.add(["code", "args2"], 2)
node = CodeNode(
id=str(uuid.uuid4()),
@@ -96,9 +96,9 @@ def test_execute_code(setup_code_executor_mock):
"variables": [
{
"variable": "args1",
- "value_selector": ["1", "123", "args1"],
+ "value_selector": ["1", "args1"],
},
- {"variable": "args2", "value_selector": ["1", "123", "args2"]},
+ {"variable": "args2", "value_selector": ["1", "args2"]},
],
"answer": "123",
"code_language": "python3",
@@ -107,8 +107,8 @@ def test_execute_code(setup_code_executor_mock):
}
node = init_code_node(code_config)
- node.graph_runtime_state.variable_pool.add(["1", "123", "args1"], 1)
- node.graph_runtime_state.variable_pool.add(["1", "123", "args2"], 2)
+ node.graph_runtime_state.variable_pool.add(["1", "args1"], 1)
+ node.graph_runtime_state.variable_pool.add(["1", "args2"], 2)
# execute node
result = node._run()
@@ -142,9 +142,9 @@ def test_execute_code_output_validator(setup_code_executor_mock):
"variables": [
{
"variable": "args1",
- "value_selector": ["1", "123", "args1"],
+ "value_selector": ["1", "args1"],
},
- {"variable": "args2", "value_selector": ["1", "123", "args2"]},
+ {"variable": "args2", "value_selector": ["1", "args2"]},
],
"answer": "123",
"code_language": "python3",
@@ -153,8 +153,8 @@ def test_execute_code_output_validator(setup_code_executor_mock):
}
node = init_code_node(code_config)
- node.graph_runtime_state.variable_pool.add(["1", "123", "args1"], 1)
- node.graph_runtime_state.variable_pool.add(["1", "123", "args2"], 2)
+ node.graph_runtime_state.variable_pool.add(["1", "args1"], 1)
+ node.graph_runtime_state.variable_pool.add(["1", "args2"], 2)
# execute node
result = node._run()
@@ -217,9 +217,9 @@ def test_execute_code_output_validator_depth():
"variables": [
{
"variable": "args1",
- "value_selector": ["1", "123", "args1"],
+ "value_selector": ["1", "args1"],
},
- {"variable": "args2", "value_selector": ["1", "123", "args2"]},
+ {"variable": "args2", "value_selector": ["1", "args2"]},
],
"answer": "123",
"code_language": "python3",
@@ -307,9 +307,9 @@ def test_execute_code_output_object_list():
"variables": [
{
"variable": "args1",
- "value_selector": ["1", "123", "args1"],
+ "value_selector": ["1", "args1"],
},
- {"variable": "args2", "value_selector": ["1", "123", "args2"]},
+ {"variable": "args2", "value_selector": ["1", "args2"]},
],
"answer": "123",
"code_language": "python3",
diff --git a/api/tests/integration_tests/workflow/nodes/test_http.py b/api/tests/integration_tests/workflow/nodes/test_http.py
index d7856129a3..344539d51a 100644
--- a/api/tests/integration_tests/workflow/nodes/test_http.py
+++ b/api/tests/integration_tests/workflow/nodes/test_http.py
@@ -49,8 +49,8 @@ def init_http_node(config: dict):
environment_variables=[],
conversation_variables=[],
)
- variable_pool.add(["a", "b123", "args1"], 1)
- variable_pool.add(["a", "b123", "args2"], 2)
+ variable_pool.add(["a", "args1"], 1)
+ variable_pool.add(["a", "args2"], 2)
node = HttpRequestNode(
id=str(uuid.uuid4()),
@@ -171,7 +171,7 @@ def test_template(setup_http_mock):
"title": "http",
"desc": "",
"method": "get",
- "url": "http://example.com/{{#a.b123.args2#}}",
+ "url": "http://example.com/{{#a.args2#}}",
"authorization": {
"type": "api-key",
"config": {
@@ -180,8 +180,8 @@ def test_template(setup_http_mock):
"header": "api-key",
},
},
- "headers": "X-Header:123\nX-Header2:{{#a.b123.args2#}}",
- "params": "A:b\nTemplate:{{#a.b123.args2#}}",
+ "headers": "X-Header:123\nX-Header2:{{#a.args2#}}",
+ "params": "A:b\nTemplate:{{#a.args2#}}",
"body": None,
},
}
@@ -223,7 +223,7 @@ def test_json(setup_http_mock):
{
"key": "",
"type": "text",
- "value": '{"a": "{{#a.b123.args1#}}"}',
+ "value": '{"a": "{{#a.args1#}}"}',
},
],
},
@@ -264,12 +264,12 @@ def test_x_www_form_urlencoded(setup_http_mock):
{
"key": "a",
"type": "text",
- "value": "{{#a.b123.args1#}}",
+ "value": "{{#a.args1#}}",
},
{
"key": "b",
"type": "text",
- "value": "{{#a.b123.args2#}}",
+ "value": "{{#a.args2#}}",
},
],
},
@@ -310,12 +310,12 @@ def test_form_data(setup_http_mock):
{
"key": "a",
"type": "text",
- "value": "{{#a.b123.args1#}}",
+ "value": "{{#a.args1#}}",
},
{
"key": "b",
"type": "text",
- "value": "{{#a.b123.args2#}}",
+ "value": "{{#a.args2#}}",
},
],
},
@@ -436,3 +436,87 @@ def test_multi_colons_parse(setup_http_mock):
assert 'form-data; name="Redirect"\r\n\r\nhttp://example6.com' in result.process_data.get("request", "")
# resp = result.outputs
# assert "http://example3.com" == resp.get("headers", {}).get("referer")
+
+
+@pytest.mark.parametrize("setup_http_mock", [["none"]], indirect=True)
+def test_nested_object_variable_selector(setup_http_mock):
+ """Test variable selector functionality with nested object properties."""
+ # Create independent test setup without affecting other tests
+ graph_config = {
+ "edges": [
+ {
+ "id": "start-source-next-target",
+ "source": "start",
+ "target": "1",
+ },
+ ],
+ "nodes": [
+ {"data": {"type": "start"}, "id": "start"},
+ {
+ "id": "1",
+ "data": {
+ "title": "http",
+ "desc": "",
+ "method": "get",
+ "url": "http://example.com/{{#a.args2#}}/{{#a.args3.nested#}}",
+ "authorization": {
+ "type": "api-key",
+ "config": {
+ "type": "basic",
+ "api_key": "ak-xxx",
+ "header": "api-key",
+ },
+ },
+ "headers": "X-Header:{{#a.args3.nested#}}",
+ "params": "nested_param:{{#a.args3.nested#}}",
+ "body": None,
+ },
+ },
+ ],
+ }
+
+ graph = Graph.init(graph_config=graph_config)
+
+ init_params = GraphInitParams(
+ tenant_id="1",
+ app_id="1",
+ workflow_type=WorkflowType.WORKFLOW,
+ workflow_id="1",
+ graph_config=graph_config,
+ user_id="1",
+ user_from=UserFrom.ACCOUNT,
+ invoke_from=InvokeFrom.DEBUGGER,
+ call_depth=0,
+ )
+
+ # Create independent variable pool for this test only
+ variable_pool = VariablePool(
+ system_variables=SystemVariable(user_id="aaa", files=[]),
+ user_inputs={},
+ environment_variables=[],
+ conversation_variables=[],
+ )
+ variable_pool.add(["a", "args1"], 1)
+ variable_pool.add(["a", "args2"], 2)
+ variable_pool.add(["a", "args3"], {"nested": "nested_value"}) # Only for this test
+
+ node = HttpRequestNode(
+ id=str(uuid.uuid4()),
+ graph_init_params=init_params,
+ graph=graph,
+ graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
+ config=graph_config["nodes"][1],
+ )
+
+ # Initialize node data
+ if "data" in graph_config["nodes"][1]:
+ node.init_node_data(graph_config["nodes"][1]["data"])
+
+ result = node._run()
+ assert result.process_data is not None
+ data = result.process_data.get("request", "")
+
+ # Verify nested object property is correctly resolved
+ assert "/2/nested_value" in data # URL path should contain resolved nested value
+ assert "X-Header: nested_value" in data # Header should contain nested value
+ assert "nested_param=nested_value" in data # Param should contain nested value
diff --git a/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py b/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py
index edd70193a8..ef373d968d 100644
--- a/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py
+++ b/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py
@@ -71,8 +71,8 @@ def init_parameter_extractor_node(config: dict):
environment_variables=[],
conversation_variables=[],
)
- variable_pool.add(["a", "b123", "args1"], 1)
- variable_pool.add(["a", "b123", "args2"], 2)
+ variable_pool.add(["a", "args1"], 1)
+ variable_pool.add(["a", "args2"], 2)
node = ParameterExtractorNode(
id=str(uuid.uuid4()),
diff --git a/api/tests/integration_tests/workflow/nodes/test_template_transform.py b/api/tests/integration_tests/workflow/nodes/test_template_transform.py
index f71a5ee140..56265c6b95 100644
--- a/api/tests/integration_tests/workflow/nodes/test_template_transform.py
+++ b/api/tests/integration_tests/workflow/nodes/test_template_transform.py
@@ -26,9 +26,9 @@ def test_execute_code(setup_code_executor_mock):
"variables": [
{
"variable": "args1",
- "value_selector": ["1", "123", "args1"],
+ "value_selector": ["1", "args1"],
},
- {"variable": "args2", "value_selector": ["1", "123", "args2"]},
+ {"variable": "args2", "value_selector": ["1", "args2"]},
],
"template": code,
},
@@ -66,8 +66,8 @@ def test_execute_code(setup_code_executor_mock):
environment_variables=[],
conversation_variables=[],
)
- variable_pool.add(["1", "123", "args1"], 1)
- variable_pool.add(["1", "123", "args2"], 3)
+ variable_pool.add(["1", "args1"], 1)
+ variable_pool.add(["1", "args2"], 3)
node = TemplateTransformNode(
id=str(uuid.uuid4()),
diff --git a/api/tests/integration_tests/workflow/nodes/test_tool.py b/api/tests/integration_tests/workflow/nodes/test_tool.py
index 8476c1f874..19a9b36350 100644
--- a/api/tests/integration_tests/workflow/nodes/test_tool.py
+++ b/api/tests/integration_tests/workflow/nodes/test_tool.py
@@ -81,7 +81,7 @@ def test_tool_variable_invoke():
ToolParameterConfigurationManager.decrypt_tool_parameters = MagicMock(return_value={"format": "%Y-%m-%d %H:%M:%S"})
- node.graph_runtime_state.variable_pool.add(["1", "123", "args1"], "1+1")
+ node.graph_runtime_state.variable_pool.add(["1", "args1"], "1+1")
# execute node
result = node._run()
diff --git a/api/tests/test_containers_integration_tests/services/test_message_service.py b/api/tests/test_containers_integration_tests/services/test_message_service.py
new file mode 100644
index 0000000000..25ba0d03ef
--- /dev/null
+++ b/api/tests/test_containers_integration_tests/services/test_message_service.py
@@ -0,0 +1,775 @@
+from unittest.mock import patch
+
+import pytest
+from faker import Faker
+
+from models.model import MessageFeedback
+from services.app_service import AppService
+from services.errors.message import (
+ FirstMessageNotExistsError,
+ LastMessageNotExistsError,
+ MessageNotExistsError,
+ SuggestedQuestionsAfterAnswerDisabledError,
+)
+from services.message_service import MessageService
+
+
+class TestMessageService:
+ """Integration tests for MessageService using testcontainers."""
+
+ @pytest.fixture
+ def mock_external_service_dependencies(self):
+ """Mock setup for external service dependencies."""
+ with (
+ patch("services.account_service.FeatureService") as mock_account_feature_service,
+ patch("services.message_service.ModelManager") as mock_model_manager,
+ patch("services.message_service.WorkflowService") as mock_workflow_service,
+ patch("services.message_service.AdvancedChatAppConfigManager") as mock_app_config_manager,
+ patch("services.message_service.LLMGenerator") as mock_llm_generator,
+ patch("services.message_service.TraceQueueManager") as mock_trace_manager_class,
+ patch("services.message_service.TokenBufferMemory") as mock_token_buffer_memory,
+ ):
+ # Setup default mock returns
+ mock_account_feature_service.get_features.return_value.billing.enabled = False
+
+ # Mock ModelManager
+ mock_model_instance = mock_model_manager.return_value.get_default_model_instance.return_value
+ mock_model_instance.get_tts_voices.return_value = [{"value": "test-voice"}]
+
+ # Mock get_model_instance method as well
+ mock_model_manager.return_value.get_model_instance.return_value = mock_model_instance
+
+ # Mock WorkflowService
+ mock_workflow = mock_workflow_service.return_value.get_published_workflow.return_value
+ mock_workflow_service.return_value.get_draft_workflow.return_value = mock_workflow
+
+ # Mock AdvancedChatAppConfigManager
+ mock_app_config = mock_app_config_manager.get_app_config.return_value
+ mock_app_config.additional_features.suggested_questions_after_answer = True
+
+ # Mock LLMGenerator
+ mock_llm_generator.generate_suggested_questions_after_answer.return_value = ["Question 1", "Question 2"]
+
+ # Mock TraceQueueManager
+ mock_trace_manager_instance = mock_trace_manager_class.return_value
+
+ # Mock TokenBufferMemory
+ mock_memory_instance = mock_token_buffer_memory.return_value
+ mock_memory_instance.get_history_prompt_text.return_value = "Mocked history prompt"
+
+ yield {
+ "account_feature_service": mock_account_feature_service,
+ "model_manager": mock_model_manager,
+ "workflow_service": mock_workflow_service,
+ "app_config_manager": mock_app_config_manager,
+ "llm_generator": mock_llm_generator,
+ "trace_manager_class": mock_trace_manager_class,
+ "trace_manager_instance": mock_trace_manager_instance,
+ "token_buffer_memory": mock_token_buffer_memory,
+ # "current_user": mock_current_user,
+ }
+
+ def _create_test_app_and_account(self, db_session_with_containers, mock_external_service_dependencies):
+ """
+ Helper method to create a test app and account for testing.
+
+ Args:
+ db_session_with_containers: Database session from testcontainers infrastructure
+ mock_external_service_dependencies: Mock dependencies
+
+ Returns:
+ tuple: (app, account) - Created app and account instances
+ """
+ fake = Faker()
+
+ # Setup mocks for account creation
+ mock_external_service_dependencies[
+ "account_feature_service"
+ ].get_system_features.return_value.is_allow_register = True
+
+ # Create account and tenant first
+ from services.account_service import AccountService, TenantService
+
+ account = AccountService.create_account(
+ email=fake.email(),
+ name=fake.name(),
+ interface_language="en-US",
+ password=fake.password(length=12),
+ )
+ TenantService.create_owner_tenant_if_not_exist(account, name=fake.company())
+ tenant = account.current_tenant
+
+ # Setup app creation arguments
+ app_args = {
+ "name": fake.company(),
+ "description": fake.text(max_nb_chars=100),
+ "mode": "advanced-chat", # Use advanced-chat mode to use mocked workflow
+ "icon_type": "emoji",
+ "icon": "🤖",
+ "icon_background": "#FF6B6B",
+ "api_rph": 100,
+ "api_rpm": 10,
+ }
+
+ # Create app
+ app_service = AppService()
+ app = app_service.create_app(tenant.id, app_args, account)
+
+ # Setup current_user mock
+ self._mock_current_user(mock_external_service_dependencies, account.id, tenant.id)
+
+ return app, account
+
+ def _mock_current_user(self, mock_external_service_dependencies, account_id, tenant_id):
+ """
+ Helper method to mock the current user for testing.
+ """
+ # mock_external_service_dependencies["current_user"].id = account_id
+ # mock_external_service_dependencies["current_user"].current_tenant_id = tenant_id
+
+ def _create_test_conversation(self, app, account, fake):
+ """
+ Helper method to create a test conversation with all required fields.
+ """
+ from extensions.ext_database import db
+ from models.model import Conversation
+
+ conversation = Conversation(
+ app_id=app.id,
+ app_model_config_id=None,
+ model_provider=None,
+ model_id="",
+ override_model_configs=None,
+ mode=app.mode,
+ name=fake.sentence(),
+ inputs={},
+ introduction="",
+ system_instruction="",
+ system_instruction_tokens=0,
+ status="normal",
+ invoke_from="console",
+ from_source="console",
+ from_end_user_id=None,
+ from_account_id=account.id,
+ )
+
+ db.session.add(conversation)
+ db.session.flush()
+ return conversation
+
+ def _create_test_message(self, app, conversation, account, fake):
+ """
+ Helper method to create a test message with all required fields.
+ """
+ import json
+
+ from extensions.ext_database import db
+ from models.model import Message
+
+ message = Message(
+ app_id=app.id,
+ model_provider=None,
+ model_id="",
+ override_model_configs=None,
+ conversation_id=conversation.id,
+ inputs={},
+ query=fake.sentence(),
+ message=json.dumps([{"role": "user", "text": fake.sentence()}]),
+ message_tokens=0,
+ message_unit_price=0,
+ message_price_unit=0.001,
+ answer=fake.text(max_nb_chars=200),
+ answer_tokens=0,
+ answer_unit_price=0,
+ answer_price_unit=0.001,
+ parent_message_id=None,
+ provider_response_latency=0,
+ total_price=0,
+ currency="USD",
+ invoke_from="console",
+ from_source="console",
+ from_end_user_id=None,
+ from_account_id=account.id,
+ )
+
+ db.session.add(message)
+ db.session.commit()
+ return message
+
+ def test_pagination_by_first_id_success(self, db_session_with_containers, mock_external_service_dependencies):
+ """
+ Test successful pagination by first ID.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and multiple messages
+ conversation = self._create_test_conversation(app, account, fake)
+ messages = []
+ for i in range(5):
+ message = self._create_test_message(app, conversation, account, fake)
+ messages.append(message)
+
+ # Test pagination by first ID
+ result = MessageService.pagination_by_first_id(
+ app_model=app,
+ user=account,
+ conversation_id=conversation.id,
+ first_id=messages[2].id, # Use middle message as first_id
+ limit=2,
+ order="asc",
+ )
+
+ # Verify results
+ assert result.limit == 2
+ assert len(result.data) == 2
+        # 5 messages in total; only 2 follow the middle message, so there is no further page
+ assert result.has_more is False
+ # Verify messages are in ascending order
+ assert result.data[0].created_at <= result.data[1].created_at
+
+ def test_pagination_by_first_id_no_user(self, db_session_with_containers, mock_external_service_dependencies):
+ """
+ Test pagination by first ID when no user is provided.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Test pagination with no user
+ result = MessageService.pagination_by_first_id(
+ app_model=app, user=None, conversation_id=fake.uuid4(), first_id=None, limit=10
+ )
+
+ # Verify empty result
+ assert result.limit == 10
+ assert len(result.data) == 0
+ assert result.has_more is False
+
+ def test_pagination_by_first_id_no_conversation_id(
+ self, db_session_with_containers, mock_external_service_dependencies
+ ):
+ """
+ Test pagination by first ID when no conversation ID is provided.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Test pagination with no conversation ID
+ result = MessageService.pagination_by_first_id(
+ app_model=app, user=account, conversation_id="", first_id=None, limit=10
+ )
+
+ # Verify empty result
+ assert result.limit == 10
+ assert len(result.data) == 0
+ assert result.has_more is False
+
+ def test_pagination_by_first_id_invalid_first_id(
+ self, db_session_with_containers, mock_external_service_dependencies
+ ):
+ """
+ Test pagination by first ID with invalid first_id.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and message
+ conversation = self._create_test_conversation(app, account, fake)
+ self._create_test_message(app, conversation, account, fake)
+
+ # Test pagination with invalid first_id
+ with pytest.raises(FirstMessageNotExistsError):
+ MessageService.pagination_by_first_id(
+ app_model=app,
+ user=account,
+ conversation_id=conversation.id,
+ first_id=fake.uuid4(), # Non-existent message ID
+ limit=10,
+ )
+
+ def test_pagination_by_last_id_success(self, db_session_with_containers, mock_external_service_dependencies):
+ """
+ Test successful pagination by last ID.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and multiple messages
+ conversation = self._create_test_conversation(app, account, fake)
+ messages = []
+ for i in range(5):
+ message = self._create_test_message(app, conversation, account, fake)
+ messages.append(message)
+
+ # Test pagination by last ID
+ result = MessageService.pagination_by_last_id(
+ app_model=app,
+ user=account,
+ last_id=messages[2].id, # Use middle message as last_id
+ limit=2,
+ conversation_id=conversation.id,
+ )
+
+ # Verify results
+ assert result.limit == 2
+ assert len(result.data) == 2
+        # 5 messages in total; only 2 precede the middle message, so there is no further page
+ assert result.has_more is False
+ # Verify messages are in descending order
+ assert result.data[0].created_at >= result.data[1].created_at
+
+ def test_pagination_by_last_id_with_include_ids(
+ self, db_session_with_containers, mock_external_service_dependencies
+ ):
+ """
+ Test pagination by last ID with include_ids filter.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and multiple messages
+ conversation = self._create_test_conversation(app, account, fake)
+ messages = []
+ for i in range(5):
+ message = self._create_test_message(app, conversation, account, fake)
+ messages.append(message)
+
+ # Test pagination with include_ids
+ include_ids = [messages[0].id, messages[1].id, messages[2].id]
+ result = MessageService.pagination_by_last_id(
+ app_model=app, user=account, last_id=messages[1].id, limit=2, include_ids=include_ids
+ )
+
+ # Verify results
+ assert result.limit == 2
+ assert len(result.data) <= 2
+ # Verify all returned messages are in include_ids
+ for message in result.data:
+ assert message.id in include_ids
+
+ def test_pagination_by_last_id_no_user(self, db_session_with_containers, mock_external_service_dependencies):
+ """
+ Test pagination by last ID when no user is provided.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Test pagination with no user
+ result = MessageService.pagination_by_last_id(app_model=app, user=None, last_id=None, limit=10)
+
+ # Verify empty result
+ assert result.limit == 10
+ assert len(result.data) == 0
+ assert result.has_more is False
+
+ def test_pagination_by_last_id_invalid_last_id(
+ self, db_session_with_containers, mock_external_service_dependencies
+ ):
+ """
+ Test pagination by last ID with invalid last_id.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and message
+ conversation = self._create_test_conversation(app, account, fake)
+ self._create_test_message(app, conversation, account, fake)
+
+ # Test pagination with invalid last_id
+ with pytest.raises(LastMessageNotExistsError):
+ MessageService.pagination_by_last_id(
+ app_model=app,
+ user=account,
+ last_id=fake.uuid4(), # Non-existent message ID
+ limit=10,
+ conversation_id=conversation.id,
+ )
+
+ def test_create_feedback_success(self, db_session_with_containers, mock_external_service_dependencies):
+ """
+ Test successful creation of feedback.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and message
+ conversation = self._create_test_conversation(app, account, fake)
+ message = self._create_test_message(app, conversation, account, fake)
+
+ # Create feedback
+ rating = "like"
+ content = fake.text(max_nb_chars=100)
+ feedback = MessageService.create_feedback(
+ app_model=app, message_id=message.id, user=account, rating=rating, content=content
+ )
+
+ # Verify feedback was created correctly
+ assert feedback.app_id == app.id
+ assert feedback.conversation_id == conversation.id
+ assert feedback.message_id == message.id
+ assert feedback.rating == rating
+ assert feedback.content == content
+ assert feedback.from_source == "admin"
+ assert feedback.from_account_id == account.id
+ assert feedback.from_end_user_id is None
+
+ def test_create_feedback_no_user(self, db_session_with_containers, mock_external_service_dependencies):
+ """
+ Test creating feedback when no user is provided.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and message
+ conversation = self._create_test_conversation(app, account, fake)
+ message = self._create_test_message(app, conversation, account, fake)
+
+ # Test creating feedback with no user
+ with pytest.raises(ValueError, match="user cannot be None"):
+ MessageService.create_feedback(
+ app_model=app, message_id=message.id, user=None, rating="like", content=fake.text(max_nb_chars=100)
+ )
+
+ def test_create_feedback_update_existing(self, db_session_with_containers, mock_external_service_dependencies):
+ """
+ Test updating existing feedback.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and message
+ conversation = self._create_test_conversation(app, account, fake)
+ message = self._create_test_message(app, conversation, account, fake)
+
+ # Create initial feedback
+ initial_rating = "like"
+ initial_content = fake.text(max_nb_chars=100)
+ feedback = MessageService.create_feedback(
+ app_model=app, message_id=message.id, user=account, rating=initial_rating, content=initial_content
+ )
+
+ # Update feedback
+ updated_rating = "dislike"
+ updated_content = fake.text(max_nb_chars=100)
+ updated_feedback = MessageService.create_feedback(
+ app_model=app, message_id=message.id, user=account, rating=updated_rating, content=updated_content
+ )
+
+ # Verify feedback was updated correctly
+ assert updated_feedback.id == feedback.id
+ assert updated_feedback.rating == updated_rating
+ assert updated_feedback.content == updated_content
+ assert updated_feedback.rating != initial_rating
+ assert updated_feedback.content != initial_content
+
+ def test_create_feedback_delete_existing(self, db_session_with_containers, mock_external_service_dependencies):
+ """
+ Test deleting existing feedback by setting rating to None.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and message
+ conversation = self._create_test_conversation(app, account, fake)
+ message = self._create_test_message(app, conversation, account, fake)
+
+ # Create initial feedback
+ feedback = MessageService.create_feedback(
+ app_model=app, message_id=message.id, user=account, rating="like", content=fake.text(max_nb_chars=100)
+ )
+
+ # Delete feedback by setting rating to None
+ MessageService.create_feedback(app_model=app, message_id=message.id, user=account, rating=None, content=None)
+
+ # Verify feedback was deleted
+ from extensions.ext_database import db
+
+ deleted_feedback = db.session.query(MessageFeedback).filter(MessageFeedback.id == feedback.id).first()
+ assert deleted_feedback is None
+
+ def test_create_feedback_no_rating_when_not_exists(
+ self, db_session_with_containers, mock_external_service_dependencies
+ ):
+ """
+ Test creating feedback with no rating when feedback doesn't exist.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and message
+ conversation = self._create_test_conversation(app, account, fake)
+ message = self._create_test_message(app, conversation, account, fake)
+
+ # Test creating feedback with no rating when no feedback exists
+ with pytest.raises(ValueError, match="rating cannot be None when feedback not exists"):
+ MessageService.create_feedback(
+ app_model=app, message_id=message.id, user=account, rating=None, content=None
+ )
+
+ def test_get_all_messages_feedbacks_success(self, db_session_with_containers, mock_external_service_dependencies):
+ """
+ Test successful retrieval of all message feedbacks.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create multiple conversations and messages with feedbacks
+ feedbacks = []
+ for i in range(3):
+ conversation = self._create_test_conversation(app, account, fake)
+ message = self._create_test_message(app, conversation, account, fake)
+
+ feedback = MessageService.create_feedback(
+ app_model=app,
+ message_id=message.id,
+ user=account,
+ rating="like" if i % 2 == 0 else "dislike",
+ content=f"Feedback {i}: {fake.text(max_nb_chars=50)}",
+ )
+ feedbacks.append(feedback)
+
+ # Get all feedbacks
+ result = MessageService.get_all_messages_feedbacks(app, page=1, limit=10)
+
+ # Verify results
+ assert len(result) == 3
+
+ # Verify feedbacks are ordered by created_at desc
+ for i in range(len(result) - 1):
+ assert result[i]["created_at"] >= result[i + 1]["created_at"]
+
+ def test_get_all_messages_feedbacks_pagination(
+ self, db_session_with_containers, mock_external_service_dependencies
+ ):
+ """
+ Test pagination of message feedbacks.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create multiple conversations and messages with feedbacks
+ for i in range(5):
+ conversation = self._create_test_conversation(app, account, fake)
+ message = self._create_test_message(app, conversation, account, fake)
+
+ MessageService.create_feedback(
+ app_model=app, message_id=message.id, user=account, rating="like", content=f"Feedback {i}"
+ )
+
+ # Get feedbacks with pagination
+ result_page_1 = MessageService.get_all_messages_feedbacks(app, page=1, limit=3)
+ result_page_2 = MessageService.get_all_messages_feedbacks(app, page=2, limit=3)
+
+ # Verify pagination results
+ assert len(result_page_1) == 3
+ assert len(result_page_2) == 2
+
+ # Verify no overlap between pages
+ page_1_ids = {feedback["id"] for feedback in result_page_1}
+ page_2_ids = {feedback["id"] for feedback in result_page_2}
+ assert len(page_1_ids.intersection(page_2_ids)) == 0
+
+ def test_get_message_success(self, db_session_with_containers, mock_external_service_dependencies):
+ """
+ Test successful retrieval of message.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and message
+ conversation = self._create_test_conversation(app, account, fake)
+ message = self._create_test_message(app, conversation, account, fake)
+
+ # Get message
+ retrieved_message = MessageService.get_message(app_model=app, user=account, message_id=message.id)
+
+ # Verify message was retrieved correctly
+ assert retrieved_message.id == message.id
+ assert retrieved_message.app_id == app.id
+ assert retrieved_message.conversation_id == conversation.id
+ assert retrieved_message.from_source == "console"
+ assert retrieved_message.from_account_id == account.id
+
+ def test_get_message_not_exists(self, db_session_with_containers, mock_external_service_dependencies):
+ """
+ Test getting message that doesn't exist.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Test getting non-existent message
+ with pytest.raises(MessageNotExistsError):
+ MessageService.get_message(app_model=app, user=account, message_id=fake.uuid4())
+
+ def test_get_message_wrong_user(self, db_session_with_containers, mock_external_service_dependencies):
+ """
+ Test getting message with wrong user (different account).
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and message
+ conversation = self._create_test_conversation(app, account, fake)
+ message = self._create_test_message(app, conversation, account, fake)
+
+ # Create another account
+ from services.account_service import AccountService, TenantService
+
+ other_account = AccountService.create_account(
+ email=fake.email(),
+ name=fake.name(),
+ interface_language="en-US",
+ password=fake.password(length=12),
+ )
+ TenantService.create_owner_tenant_if_not_exist(other_account, name=fake.company())
+
+ # Test getting message with different user
+ with pytest.raises(MessageNotExistsError):
+ MessageService.get_message(app_model=app, user=other_account, message_id=message.id)
+
+ def test_get_suggested_questions_after_answer_success(
+ self, db_session_with_containers, mock_external_service_dependencies
+ ):
+ """
+ Test successful generation of suggested questions after answer.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and message
+ conversation = self._create_test_conversation(app, account, fake)
+ message = self._create_test_message(app, conversation, account, fake)
+
+ # Mock the LLMGenerator to return specific questions
+ mock_questions = ["What is AI?", "How does machine learning work?", "Tell me about neural networks"]
+ mock_external_service_dependencies[
+ "llm_generator"
+ ].generate_suggested_questions_after_answer.return_value = mock_questions
+
+ # Get suggested questions
+ from core.app.entities.app_invoke_entities import InvokeFrom
+
+ result = MessageService.get_suggested_questions_after_answer(
+ app_model=app, user=account, message_id=message.id, invoke_from=InvokeFrom.SERVICE_API
+ )
+
+ # Verify results
+ assert result == mock_questions
+
+ # Verify LLMGenerator was called
+ mock_external_service_dependencies[
+ "llm_generator"
+ ].generate_suggested_questions_after_answer.assert_called_once()
+
+ # Verify TraceQueueManager was called
+ mock_external_service_dependencies["trace_manager_instance"].add_trace_task.assert_called_once()
+
+ def test_get_suggested_questions_after_answer_no_user(
+ self, db_session_with_containers, mock_external_service_dependencies
+ ):
+ """
+ Test getting suggested questions when no user is provided.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and message
+ conversation = self._create_test_conversation(app, account, fake)
+ message = self._create_test_message(app, conversation, account, fake)
+
+ # Test getting suggested questions with no user
+ from core.app.entities.app_invoke_entities import InvokeFrom
+
+ with pytest.raises(ValueError, match="user cannot be None"):
+ MessageService.get_suggested_questions_after_answer(
+ app_model=app, user=None, message_id=message.id, invoke_from=InvokeFrom.SERVICE_API
+ )
+
+ def test_get_suggested_questions_after_answer_disabled(
+ self, db_session_with_containers, mock_external_service_dependencies
+ ):
+ """
+ Test getting suggested questions when feature is disabled.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and message
+ conversation = self._create_test_conversation(app, account, fake)
+ message = self._create_test_message(app, conversation, account, fake)
+
+ # Mock the feature to be disabled
+ mock_external_service_dependencies[
+ "app_config_manager"
+ ].get_app_config.return_value.additional_features.suggested_questions_after_answer = False
+
+ # Test getting suggested questions when feature is disabled
+ from core.app.entities.app_invoke_entities import InvokeFrom
+
+ with pytest.raises(SuggestedQuestionsAfterAnswerDisabledError):
+ MessageService.get_suggested_questions_after_answer(
+ app_model=app, user=account, message_id=message.id, invoke_from=InvokeFrom.SERVICE_API
+ )
+
+ def test_get_suggested_questions_after_answer_no_workflow(
+ self, db_session_with_containers, mock_external_service_dependencies
+ ):
+ """
+ Test getting suggested questions when no workflow exists.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and message
+ conversation = self._create_test_conversation(app, account, fake)
+ message = self._create_test_message(app, conversation, account, fake)
+
+ # Mock no workflow
+ mock_external_service_dependencies["workflow_service"].return_value.get_published_workflow.return_value = None
+
+ # Get suggested questions (should return empty list)
+ from core.app.entities.app_invoke_entities import InvokeFrom
+
+ result = MessageService.get_suggested_questions_after_answer(
+ app_model=app, user=account, message_id=message.id, invoke_from=InvokeFrom.SERVICE_API
+ )
+
+ # Verify empty result
+ assert result == []
+
+ def test_get_suggested_questions_after_answer_debugger_mode(
+ self, db_session_with_containers, mock_external_service_dependencies
+ ):
+ """
+ Test getting suggested questions in debugger mode.
+ """
+ fake = Faker()
+ app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
+
+ # Create a conversation and message
+ conversation = self._create_test_conversation(app, account, fake)
+ message = self._create_test_message(app, conversation, account, fake)
+
+ # Mock questions
+ mock_questions = ["Debug question 1", "Debug question 2"]
+ mock_external_service_dependencies[
+ "llm_generator"
+ ].generate_suggested_questions_after_answer.return_value = mock_questions
+
+ # Get suggested questions in debugger mode
+ from core.app.entities.app_invoke_entities import InvokeFrom
+
+ result = MessageService.get_suggested_questions_after_answer(
+ app_model=app, user=account, message_id=message.id, invoke_from=InvokeFrom.DEBUGGER
+ )
+
+ # Verify results
+ assert result == mock_questions
+
+ # Verify draft workflow was used instead of published workflow
+ mock_external_service_dependencies["workflow_service"].return_value.get_draft_workflow.assert_called_once_with(
+ app_model=app
+ )
+
+ # Verify TraceQueueManager was called
+ mock_external_service_dependencies["trace_manager_instance"].add_trace_task.assert_called_once()
diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py b/api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py
index 85a9355c79..d73fb7e4be 100644
--- a/api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py
+++ b/api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py
@@ -12,6 +12,10 @@ from services.workflow_draft_variable_service import (
)
+def _get_random_variable_name(fake: Faker):
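+    # ten random letters make name collisions far less likely than fake.word()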
+ return "".join(fake.random_letters(length=10))
+
+
class TestWorkflowDraftVariableService:
"""
Comprehensive integration tests for WorkflowDraftVariableService using testcontainers.
@@ -112,7 +116,14 @@ class TestWorkflowDraftVariableService:
return workflow
def _create_test_variable(
- self, db_session_with_containers, app_id, node_id, name, value, variable_type="conversation", fake=None
+ self,
+ db_session_with_containers,
+ app_id,
+ node_id,
+ name,
+ value,
+ variable_type: DraftVariableType = DraftVariableType.CONVERSATION,
+ fake=None,
):
"""
Helper method to create a test workflow draft variable with proper configuration.
@@ -227,7 +238,13 @@ class TestWorkflowDraftVariableService:
db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, "var2", var2_value, fake=fake
)
var3 = self._create_test_variable(
- db_session_with_containers, app.id, "test_node_1", "var3", var3_value, "node", fake=fake
+ db_session_with_containers,
+ app.id,
+ "test_node_1",
+ "var3",
+ var3_value,
+ variable_type=DraftVariableType.NODE,
+ fake=fake,
)
selectors = [
[CONVERSATION_VARIABLE_NODE_ID, "var1"],
@@ -263,9 +280,14 @@ class TestWorkflowDraftVariableService:
fake = Faker()
app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake)
for i in range(5):
- test_value = StringSegment(value=fake.numerify("value##"))
+ test_value = StringSegment(value=fake.numerify("value######"))
self._create_test_variable(
- db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, fake.word(), test_value, fake=fake
+ db_session_with_containers,
+ app.id,
+ CONVERSATION_VARIABLE_NODE_ID,
+ _get_random_variable_name(fake),
+ test_value,
+ fake=fake,
)
service = WorkflowDraftVariableService(db_session_with_containers)
result = service.list_variables_without_values(app.id, page=1, limit=3)
@@ -291,10 +313,32 @@ class TestWorkflowDraftVariableService:
var1_value = StringSegment(value=fake.word())
var2_value = StringSegment(value=fake.word())
var3_value = StringSegment(value=fake.word())
- self._create_test_variable(db_session_with_containers, app.id, node_id, "var1", var1_value, "node", fake=fake)
- self._create_test_variable(db_session_with_containers, app.id, node_id, "var2", var3_value, "node", fake=fake)
self._create_test_variable(
- db_session_with_containers, app.id, "other_node", "var3", var2_value, "node", fake=fake
+ db_session_with_containers,
+ app.id,
+ node_id,
+ "var1",
+ var1_value,
+ variable_type=DraftVariableType.NODE,
+ fake=fake,
+ )
+ self._create_test_variable(
+ db_session_with_containers,
+ app.id,
+ node_id,
+ "var2",
+ var3_value,
+ variable_type=DraftVariableType.NODE,
+ fake=fake,
+ )
+ self._create_test_variable(
+ db_session_with_containers,
+ app.id,
+ "other_node",
+ "var3",
+ var2_value,
+ variable_type=DraftVariableType.NODE,
+ fake=fake,
)
service = WorkflowDraftVariableService(db_session_with_containers)
result = service.list_node_variables(app.id, node_id)
@@ -328,7 +372,13 @@ class TestWorkflowDraftVariableService:
)
sys_var_value = StringSegment(value=fake.word())
self._create_test_variable(
- db_session_with_containers, app.id, SYSTEM_VARIABLE_NODE_ID, "sys_var", sys_var_value, "system", fake=fake
+ db_session_with_containers,
+ app.id,
+ SYSTEM_VARIABLE_NODE_ID,
+ "sys_var",
+ sys_var_value,
+ variable_type=DraftVariableType.SYS,
+ fake=fake,
)
service = WorkflowDraftVariableService(db_session_with_containers)
result = service.list_conversation_variables(app.id)
@@ -480,14 +530,24 @@ class TestWorkflowDraftVariableService:
fake = Faker()
app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake)
for i in range(3):
- test_value = StringSegment(value=fake.numerify("value##"))
+ test_value = StringSegment(value=fake.numerify("value######"))
self._create_test_variable(
- db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, fake.word(), test_value, fake=fake
+ db_session_with_containers,
+ app.id,
+ CONVERSATION_VARIABLE_NODE_ID,
+ _get_random_variable_name(fake),
+ test_value,
+ fake=fake,
)
other_app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake)
other_value = StringSegment(value=fake.word())
self._create_test_variable(
- db_session_with_containers, other_app.id, CONVERSATION_VARIABLE_NODE_ID, fake.word(), other_value, fake=fake
+ db_session_with_containers,
+ other_app.id,
+ CONVERSATION_VARIABLE_NODE_ID,
+ _get_random_variable_name(fake),
+ other_value,
+ fake=fake,
)
from extensions.ext_database import db
@@ -515,17 +575,34 @@ class TestWorkflowDraftVariableService:
app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake)
node_id = fake.word()
for i in range(2):
- test_value = StringSegment(value=fake.numerify("node_value##"))
+ test_value = StringSegment(value=fake.numerify("node_value######"))
self._create_test_variable(
- db_session_with_containers, app.id, node_id, fake.word(), test_value, "node", fake=fake
+ db_session_with_containers,
+ app.id,
+ node_id,
+ _get_random_variable_name(fake),
+ test_value,
+ variable_type=DraftVariableType.NODE,
+ fake=fake,
)
other_node_value = StringSegment(value=fake.word())
self._create_test_variable(
- db_session_with_containers, app.id, "other_node", fake.word(), other_node_value, "node", fake=fake
+ db_session_with_containers,
+ app.id,
+ "other_node",
+ _get_random_variable_name(fake),
+ other_node_value,
+ variable_type=DraftVariableType.NODE,
+ fake=fake,
)
conv_value = StringSegment(value=fake.word())
self._create_test_variable(
- db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, fake.word(), conv_value, fake=fake
+ db_session_with_containers,
+ app.id,
+ CONVERSATION_VARIABLE_NODE_ID,
+ _get_random_variable_name(fake),
+ conv_value,
+ fake=fake,
)
from extensions.ext_database import db
@@ -627,7 +704,7 @@ class TestWorkflowDraftVariableService:
SYSTEM_VARIABLE_NODE_ID,
"conversation_id",
conv_id_value,
- "system",
+ variable_type=DraftVariableType.SYS,
fake=fake,
)
service = WorkflowDraftVariableService(db_session_with_containers)
@@ -664,10 +741,22 @@ class TestWorkflowDraftVariableService:
sys_var1_value = StringSegment(value=fake.word())
sys_var2_value = StringSegment(value=fake.word())
sys_var1 = self._create_test_variable(
- db_session_with_containers, app.id, SYSTEM_VARIABLE_NODE_ID, "sys_var1", sys_var1_value, "system", fake=fake
+ db_session_with_containers,
+ app.id,
+ SYSTEM_VARIABLE_NODE_ID,
+ "sys_var1",
+ sys_var1_value,
+ variable_type=DraftVariableType.SYS,
+ fake=fake,
)
sys_var2 = self._create_test_variable(
- db_session_with_containers, app.id, SYSTEM_VARIABLE_NODE_ID, "sys_var2", sys_var2_value, "system", fake=fake
+ db_session_with_containers,
+ app.id,
+ SYSTEM_VARIABLE_NODE_ID,
+ "sys_var2",
+ sys_var2_value,
+ variable_type=DraftVariableType.SYS,
+ fake=fake,
)
conv_var_value = StringSegment(value=fake.word())
self._create_test_variable(
@@ -701,10 +790,22 @@ class TestWorkflowDraftVariableService:
db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, "test_conv_var", test_value, fake=fake
)
sys_var = self._create_test_variable(
- db_session_with_containers, app.id, SYSTEM_VARIABLE_NODE_ID, "test_sys_var", test_value, "system", fake=fake
+ db_session_with_containers,
+ app.id,
+ SYSTEM_VARIABLE_NODE_ID,
+ "test_sys_var",
+ test_value,
+ variable_type=DraftVariableType.SYS,
+ fake=fake,
)
node_var = self._create_test_variable(
- db_session_with_containers, app.id, "test_node", "test_node_var", test_value, "node", fake=fake
+ db_session_with_containers,
+ app.id,
+ "test_node",
+ "test_node_var",
+ test_value,
+ variable_type=DraftVariableType.NODE,
+ fake=fake,
)
service = WorkflowDraftVariableService(db_session_with_containers)
retrieved_conv_var = service.get_conversation_variable(app.id, "test_conv_var")
diff --git a/api/tests/unit_tests/core/app/features/rate_limiting/conftest.py b/api/tests/unit_tests/core/app/features/rate_limiting/conftest.py
new file mode 100644
index 0000000000..9557e78150
--- /dev/null
+++ b/api/tests/unit_tests/core/app/features/rate_limiting/conftest.py
@@ -0,0 +1,124 @@
+import time
+from unittest.mock import MagicMock, patch
+
+import pytest
+
+from core.app.features.rate_limiting.rate_limit import RateLimit
+
+
+@pytest.fixture
+def mock_redis():
+ """Mock Redis client with realistic behavior for rate limiting tests."""
+ mock_client = MagicMock()
+
+ # Redis data storage for simulation
+ mock_data = {}
+ mock_hashes = {}
+ mock_expiry = {}
+
+ def mock_setex(key, ttl, value):
+ mock_data[key] = str(value)
+        mock_expiry[key] = time.time() + (ttl.total_seconds() if hasattr(ttl, "total_seconds") else ttl)
+ return True
+
+ def mock_get(key):
+ if key in mock_data and (key not in mock_expiry or time.time() < mock_expiry[key]):
+ return mock_data[key].encode("utf-8")
+ return None
+
+ def mock_exists(key):
+ return key in mock_data or key in mock_hashes
+
+ def mock_expire(key, ttl):
+ if key in mock_data or key in mock_hashes:
+            mock_expiry[key] = time.time() + (ttl.total_seconds() if hasattr(ttl, "total_seconds") else ttl)
+ return True
+
+ def mock_hset(key, field, value):
+ if key not in mock_hashes:
+ mock_hashes[key] = {}
+ mock_hashes[key][field] = str(value).encode("utf-8")
+ return True
+
+ def mock_hgetall(key):
+ return mock_hashes.get(key, {})
+
+ def mock_hdel(key, *fields):
+ if key in mock_hashes:
+ count = 0
+ for field in fields:
+ if field in mock_hashes[key]:
+ del mock_hashes[key][field]
+ count += 1
+ return count
+ return 0
+
+ def mock_hlen(key):
+ return len(mock_hashes.get(key, {}))
+
+ # Configure mock methods
+ mock_client.setex = mock_setex
+ mock_client.get = mock_get
+ mock_client.exists = mock_exists
+ mock_client.expire = mock_expire
+ mock_client.hset = mock_hset
+ mock_client.hgetall = mock_hgetall
+ mock_client.hdel = mock_hdel
+ mock_client.hlen = mock_hlen
+
+ # Store references for test verification
+ mock_client._mock_data = mock_data
+ mock_client._mock_hashes = mock_hashes
+ mock_client._mock_expiry = mock_expiry
+
+ return mock_client
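+
+# Illustrative usage (a sketch, not exercised by the suite): tests that want stateful
+# hash behaviour can wire these helpers into the redis_patch fixture, mirroring the
+# side_effect pattern already used in test_rate_limit.py, e.g.:
+#
+#     redis_patch.exists.return_value = False
+#     redis_patch.setex.return_value = True
+#     redis_patch.hlen.side_effect = mock_redis.hlen
+#     redis_patch.hset.side_effect = mock_redis.hset
+#     redis_patch.hdel.side_effect = mock_redis.hdel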
+
+
+@pytest.fixture
+def mock_time():
+ """Mock time.time() for deterministic tests."""
+ mock_time_val = 1000.0
+
+ def increment_time(seconds=1):
+ nonlocal mock_time_val
+ mock_time_val += seconds
+ return mock_time_val
+
+    # Use side_effect so increments applied via mock.increment show up in later time.time() calls
+    with patch("time.time", side_effect=lambda: mock_time_val) as mock:
+ mock.increment = increment_time
+ yield mock
+
+
+@pytest.fixture
+def sample_generator():
+ """Sample generator for testing RateLimitGenerator."""
+
+ def _create_generator(items=None, raise_error=False):
+ items = items or ["item1", "item2", "item3"]
+ for item in items:
+ if raise_error and item == "item2":
+ raise ValueError("Test error")
+ yield item
+
+ return _create_generator
+
+
+@pytest.fixture
+def sample_mapping():
+ """Sample mapping for testing RateLimitGenerator."""
+ return {"key1": "value1", "key2": "value2"}
+
+
+@pytest.fixture(autouse=True)
+def reset_rate_limit_instances():
+ """Clear RateLimit singleton instances between tests."""
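+    # RateLimit caches instances in a class-level dict, so stale entries would leak
+    # singleton state across tests if they were not cleared here.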
+ RateLimit._instance_dict.clear()
+ yield
+ RateLimit._instance_dict.clear()
+
+
+@pytest.fixture
+def redis_patch():
+ """Patch redis_client globally for rate limit tests."""
+ with patch("core.app.features.rate_limiting.rate_limit.redis_client") as mock:
+ yield mock
diff --git a/api/tests/unit_tests/core/app/features/rate_limiting/test_rate_limit.py b/api/tests/unit_tests/core/app/features/rate_limiting/test_rate_limit.py
new file mode 100644
index 0000000000..3db10c1c72
--- /dev/null
+++ b/api/tests/unit_tests/core/app/features/rate_limiting/test_rate_limit.py
@@ -0,0 +1,569 @@
+import threading
+import time
+from datetime import timedelta
+from unittest.mock import patch
+
+import pytest
+
+from core.app.features.rate_limiting.rate_limit import RateLimit
+from core.errors.error import AppInvokeQuotaExceededError
+
+
+class TestRateLimit:
+ """Core rate limiting functionality tests."""
+
+ def test_should_return_same_instance_for_same_client_id(self, redis_patch):
+ """Test singleton behavior for same client ID."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ }
+ )
+
+ rate_limit1 = RateLimit("client1", 5)
+ rate_limit2 = RateLimit("client1", 10) # Second instance with different limit
+
+ assert rate_limit1 is rate_limit2
+ # Current implementation: last constructor call overwrites max_active_requests
+ # This reflects the actual behavior where __init__ always sets max_active_requests
+ assert rate_limit1.max_active_requests == 10
+
+ def test_should_create_different_instances_for_different_client_ids(self, redis_patch):
+ """Test different instances for different client IDs."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ }
+ )
+
+ rate_limit1 = RateLimit("client1", 5)
+ rate_limit2 = RateLimit("client2", 10)
+
+ assert rate_limit1 is not rate_limit2
+ assert rate_limit1.client_id == "client1"
+ assert rate_limit2.client_id == "client2"
+
+ def test_should_initialize_with_valid_parameters(self, redis_patch):
+ """Test normal initialization."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ }
+ )
+
+ rate_limit = RateLimit("test_client", 5)
+
+ assert rate_limit.client_id == "test_client"
+ assert rate_limit.max_active_requests == 5
+ assert hasattr(rate_limit, "initialized")
+ redis_patch.setex.assert_called_once()
+
+ def test_should_skip_initialization_if_disabled(self):
+ """Test no initialization when rate limiting is disabled."""
+ rate_limit = RateLimit("test_client", 0)
+
+ assert rate_limit.disabled()
+ assert not hasattr(rate_limit, "initialized")
+
+ def test_should_skip_reinitialization_of_existing_instance(self, redis_patch):
+ """Test that existing instance doesn't reinitialize."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ }
+ )
+
+ RateLimit("client1", 5)
+ redis_patch.reset_mock()
+
+ RateLimit("client1", 10)
+
+ redis_patch.setex.assert_not_called()
+
+ def test_should_be_disabled_when_max_requests_is_zero_or_negative(self):
+ """Test disabled state for zero or negative limits."""
+ rate_limit_zero = RateLimit("client1", 0)
+ rate_limit_negative = RateLimit("client2", -5)
+
+ assert rate_limit_zero.disabled()
+ assert rate_limit_negative.disabled()
+
+ def test_should_set_redis_keys_on_first_flush(self, redis_patch):
+ """Test Redis keys are set correctly on initial flush."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ }
+ )
+
+ rate_limit = RateLimit("test_client", 5)
+
+ expected_max_key = "dify:rate_limit:test_client:max_active_requests"
+ redis_patch.setex.assert_called_with(expected_max_key, timedelta(days=1), 5)
+
+ def test_should_sync_max_requests_from_redis_on_subsequent_flush(self, redis_patch):
+ """Test max requests syncs from Redis when key exists."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": True,
+ "get.return_value": b"10",
+ "expire.return_value": True,
+ }
+ )
+
+ rate_limit = RateLimit("test_client", 5)
+ rate_limit.flush_cache()
+
+ assert rate_limit.max_active_requests == 10
+
+ @patch("time.time")
+ def test_should_clean_timeout_requests_from_active_list(self, mock_time, redis_patch):
+ """Test cleanup of timed-out requests."""
+ current_time = 1000.0
+ mock_time.return_value = current_time
+
+ # Setup mock Redis with timed-out requests
+ timeout_requests = {
+ b"req1": str(current_time - 700).encode(), # 700 seconds ago (timeout)
+ b"req2": str(current_time - 100).encode(), # 100 seconds ago (active)
+ }
+
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": True,
+ "get.return_value": b"5",
+ "expire.return_value": True,
+ "hgetall.return_value": timeout_requests,
+ "hdel.return_value": 1,
+ }
+ )
+
+ rate_limit = RateLimit("test_client", 5)
+ redis_patch.reset_mock() # Reset to avoid counting initialization calls
+ rate_limit.flush_cache()
+
+ # Verify timeout request was cleaned up
+ redis_patch.hdel.assert_called_once()
+ call_args = redis_patch.hdel.call_args[0]
+ assert call_args[0] == "dify:rate_limit:test_client:active_requests"
+ assert b"req1" in call_args # Timeout request should be removed
+ assert b"req2" not in call_args # Active request should remain
+
+
+class TestRateLimitEnterExit:
+ """Rate limiting enter/exit logic tests."""
+
+ def test_should_allow_request_within_limit(self, redis_patch):
+ """Test allowing requests within the rate limit."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ "hlen.return_value": 2,
+ "hset.return_value": True,
+ }
+ )
+
+ rate_limit = RateLimit("test_client", 5)
+ request_id = rate_limit.enter()
+
+ assert request_id != RateLimit._UNLIMITED_REQUEST_ID
+ redis_patch.hset.assert_called_once()
+
+ def test_should_generate_request_id_if_not_provided(self, redis_patch):
+ """Test auto-generation of request ID."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ "hlen.return_value": 0,
+ "hset.return_value": True,
+ }
+ )
+
+ rate_limit = RateLimit("test_client", 5)
+ request_id = rate_limit.enter()
+
+ assert len(request_id) == 36 # UUID format
+
+ def test_should_use_provided_request_id(self, redis_patch):
+ """Test using provided request ID."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ "hlen.return_value": 0,
+ "hset.return_value": True,
+ }
+ )
+
+ rate_limit = RateLimit("test_client", 5)
+ custom_id = "custom_request_123"
+ request_id = rate_limit.enter(custom_id)
+
+ assert request_id == custom_id
+
+ def test_should_remove_request_on_exit(self, redis_patch):
+ """Test request removal on exit."""
+ redis_patch.configure_mock(
+ **{
+ "hdel.return_value": 1,
+ }
+ )
+
+ rate_limit = RateLimit("test_client", 5)
+ rate_limit.exit("test_request_id")
+
+ redis_patch.hdel.assert_called_once_with("dify:rate_limit:test_client:active_requests", "test_request_id")
+
+ def test_should_raise_quota_exceeded_when_at_limit(self, redis_patch):
+ """Test quota exceeded error when at limit."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ "hlen.return_value": 5, # At limit
+ }
+ )
+
+ rate_limit = RateLimit("test_client", 5)
+
+ with pytest.raises(AppInvokeQuotaExceededError) as exc_info:
+ rate_limit.enter()
+
+ assert "Too many requests" in str(exc_info.value)
+ assert "test_client" in str(exc_info.value)
+
+ def test_should_allow_request_after_previous_exit(self, redis_patch):
+ """Test allowing new request after previous exit."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ "hlen.return_value": 4, # Under limit after exit
+ "hset.return_value": True,
+ "hdel.return_value": 1,
+ }
+ )
+
+ rate_limit = RateLimit("test_client", 5)
+
+ request_id = rate_limit.enter()
+ rate_limit.exit(request_id)
+
+ new_request_id = rate_limit.enter()
+ assert new_request_id is not None
+
+ @patch("time.time")
+ def test_should_flush_cache_when_interval_exceeded(self, mock_time, redis_patch):
+ """Test cache flush when time interval exceeded."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ "hlen.return_value": 0,
+ }
+ )
+
+ mock_time.return_value = 1000.0
+ rate_limit = RateLimit("test_client", 5)
+
+ # Advance time beyond flush interval
+ mock_time.return_value = 1400.0 # 400 seconds later
+ redis_patch.reset_mock()
+
+ rate_limit.enter()
+
+ # Should have called setex again due to cache flush
+ redis_patch.setex.assert_called()
+
+ def test_should_return_unlimited_id_when_disabled(self):
+ """Test unlimited ID return when rate limiting disabled."""
+ rate_limit = RateLimit("test_client", 0)
+ request_id = rate_limit.enter()
+
+ assert request_id == RateLimit._UNLIMITED_REQUEST_ID
+
+ def test_should_ignore_exit_for_unlimited_requests(self, redis_patch):
+ """Test ignoring exit for unlimited requests."""
+ rate_limit = RateLimit("test_client", 0)
+ rate_limit.exit(RateLimit._UNLIMITED_REQUEST_ID)
+
+ redis_patch.hdel.assert_not_called()
+
+
+class TestRateLimitGenerator:
+ """Rate limit generator wrapper tests."""
+
+ def test_should_wrap_generator_and_iterate_normally(self, redis_patch, sample_generator):
+ """Test normal generator iteration with rate limit wrapper."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ "hdel.return_value": 1,
+ }
+ )
+
+ rate_limit = RateLimit("test_client", 5)
+ generator = sample_generator()
+ request_id = "test_request"
+
+ wrapped_gen = rate_limit.generate(generator, request_id)
+ result = list(wrapped_gen)
+
+ assert result == ["item1", "item2", "item3"]
+ redis_patch.hdel.assert_called_once_with("dify:rate_limit:test_client:active_requests", request_id)
+
+ def test_should_handle_mapping_input_directly(self, sample_mapping):
+ """Test direct return of mapping input."""
+ rate_limit = RateLimit("test_client", 0) # Disabled
+ result = rate_limit.generate(sample_mapping, "test_request")
+
+ assert result is sample_mapping
+
+ def test_should_cleanup_on_exception_during_iteration(self, redis_patch, sample_generator):
+ """Test cleanup when exception occurs during iteration."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ "hdel.return_value": 1,
+ }
+ )
+
+ rate_limit = RateLimit("test_client", 5)
+ generator = sample_generator(raise_error=True)
+ request_id = "test_request"
+
+ wrapped_gen = rate_limit.generate(generator, request_id)
+
+ with pytest.raises(ValueError):
+ list(wrapped_gen)
+
+ redis_patch.hdel.assert_called_once_with("dify:rate_limit:test_client:active_requests", request_id)
+
+ def test_should_cleanup_on_explicit_close(self, redis_patch, sample_generator):
+ """Test cleanup on explicit generator close."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ "hdel.return_value": 1,
+ }
+ )
+
+ rate_limit = RateLimit("test_client", 5)
+ generator = sample_generator()
+ request_id = "test_request"
+
+ wrapped_gen = rate_limit.generate(generator, request_id)
+ wrapped_gen.close()
+
+ redis_patch.hdel.assert_called_once()
+
+ def test_should_handle_generator_without_close_method(self, redis_patch):
+ """Test handling generator without close method."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ "hdel.return_value": 1,
+ }
+ )
+
+ # Create a generator-like object without close method
+ class SimpleGenerator:
+ def __init__(self):
+ self.items = ["test"]
+ self.index = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.index >= len(self.items):
+ raise StopIteration
+ item = self.items[self.index]
+ self.index += 1
+ return item
+
+ rate_limit = RateLimit("test_client", 5)
+ generator = SimpleGenerator()
+
+ wrapped_gen = rate_limit.generate(generator, "test_request")
+ wrapped_gen.close() # Should not raise error
+
+ redis_patch.hdel.assert_called_once()
+
+ def test_should_prevent_iteration_after_close(self, redis_patch, sample_generator):
+ """Test StopIteration after generator is closed."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ "hdel.return_value": 1,
+ }
+ )
+
+ rate_limit = RateLimit("test_client", 5)
+ generator = sample_generator()
+
+ wrapped_gen = rate_limit.generate(generator, "test_request")
+ wrapped_gen.close()
+
+ with pytest.raises(StopIteration):
+ next(wrapped_gen)
+
+
+class TestRateLimitConcurrency:
+ """Concurrent access safety tests."""
+
+ def test_should_handle_concurrent_instance_creation(self, redis_patch):
+ """Test thread-safe singleton instance creation."""
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ }
+ )
+
+ instances = []
+ errors = []
+
+ def create_instance():
+ try:
+ instance = RateLimit("concurrent_client", 5)
+ instances.append(instance)
+ except Exception as e:
+ errors.append(e)
+
+ threads = [threading.Thread(target=create_instance) for _ in range(10)]
+
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+
+ assert len(errors) == 0
+ assert len({id(inst) for inst in instances}) == 1 # All same instance
+
+ def test_should_handle_concurrent_enter_requests(self, redis_patch):
+ """Test concurrent enter requests handling."""
+ # Setup mock to simulate realistic Redis behavior
+ request_count = 0
+
+ def mock_hlen(key):
+ nonlocal request_count
+ return request_count
+
+ def mock_hset(key, field, value):
+ nonlocal request_count
+ request_count += 1
+ return True
+
+ redis_patch.configure_mock(
+ **{
+ "exists.return_value": False,
+ "setex.return_value": True,
+ "hlen.side_effect": mock_hlen,
+ "hset.side_effect": mock_hset,
+ }
+ )
+
+ rate_limit = RateLimit("concurrent_client", 3)
+ results = []
+ errors = []
+
+ def try_enter():
+ try:
+ request_id = rate_limit.enter()
+ results.append(request_id)
+ except AppInvokeQuotaExceededError as e:
+ errors.append(e)
+
+ threads = [threading.Thread(target=try_enter) for _ in range(5)]
+
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+
+ # Should have some successful requests and some quota exceeded
+ assert len(results) + len(errors) == 5
+ assert len(errors) > 0 # Some should be rejected
+
+ @patch("time.time")
+ def test_should_maintain_accurate_count_under_load(self, mock_time, redis_patch):
+ """Test accurate count maintenance under concurrent load."""
+ mock_time.return_value = 1000.0
+
+        # Use the thread-safe mock Redis helper for a more realistic simulation
+ mock_client = self._create_mock_redis()
+ redis_patch.configure_mock(**mock_client)
+
+ rate_limit = RateLimit("load_test_client", 10)
+ active_requests = []
+
+ def enter_and_exit():
+ try:
+ request_id = rate_limit.enter()
+ active_requests.append(request_id)
+ time.sleep(0.01) # Simulate some work
+ rate_limit.exit(request_id)
+ active_requests.remove(request_id)
+ except AppInvokeQuotaExceededError:
+ pass # Expected under load
+
+ threads = [threading.Thread(target=enter_and_exit) for _ in range(20)]
+
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+
+ # All requests should have been cleaned up
+ assert len(active_requests) == 0
+
+ def _create_mock_redis(self):
+ """Create a thread-safe mock Redis for concurrency tests."""
+
+ lock = threading.Lock()
+ data = {}
+ hashes = {}
+
+ def mock_hlen(key):
+ with lock:
+ return len(hashes.get(key, {}))
+
+ def mock_hset(key, field, value):
+ with lock:
+ if key not in hashes:
+ hashes[key] = {}
+ hashes[key][field] = str(value).encode("utf-8")
+ return True
+
+ def mock_hdel(key, *fields):
+ with lock:
+ if key in hashes:
+ count = 0
+ for field in fields:
+ if field in hashes[key]:
+ del hashes[key][field]
+ count += 1
+ return count
+ return 0
+
+ return {
+ "exists.return_value": False,
+ "setex.return_value": True,
+ "hlen.side_effect": mock_hlen,
+ "hset.side_effect": mock_hset,
+ "hdel.side_effect": mock_hdel,
+ }
diff --git a/api/tests/unit_tests/core/workflow/test_variable_pool.py b/api/tests/unit_tests/core/workflow/test_variable_pool.py
index c65b60cb4d..c0330b9441 100644
--- a/api/tests/unit_tests/core/workflow/test_variable_pool.py
+++ b/api/tests/unit_tests/core/workflow/test_variable_pool.py
@@ -69,8 +69,12 @@ def test_get_file_attribute(pool, file):
def test_use_long_selector(pool):
- pool.add(("node_1", "part_1", "part_2"), StringSegment(value="test_value"))
+ # The add method now only accepts 2-element selectors (node_id, variable_name)
+ # Store nested data as an ObjectSegment instead
+ nested_data = {"part_2": "test_value"}
+ pool.add(("node_1", "part_1"), ObjectSegment(value=nested_data))
+ # The get method supports longer selectors for nested access
result = pool.get(("node_1", "part_1", "part_2"))
assert result is not None
assert result.value == "test_value"
@@ -280,8 +284,10 @@ class TestVariablePoolSerialization:
pool.add((self._NODE2_ID, "array_file"), ArrayFileSegment(value=[test_file]))
pool.add((self._NODE2_ID, "array_any"), ArrayAnySegment(value=["mixed", 123, {"key": "value"}]))
- # Add nested variables
- pool.add((self._NODE3_ID, "nested", "deep", "var"), StringSegment(value="deep_value"))
+ # Add nested variables as ObjectSegment
+ # The add method only accepts 2-element selectors
+ nested_obj = {"deep": {"var": "deep_value"}}
+ pool.add((self._NODE3_ID, "nested"), ObjectSegment(value=nested_obj))
def test_system_variables(self):
sys_vars = SystemVariable(
diff --git a/api/tests/unit_tests/core/workflow/utils/test_variable_utils.py b/api/tests/unit_tests/core/workflow/utils/test_variable_utils.py
deleted file mode 100644
index 54bf6558bf..0000000000
--- a/api/tests/unit_tests/core/workflow/utils/test_variable_utils.py
+++ /dev/null
@@ -1,148 +0,0 @@
-from typing import Any
-
-from core.variables.segments import ObjectSegment, StringSegment
-from core.workflow.entities.variable_pool import VariablePool
-from core.workflow.utils.variable_utils import append_variables_recursively
-
-
-class TestAppendVariablesRecursively:
- """Test cases for append_variables_recursively function"""
-
- def test_append_simple_dict_value(self):
- """Test appending a simple dictionary value"""
- pool = VariablePool.empty()
- node_id = "test_node"
- variable_key_list = ["output"]
- variable_value = {"name": "John", "age": 30}
-
- append_variables_recursively(pool, node_id, variable_key_list, variable_value)
-
- # Check that the main variable is added
- main_var = pool.get([node_id] + variable_key_list)
- assert main_var is not None
- assert main_var.value == variable_value
-
- # Check that nested variables are added recursively
- name_var = pool.get([node_id] + variable_key_list + ["name"])
- assert name_var is not None
- assert name_var.value == "John"
-
- age_var = pool.get([node_id] + variable_key_list + ["age"])
- assert age_var is not None
- assert age_var.value == 30
-
- def test_append_object_segment_value(self):
- """Test appending an ObjectSegment value"""
- pool = VariablePool.empty()
- node_id = "test_node"
- variable_key_list = ["result"]
-
- # Create an ObjectSegment
- obj_data = {"status": "success", "code": 200}
- variable_value = ObjectSegment(value=obj_data)
-
- append_variables_recursively(pool, node_id, variable_key_list, variable_value)
-
- # Check that the main variable is added
- main_var = pool.get([node_id] + variable_key_list)
- assert main_var is not None
- assert isinstance(main_var, ObjectSegment)
- assert main_var.value == obj_data
-
- # Check that nested variables are added recursively
- status_var = pool.get([node_id] + variable_key_list + ["status"])
- assert status_var is not None
- assert status_var.value == "success"
-
- code_var = pool.get([node_id] + variable_key_list + ["code"])
- assert code_var is not None
- assert code_var.value == 200
-
- def test_append_nested_dict_value(self):
- """Test appending a nested dictionary value"""
- pool = VariablePool.empty()
- node_id = "test_node"
- variable_key_list = ["data"]
-
- variable_value = {
- "user": {
- "profile": {"name": "Alice", "email": "alice@example.com"},
- "settings": {"theme": "dark", "notifications": True},
- },
- "metadata": {"version": "1.0", "timestamp": 1234567890},
- }
-
- append_variables_recursively(pool, node_id, variable_key_list, variable_value)
-
- # Check deeply nested variables
- name_var = pool.get([node_id] + variable_key_list + ["user", "profile", "name"])
- assert name_var is not None
- assert name_var.value == "Alice"
-
- email_var = pool.get([node_id] + variable_key_list + ["user", "profile", "email"])
- assert email_var is not None
- assert email_var.value == "alice@example.com"
-
- theme_var = pool.get([node_id] + variable_key_list + ["user", "settings", "theme"])
- assert theme_var is not None
- assert theme_var.value == "dark"
-
- notifications_var = pool.get([node_id] + variable_key_list + ["user", "settings", "notifications"])
- assert notifications_var is not None
- assert notifications_var.value == 1 # Boolean True is converted to integer 1
-
- version_var = pool.get([node_id] + variable_key_list + ["metadata", "version"])
- assert version_var is not None
- assert version_var.value == "1.0"
-
- def test_append_non_dict_value(self):
- """Test appending a non-dictionary value (should not recurse)"""
- pool = VariablePool.empty()
- node_id = "test_node"
- variable_key_list = ["simple"]
- variable_value = "simple_string"
-
- append_variables_recursively(pool, node_id, variable_key_list, variable_value)
-
- # Check that only the main variable is added
- main_var = pool.get([node_id] + variable_key_list)
- assert main_var is not None
- assert main_var.value == variable_value
-
- # Ensure no additional variables are created
- assert len(pool.variable_dictionary[node_id]) == 1
-
- def test_append_segment_non_object_value(self):
- """Test appending a Segment that is not ObjectSegment (should not recurse)"""
- pool = VariablePool.empty()
- node_id = "test_node"
- variable_key_list = ["text"]
- variable_value = StringSegment(value="Hello World")
-
- append_variables_recursively(pool, node_id, variable_key_list, variable_value)
-
- # Check that only the main variable is added
- main_var = pool.get([node_id] + variable_key_list)
- assert main_var is not None
- assert isinstance(main_var, StringSegment)
- assert main_var.value == "Hello World"
-
- # Ensure no additional variables are created
- assert len(pool.variable_dictionary[node_id]) == 1
-
- def test_append_empty_dict_value(self):
- """Test appending an empty dictionary value"""
- pool = VariablePool.empty()
- node_id = "test_node"
- variable_key_list = ["empty"]
- variable_value: dict[str, Any] = {}
-
- append_variables_recursively(pool, node_id, variable_key_list, variable_value)
-
- # Check that the main variable is added
- main_var = pool.get([node_id] + variable_key_list)
- assert main_var is not None
- assert main_var.value == {}
-
- # Ensure only the main variable is created (no recursion for empty dict)
- assert len(pool.variable_dictionary[node_id]) == 1
diff --git a/api/uv.lock b/api/uv.lock
index 16624dc8fd..ea2c1bef5b 100644
--- a/api/uv.lock
+++ b/api/uv.lock
@@ -1236,7 +1236,7 @@ wheels = [
[[package]]
name = "dify-api"
-version = "1.7.1"
+version = "1.7.2"
source = { virtual = "." }
dependencies = [
{ name = "arize-phoenix-otel" },
diff --git a/docker/.env.example b/docker/.env.example
index 1b1e9cad7b..ed19fa6099 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -907,6 +907,9 @@ TEXT_GENERATION_TIMEOUT_MS=60000
# Allow rendering unsafe URLs which have "data:" scheme.
ALLOW_UNSAFE_DATA_SCHEME=false
+# Maximum tree depth allowed in a workflow
+MAX_TREE_DEPTH=50
+
# ------------------------------
# Environment Variables for db Service
# ------------------------------
diff --git a/docker/docker-compose-template.yaml b/docker/docker-compose-template.yaml
index b5ae4a425c..6494087a4a 100644
--- a/docker/docker-compose-template.yaml
+++ b/docker/docker-compose-template.yaml
@@ -2,7 +2,7 @@ x-shared-env: &shared-api-worker-env
services:
# API service
api:
- image: langgenius/dify-api:1.7.1
+ image: langgenius/dify-api:1.7.2
restart: always
environment:
# Use the shared environment variables.
@@ -31,7 +31,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
- image: langgenius/dify-api:1.7.1
+ image: langgenius/dify-api:1.7.2
restart: always
environment:
# Use the shared environment variables.
@@ -58,7 +58,7 @@ services:
# worker_beat service
# Celery beat for scheduling periodic tasks.
worker_beat:
- image: langgenius/dify-api:1.7.1
+ image: langgenius/dify-api:1.7.2
restart: always
environment:
# Use the shared environment variables.
@@ -76,7 +76,7 @@ services:
# Frontend web application.
web:
- image: langgenius/dify-web:1.7.1
+ image: langgenius/dify-web:1.7.2
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml
index 8e2d40883d..d64a8566a0 100644
--- a/docker/docker-compose.yaml
+++ b/docker/docker-compose.yaml
@@ -404,6 +404,7 @@ x-shared-env: &shared-api-worker-env
MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99}
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false}
+ MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50}
POSTGRES_USER: ${POSTGRES_USER:-${DB_USERNAME}}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}}
POSTGRES_DB: ${POSTGRES_DB:-${DB_DATABASE}}
@@ -567,7 +568,7 @@ x-shared-env: &shared-api-worker-env
services:
# API service
api:
- image: langgenius/dify-api:1.7.1
+ image: langgenius/dify-api:1.7.2
restart: always
environment:
# Use the shared environment variables.
@@ -596,7 +597,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
- image: langgenius/dify-api:1.7.1
+ image: langgenius/dify-api:1.7.2
restart: always
environment:
# Use the shared environment variables.
@@ -623,7 +624,7 @@ services:
# worker_beat service
# Celery beat for scheduling periodic tasks.
worker_beat:
- image: langgenius/dify-api:1.7.1
+ image: langgenius/dify-api:1.7.2
restart: always
environment:
# Use the shared environment variables.
@@ -641,7 +642,7 @@ services:
# Frontend web application.
web:
- image: langgenius/dify-web:1.7.1
+ image: langgenius/dify-web:1.7.2
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
diff --git a/web/__tests__/goto-anything/command-selector.test.tsx b/web/__tests__/goto-anything/command-selector.test.tsx
new file mode 100644
index 0000000000..1073b9d481
--- /dev/null
+++ b/web/__tests__/goto-anything/command-selector.test.tsx
@@ -0,0 +1,333 @@
+import React from 'react'
+import { fireEvent, render, screen } from '@testing-library/react'
+import '@testing-library/jest-dom'
+import CommandSelector from '../../app/components/goto-anything/command-selector'
+import type { ActionItem } from '../../app/components/goto-anything/actions/types'
+
+jest.mock('react-i18next', () => ({
+ useTranslation: () => ({
+ t: (key: string) => key,
+ }),
+}))
+
+jest.mock('cmdk', () => ({
+ Command: {
+    Group: ({ children, className }: any) =>
+      <div className={className}>{children}</div>,
+    Item: ({ children, onSelect, value, className }: any) => (
+      <div
+        className={className}
+        onClick={() => onSelect && onSelect()}
+        data-value={value}
+        data-testid={`command-item-${value}`}
+      >
+        {children}
+      </div>
+    ),
+ },
+}))
+
+describe('CommandSelector', () => {
+  const mockActions: Record<string, ActionItem> = {
+ app: {
+ key: '@app',
+ shortcut: '@app',
+ title: 'Search Applications',
+ description: 'Search apps',
+ search: jest.fn(),
+ },
+ knowledge: {
+ key: '@knowledge',
+ shortcut: '@knowledge',
+ title: 'Search Knowledge',
+ description: 'Search knowledge bases',
+ search: jest.fn(),
+ },
+ plugin: {
+ key: '@plugin',
+ shortcut: '@plugin',
+ title: 'Search Plugins',
+ description: 'Search plugins',
+ search: jest.fn(),
+ },
+ node: {
+ key: '@node',
+ shortcut: '@node',
+ title: 'Search Nodes',
+ description: 'Search workflow nodes',
+ search: jest.fn(),
+ },
+ }
+
+ const mockOnCommandSelect = jest.fn()
+ const mockOnCommandValueChange = jest.fn()
+
+ beforeEach(() => {
+ jest.clearAllMocks()
+ })
+
+ describe('Basic Rendering', () => {
+ it('should render all actions when no filter is provided', () => {
+ render(
+ ,
+ )
+
+ expect(screen.getByTestId('command-item-@app')).toBeInTheDocument()
+ expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument()
+ expect(screen.getByTestId('command-item-@plugin')).toBeInTheDocument()
+ expect(screen.getByTestId('command-item-@node')).toBeInTheDocument()
+ })
+
+ it('should render empty filter as showing all actions', () => {
+ render(
+ ,
+ )
+
+ expect(screen.getByTestId('command-item-@app')).toBeInTheDocument()
+ expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument()
+ expect(screen.getByTestId('command-item-@plugin')).toBeInTheDocument()
+ expect(screen.getByTestId('command-item-@node')).toBeInTheDocument()
+ })
+ })
+
+ describe('Filtering Functionality', () => {
+ it('should filter actions based on searchFilter - single match', () => {
+ render(
+ ,
+ )
+
+ expect(screen.queryByTestId('command-item-@app')).not.toBeInTheDocument()
+ expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument()
+ expect(screen.queryByTestId('command-item-@plugin')).not.toBeInTheDocument()
+ expect(screen.queryByTestId('command-item-@node')).not.toBeInTheDocument()
+ })
+
+ it('should filter actions with multiple matches', () => {
+ render(
+ ,
+ )
+
+ expect(screen.getByTestId('command-item-@app')).toBeInTheDocument()
+ expect(screen.queryByTestId('command-item-@knowledge')).not.toBeInTheDocument()
+ expect(screen.getByTestId('command-item-@plugin')).toBeInTheDocument()
+ expect(screen.queryByTestId('command-item-@node')).not.toBeInTheDocument()
+ })
+
+ it('should be case-insensitive when filtering', () => {
+ render(
+ ,
+ )
+
+ expect(screen.getByTestId('command-item-@app')).toBeInTheDocument()
+ expect(screen.queryByTestId('command-item-@knowledge')).not.toBeInTheDocument()
+ })
+
+ it('should match partial strings', () => {
+ render(
+ ,
+ )
+
+ expect(screen.queryByTestId('command-item-@app')).not.toBeInTheDocument()
+ expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument()
+ expect(screen.queryByTestId('command-item-@plugin')).not.toBeInTheDocument()
+ expect(screen.queryByTestId('command-item-@node')).not.toBeInTheDocument()
+ })
+ })
+
+ describe('Empty State', () => {
+ it('should show empty state when no matches found', () => {
+ render(
+ ,
+ )
+
+ expect(screen.queryByTestId('command-item-@app')).not.toBeInTheDocument()
+ expect(screen.queryByTestId('command-item-@knowledge')).not.toBeInTheDocument()
+ expect(screen.queryByTestId('command-item-@plugin')).not.toBeInTheDocument()
+ expect(screen.queryByTestId('command-item-@node')).not.toBeInTheDocument()
+
+ expect(screen.getByText('app.gotoAnything.noMatchingCommands')).toBeInTheDocument()
+ expect(screen.getByText('app.gotoAnything.tryDifferentSearch')).toBeInTheDocument()
+ })
+
+ it('should not show empty state when filter is empty', () => {
+ render(
+ ,
+ )
+
+ expect(screen.queryByText('app.gotoAnything.noMatchingCommands')).not.toBeInTheDocument()
+ })
+ })
+
+ describe('Selection and Highlight Management', () => {
+ it('should call onCommandValueChange when filter changes and first item differs', () => {
+ const { rerender } = render(
+ ,
+ )
+
+ rerender(
+ ,
+ )
+
+ expect(mockOnCommandValueChange).toHaveBeenCalledWith('@knowledge')
+ })
+
+ it('should not call onCommandValueChange if current value still exists', () => {
+ const { rerender } = render(
+ ,
+ )
+
+ rerender(
+ ,
+ )
+
+ expect(mockOnCommandValueChange).not.toHaveBeenCalled()
+ })
+
+ it('should handle onCommandSelect callback correctly', () => {
+ render(
+ ,
+ )
+
+ const knowledgeItem = screen.getByTestId('command-item-@knowledge')
+ fireEvent.click(knowledgeItem)
+
+ expect(mockOnCommandSelect).toHaveBeenCalledWith('@knowledge')
+ })
+ })
+
+ describe('Edge Cases', () => {
+ it('should handle empty actions object', () => {
+ render(
+ ,
+ )
+
+ expect(screen.getByText('app.gotoAnything.noMatchingCommands')).toBeInTheDocument()
+ })
+
+ it('should handle special characters in filter', () => {
+ render(
+ ,
+ )
+
+ expect(screen.getByTestId('command-item-@app')).toBeInTheDocument()
+ expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument()
+ expect(screen.getByTestId('command-item-@plugin')).toBeInTheDocument()
+ expect(screen.getByTestId('command-item-@node')).toBeInTheDocument()
+ })
+
+ it('should handle undefined onCommandValueChange gracefully', () => {
+ const { rerender } = render(
+ ,
+ )
+
+ expect(() => {
+ rerender(
+ ,
+ )
+ }).not.toThrow()
+ })
+ })
+
+ describe('Backward Compatibility', () => {
+ it('should work without searchFilter prop (backward compatible)', () => {
+ render(
+ ,
+ )
+
+ expect(screen.getByTestId('command-item-@app')).toBeInTheDocument()
+ expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument()
+ expect(screen.getByTestId('command-item-@plugin')).toBeInTheDocument()
+ expect(screen.getByTestId('command-item-@node')).toBeInTheDocument()
+ })
+
+ it('should work without commandValue and onCommandValueChange props', () => {
+ render(
+ ,
+ )
+
+ expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument()
+ expect(screen.queryByTestId('command-item-@app')).not.toBeInTheDocument()
+ })
+ })
+})
diff --git a/web/__tests__/goto-anything/search-error-handling.test.ts b/web/__tests__/goto-anything/search-error-handling.test.ts
new file mode 100644
index 0000000000..d2fd921e1c
--- /dev/null
+++ b/web/__tests__/goto-anything/search-error-handling.test.ts
@@ -0,0 +1,197 @@
+/**
+ * Test GotoAnything search error handling mechanisms
+ *
+ * Main validations:
+ * 1. @plugin search error handling when API fails
+ * 2. Regular search (without @prefix) error handling when API fails
+ * 3. Verify consistent error handling across different search types
+ * 4. Ensure errors don't propagate to UI layer causing "search failed"
+ */
+
+import { Actions, searchAnything } from '@/app/components/goto-anything/actions'
+import { postMarketplace } from '@/service/base'
+import { fetchAppList } from '@/service/apps'
+import { fetchDatasets } from '@/service/datasets'
+
+// Mock API functions
+jest.mock('@/service/base', () => ({
+ postMarketplace: jest.fn(),
+}))
+
+jest.mock('@/service/apps', () => ({
+ fetchAppList: jest.fn(),
+}))
+
+jest.mock('@/service/datasets', () => ({
+ fetchDatasets: jest.fn(),
+}))
+
+const mockPostMarketplace = postMarketplace as jest.MockedFunction<typeof postMarketplace>
+const mockFetchAppList = fetchAppList as jest.MockedFunction<typeof fetchAppList>
+const mockFetchDatasets = fetchDatasets as jest.MockedFunction<typeof fetchDatasets>
+
+describe('GotoAnything Search Error Handling', () => {
+ beforeEach(() => {
+ jest.clearAllMocks()
+ // Suppress console.warn for clean test output
+ jest.spyOn(console, 'warn').mockImplementation(() => {
+ // Suppress console.warn for clean test output
+ })
+ })
+
+ afterEach(() => {
+ jest.restoreAllMocks()
+ })
+
+ describe('@plugin search error handling', () => {
+ it('should return empty array when API fails instead of throwing error', async () => {
+ // Mock marketplace API failure (403 permission denied)
+ mockPostMarketplace.mockRejectedValue(new Error('HTTP 403: Forbidden'))
+
+ const pluginAction = Actions.plugin
+
+ // Directly call plugin action's search method
+ const result = await pluginAction.search('@plugin', 'test', 'en')
+
+ // Should return empty array instead of throwing error
+ expect(result).toEqual([])
+ expect(mockPostMarketplace).toHaveBeenCalledWith('/plugins/search/advanced', {
+ body: {
+ page: 1,
+ page_size: 10,
+ query: 'test',
+ type: 'plugin',
+ },
+ })
+ })
+
+ it('should return empty array when user has no plugin data', async () => {
+ // Mock marketplace returning empty data
+ mockPostMarketplace.mockResolvedValue({
+ data: { plugins: [] },
+ })
+
+ const pluginAction = Actions.plugin
+ const result = await pluginAction.search('@plugin', '', 'en')
+
+ expect(result).toEqual([])
+ })
+
+ it('should return empty array when API returns unexpected data structure', async () => {
+ // Mock API returning unexpected data structure
+ mockPostMarketplace.mockResolvedValue({
+ data: null,
+ })
+
+ const pluginAction = Actions.plugin
+ const result = await pluginAction.search('@plugin', 'test', 'en')
+
+ expect(result).toEqual([])
+ })
+ })
+
+ describe('Other search types error handling', () => {
+ it('@app search should return empty array when API fails', async () => {
+ // Mock app API failure
+ mockFetchAppList.mockRejectedValue(new Error('API Error'))
+
+ const appAction = Actions.app
+ const result = await appAction.search('@app', 'test', 'en')
+
+ expect(result).toEqual([])
+ })
+
+ it('@knowledge search should return empty array when API fails', async () => {
+ // Mock knowledge API failure
+ mockFetchDatasets.mockRejectedValue(new Error('API Error'))
+
+ const knowledgeAction = Actions.knowledge
+ const result = await knowledgeAction.search('@knowledge', 'test', 'en')
+
+ expect(result).toEqual([])
+ })
+ })
+
+ describe('Unified search entry error handling', () => {
+ it('regular search (without @prefix) should return successful results even when partial APIs fail', async () => {
+ // Set app and knowledge success, plugin failure
+ mockFetchAppList.mockResolvedValue({ data: [], has_more: false, limit: 10, page: 1, total: 0 })
+ mockFetchDatasets.mockResolvedValue({ data: [], has_more: false, limit: 10, page: 1, total: 0 })
+ mockPostMarketplace.mockRejectedValue(new Error('Plugin API failed'))
+
+ const result = await searchAnything('en', 'test')
+
+      // Should still resolve with the results from the successful searches (empty here) rather than reject
+ expect(result).toEqual([])
+ expect(console.warn).toHaveBeenCalledWith('Plugin search failed:', expect.any(Error))
+ })
+
+ it('@plugin dedicated search should return empty array when API fails', async () => {
+ // Mock plugin API failure
+ mockPostMarketplace.mockRejectedValue(new Error('Plugin service unavailable'))
+
+ const pluginAction = Actions.plugin
+ const result = await searchAnything('en', '@plugin test', pluginAction)
+
+ // Should return empty array instead of throwing error
+ expect(result).toEqual([])
+ })
+
+ it('@app dedicated search should return empty array when API fails', async () => {
+ // Mock app API failure
+ mockFetchAppList.mockRejectedValue(new Error('App service unavailable'))
+
+ const appAction = Actions.app
+ const result = await searchAnything('en', '@app test', appAction)
+
+ expect(result).toEqual([])
+ })
+ })
+
+ describe('Error handling consistency validation', () => {
+ it('all search types should return empty array when encountering errors', async () => {
+ // Mock all APIs to fail
+ mockPostMarketplace.mockRejectedValue(new Error('Plugin API failed'))
+ mockFetchAppList.mockRejectedValue(new Error('App API failed'))
+ mockFetchDatasets.mockRejectedValue(new Error('Dataset API failed'))
+
+ const actions = [
+ { name: '@plugin', action: Actions.plugin },
+ { name: '@app', action: Actions.app },
+ { name: '@knowledge', action: Actions.knowledge },
+ ]
+
+ for (const { name, action } of actions) {
+ const result = await action.search(name, 'test', 'en')
+ expect(result).toEqual([])
+ }
+ })
+ })
+
+ describe('Edge case testing', () => {
+ it('empty search term should be handled properly', async () => {
+ mockPostMarketplace.mockResolvedValue({ data: { plugins: [] } })
+
+ const result = await searchAnything('en', '@plugin ', Actions.plugin)
+ expect(result).toEqual([])
+ })
+
+ it('network timeout should be handled correctly', async () => {
+ const timeoutError = new Error('Network timeout')
+ timeoutError.name = 'TimeoutError'
+
+ mockPostMarketplace.mockRejectedValue(timeoutError)
+
+ const result = await searchAnything('en', '@plugin test', Actions.plugin)
+ expect(result).toEqual([])
+ })
+
+ it('JSON parsing errors should be handled correctly', async () => {
+ const parseError = new SyntaxError('Unexpected token in JSON')
+ mockPostMarketplace.mockRejectedValue(parseError)
+
+ const result = await searchAnything('en', '@plugin test', Actions.plugin)
+ expect(result).toEqual([])
+ })
+ })
+})
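The contract these tests pin down is that each action's search catches its own API failure and resolves to an empty list, so the palette can fall back to "no results" instead of surfacing an error. The sketch below is a minimal illustration of that contract, not code from this PR: the action shape, the SearchResult fields, and the fetchPluginsStub helper are assumptions standing in for postMarketplace and the real actions under web/app/components/goto-anything/actions.

// Minimal sketch, assuming a simplified action shape. fetchPluginsStub stands in
// for postMarketplace('/plugins/search/advanced', ...) and always fails here.
type SearchResult = { id: string; title: string; type: string }

type SearchFn = (query: string, term: string, locale: string) => Promise<SearchResult[]>

async function fetchPluginsStub(_term: string): Promise<{ plugins: { id: string; name: string }[] } | null> {
  throw new Error('HTTP 403: Forbidden')
}

const pluginSearch: SearchFn = async (_query, term, _locale) => {
  try {
    const data = await fetchPluginsStub(term)
    const plugins = data?.plugins ?? [] // tolerate null or otherwise unexpected payloads
    return plugins.map(p => ({ id: p.id, title: p.name, type: 'plugin' }))
  }
  catch (e) {
    console.warn('Plugin search failed:', e) // the warning asserted in the tests above
    return [] // resolve to "no results" rather than propagate the error to the UI
  }
}

Under this contract, await pluginSearch('@plugin', 'test', 'en') resolves to [] and logs the "Plugin search failed:" warning the tests assert, and an aggregator like searchAnything can combine sources without ever rejecting because a single source failed.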
diff --git a/web/app/(commonLayout)/layout.tsx b/web/app/(commonLayout)/layout.tsx
index 64186a1b10..ed1c995e25 100644
--- a/web/app/(commonLayout)/layout.tsx
+++ b/web/app/(commonLayout)/layout.tsx
@@ -8,6 +8,7 @@ import Header from '@/app/components/header'
import { EventEmitterContextProvider } from '@/context/event-emitter'
import { ProviderContextProvider } from '@/context/provider-context'
import { ModalContextProvider } from '@/context/modal-context'
+import GotoAnything from '@/app/components/goto-anything'
const Layout = ({ children }: { children: ReactNode }) => {
return (
@@ -22,6 +23,7 @@ const Layout = ({ children }: { children: ReactNode }) => {
{children}
+        <GotoAnything />
diff --git a/web/app/components/base/input/index.tsx b/web/app/components/base/input/index.tsx
index 30fd90aff8..ae171b0a76 100644
--- a/web/app/components/base/input/index.tsx
+++ b/web/app/components/base/input/index.tsx
@@ -32,7 +32,7 @@ export type InputProps = {
unit?: string
} & Omit<React.InputHTMLAttributes<HTMLInputElement>, 'size'> & VariantProps<typeof inputVariants>
-const Input = ({
+const Input = React.forwardRef<HTMLInputElement, InputProps>(({
size,
disabled,
destructive,
@@ -47,12 +47,13 @@ const Input = ({
onChange = noop,
unit,
...props
-}: InputProps) => {
+}, ref) => {
const { t } = useTranslation()
return (
{showLeftIcon &&
}
)
-}
+})
+
+Input.displayName = 'Input'
export default Input
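Wrapping Input in React.forwardRef lets a parent component reach the underlying <input> element, which is what a command palette typically needs in order to focus the search box when it opens. Below is a hypothetical consumer, assuming the forwarded ref is attached to the native <input> inside the component and that native attributes such as placeholder are still passed through; it illustrates the motivation and is not code from this PR.

// Hypothetical consumer of the now ref-forwarding Input component.
import { useEffect, useRef } from 'react'
import Input from '@/app/components/base/input'

const SearchTrigger = ({ open }: { open: boolean }) => {
  const inputRef = useRef<HTMLInputElement>(null)

  useEffect(() => {
    if (open)
      inputRef.current?.focus() // only works because Input now forwards its ref
  }, [open])

  return <Input ref={inputRef} placeholder='Search anything...' />
}

export default SearchTrigger

Before this change, a ref passed to Input would not reach the DOM node, since plain function components cannot receive refs without forwardRef (React also logs a warning in that case).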
diff --git a/web/app/components/base/modal/index.tsx b/web/app/components/base/modal/index.tsx
index e65f79d212..bb23bc3746 100644
--- a/web/app/components/base/modal/index.tsx
+++ b/web/app/components/base/modal/index.tsx
@@ -15,6 +15,7 @@ type IModal = {
children?: React.ReactNode
closable?: boolean
overflowVisible?: boolean
+ highPriority?: boolean // For modals that need to appear above dropdowns
}
export default function Modal({
@@ -27,10 +28,11 @@ export default function Modal({
children,
closable = false,
overflowVisible = false,
+ highPriority = false,
}: IModal) {
return (
-