Merge branch 'main' into feat/rag-2

# Conflicts:
#	api/core/workflow/entities/variable_pool.py
jyong 2025-08-12 11:13:04 +08:00
commit 22b3933cc3
75 changed files with 2797 additions and 481 deletions

View File

@ -144,7 +144,8 @@ class DatabaseConfig(BaseSettings):
default="postgresql",
)
@computed_field
@computed_field # type: ignore[misc]
@property
def SQLALCHEMY_DATABASE_URI(self) -> str:
db_extras = (
f"{self.DB_EXTRAS}&client_encoding={self.DB_CHARSET}" if self.DB_CHARSET else self.DB_EXTRAS

View File

@ -568,7 +568,7 @@ class AdvancedChatAppGenerateTaskPipeline:
)
yield workflow_finish_resp
self._base_task_pipeline._queue_manager.publish(QueueAdvancedChatMessageEndEvent(), PublishFrom.TASK_PIPELINE)
self._base_task_pipeline.queue_manager.publish(QueueAdvancedChatMessageEndEvent(), PublishFrom.TASK_PIPELINE)
def _handle_workflow_partial_success_event(
self,
@ -600,7 +600,7 @@ class AdvancedChatAppGenerateTaskPipeline:
)
yield workflow_finish_resp
self._base_task_pipeline._queue_manager.publish(QueueAdvancedChatMessageEndEvent(), PublishFrom.TASK_PIPELINE)
self._base_task_pipeline.queue_manager.publish(QueueAdvancedChatMessageEndEvent(), PublishFrom.TASK_PIPELINE)
def _handle_workflow_failed_event(
self,
@ -845,7 +845,7 @@ class AdvancedChatAppGenerateTaskPipeline:
# Initialize graph runtime state
graph_runtime_state: Optional[GraphRuntimeState] = None
for queue_message in self._base_task_pipeline._queue_manager.listen():
for queue_message in self._base_task_pipeline.queue_manager.listen():
event = queue_message.event
match event:
@ -959,11 +959,11 @@ class AdvancedChatAppGenerateTaskPipeline:
if self._base_task_pipeline._output_moderation_handler:
if self._base_task_pipeline._output_moderation_handler.should_direct_output():
self._task_state.answer = self._base_task_pipeline._output_moderation_handler.get_final_output()
self._base_task_pipeline._queue_manager.publish(
self._base_task_pipeline.queue_manager.publish(
QueueTextChunkEvent(text=self._task_state.answer), PublishFrom.TASK_PIPELINE
)
self._base_task_pipeline._queue_manager.publish(
self._base_task_pipeline.queue_manager.publish(
QueueStopEvent(stopped_by=QueueStopEvent.StopBy.OUTPUT_MODERATION), PublishFrom.TASK_PIPELINE
)
return True

View File

@ -711,7 +711,7 @@ class WorkflowAppGenerateTaskPipeline:
# Initialize graph runtime state
graph_runtime_state = None
for queue_message in self._base_task_pipeline._queue_manager.listen():
for queue_message in self._base_task_pipeline.queue_manager.listen():
event = queue_message.event
match event:

View File

@ -9,7 +9,6 @@ from core.app.app_config.entities import EasyUIBasedAppConfig, WorkflowUIBasedAp
from core.entities.provider_configuration import ProviderModelBundle
from core.file import File, FileUploadConfig
from core.model_runtime.entities.model_entities import AIModelEntity
from core.ops.ops_trace_manager import TraceQueueManager
class InvokeFrom(Enum):
@ -115,7 +114,8 @@ class AppGenerateEntity(BaseModel):
extras: dict[str, Any] = Field(default_factory=dict)
# tracing instance
trace_manager: Optional[TraceQueueManager] = None
# Using Any to avoid circular import with TraceQueueManager
trace_manager: Optional[Any] = None
class EasyUIBasedAppGenerateEntity(AppGenerateEntity):

View File

@ -37,7 +37,7 @@ class BasedGenerateTaskPipeline:
stream: bool,
) -> None:
self._application_generate_entity = application_generate_entity
self._queue_manager = queue_manager
self.queue_manager = queue_manager
self._start_at = time.perf_counter()
self._output_moderation_handler = self._init_output_moderation()
self._stream = stream
@ -113,7 +113,7 @@ class BasedGenerateTaskPipeline:
tenant_id=app_config.tenant_id,
app_id=app_config.app_id,
rule=ModerationRule(type=sensitive_word_avoidance.type, config=sensitive_word_avoidance.config),
queue_manager=self._queue_manager,
queue_manager=self.queue_manager,
)
return None

View File

@ -257,7 +257,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline):
Process stream response.
:return:
"""
for message in self._queue_manager.listen():
for message in self.queue_manager.listen():
if publisher:
publisher.publish(message)
event = message.event
@ -499,7 +499,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline):
if self._output_moderation_handler.should_direct_output():
# stop subscribe new token when output moderation should direct output
self._task_state.llm_result.message.content = self._output_moderation_handler.get_final_output()
self._queue_manager.publish(
self.queue_manager.publish(
QueueLLMChunkEvent(
chunk=LLMResultChunk(
model=self._task_state.llm_result.model,
@ -513,7 +513,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline):
PublishFrom.TASK_PIPELINE,
)
self._queue_manager.publish(
self.queue_manager.publish(
QueueStopEvent(stopped_by=QueueStopEvent.StopBy.OUTPUT_MODERATION), PublishFrom.TASK_PIPELINE
)
return True

View File

@ -99,13 +99,13 @@ class TokenBufferMemory:
prompt_messages.append(UserPromptMessage(content=message.query))
else:
prompt_message_contents: list[PromptMessageContentUnionTypes] = []
prompt_message_contents.append(TextPromptMessageContent(data=message.query))
for file in file_objs:
prompt_message = file_manager.to_prompt_message_content(
file,
image_detail_config=detail,
)
prompt_message_contents.append(prompt_message)
prompt_message_contents.append(TextPromptMessageContent(data=message.query))
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))

View File

@ -257,11 +257,6 @@ class ModelProviderFactory:
# scan all providers
plugin_model_provider_entities = self.get_plugin_model_providers()
# convert provider_configs to dict
provider_credentials_dict = {}
for provider_config in provider_configs:
provider_credentials_dict[provider_config.provider] = provider_config.credentials
# traverse all model_provider_extensions
providers = []
for plugin_model_provider_entity in plugin_model_provider_entities:

View File

@ -68,7 +68,7 @@ class CommonValidator:
if credential_form_schema.max_length:
if len(value) > credential_form_schema.max_length:
raise ValueError(
f"Variable {credential_form_schema.variable} length should not"
f"Variable {credential_form_schema.variable} length should not be"
f" greater than {credential_form_schema.max_length}"
)

View File

@ -1,10 +0,0 @@
import pydantic
from pydantic import BaseModel
def dump_model(model: BaseModel) -> dict:
if hasattr(pydantic, "model_dump"):
# FIXME mypy error, try to fix it instead of using type: ignore
return pydantic.model_dump(model) # type: ignore
else:
return model.model_dump()

View File

@ -109,8 +109,19 @@ class OracleVector(BaseVector):
)
def _get_connection(self) -> Connection:
connection = oracledb.connect(user=self.config.user, password=self.config.password, dsn=self.config.dsn)
return connection
if self.config.is_autonomous:
connection = oracledb.connect(
user=self.config.user,
password=self.config.password,
dsn=self.config.dsn,
config_dir=self.config.config_dir,
wallet_location=self.config.wallet_location,
wallet_password=self.config.wallet_password,
)
return connection
else:
connection = oracledb.connect(user=self.config.user, password=self.config.password, dsn=self.config.dsn)
return connection
def _create_connection_pool(self, config: OracleVectorConfig):
pool_params = {

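For reference, a minimal sketch of the two connection paths taken by _get_connection() above, using the python-oracledb parameters shown in the hunk; the credential and path values are illustrative assumptions, not taken from this commit:

import oracledb  # python-oracledb driver, as used above

# Wallet-based (Autonomous Database / mTLS) connection -- the is_autonomous branch:
conn = oracledb.connect(
    user="vector_app",                      # illustrative credentials
    password="***",
    dsn="mydb_high",                        # TNS alias resolved via tnsnames.ora
    config_dir="/opt/oracle/wallet",        # directory containing tnsnames.ora
    wallet_location="/opt/oracle/wallet",   # directory containing the wallet files
    wallet_password="***",
)

# Plain DSN connection -- the else branch:
# conn = oracledb.connect(user="vector_app", password="***", dsn="host:1521/service_name")
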
View File

@ -4,4 +4,4 @@
#
# If the selector length is more than 2, the remaining parts are the keys / indexes paths used
# to extract part of the variable value.
MIN_SELECTORS_LENGTH = 2
SELECTORS_LENGTH = 2

View File

@ -7,8 +7,9 @@ from pydantic import BaseModel, Field
from core.file import File, FileAttribute, file_manager
from core.variables import Segment, SegmentGroup, Variable
from core.variables.consts import MIN_SELECTORS_LENGTH
from core.variables.segments import FileSegment, NoneSegment
from core.variables.consts import MIN_SELECTORS_LENGTH, SELECTORS_LENGTH
from core.variables.variables import VariableUnion
from core.variables.segments import FileSegment, NoneSegment, ObjectSegment
from core.variables.variables import RAGPipelineVariableInput, VariableUnion
from core.workflow.constants import (
CONVERSATION_VARIABLE_NODE_ID,
@ -29,7 +30,7 @@ class VariablePool(BaseModel):
# The first element of the selector is the node id, it's the first-level key in the dictionary.
# Other elements of the selector are the keys in the second-level dictionary. To get the key, we hash the
# elements of the selector except the first one.
variable_dictionary: defaultdict[str, Annotated[dict[int, VariableUnion], Field(default_factory=dict)]] = Field(
variable_dictionary: defaultdict[str, Annotated[dict[str, VariableUnion], Field(default_factory=dict)]] = Field(
description="Variables mapping",
default=defaultdict(dict),
)
@ -41,6 +42,7 @@ class VariablePool(BaseModel):
)
system_variables: SystemVariable = Field(
description="System variables",
default_factory=SystemVariable.empty,
)
environment_variables: Sequence[VariableUnion] = Field(
description="Environment variables.",
@ -70,23 +72,29 @@ class VariablePool(BaseModel):
def add(self, selector: Sequence[str], value: Any, /) -> None:
"""
Adds a variable to the variable pool.
Add a variable to the variable pool.
NOTE: You should not add a non-Segment value to the variable pool
even if it is allowed now.
This method accepts a selector path and a value, converting the value
to a Variable object if necessary before storing it in the pool.
Args:
selector (Sequence[str]): The selector for the variable.
value (VariableValue): The value of the variable.
selector: A two-element sequence containing [node_id, variable_name].
The selector must have exactly 2 elements to be valid.
value: The value to store. Can be a Variable, Segment, or any value
that can be converted to a Segment (str, int, float, dict, list, File).
Raises:
ValueError: If the selector is invalid.
ValueError: If selector length is not exactly 2 elements.
Returns:
None
Note:
While non-Segment values are currently accepted and automatically
converted, it's recommended to pass Segment or Variable objects directly.
"""
if len(selector) < MIN_SELECTORS_LENGTH:
raise ValueError("Invalid selector")
if len(selector) != SELECTORS_LENGTH:
raise ValueError(
f"Invalid selector: expected {SELECTORS_LENGTH} elements (node_id, variable_name), "
f"got {len(selector)} elements"
)
if isinstance(value, Variable):
variable = value
@ -96,57 +104,85 @@ class VariablePool(BaseModel):
segment = variable_factory.build_segment(value)
variable = variable_factory.segment_to_variable(segment=segment, selector=selector)
key, hash_key = self._selector_to_keys(selector)
node_id, name = self._selector_to_keys(selector)
# Based on the definition of `VariableUnion`,
# `list[Variable]` can be safely used as `list[VariableUnion]` since they are compatible.
self.variable_dictionary[key][hash_key] = cast(VariableUnion, variable)
self.variable_dictionary[node_id][name] = cast(VariableUnion, variable)
@classmethod
def _selector_to_keys(cls, selector: Sequence[str]) -> tuple[str, int]:
return selector[0], hash(tuple(selector[1:]))
def _selector_to_keys(cls, selector: Sequence[str]) -> tuple[str, str]:
return selector[0], selector[1]
def _has(self, selector: Sequence[str]) -> bool:
key, hash_key = self._selector_to_keys(selector)
if key not in self.variable_dictionary:
node_id, name = self._selector_to_keys(selector)
if node_id not in self.variable_dictionary:
return False
if hash_key not in self.variable_dictionary[key]:
if name not in self.variable_dictionary[node_id]:
return False
return True
def get(self, selector: Sequence[str], /) -> Segment | None:
"""
Retrieves the value from the variable pool based on the given selector.
Retrieve a variable's value from the pool as a Segment.
This method supports both simple selectors [node_id, variable_name] and
extended selectors that include attribute access for FileSegment and
ObjectSegment types.
Args:
selector (Sequence[str]): The selector used to identify the variable.
selector: A sequence with at least 2 elements:
- [node_id, variable_name]: Returns the full segment
- [node_id, variable_name, attr, ...]: Returns a nested value
from FileSegment (e.g., 'url', 'name') or ObjectSegment
Returns:
Any: The value associated with the given selector.
The Segment associated with the selector, or None if not found.
Returns None if selector has fewer than 2 elements.
Raises:
ValueError: If the selector is invalid.
ValueError: If attempting to access an invalid FileAttribute.
"""
if len(selector) < MIN_SELECTORS_LENGTH:
if len(selector) < SELECTORS_LENGTH:
return None
key, hash_key = self._selector_to_keys(selector)
value: Segment | None = self.variable_dictionary[key].get(hash_key)
node_id, name = self._selector_to_keys(selector)
segment: Segment | None = self.variable_dictionary[node_id].get(name)
if value is None:
selector, attr = selector[:-1], selector[-1]
if segment is None:
return None
if len(selector) == 2:
return segment
if isinstance(segment, FileSegment):
attr = selector[2]
# Python supports `attr in FileAttribute` (value membership) only on 3.12+, so check the value set instead
if attr not in {item.value for item in FileAttribute}:
return None
value = self.get(selector)
if not isinstance(value, FileSegment | NoneSegment):
return None
if isinstance(value, FileSegment):
attr = FileAttribute(attr)
attr_value = file_manager.get_attr(file=value.value, attr=attr)
return variable_factory.build_segment(attr_value)
return value
attr = FileAttribute(attr)
attr_value = file_manager.get_attr(file=segment.value, attr=attr)
return variable_factory.build_segment(attr_value)
return value
# Navigate through nested attributes
result: Any = segment
for attr in selector[2:]:
result = self._extract_value(result)
result = self._get_nested_attribute(result, attr)
if result is None:
return None
# Return result as Segment
return result if isinstance(result, Segment) else variable_factory.build_segment(result)
def _extract_value(self, obj: Any) -> Any:
"""Extract the actual value from an ObjectSegment."""
return obj.value if isinstance(obj, ObjectSegment) else obj
def _get_nested_attribute(self, obj: Mapping[str, Any], attr: str) -> Any:
"""Get a nested attribute from a dictionary-like object."""
if not isinstance(obj, dict):
return None
return obj.get(attr)
def remove(self, selector: Sequence[str], /):
"""

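A minimal usage sketch of the reworked selector contract described in the docstrings above; the node id and variable names are illustrative, and the SystemVariable import is elided because its module path does not appear in this diff:

from core.workflow.entities.variable_pool import VariablePool

# Construct the pool as the tests below do (SystemVariable import elided).
pool = VariablePool(
    system_variables=SystemVariable(user_id="user-1", files=[]),
    user_inputs={},
    environment_variables=[],
    conversation_variables=[],
)

# add() now requires exactly two selector elements: [node_id, variable_name];
# any other length raises ValueError.
pool.add(["node_1", "answer"], "hello")
pool.add(["node_1", "profile"], {"name": {"first": "Ada"}})

# get() with a two-element selector returns the stored segment.
pool.get(["node_1", "answer"])                    # string segment wrapping "hello"

# Extended selectors walk nested ObjectSegment / dict values lazily,
# or FileSegment attributes such as "url" or "name".
pool.get(["node_1", "profile", "name", "first"])  # segment wrapping "Ada"

# Fewer than two elements returns None rather than raising.
pool.get(["node_1"])                              # None
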
View File

@ -15,7 +15,7 @@ from configs import dify_config
from core.app.apps.exc import GenerateTaskStoppedError
from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.node_entities import AgentNodeStrategyInit, NodeRunResult
from core.workflow.entities.variable_pool import VariablePool, VariableValue
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus
from core.workflow.graph_engine.condition_handlers.condition_manager import ConditionManager
from core.workflow.graph_engine.entities.event import (
@ -51,7 +51,6 @@ from core.workflow.nodes.base import BaseNode
from core.workflow.nodes.end.end_stream_processor import EndStreamProcessor
from core.workflow.nodes.enums import ErrorStrategy, FailBranchSourceHandle
from core.workflow.nodes.event import RunCompletedEvent, RunRetrieverResourceEvent, RunStreamChunkEvent
from core.workflow.utils import variable_utils
from libs.flask_utils import preserve_flask_contexts
from models.enums import UserFrom
from models.workflow import WorkflowType
@ -701,11 +700,9 @@ class GraphEngine:
route_node_state.status = RouteNodeState.Status.EXCEPTION
if run_result.outputs:
for variable_key, variable_value in run_result.outputs.items():
# append variables to variable pool recursively
self._append_variables_recursively(
node_id=node.node_id,
variable_key_list=[variable_key],
variable_value=variable_value,
# Add variables to variable pool
self.graph_runtime_state.variable_pool.add(
[node.node_id, variable_key], variable_value
)
yield NodeRunExceptionEvent(
error=run_result.error or "System Error",
@ -758,11 +755,9 @@ class GraphEngine:
# append node output variables to variable pool
if run_result.outputs:
for variable_key, variable_value in run_result.outputs.items():
# append variables to variable pool recursively
self._append_variables_recursively(
node_id=node.node_id,
variable_key_list=[variable_key],
variable_value=variable_value,
# Add variables to variable pool
self.graph_runtime_state.variable_pool.add(
[node.node_id, variable_key], variable_value
)
# When setting metadata, convert to dict first
@ -851,21 +846,6 @@ class GraphEngine:
logger.exception("Node %s run failed", node.title)
raise e
def _append_variables_recursively(self, node_id: str, variable_key_list: list[str], variable_value: VariableValue):
"""
Append variables recursively
:param node_id: node id
:param variable_key_list: variable key list
:param variable_value: variable value
:return:
"""
variable_utils.append_variables_recursively(
self.graph_runtime_state.variable_pool,
node_id,
variable_key_list,
variable_value,
)
def _is_timed_out(self, start_at: float, max_execution_time: int) -> bool:
"""
Check timeout

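A short sketch of what the replacement above changes in practice; the node id and output key are illustrative:

# Before this commit, a dict output such as {"score": 0.9} was flattened recursively into
# separate pool entries for ("node_1", "result") and ("node_1", "result", "score").
# Now the whole output is stored once under a 2-element selector:
self.graph_runtime_state.variable_pool.add(["node_1", "result"], {"score": 0.9})

# Nested values stay reachable; they are resolved lazily by VariablePool.get
# (see the variable_pool.py hunk above):
self.graph_runtime_state.variable_pool.get(["node_1", "result", "score"])  # segment wrapping 0.9
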
View File

@ -4,7 +4,7 @@ from typing import Any, TypeVar
from pydantic import BaseModel
from core.variables import Segment
from core.variables.consts import MIN_SELECTORS_LENGTH
from core.variables.consts import SELECTORS_LENGTH
from core.variables.types import SegmentType
# Use double underscore (`__`) prefix for internal variables
@ -23,7 +23,7 @@ _T = TypeVar("_T", bound=MutableMapping[str, Any])
def variable_to_processed_data(selector: Sequence[str], seg: Segment) -> UpdatedVariable:
if len(selector) < MIN_SELECTORS_LENGTH:
if len(selector) < SELECTORS_LENGTH:
raise Exception("selector too short")
node_id, var_name = selector[:2]
return UpdatedVariable(

View File

@ -4,7 +4,7 @@ from typing import Any, Optional, cast
from core.app.entities.app_invoke_entities import InvokeFrom
from core.variables import SegmentType, Variable
from core.variables.consts import MIN_SELECTORS_LENGTH
from core.variables.consts import SELECTORS_LENGTH
from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID
from core.workflow.conversation_variable_updater import ConversationVariableUpdater
from core.workflow.entities.node_entities import NodeRunResult
@ -46,7 +46,7 @@ def _source_mapping_from_item(mapping: MutableMapping[str, Sequence[str]], node_
selector = item.value
if not isinstance(selector, list):
raise InvalidDataError(f"selector is not a list, {node_id=}, {item=}")
if len(selector) < MIN_SELECTORS_LENGTH:
if len(selector) < SELECTORS_LENGTH:
raise InvalidDataError(f"selector too short, {node_id=}, {item=}")
selector_str = ".".join(selector)
key = f"{node_id}.#{selector_str}#"

View File

@ -1,29 +0,0 @@
from core.variables.segments import ObjectSegment, Segment
from core.workflow.entities.variable_pool import VariablePool, VariableValue
def append_variables_recursively(
pool: VariablePool, node_id: str, variable_key_list: list[str], variable_value: VariableValue | Segment
):
"""
Append variables recursively
:param pool: variable pool to append variables to
:param node_id: node id
:param variable_key_list: variable key list
:param variable_value: variable value
:return:
"""
pool.add([node_id] + variable_key_list, variable_value)
# if variable_value is a dict, then recursively append variables
if isinstance(variable_value, ObjectSegment):
variable_dict = variable_value.value
elif isinstance(variable_value, dict):
variable_dict = variable_value
else:
return
for key, value in variable_dict.items():
# construct new key list
new_key_list = variable_key_list + [key]
append_variables_recursively(pool, node_id=node_id, variable_key_list=new_key_list, variable_value=value)

View File

@ -3,9 +3,8 @@ from collections.abc import Mapping, Sequence
from typing import Any, Protocol
from core.variables import Variable
from core.variables.consts import MIN_SELECTORS_LENGTH
from core.variables.consts import SELECTORS_LENGTH
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.utils import variable_utils
class VariableLoader(Protocol):
@ -78,7 +77,7 @@ def load_into_variable_pool(
variables_to_load.append(list(selector))
loaded = variable_loader.load_variables(variables_to_load)
for var in loaded:
assert len(var.selector) >= MIN_SELECTORS_LENGTH, f"Invalid variable {var}"
variable_utils.append_variables_recursively(
variable_pool, node_id=var.selector[0], variable_key_list=list(var.selector[1:]), variable_value=var
)
assert len(var.selector) >= SELECTORS_LENGTH, f"Invalid variable {var}"
# Add variable directly to the pool
# The variable pool expects 2-element selectors [node_id, variable_name]
variable_pool.add([var.selector[0], var.selector[1]], var)

View File

@ -1,6 +1,6 @@
[project]
name = "dify-api"
version = "1.7.1"
version = "1.7.2"
requires-python = ">=3.11,<3.13"
dependencies = [

View File

@ -48,7 +48,6 @@ class DifyAPIRepositoryFactory(DifyCoreRepositoryFactory):
RepositoryImportError: If the configured repository cannot be imported or instantiated
"""
class_path = dify_config.API_WORKFLOW_NODE_EXECUTION_REPOSITORY
logger.debug("Creating DifyAPIWorkflowNodeExecutionRepository from: %s", class_path)
try:
repository_class = cls._import_class(class_path)
@ -86,7 +85,6 @@ class DifyAPIRepositoryFactory(DifyCoreRepositoryFactory):
RepositoryImportError: If the configured repository cannot be imported or instantiated
"""
class_path = dify_config.API_WORKFLOW_RUN_REPOSITORY
logger.debug("Creating APIWorkflowRunRepository from: %s", class_path)
try:
repository_class = cls._import_class(class_path)

View File

@ -13,7 +13,7 @@ from sqlalchemy.sql.expression import and_, or_
from core.app.entities.app_invoke_entities import InvokeFrom
from core.file.models import File
from core.variables import Segment, StringSegment, Variable
from core.variables.consts import MIN_SELECTORS_LENGTH
from core.variables.consts import SELECTORS_LENGTH
from core.variables.segments import ArrayFileSegment, FileSegment
from core.variables.types import SegmentType
from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, ENVIRONMENT_VARIABLE_NODE_ID, SYSTEM_VARIABLE_NODE_ID
@ -147,7 +147,7 @@ class WorkflowDraftVariableService:
) -> list[WorkflowDraftVariable]:
ors = []
for selector in selectors:
assert len(selector) >= MIN_SELECTORS_LENGTH, f"Invalid selector to get: {selector}"
assert len(selector) >= SELECTORS_LENGTH, f"Invalid selector to get: {selector}"
node_id, name = selector[:2]
ors.append(and_(WorkflowDraftVariable.node_id == node_id, WorkflowDraftVariable.name == name))
@ -608,7 +608,7 @@ class DraftVariableSaver:
for item in updated_variables:
selector = item.selector
if len(selector) < MIN_SELECTORS_LENGTH:
if len(selector) < SELECTORS_LENGTH:
raise Exception("selector too short")
# NOTE(QuantumGhost): only the following two kinds of variable could be updated by
# VariableAssigner: ConversationVariable and iteration variable.

View File

@ -56,19 +56,29 @@ def clean_dataset_task(
documents = db.session.query(Document).where(Document.dataset_id == dataset_id).all()
segments = db.session.query(DocumentSegment).where(DocumentSegment.dataset_id == dataset_id).all()
# Fix: Always clean vector database resources regardless of document existence
# This ensures all 33 vector databases properly drop tables/collections/indices
if doc_form is None:
# Use default paragraph index type for empty datasets to enable vector database cleanup
# Enhanced validation: Check if doc_form is None, empty string, or contains only whitespace
# This ensures all invalid doc_form values are properly handled
if doc_form is None or (isinstance(doc_form, str) and not doc_form.strip()):
# Use default paragraph index type for empty/invalid datasets to enable vector database cleanup
from core.rag.index_processor.constant.index_type import IndexType
doc_form = IndexType.PARAGRAPH_INDEX
logging.info(
click.style(f"No documents found, using default index type for cleanup: {doc_form}", fg="yellow")
click.style(f"Invalid doc_form detected, using default index type for cleanup: {doc_form}", fg="yellow")
)
index_processor = IndexProcessorFactory(doc_form).init_index_processor()
index_processor.clean(dataset, None, with_keywords=True, delete_child_chunks=True)
# Add exception handling around IndexProcessorFactory.clean() to prevent single point of failure
# This ensures Document/Segment deletion can continue even if vector database cleanup fails
try:
index_processor = IndexProcessorFactory(doc_form).init_index_processor()
index_processor.clean(dataset, None, with_keywords=True, delete_child_chunks=True)
logging.info(click.style(f"Successfully cleaned vector database for dataset: {dataset_id}", fg="green"))
except Exception as index_cleanup_error:
logging.exception(click.style(f"Failed to clean vector database for dataset {dataset_id}", fg="red"))
# Continue with document and segment deletion even if vector cleanup fails
logging.info(
click.style(f"Continuing with document and segment deletion for dataset: {dataset_id}", fg="yellow")
)
if documents is None or len(documents) == 0:
logging.info(click.style(f"No documents found for dataset: {dataset_id}", fg="green"))
@ -128,6 +138,14 @@ def clean_dataset_task(
click.style(f"Cleaned dataset when dataset deleted: {dataset_id} latency: {end_at - start_at}", fg="green")
)
except Exception:
# Add rollback to prevent dirty session state in case of exceptions
# This ensures the database session is properly cleaned up
try:
db.session.rollback()
logging.info(click.style(f"Rolled back database session for dataset: {dataset_id}", fg="yellow"))
except Exception as rollback_error:
logging.exception("Failed to rollback database session")
logging.exception("Cleaned dataset when dataset deleted failed")
finally:
db.session.close()

View File

@ -55,8 +55,8 @@ def init_code_node(code_config: dict):
environment_variables=[],
conversation_variables=[],
)
variable_pool.add(["code", "123", "args1"], 1)
variable_pool.add(["code", "123", "args2"], 2)
variable_pool.add(["code", "args1"], 1)
variable_pool.add(["code", "args2"], 2)
node = CodeNode(
id=str(uuid.uuid4()),
@ -96,9 +96,9 @@ def test_execute_code(setup_code_executor_mock):
"variables": [
{
"variable": "args1",
"value_selector": ["1", "123", "args1"],
"value_selector": ["1", "args1"],
},
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
{"variable": "args2", "value_selector": ["1", "args2"]},
],
"answer": "123",
"code_language": "python3",
@ -107,8 +107,8 @@ def test_execute_code(setup_code_executor_mock):
}
node = init_code_node(code_config)
node.graph_runtime_state.variable_pool.add(["1", "123", "args1"], 1)
node.graph_runtime_state.variable_pool.add(["1", "123", "args2"], 2)
node.graph_runtime_state.variable_pool.add(["1", "args1"], 1)
node.graph_runtime_state.variable_pool.add(["1", "args2"], 2)
# execute node
result = node._run()
@ -142,9 +142,9 @@ def test_execute_code_output_validator(setup_code_executor_mock):
"variables": [
{
"variable": "args1",
"value_selector": ["1", "123", "args1"],
"value_selector": ["1", "args1"],
},
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
{"variable": "args2", "value_selector": ["1", "args2"]},
],
"answer": "123",
"code_language": "python3",
@ -153,8 +153,8 @@ def test_execute_code_output_validator(setup_code_executor_mock):
}
node = init_code_node(code_config)
node.graph_runtime_state.variable_pool.add(["1", "123", "args1"], 1)
node.graph_runtime_state.variable_pool.add(["1", "123", "args2"], 2)
node.graph_runtime_state.variable_pool.add(["1", "args1"], 1)
node.graph_runtime_state.variable_pool.add(["1", "args2"], 2)
# execute node
result = node._run()
@ -217,9 +217,9 @@ def test_execute_code_output_validator_depth():
"variables": [
{
"variable": "args1",
"value_selector": ["1", "123", "args1"],
"value_selector": ["1", "args1"],
},
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
{"variable": "args2", "value_selector": ["1", "args2"]},
],
"answer": "123",
"code_language": "python3",
@ -307,9 +307,9 @@ def test_execute_code_output_object_list():
"variables": [
{
"variable": "args1",
"value_selector": ["1", "123", "args1"],
"value_selector": ["1", "args1"],
},
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
{"variable": "args2", "value_selector": ["1", "args2"]},
],
"answer": "123",
"code_language": "python3",

View File

@ -49,8 +49,8 @@ def init_http_node(config: dict):
environment_variables=[],
conversation_variables=[],
)
variable_pool.add(["a", "b123", "args1"], 1)
variable_pool.add(["a", "b123", "args2"], 2)
variable_pool.add(["a", "args1"], 1)
variable_pool.add(["a", "args2"], 2)
node = HttpRequestNode(
id=str(uuid.uuid4()),
@ -171,7 +171,7 @@ def test_template(setup_http_mock):
"title": "http",
"desc": "",
"method": "get",
"url": "http://example.com/{{#a.b123.args2#}}",
"url": "http://example.com/{{#a.args2#}}",
"authorization": {
"type": "api-key",
"config": {
@ -180,8 +180,8 @@ def test_template(setup_http_mock):
"header": "api-key",
},
},
"headers": "X-Header:123\nX-Header2:{{#a.b123.args2#}}",
"params": "A:b\nTemplate:{{#a.b123.args2#}}",
"headers": "X-Header:123\nX-Header2:{{#a.args2#}}",
"params": "A:b\nTemplate:{{#a.args2#}}",
"body": None,
},
}
@ -223,7 +223,7 @@ def test_json(setup_http_mock):
{
"key": "",
"type": "text",
"value": '{"a": "{{#a.b123.args1#}}"}',
"value": '{"a": "{{#a.args1#}}"}',
},
],
},
@ -264,12 +264,12 @@ def test_x_www_form_urlencoded(setup_http_mock):
{
"key": "a",
"type": "text",
"value": "{{#a.b123.args1#}}",
"value": "{{#a.args1#}}",
},
{
"key": "b",
"type": "text",
"value": "{{#a.b123.args2#}}",
"value": "{{#a.args2#}}",
},
],
},
@ -310,12 +310,12 @@ def test_form_data(setup_http_mock):
{
"key": "a",
"type": "text",
"value": "{{#a.b123.args1#}}",
"value": "{{#a.args1#}}",
},
{
"key": "b",
"type": "text",
"value": "{{#a.b123.args2#}}",
"value": "{{#a.args2#}}",
},
],
},
@ -436,3 +436,87 @@ def test_multi_colons_parse(setup_http_mock):
assert 'form-data; name="Redirect"\r\n\r\nhttp://example6.com' in result.process_data.get("request", "")
# resp = result.outputs
# assert "http://example3.com" == resp.get("headers", {}).get("referer")
@pytest.mark.parametrize("setup_http_mock", [["none"]], indirect=True)
def test_nested_object_variable_selector(setup_http_mock):
"""Test variable selector functionality with nested object properties."""
# Create independent test setup without affecting other tests
graph_config = {
"edges": [
{
"id": "start-source-next-target",
"source": "start",
"target": "1",
},
],
"nodes": [
{"data": {"type": "start"}, "id": "start"},
{
"id": "1",
"data": {
"title": "http",
"desc": "",
"method": "get",
"url": "http://example.com/{{#a.args2#}}/{{#a.args3.nested#}}",
"authorization": {
"type": "api-key",
"config": {
"type": "basic",
"api_key": "ak-xxx",
"header": "api-key",
},
},
"headers": "X-Header:{{#a.args3.nested#}}",
"params": "nested_param:{{#a.args3.nested#}}",
"body": None,
},
},
],
}
graph = Graph.init(graph_config=graph_config)
init_params = GraphInitParams(
tenant_id="1",
app_id="1",
workflow_type=WorkflowType.WORKFLOW,
workflow_id="1",
graph_config=graph_config,
user_id="1",
user_from=UserFrom.ACCOUNT,
invoke_from=InvokeFrom.DEBUGGER,
call_depth=0,
)
# Create independent variable pool for this test only
variable_pool = VariablePool(
system_variables=SystemVariable(user_id="aaa", files=[]),
user_inputs={},
environment_variables=[],
conversation_variables=[],
)
variable_pool.add(["a", "args1"], 1)
variable_pool.add(["a", "args2"], 2)
variable_pool.add(["a", "args3"], {"nested": "nested_value"}) # Only for this test
node = HttpRequestNode(
id=str(uuid.uuid4()),
graph_init_params=init_params,
graph=graph,
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
config=graph_config["nodes"][1],
)
# Initialize node data
if "data" in graph_config["nodes"][1]:
node.init_node_data(graph_config["nodes"][1]["data"])
result = node._run()
assert result.process_data is not None
data = result.process_data.get("request", "")
# Verify nested object property is correctly resolved
assert "/2/nested_value" in data # URL path should contain resolved nested value
assert "X-Header: nested_value" in data # Header should contain nested value
assert "nested_param=nested_value" in data # Param should contain nested value

View File

@ -71,8 +71,8 @@ def init_parameter_extractor_node(config: dict):
environment_variables=[],
conversation_variables=[],
)
variable_pool.add(["a", "b123", "args1"], 1)
variable_pool.add(["a", "b123", "args2"], 2)
variable_pool.add(["a", "args1"], 1)
variable_pool.add(["a", "args2"], 2)
node = ParameterExtractorNode(
id=str(uuid.uuid4()),

View File

@ -26,9 +26,9 @@ def test_execute_code(setup_code_executor_mock):
"variables": [
{
"variable": "args1",
"value_selector": ["1", "123", "args1"],
"value_selector": ["1", "args1"],
},
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
{"variable": "args2", "value_selector": ["1", "args2"]},
],
"template": code,
},
@ -66,8 +66,8 @@ def test_execute_code(setup_code_executor_mock):
environment_variables=[],
conversation_variables=[],
)
variable_pool.add(["1", "123", "args1"], 1)
variable_pool.add(["1", "123", "args2"], 3)
variable_pool.add(["1", "args1"], 1)
variable_pool.add(["1", "args2"], 3)
node = TemplateTransformNode(
id=str(uuid.uuid4()),

View File

@ -81,7 +81,7 @@ def test_tool_variable_invoke():
ToolParameterConfigurationManager.decrypt_tool_parameters = MagicMock(return_value={"format": "%Y-%m-%d %H:%M:%S"})
node.graph_runtime_state.variable_pool.add(["1", "123", "args1"], "1+1")
node.graph_runtime_state.variable_pool.add(["1", "args1"], "1+1")
# execute node
result = node._run()

View File

@ -0,0 +1,775 @@
from unittest.mock import patch
import pytest
from faker import Faker
from models.model import MessageFeedback
from services.app_service import AppService
from services.errors.message import (
FirstMessageNotExistsError,
LastMessageNotExistsError,
MessageNotExistsError,
SuggestedQuestionsAfterAnswerDisabledError,
)
from services.message_service import MessageService
class TestMessageService:
"""Integration tests for MessageService using testcontainers."""
@pytest.fixture
def mock_external_service_dependencies(self):
"""Mock setup for external service dependencies."""
with (
patch("services.account_service.FeatureService") as mock_account_feature_service,
patch("services.message_service.ModelManager") as mock_model_manager,
patch("services.message_service.WorkflowService") as mock_workflow_service,
patch("services.message_service.AdvancedChatAppConfigManager") as mock_app_config_manager,
patch("services.message_service.LLMGenerator") as mock_llm_generator,
patch("services.message_service.TraceQueueManager") as mock_trace_manager_class,
patch("services.message_service.TokenBufferMemory") as mock_token_buffer_memory,
):
# Setup default mock returns
mock_account_feature_service.get_features.return_value.billing.enabled = False
# Mock ModelManager
mock_model_instance = mock_model_manager.return_value.get_default_model_instance.return_value
mock_model_instance.get_tts_voices.return_value = [{"value": "test-voice"}]
# Mock get_model_instance method as well
mock_model_manager.return_value.get_model_instance.return_value = mock_model_instance
# Mock WorkflowService
mock_workflow = mock_workflow_service.return_value.get_published_workflow.return_value
mock_workflow_service.return_value.get_draft_workflow.return_value = mock_workflow
# Mock AdvancedChatAppConfigManager
mock_app_config = mock_app_config_manager.get_app_config.return_value
mock_app_config.additional_features.suggested_questions_after_answer = True
# Mock LLMGenerator
mock_llm_generator.generate_suggested_questions_after_answer.return_value = ["Question 1", "Question 2"]
# Mock TraceQueueManager
mock_trace_manager_instance = mock_trace_manager_class.return_value
# Mock TokenBufferMemory
mock_memory_instance = mock_token_buffer_memory.return_value
mock_memory_instance.get_history_prompt_text.return_value = "Mocked history prompt"
yield {
"account_feature_service": mock_account_feature_service,
"model_manager": mock_model_manager,
"workflow_service": mock_workflow_service,
"app_config_manager": mock_app_config_manager,
"llm_generator": mock_llm_generator,
"trace_manager_class": mock_trace_manager_class,
"trace_manager_instance": mock_trace_manager_instance,
"token_buffer_memory": mock_token_buffer_memory,
# "current_user": mock_current_user,
}
def _create_test_app_and_account(self, db_session_with_containers, mock_external_service_dependencies):
"""
Helper method to create a test app and account for testing.
Args:
db_session_with_containers: Database session from testcontainers infrastructure
mock_external_service_dependencies: Mock dependencies
Returns:
tuple: (app, account) - Created app and account instances
"""
fake = Faker()
# Setup mocks for account creation
mock_external_service_dependencies[
"account_feature_service"
].get_system_features.return_value.is_allow_register = True
# Create account and tenant first
from services.account_service import AccountService, TenantService
account = AccountService.create_account(
email=fake.email(),
name=fake.name(),
interface_language="en-US",
password=fake.password(length=12),
)
TenantService.create_owner_tenant_if_not_exist(account, name=fake.company())
tenant = account.current_tenant
# Setup app creation arguments
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "advanced-chat", # Use advanced-chat mode to use mocked workflow
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
}
# Create app
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
# Setup current_user mock
self._mock_current_user(mock_external_service_dependencies, account.id, tenant.id)
return app, account
def _mock_current_user(self, mock_external_service_dependencies, account_id, tenant_id):
"""
Helper method to mock the current user for testing.
"""
# mock_external_service_dependencies["current_user"].id = account_id
# mock_external_service_dependencies["current_user"].current_tenant_id = tenant_id
def _create_test_conversation(self, app, account, fake):
"""
Helper method to create a test conversation with all required fields.
"""
from extensions.ext_database import db
from models.model import Conversation
conversation = Conversation(
app_id=app.id,
app_model_config_id=None,
model_provider=None,
model_id="",
override_model_configs=None,
mode=app.mode,
name=fake.sentence(),
inputs={},
introduction="",
system_instruction="",
system_instruction_tokens=0,
status="normal",
invoke_from="console",
from_source="console",
from_end_user_id=None,
from_account_id=account.id,
)
db.session.add(conversation)
db.session.flush()
return conversation
def _create_test_message(self, app, conversation, account, fake):
"""
Helper method to create a test message with all required fields.
"""
import json
from extensions.ext_database import db
from models.model import Message
message = Message(
app_id=app.id,
model_provider=None,
model_id="",
override_model_configs=None,
conversation_id=conversation.id,
inputs={},
query=fake.sentence(),
message=json.dumps([{"role": "user", "text": fake.sentence()}]),
message_tokens=0,
message_unit_price=0,
message_price_unit=0.001,
answer=fake.text(max_nb_chars=200),
answer_tokens=0,
answer_unit_price=0,
answer_price_unit=0.001,
parent_message_id=None,
provider_response_latency=0,
total_price=0,
currency="USD",
invoke_from="console",
from_source="console",
from_end_user_id=None,
from_account_id=account.id,
)
db.session.add(message)
db.session.commit()
return message
def test_pagination_by_first_id_success(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test successful pagination by first ID.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and multiple messages
conversation = self._create_test_conversation(app, account, fake)
messages = []
for i in range(5):
message = self._create_test_message(app, conversation, account, fake)
messages.append(message)
# Test pagination by first ID
result = MessageService.pagination_by_first_id(
app_model=app,
user=account,
conversation_id=conversation.id,
first_id=messages[2].id, # Use middle message as first_id
limit=2,
order="asc",
)
# Verify results
assert result.limit == 2
assert len(result.data) == 2
# total 5, from the middle, no more
assert result.has_more is False
# Verify messages are in ascending order
assert result.data[0].created_at <= result.data[1].created_at
def test_pagination_by_first_id_no_user(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test pagination by first ID when no user is provided.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Test pagination with no user
result = MessageService.pagination_by_first_id(
app_model=app, user=None, conversation_id=fake.uuid4(), first_id=None, limit=10
)
# Verify empty result
assert result.limit == 10
assert len(result.data) == 0
assert result.has_more is False
def test_pagination_by_first_id_no_conversation_id(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test pagination by first ID when no conversation ID is provided.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Test pagination with no conversation ID
result = MessageService.pagination_by_first_id(
app_model=app, user=account, conversation_id="", first_id=None, limit=10
)
# Verify empty result
assert result.limit == 10
assert len(result.data) == 0
assert result.has_more is False
def test_pagination_by_first_id_invalid_first_id(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test pagination by first ID with invalid first_id.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and message
conversation = self._create_test_conversation(app, account, fake)
self._create_test_message(app, conversation, account, fake)
# Test pagination with invalid first_id
with pytest.raises(FirstMessageNotExistsError):
MessageService.pagination_by_first_id(
app_model=app,
user=account,
conversation_id=conversation.id,
first_id=fake.uuid4(), # Non-existent message ID
limit=10,
)
def test_pagination_by_last_id_success(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test successful pagination by last ID.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and multiple messages
conversation = self._create_test_conversation(app, account, fake)
messages = []
for i in range(5):
message = self._create_test_message(app, conversation, account, fake)
messages.append(message)
# Test pagination by last ID
result = MessageService.pagination_by_last_id(
app_model=app,
user=account,
last_id=messages[2].id, # Use middle message as last_id
limit=2,
conversation_id=conversation.id,
)
# Verify results
assert result.limit == 2
assert len(result.data) == 2
# total 5, from the middle, no more
assert result.has_more is False
# Verify messages are in descending order
assert result.data[0].created_at >= result.data[1].created_at
def test_pagination_by_last_id_with_include_ids(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test pagination by last ID with include_ids filter.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and multiple messages
conversation = self._create_test_conversation(app, account, fake)
messages = []
for i in range(5):
message = self._create_test_message(app, conversation, account, fake)
messages.append(message)
# Test pagination with include_ids
include_ids = [messages[0].id, messages[1].id, messages[2].id]
result = MessageService.pagination_by_last_id(
app_model=app, user=account, last_id=messages[1].id, limit=2, include_ids=include_ids
)
# Verify results
assert result.limit == 2
assert len(result.data) <= 2
# Verify all returned messages are in include_ids
for message in result.data:
assert message.id in include_ids
def test_pagination_by_last_id_no_user(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test pagination by last ID when no user is provided.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Test pagination with no user
result = MessageService.pagination_by_last_id(app_model=app, user=None, last_id=None, limit=10)
# Verify empty result
assert result.limit == 10
assert len(result.data) == 0
assert result.has_more is False
def test_pagination_by_last_id_invalid_last_id(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test pagination by last ID with invalid last_id.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and message
conversation = self._create_test_conversation(app, account, fake)
self._create_test_message(app, conversation, account, fake)
# Test pagination with invalid last_id
with pytest.raises(LastMessageNotExistsError):
MessageService.pagination_by_last_id(
app_model=app,
user=account,
last_id=fake.uuid4(), # Non-existent message ID
limit=10,
conversation_id=conversation.id,
)
def test_create_feedback_success(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test successful creation of feedback.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and message
conversation = self._create_test_conversation(app, account, fake)
message = self._create_test_message(app, conversation, account, fake)
# Create feedback
rating = "like"
content = fake.text(max_nb_chars=100)
feedback = MessageService.create_feedback(
app_model=app, message_id=message.id, user=account, rating=rating, content=content
)
# Verify feedback was created correctly
assert feedback.app_id == app.id
assert feedback.conversation_id == conversation.id
assert feedback.message_id == message.id
assert feedback.rating == rating
assert feedback.content == content
assert feedback.from_source == "admin"
assert feedback.from_account_id == account.id
assert feedback.from_end_user_id is None
def test_create_feedback_no_user(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test creating feedback when no user is provided.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and message
conversation = self._create_test_conversation(app, account, fake)
message = self._create_test_message(app, conversation, account, fake)
# Test creating feedback with no user
with pytest.raises(ValueError, match="user cannot be None"):
MessageService.create_feedback(
app_model=app, message_id=message.id, user=None, rating="like", content=fake.text(max_nb_chars=100)
)
def test_create_feedback_update_existing(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test updating existing feedback.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and message
conversation = self._create_test_conversation(app, account, fake)
message = self._create_test_message(app, conversation, account, fake)
# Create initial feedback
initial_rating = "like"
initial_content = fake.text(max_nb_chars=100)
feedback = MessageService.create_feedback(
app_model=app, message_id=message.id, user=account, rating=initial_rating, content=initial_content
)
# Update feedback
updated_rating = "dislike"
updated_content = fake.text(max_nb_chars=100)
updated_feedback = MessageService.create_feedback(
app_model=app, message_id=message.id, user=account, rating=updated_rating, content=updated_content
)
# Verify feedback was updated correctly
assert updated_feedback.id == feedback.id
assert updated_feedback.rating == updated_rating
assert updated_feedback.content == updated_content
assert updated_feedback.rating != initial_rating
assert updated_feedback.content != initial_content
def test_create_feedback_delete_existing(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test deleting existing feedback by setting rating to None.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and message
conversation = self._create_test_conversation(app, account, fake)
message = self._create_test_message(app, conversation, account, fake)
# Create initial feedback
feedback = MessageService.create_feedback(
app_model=app, message_id=message.id, user=account, rating="like", content=fake.text(max_nb_chars=100)
)
# Delete feedback by setting rating to None
MessageService.create_feedback(app_model=app, message_id=message.id, user=account, rating=None, content=None)
# Verify feedback was deleted
from extensions.ext_database import db
deleted_feedback = db.session.query(MessageFeedback).filter(MessageFeedback.id == feedback.id).first()
assert deleted_feedback is None
def test_create_feedback_no_rating_when_not_exists(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test creating feedback with no rating when feedback doesn't exist.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and message
conversation = self._create_test_conversation(app, account, fake)
message = self._create_test_message(app, conversation, account, fake)
# Test creating feedback with no rating when no feedback exists
with pytest.raises(ValueError, match="rating cannot be None when feedback not exists"):
MessageService.create_feedback(
app_model=app, message_id=message.id, user=account, rating=None, content=None
)
def test_get_all_messages_feedbacks_success(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test successful retrieval of all message feedbacks.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create multiple conversations and messages with feedbacks
feedbacks = []
for i in range(3):
conversation = self._create_test_conversation(app, account, fake)
message = self._create_test_message(app, conversation, account, fake)
feedback = MessageService.create_feedback(
app_model=app,
message_id=message.id,
user=account,
rating="like" if i % 2 == 0 else "dislike",
content=f"Feedback {i}: {fake.text(max_nb_chars=50)}",
)
feedbacks.append(feedback)
# Get all feedbacks
result = MessageService.get_all_messages_feedbacks(app, page=1, limit=10)
# Verify results
assert len(result) == 3
# Verify feedbacks are ordered by created_at desc
for i in range(len(result) - 1):
assert result[i]["created_at"] >= result[i + 1]["created_at"]
def test_get_all_messages_feedbacks_pagination(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test pagination of message feedbacks.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create multiple conversations and messages with feedbacks
for i in range(5):
conversation = self._create_test_conversation(app, account, fake)
message = self._create_test_message(app, conversation, account, fake)
MessageService.create_feedback(
app_model=app, message_id=message.id, user=account, rating="like", content=f"Feedback {i}"
)
# Get feedbacks with pagination
result_page_1 = MessageService.get_all_messages_feedbacks(app, page=1, limit=3)
result_page_2 = MessageService.get_all_messages_feedbacks(app, page=2, limit=3)
# Verify pagination results
assert len(result_page_1) == 3
assert len(result_page_2) == 2
# Verify no overlap between pages
page_1_ids = {feedback["id"] for feedback in result_page_1}
page_2_ids = {feedback["id"] for feedback in result_page_2}
assert len(page_1_ids.intersection(page_2_ids)) == 0
def test_get_message_success(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test successful retrieval of message.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and message
conversation = self._create_test_conversation(app, account, fake)
message = self._create_test_message(app, conversation, account, fake)
# Get message
retrieved_message = MessageService.get_message(app_model=app, user=account, message_id=message.id)
# Verify message was retrieved correctly
assert retrieved_message.id == message.id
assert retrieved_message.app_id == app.id
assert retrieved_message.conversation_id == conversation.id
assert retrieved_message.from_source == "console"
assert retrieved_message.from_account_id == account.id
def test_get_message_not_exists(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test getting message that doesn't exist.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Test getting non-existent message
with pytest.raises(MessageNotExistsError):
MessageService.get_message(app_model=app, user=account, message_id=fake.uuid4())
def test_get_message_wrong_user(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test getting message with wrong user (different account).
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and message
conversation = self._create_test_conversation(app, account, fake)
message = self._create_test_message(app, conversation, account, fake)
# Create another account
from services.account_service import AccountService, TenantService
other_account = AccountService.create_account(
email=fake.email(),
name=fake.name(),
interface_language="en-US",
password=fake.password(length=12),
)
TenantService.create_owner_tenant_if_not_exist(other_account, name=fake.company())
# Test getting message with different user
with pytest.raises(MessageNotExistsError):
MessageService.get_message(app_model=app, user=other_account, message_id=message.id)
def test_get_suggested_questions_after_answer_success(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test successful generation of suggested questions after answer.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and message
conversation = self._create_test_conversation(app, account, fake)
message = self._create_test_message(app, conversation, account, fake)
# Mock the LLMGenerator to return specific questions
mock_questions = ["What is AI?", "How does machine learning work?", "Tell me about neural networks"]
mock_external_service_dependencies[
"llm_generator"
].generate_suggested_questions_after_answer.return_value = mock_questions
# Get suggested questions
from core.app.entities.app_invoke_entities import InvokeFrom
result = MessageService.get_suggested_questions_after_answer(
app_model=app, user=account, message_id=message.id, invoke_from=InvokeFrom.SERVICE_API
)
# Verify results
assert result == mock_questions
# Verify LLMGenerator was called
mock_external_service_dependencies[
"llm_generator"
].generate_suggested_questions_after_answer.assert_called_once()
# Verify TraceQueueManager was called
mock_external_service_dependencies["trace_manager_instance"].add_trace_task.assert_called_once()
def test_get_suggested_questions_after_answer_no_user(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test getting suggested questions when no user is provided.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and message
conversation = self._create_test_conversation(app, account, fake)
message = self._create_test_message(app, conversation, account, fake)
# Test getting suggested questions with no user
from core.app.entities.app_invoke_entities import InvokeFrom
with pytest.raises(ValueError, match="user cannot be None"):
MessageService.get_suggested_questions_after_answer(
app_model=app, user=None, message_id=message.id, invoke_from=InvokeFrom.SERVICE_API
)
def test_get_suggested_questions_after_answer_disabled(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test getting suggested questions when feature is disabled.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and message
conversation = self._create_test_conversation(app, account, fake)
message = self._create_test_message(app, conversation, account, fake)
# Mock the feature to be disabled
mock_external_service_dependencies[
"app_config_manager"
].get_app_config.return_value.additional_features.suggested_questions_after_answer = False
# Test getting suggested questions when feature is disabled
from core.app.entities.app_invoke_entities import InvokeFrom
with pytest.raises(SuggestedQuestionsAfterAnswerDisabledError):
MessageService.get_suggested_questions_after_answer(
app_model=app, user=account, message_id=message.id, invoke_from=InvokeFrom.SERVICE_API
)
def test_get_suggested_questions_after_answer_no_workflow(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test getting suggested questions when no workflow exists.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and message
conversation = self._create_test_conversation(app, account, fake)
message = self._create_test_message(app, conversation, account, fake)
# Mock no workflow
mock_external_service_dependencies["workflow_service"].return_value.get_published_workflow.return_value = None
# Get suggested questions (should return empty list)
from core.app.entities.app_invoke_entities import InvokeFrom
result = MessageService.get_suggested_questions_after_answer(
app_model=app, user=account, message_id=message.id, invoke_from=InvokeFrom.SERVICE_API
)
# Verify empty result
assert result == []
def test_get_suggested_questions_after_answer_debugger_mode(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test getting suggested questions in debugger mode.
"""
fake = Faker()
app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies)
# Create a conversation and message
conversation = self._create_test_conversation(app, account, fake)
message = self._create_test_message(app, conversation, account, fake)
# Mock questions
mock_questions = ["Debug question 1", "Debug question 2"]
mock_external_service_dependencies[
"llm_generator"
].generate_suggested_questions_after_answer.return_value = mock_questions
# Get suggested questions in debugger mode
from core.app.entities.app_invoke_entities import InvokeFrom
result = MessageService.get_suggested_questions_after_answer(
app_model=app, user=account, message_id=message.id, invoke_from=InvokeFrom.DEBUGGER
)
# Verify results
assert result == mock_questions
# Verify draft workflow was used instead of published workflow
mock_external_service_dependencies["workflow_service"].return_value.get_draft_workflow.assert_called_once_with(
app_model=app
)
# Verify TraceQueueManager was called
mock_external_service_dependencies["trace_manager_instance"].add_trace_task.assert_called_once()
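Taken together, the suggested-questions tests above pin down a small contract: a missing user is a ValueError, a disabled feature raises SuggestedQuestionsAfterAnswerDisabledError, a missing workflow yields an empty list, and debugger calls read the draft workflow while API calls read the published one. A minimal sketch of that contract, with a local stand-in exception so the snippet stays self-contained (it is an illustration, not the shipped MessageService.get_suggested_questions_after_answer):

```python
# Illustrative contract only; the real checks live in MessageService.
from core.app.entities.app_invoke_entities import InvokeFrom


class SuggestedQuestionsAfterAnswerDisabledError(Exception):
    """Stand-in for the service-layer error asserted in the tests above."""


def suggested_questions_contract(user, features, draft_workflow, published_workflow, invoke_from):
    if user is None:
        raise ValueError("user cannot be None")
    if not features.suggested_questions_after_answer:
        raise SuggestedQuestionsAfterAnswerDisabledError()
    # Debugger sessions read the draft workflow; API traffic reads the published one.
    workflow = draft_workflow if invoke_from == InvokeFrom.DEBUGGER else published_workflow
    if workflow is None:
        return []  # nothing to generate questions from
    # In the real service this is where LLMGenerator.generate_suggested_questions_after_answer runs.
    return ["placeholder question"]
```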

View File

@ -12,6 +12,10 @@ from services.workflow_draft_variable_service import (
)
def _get_random_variable_name(fake: Faker):
return "".join(fake.random_letters(length=10))
class TestWorkflowDraftVariableService:
"""
Comprehensive integration tests for WorkflowDraftVariableService using testcontainers.
@ -112,7 +116,14 @@ class TestWorkflowDraftVariableService:
return workflow
def _create_test_variable(
self, db_session_with_containers, app_id, node_id, name, value, variable_type="conversation", fake=None
self,
db_session_with_containers,
app_id,
node_id,
name,
value,
variable_type: DraftVariableType = DraftVariableType.CONVERSATION,
fake=None,
):
"""
Helper method to create a test workflow draft variable with proper configuration.
@ -227,7 +238,13 @@ class TestWorkflowDraftVariableService:
db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, "var2", var2_value, fake=fake
)
var3 = self._create_test_variable(
db_session_with_containers, app.id, "test_node_1", "var3", var3_value, "node", fake=fake
db_session_with_containers,
app.id,
"test_node_1",
"var3",
var3_value,
variable_type=DraftVariableType.NODE,
fake=fake,
)
selectors = [
[CONVERSATION_VARIABLE_NODE_ID, "var1"],
@ -263,9 +280,14 @@ class TestWorkflowDraftVariableService:
fake = Faker()
app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake)
for i in range(5):
test_value = StringSegment(value=fake.numerify("value##"))
test_value = StringSegment(value=fake.numerify("value######"))
self._create_test_variable(
db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, fake.word(), test_value, fake=fake
db_session_with_containers,
app.id,
CONVERSATION_VARIABLE_NODE_ID,
_get_random_variable_name(fake),
test_value,
fake=fake,
)
service = WorkflowDraftVariableService(db_session_with_containers)
result = service.list_variables_without_values(app.id, page=1, limit=3)
@ -291,10 +313,32 @@ class TestWorkflowDraftVariableService:
var1_value = StringSegment(value=fake.word())
var2_value = StringSegment(value=fake.word())
var3_value = StringSegment(value=fake.word())
self._create_test_variable(db_session_with_containers, app.id, node_id, "var1", var1_value, "node", fake=fake)
self._create_test_variable(db_session_with_containers, app.id, node_id, "var2", var3_value, "node", fake=fake)
self._create_test_variable(
db_session_with_containers, app.id, "other_node", "var3", var2_value, "node", fake=fake
db_session_with_containers,
app.id,
node_id,
"var1",
var1_value,
variable_type=DraftVariableType.NODE,
fake=fake,
)
self._create_test_variable(
db_session_with_containers,
app.id,
node_id,
"var2",
var3_value,
variable_type=DraftVariableType.NODE,
fake=fake,
)
self._create_test_variable(
db_session_with_containers,
app.id,
"other_node",
"var3",
var2_value,
variable_type=DraftVariableType.NODE,
fake=fake,
)
service = WorkflowDraftVariableService(db_session_with_containers)
result = service.list_node_variables(app.id, node_id)
@ -328,7 +372,13 @@ class TestWorkflowDraftVariableService:
)
sys_var_value = StringSegment(value=fake.word())
self._create_test_variable(
db_session_with_containers, app.id, SYSTEM_VARIABLE_NODE_ID, "sys_var", sys_var_value, "system", fake=fake
db_session_with_containers,
app.id,
SYSTEM_VARIABLE_NODE_ID,
"sys_var",
sys_var_value,
variable_type=DraftVariableType.SYS,
fake=fake,
)
service = WorkflowDraftVariableService(db_session_with_containers)
result = service.list_conversation_variables(app.id)
@ -480,14 +530,24 @@ class TestWorkflowDraftVariableService:
fake = Faker()
app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake)
for i in range(3):
test_value = StringSegment(value=fake.numerify("value##"))
test_value = StringSegment(value=fake.numerify("value######"))
self._create_test_variable(
db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, fake.word(), test_value, fake=fake
db_session_with_containers,
app.id,
CONVERSATION_VARIABLE_NODE_ID,
_get_random_variable_name(fake),
test_value,
fake=fake,
)
other_app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake)
other_value = StringSegment(value=fake.word())
self._create_test_variable(
db_session_with_containers, other_app.id, CONVERSATION_VARIABLE_NODE_ID, fake.word(), other_value, fake=fake
db_session_with_containers,
other_app.id,
CONVERSATION_VARIABLE_NODE_ID,
_get_random_variable_name(fake),
other_value,
fake=fake,
)
from extensions.ext_database import db
@ -515,17 +575,34 @@ class TestWorkflowDraftVariableService:
app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake)
node_id = fake.word()
for i in range(2):
test_value = StringSegment(value=fake.numerify("node_value##"))
test_value = StringSegment(value=fake.numerify("node_value######"))
self._create_test_variable(
db_session_with_containers, app.id, node_id, fake.word(), test_value, "node", fake=fake
db_session_with_containers,
app.id,
node_id,
_get_random_variable_name(fake),
test_value,
variable_type=DraftVariableType.NODE,
fake=fake,
)
other_node_value = StringSegment(value=fake.word())
self._create_test_variable(
db_session_with_containers, app.id, "other_node", fake.word(), other_node_value, "node", fake=fake
db_session_with_containers,
app.id,
"other_node",
_get_random_variable_name(fake),
other_node_value,
variable_type=DraftVariableType.NODE,
fake=fake,
)
conv_value = StringSegment(value=fake.word())
self._create_test_variable(
db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, fake.word(), conv_value, fake=fake
db_session_with_containers,
app.id,
CONVERSATION_VARIABLE_NODE_ID,
_get_random_variable_name(fake),
conv_value,
fake=fake,
)
from extensions.ext_database import db
@ -627,7 +704,7 @@ class TestWorkflowDraftVariableService:
SYSTEM_VARIABLE_NODE_ID,
"conversation_id",
conv_id_value,
"system",
variable_type=DraftVariableType.SYS,
fake=fake,
)
service = WorkflowDraftVariableService(db_session_with_containers)
@ -664,10 +741,22 @@ class TestWorkflowDraftVariableService:
sys_var1_value = StringSegment(value=fake.word())
sys_var2_value = StringSegment(value=fake.word())
sys_var1 = self._create_test_variable(
db_session_with_containers, app.id, SYSTEM_VARIABLE_NODE_ID, "sys_var1", sys_var1_value, "system", fake=fake
db_session_with_containers,
app.id,
SYSTEM_VARIABLE_NODE_ID,
"sys_var1",
sys_var1_value,
variable_type=DraftVariableType.SYS,
fake=fake,
)
sys_var2 = self._create_test_variable(
db_session_with_containers, app.id, SYSTEM_VARIABLE_NODE_ID, "sys_var2", sys_var2_value, "system", fake=fake
db_session_with_containers,
app.id,
SYSTEM_VARIABLE_NODE_ID,
"sys_var2",
sys_var2_value,
variable_type=DraftVariableType.SYS,
fake=fake,
)
conv_var_value = StringSegment(value=fake.word())
self._create_test_variable(
@ -701,10 +790,22 @@ class TestWorkflowDraftVariableService:
db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, "test_conv_var", test_value, fake=fake
)
sys_var = self._create_test_variable(
db_session_with_containers, app.id, SYSTEM_VARIABLE_NODE_ID, "test_sys_var", test_value, "system", fake=fake
db_session_with_containers,
app.id,
SYSTEM_VARIABLE_NODE_ID,
"test_sys_var",
test_value,
variable_type=DraftVariableType.SYS,
fake=fake,
)
node_var = self._create_test_variable(
db_session_with_containers, app.id, "test_node", "test_node_var", test_value, "node", fake=fake
db_session_with_containers,
app.id,
"test_node",
"test_node_var",
test_value,
variable_type=DraftVariableType.NODE,
fake=fake,
)
service = WorkflowDraftVariableService(db_session_with_containers)
retrieved_conv_var = service.get_conversation_variable(app.id, "test_conv_var")

View File

@ -0,0 +1,124 @@
import time
from unittest.mock import MagicMock, patch
import pytest
from core.app.features.rate_limiting.rate_limit import RateLimit
@pytest.fixture
def mock_redis():
"""Mock Redis client with realistic behavior for rate limiting tests."""
mock_client = MagicMock()
# Redis data storage for simulation
mock_data = {}
mock_hashes = {}
mock_expiry = {}
def mock_setex(key, ttl, value):
mock_data[key] = str(value)
mock_expiry[key] = time.time() + (ttl.total_seconds() if hasattr(ttl, "total_seconds") else ttl)
return True
def mock_get(key):
if key in mock_data and (key not in mock_expiry or time.time() < mock_expiry[key]):
return mock_data[key].encode("utf-8")
return None
def mock_exists(key):
return key in mock_data or key in mock_hashes
def mock_expire(key, ttl):
if key in mock_data or key in mock_hashes:
mock_expiry[key] = time.time() + (ttl.total_seconds() if hasattr(ttl, "total_seconds") else ttl)
return True
def mock_hset(key, field, value):
if key not in mock_hashes:
mock_hashes[key] = {}
mock_hashes[key][field] = str(value).encode("utf-8")
return True
def mock_hgetall(key):
return mock_hashes.get(key, {})
def mock_hdel(key, *fields):
if key in mock_hashes:
count = 0
for field in fields:
if field in mock_hashes[key]:
del mock_hashes[key][field]
count += 1
return count
return 0
def mock_hlen(key):
return len(mock_hashes.get(key, {}))
# Configure mock methods
mock_client.setex = mock_setex
mock_client.get = mock_get
mock_client.exists = mock_exists
mock_client.expire = mock_expire
mock_client.hset = mock_hset
mock_client.hgetall = mock_hgetall
mock_client.hdel = mock_hdel
mock_client.hlen = mock_hlen
# Store references for test verification
mock_client._mock_data = mock_data
mock_client._mock_hashes = mock_hashes
mock_client._mock_expiry = mock_expiry
return mock_client
@pytest.fixture
def mock_time():
"""Mock time.time() for deterministic tests."""
mock_time_val = 1000.0
def increment_time(seconds=1):
nonlocal mock_time_val
mock_time_val += seconds
return mock_time_val
with patch("time.time", return_value=mock_time_val) as mock:
mock.increment = increment_time
yield mock
@pytest.fixture
def sample_generator():
"""Sample generator for testing RateLimitGenerator."""
def _create_generator(items=None, raise_error=False):
items = items or ["item1", "item2", "item3"]
for item in items:
if raise_error and item == "item2":
raise ValueError("Test error")
yield item
return _create_generator
@pytest.fixture
def sample_mapping():
"""Sample mapping for testing RateLimitGenerator."""
return {"key1": "value1", "key2": "value2"}
@pytest.fixture(autouse=True)
def reset_rate_limit_instances():
"""Clear RateLimit singleton instances between tests."""
RateLimit._instance_dict.clear()
yield
RateLimit._instance_dict.clear()
@pytest.fixture
def redis_patch():
"""Patch redis_client globally for rate limit tests."""
with patch("core.app.features.rate_limiting.rate_limit.redis_client") as mock:
yield mock

View File

@ -0,0 +1,569 @@
import threading
import time
from datetime import timedelta
from unittest.mock import patch
import pytest
from core.app.features.rate_limiting.rate_limit import RateLimit
from core.errors.error import AppInvokeQuotaExceededError
class TestRateLimit:
"""Core rate limiting functionality tests."""
def test_should_return_same_instance_for_same_client_id(self, redis_patch):
"""Test singleton behavior for same client ID."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
}
)
rate_limit1 = RateLimit("client1", 5)
rate_limit2 = RateLimit("client1", 10) # Second instance with different limit
assert rate_limit1 is rate_limit2
# Current implementation: last constructor call overwrites max_active_requests
# This reflects the actual behavior where __init__ always sets max_active_requests
assert rate_limit1.max_active_requests == 10
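The comments above note that the second constructor call overwrites max_active_requests even though both calls return the same object. A minimal sketch of that per-client singleton pattern, reusing only the names visible in these tests (_instance_dict, client_id, max_active_requests, initialized); the body is an assumption, not the shipped RateLimit class:

```python
# Hypothetical illustration of the singleton behavior asserted above.
class ClientSingleton:
    _instance_dict: dict[str, "ClientSingleton"] = {}

    def __new__(cls, client_id: str, max_active_requests: int):
        # One shared instance per client_id.
        if client_id not in cls._instance_dict:
            cls._instance_dict[client_id] = super().__new__(cls)
        return cls._instance_dict[client_id]

    def __init__(self, client_id: str, max_active_requests: int):
        self.client_id = client_id
        # __init__ runs on every construction, so the last call wins,
        # which is exactly what the assertion above relies on.
        self.max_active_requests = max_active_requests
        if max_active_requests > 0 and not hasattr(self, "initialized"):
            self.initialized = True  # one-time setup (e.g. the first Redis flush) goes here
```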
def test_should_create_different_instances_for_different_client_ids(self, redis_patch):
"""Test different instances for different client IDs."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
}
)
rate_limit1 = RateLimit("client1", 5)
rate_limit2 = RateLimit("client2", 10)
assert rate_limit1 is not rate_limit2
assert rate_limit1.client_id == "client1"
assert rate_limit2.client_id == "client2"
def test_should_initialize_with_valid_parameters(self, redis_patch):
"""Test normal initialization."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
}
)
rate_limit = RateLimit("test_client", 5)
assert rate_limit.client_id == "test_client"
assert rate_limit.max_active_requests == 5
assert hasattr(rate_limit, "initialized")
redis_patch.setex.assert_called_once()
def test_should_skip_initialization_if_disabled(self):
"""Test no initialization when rate limiting is disabled."""
rate_limit = RateLimit("test_client", 0)
assert rate_limit.disabled()
assert not hasattr(rate_limit, "initialized")
def test_should_skip_reinitialization_of_existing_instance(self, redis_patch):
"""Test that existing instance doesn't reinitialize."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
}
)
RateLimit("client1", 5)
redis_patch.reset_mock()
RateLimit("client1", 10)
redis_patch.setex.assert_not_called()
def test_should_be_disabled_when_max_requests_is_zero_or_negative(self):
"""Test disabled state for zero or negative limits."""
rate_limit_zero = RateLimit("client1", 0)
rate_limit_negative = RateLimit("client2", -5)
assert rate_limit_zero.disabled()
assert rate_limit_negative.disabled()
def test_should_set_redis_keys_on_first_flush(self, redis_patch):
"""Test Redis keys are set correctly on initial flush."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
}
)
rate_limit = RateLimit("test_client", 5)
expected_max_key = "dify:rate_limit:test_client:max_active_requests"
redis_patch.setex.assert_called_with(expected_max_key, timedelta(days=1), 5)
def test_should_sync_max_requests_from_redis_on_subsequent_flush(self, redis_patch):
"""Test max requests syncs from Redis when key exists."""
redis_patch.configure_mock(
**{
"exists.return_value": True,
"get.return_value": b"10",
"expire.return_value": True,
}
)
rate_limit = RateLimit("test_client", 5)
rate_limit.flush_cache()
assert rate_limit.max_active_requests == 10
@patch("time.time")
def test_should_clean_timeout_requests_from_active_list(self, mock_time, redis_patch):
"""Test cleanup of timed-out requests."""
current_time = 1000.0
mock_time.return_value = current_time
# Set up mock Redis with timed-out requests
timeout_requests = {
b"req1": str(current_time - 700).encode(), # 700 seconds ago (timeout)
b"req2": str(current_time - 100).encode(), # 100 seconds ago (active)
}
redis_patch.configure_mock(
**{
"exists.return_value": True,
"get.return_value": b"5",
"expire.return_value": True,
"hgetall.return_value": timeout_requests,
"hdel.return_value": 1,
}
)
rate_limit = RateLimit("test_client", 5)
redis_patch.reset_mock() # Reset to avoid counting initialization calls
rate_limit.flush_cache()
# Verify timeout request was cleaned up
redis_patch.hdel.assert_called_once()
call_args = redis_patch.hdel.call_args[0]
assert call_args[0] == "dify:rate_limit:test_client:active_requests"
assert b"req1" in call_args # Timeout request should be removed
assert b"req2" not in call_args # Active request should remain
class TestRateLimitEnterExit:
"""Rate limiting enter/exit logic tests."""
def test_should_allow_request_within_limit(self, redis_patch):
"""Test allowing requests within the rate limit."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
"hlen.return_value": 2,
"hset.return_value": True,
}
)
rate_limit = RateLimit("test_client", 5)
request_id = rate_limit.enter()
assert request_id != RateLimit._UNLIMITED_REQUEST_ID
redis_patch.hset.assert_called_once()
def test_should_generate_request_id_if_not_provided(self, redis_patch):
"""Test auto-generation of request ID."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
"hlen.return_value": 0,
"hset.return_value": True,
}
)
rate_limit = RateLimit("test_client", 5)
request_id = rate_limit.enter()
assert len(request_id) == 36 # UUID format
def test_should_use_provided_request_id(self, redis_patch):
"""Test using provided request ID."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
"hlen.return_value": 0,
"hset.return_value": True,
}
)
rate_limit = RateLimit("test_client", 5)
custom_id = "custom_request_123"
request_id = rate_limit.enter(custom_id)
assert request_id == custom_id
def test_should_remove_request_on_exit(self, redis_patch):
"""Test request removal on exit."""
redis_patch.configure_mock(
**{
"hdel.return_value": 1,
}
)
rate_limit = RateLimit("test_client", 5)
rate_limit.exit("test_request_id")
redis_patch.hdel.assert_called_once_with("dify:rate_limit:test_client:active_requests", "test_request_id")
def test_should_raise_quota_exceeded_when_at_limit(self, redis_patch):
"""Test quota exceeded error when at limit."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
"hlen.return_value": 5, # At limit
}
)
rate_limit = RateLimit("test_client", 5)
with pytest.raises(AppInvokeQuotaExceededError) as exc_info:
rate_limit.enter()
assert "Too many requests" in str(exc_info.value)
assert "test_client" in str(exc_info.value)
def test_should_allow_request_after_previous_exit(self, redis_patch):
"""Test allowing new request after previous exit."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
"hlen.return_value": 4, # Under limit after exit
"hset.return_value": True,
"hdel.return_value": 1,
}
)
rate_limit = RateLimit("test_client", 5)
request_id = rate_limit.enter()
rate_limit.exit(request_id)
new_request_id = rate_limit.enter()
assert new_request_id is not None
@patch("time.time")
def test_should_flush_cache_when_interval_exceeded(self, mock_time, redis_patch):
"""Test cache flush when time interval exceeded."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
"hlen.return_value": 0,
}
)
mock_time.return_value = 1000.0
rate_limit = RateLimit("test_client", 5)
# Advance time beyond flush interval
mock_time.return_value = 1400.0 # 400 seconds later
redis_patch.reset_mock()
rate_limit.enter()
# Should have called setex again due to cache flush
redis_patch.setex.assert_called()
def test_should_return_unlimited_id_when_disabled(self):
"""Test unlimited ID return when rate limiting disabled."""
rate_limit = RateLimit("test_client", 0)
request_id = rate_limit.enter()
assert request_id == RateLimit._UNLIMITED_REQUEST_ID
def test_should_ignore_exit_for_unlimited_requests(self, redis_patch):
"""Test ignoring exit for unlimited requests."""
rate_limit = RateLimit("test_client", 0)
rate_limit.exit(RateLimit._UNLIMITED_REQUEST_ID)
redis_patch.hdel.assert_not_called()
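The enter/exit tests above fix the bookkeeping: active requests live in a per-client Redis hash, enter() rejects once the hash holds max_active_requests entries, and exit() deletes the entry. A hedged sketch of that flow using only the key layout, error type, and Redis calls asserted above; the function names and signatures are illustrative, not the real methods:

```python
import time
import uuid

from core.errors.error import AppInvokeQuotaExceededError


def enter_request(redis_client, client_id: str, max_active_requests: int, request_id: str | None = None) -> str:
    active_key = f"dify:rate_limit:{client_id}:active_requests"
    if redis_client.hlen(active_key) >= max_active_requests:
        raise AppInvokeQuotaExceededError(f"Too many requests for app {client_id}")
    request_id = request_id or str(uuid.uuid4())  # 36-character UUID, as asserted above
    redis_client.hset(active_key, request_id, str(time.time()))
    return request_id


def exit_request(redis_client, client_id: str, request_id: str) -> None:
    redis_client.hdel(f"dify:rate_limit:{client_id}:active_requests", request_id)
```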
class TestRateLimitGenerator:
"""Rate limit generator wrapper tests."""
def test_should_wrap_generator_and_iterate_normally(self, redis_patch, sample_generator):
"""Test normal generator iteration with rate limit wrapper."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
"hdel.return_value": 1,
}
)
rate_limit = RateLimit("test_client", 5)
generator = sample_generator()
request_id = "test_request"
wrapped_gen = rate_limit.generate(generator, request_id)
result = list(wrapped_gen)
assert result == ["item1", "item2", "item3"]
redis_patch.hdel.assert_called_once_with("dify:rate_limit:test_client:active_requests", request_id)
def test_should_handle_mapping_input_directly(self, sample_mapping):
"""Test direct return of mapping input."""
rate_limit = RateLimit("test_client", 0) # Disabled
result = rate_limit.generate(sample_mapping, "test_request")
assert result is sample_mapping
def test_should_cleanup_on_exception_during_iteration(self, redis_patch, sample_generator):
"""Test cleanup when exception occurs during iteration."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
"hdel.return_value": 1,
}
)
rate_limit = RateLimit("test_client", 5)
generator = sample_generator(raise_error=True)
request_id = "test_request"
wrapped_gen = rate_limit.generate(generator, request_id)
with pytest.raises(ValueError):
list(wrapped_gen)
redis_patch.hdel.assert_called_once_with("dify:rate_limit:test_client:active_requests", request_id)
def test_should_cleanup_on_explicit_close(self, redis_patch, sample_generator):
"""Test cleanup on explicit generator close."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
"hdel.return_value": 1,
}
)
rate_limit = RateLimit("test_client", 5)
generator = sample_generator()
request_id = "test_request"
wrapped_gen = rate_limit.generate(generator, request_id)
wrapped_gen.close()
redis_patch.hdel.assert_called_once()
def test_should_handle_generator_without_close_method(self, redis_patch):
"""Test handling generator without close method."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
"hdel.return_value": 1,
}
)
# Create a generator-like object without close method
class SimpleGenerator:
def __init__(self):
self.items = ["test"]
self.index = 0
def __iter__(self):
return self
def __next__(self):
if self.index >= len(self.items):
raise StopIteration
item = self.items[self.index]
self.index += 1
return item
rate_limit = RateLimit("test_client", 5)
generator = SimpleGenerator()
wrapped_gen = rate_limit.generate(generator, "test_request")
wrapped_gen.close() # Should not raise error
redis_patch.hdel.assert_called_once()
def test_should_prevent_iteration_after_close(self, redis_patch, sample_generator):
"""Test StopIteration after generator is closed."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
"hdel.return_value": 1,
}
)
rate_limit = RateLimit("test_client", 5)
generator = sample_generator()
wrapped_gen = rate_limit.generate(generator, "test_request")
wrapped_gen.close()
with pytest.raises(StopIteration):
next(wrapped_gen)
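These generator tests all reduce to one guarantee: the active-request slot is released exactly once, whether the wrapped generator is exhausted, raises, or is closed before it ever runs, and a closed wrapper refuses further iteration (mappings, by contrast, pass through generate() untouched). A hedged sketch of a wrapper that satisfies those assertions; the real class behind RateLimit.generate may differ in detail:

```python
from collections.abc import Iterator
from typing import Any, Callable


class CleanupGenerator:
    """Release the rate-limit slot exactly once, however iteration ends."""

    def __init__(self, source: Iterator[Any], release: Callable[[], Any]):
        self._source = source
        self._release = release  # e.g. lambda: redis_client.hdel(active_key, request_id)
        self._closed = False

    def __iter__(self) -> "CleanupGenerator":
        return self

    def __next__(self) -> Any:
        if self._closed:
            raise StopIteration  # closed wrappers refuse further iteration
        try:
            return next(self._source)
        except BaseException:
            self.close()  # covers exhaustion (StopIteration) and real errors alike
            raise

    def close(self) -> None:
        if self._closed:
            return
        self._closed = True
        if hasattr(self._source, "close"):
            self._source.close()  # plain iterators may not expose close()
        self._release()
```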
class TestRateLimitConcurrency:
"""Concurrent access safety tests."""
def test_should_handle_concurrent_instance_creation(self, redis_patch):
"""Test thread-safe singleton instance creation."""
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
}
)
instances = []
errors = []
def create_instance():
try:
instance = RateLimit("concurrent_client", 5)
instances.append(instance)
except Exception as e:
errors.append(e)
threads = [threading.Thread(target=create_instance) for _ in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
assert len(errors) == 0
assert len({id(inst) for inst in instances}) == 1 # All same instance
def test_should_handle_concurrent_enter_requests(self, redis_patch):
"""Test concurrent enter requests handling."""
# Set up the mock to simulate realistic Redis behavior
request_count = 0
def mock_hlen(key):
nonlocal request_count
return request_count
def mock_hset(key, field, value):
nonlocal request_count
request_count += 1
return True
redis_patch.configure_mock(
**{
"exists.return_value": False,
"setex.return_value": True,
"hlen.side_effect": mock_hlen,
"hset.side_effect": mock_hset,
}
)
rate_limit = RateLimit("concurrent_client", 3)
results = []
errors = []
def try_enter():
try:
request_id = rate_limit.enter()
results.append(request_id)
except AppInvokeQuotaExceededError as e:
errors.append(e)
threads = [threading.Thread(target=try_enter) for _ in range(5)]
for t in threads:
t.start()
for t in threads:
t.join()
# Should have some successful requests and some quota exceeded
assert len(results) + len(errors) == 5
assert len(errors) > 0 # Some should be rejected
@patch("time.time")
def test_should_maintain_accurate_count_under_load(self, mock_time, redis_patch):
"""Test accurate count maintenance under concurrent load."""
mock_time.return_value = 1000.0
# Use a thread-safe in-memory Redis stand-in for a more realistic simulation
mock_client = self._create_mock_redis()
redis_patch.configure_mock(**mock_client)
rate_limit = RateLimit("load_test_client", 10)
active_requests = []
def enter_and_exit():
try:
request_id = rate_limit.enter()
active_requests.append(request_id)
time.sleep(0.01) # Simulate some work
rate_limit.exit(request_id)
active_requests.remove(request_id)
except AppInvokeQuotaExceededError:
pass # Expected under load
threads = [threading.Thread(target=enter_and_exit) for _ in range(20)]
for t in threads:
t.start()
for t in threads:
t.join()
# All requests should have been cleaned up
assert len(active_requests) == 0
def _create_mock_redis(self):
"""Create a thread-safe mock Redis for concurrency tests."""
import threading
lock = threading.Lock()
data = {}
hashes = {}
def mock_hlen(key):
with lock:
return len(hashes.get(key, {}))
def mock_hset(key, field, value):
with lock:
if key not in hashes:
hashes[key] = {}
hashes[key][field] = str(value).encode("utf-8")
return True
def mock_hdel(key, *fields):
with lock:
if key in hashes:
count = 0
for field in fields:
if field in hashes[key]:
del hashes[key][field]
count += 1
return count
return 0
return {
"exists.return_value": False,
"setex.return_value": True,
"hlen.side_effect": mock_hlen,
"hset.side_effect": mock_hset,
"hdel.side_effect": mock_hdel,
}

View File

@ -69,8 +69,12 @@ def test_get_file_attribute(pool, file):
def test_use_long_selector(pool):
pool.add(("node_1", "part_1", "part_2"), StringSegment(value="test_value"))
# The add method now only accepts 2-element selectors (node_id, variable_name)
# Store nested data as an ObjectSegment instead
nested_data = {"part_2": "test_value"}
pool.add(("node_1", "part_1"), ObjectSegment(value=nested_data))
# The get method supports longer selectors for nested access
result = pool.get(("node_1", "part_1", "part_2"))
assert result is not None
assert result.value == "test_value"
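The comments above capture the new selector contract: add() takes exactly (node_id, variable_name), nested data is stored as an ObjectSegment, and get() accepts longer selectors to drill into it. A minimal sketch of that contract, assuming a pool created with VariablePool.empty() as in the removed variable_utils tests further down:

```python
from core.variables.segments import ObjectSegment
from core.workflow.entities.variable_pool import VariablePool

pool = VariablePool.empty()
# Writes use exactly (node_id, variable_name); nested data goes in as one ObjectSegment.
pool.add(("node_1", "part_1"), ObjectSegment(value={"part_2": "test_value"}))
# Reads may use a longer selector to resolve into the stored object.
segment = pool.get(("node_1", "part_1", "part_2"))
assert segment is not None and segment.value == "test_value"
```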
@ -280,8 +284,10 @@ class TestVariablePoolSerialization:
pool.add((self._NODE2_ID, "array_file"), ArrayFileSegment(value=[test_file]))
pool.add((self._NODE2_ID, "array_any"), ArrayAnySegment(value=["mixed", 123, {"key": "value"}]))
# Add nested variables
pool.add((self._NODE3_ID, "nested", "deep", "var"), StringSegment(value="deep_value"))
# Add nested variables as ObjectSegment
# The add method only accepts 2-element selectors
nested_obj = {"deep": {"var": "deep_value"}}
pool.add((self._NODE3_ID, "nested"), ObjectSegment(value=nested_obj))
def test_system_variables(self):
sys_vars = SystemVariable(

View File

@ -1,148 +0,0 @@
from typing import Any
from core.variables.segments import ObjectSegment, StringSegment
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.utils.variable_utils import append_variables_recursively
class TestAppendVariablesRecursively:
"""Test cases for append_variables_recursively function"""
def test_append_simple_dict_value(self):
"""Test appending a simple dictionary value"""
pool = VariablePool.empty()
node_id = "test_node"
variable_key_list = ["output"]
variable_value = {"name": "John", "age": 30}
append_variables_recursively(pool, node_id, variable_key_list, variable_value)
# Check that the main variable is added
main_var = pool.get([node_id] + variable_key_list)
assert main_var is not None
assert main_var.value == variable_value
# Check that nested variables are added recursively
name_var = pool.get([node_id] + variable_key_list + ["name"])
assert name_var is not None
assert name_var.value == "John"
age_var = pool.get([node_id] + variable_key_list + ["age"])
assert age_var is not None
assert age_var.value == 30
def test_append_object_segment_value(self):
"""Test appending an ObjectSegment value"""
pool = VariablePool.empty()
node_id = "test_node"
variable_key_list = ["result"]
# Create an ObjectSegment
obj_data = {"status": "success", "code": 200}
variable_value = ObjectSegment(value=obj_data)
append_variables_recursively(pool, node_id, variable_key_list, variable_value)
# Check that the main variable is added
main_var = pool.get([node_id] + variable_key_list)
assert main_var is not None
assert isinstance(main_var, ObjectSegment)
assert main_var.value == obj_data
# Check that nested variables are added recursively
status_var = pool.get([node_id] + variable_key_list + ["status"])
assert status_var is not None
assert status_var.value == "success"
code_var = pool.get([node_id] + variable_key_list + ["code"])
assert code_var is not None
assert code_var.value == 200
def test_append_nested_dict_value(self):
"""Test appending a nested dictionary value"""
pool = VariablePool.empty()
node_id = "test_node"
variable_key_list = ["data"]
variable_value = {
"user": {
"profile": {"name": "Alice", "email": "alice@example.com"},
"settings": {"theme": "dark", "notifications": True},
},
"metadata": {"version": "1.0", "timestamp": 1234567890},
}
append_variables_recursively(pool, node_id, variable_key_list, variable_value)
# Check deeply nested variables
name_var = pool.get([node_id] + variable_key_list + ["user", "profile", "name"])
assert name_var is not None
assert name_var.value == "Alice"
email_var = pool.get([node_id] + variable_key_list + ["user", "profile", "email"])
assert email_var is not None
assert email_var.value == "alice@example.com"
theme_var = pool.get([node_id] + variable_key_list + ["user", "settings", "theme"])
assert theme_var is not None
assert theme_var.value == "dark"
notifications_var = pool.get([node_id] + variable_key_list + ["user", "settings", "notifications"])
assert notifications_var is not None
assert notifications_var.value == 1 # Boolean True is converted to integer 1
version_var = pool.get([node_id] + variable_key_list + ["metadata", "version"])
assert version_var is not None
assert version_var.value == "1.0"
def test_append_non_dict_value(self):
"""Test appending a non-dictionary value (should not recurse)"""
pool = VariablePool.empty()
node_id = "test_node"
variable_key_list = ["simple"]
variable_value = "simple_string"
append_variables_recursively(pool, node_id, variable_key_list, variable_value)
# Check that only the main variable is added
main_var = pool.get([node_id] + variable_key_list)
assert main_var is not None
assert main_var.value == variable_value
# Ensure no additional variables are created
assert len(pool.variable_dictionary[node_id]) == 1
def test_append_segment_non_object_value(self):
"""Test appending a Segment that is not ObjectSegment (should not recurse)"""
pool = VariablePool.empty()
node_id = "test_node"
variable_key_list = ["text"]
variable_value = StringSegment(value="Hello World")
append_variables_recursively(pool, node_id, variable_key_list, variable_value)
# Check that only the main variable is added
main_var = pool.get([node_id] + variable_key_list)
assert main_var is not None
assert isinstance(main_var, StringSegment)
assert main_var.value == "Hello World"
# Ensure no additional variables are created
assert len(pool.variable_dictionary[node_id]) == 1
def test_append_empty_dict_value(self):
"""Test appending an empty dictionary value"""
pool = VariablePool.empty()
node_id = "test_node"
variable_key_list = ["empty"]
variable_value: dict[str, Any] = {}
append_variables_recursively(pool, node_id, variable_key_list, variable_value)
# Check that the main variable is added
main_var = pool.get([node_id] + variable_key_list)
assert main_var is not None
assert main_var.value == {}
# Ensure only the main variable is created (no recursion for empty dict)
assert len(pool.variable_dictionary[node_id]) == 1

View File

@ -1236,7 +1236,7 @@ wheels = [
[[package]]
name = "dify-api"
version = "1.7.1"
version = "1.7.2"
source = { virtual = "." }
dependencies = [
{ name = "arize-phoenix-otel" },

View File

@ -907,6 +907,9 @@ TEXT_GENERATION_TIMEOUT_MS=60000
# Allow rendering unsafe URLs which have "data:" scheme.
ALLOW_UNSAFE_DATA_SCHEME=false
# Maximum depth of the workflow tree
MAX_TREE_DEPTH=50
# ------------------------------
# Environment Variables for db Service
# ------------------------------

View File

@ -2,7 +2,7 @@ x-shared-env: &shared-api-worker-env
services:
# API service
api:
image: langgenius/dify-api:1.7.1
image: langgenius/dify-api:1.7.2
restart: always
environment:
# Use the shared environment variables.
@ -31,7 +31,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:1.7.1
image: langgenius/dify-api:1.7.2
restart: always
environment:
# Use the shared environment variables.
@ -58,7 +58,7 @@ services:
# worker_beat service
# Celery beat for scheduling periodic tasks.
worker_beat:
image: langgenius/dify-api:1.7.1
image: langgenius/dify-api:1.7.2
restart: always
environment:
# Use the shared environment variables.
@ -76,7 +76,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:1.7.1
image: langgenius/dify-web:1.7.2
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}

View File

@ -404,6 +404,7 @@ x-shared-env: &shared-api-worker-env
MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99}
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false}
MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50}
POSTGRES_USER: ${POSTGRES_USER:-${DB_USERNAME}}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}}
POSTGRES_DB: ${POSTGRES_DB:-${DB_DATABASE}}
@ -567,7 +568,7 @@ x-shared-env: &shared-api-worker-env
services:
# API service
api:
image: langgenius/dify-api:1.7.1
image: langgenius/dify-api:1.7.2
restart: always
environment:
# Use the shared environment variables.
@ -596,7 +597,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:1.7.1
image: langgenius/dify-api:1.7.2
restart: always
environment:
# Use the shared environment variables.
@ -623,7 +624,7 @@ services:
# worker_beat service
# Celery beat for scheduling periodic tasks.
worker_beat:
image: langgenius/dify-api:1.7.1
image: langgenius/dify-api:1.7.2
restart: always
environment:
# Use the shared environment variables.
@ -641,7 +642,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:1.7.1
image: langgenius/dify-web:1.7.2
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}

View File

@ -0,0 +1,333 @@
import React from 'react'
import { fireEvent, render, screen } from '@testing-library/react'
import '@testing-library/jest-dom'
import CommandSelector from '../../app/components/goto-anything/command-selector'
import type { ActionItem } from '../../app/components/goto-anything/actions/types'
jest.mock('react-i18next', () => ({
useTranslation: () => ({
t: (key: string) => key,
}),
}))
jest.mock('cmdk', () => ({
Command: {
Group: ({ children, className }: any) => <div className={className}>{children}</div>,
Item: ({ children, onSelect, value, className }: any) => (
<div
className={className}
onClick={() => onSelect && onSelect()}
data-value={value}
data-testid={`command-item-${value}`}
>
{children}
</div>
),
},
}))
describe('CommandSelector', () => {
const mockActions: Record<string, ActionItem> = {
app: {
key: '@app',
shortcut: '@app',
title: 'Search Applications',
description: 'Search apps',
search: jest.fn(),
},
knowledge: {
key: '@knowledge',
shortcut: '@knowledge',
title: 'Search Knowledge',
description: 'Search knowledge bases',
search: jest.fn(),
},
plugin: {
key: '@plugin',
shortcut: '@plugin',
title: 'Search Plugins',
description: 'Search plugins',
search: jest.fn(),
},
node: {
key: '@node',
shortcut: '@node',
title: 'Search Nodes',
description: 'Search workflow nodes',
search: jest.fn(),
},
}
const mockOnCommandSelect = jest.fn()
const mockOnCommandValueChange = jest.fn()
beforeEach(() => {
jest.clearAllMocks()
})
describe('Basic Rendering', () => {
it('should render all actions when no filter is provided', () => {
render(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
/>,
)
expect(screen.getByTestId('command-item-@app')).toBeInTheDocument()
expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument()
expect(screen.getByTestId('command-item-@plugin')).toBeInTheDocument()
expect(screen.getByTestId('command-item-@node')).toBeInTheDocument()
})
it('should treat an empty filter as showing all actions', () => {
render(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter=""
/>,
)
expect(screen.getByTestId('command-item-@app')).toBeInTheDocument()
expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument()
expect(screen.getByTestId('command-item-@plugin')).toBeInTheDocument()
expect(screen.getByTestId('command-item-@node')).toBeInTheDocument()
})
})
describe('Filtering Functionality', () => {
it('should filter actions based on searchFilter - single match', () => {
render(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter="k"
/>,
)
expect(screen.queryByTestId('command-item-@app')).not.toBeInTheDocument()
expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument()
expect(screen.queryByTestId('command-item-@plugin')).not.toBeInTheDocument()
expect(screen.queryByTestId('command-item-@node')).not.toBeInTheDocument()
})
it('should filter actions with multiple matches', () => {
render(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter="p"
/>,
)
expect(screen.getByTestId('command-item-@app')).toBeInTheDocument()
expect(screen.queryByTestId('command-item-@knowledge')).not.toBeInTheDocument()
expect(screen.getByTestId('command-item-@plugin')).toBeInTheDocument()
expect(screen.queryByTestId('command-item-@node')).not.toBeInTheDocument()
})
it('should be case-insensitive when filtering', () => {
render(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter="APP"
/>,
)
expect(screen.getByTestId('command-item-@app')).toBeInTheDocument()
expect(screen.queryByTestId('command-item-@knowledge')).not.toBeInTheDocument()
})
it('should match partial strings', () => {
render(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter="nowl"
/>,
)
expect(screen.queryByTestId('command-item-@app')).not.toBeInTheDocument()
expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument()
expect(screen.queryByTestId('command-item-@plugin')).not.toBeInTheDocument()
expect(screen.queryByTestId('command-item-@node')).not.toBeInTheDocument()
})
})
describe('Empty State', () => {
it('should show empty state when no matches found', () => {
render(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter="xyz"
/>,
)
expect(screen.queryByTestId('command-item-@app')).not.toBeInTheDocument()
expect(screen.queryByTestId('command-item-@knowledge')).not.toBeInTheDocument()
expect(screen.queryByTestId('command-item-@plugin')).not.toBeInTheDocument()
expect(screen.queryByTestId('command-item-@node')).not.toBeInTheDocument()
expect(screen.getByText('app.gotoAnything.noMatchingCommands')).toBeInTheDocument()
expect(screen.getByText('app.gotoAnything.tryDifferentSearch')).toBeInTheDocument()
})
it('should not show empty state when filter is empty', () => {
render(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter=""
/>,
)
expect(screen.queryByText('app.gotoAnything.noMatchingCommands')).not.toBeInTheDocument()
})
})
describe('Selection and Highlight Management', () => {
it('should call onCommandValueChange when filter changes and first item differs', () => {
const { rerender } = render(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter=""
commandValue="@app"
onCommandValueChange={mockOnCommandValueChange}
/>,
)
rerender(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter="k"
commandValue="@app"
onCommandValueChange={mockOnCommandValueChange}
/>,
)
expect(mockOnCommandValueChange).toHaveBeenCalledWith('@knowledge')
})
it('should not call onCommandValueChange if current value still exists', () => {
const { rerender } = render(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter=""
commandValue="@app"
onCommandValueChange={mockOnCommandValueChange}
/>,
)
rerender(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter="a"
commandValue="@app"
onCommandValueChange={mockOnCommandValueChange}
/>,
)
expect(mockOnCommandValueChange).not.toHaveBeenCalled()
})
it('should handle onCommandSelect callback correctly', () => {
render(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter="k"
/>,
)
const knowledgeItem = screen.getByTestId('command-item-@knowledge')
fireEvent.click(knowledgeItem)
expect(mockOnCommandSelect).toHaveBeenCalledWith('@knowledge')
})
})
describe('Edge Cases', () => {
it('should handle empty actions object', () => {
render(
<CommandSelector
actions={{}}
onCommandSelect={mockOnCommandSelect}
searchFilter=""
/>,
)
expect(screen.getByText('app.gotoAnything.noMatchingCommands')).toBeInTheDocument()
})
it('should handle special characters in filter', () => {
render(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter="@"
/>,
)
expect(screen.getByTestId('command-item-@app')).toBeInTheDocument()
expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument()
expect(screen.getByTestId('command-item-@plugin')).toBeInTheDocument()
expect(screen.getByTestId('command-item-@node')).toBeInTheDocument()
})
it('should handle undefined onCommandValueChange gracefully', () => {
const { rerender } = render(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter=""
/>,
)
expect(() => {
rerender(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter="k"
/>,
)
}).not.toThrow()
})
})
describe('Backward Compatibility', () => {
it('should work without searchFilter prop (backward compatible)', () => {
render(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
/>,
)
expect(screen.getByTestId('command-item-@app')).toBeInTheDocument()
expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument()
expect(screen.getByTestId('command-item-@plugin')).toBeInTheDocument()
expect(screen.getByTestId('command-item-@node')).toBeInTheDocument()
})
it('should work without commandValue and onCommandValueChange props', () => {
render(
<CommandSelector
actions={mockActions}
onCommandSelect={mockOnCommandSelect}
searchFilter="k"
/>,
)
expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument()
expect(screen.queryByTestId('command-item-@app')).not.toBeInTheDocument()
})
})
})

View File

@ -0,0 +1,197 @@
/**
* Test GotoAnything search error handling mechanisms
*
* Main validations:
* 1. @plugin search error handling when API fails
* 2. Regular search (without @prefix) error handling when API fails
* 3. Verify consistent error handling across different search types
* 4. Ensure errors don't propagate to the UI layer and surface as a "search failed" state
*/
import { Actions, searchAnything } from '@/app/components/goto-anything/actions'
import { postMarketplace } from '@/service/base'
import { fetchAppList } from '@/service/apps'
import { fetchDatasets } from '@/service/datasets'
// Mock API functions
jest.mock('@/service/base', () => ({
postMarketplace: jest.fn(),
}))
jest.mock('@/service/apps', () => ({
fetchAppList: jest.fn(),
}))
jest.mock('@/service/datasets', () => ({
fetchDatasets: jest.fn(),
}))
const mockPostMarketplace = postMarketplace as jest.MockedFunction<typeof postMarketplace>
const mockFetchAppList = fetchAppList as jest.MockedFunction<typeof fetchAppList>
const mockFetchDatasets = fetchDatasets as jest.MockedFunction<typeof fetchDatasets>
describe('GotoAnything Search Error Handling', () => {
beforeEach(() => {
jest.clearAllMocks()
// Suppress console.warn for clean test output
jest.spyOn(console, 'warn').mockImplementation(() => {
// Suppress console.warn for clean test output
})
})
afterEach(() => {
jest.restoreAllMocks()
})
describe('@plugin search error handling', () => {
it('should return empty array when API fails instead of throwing error', async () => {
// Mock marketplace API failure (403 permission denied)
mockPostMarketplace.mockRejectedValue(new Error('HTTP 403: Forbidden'))
const pluginAction = Actions.plugin
// Directly call plugin action's search method
const result = await pluginAction.search('@plugin', 'test', 'en')
// Should return empty array instead of throwing error
expect(result).toEqual([])
expect(mockPostMarketplace).toHaveBeenCalledWith('/plugins/search/advanced', {
body: {
page: 1,
page_size: 10,
query: 'test',
type: 'plugin',
},
})
})
it('should return empty array when user has no plugin data', async () => {
// Mock marketplace returning empty data
mockPostMarketplace.mockResolvedValue({
data: { plugins: [] },
})
const pluginAction = Actions.plugin
const result = await pluginAction.search('@plugin', '', 'en')
expect(result).toEqual([])
})
it('should return empty array when API returns unexpected data structure', async () => {
// Mock API returning unexpected data structure
mockPostMarketplace.mockResolvedValue({
data: null,
})
const pluginAction = Actions.plugin
const result = await pluginAction.search('@plugin', 'test', 'en')
expect(result).toEqual([])
})
})
describe('Other search types error handling', () => {
it('@app search should return empty array when API fails', async () => {
// Mock app API failure
mockFetchAppList.mockRejectedValue(new Error('API Error'))
const appAction = Actions.app
const result = await appAction.search('@app', 'test', 'en')
expect(result).toEqual([])
})
it('@knowledge search should return empty array when API fails', async () => {
// Mock knowledge API failure
mockFetchDatasets.mockRejectedValue(new Error('API Error'))
const knowledgeAction = Actions.knowledge
const result = await knowledgeAction.search('@knowledge', 'test', 'en')
expect(result).toEqual([])
})
})
describe('Unified search entry error handling', () => {
it('regular search (without @ prefix) should return results from successful APIs even when some fail', async () => {
// Set app and knowledge success, plugin failure
mockFetchAppList.mockResolvedValue({ data: [], has_more: false, limit: 10, page: 1, total: 0 })
mockFetchDatasets.mockResolvedValue({ data: [], has_more: false, limit: 10, page: 1, total: 0 })
mockPostMarketplace.mockRejectedValue(new Error('Plugin API failed'))
const result = await searchAnything('en', 'test')
// Should return successful results even if plugin search fails
expect(result).toEqual([])
expect(console.warn).toHaveBeenCalledWith('Plugin search failed:', expect.any(Error))
})
it('@plugin dedicated search should return empty array when API fails', async () => {
// Mock plugin API failure
mockPostMarketplace.mockRejectedValue(new Error('Plugin service unavailable'))
const pluginAction = Actions.plugin
const result = await searchAnything('en', '@plugin test', pluginAction)
// Should return empty array instead of throwing error
expect(result).toEqual([])
})
it('@app dedicated search should return empty array when API fails', async () => {
// Mock app API failure
mockFetchAppList.mockRejectedValue(new Error('App service unavailable'))
const appAction = Actions.app
const result = await searchAnything('en', '@app test', appAction)
expect(result).toEqual([])
})
})
describe('Error handling consistency validation', () => {
it('all search types should return empty array when encountering errors', async () => {
// Mock all APIs to fail
mockPostMarketplace.mockRejectedValue(new Error('Plugin API failed'))
mockFetchAppList.mockRejectedValue(new Error('App API failed'))
mockFetchDatasets.mockRejectedValue(new Error('Dataset API failed'))
const actions = [
{ name: '@plugin', action: Actions.plugin },
{ name: '@app', action: Actions.app },
{ name: '@knowledge', action: Actions.knowledge },
]
for (const { name, action } of actions) {
const result = await action.search(name, 'test', 'en')
expect(result).toEqual([])
}
})
})
describe('Edge case testing', () => {
it('empty search term should be handled properly', async () => {
mockPostMarketplace.mockResolvedValue({ data: { plugins: [] } })
const result = await searchAnything('en', '@plugin ', Actions.plugin)
expect(result).toEqual([])
})
it('network timeout should be handled correctly', async () => {
const timeoutError = new Error('Network timeout')
timeoutError.name = 'TimeoutError'
mockPostMarketplace.mockRejectedValue(timeoutError)
const result = await searchAnything('en', '@plugin test', Actions.plugin)
expect(result).toEqual([])
})
it('JSON parsing errors should be handled correctly', async () => {
const parseError = new SyntaxError('Unexpected token in JSON')
mockPostMarketplace.mockRejectedValue(parseError)
const result = await searchAnything('en', '@plugin test', Actions.plugin)
expect(result).toEqual([])
})
})
})

View File

@ -15,6 +15,7 @@ type IModal = {
children?: React.ReactNode
closable?: boolean
overflowVisible?: boolean
highPriority?: boolean // For modals that need to appear above dropdowns
}
export default function Modal({
@ -27,10 +28,11 @@ export default function Modal({
children,
closable = false,
overflowVisible = false,
highPriority = false,
}: IModal) {
return (
<Transition appear show={isShow} as={Fragment}>
<Dialog as="div" className={classNames('relative z-[60]', wrapperClassName)} onClose={onClose}>
<Dialog as="div" className={classNames('relative', highPriority ? 'z-[1100]' : 'z-[60]', wrapperClassName)} onClose={onClose}>
<TransitionChild>
<div className={classNames(
'fixed inset-0 bg-background-overlay',

View File

@ -79,7 +79,7 @@ const TagFilter: FC<TagFilterProps> = ({
className='block'
>
<div className={cn(
'flex h-8 cursor-pointer items-center gap-1 rounded-lg border-[0.5px] border-transparent bg-components-input-bg-normal px-2',
'flex h-8 cursor-pointer select-none items-center gap-1 rounded-lg border-[0.5px] border-transparent bg-components-input-bg-normal px-2',
!open && !!value.length && 'shadow-xs',
open && !!value.length && 'shadow-xs',
)}>
@ -123,7 +123,7 @@ const TagFilter: FC<TagFilterProps> = ({
{filteredTagList.map(tag => (
<div
key={tag.id}
className='flex cursor-pointer items-center gap-2 rounded-lg py-[6px] pl-3 pr-2 hover:bg-state-base-hover'
className='flex cursor-pointer select-none items-center gap-2 rounded-lg py-[6px] pl-3 pr-2 hover:bg-state-base-hover'
onClick={() => selectTag(tag)}
>
<div title={tag.name} className='grow truncate text-sm leading-5 text-text-tertiary'>{tag.name}</div>
@ -139,7 +139,7 @@ const TagFilter: FC<TagFilterProps> = ({
</div>
<div className='border-t-[0.5px] border-divider-regular' />
<div className='p-1'>
<div className='flex cursor-pointer items-center gap-2 rounded-lg py-[6px] pl-3 pr-2 hover:bg-state-base-hover' onClick={() => {
<div className='flex cursor-pointer select-none items-center gap-2 rounded-lg py-[6px] pl-3 pr-2 hover:bg-state-base-hover' onClick={() => {
setShowTagManagementModal(true)
setOpen(false)
}}>

View File

@ -38,16 +38,21 @@ export const appAction: ActionItem = {
title: 'Search Applications',
description: 'Search and navigate to your applications',
// action,
search: async (_, searchTerm = '', locale) => {
const response = (await fetchAppList({
url: 'apps',
params: {
page: 1,
name: searchTerm,
},
}))
const apps = response.data || []
return parser(apps)
search: async (_, searchTerm = '', _locale) => {
try {
const response = await fetchAppList({
url: 'apps',
params: {
page: 1,
name: searchTerm,
},
})
const apps = response?.data || []
return parser(apps)
}
catch (error) {
console.warn('App search failed:', error)
return []
}
},
}

View File

@ -18,7 +18,13 @@ export const searchAnything = async (
): Promise<SearchResult[]> => {
if (actionItem) {
const searchTerm = query.replace(actionItem.key, '').replace(actionItem.shortcut, '').trim()
return await actionItem.search(query, searchTerm, locale)
try {
return await actionItem.search(query, searchTerm, locale)
}
catch (error) {
console.warn(`Search failed for ${actionItem.key}:`, error)
return []
}
}
if (query.startsWith('@'))

View File

@ -35,16 +35,22 @@ export const knowledgeAction: ActionItem = {
title: 'Search Knowledge Bases',
description: 'Search and navigate to your knowledge bases',
// action,
search: async (_, searchTerm = '', locale) => {
const response = await fetchDatasets({
url: '/datasets',
params: {
page: 1,
limit: 10,
keyword: searchTerm,
},
})
return parser(response.data)
search: async (_, searchTerm = '', _locale) => {
try {
const response = await fetchDatasets({
url: '/datasets',
params: {
page: 1,
limit: 10,
keyword: searchTerm,
},
})
const datasets = response?.data || []
return parser(datasets)
}
catch (error) {
console.warn('Knowledge search failed:', error)
return []
}
},
}

View File

@ -24,18 +24,30 @@ export const pluginAction: ActionItem = {
title: 'Search Plugins',
description: 'Search and navigate to your plugins',
search: async (_, searchTerm = '', locale) => {
const response = await postMarketplace<{ data: PluginsFromMarketplaceResponse }>('/plugins/search/advanced', {
body: {
page: 1,
page_size: 10,
query: searchTerm,
type: 'plugin',
},
})
const list = (response.data.plugins || []).map(plugin => ({
...plugin,
icon: getPluginIconInMarketplace(plugin),
}))
return parser(list, locale!)
try {
const response = await postMarketplace<{ data: PluginsFromMarketplaceResponse }>('/plugins/search/advanced', {
body: {
page: 1,
page_size: 10,
query: searchTerm,
type: 'plugin',
},
})
if (!response?.data?.plugins) {
console.warn('Plugin search: Unexpected response structure', response)
return []
}
const list = response.data.plugins.map(plugin => ({
...plugin,
icon: getPluginIconInMarketplace(plugin),
}))
return parser(list, locale!)
}
catch (error) {
console.warn('Plugin search failed:', error)
return []
}
},
}
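
Taken together, the app, knowledge, and plugin search actions above (plus the searchAnything dispatcher) now share one defensive pattern: guard the response shape, fall back to an empty list, and log instead of throwing, so a single failing backend never breaks the palette. Below is a minimal sketch of that pattern as a reusable helper, offered only as an illustration — the safeSearch name and the generic fetcher/parser parameters are not part of the codebase, and SearchResult stands in for the result type the actions already return.

// Sketch only: "fetch, tolerate a missing payload, parse, degrade on error".
async function safeSearch<T>(
  label: string,
  fetcher: () => Promise<T[] | undefined>,
  toResults: (items: T[]) => SearchResult[],
): Promise<SearchResult[]> {
  try {
    const items = (await fetcher()) ?? []   // missing data -> empty list, not a crash
    return toResults(items)
  }
  catch (error) {
    console.warn(`${label} search failed:`, error)  // log and keep the palette usable
    return []
  }
}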

View File

@ -1,6 +1,4 @@
import type { ActionItem } from './types'
import { BoltIcon } from '@heroicons/react/24/outline'
import i18n from 'i18next'
// Create the workflow nodes action
export const workflowNodesAction: ActionItem = {
@ -12,32 +10,14 @@ export const workflowNodesAction: ActionItem = {
search: async (_, searchTerm = '', locale) => {
try {
// Use the searchFn if available (set by useWorkflowSearch hook)
if (workflowNodesAction.searchFn) {
// searchFn already returns SearchResult[] type, no need to use parser
if (workflowNodesAction.searchFn)
return workflowNodesAction.searchFn(searchTerm)
}
// If not in workflow context or search function not registered
if (!searchTerm.trim()) {
return [{
id: 'help',
title: i18n.t('app.gotoAnything.actions.searchWorkflowNodes', { lng: locale }),
description: i18n.t('app.gotoAnything.actions.searchWorkflowNodesHelp', { lng: locale }),
type: 'workflow-node',
path: '#',
data: {} as any,
icon: (
<div className="flex h-8 w-8 shrink-0 items-center justify-center rounded-md bg-blue-50 text-blue-600">
<BoltIcon className="h-5 w-5" />
</div>
),
}]
}
// If not in workflow context, return empty array
return []
}
catch (error) {
console.error('Error searching workflow nodes:', error)
catch (error) {
console.warn('Workflow nodes search failed:', error)
return []
}
},
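
The workflow-nodes action above has no endpoint of its own: it only delegates to workflowNodesAction.searchFn, which the in-code comment says is set by the useWorkflowSearch hook while a workflow canvas is open, and it returns [] otherwise. A hedged sketch of that registration from inside the hook follows — the cleanup on unmount is an assumption for illustration, not a quote from the hook.

// Sketch: expose the in-memory node search only while the workflow is mounted.
useEffect(() => {
  workflowNodesAction.searchFn = searchWorkflowNodes
  return () => {
    // assumed cleanup so the action falls back to [] outside a workflow
    workflowNodesAction.searchFn = undefined
  }
}, [searchWorkflowNodes])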

View File

@ -0,0 +1,88 @@
import type { FC } from 'react'
import { useEffect } from 'react'
import { Command } from 'cmdk'
import { useTranslation } from 'react-i18next'
import type { ActionItem } from './actions/types'
type Props = {
actions: Record<string, ActionItem>
onCommandSelect: (commandKey: string) => void
searchFilter?: string
commandValue?: string
onCommandValueChange?: (value: string) => void
}
const CommandSelector: FC<Props> = ({ actions, onCommandSelect, searchFilter, commandValue, onCommandValueChange }) => {
const { t } = useTranslation()
const filteredActions = Object.values(actions).filter((action) => {
if (!searchFilter)
return true
const filterLower = searchFilter.toLowerCase()
return action.shortcut.toLowerCase().includes(filterLower)
|| action.key.toLowerCase().includes(filterLower)
})
useEffect(() => {
if (filteredActions.length > 0 && onCommandValueChange) {
const currentValueExists = filteredActions.some(action => action.shortcut === commandValue)
if (!currentValueExists)
onCommandValueChange(filteredActions[0].shortcut)
}
}, [searchFilter, filteredActions.length])
if (filteredActions.length === 0) {
return (
<div className="p-4">
<div className="flex items-center justify-center py-8 text-center text-text-tertiary">
<div>
<div className="text-sm font-medium text-text-tertiary">
{t('app.gotoAnything.noMatchingCommands')}
</div>
<div className="mt-1 text-xs text-text-quaternary">
{t('app.gotoAnything.tryDifferentSearch')}
</div>
</div>
</div>
</div>
)
}
return (
<div className="p-4">
<div className="mb-3 text-left text-sm font-medium text-text-secondary">
{t('app.gotoAnything.selectSearchType')}
</div>
<Command.Group className="space-y-1">
{filteredActions.map(action => (
<Command.Item
key={action.key}
value={action.shortcut}
className="flex cursor-pointer items-center rounded-md
p-2.5
transition-all
duration-150 hover:bg-state-base-hover aria-[selected=true]:bg-state-base-hover"
onSelect={() => onCommandSelect(action.shortcut)}
>
<span className="min-w-[4.5rem] text-left font-mono text-xs text-text-tertiary">
{action.shortcut}
</span>
<span className="ml-3 text-sm text-text-secondary">
{(() => {
const keyMap: Record<string, string> = {
'@app': 'app.gotoAnything.actions.searchApplicationsDesc',
'@plugin': 'app.gotoAnything.actions.searchPluginsDesc',
'@knowledge': 'app.gotoAnything.actions.searchKnowledgeBasesDesc',
'@node': 'app.gotoAnything.actions.searchWorkflowNodesDesc',
}
return t(keyMap[action.key])
})()}
</span>
</Command.Item>
))}
</Command.Group>
</div>
)
}
export default CommandSelector
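
One detail worth noting in CommandSelector above: cmdk's selection is controlled, so when the filter hides the currently selected shortcut, the effect snaps the value back to the first visible action, keeping arrow-key navigation pointed at something that is actually rendered. The same invariant as a small standalone helper — a sketch only, the function name is illustrative.

// Sketch: keep the controlled cmdk value on a visible action.
function nextCommandValue(visible: ActionItem[], current?: string): string | undefined {
  if (!visible.length)
    return current
  return visible.some(action => action.shortcut === current) ? current : visible[0].shortcut
}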

View File

@ -17,6 +17,7 @@ import { useTranslation } from 'react-i18next'
import InstallFromMarketplace from '../plugins/install-plugin/install-from-marketplace'
import type { Plugin } from '../plugins/types'
import { Command } from 'cmdk'
import CommandSelector from './command-selector'
type Props = {
onHide?: () => void
@ -81,11 +82,15 @@ const GotoAnything: FC<Props> = ({
wait: 300,
})
const isCommandsMode = searchQuery.trim() === '@' || (searchQuery.trim().startsWith('@') && !matchAction(searchQuery.trim(), Actions))
const searchMode = useMemo(() => {
if (isCommandsMode) return 'commands'
const query = searchQueryDebouncedValue.toLowerCase()
const action = matchAction(query, Actions)
return action ? action.key : 'general'
}, [searchQueryDebouncedValue, Actions])
}, [searchQueryDebouncedValue, Actions, isCommandsMode])
const { data: searchResults = [], isLoading, isError, error } = useQuery(
{
@ -103,12 +108,20 @@ const GotoAnything: FC<Props> = ({
const action = matchAction(query, Actions)
return await searchAnything(defaultLocale, query, action)
},
enabled: !!searchQueryDebouncedValue,
enabled: !!searchQueryDebouncedValue && !isCommandsMode,
staleTime: 30000,
gcTime: 300000,
},
)
const handleCommandSelect = useCallback((commandKey: string) => {
setSearchQuery(`${commandKey} `)
setCmdVal('')
setTimeout(() => {
inputRef.current?.focus()
}, 0)
}, [])
// Handle navigation to selected result
const handleNavigate = useCallback((result: SearchResult) => {
setShow(false)
@ -141,7 +154,7 @@ const GotoAnything: FC<Props> = ({
[searchResults])
const emptyResult = useMemo(() => {
if (searchResults.length || !searchQueryDebouncedValue.trim() || isLoading)
if (searchResults.length || !searchQuery.trim() || isLoading || isCommandsMode)
return null
const isCommandSearch = searchMode !== 'general'
@ -186,34 +199,22 @@ const GotoAnything: FC<Props> = ({
</div>
</div>
)
}, [searchResults, searchQueryDebouncedValue, Actions, searchMode, isLoading, isError])
}, [searchResults, searchQuery, Actions, searchMode, isLoading, isError, isCommandsMode])
const defaultUI = useMemo(() => {
if (searchQueryDebouncedValue.trim())
if (searchQuery.trim())
return null
return (<div className="flex items-center justify-center py-8 text-center text-text-tertiary">
return (<div className="flex items-center justify-center py-12 text-center text-text-tertiary">
<div>
<div className='text-sm font-medium'>{t('app.gotoAnything.searchTitle')}</div>
<div className='mt-3 space-y-2 text-xs text-text-quaternary'>
{Object.values(Actions).map(action => (
<div key={action.key} className='flex items-center gap-2'>
<span className='inline-flex items-center rounded bg-gray-200 px-2 py-1 font-mono text-xs font-medium text-gray-600 dark:bg-gray-700 dark:text-gray-200'>{action.shortcut}</span>
<span>{(() => {
const keyMap: Record<string, string> = {
'@app': 'app.gotoAnything.actions.searchApplicationsDesc',
'@plugin': 'app.gotoAnything.actions.searchPluginsDesc',
'@knowledge': 'app.gotoAnything.actions.searchKnowledgeBasesDesc',
'@node': 'app.gotoAnything.actions.searchWorkflowNodesDesc',
}
return t(keyMap[action.key])
})()}</span>
</div>
))}
<div className='mt-3 space-y-1 text-xs text-text-quaternary'>
<div>{t('app.gotoAnything.searchHint')}</div>
<div>{t('app.gotoAnything.commandHint')}</div>
</div>
</div>
</div>)
}, [searchQueryDebouncedValue, Actions])
}, [searchQuery, Actions])
useEffect(() => {
if (show) {
@ -237,6 +238,7 @@ const GotoAnything: FC<Props> = ({
}}
closable={false}
className='!w-[480px] !p-0'
highPriority={true}
>
<div className='flex flex-col rounded-2xl border border-components-panel-border bg-components-panel-bg shadow-xl'>
<Command
@ -252,8 +254,9 @@ const GotoAnything: FC<Props> = ({
value={searchQuery}
placeholder={t('app.gotoAnything.searchPlaceholder')}
onChange={(e) => {
setCmdVal('')
setSearchQuery(e.target.value)
if (!e.target.value.startsWith('@'))
setCmdVal('')
}}
className='flex-1 !border-0 !bg-transparent !shadow-none'
wrapperClassName='flex-1 !border-0 !bg-transparent'
@ -296,7 +299,16 @@ const GotoAnything: FC<Props> = ({
)}
{!isLoading && !isError && (
<>
{Object.entries(groupedResults).map(([type, results], groupIndex) => (
{isCommandsMode ? (
<CommandSelector
actions={Actions}
onCommandSelect={handleCommandSelect}
searchFilter={searchQuery.trim().substring(1)}
commandValue={cmdVal}
onCommandValueChange={setCmdVal}
/>
) : (
Object.entries(groupedResults).map(([type, results], groupIndex) => (
<Command.Group key={groupIndex} heading={(() => {
const typeMap: Record<string, string> = {
'app': 'app.gotoAnything.groups.apps',
@ -330,9 +342,10 @@ const GotoAnything: FC<Props> = ({
</Command.Item>
))}
</Command.Group>
))}
{emptyResult}
{defaultUI}
))
)}
{!isCommandsMode && emptyResult}
{!isCommandsMode && defaultUI}
</>
)}
</Command.List>

View File

@ -1,6 +1,7 @@
import type { FC } from 'react'
import { useEffect, useRef, useState } from 'react'
import type { ModelParameterRule } from '../declarations'
import { useLanguage } from '../hooks'
import { isNullOrUndefined } from '../utils'
import cn from '@/utils/classnames'
import Switch from '@/app/components/base/switch'
@ -26,6 +27,7 @@ const ParameterItem: FC<ParameterItemProps> = ({
onSwitch,
isInWorkflow,
}) => {
const language = useLanguage()
const [localValue, setLocalValue] = useState(value)
const numberInputRef = useRef<HTMLInputElement>(null)

View File

@ -64,7 +64,7 @@ const TagsFilter = ({
onClick={() => setOpen(v => !v)}
>
<div className={cn(
'ml-0.5 mr-1.5 flex items-center text-text-tertiary ',
'ml-0.5 mr-1.5 flex select-none items-center text-text-tertiary',
size === 'large' && 'h-8 py-1',
size === 'small' && 'h-7 py-0.5 ',
className,
@ -128,7 +128,7 @@ const TagsFilter = ({
filteredOptions.map(option => (
<div
key={option.name}
className='flex h-7 cursor-pointer items-center rounded-lg px-2 py-1.5 hover:bg-state-base-hover'
className='flex h-7 cursor-pointer select-none items-center rounded-lg px-2 py-1.5 hover:bg-state-base-hover'
onClick={() => handleCheck(option.name)}
>
<Checkbox

View File

@ -48,7 +48,7 @@ const TagsFilter = ({
>
<PortalToFollowElemTrigger onClick={() => setOpen(v => !v)}>
<div className={cn(
'flex h-8 cursor-pointer items-center rounded-lg bg-components-input-bg-normal px-2 py-1 text-text-tertiary hover:bg-state-base-hover-alt',
'flex h-8 cursor-pointer select-none items-center rounded-lg bg-components-input-bg-normal px-2 py-1 text-text-tertiary hover:bg-state-base-hover-alt',
selectedTagsLength && 'text-text-secondary',
open && 'bg-state-base-hover',
)}>
@ -99,7 +99,7 @@ const TagsFilter = ({
filteredOptions.map(option => (
<div
key={option.name}
className='flex h-7 cursor-pointer items-center rounded-lg px-2 py-1.5 hover:bg-state-base-hover'
className='flex h-7 cursor-pointer select-none items-center rounded-lg px-2 py-1.5 hover:bg-state-base-hover'
onClick={() => handleCheck(option.name)}
>
<Checkbox

View File

@ -67,7 +67,7 @@ const LabelFilter: FC<LabelFilterProps> = ({
className='block'
>
<div className={cn(
'flex h-8 cursor-pointer items-center gap-1 rounded-lg border-[0.5px] border-transparent bg-components-input-bg-normal px-2 hover:bg-components-input-bg-hover',
'flex h-8 cursor-pointer select-none items-center gap-1 rounded-lg border-[0.5px] border-transparent bg-components-input-bg-normal px-2 hover:bg-components-input-bg-hover',
!open && !!value.length && 'shadow-xs',
open && !!value.length && 'shadow-xs',
)}>
@ -111,7 +111,7 @@ const LabelFilter: FC<LabelFilterProps> = ({
{filteredLabelList.map(label => (
<div
key={label.name}
className='flex cursor-pointer items-center gap-2 rounded-lg py-[6px] pl-3 pr-2 hover:bg-state-base-hover'
className='flex cursor-pointer select-none items-center gap-2 rounded-lg py-[6px] pl-3 pr-2 hover:bg-state-base-hover'
onClick={() => selectLabel(label)}
>
<div title={label.label} className='grow truncate text-sm leading-5 text-text-secondary'>{label.label}</div>

View File

@ -46,9 +46,9 @@ export const useWorkflowSearch = () => {
// Create search function for workflow nodes
const searchWorkflowNodes = useCallback((query: string) => {
if (!searchableNodes.length || !query.trim()) return []
if (!searchableNodes.length) return []
const searchTerm = query.toLowerCase()
const searchTerm = query.toLowerCase().trim()
const results = searchableNodes
.map((node) => {
@ -58,11 +58,18 @@ export const useWorkflowSearch = () => {
let score = 0
if (titleMatch.startsWith(searchTerm)) score += 100
else if (titleMatch.includes(searchTerm)) score += 50
else if (typeMatch === searchTerm) score += 80
else if (typeMatch.includes(searchTerm)) score += 30
else if (descMatch.includes(searchTerm)) score += 20
// If no search term, show all nodes with base score
if (!searchTerm) {
score = 1
}
else {
// Score based on search relevance
if (titleMatch.startsWith(searchTerm)) score += 100
else if (titleMatch.includes(searchTerm)) score += 50
else if (typeMatch === searchTerm) score += 80
else if (typeMatch.includes(searchTerm)) score += 30
else if (descMatch.includes(searchTerm)) score += 20
}
return score > 0
? {
@ -89,6 +96,11 @@ export const useWorkflowSearch = () => {
})
.filter((node): node is NonNullable<typeof node> => node !== null)
.sort((a, b) => {
// If no search term, sort alphabetically
if (!searchTerm)
return a.title.localeCompare(b.title)
// Sort by relevance when searching
const aTitle = a.title.toLowerCase()
const bTitle = b.title.toLowerCase()
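
The scoring change above reads naturally as a small pure function: with an empty term every node gets a baseline score and the final sort is alphabetical; otherwise the first matching rule wins, with title prefix matches weighted highest. A sketch of those rules — callers are assumed to pass already-lowercased strings, as the hook does, and the function name is illustrative.

// Sketch of the relevance rules; 0 means "filter the node out".
function scoreNode(title: string, type: string, desc: string, searchTerm: string): number {
  if (!searchTerm)
    return 1                         // no query: keep every node, sort A-Z later
  if (title.startsWith(searchTerm)) return 100
  if (title.includes(searchTerm)) return 50
  if (type === searchTerm) return 80
  if (type.includes(searchTerm)) return 30
  if (desc.includes(searchTerm)) return 20
  return 0
}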

View File

@ -31,7 +31,8 @@ const Placeholder = () => {
<div className='system-kbd mx-0.5 flex h-4 w-4 items-center justify-center rounded bg-components-kbd-bg-gray text-text-placeholder'>/</div>
<div
className='system-sm-regular cursor-pointer text-components-input-text-placeholder underline decoration-dotted decoration-auto underline-offset-auto hover:text-text-tertiary'
onClick={((e) => {
onMouseDown={((e) => {
e.preventDefault()
e.stopPropagation()
handleInsert('/')
})}
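
The placeholder change above swaps onClick for onMouseDown with preventDefault. The reasoning: mousedown fires before the editor loses focus, and preventing its default keeps the caret where it is, so the inserted '/' lands in the prompt editor; a click handler would only run after the blur. The same handler with that reasoning spelled out as comments (a restatement, not new behavior):

onMouseDown={(e) => {
  e.preventDefault()   // default mousedown would blur the editor before insertion
  e.stopPropagation()  // keep the event from reaching surrounding handlers
  handleInsert('/')    // insert the shortcut character at the current caret
}}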

View File

@ -271,6 +271,8 @@ const translation = {
noWorkflowNodesFound: 'Keine Workflow-Knoten gefunden',
noKnowledgeBasesFound: 'Keine Wissensdatenbanken gefunden',
noAppsFound: 'Keine Apps gefunden',
tryDifferentTerm: 'Versuchen Sie einen anderen Suchbegriff oder entfernen Sie den {{mode}}-Filter',
trySpecificSearch: 'Versuchen Sie {{shortcuts}} für spezifische Suchen',
},
groups: {
knowledgeBases: 'Wissensdatenbanken',
@ -288,6 +290,12 @@ const translation = {
useAtForSpecific: 'Verwenden von @ für bestimmte Typen',
searchTitle: 'Suchen Sie nach irgendetwas',
searching: 'Suche...',
selectSearchType: 'Wählen Sie aus, wonach gesucht werden soll',
commandHint: 'Geben Sie @ ein, um nach Kategorie zu suchen',
searchHint: 'Beginnen Sie mit der Eingabe, um alles sofort zu durchsuchen',
resultCount: '{{count}} Ergebnis',
resultCount_other: '{{count}} Ergebnisse',
inScope: 'in {{scope}}s',
},
}

View File

@ -267,6 +267,9 @@ const translation = {
inScope: 'in {{scope}}s',
clearToSearchAll: 'Clear @ to search all',
useAtForSpecific: 'Use @ for specific types',
selectSearchType: 'Choose what to search for',
searchHint: 'Start typing to search everything instantly',
commandHint: 'Type @ to browse by category',
actions: {
searchApplications: 'Search Applications',
searchApplicationsDesc: 'Search and navigate to your applications',
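
Several locale files in this commit add plural keys (resultCount / resultCount_other) plus interpolated hints. For reference, a hedged sketch of how i18next consumes such keys — the call site is illustrative and assumes the English file defines the same keys as the locales below.

// i18next picks resultCount vs resultCount_other from the `count` option,
// and fills {{scope}} through interpolation.
const label = t('app.gotoAnything.resultCount', { count: results.length })
const scoped = `${label} ${t('app.gotoAnything.inScope', { scope: 'app' })}`
// e.g. "5 results in apps" in an English locale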

View File

@ -269,6 +269,8 @@ const translation = {
noPluginsFound: 'No se encontraron complementos',
noWorkflowNodesFound: 'No se encontraron nodos de flujo de trabajo',
noKnowledgeBasesFound: 'No se han encontrado bases de conocimiento',
tryDifferentTerm: 'Intenta un término de búsqueda diferente o elimina el filtro {{mode}}',
trySpecificSearch: 'Prueba {{shortcuts}} para búsquedas específicas',
},
groups: {
apps: 'Aplicaciones',
@ -278,7 +280,7 @@ const translation = {
},
clearToSearchAll: 'Borrar @ para buscar todo',
noResults: 'No se han encontrado resultados',
searching: 'Minucioso...',
searching: 'Buscando...',
searchTemporarilyUnavailable: 'La búsqueda no está disponible temporalmente',
searchFailed: 'Error de búsqueda',
useAtForSpecific: 'Use @ para tipos específicos',
@ -286,6 +288,12 @@ const translation = {
searchTitle: 'Busca cualquier cosa',
someServicesUnavailable: 'Algunos servicios de búsqueda no están disponibles',
servicesUnavailableMessage: 'Algunos servicios de búsqueda pueden estar experimentando problemas. Inténtalo de nuevo en un momento.',
searchHint: 'Empieza a escribir para buscar todo al instante',
commandHint: 'Escriba @ para buscar por categoría',
selectSearchType: 'Elige qué buscar',
resultCount: '{{count}} resultado',
resultCount_other: '{{count}} resultados',
inScope: 'en {{scope}}s',
},
}

View File

@ -269,10 +269,12 @@ const translation = {
noAppsFound: 'هیچ برنامه ای یافت نشد',
noPluginsFound: 'هیچ افزونه ای یافت نشد',
noWorkflowNodesFound: 'هیچ گره گردش کاری یافت نشد',
tryDifferentTerm: 'یک عبارت جستجوی متفاوت را امتحان کنید یا فیلتر {{mode}} را حذف کنید',
trySpecificSearch: '{{shortcuts}} را برای جستجوهای خاص امتحان کنید',
},
groups: {
plugins: 'پلاگین',
apps: 'واژهنامه',
apps: 'برنامه‌ها',
knowledgeBases: 'پایگاه های دانش',
workflowNodes: 'گره های گردش کار',
},
@ -286,6 +288,12 @@ const translation = {
searchTemporarilyUnavailable: 'جستجو به طور موقت در دسترس نیست',
servicesUnavailableMessage: 'برخی از سرویس های جستجو ممکن است با مشکل مواجه شوند. یک لحظه دیگر دوباره امتحان کنید.',
someServicesUnavailable: 'برخی از سرویس های جستجو دردسترس نیستند',
selectSearchType: 'انتخاب کنید چه چیزی را جستجو کنید',
commandHint: '@ را برای مرور بر اساس دسته بندی تایپ کنید',
searchHint: 'شروع به تایپ کنید تا فورا همه چیز را جستجو کنید',
resultCount: '{{count}} نتیجه',
resultCount_other: '{{count}} نتیجه',
inScope: 'در {{scope}}s',
},
}

View File

@ -262,16 +262,18 @@ const translation = {
searchWorkflowNodes: 'Rechercher des nœuds de workflow',
searchKnowledgeBases: 'Rechercher dans les bases de connaissances',
searchApplications: 'Rechercher des applications',
      searchWorkflowNodesHelp: 'Cette fonctionnalité ne fonctionne que lors de l’affichage d’un flux de travail. Accédez d’abord à un flux de travail.',
searchWorkflowNodesHelp: 'Cette fonctionnalité ne fonctionne que lors de l\'affichage d\'un flux de travail. Accédez d\'abord à un flux de travail.',
},
emptyState: {
noKnowledgeBasesFound: 'Aucune base de connaissances trouvée',
noAppsFound: 'Aucune application trouvée',
noPluginsFound: 'Aucun plugin trouvé',
noWorkflowNodesFound: 'Aucun nœud de workflow trouvé',
tryDifferentTerm: 'Essayez un terme de recherche différent ou supprimez le filtre {{mode}}',
trySpecificSearch: 'Essayez {{shortcuts}} pour des recherches spécifiques',
},
groups: {
apps: 'Apps',
apps: 'Applications',
workflowNodes: 'Nœuds de flux de travail',
knowledgeBases: 'Bases de connaissances',
plugins: 'Plug-ins',
@ -280,12 +282,18 @@ const translation = {
servicesUnavailableMessage: 'Certains services de recherche peuvent rencontrer des problèmes. Réessayez dans un instant.',
useAtForSpecific: 'Utilisez @ pour des types spécifiques',
searchTemporarilyUnavailable: 'Recherche temporairement indisponible',
    searchTitle: 'Recherchez n’importe quoi',
searchTitle: 'Recherchez n\'importe quoi',
clearToSearchAll: 'Effacer @ pour rechercher tout',
searching: 'Recherche...',
searchPlaceholder: 'Recherchez ou tapez @ pour les commandes...',
searchFailed: 'Echec de la recherche',
noResults: 'Aucun résultat trouvé',
commandHint: 'Tapez @ pour parcourir par catégorie',
selectSearchType: 'Choisissez les éléments de recherche',
searchHint: 'Commencez à taper pour tout rechercher instantanément',
resultCount: '{{count}} résultat',
resultCount_other: '{{count}} résultats',
inScope: 'dans {{scope}}s',
},
}

View File

@ -269,6 +269,8 @@ const translation = {
noAppsFound: 'कोई ऐप्स नहीं मिले',
noKnowledgeBasesFound: 'कोई ज्ञान आधार नहीं मिले',
noWorkflowNodesFound: 'कोई कार्यप्रवाह नोड नहीं मिला',
tryDifferentTerm: 'एक अलग खोज शब्द आज़माएं या {{mode}} फ़िल्टर हटा दें',
trySpecificSearch: 'विशिष्ट खोज के लिए {{shortcuts}} आज़माएं',
},
groups: {
apps: 'ऐप्स',
@ -286,6 +288,12 @@ const translation = {
searchPlaceholder: 'कमांड के लिए खोजें या टाइप करें @...',
searchTemporarilyUnavailable: 'खोज अस्थायी रूप से उपलब्ध नहीं है',
servicesUnavailableMessage: 'कुछ खोज सेवाएँ समस्याओं का सामना कर सकती हैं। थोड़ी देर बाद फिर से प्रयास करें।',
commandHint: '@ का उपयोग कर श्रेणी के अनुसार ब्राउज़ करें',
selectSearchType: 'खोजने के लिए क्या चुनें',
searchHint: 'सब कुछ तुरंत खोजने के लिए टाइप करना शुरू करें',
resultCount: '{{count}} परिणाम',
resultCount_other: '{{count}} परिणाम',
inScope: '{{scope}}s में',
},
}

View File

@ -275,6 +275,8 @@ const translation = {
noAppsFound: 'Nessuna app trovata',
noWorkflowNodesFound: 'Nessun nodo del flusso di lavoro trovato',
noPluginsFound: 'Nessun plugin trovato',
tryDifferentTerm: 'Prova un termine di ricerca diverso o rimuovi il filtro {{mode}}',
trySpecificSearch: 'Prova {{shortcuts}} per ricerche specifiche',
},
groups: {
knowledgeBases: 'Basi di conoscenza',
@ -284,7 +286,7 @@ const translation = {
},
searchTitle: 'Cerca qualsiasi cosa',
searchPlaceholder: 'Cerca o digita @ per i comandi...',
searching: 'Indagatore...',
searching: 'Ricerca in corso...',
searchTemporarilyUnavailable: 'Ricerca temporaneamente non disponibile',
searchFailed: 'Ricerca non riuscita',
servicesUnavailableMessage: 'Alcuni servizi di ricerca potrebbero riscontrare problemi. Riprova tra un attimo.',
@ -292,6 +294,12 @@ const translation = {
noResults: 'Nessun risultato trovato',
useAtForSpecific: 'Utilizzare @ per tipi specifici',
clearToSearchAll: 'Cancella @ per cercare tutto',
selectSearchType: 'Scegli cosa cercare',
commandHint: 'Digita @ per sfogliare per categoria',
searchHint: 'Inizia a digitare per cercare tutto all\'istante',
resultCount: '{{count}} risultato',
resultCount_other: '{{count}} risultati',
inScope: 'in {{scope}}s',
},
}

View File

@ -265,6 +265,9 @@ const translation = {
inScope: '{{scope}}s 内',
clearToSearchAll: '@ をクリアしてすべてを検索',
useAtForSpecific: '特定のタイプには @ を使用',
selectSearchType: '検索対象を選択',
searchHint: '入力を開始してすべてを瞬時に検索',
commandHint: '@ を入力してカテゴリ別に参照',
actions: {
searchApplications: 'アプリケーションを検索',
searchApplicationsDesc: 'アプリケーションを検索してナビゲート',

View File

@ -289,6 +289,8 @@ const translation = {
noPluginsFound: '플러그인을 찾을 수 없습니다.',
noKnowledgeBasesFound: '기술 자료를 찾을 수 없습니다.',
noWorkflowNodesFound: '워크플로 노드를 찾을 수 없습니다.',
tryDifferentTerm: '다른 검색어를 시도하거나 {{mode}} 필터를 제거하세요',
trySpecificSearch: '특정 검색을 위해 {{shortcuts}}를 사용해보세요',
},
groups: {
apps: '앱',
@ -306,6 +308,12 @@ const translation = {
searchFailed: '검색 실패',
searchPlaceholder: '명령을 검색하거나 @를 입력합니다...',
clearToSearchAll: '@를 지우면 모두 검색됩니다.',
selectSearchType: '검색할 항목 선택',
commandHint: '@를 입력하여 카테고리별로 찾아봅니다.',
searchHint: '즉시 모든 것을 검색하려면 입력을 시작하세요.',
resultCount: '{{count}} 개 결과',
resultCount_other: '{{count}} 개 결과',
inScope: '{{scope}}s 내에서',
},
}

View File

@ -270,6 +270,8 @@ const translation = {
noKnowledgeBasesFound: 'Nie znaleziono baz wiedzy',
noWorkflowNodesFound: 'Nie znaleziono węzłów przepływu pracy',
noPluginsFound: 'Nie znaleziono wtyczek',
tryDifferentTerm: 'Spróbuj innego terminu wyszukiwania lub usuń filtr {{mode}}',
trySpecificSearch: 'Spróbuj {{shortcuts}} dla konkretnych wyszukiwań',
},
groups: {
apps: 'Aplikacje',
@ -287,6 +289,12 @@ const translation = {
searchTemporarilyUnavailable: 'Wyszukiwanie chwilowo niedostępne',
servicesUnavailableMessage: 'W przypadku niektórych usług wyszukiwania mogą występować problemy. Spróbuj ponownie za chwilę.',
searchFailed: 'Wyszukiwanie nie powiodło się',
searchHint: 'Zacznij pisać, aby natychmiast wszystko przeszukać',
commandHint: 'Wpisz @, aby przeglądać według kategorii',
selectSearchType: 'Wybierz, czego chcesz szukać',
resultCount: '{{count}} wynik',
resultCount_other: '{{count}} wyników',
inScope: 'w {{scope}}s',
},
}

View File

@ -269,9 +269,11 @@ const translation = {
noPluginsFound: 'Nenhum plugin encontrado',
noWorkflowNodesFound: 'Nenhum nó de fluxo de trabalho encontrado',
noKnowledgeBasesFound: 'Nenhuma base de conhecimento encontrada',
tryDifferentTerm: 'Tente um termo de pesquisa diferente ou remova o filtro {{mode}}',
trySpecificSearch: 'Tente {{shortcuts}} para pesquisas específicas',
},
groups: {
apps: 'Apps',
apps: 'Aplicativos',
knowledgeBases: 'Bases de conhecimento',
plugins: 'Plugins',
workflowNodes: 'Nós de fluxo de trabalho',
@ -286,6 +288,12 @@ const translation = {
useAtForSpecific: 'Use @ para tipos específicos',
clearToSearchAll: 'Desmarque @ para pesquisar tudo',
searchFailed: 'Falha na pesquisa',
searchHint: 'Comece a digitar para pesquisar tudo instantaneamente',
commandHint: 'Digite @ para navegar por categoria',
selectSearchType: 'Escolha o que pesquisar',
resultCount: '{{count}} resultado',
resultCount_other: '{{count}} resultados',
inScope: 'em {{scope}}s',
},
}

View File

@ -269,6 +269,8 @@ const translation = {
noPluginsFound: 'Nu au fost găsite plugin-uri',
noWorkflowNodesFound: 'Nu au fost găsite noduri de flux de lucru',
noKnowledgeBasesFound: 'Nu au fost găsite baze de cunoștințe',
tryDifferentTerm: 'Încercați un termen de căutare diferit sau eliminați filtrul {{mode}}',
trySpecificSearch: 'Încercați {{shortcuts}} pentru căutări specifice',
},
groups: {
knowledgeBases: 'Baze de cunoștințe',
@ -286,6 +288,12 @@ const translation = {
servicesUnavailableMessage: 'Este posibil ca unele servicii de căutare să întâmpine probleme. Încercați din nou într-o clipă.',
someServicesUnavailable: 'Unele servicii de căutare nu sunt disponibile',
clearToSearchAll: 'Ștergeți @ pentru a căuta toate',
selectSearchType: 'Alegeți ce să căutați',
commandHint: 'Tastați @ pentru a naviga după categorie',
searchHint: 'Începeți să tastați pentru a căuta totul instantaneu',
resultCount: '{{count}} rezultat',
resultCount_other: '{{count}} rezultate',
inScope: 'în {{scope}}s',
},
}

View File

@ -269,6 +269,8 @@ const translation = {
noKnowledgeBasesFound: 'Базы знаний не найдены',
noAppsFound: 'Приложения не найдены',
noWorkflowNodesFound: 'Узлы расчетной схемы не найдены',
tryDifferentTerm: 'Попробуйте другой поисковый термин или удалите фильтр {{mode}}',
trySpecificSearch: 'Попробуйте {{shortcuts}} для конкретного поиска',
},
groups: {
knowledgeBases: 'Базы знаний',
@ -286,6 +288,12 @@ const translation = {
searchPlaceholder: 'Найдите или введите @ для команд...',
someServicesUnavailable: 'Некоторые поисковые сервисы недоступны',
servicesUnavailableMessage: 'В некоторых поисковых службах могут возникать проблемы. Повторите попытку через мгновение.',
searchHint: 'Начните печатать, чтобы мгновенно искать все',
commandHint: 'Введите @ для просмотра по категориям',
selectSearchType: 'Выберите, что искать',
resultCount: '{{count}} результат',
resultCount_other: '{{count}} результатов',
inScope: 'в {{scope}}s',
},
}

View File

@ -269,10 +269,12 @@ const translation = {
noWorkflowNodesFound: 'Vozlišča poteka dela niso bila najdena',
noKnowledgeBasesFound: 'Zbirk znanja ni mogoče najti',
noAppsFound: 'Ni bilo najdenih aplikacij',
tryDifferentTerm: 'Poskusite z drugim iskalnim izrazom ali odstranite filter {{mode}}',
trySpecificSearch: 'Poskusite {{shortcuts}} za specifična iskanja',
},
groups: {
workflowNodes: 'Vozlišča poteka dela',
apps: 'Apps',
apps: 'Aplikacije',
knowledgeBases: 'Baze znanja',
plugins: 'Vtičniki',
},
@ -286,6 +288,12 @@ const translation = {
searchFailed: 'Iskanje ni uspelo',
useAtForSpecific: 'Uporaba znaka @ za določene vrste',
servicesUnavailableMessage: 'Pri nekaterih iskalnih storitvah se morda pojavljajo težave. Poskusite znova čez trenutek.',
commandHint: 'Vnesite @ za brskanje po kategoriji',
selectSearchType: 'Izberite, kaj želite iskati',
searchHint: 'Začnite tipkati, da takoj preiščete vse',
resultCount: '{{count}} rezultat',
resultCount_other: '{{count}} rezultatov',
inScope: 'v {{scope}}s',
},
}

View File

@ -265,9 +265,11 @@ const translation = {
noAppsFound: 'ไม่พบแอป',
noWorkflowNodesFound: 'ไม่พบโหนดเวิร์กโฟลว์',
noKnowledgeBasesFound: 'ไม่พบฐานความรู้',
tryDifferentTerm: 'ลองใช้คำค้นหาที่แตกต่างออกไปหรือลบตัวกรอง {{mode}}',
trySpecificSearch: 'ลองใช้ {{shortcuts}} สำหรับการค้นหาเฉพาะ',
},
groups: {
apps: 'ปพลิ เค ชัน',
apps: 'แอปพลิเคชัน',
knowledgeBases: 'ฐานความรู้',
plugins: 'ปลั๊กอิน',
workflowNodes: 'โหนดเวิร์กโฟลว์',
@ -282,6 +284,12 @@ const translation = {
searchPlaceholder: 'ค้นหาหรือพิมพ์ @ สําหรับคําสั่ง...',
servicesUnavailableMessage: 'บริการค้นหาบางบริการอาจประสบปัญหา ลองอีกครั้งในอีกสักครู่',
searching: 'กำลังค้นหา...',
searchHint: 'เริ่มพิมพ์เพื่อค้นหาทุกอย่างได้ทันที',
selectSearchType: 'เลือกสิ่งที่จะค้นหา',
commandHint: 'พิมพ์ @ เพื่อเรียกดูตามหมวดหมู่',
resultCount: '{{count}} ผลลัพธ์',
resultCount_other: '{{count}} ผลลัพธ์',
inScope: 'ใน {{scope}}s',
},
}

View File

@ -265,9 +265,11 @@ const translation = {
noWorkflowNodesFound: 'İş akışı düğümü bulunamadı',
noKnowledgeBasesFound: 'Bilgi bankası bulunamadı',
noPluginsFound: 'Eklenti bulunamadı',
tryDifferentTerm: 'Farklı bir arama terimi deneyin veya {{mode}} filtresini kaldırın',
trySpecificSearch: 'Belirli aramalar için {{shortcuts}} deneyin',
},
groups: {
apps: 'Apps',
apps: 'Uygulamalar',
plugins: 'Eklentiler',
knowledgeBases: 'Bilgi Tabanları',
workflowNodes: 'İş Akışı Düğümleri',
@ -281,7 +283,13 @@ const translation = {
searchTitle: 'Her şeyi arayın',
noResults: 'Sonuç bulunamadı',
servicesUnavailableMessage: 'Bazı arama hizmetlerinde sorunlar yaşanıyor olabilir. Kısa bir süre sonra tekrar deneyin.',
searching: 'Araştırıcı...',
searching: 'Aranıyor...',
selectSearchType: 'Ne arayacağınızı seçin',
searchHint: 'Her şeyi anında aramak için yazmaya başlayın',
commandHint: 'Kategoriye göre göz atmak için @ yazın',
resultCount: '{{count}} sonuç',
resultCount_other: '{{count}} sonuç',
inScope: '{{scope}}s içinde',
},
}

View File

@ -269,6 +269,8 @@ const translation = {
noKnowledgeBasesFound: 'Баз знань не знайдено',
noAppsFound: 'Не знайдено додатків',
noWorkflowNodesFound: 'Вузли бізнес-процесу не знайдено',
tryDifferentTerm: 'Спробуйте інший пошуковий термін або видаліть фільтр {{mode}}',
trySpecificSearch: 'Спробуйте {{shortcuts}} для конкретного пошуку',
},
groups: {
knowledgeBases: 'Бази знань',
@ -279,13 +281,19 @@ const translation = {
searching: 'Пошук...',
searchTitle: 'Шукайте що завгодно',
searchFailed: 'Пошук не вдався',
clearToSearchAll: 'Clear @ для пошуку всіх',
clearToSearchAll: 'Очистіть @ для пошуку всіх',
noResults: 'Результатів не знайдено',
searchPlaceholder: 'Виконайте пошук або введіть @ для команд...',
searchTemporarilyUnavailable: 'Пошук тимчасово недоступний',
useAtForSpecific: 'Використовуйте @ для конкретних типів',
someServicesUnavailable: 'Деякі пошукові сервіси недоступні',
servicesUnavailableMessage: 'У деяких пошукових службах можуть виникати проблеми. Повторіть спробу за мить.',
selectSearchType: 'Виберіть, що шукати',
commandHint: 'Введіть @ для навігації за категоріями',
searchHint: 'Почніть вводити текст, щоб миттєво шукати все',
resultCount: '{{count}} результат',
resultCount_other: '{{count}} результатів',
inScope: 'у {{scope}}s',
},
}

View File

@ -269,6 +269,8 @@ const translation = {
noKnowledgeBasesFound: 'Không tìm thấy cơ sở kiến thức',
noPluginsFound: 'Không tìm thấy plugin',
noAppsFound: 'Không tìm thấy ứng dụng nào',
tryDifferentTerm: 'Thử từ khóa tìm kiếm khác hoặc xóa bộ lọc {{mode}}',
trySpecificSearch: 'Thử {{shortcuts}} để tìm kiếm cụ thể',
},
groups: {
plugins: 'Plugin',
@ -286,6 +288,12 @@ const translation = {
useAtForSpecific: 'Sử dụng @ cho các loại cụ thể',
someServicesUnavailable: 'Một số dịch vụ tìm kiếm không khả dụng',
servicesUnavailableMessage: 'Một số dịch vụ tìm kiếm có thể gặp sự cố. Thử lại trong giây lát.',
searchHint: 'Bắt đầu nhập để tìm kiếm mọi thứ ngay lập tức',
commandHint: 'Nhập @ để duyệt theo danh mục',
selectSearchType: 'Chọn nội dung để tìm kiếm',
resultCount: '{{count}} kết quả',
resultCount_other: '{{count}} kết quả',
inScope: 'trong {{scope}}s',
},
}

View File

@ -266,6 +266,9 @@ const translation = {
inScope: '在 {{scope}}s 中',
clearToSearchAll: '清除 @ 以搜索全部',
useAtForSpecific: '使用 @ 进行特定类型搜索',
selectSearchType: '选择搜索内容',
searchHint: '开始输入即可立即搜索所有内容',
commandHint: '输入 @ 按类别浏览',
actions: {
searchApplications: '搜索应用程序',
searchApplicationsDesc: '搜索并导航到您的应用程序',

View File

@ -268,6 +268,8 @@ const translation = {
noWorkflowNodesFound: '未找到工作流節點',
noKnowledgeBasesFound: '未找到知識庫',
noPluginsFound: '未找到外掛程式',
tryDifferentTerm: '嘗試不同的搜索詞或移除 {{mode}} 過濾器',
trySpecificSearch: '嘗試使用 {{shortcuts}} 進行特定搜索',
},
groups: {
apps: '應用程式',
@ -276,7 +278,7 @@ const translation = {
workflowNodes: '工作流節點',
},
searchPlaceholder: '搜尋或鍵入 @ 以取得命令...',
searching: '搜索。。。',
searching: '搜索中...',
searchTitle: '搜索任何內容',
noResults: '未找到結果',
clearToSearchAll: '清除 @ 以搜尋全部',
@ -285,6 +287,12 @@ const translation = {
someServicesUnavailable: '某些搜索服務不可用',
useAtForSpecific: '對特定類型使用 @',
searchTemporarilyUnavailable: '搜索暫時不可用',
selectSearchType: '選擇要搜索的內容',
commandHint: '鍵入 @ 按類別流覽',
searchHint: '開始輸入以立即搜索所有內容',
resultCount: '{{count}} 個結果',
resultCount_other: '{{count}} 個結果',
inScope: '在 {{scope}}s 中',
},
}

View File

@ -1,6 +1,6 @@
{
"name": "dify-web",
"version": "1.7.1",
"version": "1.7.2",
"private": true,
"engines": {
"node": ">=v22.11.0"