mirror of https://github.com/langgenius/dify.git
Merge branch 'refs/heads/origin-main' into feat/end-user-oauth
This commit is contained in: commit 6aa0c9e5cc

@@ -0,0 +1,226 @@
# CODEOWNERS
# This file defines code ownership for the Dify project.
# Each line is a file pattern followed by one or more owners.
# Owners can be @username, @org/team-name, or email addresses.
# For more information, see: https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners

* @crazywoola @laipz8200 @Yeuoly

# Backend (default owner, more specific rules below will override)
api/ @QuantumGhost

# Backend - Workflow - Engine (Core graph execution engine)
api/core/workflow/graph_engine/ @laipz8200 @QuantumGhost
api/core/workflow/runtime/ @laipz8200 @QuantumGhost
api/core/workflow/graph/ @laipz8200 @QuantumGhost
api/core/workflow/graph_events/ @laipz8200 @QuantumGhost
api/core/workflow/node_events/ @laipz8200 @QuantumGhost
api/core/model_runtime/ @laipz8200 @QuantumGhost

# Backend - Workflow - Nodes (Agent, Iteration, Loop, LLM)
api/core/workflow/nodes/agent/ @Novice
api/core/workflow/nodes/iteration/ @Novice
api/core/workflow/nodes/loop/ @Novice
api/core/workflow/nodes/llm/ @Novice

# Backend - RAG (Retrieval Augmented Generation)
api/core/rag/ @JohnJyong
api/services/rag_pipeline/ @JohnJyong
api/services/dataset_service.py @JohnJyong
api/services/knowledge_service.py @JohnJyong
api/services/external_knowledge_service.py @JohnJyong
api/services/hit_testing_service.py @JohnJyong
api/services/metadata_service.py @JohnJyong
api/services/vector_service.py @JohnJyong
api/services/entities/knowledge_entities/ @JohnJyong
api/services/entities/external_knowledge_entities/ @JohnJyong
api/controllers/console/datasets/ @JohnJyong
api/controllers/service_api/dataset/ @JohnJyong
api/models/dataset.py @JohnJyong
api/tasks/rag_pipeline/ @JohnJyong
api/tasks/add_document_to_index_task.py @JohnJyong
api/tasks/batch_clean_document_task.py @JohnJyong
api/tasks/clean_document_task.py @JohnJyong
api/tasks/clean_notion_document_task.py @JohnJyong
api/tasks/document_indexing_task.py @JohnJyong
api/tasks/document_indexing_sync_task.py @JohnJyong
api/tasks/document_indexing_update_task.py @JohnJyong
api/tasks/duplicate_document_indexing_task.py @JohnJyong
api/tasks/recover_document_indexing_task.py @JohnJyong
api/tasks/remove_document_from_index_task.py @JohnJyong
api/tasks/retry_document_indexing_task.py @JohnJyong
api/tasks/sync_website_document_indexing_task.py @JohnJyong
api/tasks/batch_create_segment_to_index_task.py @JohnJyong
api/tasks/create_segment_to_index_task.py @JohnJyong
api/tasks/delete_segment_from_index_task.py @JohnJyong
api/tasks/disable_segment_from_index_task.py @JohnJyong
api/tasks/disable_segments_from_index_task.py @JohnJyong
api/tasks/enable_segment_to_index_task.py @JohnJyong
api/tasks/enable_segments_to_index_task.py @JohnJyong
api/tasks/clean_dataset_task.py @JohnJyong
api/tasks/deal_dataset_index_update_task.py @JohnJyong
api/tasks/deal_dataset_vector_index_task.py @JohnJyong

# Backend - Plugins
api/core/plugin/ @Mairuis @Yeuoly @Stream29
api/services/plugin/ @Mairuis @Yeuoly @Stream29
api/controllers/console/workspace/plugin.py @Mairuis @Yeuoly @Stream29
api/controllers/inner_api/plugin/ @Mairuis @Yeuoly @Stream29
api/tasks/process_tenant_plugin_autoupgrade_check_task.py @Mairuis @Yeuoly @Stream29

# Backend - Trigger/Schedule/Webhook
api/controllers/trigger/ @Mairuis @Yeuoly
api/controllers/console/app/workflow_trigger.py @Mairuis @Yeuoly
api/controllers/console/workspace/trigger_providers.py @Mairuis @Yeuoly
api/core/trigger/ @Mairuis @Yeuoly
api/core/app/layers/trigger_post_layer.py @Mairuis @Yeuoly
api/services/trigger/ @Mairuis @Yeuoly
api/models/trigger.py @Mairuis @Yeuoly
api/fields/workflow_trigger_fields.py @Mairuis @Yeuoly
api/repositories/workflow_trigger_log_repository.py @Mairuis @Yeuoly
api/repositories/sqlalchemy_workflow_trigger_log_repository.py @Mairuis @Yeuoly
api/libs/schedule_utils.py @Mairuis @Yeuoly
api/services/workflow/scheduler.py @Mairuis @Yeuoly
api/schedule/trigger_provider_refresh_task.py @Mairuis @Yeuoly
api/schedule/workflow_schedule_task.py @Mairuis @Yeuoly
api/tasks/trigger_processing_tasks.py @Mairuis @Yeuoly
api/tasks/trigger_subscription_refresh_tasks.py @Mairuis @Yeuoly
api/tasks/workflow_schedule_tasks.py @Mairuis @Yeuoly
api/tasks/workflow_cfs_scheduler/ @Mairuis @Yeuoly
api/events/event_handlers/sync_plugin_trigger_when_app_created.py @Mairuis @Yeuoly
api/events/event_handlers/update_app_triggers_when_app_published_workflow_updated.py @Mairuis @Yeuoly
api/events/event_handlers/sync_workflow_schedule_when_app_published.py @Mairuis @Yeuoly
api/events/event_handlers/sync_webhook_when_app_created.py @Mairuis @Yeuoly

# Backend - Async Workflow
api/services/async_workflow_service.py @Mairuis @Yeuoly
api/tasks/async_workflow_tasks.py @Mairuis @Yeuoly

# Backend - Billing
api/services/billing_service.py @hj24 @zyssyz123
api/controllers/console/billing/ @hj24 @zyssyz123

# Backend - Enterprise
api/configs/enterprise/ @GarfieldDai @GareArc
api/services/enterprise/ @GarfieldDai @GareArc
api/services/feature_service.py @GarfieldDai @GareArc
api/controllers/console/feature.py @GarfieldDai @GareArc
api/controllers/web/feature.py @GarfieldDai @GareArc

# Backend - Database Migrations
api/migrations/ @snakevash @laipz8200

# Frontend
web/ @iamjoel

# Frontend - App - Orchestration
web/app/components/workflow/ @iamjoel @zxhlyh
web/app/components/workflow-app/ @iamjoel @zxhlyh
web/app/components/app/configuration/ @iamjoel @zxhlyh
web/app/components/app/app-publisher/ @iamjoel @zxhlyh

# Frontend - WebApp - Chat
web/app/components/base/chat/ @iamjoel @zxhlyh

# Frontend - WebApp - Completion
web/app/components/share/text-generation/ @iamjoel @zxhlyh

# Frontend - App - List and Creation
web/app/components/apps/ @JzoNgKVO @iamjoel
web/app/components/app/create-app-dialog/ @JzoNgKVO @iamjoel
web/app/components/app/create-app-modal/ @JzoNgKVO @iamjoel
web/app/components/app/create-from-dsl-modal/ @JzoNgKVO @iamjoel

# Frontend - App - API Documentation
web/app/components/develop/ @JzoNgKVO @iamjoel

# Frontend - App - Logs and Annotations
web/app/components/app/workflow-log/ @JzoNgKVO @iamjoel
web/app/components/app/log/ @JzoNgKVO @iamjoel
web/app/components/app/log-annotation/ @JzoNgKVO @iamjoel
web/app/components/app/annotation/ @JzoNgKVO @iamjoel

# Frontend - App - Monitoring
web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/ @JzoNgKVO @iamjoel
web/app/components/app/overview/ @JzoNgKVO @iamjoel

# Frontend - App - Settings
web/app/components/app-sidebar/ @JzoNgKVO @iamjoel

# Frontend - RAG - Hit Testing
web/app/components/datasets/hit-testing/ @JzoNgKVO @iamjoel

# Frontend - RAG - List and Creation
web/app/components/datasets/list/ @iamjoel @WTW0313
web/app/components/datasets/create/ @iamjoel @WTW0313
web/app/components/datasets/create-from-pipeline/ @iamjoel @WTW0313
web/app/components/datasets/external-knowledge-base/ @iamjoel @WTW0313

# Frontend - RAG - Orchestration (general rule first, specific rules below override)
web/app/components/rag-pipeline/ @iamjoel @WTW0313
web/app/components/rag-pipeline/components/rag-pipeline-main.tsx @iamjoel @zxhlyh
web/app/components/rag-pipeline/store/ @iamjoel @zxhlyh

# Frontend - RAG - Documents List
web/app/components/datasets/documents/list.tsx @iamjoel @WTW0313
web/app/components/datasets/documents/create-from-pipeline/ @iamjoel @WTW0313

# Frontend - RAG - Segments List
web/app/components/datasets/documents/detail/ @iamjoel @WTW0313

# Frontend - RAG - Settings
web/app/components/datasets/settings/ @iamjoel @WTW0313

# Frontend - Ecosystem - Plugins
web/app/components/plugins/ @iamjoel @zhsama

# Frontend - Ecosystem - Tools
web/app/components/tools/ @iamjoel @Yessenia-d

# Frontend - Ecosystem - MarketPlace
web/app/components/plugins/marketplace/ @iamjoel @Yessenia-d

# Frontend - Login and Registration
web/app/signin/ @douxc @iamjoel
web/app/signup/ @douxc @iamjoel
web/app/reset-password/ @douxc @iamjoel
web/app/install/ @douxc @iamjoel
web/app/init/ @douxc @iamjoel
web/app/forgot-password/ @douxc @iamjoel
web/app/account/ @douxc @iamjoel

# Frontend - Service Authentication
web/service/base.ts @douxc @iamjoel

# Frontend - WebApp Authentication and Access Control
web/app/(shareLayout)/components/ @douxc @iamjoel
web/app/(shareLayout)/webapp-signin/ @douxc @iamjoel
web/app/(shareLayout)/webapp-reset-password/ @douxc @iamjoel
web/app/components/app/app-access-control/ @douxc @iamjoel

# Frontend - Explore Page
web/app/components/explore/ @CodingOnStar @iamjoel

# Frontend - Personal Settings
web/app/components/header/account-setting/ @CodingOnStar @iamjoel
web/app/components/header/account-dropdown/ @CodingOnStar @iamjoel

# Frontend - Analytics
web/app/components/base/ga/ @CodingOnStar @iamjoel

# Frontend - Base Components
web/app/components/base/ @iamjoel @zxhlyh

# Frontend - Utils and Hooks
web/utils/classnames.ts @iamjoel @zxhlyh
web/utils/time.ts @iamjoel @zxhlyh
web/utils/format.ts @iamjoel @zxhlyh
web/utils/clipboard.ts @iamjoel @zxhlyh
web/hooks/use-document-title.ts @iamjoel @zxhlyh

# Frontend - Billing and Education
web/app/components/billing/ @iamjoel @zxhlyh
web/app/education-apply/ @iamjoel @zxhlyh

# Frontend - Workspace
web/app/components/header/account-dropdown/workplace-selector/ @iamjoel @zxhlyh
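The "more specific rules below will override" comments rely on how GitHub resolves CODEOWNERS: for a given path, the last matching pattern in the file wins. A minimal Python sketch of that rule, for illustration only — `fnmatch` is a rough stand-in for GitHub's gitignore-style matching, and the patterns are adapted from the directory rules above:

```python
from fnmatch import fnmatch

# (pattern, owners) pairs in file order, mirroring the CODEOWNERS above.
RULES = [
    ("*", ["@crazywoola", "@laipz8200", "@Yeuoly"]),
    ("api/*", ["@QuantumGhost"]),
    ("api/core/rag/*", ["@JohnJyong"]),
]

def owners_for(path: str) -> list[str]:
    """Return the owners of the LAST rule whose pattern matches `path`."""
    owners: list[str] = []
    for pattern, rule_owners in RULES:
        if fnmatch(path, pattern):
            owners = rule_owners  # later matches override earlier ones
    return owners

print(owners_for("api/core/rag/retrieval.py"))  # ['@JohnJyong']
print(owners_for("web/service/base.ts"))        # ['@crazywoola', '@laipz8200', '@Yeuoly']
```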
@@ -77,12 +77,15 @@ jobs:
         uses: peter-evans/create-pull-request@v6
         with:
           token: ${{ secrets.GITHUB_TOKEN }}
-          commit-message: Update i18n files and type definitions based on en-US changes
-          title: 'chore: translate i18n files and update type definitions'
+          commit-message: 'chore(i18n): update translations based on en-US changes'
+          title: 'chore(i18n): translate i18n files and update type definitions'
           body: |
             This PR was automatically created to update i18n files and TypeScript type definitions based on changes in en-US locale.

+            **Triggered by:** ${{ github.sha }}
+
             **Changes included:**
             - Updated translation files for all locales
             - Regenerated TypeScript type definitions for type safety
-          branch: chore/automated-i18n-updates
+          branch: chore/automated-i18n-updates-${{ github.sha }}
+          delete-branch: true
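Two behavioral notes on this hunk: suffixing the branch with ${{ github.sha }} gives every workflow run a unique PR branch, so successive or concurrent runs no longer collide on a single chore/automated-i18n-updates branch, and delete-branch: true lets the create-pull-request action delete that branch once its PR is closed or merged, keeping the now per-commit branches from accumulating.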
@@ -48,6 +48,12 @@ ENV PYTHONIOENCODING=utf-8

 WORKDIR /app/api

+# Create non-root user
+ARG dify_uid=1001
+RUN groupadd -r -g ${dify_uid} dify && \
+    useradd -r -u ${dify_uid} -g ${dify_uid} -s /bin/bash dify && \
+    chown -R dify:dify /app
+
 RUN \
     apt-get update \
     # Install dependencies

@@ -69,7 +75,7 @@ RUN \

 # Copy Python environment and packages
 ENV VIRTUAL_ENV=/app/api/.venv
-COPY --from=packages ${VIRTUAL_ENV} ${VIRTUAL_ENV}
+COPY --from=packages --chown=dify:dify ${VIRTUAL_ENV} ${VIRTUAL_ENV}
 ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"

 # Download nltk data

@@ -78,24 +84,20 @@ RUN mkdir -p /usr/local/share/nltk_data && NLTK_DATA=/usr/local/share/nltk_data

 ENV TIKTOKEN_CACHE_DIR=/app/api/.tiktoken_cache

-RUN python -c "import tiktoken; tiktoken.encoding_for_model('gpt2')"
+RUN python -c "import tiktoken; tiktoken.encoding_for_model('gpt2')" \
+    && chown -R dify:dify ${TIKTOKEN_CACHE_DIR}

 # Copy source code
-COPY . /app/api/
+COPY --chown=dify:dify . /app/api/

-# Copy entrypoint
-COPY docker/entrypoint.sh /entrypoint.sh
-RUN chmod +x /entrypoint.sh
+# Prepare entrypoint script
+COPY --chown=dify:dify --chmod=755 docker/entrypoint.sh /entrypoint.sh

-# Create non-root user and set permissions
-RUN groupadd -r -g 1001 dify && \
-    useradd -r -u 1001 -g 1001 -s /bin/bash dify && \
-    mkdir -p /home/dify && \
-    chown -R 1001:1001 /app /home/dify ${TIKTOKEN_CACHE_DIR} /entrypoint.sh

 ARG COMMIT_SHA
 ENV COMMIT_SHA=${COMMIT_SHA}
 ENV NLTK_DATA=/usr/local/share/nltk_data
-USER 1001
+USER dify

 ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
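The Dockerfile changes move user creation before dependency installation and replace a late, recursive chown with per-layer --chown/--chmod flags, which avoids duplicating large layers just to fix ownership. A quick sanity check that the final image really runs as the unprivileged user — the image tag is hypothetical and assumes a local build of this Dockerfile:

```python
import subprocess

IMAGE = "dify-api:local"  # hypothetical tag for an image built from this Dockerfile

# Override the entrypoint so `id -u` runs directly and prints the container uid.
uid = subprocess.run(
    ["docker", "run", "--rm", "--entrypoint", "id", IMAGE, "-u"],
    capture_output=True, text=True, check=True,
).stdout.strip()
assert uid == "1001", f"expected non-root uid 1001, got {uid}"
print("container runs as non-root uid", uid)
```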
@@ -70,7 +70,6 @@ class AgentNode(Node[AgentNodeData]):
     """

     node_type = NodeType.AGENT
-    _node_data: AgentNodeData

     @classmethod
     def version(cls) -> str:

@@ -82,8 +81,8 @@ class AgentNode(Node[AgentNodeData]):
         try:
             strategy = get_plugin_agent_strategy(
                 tenant_id=self.tenant_id,
-                agent_strategy_provider_name=self._node_data.agent_strategy_provider_name,
-                agent_strategy_name=self._node_data.agent_strategy_name,
+                agent_strategy_provider_name=self.node_data.agent_strategy_provider_name,
+                agent_strategy_name=self.node_data.agent_strategy_name,
             )
         except Exception as e:
             yield StreamCompletedEvent(

@@ -101,13 +100,13 @@ class AgentNode(Node[AgentNodeData]):
         parameters = self._generate_agent_parameters(
             agent_parameters=agent_parameters,
             variable_pool=self.graph_runtime_state.variable_pool,
-            node_data=self._node_data,
+            node_data=self.node_data,
             strategy=strategy,
         )
         parameters_for_log = self._generate_agent_parameters(
             agent_parameters=agent_parameters,
             variable_pool=self.graph_runtime_state.variable_pool,
-            node_data=self._node_data,
+            node_data=self.node_data,
             for_log=True,
             strategy=strategy,
         )

@@ -140,7 +139,7 @@ class AgentNode(Node[AgentNodeData]):
             messages=message_stream,
             tool_info={
                 "icon": self.agent_strategy_icon,
-                "agent_strategy": self._node_data.agent_strategy_name,
+                "agent_strategy": self.node_data.agent_strategy_name,
             },
             parameters_for_log=parameters_for_log,
             user_id=self.user_id,

@@ -387,7 +386,7 @@ class AgentNode(Node[AgentNodeData]):
             current_plugin = next(
                 plugin
                 for plugin in plugins
-                if f"{plugin.plugin_id}/{plugin.name}" == self._node_data.agent_strategy_provider_name
+                if f"{plugin.plugin_id}/{plugin.name}" == self.node_data.agent_strategy_provider_name
             )
             icon = current_plugin.declaration.icon
         except StopIteration:
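These hunks — and the matching ones in every node file below — are a mechanical rename from the private `self._node_data` to a public `self.node_data`, with each subclass's `_node_data: XxxNodeData` annotation deleted. The base class is not shown in this diff, so the following is a hedged sketch of the shape such a change presumably relies on (names assumed, not taken from the diff): the generic `Node` base keeps the private storage and exposes a typed, read-only property.

```python
from typing import Generic, TypeVar

class BaseNodeData:
    """Stand-in base class for per-node configuration models."""

NodeDataT = TypeVar("NodeDataT", bound=BaseNodeData)

class Node(Generic[NodeDataT]):
    """Sketch of a generic workflow node holding typed configuration."""

    def __init__(self, node_data: NodeDataT) -> None:
        self._node_data = node_data  # stored privately once...

    @property
    def node_data(self) -> NodeDataT:
        # ...and read through a public property, so subclasses no longer
        # need their own `_node_data: XxxNodeData` annotations to get typing.
        return self._node_data
```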
@@ -14,14 +14,12 @@ class AnswerNode(Node[AnswerNodeData]):
     node_type = NodeType.ANSWER
     execution_type = NodeExecutionType.RESPONSE

-    _node_data: AnswerNodeData
-
     @classmethod
     def version(cls) -> str:
         return "1"

     def _run(self) -> NodeRunResult:
-        segments = self.graph_runtime_state.variable_pool.convert_template(self._node_data.answer)
+        segments = self.graph_runtime_state.variable_pool.convert_template(self.node_data.answer)
         files = self._extract_files_from_segments(segments.value)
         return NodeRunResult(
             status=WorkflowNodeExecutionStatus.SUCCEEDED,

@@ -71,4 +69,4 @@ class AnswerNode(Node[AnswerNodeData]):
         Returns:
             Template instance for this Answer node
         """
-        return Template.from_answer_template(self._node_data.answer)
+        return Template.from_answer_template(self.node_data.answer)
@@ -24,8 +24,6 @@ from .exc import (
 class CodeNode(Node[CodeNodeData]):
     node_type = NodeType.CODE

-    _node_data: CodeNodeData
-
     @classmethod
     def get_default_config(cls, filters: Mapping[str, object] | None = None) -> Mapping[str, object]:
         """

@@ -48,12 +46,12 @@ class CodeNode(Node[CodeNodeData]):

     def _run(self) -> NodeRunResult:
         # Get code language
-        code_language = self._node_data.code_language
-        code = self._node_data.code
+        code_language = self.node_data.code_language
+        code = self.node_data.code

         # Get variables
         variables = {}
-        for variable_selector in self._node_data.variables:
+        for variable_selector in self.node_data.variables:
             variable_name = variable_selector.variable
             variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
             if isinstance(variable, ArrayFileSegment):

@@ -69,7 +67,7 @@ class CodeNode(Node[CodeNodeData]):
             )

             # Transform result
-            result = self._transform_result(result=result, output_schema=self._node_data.outputs)
+            result = self._transform_result(result=result, output_schema=self.node_data.outputs)
         except (CodeExecutionError, CodeNodeError) as e:
             return NodeRunResult(
                 status=WorkflowNodeExecutionStatus.FAILED, inputs=variables, error=str(e), error_type=type(e).__name__

@@ -406,7 +404,7 @@ class CodeNode(Node[CodeNodeData]):

     @property
     def retry(self) -> bool:
-        return self._node_data.retry_config.retry_enabled
+        return self.node_data.retry_config.retry_enabled

     @staticmethod
     def _convert_boolean_to_int(value: bool | int | float | None) -> int | float | None:
@@ -42,7 +42,6 @@ class DatasourceNode(Node[DatasourceNodeData]):
     Datasource Node
     """

-    _node_data: DatasourceNodeData
     node_type = NodeType.DATASOURCE
     execution_type = NodeExecutionType.ROOT

@@ -51,7 +50,7 @@ class DatasourceNode(Node[DatasourceNodeData]):
         Run the datasource node
         """

-        node_data = self._node_data
+        node_data = self.node_data
         variable_pool = self.graph_runtime_state.variable_pool
         datasource_type_segement = variable_pool.get(["sys", SystemVariableKey.DATASOURCE_TYPE])
         if not datasource_type_segement:
@@ -43,14 +43,12 @@ class DocumentExtractorNode(Node[DocumentExtractorNodeData]):

     node_type = NodeType.DOCUMENT_EXTRACTOR

-    _node_data: DocumentExtractorNodeData
-
     @classmethod
     def version(cls) -> str:
         return "1"

     def _run(self):
-        variable_selector = self._node_data.variable_selector
+        variable_selector = self.node_data.variable_selector
         variable = self.graph_runtime_state.variable_pool.get(variable_selector)

         if variable is None:
@@ -9,8 +9,6 @@ class EndNode(Node[EndNodeData]):
     node_type = NodeType.END
     execution_type = NodeExecutionType.RESPONSE

-    _node_data: EndNodeData
-
     @classmethod
     def version(cls) -> str:
         return "1"

@@ -22,7 +20,7 @@ class EndNode(Node[EndNodeData]):
         This method runs after streaming is complete (if streaming was enabled).
         It collects all output variables and returns them.
         """
-        output_variables = self._node_data.outputs
+        output_variables = self.node_data.outputs

         outputs = {}
         for variable_selector in output_variables:

@@ -44,6 +42,6 @@ class EndNode(Node[EndNodeData]):
             Template instance for this End node
         """
         outputs_config = [
-            {"variable": output.variable, "value_selector": output.value_selector} for output in self._node_data.outputs
+            {"variable": output.variable, "value_selector": output.value_selector} for output in self.node_data.outputs
         ]
         return Template.from_end_outputs(outputs_config)
@@ -34,8 +34,6 @@ logger = logging.getLogger(__name__)
 class HttpRequestNode(Node[HttpRequestNodeData]):
     node_type = NodeType.HTTP_REQUEST

-    _node_data: HttpRequestNodeData
-
     @classmethod
     def get_default_config(cls, filters: Mapping[str, object] | None = None) -> Mapping[str, object]:
         return {

@@ -69,8 +67,8 @@ class HttpRequestNode(Node[HttpRequestNodeData]):
         process_data = {}
         try:
             http_executor = Executor(
-                node_data=self._node_data,
-                timeout=self._get_request_timeout(self._node_data),
+                node_data=self.node_data,
+                timeout=self._get_request_timeout(self.node_data),
                 variable_pool=self.graph_runtime_state.variable_pool,
                 max_retries=0,
             )

@@ -225,4 +223,4 @@ class HttpRequestNode(Node[HttpRequestNodeData]):

     @property
     def retry(self) -> bool:
-        return self._node_data.retry_config.retry_enabled
+        return self.node_data.retry_config.retry_enabled
@@ -25,8 +25,6 @@ class HumanInputNode(Node[HumanInputNodeData]):
         "handle",
     )

-    _node_data: HumanInputNodeData
-
     @classmethod
     def version(cls) -> str:
         return "1"

@@ -49,12 +47,12 @@ class HumanInputNode(Node[HumanInputNodeData]):
     def _is_completion_ready(self) -> bool:
         """Determine whether all required inputs are satisfied."""

-        if not self._node_data.required_variables:
+        if not self.node_data.required_variables:
             return False

         variable_pool = self.graph_runtime_state.variable_pool

-        for selector_str in self._node_data.required_variables:
+        for selector_str in self.node_data.required_variables:
             parts = selector_str.split(".")
             if len(parts) != 2:
                 return False

@@ -74,7 +72,7 @@ class HumanInputNode(Node[HumanInputNodeData]):
             if handle:
                 return handle

-        default_values = self._node_data.default_value_dict
+        default_values = self.node_data.default_value_dict
         for key in self._BRANCH_SELECTION_KEYS:
             handle = self._normalize_branch_value(default_values.get(key))
             if handle:
@@ -16,8 +16,6 @@ class IfElseNode(Node[IfElseNodeData]):
     node_type = NodeType.IF_ELSE
     execution_type = NodeExecutionType.BRANCH

-    _node_data: IfElseNodeData
-
     @classmethod
     def version(cls) -> str:
         return "1"

@@ -37,8 +35,8 @@ class IfElseNode(Node[IfElseNodeData]):
         condition_processor = ConditionProcessor()
         try:
             # Check if the new cases structure is used
-            if self._node_data.cases:
-                for case in self._node_data.cases:
+            if self.node_data.cases:
+                for case in self.node_data.cases:
                     input_conditions, group_result, final_result = condition_processor.process_conditions(
                         variable_pool=self.graph_runtime_state.variable_pool,
                         conditions=case.conditions,

@@ -64,8 +62,8 @@ class IfElseNode(Node[IfElseNodeData]):
                 input_conditions, group_result, final_result = _should_not_use_old_function(  # pyright: ignore [reportDeprecated]
                     condition_processor=condition_processor,
                     variable_pool=self.graph_runtime_state.variable_pool,
-                    conditions=self._node_data.conditions or [],
-                    operator=self._node_data.logical_operator or "and",
+                    conditions=self.node_data.conditions or [],
+                    operator=self.node_data.logical_operator or "and",
                 )

                 selected_case_id = "true" if final_result else "false"
@@ -65,7 +65,6 @@ class IterationNode(LLMUsageTrackingMixin, Node[IterationNodeData]):

     node_type = NodeType.ITERATION
     execution_type = NodeExecutionType.CONTAINER
-    _node_data: IterationNodeData

     @classmethod
     def get_default_config(cls, filters: Mapping[str, object] | None = None) -> Mapping[str, object]:

@@ -136,10 +135,10 @@ class IterationNode(LLMUsageTrackingMixin, Node[IterationNodeData]):
         )

     def _get_iterator_variable(self) -> ArraySegment | NoneSegment:
-        variable = self.graph_runtime_state.variable_pool.get(self._node_data.iterator_selector)
+        variable = self.graph_runtime_state.variable_pool.get(self.node_data.iterator_selector)

         if not variable:
-            raise IteratorVariableNotFoundError(f"iterator variable {self._node_data.iterator_selector} not found")
+            raise IteratorVariableNotFoundError(f"iterator variable {self.node_data.iterator_selector} not found")

         if not isinstance(variable, ArraySegment) and not isinstance(variable, NoneSegment):
             raise InvalidIteratorValueError(f"invalid iterator value: {variable}, please provide a list.")

@@ -174,7 +173,7 @@ class IterationNode(LLMUsageTrackingMixin, Node[IterationNodeData]):
         return cast(list[object], iterator_list_value)

     def _validate_start_node(self) -> None:
-        if not self._node_data.start_node_id:
+        if not self.node_data.start_node_id:
             raise StartNodeIdNotFoundError(f"field start_node_id in iteration {self._node_id} not found")

     def _execute_iterations(

@@ -184,7 +183,7 @@ class IterationNode(LLMUsageTrackingMixin, Node[IterationNodeData]):
         iter_run_map: dict[str, float],
         usage_accumulator: list[LLMUsage],
     ) -> Generator[GraphNodeEventBase | NodeEventBase, None, None]:
-        if self._node_data.is_parallel:
+        if self.node_data.is_parallel:
             # Parallel mode execution
             yield from self._execute_parallel_iterations(
                 iterator_list_value=iterator_list_value,

@@ -231,7 +230,7 @@ class IterationNode(LLMUsageTrackingMixin, Node[IterationNodeData]):
             outputs.extend([None] * len(iterator_list_value))

         # Determine the number of parallel workers
-        max_workers = min(self._node_data.parallel_nums, len(iterator_list_value))
+        max_workers = min(self.node_data.parallel_nums, len(iterator_list_value))

         with ThreadPoolExecutor(max_workers=max_workers) as executor:
             # Submit all iteration tasks

@@ -287,7 +286,7 @@ class IterationNode(LLMUsageTrackingMixin, Node[IterationNodeData]):

                 except Exception as e:
                     # Handle errors based on error_handle_mode
-                    match self._node_data.error_handle_mode:
+                    match self.node_data.error_handle_mode:
                         case ErrorHandleMode.TERMINATED:
                             # Cancel remaining futures and re-raise
                             for f in future_to_index:

@@ -300,7 +299,7 @@ class IterationNode(LLMUsageTrackingMixin, Node[IterationNodeData]):
                             outputs[index] = None  # Will be filtered later

         # Remove None values if in REMOVE_ABNORMAL_OUTPUT mode
-        if self._node_data.error_handle_mode == ErrorHandleMode.REMOVE_ABNORMAL_OUTPUT:
+        if self.node_data.error_handle_mode == ErrorHandleMode.REMOVE_ABNORMAL_OUTPUT:
             outputs[:] = [output for output in outputs if output is not None]

     def _execute_single_iteration_parallel(

@@ -389,7 +388,7 @@ class IterationNode(LLMUsageTrackingMixin, Node[IterationNodeData]):
         If flatten_output is True (default), flattens the list if all elements are lists.
         """
         # If flatten_output is disabled, return outputs as-is
-        if not self._node_data.flatten_output:
+        if not self.node_data.flatten_output:
             return outputs

         if not outputs:

@@ -569,14 +568,14 @@ class IterationNode(LLMUsageTrackingMixin, Node[IterationNodeData]):
                 self._append_iteration_info_to_event(event=event, iter_run_index=current_index)
                 yield event
             elif isinstance(event, (GraphRunSucceededEvent, GraphRunPartialSucceededEvent)):
-                result = variable_pool.get(self._node_data.output_selector)
+                result = variable_pool.get(self.node_data.output_selector)
                 if result is None:
                     outputs.append(None)
                 else:
                     outputs.append(result.to_object())
                 return
             elif isinstance(event, GraphRunFailedEvent):
-                match self._node_data.error_handle_mode:
+                match self.node_data.error_handle_mode:
                     case ErrorHandleMode.TERMINATED:
                         raise IterationNodeError(event.error)
                     case ErrorHandleMode.CONTINUE_ON_ERROR:

@@ -627,7 +626,7 @@ class IterationNode(LLMUsageTrackingMixin, Node[IterationNodeData]):

         # Initialize the iteration graph with the new node factory
         iteration_graph = Graph.init(
-            graph_config=self.graph_config, node_factory=node_factory, root_node_id=self._node_data.start_node_id
+            graph_config=self.graph_config, node_factory=node_factory, root_node_id=self.node_data.start_node_id
         )

         if not iteration_graph:
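For context on the iteration hunks above: the node bounds its worker pool by both `parallel_nums` and the input length, then distinguishes three error-handling modes. A minimal, self-contained sketch of those semantics — my own simplification, not Dify's classes:

```python
from concurrent.futures import ThreadPoolExecutor, as_completed
from enum import Enum

class ErrorHandleMode(Enum):
    TERMINATED = "terminated"
    CONTINUE_ON_ERROR = "continue-on-error"
    REMOVE_ABNORMAL_OUTPUT = "remove-abnormal-output"

def run_iterations(items, fn, mode, parallel_nums=4):
    outputs = [None] * len(items)
    # Never spin up more workers than there are items to process.
    max_workers = min(parallel_nums, len(items))
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = {executor.submit(fn, item): i for i, item in enumerate(items)}
        for future in as_completed(futures):
            i = futures[future]
            try:
                outputs[i] = future.result()
            except Exception:
                if mode is ErrorHandleMode.TERMINATED:
                    raise  # abort: re-raise the first failure
                outputs[i] = None  # keep the slot; maybe filtered below
    if mode is ErrorHandleMode.REMOVE_ABNORMAL_OUTPUT:
        outputs = [o for o in outputs if o is not None]
    return outputs
```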
@@ -11,8 +11,6 @@ class IterationStartNode(Node[IterationStartNodeData]):

     node_type = NodeType.ITERATION_START

-    _node_data: IterationStartNodeData
-
     @classmethod
     def version(cls) -> str:
         return "1"
@@ -35,12 +35,11 @@ default_retrieval_model = {


 class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]):
-    _node_data: KnowledgeIndexNodeData
     node_type = NodeType.KNOWLEDGE_INDEX
     execution_type = NodeExecutionType.RESPONSE

     def _run(self) -> NodeRunResult:  # type: ignore
-        node_data = self._node_data
+        node_data = self.node_data
         variable_pool = self.graph_runtime_state.variable_pool
         dataset_id = variable_pool.get(["sys", SystemVariableKey.DATASET_ID])
         if not dataset_id:
@@ -83,8 +83,6 @@ default_retrieval_model = {
 class KnowledgeRetrievalNode(LLMUsageTrackingMixin, Node[KnowledgeRetrievalNodeData]):
     node_type = NodeType.KNOWLEDGE_RETRIEVAL

-    _node_data: KnowledgeRetrievalNodeData
-
     # Instance attributes specific to LLMNode.
     # Output variable for file
     _file_outputs: list["File"]

@@ -122,7 +120,7 @@ class KnowledgeRetrievalNode(LLMUsageTrackingMixin, Node[KnowledgeRetrievalNodeData]):

     def _run(self) -> NodeRunResult:
         # extract variables
-        variable = self.graph_runtime_state.variable_pool.get(self._node_data.query_variable_selector)
+        variable = self.graph_runtime_state.variable_pool.get(self.node_data.query_variable_selector)
         if not isinstance(variable, StringSegment):
             return NodeRunResult(
                 status=WorkflowNodeExecutionStatus.FAILED,

@@ -163,7 +161,7 @@ class KnowledgeRetrievalNode(LLMUsageTrackingMixin, Node[KnowledgeRetrievalNodeData]):
         # retrieve knowledge
         usage = LLMUsage.empty_usage()
         try:
-            results, usage = self._fetch_dataset_retriever(node_data=self._node_data, query=query)
+            results, usage = self._fetch_dataset_retriever(node_data=self.node_data, query=query)
             outputs = {"result": ArrayObjectSegment(value=results)}
             return NodeRunResult(
                 status=WorkflowNodeExecutionStatus.SUCCEEDED,

@@ -536,7 +534,7 @@ class KnowledgeRetrievalNode(LLMUsageTrackingMixin, Node[KnowledgeRetrievalNodeData]):
             prompt_messages=prompt_messages,
             stop=stop,
             user_id=self.user_id,
-            structured_output_enabled=self._node_data.structured_output_enabled,
+            structured_output_enabled=self.node_data.structured_output_enabled,
             structured_output=None,
             file_saver=self._llm_file_saver,
             file_outputs=self._file_outputs,
@@ -37,8 +37,6 @@ def _negation(filter_: Callable[[_T], bool]) -> Callable[[_T], bool]:
 class ListOperatorNode(Node[ListOperatorNodeData]):
     node_type = NodeType.LIST_OPERATOR

-    _node_data: ListOperatorNodeData
-
     @classmethod
     def version(cls) -> str:
         return "1"

@@ -48,9 +46,9 @@ class ListOperatorNode(Node[ListOperatorNodeData]):
         process_data: dict[str, Sequence[object]] = {}
         outputs: dict[str, Any] = {}

-        variable = self.graph_runtime_state.variable_pool.get(self._node_data.variable)
+        variable = self.graph_runtime_state.variable_pool.get(self.node_data.variable)
         if variable is None:
-            error_message = f"Variable not found for selector: {self._node_data.variable}"
+            error_message = f"Variable not found for selector: {self.node_data.variable}"
             return NodeRunResult(
                 status=WorkflowNodeExecutionStatus.FAILED, error=error_message, inputs=inputs, outputs=outputs
             )

@@ -69,7 +67,7 @@ class ListOperatorNode(Node[ListOperatorNodeData]):
                 outputs=outputs,
             )
         if not isinstance(variable, _SUPPORTED_TYPES_TUPLE):
-            error_message = f"Variable {self._node_data.variable} is not an array type, actual type: {type(variable)}"
+            error_message = f"Variable {self.node_data.variable} is not an array type, actual type: {type(variable)}"
             return NodeRunResult(
                 status=WorkflowNodeExecutionStatus.FAILED, error=error_message, inputs=inputs, outputs=outputs
             )

@@ -83,19 +81,19 @@ class ListOperatorNode(Node[ListOperatorNodeData]):

         try:
             # Filter
-            if self._node_data.filter_by.enabled:
+            if self.node_data.filter_by.enabled:
                 variable = self._apply_filter(variable)

             # Extract
-            if self._node_data.extract_by.enabled:
+            if self.node_data.extract_by.enabled:
                 variable = self._extract_slice(variable)

             # Order
-            if self._node_data.order_by.enabled:
+            if self.node_data.order_by.enabled:
                 variable = self._apply_order(variable)

             # Slice
-            if self._node_data.limit.enabled:
+            if self.node_data.limit.enabled:
                 variable = self._apply_slice(variable)

             outputs = {

@@ -121,7 +119,7 @@ class ListOperatorNode(Node[ListOperatorNodeData]):
     def _apply_filter(self, variable: _SUPPORTED_TYPES_ALIAS) -> _SUPPORTED_TYPES_ALIAS:
         filter_func: Callable[[Any], bool]
         result: list[Any] = []
-        for condition in self._node_data.filter_by.conditions:
+        for condition in self.node_data.filter_by.conditions:
             if isinstance(variable, ArrayStringSegment):
                 if not isinstance(condition.value, str):
                     raise InvalidFilterValueError(f"Invalid filter value: {condition.value}")

@@ -160,22 +158,22 @@ class ListOperatorNode(Node[ListOperatorNodeData]):

     def _apply_order(self, variable: _SUPPORTED_TYPES_ALIAS) -> _SUPPORTED_TYPES_ALIAS:
         if isinstance(variable, (ArrayStringSegment, ArrayNumberSegment, ArrayBooleanSegment)):
-            result = sorted(variable.value, reverse=self._node_data.order_by.value == Order.DESC)
+            result = sorted(variable.value, reverse=self.node_data.order_by.value == Order.DESC)
             variable = variable.model_copy(update={"value": result})
         else:
             result = _order_file(
-                order=self._node_data.order_by.value, order_by=self._node_data.order_by.key, array=variable.value
+                order=self.node_data.order_by.value, order_by=self.node_data.order_by.key, array=variable.value
             )
             variable = variable.model_copy(update={"value": result})

         return variable

     def _apply_slice(self, variable: _SUPPORTED_TYPES_ALIAS) -> _SUPPORTED_TYPES_ALIAS:
-        result = variable.value[: self._node_data.limit.size]
+        result = variable.value[: self.node_data.limit.size]
         return variable.model_copy(update={"value": result})

     def _extract_slice(self, variable: _SUPPORTED_TYPES_ALIAS) -> _SUPPORTED_TYPES_ALIAS:
-        value = int(self.graph_runtime_state.variable_pool.convert_template(self._node_data.extract_by.serial).text)
+        value = int(self.graph_runtime_state.variable_pool.convert_template(self.node_data.extract_by.serial).text)
         if value < 1:
             raise ValueError(f"Invalid serial index: must be >= 1, got {value}")
         if value > len(variable.value):
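The list-operator hunks show a fixed stage order: filter, then extract (a 1-based single-element pick), then order, then slice/limit. A compact sketch of that ordering on plain Python lists, ignoring Dify's segment types:

```python
def list_operator(items, *, predicate=None, serial=None, descending=None, limit=None):
    if predicate is not None:   # Filter
        items = [x for x in items if predicate(x)]
    if serial is not None:      # Extract: 1-based single-element pick
        if not 1 <= serial <= len(items):
            raise ValueError(f"Invalid serial index: {serial}")
        items = [items[serial - 1]]
    if descending is not None:  # Order
        items = sorted(items, reverse=descending)
    if limit is not None:       # Slice to the first N items
        items = items[:limit]
    return items

print(list_operator([3, 1, 2], predicate=lambda x: x > 1, descending=True, limit=1))  # [3]
```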
@@ -102,8 +102,6 @@ logger = logging.getLogger(__name__)
 class LLMNode(Node[LLMNodeData]):
     node_type = NodeType.LLM

-    _node_data: LLMNodeData
-
     # Compiled regex for extracting <think> blocks (with compatibility for attributes)
     _THINK_PATTERN = re.compile(r"<think[^>]*>(.*?)</think>", re.IGNORECASE | re.DOTALL)

@@ -154,13 +152,13 @@ class LLMNode(Node[LLMNodeData]):

         try:
             # init messages template
-            self._node_data.prompt_template = self._transform_chat_messages(self._node_data.prompt_template)
+            self.node_data.prompt_template = self._transform_chat_messages(self.node_data.prompt_template)

             # fetch variables and fetch values from variable pool
-            inputs = self._fetch_inputs(node_data=self._node_data)
+            inputs = self._fetch_inputs(node_data=self.node_data)

             # fetch jinja2 inputs
-            jinja_inputs = self._fetch_jinja_inputs(node_data=self._node_data)
+            jinja_inputs = self._fetch_jinja_inputs(node_data=self.node_data)

             # merge inputs
             inputs.update(jinja_inputs)

@@ -169,9 +167,9 @@ class LLMNode(Node[LLMNodeData]):
             files = (
                 llm_utils.fetch_files(
                     variable_pool=variable_pool,
-                    selector=self._node_data.vision.configs.variable_selector,
+                    selector=self.node_data.vision.configs.variable_selector,
                 )
-                if self._node_data.vision.enabled
+                if self.node_data.vision.enabled
                 else []
             )

@@ -179,7 +177,7 @@ class LLMNode(Node[LLMNodeData]):
                 node_inputs["#files#"] = [file.to_dict() for file in files]

             # fetch context value
-            generator = self._fetch_context(node_data=self._node_data)
+            generator = self._fetch_context(node_data=self.node_data)
             context = None
             for event in generator:
                 context = event.context

@@ -189,7 +187,7 @@ class LLMNode(Node[LLMNodeData]):

             # fetch model config
             model_instance, model_config = LLMNode._fetch_model_config(
-                node_data_model=self._node_data.model,
+                node_data_model=self.node_data.model,
                 tenant_id=self.tenant_id,
             )

@@ -197,13 +195,13 @@ class LLMNode(Node[LLMNodeData]):
             memory = llm_utils.fetch_memory(
                 variable_pool=variable_pool,
                 app_id=self.app_id,
-                node_data_memory=self._node_data.memory,
+                node_data_memory=self.node_data.memory,
                 model_instance=model_instance,
             )

             query: str | None = None
-            if self._node_data.memory:
-                query = self._node_data.memory.query_prompt_template
+            if self.node_data.memory:
+                query = self.node_data.memory.query_prompt_template
                 if not query and (
                     query_variable := variable_pool.get((SYSTEM_VARIABLE_NODE_ID, SystemVariableKey.QUERY))
                 ):

@@ -215,29 +213,29 @@ class LLMNode(Node[LLMNodeData]):
                 context=context,
                 memory=memory,
                 model_config=model_config,
-                prompt_template=self._node_data.prompt_template,
-                memory_config=self._node_data.memory,
-                vision_enabled=self._node_data.vision.enabled,
-                vision_detail=self._node_data.vision.configs.detail,
+                prompt_template=self.node_data.prompt_template,
+                memory_config=self.node_data.memory,
+                vision_enabled=self.node_data.vision.enabled,
+                vision_detail=self.node_data.vision.configs.detail,
                 variable_pool=variable_pool,
-                jinja2_variables=self._node_data.prompt_config.jinja2_variables,
+                jinja2_variables=self.node_data.prompt_config.jinja2_variables,
                 tenant_id=self.tenant_id,
             )

             # handle invoke result
             generator = LLMNode.invoke_llm(
-                node_data_model=self._node_data.model,
+                node_data_model=self.node_data.model,
                 model_instance=model_instance,
                 prompt_messages=prompt_messages,
                 stop=stop,
                 user_id=self.user_id,
-                structured_output_enabled=self._node_data.structured_output_enabled,
-                structured_output=self._node_data.structured_output,
+                structured_output_enabled=self.node_data.structured_output_enabled,
+                structured_output=self.node_data.structured_output,
                 file_saver=self._llm_file_saver,
                 file_outputs=self._file_outputs,
                 node_id=self._node_id,
                 node_type=self.node_type,
-                reasoning_format=self._node_data.reasoning_format,
+                reasoning_format=self.node_data.reasoning_format,
             )

             structured_output: LLMStructuredOutput | None = None

@@ -253,12 +251,12 @@ class LLMNode(Node[LLMNodeData]):
             reasoning_content = event.reasoning_content or ""

             # For downstream nodes, determine clean text based on reasoning_format
-            if self._node_data.reasoning_format == "tagged":
+            if self.node_data.reasoning_format == "tagged":
                 # Keep <think> tags for backward compatibility
                 clean_text = result_text
             else:
                 # Extract clean text from <think> tags
-                clean_text, _ = LLMNode._split_reasoning(result_text, self._node_data.reasoning_format)
+                clean_text, _ = LLMNode._split_reasoning(result_text, self.node_data.reasoning_format)

             # Process structured output if available from the event.
             structured_output = (

@@ -1204,7 +1202,7 @@ class LLMNode(Node[LLMNodeData]):

     @property
     def retry(self) -> bool:
-        return self._node_data.retry_config.retry_enabled
+        return self.node_data.retry_config.retry_enabled


 def _combine_message_content_with_role(
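The `reasoning_format` branch above either keeps `<think>` tags verbatim ("tagged" mode, for backward compatibility) or strips them so downstream nodes see clean text. A standalone sketch of that split, using the same `_THINK_PATTERN` regex shown in the hunk but my own minimal function rather than Dify's `_split_reasoning`:

```python
import re

THINK_PATTERN = re.compile(r"<think[^>]*>(.*?)</think>", re.IGNORECASE | re.DOTALL)

def split_reasoning(text: str) -> tuple[str, str]:
    """Return (clean_text, reasoning) by extracting <think> blocks."""
    reasoning = "\n".join(m.strip() for m in THINK_PATTERN.findall(text))
    clean_text = THINK_PATTERN.sub("", text).strip()
    return clean_text, reasoning

clean, thought = split_reasoning("<think>check the units</think>It is 42.")
print(clean)    # It is 42.
print(thought)  # check the units
```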
@@ -11,8 +11,6 @@ class LoopEndNode(Node[LoopEndNodeData]):

     node_type = NodeType.LOOP_END

-    _node_data: LoopEndNodeData
-
     @classmethod
     def version(cls) -> str:
         return "1"
@@ -46,7 +46,6 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):
     """

     node_type = NodeType.LOOP
-    _node_data: LoopNodeData
     execution_type = NodeExecutionType.CONTAINER

     @classmethod

@@ -56,27 +55,27 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):
     def _run(self) -> Generator:
         """Run the node."""
         # Get inputs
-        loop_count = self._node_data.loop_count
-        break_conditions = self._node_data.break_conditions
-        logical_operator = self._node_data.logical_operator
+        loop_count = self.node_data.loop_count
+        break_conditions = self.node_data.break_conditions
+        logical_operator = self.node_data.logical_operator

         inputs = {"loop_count": loop_count}

-        if not self._node_data.start_node_id:
+        if not self.node_data.start_node_id:
             raise ValueError(f"field start_node_id in loop {self._node_id} not found")

-        root_node_id = self._node_data.start_node_id
+        root_node_id = self.node_data.start_node_id

         # Initialize loop variables in the original variable pool
         loop_variable_selectors = {}
-        if self._node_data.loop_variables:
+        if self.node_data.loop_variables:
             value_processor: dict[Literal["constant", "variable"], Callable[[LoopVariableData], Segment | None]] = {
                 "constant": lambda var: self._get_segment_for_constant(var.var_type, var.value),
                 "variable": lambda var: self.graph_runtime_state.variable_pool.get(var.value)
                 if isinstance(var.value, list)
                 else None,
             }
-            for loop_variable in self._node_data.loop_variables:
+            for loop_variable in self.node_data.loop_variables:
                 if loop_variable.value_type not in value_processor:
                     raise ValueError(
                         f"Invalid value type '{loop_variable.value_type}' for loop variable {loop_variable.label}"

@@ -164,7 +163,7 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):

             yield LoopNextEvent(
                 index=i + 1,
-                pre_loop_output=self._node_data.outputs,
+                pre_loop_output=self.node_data.outputs,
             )

         self._accumulate_usage(loop_usage)

@@ -172,7 +171,7 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):
             yield LoopSucceededEvent(
                 start_at=start_at,
                 inputs=inputs,
-                outputs=self._node_data.outputs,
+                outputs=self.node_data.outputs,
                 steps=loop_count,
                 metadata={
                     WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: loop_usage.total_tokens,

@@ -194,7 +193,7 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):
                     WorkflowNodeExecutionMetadataKey.LOOP_DURATION_MAP: loop_duration_map,
                     WorkflowNodeExecutionMetadataKey.LOOP_VARIABLE_MAP: single_loop_variable_map,
                 },
-                outputs=self._node_data.outputs,
+                outputs=self.node_data.outputs,
                 inputs=inputs,
                 llm_usage=loop_usage,
             )

@@ -252,11 +251,11 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):
         if isinstance(event, GraphRunFailedEvent):
             raise Exception(event.error)

-        for loop_var in self._node_data.loop_variables or []:
+        for loop_var in self.node_data.loop_variables or []:
             key, sel = loop_var.label, [self._node_id, loop_var.label]
             segment = self.graph_runtime_state.variable_pool.get(sel)
-            self._node_data.outputs[key] = segment.value if segment else None
-        self._node_data.outputs["loop_round"] = current_index + 1
+            self.node_data.outputs[key] = segment.value if segment else None
+        self.node_data.outputs["loop_round"] = current_index + 1

         return reach_break_node
@@ -11,8 +11,6 @@ class LoopStartNode(Node[LoopStartNodeData]):

     node_type = NodeType.LOOP_START

-    _node_data: LoopStartNodeData
-
     @classmethod
     def version(cls) -> str:
         return "1"
@@ -90,8 +90,6 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]):

     node_type = NodeType.PARAMETER_EXTRACTOR

-    _node_data: ParameterExtractorNodeData
-
     _model_instance: ModelInstance | None = None
     _model_config: ModelConfigWithCredentialsEntity | None = None

@@ -116,7 +114,7 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]):
         """
         Run the node.
         """
-        node_data = self._node_data
+        node_data = self.node_data
         variable = self.graph_runtime_state.variable_pool.get(node_data.query)
         query = variable.text if variable else ""
@@ -47,8 +47,6 @@ class QuestionClassifierNode(Node[QuestionClassifierNodeData]):
     node_type = NodeType.QUESTION_CLASSIFIER
     execution_type = NodeExecutionType.BRANCH

-    _node_data: QuestionClassifierNodeData
-
     _file_outputs: list["File"]
     _llm_file_saver: LLMFileSaver

@@ -82,7 +80,7 @@ class QuestionClassifierNode(Node[QuestionClassifierNodeData]):
         return "1"

     def _run(self):
-        node_data = self._node_data
+        node_data = self.node_data
         variable_pool = self.graph_runtime_state.variable_pool

         # extract variables
@@ -9,8 +9,6 @@ class StartNode(Node[StartNodeData]):
     node_type = NodeType.START
     execution_type = NodeExecutionType.ROOT

-    _node_data: StartNodeData
-
     @classmethod
     def version(cls) -> str:
         return "1"
@@ -14,8 +14,6 @@ MAX_TEMPLATE_TRANSFORM_OUTPUT_LENGTH = dify_config.TEMPLATE_TRANSFORM_MAX_LENGTH
 class TemplateTransformNode(Node[TemplateTransformNodeData]):
     node_type = NodeType.TEMPLATE_TRANSFORM

-    _node_data: TemplateTransformNodeData
-
     @classmethod
     def get_default_config(cls, filters: Mapping[str, object] | None = None) -> Mapping[str, object]:
         """

@@ -35,14 +33,14 @@ class TemplateTransformNode(Node[TemplateTransformNodeData]):
     def _run(self) -> NodeRunResult:
         # Get variables
         variables: dict[str, Any] = {}
-        for variable_selector in self._node_data.variables:
+        for variable_selector in self.node_data.variables:
             variable_name = variable_selector.variable
             value = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
             variables[variable_name] = value.to_object() if value else None
         # Run code
         try:
             result = CodeExecutor.execute_workflow_code_template(
-                language=CodeLanguage.JINJA2, code=self._node_data.template, inputs=variables
+                language=CodeLanguage.JINJA2, code=self.node_data.template, inputs=variables
             )
         except CodeExecutionError as e:
             return NodeRunResult(inputs=variables, status=WorkflowNodeExecutionStatus.FAILED, error=str(e))
@@ -47,8 +47,6 @@ class ToolNode(Node[ToolNodeData]):

     node_type = NodeType.TOOL

-    _node_data: ToolNodeData
-
     @classmethod
     def version(cls) -> str:
         return "1"

@@ -59,13 +57,11 @@ class ToolNode(Node[ToolNodeData]):
         """
         from core.plugin.impl.exc import PluginDaemonClientSideError, PluginInvokeError

-        node_data = self._node_data
-
         # fetch tool icon
         tool_info = {
-            "provider_type": node_data.provider_type.value,
-            "provider_id": node_data.provider_id,
-            "plugin_unique_identifier": node_data.plugin_unique_identifier,
+            "provider_type": self.node_data.provider_type.value,
+            "provider_id": self.node_data.provider_id,
+            "plugin_unique_identifier": self.node_data.plugin_unique_identifier,
         }

         # get tool runtime

@@ -77,10 +73,10 @@ class ToolNode(Node[ToolNodeData]):
             # But for backward compatibility with historical data
             # this version field judgment is still preserved here.
             variable_pool: VariablePool | None = None
-            if node_data.version != "1" or node_data.tool_node_version is not None:
+            if self.node_data.version != "1" or self.node_data.tool_node_version is not None:
                 variable_pool = self.graph_runtime_state.variable_pool
             tool_runtime = ToolManager.get_workflow_tool_runtime(
-                self.tenant_id, self.app_id, self._node_id, self._node_data, self.invoke_from, variable_pool
+                self.tenant_id, self.app_id, self._node_id, self.node_data, self.invoke_from, variable_pool
             )
         except ToolNodeError as e:
             yield StreamCompletedEvent(

@@ -99,12 +95,12 @@ class ToolNode(Node[ToolNodeData]):
             parameters = self._generate_parameters(
                 tool_parameters=tool_parameters,
                 variable_pool=self.graph_runtime_state.variable_pool,
-                node_data=self._node_data,
+                node_data=self.node_data,
             )
             parameters_for_log = self._generate_parameters(
                 tool_parameters=tool_parameters,
                 variable_pool=self.graph_runtime_state.variable_pool,
-                node_data=self._node_data,
+                node_data=self.node_data,
                 for_log=True,
             )
             # get conversation id

@@ -149,7 +145,7 @@ class ToolNode(Node[ToolNodeData]):
                     status=WorkflowNodeExecutionStatus.FAILED,
                     inputs=parameters_for_log,
                     metadata={WorkflowNodeExecutionMetadataKey.TOOL_INFO: tool_info},
-                    error=f"Failed to invoke tool {node_data.provider_name}: {str(e)}",
+                    error=f"Failed to invoke tool {self.node_data.provider_name}: {str(e)}",
                     error_type=type(e).__name__,
                 )
             )

@@ -159,7 +155,7 @@ class ToolNode(Node[ToolNodeData]):
                     status=WorkflowNodeExecutionStatus.FAILED,
                     inputs=parameters_for_log,
                     metadata={WorkflowNodeExecutionMetadataKey.TOOL_INFO: tool_info},
-                    error=e.to_user_friendly_error(plugin_name=node_data.provider_name),
+                    error=e.to_user_friendly_error(plugin_name=self.node_data.provider_name),
                     error_type=type(e).__name__,
                 )
             )

@@ -495,4 +491,4 @@ class ToolNode(Node[ToolNodeData]):

     @property
     def retry(self) -> bool:
-        return self._node_data.retry_config.retry_enabled
+        return self.node_data.retry_config.retry_enabled
@@ -43,9 +43,9 @@ class TriggerEventNode(Node[TriggerEventNodeData]):
         # Get trigger data passed when workflow was triggered
         metadata = {
             WorkflowNodeExecutionMetadataKey.TRIGGER_INFO: {
-                "provider_id": self._node_data.provider_id,
-                "event_name": self._node_data.event_name,
-                "plugin_unique_identifier": self._node_data.plugin_unique_identifier,
+                "provider_id": self.node_data.provider_id,
+                "event_name": self.node_data.event_name,
+                "plugin_unique_identifier": self.node_data.plugin_unique_identifier,
             },
         }
         node_inputs = dict(self.graph_runtime_state.variable_pool.user_inputs)
@@ -84,7 +84,7 @@ class TriggerWebhookNode(Node[WebhookData]):
         webhook_headers = webhook_data.get("headers", {})
         webhook_headers_lower = {k.lower(): v for k, v in webhook_headers.items()}

-        for header in self._node_data.headers:
+        for header in self.node_data.headers:
             header_name = header.name
             value = _get_normalized(webhook_headers, header_name)
             if value is None:

@@ -93,20 +93,20 @@ class TriggerWebhookNode(Node[WebhookData]):
             outputs[sanitized_name] = value

         # Extract configured query parameters
-        for param in self._node_data.params:
+        for param in self.node_data.params:
             param_name = param.name
             outputs[param_name] = webhook_data.get("query_params", {}).get(param_name)

         # Extract configured body parameters
-        for body_param in self._node_data.body:
+        for body_param in self.node_data.body:
             param_name = body_param.name
             param_type = body_param.type

-            if self._node_data.content_type == ContentType.TEXT:
+            if self.node_data.content_type == ContentType.TEXT:
                 # For text/plain, the entire body is a single string parameter
                 outputs[param_name] = str(webhook_data.get("body", {}).get("raw", ""))
                 continue
-            elif self._node_data.content_type == ContentType.BINARY:
+            elif self.node_data.content_type == ContentType.BINARY:
                 outputs[param_name] = webhook_data.get("body", {}).get("raw", b"")
                 continue
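For the content-type branches above: with a text body the entire raw payload becomes one string output, with a binary body the raw bytes pass through untouched, and only structured bodies are unpacked per configured parameter. A compact sketch of that dispatch — the MIME strings are my assumption; the diff only shows the `ContentType.TEXT` / `ContentType.BINARY` enum members:

```python
def extract_body_param(content_type: str, webhook_body: dict, name: str):
    raw = webhook_body.get("raw", b"")
    if content_type == "text/plain":
        return str(raw)            # entire body as a single string parameter
    if content_type == "application/octet-stream":
        return raw                 # raw bytes pass through untouched
    return webhook_body.get(name)  # structured bodies: look up by parameter name
```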
@@ -23,12 +23,11 @@ class AdvancedSettings(BaseModel):
     groups: list[Group]


-class VariableAssignerNodeData(BaseNodeData):
+class VariableAggregatorNodeData(BaseNodeData):
     """
-    Variable Assigner Node Data.
+    Variable Aggregator Node Data.
     """

-    type: str = "variable-assigner"
     output_type: str
     variables: list[list[str]]
     advanced_settings: AdvancedSettings | None = None
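This rename aligns the Python name with what the node actually does (aggregating variables, not assigning them); judging by the hunk counts, the legacy `type` discriminator field is dropped in the same change, while `NodeType.VARIABLE_AGGREGATOR` below stays as-is. If anything outside this package still imports the old name, a module-level deprecation alias is a common bridge — a hedged sketch, not part of the diff:

```python
import warnings

class VariableAggregatorNodeData:  # stand-in for the real pydantic model
    ...

def __getattr__(name: str):
    # Module-level fallback (PEP 562): keep old imports working for one release.
    if name == "VariableAssignerNodeData":
        warnings.warn(
            "VariableAssignerNodeData was renamed to VariableAggregatorNodeData",
            DeprecationWarning,
            stacklevel=2,
        )
        return VariableAggregatorNodeData
    raise AttributeError(name)
```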
@ -4,14 +4,12 @@ from core.variables.segments import Segment
|
|||
from core.workflow.enums import NodeType, WorkflowNodeExecutionStatus
|
||||
from core.workflow.node_events import NodeRunResult
|
||||
from core.workflow.nodes.base.node import Node
|
||||
from core.workflow.nodes.variable_aggregator.entities import VariableAssignerNodeData
|
||||
from core.workflow.nodes.variable_aggregator.entities import VariableAggregatorNodeData
|
||||
|
||||
|
||||
class VariableAggregatorNode(Node[VariableAssignerNodeData]):
|
||||
class VariableAggregatorNode(Node[VariableAggregatorNodeData]):
|
||||
node_type = NodeType.VARIABLE_AGGREGATOR
|
||||
|
||||
_node_data: VariableAssignerNodeData
|
||||
|
||||
@classmethod
|
||||
def version(cls) -> str:
|
||||
return "1"
|
||||
|
|
@@ -21,8 +19,8 @@ class VariableAggregatorNode(Node[VariableAssignerNodeData]):
         outputs: dict[str, Segment | Mapping[str, Segment]] = {}
         inputs = {}

-        if not self._node_data.advanced_settings or not self._node_data.advanced_settings.group_enabled:
-            for selector in self._node_data.variables:
+        if not self.node_data.advanced_settings or not self.node_data.advanced_settings.group_enabled:
+            for selector in self.node_data.variables:
                 variable = self.graph_runtime_state.variable_pool.get(selector)
                 if variable is not None:
                     outputs = {"output": variable}
@@ -30,7 +28,7 @@ class VariableAggregatorNode(Node[VariableAssignerNodeData]):
                     inputs = {".".join(selector[1:]): variable.to_object()}
                     break
         else:
-            for group in self._node_data.advanced_settings.groups:
+            for group in self.node_data.advanced_settings.groups:
                 for selector in group.variables:
                     variable = self.graph_runtime_state.variable_pool.get(selector)
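Every hunk in this group replaces the private `_node_data` attribute with a public `node_data` accessor, which only works if the base `Node` class exposes the typed data through a property. A minimal sketch of that pattern, assumed from the rename rather than taken from this diff:

    from typing import Generic, TypeVar

    NodeDataT = TypeVar("NodeDataT")

    class Node(Generic[NodeDataT]):
        _node_data: NodeDataT

        @property
        def node_data(self) -> NodeDataT:
            # Read-only public view over the private attribute.
            return self._node_data

Under that assumption, subclasses no longer need their own `_node_data` annotations, which is exactly what the hunks below remove.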
@@ -25,8 +25,6 @@ class VariableAssignerNode(Node[VariableAssignerData]):
     node_type = NodeType.VARIABLE_ASSIGNER
     _conv_var_updater_factory: _CONV_VAR_UPDATER_FACTORY

-    _node_data: VariableAssignerData
-
     def __init__(
         self,
         id: str,
@@ -71,21 +69,21 @@ class VariableAssignerNode(Node[VariableAssignerData]):
         return mapping

     def _run(self) -> NodeRunResult:
-        assigned_variable_selector = self._node_data.assigned_variable_selector
+        assigned_variable_selector = self.node_data.assigned_variable_selector
         # Should be String, Number, Object, ArrayString, ArrayNumber, ArrayObject
         original_variable = self.graph_runtime_state.variable_pool.get(assigned_variable_selector)
         if not isinstance(original_variable, Variable):
             raise VariableOperatorNodeError("assigned variable not found")

-        match self._node_data.write_mode:
+        match self.node_data.write_mode:
             case WriteMode.OVER_WRITE:
-                income_value = self.graph_runtime_state.variable_pool.get(self._node_data.input_variable_selector)
+                income_value = self.graph_runtime_state.variable_pool.get(self.node_data.input_variable_selector)
                 if not income_value:
                     raise VariableOperatorNodeError("input value not found")
                 updated_variable = original_variable.model_copy(update={"value": income_value.value})

             case WriteMode.APPEND:
-                income_value = self.graph_runtime_state.variable_pool.get(self._node_data.input_variable_selector)
+                income_value = self.graph_runtime_state.variable_pool.get(self.node_data.input_variable_selector)
                 if not income_value:
                     raise VariableOperatorNodeError("input value not found")
                 updated_value = original_variable.value + [income_value.value]
@@ -53,8 +53,6 @@ def _source_mapping_from_item(mapping: MutableMapping[str, Sequence[str]], node_
 class VariableAssignerNode(Node[VariableAssignerNodeData]):
     node_type = NodeType.VARIABLE_ASSIGNER

-    _node_data: VariableAssignerNodeData
-
     def blocks_variable_output(self, variable_selectors: set[tuple[str, ...]]) -> bool:
         """
         Check if this Variable Assigner node blocks the output of specific variables.
@@ -62,7 +60,7 @@ class VariableAssignerNode(Node[VariableAssignerNodeData]):
         Returns True if this node updates any of the requested conversation variables.
         """
         # Check each item in this Variable Assigner node
-        for item in self._node_data.items:
+        for item in self.node_data.items:
             # Convert the item's variable_selector to tuple for comparison
             item_selector_tuple = tuple(item.variable_selector)
@@ -97,13 +95,13 @@ class VariableAssignerNode(Node[VariableAssignerNodeData]):
         return var_mapping

     def _run(self) -> NodeRunResult:
-        inputs = self._node_data.model_dump()
+        inputs = self.node_data.model_dump()
         process_data: dict[str, Any] = {}
         # NOTE: This node has no outputs
         updated_variable_selectors: list[Sequence[str]] = []

         try:
-            for item in self._node_data.items:
+            for item in self.node_data.items:
                 variable = self.graph_runtime_state.variable_pool.get(item.variable_selector)

                 # ==================== Validation Part
@@ -86,7 +86,7 @@ class FeedbackService:
         export_data = []
         for feedback, message, conversation, app, account in results:
             # Get the user query from the message
-            user_query = message.query or message.inputs.get("query", "") if message.inputs else ""
+            user_query = message.query or (message.inputs.get("query", "") if message.inputs else "")

             # Format the feedback data
             feedback_record = {
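This FeedbackService change is a behavior fix, not a style change: Python's conditional expression binds more loosely than `or`, so the unparenthesized version parses as `(message.query or message.inputs.get("query", "")) if message.inputs else ""` and throws away a valid `message.query` whenever `inputs` is empty. A small standalone demonstration:

    query, inputs = "What is AI?", None

    before = query or inputs.get("query", "") if inputs else ""
    after = query or (inputs.get("query", "") if inputs else "")

    print(before)  # "" - the whole expression is gated on `inputs`
    print(after)   # "What is AI?" - only the fallback is gated on `inputs`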
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1 @@
"""Unit tests for core.rag.embedding module."""

File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,825 @@
"""
Comprehensive unit tests for Provider models.

This test suite covers:
- ProviderType and ProviderQuotaType enum validation
- Provider model creation and properties
- ProviderModel credential management
- TenantDefaultModel configuration
- TenantPreferredModelProvider settings
- ProviderOrder payment tracking
- ProviderModelSetting load balancing
- LoadBalancingModelConfig management
- ProviderCredential storage
- ProviderModelCredential storage
"""

from datetime import UTC, datetime
from uuid import uuid4

import pytest

from models.provider import (
    LoadBalancingModelConfig,
    Provider,
    ProviderCredential,
    ProviderModel,
    ProviderModelCredential,
    ProviderModelSetting,
    ProviderOrder,
    ProviderQuotaType,
    ProviderType,
    TenantDefaultModel,
    TenantPreferredModelProvider,
)

class TestProviderTypeEnum:
    """Test suite for ProviderType enum validation."""

    def test_provider_type_custom_value(self):
        """Test ProviderType CUSTOM enum value."""
        # Assert
        assert ProviderType.CUSTOM.value == "custom"

    def test_provider_type_system_value(self):
        """Test ProviderType SYSTEM enum value."""
        # Assert
        assert ProviderType.SYSTEM.value == "system"

    def test_provider_type_value_of_custom(self):
        """Test ProviderType.value_of returns CUSTOM for 'custom' string."""
        # Act
        result = ProviderType.value_of("custom")

        # Assert
        assert result == ProviderType.CUSTOM

    def test_provider_type_value_of_system(self):
        """Test ProviderType.value_of returns SYSTEM for 'system' string."""
        # Act
        result = ProviderType.value_of("system")

        # Assert
        assert result == ProviderType.SYSTEM

    def test_provider_type_value_of_invalid_raises_error(self):
        """Test ProviderType.value_of raises ValueError for invalid value."""
        # Act & Assert
        with pytest.raises(ValueError, match="No matching enum found"):
            ProviderType.value_of("invalid_type")

    def test_provider_type_iteration(self):
        """Test iterating over ProviderType enum members."""
        # Act
        members = list(ProviderType)

        # Assert
        assert len(members) == 2
        assert ProviderType.CUSTOM in members
        assert ProviderType.SYSTEM in members

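# NOTE (review): the `value_of` lookups above follow a common enum pattern.
# The sketch below is inferred from these tests, not copied from
# models/provider.py; it shows one way such a classmethod is typically written.
def _value_of_sketch(cls, value: str):
    """Illustrative only: scan enum members and match on .value."""
    for member in cls:
        if member.value == value:
            return member
    raise ValueError(f"No matching enum found for value '{value}'")
# e.g. _value_of_sketch(ProviderType, "custom") == ProviderType.CUSTOM
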
class TestProviderQuotaTypeEnum:
    """Test suite for ProviderQuotaType enum validation."""

    def test_provider_quota_type_paid_value(self):
        """Test ProviderQuotaType PAID enum value."""
        # Assert
        assert ProviderQuotaType.PAID.value == "paid"

    def test_provider_quota_type_free_value(self):
        """Test ProviderQuotaType FREE enum value."""
        # Assert
        assert ProviderQuotaType.FREE.value == "free"

    def test_provider_quota_type_trial_value(self):
        """Test ProviderQuotaType TRIAL enum value."""
        # Assert
        assert ProviderQuotaType.TRIAL.value == "trial"

    def test_provider_quota_type_value_of_paid(self):
        """Test ProviderQuotaType.value_of returns PAID for 'paid' string."""
        # Act
        result = ProviderQuotaType.value_of("paid")

        # Assert
        assert result == ProviderQuotaType.PAID

    def test_provider_quota_type_value_of_free(self):
        """Test ProviderQuotaType.value_of returns FREE for 'free' string."""
        # Act
        result = ProviderQuotaType.value_of("free")

        # Assert
        assert result == ProviderQuotaType.FREE

    def test_provider_quota_type_value_of_trial(self):
        """Test ProviderQuotaType.value_of returns TRIAL for 'trial' string."""
        # Act
        result = ProviderQuotaType.value_of("trial")

        # Assert
        assert result == ProviderQuotaType.TRIAL

    def test_provider_quota_type_value_of_invalid_raises_error(self):
        """Test ProviderQuotaType.value_of raises ValueError for invalid value."""
        # Act & Assert
        with pytest.raises(ValueError, match="No matching enum found"):
            ProviderQuotaType.value_of("invalid_quota")

    def test_provider_quota_type_iteration(self):
        """Test iterating over ProviderQuotaType enum members."""
        # Act
        members = list(ProviderQuotaType)

        # Assert
        assert len(members) == 3
        assert ProviderQuotaType.PAID in members
        assert ProviderQuotaType.FREE in members
        assert ProviderQuotaType.TRIAL in members

class TestProviderModel:
    """Test suite for Provider model validation and operations."""

    def test_provider_creation_with_required_fields(self):
        """Test creating a provider with all required fields."""
        # Arrange
        tenant_id = str(uuid4())
        provider_name = "openai"

        # Act
        provider = Provider(
            tenant_id=tenant_id,
            provider_name=provider_name,
        )

        # Assert
        assert provider.tenant_id == tenant_id
        assert provider.provider_name == provider_name
        assert provider.provider_type == "custom"
        assert provider.is_valid is False
        assert provider.quota_used == 0

    def test_provider_creation_with_all_fields(self):
        """Test creating a provider with all optional fields."""
        # Arrange
        tenant_id = str(uuid4())
        credential_id = str(uuid4())

        # Act
        provider = Provider(
            tenant_id=tenant_id,
            provider_name="anthropic",
            provider_type="system",
            is_valid=True,
            credential_id=credential_id,
            quota_type="paid",
            quota_limit=10000,
            quota_used=500,
        )

        # Assert
        assert provider.tenant_id == tenant_id
        assert provider.provider_name == "anthropic"
        assert provider.provider_type == "system"
        assert provider.is_valid is True
        assert provider.credential_id == credential_id
        assert provider.quota_type == "paid"
        assert provider.quota_limit == 10000
        assert provider.quota_used == 500

    def test_provider_default_values(self):
        """Test provider default values are set correctly."""
        # Arrange & Act
        provider = Provider(
            tenant_id=str(uuid4()),
            provider_name="test_provider",
        )

        # Assert
        assert provider.provider_type == "custom"
        assert provider.is_valid is False
        assert provider.quota_type == ""
        assert provider.quota_limit is None
        assert provider.quota_used == 0
        assert provider.credential_id is None

    def test_provider_repr(self):
        """Test provider __repr__ method."""
        # Arrange
        tenant_id = str(uuid4())
        provider = Provider(
            tenant_id=tenant_id,
            provider_name="openai",
            provider_type="custom",
        )

        # Act
        repr_str = repr(provider)

        # Assert
        assert "Provider" in repr_str
        assert "openai" in repr_str
        assert "custom" in repr_str

    def test_provider_token_is_set_false_when_no_credential(self):
        """Test token_is_set returns False when no credential."""
        # Arrange
        provider = Provider(
            tenant_id=str(uuid4()),
            provider_name="openai",
        )

        # Act & Assert
        assert provider.token_is_set is False

    def test_provider_is_enabled_false_when_not_valid(self):
        """Test is_enabled returns False when provider is not valid."""
        # Arrange
        provider = Provider(
            tenant_id=str(uuid4()),
            provider_name="openai",
            is_valid=False,
        )

        # Act & Assert
        assert provider.is_enabled is False

    def test_provider_is_enabled_true_for_valid_system_provider(self):
        """Test is_enabled returns True for valid system provider."""
        # Arrange
        provider = Provider(
            tenant_id=str(uuid4()),
            provider_name="openai",
            provider_type=ProviderType.SYSTEM.value,
            is_valid=True,
        )

        # Act & Assert
        assert provider.is_enabled is True

    def test_provider_quota_tracking(self):
        """Test provider quota tracking fields."""
        # Arrange
        provider = Provider(
            tenant_id=str(uuid4()),
            provider_name="openai",
            quota_type="trial",
            quota_limit=1000,
            quota_used=250,
        )

        # Assert
        assert provider.quota_type == "trial"
        assert provider.quota_limit == 1000
        assert provider.quota_used == 250
        remaining = provider.quota_limit - provider.quota_used
        assert remaining == 750

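# NOTE (review): taken together, the `token_is_set` and `is_enabled`
# assertions above are consistent with logic like the sketch below. This is
# an inference from the tests, not the actual Provider model code.
def _is_enabled_sketch(provider) -> bool:
    """Illustrative only: invalid providers are never enabled, and system
    providers do not require a stored token."""
    if not provider.is_valid:
        return False
    return provider.provider_type == ProviderType.SYSTEM.value or provider.token_is_set
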
class TestProviderModelEntity:
    """Test suite for ProviderModel entity validation."""

    def test_provider_model_creation_with_required_fields(self):
        """Test creating a provider model with required fields."""
        # Arrange
        tenant_id = str(uuid4())

        # Act
        provider_model = ProviderModel(
            tenant_id=tenant_id,
            provider_name="openai",
            model_name="gpt-4",
            model_type="llm",
        )

        # Assert
        assert provider_model.tenant_id == tenant_id
        assert provider_model.provider_name == "openai"
        assert provider_model.model_name == "gpt-4"
        assert provider_model.model_type == "llm"
        assert provider_model.is_valid is False

    def test_provider_model_with_credential(self):
        """Test provider model with credential ID."""
        # Arrange
        credential_id = str(uuid4())

        # Act
        provider_model = ProviderModel(
            tenant_id=str(uuid4()),
            provider_name="anthropic",
            model_name="claude-3",
            model_type="llm",
            credential_id=credential_id,
            is_valid=True,
        )

        # Assert
        assert provider_model.credential_id == credential_id
        assert provider_model.is_valid is True

    def test_provider_model_default_values(self):
        """Test provider model default values."""
        # Arrange & Act
        provider_model = ProviderModel(
            tenant_id=str(uuid4()),
            provider_name="openai",
            model_name="gpt-3.5-turbo",
            model_type="llm",
        )

        # Assert
        assert provider_model.is_valid is False
        assert provider_model.credential_id is None

    def test_provider_model_different_types(self):
        """Test provider model with different model types."""
        # Arrange
        tenant_id = str(uuid4())

        # Act - LLM type
        llm_model = ProviderModel(
            tenant_id=tenant_id,
            provider_name="openai",
            model_name="gpt-4",
            model_type="llm",
        )

        # Act - Embedding type
        embedding_model = ProviderModel(
            tenant_id=tenant_id,
            provider_name="openai",
            model_name="text-embedding-ada-002",
            model_type="text-embedding",
        )

        # Act - Speech2Text type
        speech_model = ProviderModel(
            tenant_id=tenant_id,
            provider_name="openai",
            model_name="whisper-1",
            model_type="speech2text",
        )

        # Assert
        assert llm_model.model_type == "llm"
        assert embedding_model.model_type == "text-embedding"
        assert speech_model.model_type == "speech2text"

class TestTenantDefaultModel:
    """Test suite for TenantDefaultModel configuration."""

    def test_tenant_default_model_creation(self):
        """Test creating a tenant default model."""
        # Arrange
        tenant_id = str(uuid4())

        # Act
        default_model = TenantDefaultModel(
            tenant_id=tenant_id,
            provider_name="openai",
            model_name="gpt-4",
            model_type="llm",
        )

        # Assert
        assert default_model.tenant_id == tenant_id
        assert default_model.provider_name == "openai"
        assert default_model.model_name == "gpt-4"
        assert default_model.model_type == "llm"

    def test_tenant_default_model_for_different_types(self):
        """Test tenant default models for different model types."""
        # Arrange
        tenant_id = str(uuid4())

        # Act
        llm_default = TenantDefaultModel(
            tenant_id=tenant_id,
            provider_name="openai",
            model_name="gpt-4",
            model_type="llm",
        )

        embedding_default = TenantDefaultModel(
            tenant_id=tenant_id,
            provider_name="openai",
            model_name="text-embedding-3-small",
            model_type="text-embedding",
        )

        # Assert
        assert llm_default.model_type == "llm"
        assert embedding_default.model_type == "text-embedding"

class TestTenantPreferredModelProvider:
    """Test suite for TenantPreferredModelProvider settings."""

    def test_tenant_preferred_provider_creation(self):
        """Test creating a tenant preferred model provider."""
        # Arrange
        tenant_id = str(uuid4())

        # Act
        preferred = TenantPreferredModelProvider(
            tenant_id=tenant_id,
            provider_name="openai",
            preferred_provider_type="custom",
        )

        # Assert
        assert preferred.tenant_id == tenant_id
        assert preferred.provider_name == "openai"
        assert preferred.preferred_provider_type == "custom"

    def test_tenant_preferred_provider_system_type(self):
        """Test tenant preferred provider with system type."""
        # Arrange & Act
        preferred = TenantPreferredModelProvider(
            tenant_id=str(uuid4()),
            provider_name="anthropic",
            preferred_provider_type="system",
        )

        # Assert
        assert preferred.preferred_provider_type == "system"

class TestProviderOrder:
    """Test suite for ProviderOrder payment tracking."""

    def test_provider_order_creation_with_required_fields(self):
        """Test creating a provider order with required fields."""
        # Arrange
        tenant_id = str(uuid4())
        account_id = str(uuid4())

        # Act
        order = ProviderOrder(
            tenant_id=tenant_id,
            provider_name="openai",
            account_id=account_id,
            payment_product_id="prod_123",
            payment_id=None,
            transaction_id=None,
            quantity=1,
            currency=None,
            total_amount=None,
            payment_status="wait_pay",
            paid_at=None,
            pay_failed_at=None,
            refunded_at=None,
        )

        # Assert
        assert order.tenant_id == tenant_id
        assert order.provider_name == "openai"
        assert order.account_id == account_id
        assert order.payment_product_id == "prod_123"
        assert order.payment_status == "wait_pay"
        assert order.quantity == 1

    def test_provider_order_with_payment_details(self):
        """Test provider order with full payment details."""
        # Arrange
        tenant_id = str(uuid4())
        account_id = str(uuid4())
        paid_time = datetime.now(UTC)

        # Act
        order = ProviderOrder(
            tenant_id=tenant_id,
            provider_name="openai",
            account_id=account_id,
            payment_product_id="prod_456",
            payment_id="pay_789",
            transaction_id="txn_abc",
            quantity=5,
            currency="USD",
            total_amount=9999,
            payment_status="paid",
            paid_at=paid_time,
            pay_failed_at=None,
            refunded_at=None,
        )

        # Assert
        assert order.payment_id == "pay_789"
        assert order.transaction_id == "txn_abc"
        assert order.quantity == 5
        assert order.currency == "USD"
        assert order.total_amount == 9999
        assert order.payment_status == "paid"
        assert order.paid_at == paid_time

    def test_provider_order_payment_statuses(self):
        """Test provider order with different payment statuses."""
        # Arrange
        base_params = {
            "tenant_id": str(uuid4()),
            "provider_name": "openai",
            "account_id": str(uuid4()),
            "payment_product_id": "prod_123",
            "payment_id": None,
            "transaction_id": None,
            "quantity": 1,
            "currency": None,
            "total_amount": None,
            "paid_at": None,
            "pay_failed_at": None,
            "refunded_at": None,
        }

        # Act & Assert - Wait pay status
        wait_order = ProviderOrder(**base_params, payment_status="wait_pay")
        assert wait_order.payment_status == "wait_pay"

        # Act & Assert - Paid status
        paid_order = ProviderOrder(**base_params, payment_status="paid")
        assert paid_order.payment_status == "paid"

        # Act & Assert - Failed status
        failed_params = {**base_params, "pay_failed_at": datetime.now(UTC)}
        failed_order = ProviderOrder(**failed_params, payment_status="failed")
        assert failed_order.payment_status == "failed"
        assert failed_order.pay_failed_at is not None

        # Act & Assert - Refunded status
        refunded_params = {**base_params, "refunded_at": datetime.now(UTC)}
        refunded_order = ProviderOrder(**refunded_params, payment_status="refunded")
        assert refunded_order.payment_status == "refunded"
        assert refunded_order.refunded_at is not None

class TestProviderModelSetting:
    """Test suite for ProviderModelSetting load balancing configuration."""

    def test_provider_model_setting_creation(self):
        """Test creating a provider model setting."""
        # Arrange
        tenant_id = str(uuid4())

        # Act
        setting = ProviderModelSetting(
            tenant_id=tenant_id,
            provider_name="openai",
            model_name="gpt-4",
            model_type="llm",
        )

        # Assert
        assert setting.tenant_id == tenant_id
        assert setting.provider_name == "openai"
        assert setting.model_name == "gpt-4"
        assert setting.model_type == "llm"
        assert setting.enabled is True
        assert setting.load_balancing_enabled is False

    def test_provider_model_setting_with_load_balancing(self):
        """Test provider model setting with load balancing enabled."""
        # Arrange & Act
        setting = ProviderModelSetting(
            tenant_id=str(uuid4()),
            provider_name="openai",
            model_name="gpt-4",
            model_type="llm",
            enabled=True,
            load_balancing_enabled=True,
        )

        # Assert
        assert setting.enabled is True
        assert setting.load_balancing_enabled is True

    def test_provider_model_setting_disabled(self):
        """Test disabled provider model setting."""
        # Arrange & Act
        setting = ProviderModelSetting(
            tenant_id=str(uuid4()),
            provider_name="openai",
            model_name="gpt-4",
            model_type="llm",
            enabled=False,
        )

        # Assert
        assert setting.enabled is False

class TestLoadBalancingModelConfig:
    """Test suite for LoadBalancingModelConfig management."""

    def test_load_balancing_config_creation(self):
        """Test creating a load balancing model config."""
        # Arrange
        tenant_id = str(uuid4())

        # Act
        config = LoadBalancingModelConfig(
            tenant_id=tenant_id,
            provider_name="openai",
            model_name="gpt-4",
            model_type="llm",
            name="Primary API Key",
        )

        # Assert
        assert config.tenant_id == tenant_id
        assert config.provider_name == "openai"
        assert config.model_name == "gpt-4"
        assert config.model_type == "llm"
        assert config.name == "Primary API Key"
        assert config.enabled is True

    def test_load_balancing_config_with_credentials(self):
        """Test load balancing config with credential details."""
        # Arrange
        credential_id = str(uuid4())

        # Act
        config = LoadBalancingModelConfig(
            tenant_id=str(uuid4()),
            provider_name="openai",
            model_name="gpt-4",
            model_type="llm",
            name="Secondary API Key",
            encrypted_config='{"api_key": "encrypted_value"}',
            credential_id=credential_id,
            credential_source_type="custom",
        )

        # Assert
        assert config.encrypted_config == '{"api_key": "encrypted_value"}'
        assert config.credential_id == credential_id
        assert config.credential_source_type == "custom"

    def test_load_balancing_config_disabled(self):
        """Test disabled load balancing config."""
        # Arrange & Act
        config = LoadBalancingModelConfig(
            tenant_id=str(uuid4()),
            provider_name="openai",
            model_name="gpt-4",
            model_type="llm",
            name="Disabled Config",
            enabled=False,
        )

        # Assert
        assert config.enabled is False

    def test_load_balancing_config_multiple_entries(self):
        """Test multiple load balancing configs for same model."""
        # Arrange
        tenant_id = str(uuid4())
        base_params = {
            "tenant_id": tenant_id,
            "provider_name": "openai",
            "model_name": "gpt-4",
            "model_type": "llm",
        }

        # Act
        primary = LoadBalancingModelConfig(**base_params, name="Primary Key")
        secondary = LoadBalancingModelConfig(**base_params, name="Secondary Key")
        backup = LoadBalancingModelConfig(**base_params, name="Backup Key", enabled=False)

        # Assert
        assert primary.name == "Primary Key"
        assert secondary.name == "Secondary Key"
        assert backup.name == "Backup Key"
        assert primary.enabled is True
        assert secondary.enabled is True
        assert backup.enabled is False

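# NOTE (review): several enabled LoadBalancingModelConfig rows per model
# imply rotation at request time. The selection strategy used by Dify's
# runtime is not shown in this diff; the round-robin sketch below is purely
# illustrative.
def _round_robin_sketch(configs):
    """Illustrative only: cycle over the enabled configs forever."""
    from itertools import cycle

    enabled = [c for c in configs if c.enabled]
    if not enabled:
        raise ValueError("no enabled load balancing configs")
    yield from cycle(enabled)
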
class TestProviderCredential:
    """Test suite for ProviderCredential storage."""

    def test_provider_credential_creation(self):
        """Test creating a provider credential."""
        # Arrange
        tenant_id = str(uuid4())

        # Act
        credential = ProviderCredential(
            tenant_id=tenant_id,
            provider_name="openai",
            credential_name="Production API Key",
            encrypted_config='{"api_key": "sk-encrypted..."}',
        )

        # Assert
        assert credential.tenant_id == tenant_id
        assert credential.provider_name == "openai"
        assert credential.credential_name == "Production API Key"
        assert credential.encrypted_config == '{"api_key": "sk-encrypted..."}'

    def test_provider_credential_multiple_for_same_provider(self):
        """Test multiple credentials for the same provider."""
        # Arrange
        tenant_id = str(uuid4())

        # Act
        prod_cred = ProviderCredential(
            tenant_id=tenant_id,
            provider_name="openai",
            credential_name="Production",
            encrypted_config='{"api_key": "prod_key"}',
        )

        dev_cred = ProviderCredential(
            tenant_id=tenant_id,
            provider_name="openai",
            credential_name="Development",
            encrypted_config='{"api_key": "dev_key"}',
        )

        # Assert
        assert prod_cred.credential_name == "Production"
        assert dev_cred.credential_name == "Development"
        assert prod_cred.provider_name == dev_cred.provider_name

class TestProviderModelCredential:
    """Test suite for ProviderModelCredential storage."""

    def test_provider_model_credential_creation(self):
        """Test creating a provider model credential."""
        # Arrange
        tenant_id = str(uuid4())

        # Act
        credential = ProviderModelCredential(
            tenant_id=tenant_id,
            provider_name="openai",
            model_name="gpt-4",
            model_type="llm",
            credential_name="GPT-4 API Key",
            encrypted_config='{"api_key": "sk-model-specific..."}',
        )

        # Assert
        assert credential.tenant_id == tenant_id
        assert credential.provider_name == "openai"
        assert credential.model_name == "gpt-4"
        assert credential.model_type == "llm"
        assert credential.credential_name == "GPT-4 API Key"

    def test_provider_model_credential_different_models(self):
        """Test credentials for different models of same provider."""
        # Arrange
        tenant_id = str(uuid4())

        # Act
        gpt4_cred = ProviderModelCredential(
            tenant_id=tenant_id,
            provider_name="openai",
            model_name="gpt-4",
            model_type="llm",
            credential_name="GPT-4 Key",
            encrypted_config='{"api_key": "gpt4_key"}',
        )

        embedding_cred = ProviderModelCredential(
            tenant_id=tenant_id,
            provider_name="openai",
            model_name="text-embedding-3-large",
            model_type="text-embedding",
            credential_name="Embedding Key",
            encrypted_config='{"api_key": "embedding_key"}',
        )

        # Assert
        assert gpt4_cred.model_name == "gpt-4"
        assert gpt4_cred.model_type == "llm"
        assert embedding_cred.model_name == "text-embedding-3-large"
        assert embedding_cred.model_type == "text-embedding"

    def test_provider_model_credential_with_complex_config(self):
        """Test provider model credential with complex encrypted config."""
        # Arrange
        complex_config = (
            '{"api_key": "sk-xxx", "organization_id": "org-123", '
            '"base_url": "https://api.openai.com/v1", "timeout": 30}'
        )

        # Act
        credential = ProviderModelCredential(
            tenant_id=str(uuid4()),
            provider_name="openai",
            model_name="gpt-4-turbo",
            model_type="llm",
            credential_name="Custom Config",
            encrypted_config=complex_config,
        )

        # Assert
        assert credential.encrypted_config == complex_config
        assert "organization_id" in credential.encrypted_config
        assert "base_url" in credential.encrypted_config
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large

@@ -0,0 +1,626 @@
import csv
import io
import json
from datetime import datetime
from unittest.mock import MagicMock, patch

import pytest

from services.feedback_service import FeedbackService


class TestFeedbackServiceFactory:
    """Factory class for creating test data and mock objects for feedback service tests."""

    @staticmethod
    def create_feedback_mock(
        feedback_id: str = "feedback-123",
        app_id: str = "app-456",
        conversation_id: str = "conv-789",
        message_id: str = "msg-001",
        rating: str = "like",
        content: str | None = "Great response!",
        from_source: str = "user",
        from_account_id: str | None = None,
        from_end_user_id: str | None = "end-user-001",
        created_at: datetime | None = None,
    ) -> MagicMock:
        """Create a mock MessageFeedback object."""
        feedback = MagicMock()
        feedback.id = feedback_id
        feedback.app_id = app_id
        feedback.conversation_id = conversation_id
        feedback.message_id = message_id
        feedback.rating = rating
        feedback.content = content
        feedback.from_source = from_source
        feedback.from_account_id = from_account_id
        feedback.from_end_user_id = from_end_user_id
        feedback.created_at = created_at or datetime.now()
        return feedback

    @staticmethod
    def create_message_mock(
        message_id: str = "msg-001",
        query: str = "What is AI?",
        answer: str = "AI stands for Artificial Intelligence.",
        inputs: dict | None = None,
        created_at: datetime | None = None,
    ):
        """Create a mock Message object."""

        # Create a simple object with instance attributes
        # Using a class with __init__ ensures attributes are instance attributes
        class Message:
            def __init__(self):
                self.id = message_id
                self.query = query
                self.answer = answer
                self.inputs = inputs
                self.created_at = created_at or datetime.now()

        return Message()

    @staticmethod
    def create_conversation_mock(
        conversation_id: str = "conv-789",
        name: str | None = "Test Conversation",
    ) -> MagicMock:
        """Create a mock Conversation object."""
        conversation = MagicMock()
        conversation.id = conversation_id
        conversation.name = name
        return conversation

    @staticmethod
    def create_app_mock(
        app_id: str = "app-456",
        name: str = "Test App",
    ) -> MagicMock:
        """Create a mock App object."""
        app = MagicMock()
        app.id = app_id
        app.name = name
        return app

    @staticmethod
    def create_account_mock(
        account_id: str = "account-123",
        name: str = "Test Admin",
    ) -> MagicMock:
        """Create a mock Account object."""
        account = MagicMock()
        account.id = account_id
        account.name = name
        return account

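# NOTE (review): the plain `Message` class in the factory above matters
# because a bare MagicMock auto-creates truthy attributes, so a check like
# `if message.inputs:` could never take the falsy branch. Demonstration:
#
#     mock = MagicMock()
#     bool(mock.inputs)   # True: attribute access returns a new MagicMock
#
#     msg = TestFeedbackServiceFactory.create_message_mock(inputs=None)
#     bool(msg.inputs)    # False: a real None survives truthiness checks
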
class TestFeedbackService:
    """
    Comprehensive unit tests for FeedbackService.

    This test suite covers:
    - CSV and JSON export formats
    - All filter combinations
    - Edge cases and error handling
    - Response validation
    """

    @pytest.fixture
    def factory(self):
        """Provide test data factory."""
        return TestFeedbackServiceFactory()

    @pytest.fixture
    def sample_feedback_data(self, factory):
        """Create sample feedback data for testing."""
        feedback = factory.create_feedback_mock(
            rating="like",
            content="Excellent answer!",
            from_source="user",
        )
        message = factory.create_message_mock(
            query="What is Python?",
            answer="Python is a programming language.",
        )
        conversation = factory.create_conversation_mock(name="Python Discussion")
        app = factory.create_app_mock(name="AI Assistant")
        account = factory.create_account_mock(name="Admin User")

        return [(feedback, message, conversation, app, account)]

    # Test 01: CSV Export - Basic Functionality
    @patch("services.feedback_service.db")
    def test_export_feedbacks_csv_basic(self, mock_db, factory, sample_feedback_data):
        """Test basic CSV export with single feedback record."""
        # Arrange
        mock_query = MagicMock()
        # Configure the mock to return itself for all chaining methods
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = sample_feedback_data

        # Set up the session.query to return our mock
        mock_db.session.query.return_value = mock_query

        # Act
        response = FeedbackService.export_feedbacks(app_id="app-456", format_type="csv")

        # Assert
        assert response.mimetype == "text/csv"
        assert "charset=utf-8-sig" in response.content_type
        assert "attachment" in response.headers["Content-Disposition"]
        assert "dify_feedback_export_app-456" in response.headers["Content-Disposition"]

        # Verify CSV content
        csv_content = response.get_data(as_text=True)
        reader = csv.DictReader(io.StringIO(csv_content))
        rows = list(reader)

        assert len(rows) == 1
        assert rows[0]["feedback_rating"] == "👍"
        assert rows[0]["feedback_rating_raw"] == "like"
        assert rows[0]["feedback_comment"] == "Excellent answer!"
        assert rows[0]["user_query"] == "What is Python?"
        assert rows[0]["ai_response"] == "Python is a programming language."

    # Test 02: JSON Export - Basic Functionality
    @patch("services.feedback_service.db")
    def test_export_feedbacks_json_basic(self, mock_db, factory, sample_feedback_data):
        """Test basic JSON export with metadata structure."""
        # Arrange
        mock_query = MagicMock()
        # Configure the mock to return itself for all chaining methods
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = sample_feedback_data

        # Set up the session.query to return our mock
        mock_db.session.query.return_value = mock_query

        # Act
        response = FeedbackService.export_feedbacks(app_id="app-456", format_type="json")

        # Assert
        assert response.mimetype == "application/json"
        assert "charset=utf-8" in response.content_type
        assert "attachment" in response.headers["Content-Disposition"]

        # Verify JSON structure
        json_content = json.loads(response.get_data(as_text=True))
        assert "export_info" in json_content
        assert "feedback_data" in json_content
        assert json_content["export_info"]["app_id"] == "app-456"
        assert json_content["export_info"]["total_records"] == 1
        assert len(json_content["feedback_data"]) == 1

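    # NOTE (review): the chainable-query setup above recurs in nearly every
    # test below. A small helper like this could factor it out; this is a
    # suggested refactor only, not part of the diff.
    @staticmethod
    def _make_chainable_query(results):
        """Illustrative only: query mock whose chaining methods return itself."""
        query = MagicMock()
        for method in ("join", "outerjoin", "where", "filter", "order_by"):
            getattr(query, method).return_value = query
        query.all.return_value = results
        return query
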
    # Test 03: Filter by from_source
    @patch("services.feedback_service.db")
    def test_export_feedbacks_filter_from_source(self, mock_db, factory):
        """Test filtering by feedback source (user/admin)."""
        # Arrange
        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = []

        # Act
        FeedbackService.export_feedbacks(app_id="app-456", from_source="admin")

        # Assert
        mock_query.filter.assert_called()

    # Test 04: Filter by rating
    @patch("services.feedback_service.db")
    def test_export_feedbacks_filter_rating(self, mock_db, factory):
        """Test filtering by rating (like/dislike)."""
        # Arrange
        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = []

        # Act
        FeedbackService.export_feedbacks(app_id="app-456", rating="dislike")

        # Assert
        mock_query.filter.assert_called()

    # Test 05: Filter by has_comment (True)
    @patch("services.feedback_service.db")
    def test_export_feedbacks_filter_has_comment_true(self, mock_db, factory):
        """Test filtering for feedback with comments."""
        # Arrange
        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = []

        # Act
        FeedbackService.export_feedbacks(app_id="app-456", has_comment=True)

        # Assert
        mock_query.filter.assert_called()

    # Test 06: Filter by has_comment (False)
    @patch("services.feedback_service.db")
    def test_export_feedbacks_filter_has_comment_false(self, mock_db, factory):
        """Test filtering for feedback without comments."""
        # Arrange
        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = []

        # Act
        FeedbackService.export_feedbacks(app_id="app-456", has_comment=False)

        # Assert
        mock_query.filter.assert_called()

    # Test 07: Filter by date range
    @patch("services.feedback_service.db")
    def test_export_feedbacks_filter_date_range(self, mock_db, factory):
        """Test filtering by start and end dates."""
        # Arrange
        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = []

        # Act
        FeedbackService.export_feedbacks(
            app_id="app-456",
            start_date="2024-01-01",
            end_date="2024-12-31",
        )

        # Assert
        assert mock_query.filter.call_count >= 2  # Called for both start and end dates

    # Test 08: Invalid date format - start_date
    @patch("services.feedback_service.db")
    def test_export_feedbacks_invalid_start_date(self, mock_db):
        """Test error handling for invalid start_date format."""
        # Arrange
        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query

        # Act & Assert
        with pytest.raises(ValueError, match="Invalid start_date format"):
            FeedbackService.export_feedbacks(app_id="app-456", start_date="invalid-date")

    # Test 09: Invalid date format - end_date
    @patch("services.feedback_service.db")
    def test_export_feedbacks_invalid_end_date(self, mock_db):
        """Test error handling for invalid end_date format."""
        # Arrange
        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query

        # Act & Assert
        with pytest.raises(ValueError, match="Invalid end_date format"):
            FeedbackService.export_feedbacks(app_id="app-456", end_date="2024-13-45")

    # Test 10: Unsupported format
    def test_export_feedbacks_unsupported_format(self):
        """Test error handling for unsupported export format."""
        # Act & Assert
        with pytest.raises(ValueError, match="Unsupported format"):
            FeedbackService.export_feedbacks(app_id="app-456", format_type="xml")

    # Test 11: Empty result set - CSV
    @patch("services.feedback_service.db")
    def test_export_feedbacks_empty_results_csv(self, mock_db):
        """Test CSV export with no feedback records."""
        # Arrange
        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = []

        # Act
        response = FeedbackService.export_feedbacks(app_id="app-456", format_type="csv")

        # Assert
        csv_content = response.get_data(as_text=True)
        reader = csv.DictReader(io.StringIO(csv_content))
        rows = list(reader)
        assert len(rows) == 0
        # But headers should still be present
        assert reader.fieldnames is not None

    # Test 12: Empty result set - JSON
    @patch("services.feedback_service.db")
    def test_export_feedbacks_empty_results_json(self, mock_db):
        """Test JSON export with no feedback records."""
        # Arrange
        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = []

        # Act
        response = FeedbackService.export_feedbacks(app_id="app-456", format_type="json")

        # Assert
        json_content = json.loads(response.get_data(as_text=True))
        assert json_content["export_info"]["total_records"] == 0
        assert len(json_content["feedback_data"]) == 0

    # Test 13: Long response truncation
    @patch("services.feedback_service.db")
    def test_export_feedbacks_long_response_truncation(self, mock_db, factory):
        """Test that long AI responses are truncated to 500 characters."""
        # Arrange
        long_answer = "A" * 600  # 600 characters
        feedback = factory.create_feedback_mock()
        message = factory.create_message_mock(answer=long_answer)
        conversation = factory.create_conversation_mock()
        app = factory.create_app_mock()
        account = factory.create_account_mock()

        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = [(feedback, message, conversation, app, account)]

        # Act
        response = FeedbackService.export_feedbacks(app_id="app-456", format_type="json")

        # Assert
        json_content = json.loads(response.get_data(as_text=True))
        ai_response = json_content["feedback_data"][0]["ai_response"]
        assert len(ai_response) == 503  # 500 + "..."
        assert ai_response.endswith("...")
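    # NOTE (review): the 503-character expectation above implies truncation at
    # 500 characters plus a literal "...". The sketch below is inferred from
    # the assertion, not copied from FeedbackService.
    @staticmethod
    def _truncate_sketch(answer: str, limit: int = 500) -> str:
        """Illustrative only: keep short answers intact, cut long ones and mark the cut."""
        return answer if len(answer) <= limit else answer[:limit] + "..."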

    # Test 14: Null account (end user feedback)
    @patch("services.feedback_service.db")
    def test_export_feedbacks_null_account(self, mock_db, factory):
        """Test handling of feedback from end users (no account)."""
        # Arrange
        feedback = factory.create_feedback_mock(from_account_id=None)
        message = factory.create_message_mock()
        conversation = factory.create_conversation_mock()
        app = factory.create_app_mock()
        account = None  # No account for end user

        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = [(feedback, message, conversation, app, account)]

        # Act
        response = FeedbackService.export_feedbacks(app_id="app-456", format_type="json")

        # Assert
        json_content = json.loads(response.get_data(as_text=True))
        assert json_content["feedback_data"][0]["from_account_name"] == ""

    # Test 15: Null conversation name
    @patch("services.feedback_service.db")
    def test_export_feedbacks_null_conversation_name(self, mock_db, factory):
        """Test handling of conversations without names."""
        # Arrange
        feedback = factory.create_feedback_mock()
        message = factory.create_message_mock()
        conversation = factory.create_conversation_mock(name=None)
        app = factory.create_app_mock()
        account = factory.create_account_mock()

        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = [(feedback, message, conversation, app, account)]

        # Act
        response = FeedbackService.export_feedbacks(app_id="app-456", format_type="json")

        # Assert
        json_content = json.loads(response.get_data(as_text=True))
        assert json_content["feedback_data"][0]["conversation_name"] == ""

    # Test 16: Dislike rating emoji
    @patch("services.feedback_service.db")
    def test_export_feedbacks_dislike_rating(self, mock_db, factory):
        """Test that dislike rating shows thumbs down emoji."""
        # Arrange
        feedback = factory.create_feedback_mock(rating="dislike")
        message = factory.create_message_mock()
        conversation = factory.create_conversation_mock()
        app = factory.create_app_mock()
        account = factory.create_account_mock()

        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = [(feedback, message, conversation, app, account)]

        # Act
        response = FeedbackService.export_feedbacks(app_id="app-456", format_type="json")

        # Assert
        json_content = json.loads(response.get_data(as_text=True))
        assert json_content["feedback_data"][0]["feedback_rating"] == "👎"
        assert json_content["feedback_data"][0]["feedback_rating_raw"] == "dislike"

    # Test 17: Combined filters
    @patch("services.feedback_service.db")
    def test_export_feedbacks_combined_filters(self, mock_db, factory):
        """Test applying multiple filters simultaneously."""
        # Arrange
        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = []

        # Act
        FeedbackService.export_feedbacks(
            app_id="app-456",
            from_source="admin",
            rating="like",
            has_comment=True,
            start_date="2024-01-01",
            end_date="2024-12-31",
        )

        # Assert
        # Should have called filter multiple times for each condition
        assert mock_query.filter.call_count >= 4

    # Test 18: Message query fallback to inputs
    @patch("services.feedback_service.db")
    def test_export_feedbacks_message_query_from_inputs(self, mock_db, factory):
        """Test fallback to inputs.query when message.query is None."""
        # Arrange
        feedback = factory.create_feedback_mock()
        message = factory.create_message_mock(query=None, inputs={"query": "Query from inputs"})
        conversation = factory.create_conversation_mock()
        app = factory.create_app_mock()
        account = factory.create_account_mock()

        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = [(feedback, message, conversation, app, account)]

        # Act
        response = FeedbackService.export_feedbacks(app_id="app-456", format_type="json")

        # Assert
        json_content = json.loads(response.get_data(as_text=True))
        assert json_content["feedback_data"][0]["user_query"] == "Query from inputs"

    # Test 19: Empty feedback content
    @patch("services.feedback_service.db")
    def test_export_feedbacks_empty_feedback_content(self, mock_db, factory):
        """Test handling of feedback with empty/null content."""
        # Arrange
        feedback = factory.create_feedback_mock(content=None)
        message = factory.create_message_mock()
        conversation = factory.create_conversation_mock()
        app = factory.create_app_mock()
        account = factory.create_account_mock()

        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = [(feedback, message, conversation, app, account)]

        # Act
        response = FeedbackService.export_feedbacks(app_id="app-456", format_type="json")

        # Assert
        json_content = json.loads(response.get_data(as_text=True))
        assert json_content["feedback_data"][0]["feedback_comment"] == ""
        assert json_content["feedback_data"][0]["has_comment"] == "No"

    # Test 20: CSV headers validation
    @patch("services.feedback_service.db")
    def test_export_feedbacks_csv_headers(self, mock_db, factory, sample_feedback_data):
        """Test that CSV contains all expected headers."""
        # Arrange
        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.join.return_value = mock_query
        mock_query.outerjoin.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.all.return_value = sample_feedback_data

        expected_headers = [
            "feedback_id",
            "app_name",
            "app_id",
            "conversation_id",
            "conversation_name",
            "message_id",
            "user_query",
            "ai_response",
            "feedback_rating",
            "feedback_rating_raw",
            "feedback_comment",
            "feedback_source",
            "feedback_date",
            "message_date",
            "from_account_name",
            "from_end_user_id",
            "has_comment",
        ]

        # Act
        response = FeedbackService.export_feedbacks(app_id="app-456", format_type="csv")

        # Assert
        csv_content = response.get_data(as_text=True)
        reader = csv.DictReader(io.StringIO(csv_content))
        assert list(reader.fieldnames) == expected_headers

@@ -0,0 +1,649 @@
from datetime import datetime
from unittest.mock import MagicMock, patch

import pytest

from libs.infinite_scroll_pagination import InfiniteScrollPagination
from models.model import App, AppMode, EndUser, Message
from services.errors.message import FirstMessageNotExistsError, LastMessageNotExistsError
from services.message_service import MessageService


class TestMessageServiceFactory:
    """Factory class for creating test data and mock objects for message service tests."""

    @staticmethod
    def create_app_mock(
        app_id: str = "app-123",
        mode: str = AppMode.ADVANCED_CHAT.value,
        name: str = "Test App",
    ) -> MagicMock:
        """Create a mock App object."""
        app = MagicMock(spec=App)
        app.id = app_id
        app.mode = mode
        app.name = name
        return app

    @staticmethod
    def create_end_user_mock(
        user_id: str = "user-456",
        session_id: str = "session-789",
    ) -> MagicMock:
        """Create a mock EndUser object."""
        user = MagicMock(spec=EndUser)
        user.id = user_id
        user.session_id = session_id
        return user

    @staticmethod
    def create_conversation_mock(
        conversation_id: str = "conv-001",
        app_id: str = "app-123",
    ) -> MagicMock:
        """Create a mock Conversation object."""
        conversation = MagicMock()
        conversation.id = conversation_id
        conversation.app_id = app_id
        return conversation

    @staticmethod
    def create_message_mock(
        message_id: str = "msg-001",
        conversation_id: str = "conv-001",
        query: str = "What is AI?",
        answer: str = "AI stands for Artificial Intelligence.",
        created_at: datetime | None = None,
    ) -> MagicMock:
        """Create a mock Message object."""
        message = MagicMock(spec=Message)
        message.id = message_id
        message.conversation_id = conversation_id
        message.query = query
        message.answer = answer
        message.created_at = created_at or datetime.now()
        return message


class TestMessageServicePaginationByFirstId:
    """
    Unit tests for MessageService.pagination_by_first_id method.

    This test suite covers:
    - Basic pagination with and without first_id
    - Order handling (asc/desc)
    - Edge cases (no user, no conversation, invalid first_id)
    - Has_more flag logic
    """

    @pytest.fixture
    def factory(self):
        """Provide test data factory."""
        return TestMessageServiceFactory()

    # Test 01: No user provided
    def test_pagination_by_first_id_no_user(self, factory):
        """Test pagination returns empty result when no user is provided."""
        # Arrange
        app = factory.create_app_mock()

        # Act
        result = MessageService.pagination_by_first_id(
            app_model=app,
            user=None,
            conversation_id="conv-001",
            first_id=None,
            limit=10,
        )

        # Assert
        assert isinstance(result, InfiniteScrollPagination)
        assert result.data == []
        assert result.limit == 10
        assert result.has_more is False

    # Test 02: No conversation_id provided
    def test_pagination_by_first_id_no_conversation(self, factory):
        """Test pagination returns empty result when no conversation_id is provided."""
        # Arrange
        app = factory.create_app_mock()
        user = factory.create_end_user_mock()

        # Act
        result = MessageService.pagination_by_first_id(
            app_model=app,
            user=user,
            conversation_id="",
            first_id=None,
            limit=10,
        )

        # Assert
        assert isinstance(result, InfiniteScrollPagination)
        assert result.data == []
        assert result.limit == 10
        assert result.has_more is False

    # Test 03: Basic pagination without first_id (desc order)
    @patch("services.message_service.db")
    @patch("services.message_service.ConversationService")
    def test_pagination_by_first_id_without_first_id_desc(self, mock_conversation_service, mock_db, factory):
        """Test basic pagination without first_id in descending order."""
        # Arrange
        app = factory.create_app_mock()
        user = factory.create_end_user_mock()
        conversation = factory.create_conversation_mock()

        mock_conversation_service.get_conversation.return_value = conversation

        # Create 5 messages
        messages = [
            factory.create_message_mock(
                message_id=f"msg-{i:03d}",
                created_at=datetime(2024, 1, 1, 12, i),
            )
            for i in range(5)
        ]

        # Each query method returns the same mock so the fluent chain resolves
        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.limit.return_value = mock_query
        mock_query.all.return_value = messages

        # Act
        result = MessageService.pagination_by_first_id(
            app_model=app,
            user=user,
            conversation_id="conv-001",
            first_id=None,
            limit=10,
            order="desc",
        )

        # Assert
        assert len(result.data) == 5
        assert result.has_more is False
        assert result.limit == 10
        # Messages should remain in desc order (not reversed)
        assert result.data[0].id == "msg-000"

    # Test 04: Basic pagination without first_id (asc order)
    @patch("services.message_service.db")
    @patch("services.message_service.ConversationService")
    def test_pagination_by_first_id_without_first_id_asc(self, mock_conversation_service, mock_db, factory):
        """Test basic pagination without first_id in ascending order."""
        # Arrange
        app = factory.create_app_mock()
        user = factory.create_end_user_mock()
        conversation = factory.create_conversation_mock()

        mock_conversation_service.get_conversation.return_value = conversation

        # Create 5 messages (returned in desc order from DB)
        messages = [
            factory.create_message_mock(
                message_id=f"msg-{i:03d}",
                created_at=datetime(2024, 1, 1, 12, 4 - i),  # Descending timestamps
            )
            for i in range(5)
        ]

        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.limit.return_value = mock_query
        mock_query.all.return_value = messages

        # Act
        result = MessageService.pagination_by_first_id(
            app_model=app,
            user=user,
            conversation_id="conv-001",
            first_id=None,
            limit=10,
            order="asc",
        )

        # Assert
        assert len(result.data) == 5
        assert result.has_more is False
        # Messages should be reversed to asc order
        assert result.data[0].id == "msg-004"
        assert result.data[4].id == "msg-000"

    # Test 05: Pagination with first_id
    @patch("services.message_service.db")
    @patch("services.message_service.ConversationService")
    def test_pagination_by_first_id_with_first_id(self, mock_conversation_service, mock_db, factory):
        """Test pagination with first_id to get messages before a specific message."""
        # Arrange
        app = factory.create_app_mock()
        user = factory.create_end_user_mock()
        conversation = factory.create_conversation_mock()

        mock_conversation_service.get_conversation.return_value = conversation

        first_message = factory.create_message_mock(
            message_id="msg-005",
            created_at=datetime(2024, 1, 1, 12, 5),
        )

        # Messages before first_message
        history_messages = [
            factory.create_message_mock(
                message_id=f"msg-{i:03d}",
                created_at=datetime(2024, 1, 1, 12, i),
            )
            for i in range(5)
        ]

        # Setup query mocks
        mock_query_first = MagicMock()
        mock_query_history = MagicMock()

        def query_side_effect(*args):
            if args[0] == Message:
                # First call returns mock for first_message query
                if not hasattr(query_side_effect, "call_count"):
                    query_side_effect.call_count = 0
                query_side_effect.call_count += 1

                if query_side_effect.call_count == 1:
                    return mock_query_first
                else:
                    return mock_query_history

        # Note: the helper above is never attached to the mock; the list-based
        # side_effect below is what actually returns the two query mocks in order.
        mock_db.session.query.side_effect = [mock_query_first, mock_query_history]

        # Setup first message query
        mock_query_first.where.return_value = mock_query_first
        mock_query_first.first.return_value = first_message

        # Setup history messages query
        mock_query_history.where.return_value = mock_query_history
        mock_query_history.order_by.return_value = mock_query_history
        mock_query_history.limit.return_value = mock_query_history
        mock_query_history.all.return_value = history_messages

        # Act
        result = MessageService.pagination_by_first_id(
            app_model=app,
            user=user,
            conversation_id="conv-001",
            first_id="msg-005",
            limit=10,
            order="desc",
        )

        # Assert
        assert len(result.data) == 5
        assert result.has_more is False
        mock_query_first.where.assert_called_once()
        mock_query_history.where.assert_called_once()

    # Test 06: First message not found
    @patch("services.message_service.db")
    @patch("services.message_service.ConversationService")
    def test_pagination_by_first_id_first_message_not_exists(self, mock_conversation_service, mock_db, factory):
        """Test error handling when first_id doesn't exist."""
        # Arrange
        app = factory.create_app_mock()
        user = factory.create_end_user_mock()
        conversation = factory.create_conversation_mock()

        mock_conversation_service.get_conversation.return_value = conversation

        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.first.return_value = None  # Message not found

        # Act & Assert
        with pytest.raises(FirstMessageNotExistsError):
            MessageService.pagination_by_first_id(
                app_model=app,
                user=user,
                conversation_id="conv-001",
                first_id="nonexistent-msg",
                limit=10,
            )

    # Test 07: Has_more flag when results exceed limit
    @patch("services.message_service.db")
    @patch("services.message_service.ConversationService")
    def test_pagination_by_first_id_has_more_true(self, mock_conversation_service, mock_db, factory):
        """Test has_more flag is True when results exceed limit."""
        # Arrange
        app = factory.create_app_mock()
        user = factory.create_end_user_mock()
        conversation = factory.create_conversation_mock()

        mock_conversation_service.get_conversation.return_value = conversation

        # Create limit+1 messages (11 messages for limit=10)
        messages = [
            factory.create_message_mock(
                message_id=f"msg-{i:03d}",
                created_at=datetime(2024, 1, 1, 12, i),
            )
            for i in range(11)
        ]

        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.limit.return_value = mock_query
        mock_query.all.return_value = messages

        # Act
        result = MessageService.pagination_by_first_id(
            app_model=app,
            user=user,
            conversation_id="conv-001",
            first_id=None,
            limit=10,
        )

        # Assert
        assert len(result.data) == 10  # Last message trimmed
        assert result.has_more is True
        assert result.limit == 10

    # Test 08: Empty conversation
    @patch("services.message_service.db")
    @patch("services.message_service.ConversationService")
    def test_pagination_by_first_id_empty_conversation(self, mock_conversation_service, mock_db, factory):
        """Test pagination with conversation that has no messages."""
        # Arrange
        app = factory.create_app_mock()
        user = factory.create_end_user_mock()
        conversation = factory.create_conversation_mock()

        mock_conversation_service.get_conversation.return_value = conversation

        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.limit.return_value = mock_query
        mock_query.all.return_value = []

        # Act
        result = MessageService.pagination_by_first_id(
            app_model=app,
            user=user,
            conversation_id="conv-001",
            first_id=None,
            limit=10,
        )

        # Assert
        assert len(result.data) == 0
        assert result.has_more is False
        assert result.limit == 10


class TestMessageServicePaginationByLastId:
    """
    Unit tests for MessageService.pagination_by_last_id method.

    This test suite covers:
    - Basic pagination with and without last_id
    - Conversation filtering
    - Include_ids filtering
    - Edge cases (no user, invalid last_id)
    """

    @pytest.fixture
    def factory(self):
        """Provide test data factory."""
        return TestMessageServiceFactory()

    # Test 09: No user provided
    def test_pagination_by_last_id_no_user(self, factory):
        """Test pagination returns empty result when no user is provided."""
        # Arrange
        app = factory.create_app_mock()

        # Act
        result = MessageService.pagination_by_last_id(
            app_model=app,
            user=None,
            last_id=None,
            limit=10,
        )

        # Assert
        assert isinstance(result, InfiniteScrollPagination)
        assert result.data == []
        assert result.limit == 10
        assert result.has_more is False

    # Test 10: Basic pagination without last_id
    @patch("services.message_service.db")
    def test_pagination_by_last_id_without_last_id(self, mock_db, factory):
        """Test basic pagination without last_id."""
        # Arrange
        app = factory.create_app_mock()
        user = factory.create_end_user_mock()

        messages = [
            factory.create_message_mock(
                message_id=f"msg-{i:03d}",
                created_at=datetime(2024, 1, 1, 12, i),
            )
            for i in range(5)
        ]

        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.limit.return_value = mock_query
        mock_query.all.return_value = messages

        # Act
        result = MessageService.pagination_by_last_id(
            app_model=app,
            user=user,
            last_id=None,
            limit=10,
        )

        # Assert
        assert len(result.data) == 5
        assert result.has_more is False
        assert result.limit == 10

    # Test 11: Pagination with last_id
    @patch("services.message_service.db")
    def test_pagination_by_last_id_with_last_id(self, mock_db, factory):
        """Test pagination with last_id to get messages after a specific message."""
        # Arrange
        app = factory.create_app_mock()
        user = factory.create_end_user_mock()

        last_message = factory.create_message_mock(
            message_id="msg-005",
            created_at=datetime(2024, 1, 1, 12, 5),
        )

        # Messages after last_message
        new_messages = [
            factory.create_message_mock(
                message_id=f"msg-{i:03d}",
                created_at=datetime(2024, 1, 1, 12, i),
            )
            for i in range(6, 10)
        ]

        # Setup base query mock that returns itself for chaining
        mock_base_query = MagicMock()
        mock_db.session.query.return_value = mock_base_query

        # First where() call for last_id lookup
        mock_query_last = MagicMock()
        mock_query_last.first.return_value = last_message

        # Second where() call for history messages
        mock_query_history = MagicMock()
        mock_query_history.order_by.return_value = mock_query_history
        mock_query_history.limit.return_value = mock_query_history
        mock_query_history.all.return_value = new_messages

        # Setup where() to return different mocks on consecutive calls
        mock_base_query.where.side_effect = [mock_query_last, mock_query_history]

        # Act
        result = MessageService.pagination_by_last_id(
            app_model=app,
            user=user,
            last_id="msg-005",
            limit=10,
        )

        # Assert
        assert len(result.data) == 4
        assert result.has_more is False

    # Test 12: Last message not found
    @patch("services.message_service.db")
    def test_pagination_by_last_id_last_message_not_exists(self, mock_db, factory):
        """Test error handling when last_id doesn't exist."""
        # Arrange
        app = factory.create_app_mock()
        user = factory.create_end_user_mock()

        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.first.return_value = None  # Message not found

        # Act & Assert
        with pytest.raises(LastMessageNotExistsError):
            MessageService.pagination_by_last_id(
                app_model=app,
                user=user,
                last_id="nonexistent-msg",
                limit=10,
            )

    # Test 13: Pagination with conversation_id filter
    @patch("services.message_service.ConversationService")
    @patch("services.message_service.db")
    def test_pagination_by_last_id_with_conversation_filter(self, mock_db, mock_conversation_service, factory):
        """Test pagination filtered by conversation_id."""
        # Arrange
        app = factory.create_app_mock()
        user = factory.create_end_user_mock()
        conversation = factory.create_conversation_mock(conversation_id="conv-001")

        mock_conversation_service.get_conversation.return_value = conversation

        messages = [
            factory.create_message_mock(
                message_id=f"msg-{i:03d}",
                conversation_id="conv-001",
                created_at=datetime(2024, 1, 1, 12, i),
            )
            for i in range(5)
        ]

        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.limit.return_value = mock_query
        mock_query.all.return_value = messages

        # Act
        result = MessageService.pagination_by_last_id(
            app_model=app,
            user=user,
            last_id=None,
            limit=10,
            conversation_id="conv-001",
        )

        # Assert
        assert len(result.data) == 5
        assert result.has_more is False
        # Verify conversation_id was used in query
        mock_query.where.assert_called()
        mock_conversation_service.get_conversation.assert_called_once()

    # Test 14: Pagination with include_ids filter
    @patch("services.message_service.db")
    def test_pagination_by_last_id_with_include_ids(self, mock_db, factory):
        """Test pagination filtered by include_ids."""
        # Arrange
        app = factory.create_app_mock()
        user = factory.create_end_user_mock()

        # Only messages with IDs in include_ids should be returned
        messages = [
            factory.create_message_mock(message_id="msg-001"),
            factory.create_message_mock(message_id="msg-003"),
        ]

        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.limit.return_value = mock_query
        mock_query.all.return_value = messages

        # Act
        result = MessageService.pagination_by_last_id(
            app_model=app,
            user=user,
            last_id=None,
            limit=10,
            include_ids=["msg-001", "msg-003"],
        )

        # Assert
        assert len(result.data) == 2
        assert result.data[0].id == "msg-001"
        assert result.data[1].id == "msg-003"

    # Test 15: Has_more flag when results exceed limit
    @patch("services.message_service.db")
    def test_pagination_by_last_id_has_more_true(self, mock_db, factory):
        """Test has_more flag is True when results exceed limit."""
        # Arrange
        app = factory.create_app_mock()
        user = factory.create_end_user_mock()

        # Create limit+1 messages (11 messages for limit=10)
        messages = [
            factory.create_message_mock(
                message_id=f"msg-{i:03d}",
                created_at=datetime(2024, 1, 1, 12, i),
            )
            for i in range(11)
        ]

        mock_query = MagicMock()
        mock_db.session.query.return_value = mock_query
        mock_query.where.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.limit.return_value = mock_query
        mock_query.all.return_value = messages

        # Act
        result = MessageService.pagination_by_last_id(
            app_model=app,
            user=user,
            last_id=None,
            limit=10,
        )

        # Assert
        assert len(result.data) == 10  # Last message trimmed
        assert result.has_more is True
        assert result.limit == 10
File diff suppressed because it is too large
@@ -123,7 +123,7 @@ services:

  # plugin daemon
  plugin_daemon:
-    image: langgenius/dify-plugin-daemon:0.4.0-local
+    image: langgenius/dify-plugin-daemon:0.4.1-local
    restart: always
    env_file:
      - ./middleware.env
@@ -12,7 +12,7 @@ RUN apk add --no-cache tzdata
RUN corepack enable
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
-ENV NEXT_PUBLIC_BASE_PATH=
+ENV NEXT_PUBLIC_BASE_PATH=""

# install packages
@@ -20,8 +20,7 @@ FROM base AS packages
WORKDIR /app/web

-COPY package.json .
-COPY pnpm-lock.yaml .
+COPY package.json pnpm-lock.yaml /app/web/

# Use packageManager from package.json
RUN corepack install
@@ -57,24 +56,30 @@ ENV TZ=UTC
RUN ln -s /usr/share/zoneinfo/${TZ} /etc/localtime \
    && echo ${TZ} > /etc/timezone

+# global runtime packages
+RUN pnpm add -g pm2
+
+# Create non-root user
+ARG dify_uid=1001
+RUN addgroup -S -g ${dify_uid} dify && \
+    adduser -S -u ${dify_uid} -G dify -s /bin/ash -h /home/dify dify && \
+    mkdir /app && \
+    mkdir /.pm2 && \
+    chown -R dify:dify /app /.pm2
+
WORKDIR /app/web
-COPY --from=builder /app/web/public ./public
-COPY --from=builder /app/web/.next/standalone ./
-COPY --from=builder /app/web/.next/static ./.next/static
-
-COPY docker/entrypoint.sh ./entrypoint.sh
-
-# global runtime packages
-RUN pnpm add -g pm2 \
-    && mkdir /.pm2 \
-    && chown -R 1001:0 /.pm2 /app/web \
-    && chmod -R g=u /.pm2 /app/web
+COPY --from=builder --chown=dify:dify /app/web/public ./public
+COPY --from=builder --chown=dify:dify /app/web/.next/standalone ./
+COPY --from=builder --chown=dify:dify /app/web/.next/static ./.next/static
+
+COPY --chown=dify:dify --chmod=755 docker/entrypoint.sh ./entrypoint.sh

ARG COMMIT_SHA
ENV COMMIT_SHA=${COMMIT_SHA}

-USER 1001
+USER dify
EXPOSE 3000
ENTRYPOINT ["/bin/sh", "./entrypoint.sh"]

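A note on the new stage (editorial, not part of the diff): `COPY --from=builder --chown=dify:dify` and `--chmod=755` on the entrypoint set ownership and permissions at copy time, so the image does not need a separate `RUN chown`/`chmod` layer that would duplicate the copied files in the image history, and the standalone output stays writable by the non-root `dify` user at runtime.
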
@@ -70,11 +70,12 @@ export class SlashCommandRegistry {

    // First check if any alias starts with this
    const aliasMatch = this.findHandlerByAliasPrefix(lowerPartial)
-    if (aliasMatch)
+    if (aliasMatch && this.isCommandAvailable(aliasMatch))
      return aliasMatch

    // Then check if command name starts with this
-    return this.findHandlerByNamePrefix(lowerPartial)
+    const nameMatch = this.findHandlerByNamePrefix(lowerPartial)
+    return nameMatch && this.isCommandAvailable(nameMatch) ? nameMatch : undefined
  }

  /**
@@ -108,6 +109,14 @@ export class SlashCommandRegistry {
    return Array.from(uniqueCommands.values())
  }

+  /**
+   * Get all available commands in current context (deduplicated and filtered)
+   * Commands without isAvailable method are considered always available
+   */
+  getAvailableCommands(): SlashCommandHandler[] {
+    return this.getAllCommands().filter(handler => this.isCommandAvailable(handler))
+  }
+
  /**
   * Search commands
   * @param query Full query (e.g., "/theme dark" or "/lang en")
@@ -128,7 +137,7 @@ export class SlashCommandRegistry {

    // First try exact match
    let handler = this.findCommand(commandName)
-    if (handler) {
+    if (handler && this.isCommandAvailable(handler)) {
      try {
        return await handler.search(args, locale)
      }
@@ -140,7 +149,7 @@ export class SlashCommandRegistry {

    // If no exact match, try smart partial matching
    handler = this.findBestPartialMatch(commandName)
-    if (handler) {
+    if (handler && this.isCommandAvailable(handler)) {
      try {
        return await handler.search(args, locale)
      }
@@ -156,35 +165,30 @@ export class SlashCommandRegistry {

  /**
   * Get root level command list
+   * Only shows commands that are available in current context
   */
  private async getRootCommands(): Promise<CommandSearchResult[]> {
-    const results: CommandSearchResult[] = []
-
-    // Generate a root level item for each command
-    for (const handler of this.getAllCommands()) {
-      results.push({
-        id: `root-${handler.name}`,
-        title: `/${handler.name}`,
-        description: handler.description,
-        type: 'command' as const,
-        data: {
-          command: `root.${handler.name}`,
-          args: { name: handler.name },
-        },
-      })
-    }
-
-    return results
+    return this.getAvailableCommands().map(handler => ({
+      id: `root-${handler.name}`,
+      title: `/${handler.name}`,
+      description: handler.description,
+      type: 'command' as const,
+      data: {
+        command: `root.${handler.name}`,
+        args: { name: handler.name },
+      },
+    }))
  }

  /**
   * Fuzzy search commands
+   * Only shows commands that are available in current context
   */
  private fuzzySearchCommands(query: string): CommandSearchResult[] {
    const lowercaseQuery = query.toLowerCase()
    const matches: CommandSearchResult[] = []

-    this.getAllCommands().forEach((handler) => {
+    for (const handler of this.getAvailableCommands()) {
      // Check if command name matches
      if (handler.name.toLowerCase().includes(lowercaseQuery)) {
        matches.push({
@@ -216,7 +220,7 @@ export class SlashCommandRegistry {
          }
        })
      }
-    })
+    }

    return matches
  }
@@ -227,6 +231,14 @@ export class SlashCommandRegistry {
  getCommandDependencies(commandName: string): any {
    return this.commandDeps.get(commandName)
  }
+
+  /**
+   * Determine if a command is available in the current context.
+   * Defaults to true when a handler does not implement the guard.
+   */
+  private isCommandAvailable(handler: SlashCommandHandler) {
+    return handler.isAvailable?.() ?? true
+  }
}

// Global registry instance

@@ -11,6 +11,7 @@ import { forumCommand } from './forum'
import { docsCommand } from './docs'
import { communityCommand } from './community'
import { accountCommand } from './account'
+import { zenCommand } from './zen'
import i18n from '@/i18n-config/i18next-config'

export const slashAction: ActionItem = {
@@ -38,6 +39,7 @@ export const registerSlashCommands = (deps: Record<string, any>) => {
  slashCommandRegistry.register(docsCommand, {})
  slashCommandRegistry.register(communityCommand, {})
  slashCommandRegistry.register(accountCommand, {})
+  slashCommandRegistry.register(zenCommand, {})
}

export const unregisterSlashCommands = () => {
@@ -48,6 +50,7 @@ export const unregisterSlashCommands = () => {
  slashCommandRegistry.unregister('docs')
  slashCommandRegistry.unregister('community')
  slashCommandRegistry.unregister('account')
+  slashCommandRegistry.unregister('zen')
}

export const SlashCommandProvider = () => {

@@ -21,6 +21,13 @@ export type SlashCommandHandler<TDeps = any> = {
   */
  mode?: 'direct' | 'submenu'

+  /**
+   * Check if command is available in current context
+   * If not implemented, command is always available
+   * Used to conditionally show/hide commands based on page, user state, etc.
+   */
+  isAvailable?: () => boolean
+
  /**
   * Direct execution function for 'direct' mode commands
   * Called when the command is selected and should execute immediately

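For orientation, a minimal sketch of how a handler opts into this guard, following the handler shape added above; the `datasetStatsCommand` name and `isInDatasetPage` helper are hypothetical, for illustration only:

import type { SlashCommandHandler } from './types'

// Hypothetical route check, not part of this diff.
const isInDatasetPage = () => window.location.pathname.includes('/datasets')

export const datasetStatsCommand: SlashCommandHandler = {
  name: 'dataset-stats',
  description: 'Illustrative route-gated command',
  mode: 'direct',
  // Omit isAvailable entirely to keep a command visible everywhere.
  isAvailable: () => isInDatasetPage(),
  execute: () => { /* perform the action */ },
  async search(_args: string, _locale = 'en') {
    return []
  },
  register(_deps) {},
  unregister() {},
}
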
@@ -0,0 +1,58 @@
import type { SlashCommandHandler } from './types'
import React from 'react'
import { RiFullscreenLine } from '@remixicon/react'
import i18n from '@/i18n-config/i18next-config'
import { registerCommands, unregisterCommands } from './command-bus'
import { isInWorkflowPage } from '@/app/components/workflow/constants'

// Zen command dependency types - no external dependencies needed
type ZenDeps = Record<string, never>

// Custom event name for zen toggle
export const ZEN_TOGGLE_EVENT = 'zen-toggle-maximize'

// Shared function to dispatch zen toggle event
const toggleZenMode = () => {
  window.dispatchEvent(new CustomEvent(ZEN_TOGGLE_EVENT))
}

/**
 * Zen command - Toggle canvas maximize (focus mode) in workflow pages
 * Only available in workflow and chatflow pages
 */
export const zenCommand: SlashCommandHandler<ZenDeps> = {
  name: 'zen',
  description: 'Toggle canvas focus mode',
  mode: 'direct',

  // Only available in workflow/chatflow pages
  isAvailable: () => isInWorkflowPage(),

  // Direct execution function
  execute: toggleZenMode,

  async search(_args: string, locale: string = 'en') {
    return [{
      id: 'zen',
      title: i18n.t('app.gotoAnything.actions.zenTitle', { lng: locale }) || 'Zen Mode',
      description: i18n.t('app.gotoAnything.actions.zenDesc', { lng: locale }) || 'Toggle canvas focus mode',
      type: 'command' as const,
      icon: (
        <div className='flex h-6 w-6 items-center justify-center rounded-md border-[0.5px] border-divider-regular bg-components-panel-bg'>
          <RiFullscreenLine className='h-4 w-4 text-text-tertiary' />
        </div>
      ),
      data: { command: 'workflow.zen', args: {} },
    }]
  },

  register(_deps: ZenDeps) {
    registerCommands({
      'workflow.zen': async () => toggleZenMode(),
    })
  },

  unregister() {
    unregisterCommands(['workflow.zen'])
  },
}

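The command talks to the canvas through a window-level CustomEvent rather than importing workflow state directly; the matching `window.addEventListener(ZEN_TOGGLE_EVENT, ...)` side is added to the workflow `use-shortcuts` hook later in this diff, which keeps the goto-anything module decoupled from the workflow store.
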
@@ -1,5 +1,6 @@
import type { FC } from 'react'
import { useEffect, useMemo } from 'react'
+import { usePathname } from 'next/navigation'
import { Command } from 'cmdk'
import { useTranslation } from 'react-i18next'
import type { ActionItem } from './actions/types'
@@ -16,18 +17,20 @@

const CommandSelector: FC<Props> = ({ actions, onCommandSelect, searchFilter, commandValue, onCommandValueChange, originalQuery }) => {
  const { t } = useTranslation()
+  const pathname = usePathname()

  // Check if we're in slash command mode
  const isSlashMode = originalQuery?.trim().startsWith('/') || false

  // Get slash commands from registry
+  // Note: pathname is included in deps because some commands (like /zen) check isAvailable based on current route
  const slashCommands = useMemo(() => {
    if (!isSlashMode) return []

-    const allCommands = slashCommandRegistry.getAllCommands()
+    const availableCommands = slashCommandRegistry.getAvailableCommands()
    const filter = searchFilter?.toLowerCase() || '' // searchFilter already has '/' removed

-    return allCommands.filter((cmd) => {
+    return availableCommands.filter((cmd) => {
      if (!filter) return true
      return cmd.name.toLowerCase().includes(filter)
    }).map(cmd => ({
@@ -36,7 +39,7 @@ const CommandSelector: FC<Props> = ({ actions, onCommandSelect, searchFilter, co
      title: cmd.name,
      description: cmd.description,
    }))
-  }, [isSlashMode, searchFilter])
+  }, [isSlashMode, searchFilter, pathname])

  const filteredActions = useMemo(() => {
    if (isSlashMode) return []
@@ -107,6 +110,7 @@ const CommandSelector: FC<Props> = ({ actions, onCommandSelect, searchFilter, co
          '/feedback': 'app.gotoAnything.actions.feedbackDesc',
          '/docs': 'app.gotoAnything.actions.docDesc',
          '/community': 'app.gotoAnything.actions.communityDesc',
+          '/zen': 'app.gotoAnything.actions.zenDesc',
        }
        return t(slashKeyMap[item.key] || item.description)
      })()

@@ -303,7 +303,8 @@ const GotoAnything: FC<Props> = ({
        const handler = slashCommandRegistry.findCommand(commandName)

        // If it's a direct mode command, execute immediately
-        if (handler?.mode === 'direct' && handler.execute) {
+        const isAvailable = handler?.isAvailable?.() ?? true
+        if (handler?.mode === 'direct' && handler.execute && isAvailable) {
          e.preventDefault()
          handler.execute()
          setShow(false)

@@ -1,6 +1,7 @@
import { useReactFlow } from 'reactflow'
import { useKeyPress } from 'ahooks'
-import { useCallback } from 'react'
+import { useCallback, useEffect } from 'react'
+import { ZEN_TOGGLE_EVENT } from '@/app/components/goto-anything/actions/commands/zen'
import {
  getKeyboardKeyCodeBySystem,
  isEventTargetInputArea,
@@ -246,4 +247,16 @@ export const useShortcuts = (): void => {
      events: ['keyup'],
    },
  )
+
+  // Listen for zen toggle event from /zen command
+  useEffect(() => {
+    const handleZenToggle = () => {
+      handleToggleMaximizeCanvas()
+    }
+
+    window.addEventListener(ZEN_TOGGLE_EVENT, handleZenToggle)
+    return () => {
+      window.removeEventListener(ZEN_TOGGLE_EVENT, handleZenToggle)
+    }
+  }, [handleToggleMaximizeCanvas])
}

@@ -304,6 +304,8 @@ const translation = {
      feedbackDesc: 'Offene Diskussionen zum Feedback der Gemeinschaft',
      communityDesc: 'Offene Discord-Community',
      docDesc: 'Öffnen Sie die Hilfedokumentation',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noPluginsFound: 'Keine Plugins gefunden',

@@ -98,6 +98,13 @@ const translation = {
    confirmTitle: 'Bestätigen, um zu speichern?',
    nameForToolCallPlaceHolder: 'Wird für die Maschinenerkennung verwendet, z. B. getCurrentWeather, list_pets',
    descriptionPlaceholder: 'Kurze Beschreibung des Zwecks des Werkzeugs, z. B. um die Temperatur für einen bestimmten Ort zu ermitteln.',
+    toolOutput: {
+      title: 'Werkzeugausgabe',
+      name: 'Name',
+      reserved: 'Reserviert',
+      reservedParameterDuplicateTip: 'Text, JSON und Dateien sind reservierte Variablen. Variablen mit diesen Namen dürfen im Ausgabeschema nicht erscheinen.',
+      description: 'Beschreibung',
+    },
  },
  test: {
    title: 'Test',

@@ -325,6 +325,8 @@ const translation = {
      communityDesc: 'Open Discord community',
      docDesc: 'Open help documentation',
      feedbackDesc: 'Open community feedback discussions',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noAppsFound: 'No apps found',

@@ -302,6 +302,8 @@ const translation = {
      communityDesc: 'Abrir comunidad de Discord',
      feedbackDesc: 'Discusiones de retroalimentación de la comunidad abierta',
      docDesc: 'Abrir la documentación de ayuda',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noAppsFound: 'No se encontraron aplicaciones',

@@ -119,6 +119,13 @@ const translation = {
    confirmTip: 'Las aplicaciones que usen esta herramienta se verán afectadas',
    deleteToolConfirmTitle: '¿Eliminar esta Herramienta?',
    deleteToolConfirmContent: 'Eliminar la herramienta es irreversible. Los usuarios ya no podrán acceder a tu herramienta.',
+    toolOutput: {
+      title: 'Salida de la herramienta',
+      name: 'Nombre',
+      reserved: 'Reservado',
+      reservedParameterDuplicateTip: 'text, json y files son variables reservadas. Las variables con estos nombres no pueden aparecer en el esquema de salida.',
+      description: 'Descripción',
+    },
  },
  test: {
    title: 'Probar',

@@ -302,6 +302,8 @@ const translation = {
      accountDesc: 'به صفحه حساب کاربری بروید',
      communityDesc: 'جامعه دیسکورد باز',
      docDesc: 'مستندات کمک را باز کنید',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noKnowledgeBasesFound: 'هیچ پایگاه دانش یافت نشد',

@@ -119,6 +119,13 @@ const translation = {
    confirmTip: 'برنامههایی که از این ابزار استفاده میکنند تحت تأثیر قرار خواهند گرفت',
    deleteToolConfirmTitle: 'آیا این ابزار را حذف کنید؟',
    deleteToolConfirmContent: 'حذف ابزار غیرقابل بازگشت است. کاربران دیگر قادر به دسترسی به ابزار شما نخواهند بود.',
+    toolOutput: {
+      title: 'خروجی ابزار',
+      name: 'نام',
+      reserved: 'رزرو شده',
+      reservedParameterDuplicateTip: 'متن، JSON و فایلها متغیرهای رزرو شده هستند. متغیرهایی با این نامها نمیتوانند در طرح خروجی ظاهر شوند.',
+      description: 'توضیحات',
+    },
  },
  test: {
    title: 'آزمایش',

@@ -302,6 +302,8 @@ const translation = {
      docDesc: 'Ouvrir la documentation d\'aide',
      accountDesc: 'Accédez à la page de compte',
      feedbackDesc: 'Discussions de rétroaction de la communauté ouverte',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noKnowledgeBasesFound: 'Aucune base de connaissances trouvée',

@@ -98,6 +98,13 @@ const translation = {
    description: 'Description',
    nameForToolCallPlaceHolder: 'Utilisé pour la reconnaissance automatique, tels que getCurrentWeather, list_pets',
    descriptionPlaceholder: 'Brève description de l’objectif de l’outil, par exemple, obtenir la température d’un endroit spécifique.',
+    toolOutput: {
+      title: 'Sortie de l\'outil',
+      name: 'Nom',
+      reserved: 'Réservé',
+      reservedParameterDuplicateTip: 'text, json et files sont des variables réservées. Les variables portant ces noms ne peuvent pas apparaître dans le schéma de sortie.',
+      description: 'Description',
+    },
  },
  test: {
    title: 'Test',

@@ -302,6 +302,8 @@ const translation = {
      docDesc: 'सहायता दस्तावेज़ खोलें',
      communityDesc: 'ओपन डिस्कॉर्ड समुदाय',
      feedbackDesc: 'खुले समुदाय की फीडबैक चर्चाएँ',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noPluginsFound: 'कोई प्लगइन नहीं मिले',

@@ -123,6 +123,13 @@ const translation = {
    confirmTip: 'इस उपकरण का उपयोग करने वाले ऐप्स प्रभावित होंगे',
    deleteToolConfirmTitle: 'इस उपकरण को हटाएं?',
    deleteToolConfirmContent: 'इस उपकरण को हटाने से वापस नहीं आ सकता है। उपयोगकर्ता अब तक आपके उपकरण पर अन्तराल नहीं कर सकेंगे।',
+    toolOutput: {
+      title: 'उपकरण आउटपुट',
+      name: 'नाम',
+      reserved: 'आरक्षित',
+      reservedParameterDuplicateTip: 'text, json, और फाइलें आरक्षित वेरिएबल हैं। इन नामों वाले वेरिएबल आउटपुट स्कीमा में दिखाई नहीं दे सकते।',
+      description: 'विवरण',
+    },
  },
  test: {
    title: 'परीक्षण',

@@ -262,6 +262,8 @@ const translation = {
      searchKnowledgeBasesDesc: 'Cari dan navigasikan ke basis pengetahuan Anda',
      themeSystem: 'Tema Sistem',
      languageChangeDesc: 'Mengubah bahasa UI',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noWorkflowNodesFound: 'Tidak ada simpul alur kerja yang ditemukan',

@@ -114,6 +114,13 @@ const translation = {
    importFromUrlPlaceHolder: 'https://...',
    descriptionPlaceholder: 'Deskripsi singkat tentang tujuan alat, misalnya, mendapatkan suhu untuk lokasi tertentu.',
    confirmTitle: 'Konfirmasi untuk menyimpan?',
+    toolOutput: {
+      title: 'Keluaran Alat',
+      name: 'Nama',
+      reserved: 'Dicadangkan',
+      reservedParameterDuplicateTip: 'text, json, dan file adalah variabel yang dicadangkan. Variabel dengan nama-nama ini tidak dapat muncul dalam skema keluaran.',
+      description: 'Deskripsi',
+    },
  },
  test: {
    testResult: 'Hasil Tes',

@@ -308,6 +308,8 @@ const translation = {
      accountDesc: 'Vai alla pagina dell\'account',
      feedbackDesc: 'Discussioni di feedback della comunità aperta',
      docDesc: 'Apri la documentazione di aiuto',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noKnowledgeBasesFound: 'Nessuna base di conoscenza trovata',

@@ -126,6 +126,13 @@ const translation = {
    deleteToolConfirmTitle: 'Eliminare questo Strumento?',
    deleteToolConfirmContent:
      'L\'eliminazione dello Strumento è irreversibile. Gli utenti non potranno più accedere al tuo Strumento.',
+    toolOutput: {
+      title: 'Output dello strumento',
+      name: 'Nome',
+      reserved: 'Riservato',
+      reservedParameterDuplicateTip: 'text, json e files sono variabili riservate. Le variabili con questi nomi non possono comparire nello schema di output.',
+      description: 'Descrizione',
+    },
  },
  test: {
    title: 'Test',

@@ -322,6 +322,8 @@ const translation = {
      docDesc: 'ヘルプドキュメントを開く',
      communityDesc: 'オープンDiscordコミュニティ',
      feedbackDesc: 'オープンなコミュニティフィードバックディスカッション',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noAppsFound: 'アプリが見つかりません',

@@ -119,6 +119,13 @@ const translation = {
    confirmTip: 'このツールを使用しているアプリは影響を受けます',
    deleteToolConfirmTitle: 'このツールを削除しますか?',
    deleteToolConfirmContent: 'ツールの削除は取り消しできません。ユーザーはもうあなたのツールにアクセスできません。',
+    toolOutput: {
+      title: 'ツール出力',
+      name: '名前',
+      reserved: '予約済み',
+      reservedParameterDuplicateTip: 'text、json、および files は予約語です。これらの名前の変数は出力スキーマに表示することはできません。',
+      description: '説明',
+    },
  },
  test: {
    title: 'テスト',

@@ -322,6 +322,8 @@ const translation = {
      feedbackDesc: '공개 커뮤니티 피드백 토론',
      docDesc: '도움 문서 열기',
      accountDesc: '계정 페이지로 이동',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noAppsFound: '앱을 찾을 수 없습니다.',

@@ -119,6 +119,13 @@ const translation = {
    confirmTip: '이 도구를 사용하는 앱은 영향을 받습니다.',
    deleteToolConfirmTitle: '이 도구를 삭제하시겠습니까?',
    deleteToolConfirmContent: '이 도구를 삭제하면 되돌릴 수 없습니다. 사용자는 더 이상 당신의 도구에 액세스할 수 없습니다.',
+    toolOutput: {
+      title: '도구 출력',
+      name: '이름',
+      reserved: '예약됨',
+      reservedParameterDuplicateTip: 'text, json, 파일은 예약된 변수입니다. 이러한 이름을 가진 변수는 출력 스키마에 나타날 수 없습니다.',
+      description: '설명',
+    },
  },
  test: {
    title: '테스트',

@@ -303,6 +303,8 @@ const translation = {
      docDesc: 'Otwórz dokumentację pomocy',
      accountDesc: 'Przejdź do strony konta',
      feedbackDesc: 'Otwarte dyskusje na temat opinii społeczności',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noAppsFound: 'Nie znaleziono aplikacji',

@@ -100,6 +100,13 @@ const translation = {
    nameForToolCallPlaceHolder: 'Służy do rozpoznawania maszyn, takich jak getCurrentWeather, list_pets',
    confirmTip: 'Będzie to miało wpływ na aplikacje korzystające z tego narzędzia',
    confirmTitle: 'Potwierdź, aby zapisać ?',
+    toolOutput: {
+      title: 'Wynik narzędzia',
+      name: 'Nazwa',
+      reserved: 'Zarezerwowane',
+      reservedParameterDuplicateTip: 'text, json i pliki są zastrzeżonymi zmiennymi. Zmienne o tych nazwach nie mogą pojawiać się w schemacie wyjściowym.',
+      description: 'Opis',
+    },
  },
  test: {
    title: 'Test',

@@ -302,6 +302,8 @@ const translation = {
      communityDesc: 'Comunidade do Discord aberta',
      feedbackDesc: 'Discussões de feedback da comunidade aberta',
      docDesc: 'Abra a documentação de ajuda',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noAppsFound: 'Nenhum aplicativo encontrado',

@@ -98,6 +98,13 @@ const translation = {
    nameForToolCallTip: 'Suporta apenas números, letras e sublinhados.',
    descriptionPlaceholder: 'Breve descrição da finalidade da ferramenta, por exemplo, obter a temperatura para um local específico.',
    nameForToolCallPlaceHolder: 'Usado para reconhecimento de máquina, como getCurrentWeather, list_pets',
+    toolOutput: {
+      title: 'Saída da ferramenta',
+      name: 'Nome',
+      reserved: 'Reservado',
+      reservedParameterDuplicateTip: 'texto, json e arquivos são variáveis reservadas. Variáveis com esses nomes não podem aparecer no esquema de saída.',
+      description: 'Descrição',
+    },
  },
  test: {
    title: 'Testar',

@@ -302,6 +302,8 @@ const translation = {
      docDesc: 'Deschide documentația de ajutor',
      communityDesc: 'Deschide comunitatea Discord',
      accountDesc: 'Navigați la pagina de cont',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noAppsFound: 'Nu s-au găsit aplicații',

@@ -98,6 +98,13 @@ const translation = {
    confirmTitle: 'Confirmați pentru a salva?',
    customDisclaimerPlaceholder: 'Vă rugăm să introduceți declinarea responsabilității personalizate',
    nameForToolCallTip: 'Acceptă doar numere, litere și caractere de subliniere.',
+    toolOutput: {
+      title: 'Ieșire instrument',
+      name: 'Nume',
+      reserved: 'Rezervat',
+      reservedParameterDuplicateTip: 'text, json și fișiere sunt variabile rezervate. Variabilele cu aceste nume nu pot apărea în schema de ieșire.',
+      description: 'Descriere',
+    },
  },
  test: {
    title: 'Testează',

@@ -302,6 +302,8 @@ const translation = {
      feedbackDesc: 'Обсуждения обратной связи с открытым сообществом',
      docDesc: 'Откройте справочную документацию',
      communityDesc: 'Открытое сообщество Discord',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noPluginsFound: 'Плагины не найдены',

@@ -119,6 +119,13 @@ const translation = {
    confirmTip: 'Приложения, использующие этот инструмент, будут затронуты',
    deleteToolConfirmTitle: 'Удалить этот инструмент?',
    deleteToolConfirmContent: 'Удаление инструмента необратимо. Пользователи больше не смогут получить доступ к вашему инструменту.',
+    toolOutput: {
+      title: 'Вывод инструмента',
+      name: 'Имя',
+      reserved: 'Зарезервировано',
+      reservedParameterDuplicateTip: 'text, json и files — зарезервированные переменные. Переменные с этими именами не могут появляться в схеме вывода.',
+      description: 'Описание',
+    },
  },
  test: {
    title: 'Тест',

@@ -302,6 +302,8 @@ const translation = {
      docDesc: 'Odprite pomoč dokumentacijo',
      feedbackDesc: 'Razprave o povratnih informacijah odprte skupnosti',
      communityDesc: 'Odpri Discord skupnost',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noPluginsFound: 'Vtičnikov ni mogoče najti',

@@ -119,6 +119,13 @@ const translation = {
    confirmTip: 'Aplikacije, ki uporabljajo to orodje, bodo vplivane',
    deleteToolConfirmTitle: 'Izbrisati to orodje?',
    deleteToolConfirmContent: 'Brisanje orodja je nepovratno. Uporabniki ne bodo več imeli dostopa do vašega orodja.',
+    toolOutput: {
+      title: 'Izhod orodja',
+      name: 'Ime',
+      reserved: 'Rezervirano',
+      reservedParameterDuplicateTip: 'text, json in datoteke so rezervirane spremenljivke. Spremenljivke s temi imeni se ne smejo pojaviti v izhodni shemi.',
+      description: 'Opis',
+    },
  },
  test: {
    title: 'Test',

@@ -298,6 +298,8 @@ const translation = {
      accountDesc: 'ไปที่หน้าบัญชี',
      docDesc: 'เปิดเอกสารช่วยเหลือ',
      communityDesc: 'เปิดชุมชน Discord',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noPluginsFound: 'ไม่พบปลั๊กอิน',

@@ -119,6 +119,13 @@ const translation = {
    confirmTip: 'แอปที่ใช้เครื่องมือนี้จะได้รับผลกระทบ',
    deleteToolConfirmTitle: 'ลบเครื่องมือนี้?',
    deleteToolConfirmContent: 'การลบเครื่องมือนั้นไม่สามารถย้อนกลับได้ ผู้ใช้จะไม่สามารถเข้าถึงเครื่องมือของคุณได้อีกต่อไป',
+    toolOutput: {
+      title: 'เอาต์พุตของเครื่องมือ',
+      name: 'ชื่อ',
+      reserved: 'สงวน',
+      reservedParameterDuplicateTip: 'text, json และ files เป็นตัวแปรที่สงวนไว้ ไม่สามารถใช้ชื่อตัวแปรเหล่านี้ในโครงสร้างผลลัพธ์ได้',
+      description: 'คำอธิบาย',
+    },
  },
  test: {
    title: 'ทดสอบ',

@@ -298,6 +298,8 @@ const translation = {
      accountDesc: 'Hesap sayfasına gidin',
      feedbackDesc: 'Açık topluluk geri bildirim tartışmaları',
      docDesc: 'Yardım belgelerini aç',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noAppsFound: 'Uygulama bulunamadı',

@@ -119,6 +119,13 @@ const translation = {
    confirmTip: 'Bu aracı kullanan uygulamalar etkilenecek',
    deleteToolConfirmTitle: 'Bu Aracı silmek istiyor musunuz?',
    deleteToolConfirmContent: 'Aracın silinmesi geri alınamaz. Kullanıcılar artık aracınıza erişemeyecek.',
+    toolOutput: {
+      title: 'Araç Çıktısı',
+      name: 'İsim',
+      reserved: 'Ayrılmış',
+      reservedParameterDuplicateTip: 'text, json ve dosyalar ayrılmış değişkenlerdir. Bu isimlere sahip değişkenler çıktı şemasında yer alamaz.',
+      description: 'Açıklama',
+    },
  },
  test: {
    title: 'Test',

@@ -302,6 +302,8 @@ const translation = {
      docDesc: 'Відкрийте документацію допомоги',
      accountDesc: 'Перейдіть на сторінку облікового запису',
      communityDesc: 'Відкрита Discord-спільнота',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noPluginsFound: 'Плагінів не знайдено',

@@ -98,6 +98,13 @@ const translation = {
    confirmTip: 'Це вплине на програми, які використовують цей інструмент',
    nameForToolCallPlaceHolder: 'Використовується для розпізнавання машин, таких як getCurrentWeather, list_pets',
    descriptionPlaceholder: 'Короткий опис призначення інструменту, наприклад, отримання температури для конкретного місця.',
+    toolOutput: {
+      title: 'Вихідні дані інструменту',
+      name: 'Ім\'я',
+      reserved: 'Зарезервовано',
+      reservedParameterDuplicateTip: 'text, json та файли є зарезервованими змінними. Змінні з такими іменами не можуть з’являтися в схемі вихідних даних.',
+      description: 'Опис',
+    },
  },
  test: {
    title: 'Тест',

@@ -302,6 +302,8 @@ const translation = {
      accountDesc: 'Đi đến trang tài khoản',
      docDesc: 'Mở tài liệu trợ giúp',
      communityDesc: 'Mở cộng đồng Discord',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noWorkflowNodesFound: 'Không tìm thấy nút quy trình làm việc',

@@ -98,6 +98,13 @@ const translation = {
    description: 'Sự miêu tả',
    confirmTitle: 'Xác nhận để lưu ?',
    confirmTip: 'Các ứng dụng sử dụng công cụ này sẽ bị ảnh hưởng',
+    toolOutput: {
+      title: 'Đầu ra của công cụ',
+      name: 'Tên',
+      reserved: 'Dành riêng',
+      reservedParameterDuplicateTip: 'text, json và files là các biến dành riêng. Các biến có tên này không thể xuất hiện trong sơ đồ đầu ra.',
+      description: 'Mô tả',
+    },
  },
  test: {
    title: 'Kiểm tra',

@@ -324,6 +324,8 @@ const translation = {
      communityDesc: '打开 Discord 社区',
      docDesc: '打开帮助文档',
      feedbackDesc: '打开社区反馈讨论',
+      zenTitle: '专注模式',
+      zenDesc: '切换画布专注模式',
    },
    emptyState: {
      noAppsFound: '未找到应用',

@@ -301,6 +301,8 @@ const translation = {
      accountDesc: '導航到帳戶頁面',
      feedbackDesc: '開放社區反饋討論',
      docDesc: '開啟幫助文件',
+      zenTitle: 'Zen Mode',
+      zenDesc: 'Toggle canvas focus mode',
    },
    emptyState: {
      noAppsFound: '未找到應用',

@@ -98,6 +98,13 @@ const translation = {
    nameForToolCallTip: '僅支援數位、字母和下劃線。',
    confirmTip: '使用此工具的應用程式將受到影響',
    nameForToolCallPlaceHolder: '用於機器識別,例如 getCurrentWeather、list_pets',
+    toolOutput: {
+      title: '工具輸出',
+      name: '名稱',
+      reserved: '已保留',
+      reservedParameterDuplicateTip: 'text、json 和 files 是保留變數。這些名稱的變數不能出現在輸出結構中。',
+      description: '描述',
+    },
  },
  test: {
    title: '測試',

@@ -2,7 +2,7 @@
  "name": "dify-web",
  "version": "1.10.1",
  "private": true,
-  "packageManager": "pnpm@10.23.0+sha512.21c4e5698002ade97e4efe8b8b4a89a8de3c85a37919f957e7a0f30f38fbc5bbdd05980ffe29179b2fb6e6e691242e098d945d1601772cad0fef5fb6411e2a4b",
+  "packageManager": "pnpm@10.24.0+sha512.01ff8ae71b4419903b65c60fb2dc9d34cf8bb6e06d03bde112ef38f7a34d6904c424ba66bea5cdcf12890230bf39f9580473140ed9c946fef328b6e5238a345a",
  "engines": {
    "node": ">=v22.11.0"
  },
|
|||
Loading…
Reference in New Issue