refactor: replace bare dict with dict[str, Any] in RAG and service unit tests (#35184)

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
This commit is contained in:
wdeveloper16 2026-04-14 19:50:43 +02:00 committed by GitHub
parent 2c58b424a1
commit 62f42b3f24
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
9 changed files with 31 additions and 20 deletions

View File

@@ -1,5 +1,6 @@
import json
from types import SimpleNamespace
from typing import Any
from unittest.mock import MagicMock
import pytest
@@ -57,7 +58,7 @@ class _FakeSelect:
return self
def _dataset_keyword_table(data_source_type: str = "database", keyword_table_dict: dict | None = None):
def _dataset_keyword_table(data_source_type: str = "database", keyword_table_dict: dict[str, Any] | None = None):
return SimpleNamespace(
data_source_type=data_source_type,
keyword_table_dict=keyword_table_dict,

View File

@@ -1,4 +1,5 @@
from types import SimpleNamespace
from typing import Any
from unittest.mock import MagicMock, Mock, call, patch
from uuid import uuid4
@@ -20,7 +21,7 @@ def create_mock_document(
doc_id: str,
score: float = 0.8,
provider: str = "dify",
additional_metadata: dict | None = None,
additional_metadata: dict[str, Any] | None = None,
) -> Document:
"""
Create a mock Document object for testing.

View File

@@ -1,4 +1,5 @@
from types import SimpleNamespace
from typing import Any
from unittest.mock import Mock, patch
import pytest
@@ -71,7 +72,9 @@ class TestParagraphIndexProcessor:
with pytest.raises(ValueError, match="No rules found in process rule"):
processor.transform([Document(page_content="text", metadata={})], process_rule={"mode": "custom"})
def test_transform_validates_segmentation(self, processor: ParagraphIndexProcessor, process_rule: dict) -> None:
def test_transform_validates_segmentation(
self, processor: ParagraphIndexProcessor, process_rule: dict[str, Any]
) -> None:
rules_without_segmentation = SimpleNamespace(segmentation=None)
with patch(
@@ -84,7 +87,9 @@ class TestParagraphIndexProcessor:
process_rule={"mode": "custom", "rules": {"enabled": True}},
)
def test_transform_builds_split_documents(self, processor: ParagraphIndexProcessor, process_rule: dict) -> None:
def test_transform_builds_split_documents(
self, processor: ParagraphIndexProcessor, process_rule: dict[str, Any]
) -> None:
source_document = Document(page_content="source", metadata={"dataset_id": "dataset-1", "document_id": "doc-1"})
splitter = Mock()
splitter.split_documents.return_value = [

View File

@@ -1,4 +1,5 @@
from types import SimpleNamespace
from typing import Any
from unittest.mock import MagicMock, Mock, patch
import pandas as pd
@@ -77,7 +78,7 @@ class TestQAIndexProcessor:
processor.transform([Document(page_content="text", metadata={})], process_rule={"mode": "custom"})
def test_transform_preview_calls_formatter_once(
self, processor: QAIndexProcessor, process_rule: dict, fake_flask_app
self, processor: QAIndexProcessor, process_rule: dict[str, Any], fake_flask_app
) -> None:
document = Document(page_content="raw text", metadata={"dataset_id": "dataset-1", "document_id": "doc-1"})
split_node = Document(page_content=".question", metadata={})
@@ -119,7 +120,7 @@ class TestQAIndexProcessor:
mock_format.assert_called_once()
def test_transform_non_preview_uses_thread_batches(
self, processor: QAIndexProcessor, process_rule: dict, fake_flask_app
self, processor: QAIndexProcessor, process_rule: dict[str, Any], fake_flask_app
) -> None:
documents = [
Document(page_content="doc-1", metadata={"document_id": "doc-1", "dataset_id": "dataset-1"}),

View File

@@ -1,6 +1,7 @@
import threading
from contextlib import contextmanager, nullcontext
from types import SimpleNamespace
from typing import Any
from unittest.mock import MagicMock, Mock, patch
from uuid import uuid4
@@ -45,7 +46,7 @@ def create_mock_document(
doc_id: str,
score: float = 0.8,
provider: str = "dify",
additional_metadata: dict | None = None,
additional_metadata: dict[str, Any] | None = None,
) -> Document:
"""
Create a mock Document object for testing.
@@ -2021,7 +2022,7 @@ def create_mock_document_methods(
doc_id: str,
score: float = 0.8,
provider: str = "dify",
additional_metadata: dict | None = None,
additional_metadata: dict[str, Any] | None = None,
) -> Document:
"""
Create a mock Document object for testing.
@@ -4091,7 +4092,7 @@ def _doc(
dataset_id: str = "dataset-1",
document_id: str = "document-1",
doc_id: str = "node-1",
extra: dict | None = None,
extra: dict[str, Any] | None = None,
) -> Document:
metadata = {
"score": score,

View File

@@ -1,3 +1,4 @@
from typing import Any
from unittest.mock import MagicMock, Mock, patch
from uuid import uuid4
@@ -55,7 +56,7 @@ def create_mock_document(
doc_id: str,
score: float = 0.8,
provider: str = "dify",
additional_metadata: dict | None = None,
additional_metadata: dict[str, Any] | None = None,
) -> Document:
"""
Create a mock Document object for testing.

View File

@@ -1,5 +1,5 @@
from types import SimpleNamespace
from typing import cast
from typing import Any, cast
from unittest.mock import MagicMock, Mock
import pytest
@@ -558,7 +558,7 @@ def test_append_workflow_export_data_filters_credentials(mocker) -> None:
"services.rag_pipeline.rag_pipeline_dsl_service.DependenciesAnalysisService.generate_dependencies",
return_value=[],
)
export_data: dict = {}
export_data: dict[str, Any] = {}
pipeline = Mock(id="p1", tenant_id="t1")
service._append_workflow_export_data(export_data=export_data, pipeline=pipeline, include_secret=False)
@@ -641,7 +641,7 @@ def test_append_workflow_export_data_encrypts_knowledge_retrieval_dataset_ids(mo
"services.rag_pipeline.rag_pipeline_dsl_service.DependenciesAnalysisService.generate_dependencies",
return_value=[],
)
export_data: dict = {}
export_data: dict[str, Any] = {}
pipeline = Mock(id="p1", tenant_id="t1")
service._append_workflow_export_data(export_data=export_data, pipeline=pipeline, include_secret=False)

View File

@@ -53,6 +53,7 @@ Tests available voice retrieval:
- text_to_speech: Enables TTS functionality
"""
from typing import Any
from unittest.mock import MagicMock, Mock, create_autospec, patch
import pytest
@@ -109,7 +110,7 @@ class AudioServiceTestDataFactory:
return app
@staticmethod
def create_workflow_mock(features_dict: dict | None = None, **kwargs) -> Mock:
def create_workflow_mock(features_dict: dict[str, Any] | None = None, **kwargs) -> Mock:
"""
Create a mock Workflow object.
@@ -128,8 +129,8 @@
@staticmethod
def create_app_model_config_mock(
speech_to_text_dict: dict | None = None,
text_to_speech_dict: dict | None = None,
speech_to_text_dict: dict[str, Any] | None = None,
text_to_speech_dict: dict[str, Any] | None = None,
**kwargs,
) -> Mock:
"""

View File

@@ -94,8 +94,8 @@ class TestWorkflowAssociatedDataFactory:
app_id: str = "app-123",
version: str = Workflow.VERSION_DRAFT,
workflow_type: str = WorkflowType.WORKFLOW.value,
graph: dict | None = None,
features: dict | None = None,
graph: dict[str, Any] | None = None,
features: dict[str, Any] | None = None,
unique_hash: str | None = None,
**kwargs,
) -> MagicMock:
@@ -1686,7 +1686,7 @@ class TestWorkflowServiceCredentialValidation:
"""Missing provider or model in node_data should be a no-op."""
# Arrange
workflow = self._make_workflow([])
node_data: dict = {} # no model key
node_data: dict[str, Any] = {} # no model key
# Act + Assert (no error expected)
service._validate_load_balancing_credentials(workflow, node_data, "node-1")
@@ -2269,7 +2269,7 @@ class TestRebuildFileForUserInputsInStartNode:
# Arrange
file_var = self._make_variable("attachment", VariableEntityType.FILE)
start_data = self._make_start_node_data([file_var])
user_inputs: dict = {} # attachment not provided
user_inputs: dict[str, Any] = {} # attachment not provided
# Act
result = _rebuild_file_for_user_inputs_in_start_node(