mirror of https://github.com/langgenius/dify.git
Merge remote-tracking branch 'origin/main' into feat/queue-based-graph-engine
Commit b68afdfa64
@@ -173,7 +173,7 @@ class ModelConfig(BaseModel):

 class Condition(BaseModel):
     """
-    Conditon detail
+    Condition detail
     """

     name: str
@@ -30,7 +30,7 @@ SupportedComparisonOperator = Literal[

 class Condition(BaseModel):
     """
-    Conditon detail
+    Condition detail
     """

     name: str
@@ -91,7 +91,7 @@ SupportedComparisonOperator = Literal[

 class Condition(BaseModel):
     """
-    Conditon detail
+    Condition detail
     """

     name: str
@@ -259,11 +259,30 @@ class MCPToolManageService:
             if sse_read_timeout is not None:
                 mcp_provider.sse_read_timeout = sse_read_timeout
             if headers is not None:
-                # Encrypt headers
+                # Merge masked headers from frontend with existing real values
                 if headers:
-                    encrypted_headers_dict = MCPToolManageService._encrypt_headers(headers, tenant_id)
+                    # existing decrypted and masked headers
+                    existing_decrypted = mcp_provider.decrypted_headers
+                    existing_masked = mcp_provider.masked_headers
+
+                    # Build final headers: if value equals masked existing, keep original decrypted value
+                    final_headers: dict[str, str] = {}
+                    for key, incoming_value in headers.items():
+                        if (
+                            key in existing_masked
+                            and key in existing_decrypted
+                            and isinstance(incoming_value, str)
+                            and incoming_value == existing_masked.get(key)
+                        ):
+                            # unchanged, use original decrypted value
+                            final_headers[key] = str(existing_decrypted[key])
+                        else:
+                            final_headers[key] = incoming_value
+
+                    encrypted_headers_dict = MCPToolManageService._encrypt_headers(final_headers, tenant_id)
                     mcp_provider.encrypted_headers = json.dumps(encrypted_headers_dict)
                 else:
+                    # Explicitly clear headers if empty dict passed
                     mcp_provider.encrypted_headers = None
             db.session.commit()
         except IntegrityError as e:
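Note: the hunk above keeps a stored header value whenever the frontend submits back the masked placeholder unchanged. A minimal standalone sketch of that merge rule with hypothetical data follows; merge_headers is an illustrative helper, not the actual service method.

def merge_headers(incoming: dict[str, str], masked: dict[str, str], decrypted: dict[str, str]) -> dict[str, str]:
    """Keep the stored real value when the frontend echoes back its masked placeholder."""
    merged: dict[str, str] = {}
    for key, value in incoming.items():
        if key in masked and key in decrypted and value == masked[key]:
            merged[key] = decrypted[key]  # field left untouched in the UI: reuse the original secret
        else:
            merged[key] = value  # new or edited field: take the submitted value
    return merged

# Hypothetical example: the token is echoed back masked (unchanged), the org header is edited.
existing_decrypted = {"Authorization": "Bearer real-token", "X-Org": "old"}
existing_masked = {"Authorization": "Bearer re******en", "X-Org": "o*d"}
incoming = {"Authorization": "Bearer re******en", "X-Org": "new"}
assert merge_headers(incoming, existing_masked, existing_decrypted) == {
    "Authorization": "Bearer real-token",
    "X-Org": "new",
}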
@@ -290,9 +290,9 @@ class TestDisableSegmentsFromIndexTask:
         # Verify the call arguments (checking by attributes rather than object identity)
         call_args = mock_processor.clean.call_args
         assert call_args[0][0].id == dataset.id  # First argument should be the dataset
-        assert call_args[0][1] == [
-            segment.index_node_id for segment in segments
-        ]  # Second argument should be node IDs
+        assert sorted(call_args[0][1]) == sorted(
+            [segment.index_node_id for segment in segments]
+        )  # Compare sorted lists to handle any order while preserving duplicates
         assert call_args[1]["with_keywords"] is True
         assert call_args[1]["delete_child_chunks"] is False

@@ -719,7 +719,9 @@ class TestDisableSegmentsFromIndexTask:
         # Verify the call arguments
         call_args = mock_processor.clean.call_args
         assert call_args[0][0].id == dataset.id  # First argument should be the dataset
-        assert call_args[0][1] == expected_node_ids  # Second argument should be node IDs
+        assert sorted(call_args[0][1]) == sorted(
+            expected_node_ids
+        )  # Compare sorted lists to handle any order while preserving duplicates
         assert call_args[1]["with_keywords"] is True
         assert call_args[1]["delete_child_chunks"] is False

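Note: both hunks above switch to comparing sorted lists. A small illustration, with hypothetical node IDs, of why a sorted comparison is used instead of a set: ordering is ignored, but duplicates and missing entries still cause a failure.

# Hypothetical node IDs, purely to illustrate the sorted-list comparison used in the tests above.
expected = ["node-a", "node-b", "node-b"]
assert sorted(["node-b", "node-a", "node-b"]) == sorted(expected)  # order does not matter
assert sorted(["node-a", "node-b"]) != sorted(expected)  # a lost duplicate is still detected
assert {"node-a", "node-b"} == set(expected)  # a set comparison would silently hide it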
@@ -0,0 +1,554 @@
from unittest.mock import MagicMock, patch

import pytest
from faker import Faker

from extensions.ext_database import db
from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole
from models.dataset import Dataset, Document
from tasks.document_indexing_task import document_indexing_task


class TestDocumentIndexingTask:
    """Integration tests for document_indexing_task using testcontainers."""

    @pytest.fixture
    def mock_external_service_dependencies(self):
        """Mock setup for external service dependencies."""
        with (
            patch("tasks.document_indexing_task.IndexingRunner") as mock_indexing_runner,
            patch("tasks.document_indexing_task.FeatureService") as mock_feature_service,
        ):
            # Setup mock indexing runner
            mock_runner_instance = MagicMock()
            mock_indexing_runner.return_value = mock_runner_instance

            # Setup mock feature service
            mock_features = MagicMock()
            mock_features.billing.enabled = False
            mock_feature_service.get_features.return_value = mock_features

            yield {
                "indexing_runner": mock_indexing_runner,
                "indexing_runner_instance": mock_runner_instance,
                "feature_service": mock_feature_service,
                "features": mock_features,
            }

    def _create_test_dataset_and_documents(
        self, db_session_with_containers, mock_external_service_dependencies, document_count=3
    ):
        """
        Helper method to create a test dataset and documents for testing.

        Args:
            db_session_with_containers: Database session from testcontainers infrastructure
            mock_external_service_dependencies: Mock dependencies
            document_count: Number of documents to create

        Returns:
            tuple: (dataset, documents) - Created dataset and document instances
        """
        fake = Faker()

        # Create account and tenant
        account = Account(
            email=fake.email(),
            name=fake.name(),
            interface_language="en-US",
            status="active",
        )
        db.session.add(account)
        db.session.commit()

        tenant = Tenant(
            name=fake.company(),
            status="normal",
        )
        db.session.add(tenant)
        db.session.commit()

        # Create tenant-account join
        join = TenantAccountJoin(
            tenant_id=tenant.id,
            account_id=account.id,
            role=TenantAccountRole.OWNER.value,
            current=True,
        )
        db.session.add(join)
        db.session.commit()

        # Create dataset
        dataset = Dataset(
            id=fake.uuid4(),
            tenant_id=tenant.id,
            name=fake.company(),
            description=fake.text(max_nb_chars=100),
            data_source_type="upload_file",
            indexing_technique="high_quality",
            created_by=account.id,
        )
        db.session.add(dataset)
        db.session.commit()

        # Create documents
        documents = []
        for i in range(document_count):
            document = Document(
                id=fake.uuid4(),
                tenant_id=tenant.id,
                dataset_id=dataset.id,
                position=i,
                data_source_type="upload_file",
                batch="test_batch",
                name=fake.file_name(),
                created_from="upload_file",
                created_by=account.id,
                indexing_status="waiting",
                enabled=True,
            )
            db.session.add(document)
            documents.append(document)

        db.session.commit()

        # Refresh dataset to ensure it's properly loaded
        db.session.refresh(dataset)

        return dataset, documents

    def _create_test_dataset_with_billing_features(
        self, db_session_with_containers, mock_external_service_dependencies, billing_enabled=True
    ):
        """
        Helper method to create a test dataset with billing features configured.

        Args:
            db_session_with_containers: Database session from testcontainers infrastructure
            mock_external_service_dependencies: Mock dependencies
            billing_enabled: Whether billing is enabled

        Returns:
            tuple: (dataset, documents) - Created dataset and document instances
        """
        fake = Faker()

        # Create account and tenant
        account = Account(
            email=fake.email(),
            name=fake.name(),
            interface_language="en-US",
            status="active",
        )
        db.session.add(account)
        db.session.commit()

        tenant = Tenant(
            name=fake.company(),
            status="normal",
        )
        db.session.add(tenant)
        db.session.commit()

        # Create tenant-account join
        join = TenantAccountJoin(
            tenant_id=tenant.id,
            account_id=account.id,
            role=TenantAccountRole.OWNER.value,
            current=True,
        )
        db.session.add(join)
        db.session.commit()

        # Create dataset
        dataset = Dataset(
            id=fake.uuid4(),
            tenant_id=tenant.id,
            name=fake.company(),
            description=fake.text(max_nb_chars=100),
            data_source_type="upload_file",
            indexing_technique="high_quality",
            created_by=account.id,
        )
        db.session.add(dataset)
        db.session.commit()

        # Create documents
        documents = []
        for i in range(3):
            document = Document(
                id=fake.uuid4(),
                tenant_id=tenant.id,
                dataset_id=dataset.id,
                position=i,
                data_source_type="upload_file",
                batch="test_batch",
                name=fake.file_name(),
                created_from="upload_file",
                created_by=account.id,
                indexing_status="waiting",
                enabled=True,
            )
            db.session.add(document)
            documents.append(document)

        db.session.commit()

        # Configure billing features
        mock_external_service_dependencies["features"].billing.enabled = billing_enabled
        if billing_enabled:
            mock_external_service_dependencies["features"].billing.subscription.plan = "sandbox"
            mock_external_service_dependencies["features"].vector_space.limit = 100
            mock_external_service_dependencies["features"].vector_space.size = 50

        # Refresh dataset to ensure it's properly loaded
        db.session.refresh(dataset)

        return dataset, documents

    def test_document_indexing_task_success(self, db_session_with_containers, mock_external_service_dependencies):
        """
        Test successful document indexing with multiple documents.

        This test verifies:
        - Proper dataset retrieval from database
        - Correct document processing and status updates
        - IndexingRunner integration
        - Database state updates
        """
        # Arrange: Create test data
        dataset, documents = self._create_test_dataset_and_documents(
            db_session_with_containers, mock_external_service_dependencies, document_count=3
        )
        document_ids = [doc.id for doc in documents]

        # Act: Execute the task
        document_indexing_task(dataset.id, document_ids)

        # Assert: Verify the expected outcomes
        # Verify indexing runner was called correctly
        mock_external_service_dependencies["indexing_runner"].assert_called_once()
        mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once()

        # Verify documents were updated to parsing status
        for document in documents:
            db.session.refresh(document)
            assert document.indexing_status == "parsing"
            assert document.processing_started_at is not None

        # Verify the run method was called with correct documents
        call_args = mock_external_service_dependencies["indexing_runner_instance"].run.call_args
        assert call_args is not None
        processed_documents = call_args[0][0]  # First argument should be documents list
        assert len(processed_documents) == 3

    def test_document_indexing_task_dataset_not_found(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test handling of non-existent dataset.

        This test verifies:
        - Proper error handling for missing datasets
        - Early return without processing
        - Database session cleanup
        - No unnecessary indexing runner calls
        """
        # Arrange: Use non-existent dataset ID
        fake = Faker()
        non_existent_dataset_id = fake.uuid4()
        document_ids = [fake.uuid4() for _ in range(3)]

        # Act: Execute the task with non-existent dataset
        document_indexing_task(non_existent_dataset_id, document_ids)

        # Assert: Verify no processing occurred
        mock_external_service_dependencies["indexing_runner"].assert_not_called()
        mock_external_service_dependencies["indexing_runner_instance"].run.assert_not_called()

    def test_document_indexing_task_document_not_found_in_dataset(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test handling when some documents don't exist in the dataset.

        This test verifies:
        - Only existing documents are processed
        - Non-existent documents are ignored
        - Indexing runner receives only valid documents
        - Database state updates correctly
        """
        # Arrange: Create test data
        dataset, documents = self._create_test_dataset_and_documents(
            db_session_with_containers, mock_external_service_dependencies, document_count=2
        )

        # Mix existing and non-existent document IDs
        fake = Faker()
        existing_document_ids = [doc.id for doc in documents]
        non_existent_document_ids = [fake.uuid4() for _ in range(2)]
        all_document_ids = existing_document_ids + non_existent_document_ids

        # Act: Execute the task with mixed document IDs
        document_indexing_task(dataset.id, all_document_ids)

        # Assert: Verify only existing documents were processed
        mock_external_service_dependencies["indexing_runner"].assert_called_once()
        mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once()

        # Verify only existing documents were updated
        for document in documents:
            db.session.refresh(document)
            assert document.indexing_status == "parsing"
            assert document.processing_started_at is not None

        # Verify the run method was called with only existing documents
        call_args = mock_external_service_dependencies["indexing_runner_instance"].run.call_args
        assert call_args is not None
        processed_documents = call_args[0][0]  # First argument should be documents list
        assert len(processed_documents) == 2  # Only existing documents

    def test_document_indexing_task_indexing_runner_exception(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test handling of IndexingRunner exceptions.

        This test verifies:
        - Exceptions from IndexingRunner are properly caught
        - Task completes without raising exceptions
        - Database session is properly closed
        - Error logging occurs
        """
        # Arrange: Create test data
        dataset, documents = self._create_test_dataset_and_documents(
            db_session_with_containers, mock_external_service_dependencies, document_count=2
        )
        document_ids = [doc.id for doc in documents]

        # Mock IndexingRunner to raise an exception
        mock_external_service_dependencies["indexing_runner_instance"].run.side_effect = Exception(
            "Indexing runner failed"
        )

        # Act: Execute the task
        document_indexing_task(dataset.id, document_ids)

        # Assert: Verify exception was handled gracefully
        # The task should complete without raising exceptions
        mock_external_service_dependencies["indexing_runner"].assert_called_once()
        mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once()

        # Verify documents were still updated to parsing status before the exception
        for document in documents:
            db.session.refresh(document)
            assert document.indexing_status == "parsing"
            assert document.processing_started_at is not None

    def test_document_indexing_task_mixed_document_states(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test processing documents with mixed initial states.

        This test verifies:
        - Documents with different initial states are handled correctly
        - Only valid documents are processed
        - Database state updates are consistent
        - IndexingRunner receives correct documents
        """
        # Arrange: Create test data
        dataset, base_documents = self._create_test_dataset_and_documents(
            db_session_with_containers, mock_external_service_dependencies, document_count=2
        )

        # Create additional documents with different states
        fake = Faker()
        extra_documents = []

        # Document with different indexing status
        doc1 = Document(
            id=fake.uuid4(),
            tenant_id=dataset.tenant_id,
            dataset_id=dataset.id,
            position=2,
            data_source_type="upload_file",
            batch="test_batch",
            name=fake.file_name(),
            created_from="upload_file",
            created_by=dataset.created_by,
            indexing_status="completed",  # Already completed
            enabled=True,
        )
        db.session.add(doc1)
        extra_documents.append(doc1)

        # Document with disabled status
        doc2 = Document(
            id=fake.uuid4(),
            tenant_id=dataset.tenant_id,
            dataset_id=dataset.id,
            position=3,
            data_source_type="upload_file",
            batch="test_batch",
            name=fake.file_name(),
            created_from="upload_file",
            created_by=dataset.created_by,
            indexing_status="waiting",
            enabled=False,  # Disabled
        )
        db.session.add(doc2)
        extra_documents.append(doc2)

        db.session.commit()

        all_documents = base_documents + extra_documents
        document_ids = [doc.id for doc in all_documents]

        # Act: Execute the task with mixed document states
        document_indexing_task(dataset.id, document_ids)

        # Assert: Verify processing
        mock_external_service_dependencies["indexing_runner"].assert_called_once()
        mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once()

        # Verify all documents were updated to parsing status
        for document in all_documents:
            db.session.refresh(document)
            assert document.indexing_status == "parsing"
            assert document.processing_started_at is not None

        # Verify the run method was called with all documents
        call_args = mock_external_service_dependencies["indexing_runner_instance"].run.call_args
        assert call_args is not None
        processed_documents = call_args[0][0]  # First argument should be documents list
        assert len(processed_documents) == 4

    def test_document_indexing_task_billing_sandbox_plan_batch_limit(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test billing validation for sandbox plan batch upload limit.

        This test verifies:
        - Sandbox plan batch upload limit enforcement
        - Error handling for batch upload limit exceeded
        - Document status updates to error state
        - Proper error message recording
        """
        # Arrange: Create test data with billing enabled
        dataset, documents = self._create_test_dataset_with_billing_features(
            db_session_with_containers, mock_external_service_dependencies, billing_enabled=True
        )

        # Configure sandbox plan with batch limit
        mock_external_service_dependencies["features"].billing.subscription.plan = "sandbox"

        # Create more documents than sandbox plan allows (limit is 1)
        fake = Faker()
        extra_documents = []
        for i in range(2):  # Total will be 5 documents (3 existing + 2 new)
            document = Document(
                id=fake.uuid4(),
                tenant_id=dataset.tenant_id,
                dataset_id=dataset.id,
                position=i + 3,
                data_source_type="upload_file",
                batch="test_batch",
                name=fake.file_name(),
                created_from="upload_file",
                created_by=dataset.created_by,
                indexing_status="waiting",
                enabled=True,
            )
            db.session.add(document)
            extra_documents.append(document)

        db.session.commit()
        all_documents = documents + extra_documents
        document_ids = [doc.id for doc in all_documents]

        # Act: Execute the task with too many documents for sandbox plan
        document_indexing_task(dataset.id, document_ids)

        # Assert: Verify error handling
        for document in all_documents:
            db.session.refresh(document)
            assert document.indexing_status == "error"
            assert document.error is not None
            assert "batch upload" in document.error
            assert document.stopped_at is not None

        # Verify no indexing runner was called
        mock_external_service_dependencies["indexing_runner"].assert_not_called()

    def test_document_indexing_task_billing_disabled_success(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test successful processing when billing is disabled.

        This test verifies:
        - Processing continues normally when billing is disabled
        - No billing validation occurs
        - Documents are processed successfully
        - IndexingRunner is called correctly
        """
        # Arrange: Create test data with billing disabled
        dataset, documents = self._create_test_dataset_with_billing_features(
            db_session_with_containers, mock_external_service_dependencies, billing_enabled=False
        )

        document_ids = [doc.id for doc in documents]

        # Act: Execute the task with billing disabled
        document_indexing_task(dataset.id, document_ids)

        # Assert: Verify successful processing
        mock_external_service_dependencies["indexing_runner"].assert_called_once()
        mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once()

        # Verify documents were updated to parsing status
        for document in documents:
            db.session.refresh(document)
            assert document.indexing_status == "parsing"
            assert document.processing_started_at is not None

    def test_document_indexing_task_document_is_paused_error(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test handling of DocumentIsPausedError from IndexingRunner.

        This test verifies:
        - DocumentIsPausedError is properly caught and handled
        - Task completes without raising exceptions
        - Appropriate logging occurs
        - Database session is properly closed
        """
        # Arrange: Create test data
        dataset, documents = self._create_test_dataset_and_documents(
            db_session_with_containers, mock_external_service_dependencies, document_count=2
        )
        document_ids = [doc.id for doc in documents]

        # Mock IndexingRunner to raise DocumentIsPausedError
        from core.indexing_runner import DocumentIsPausedError

        mock_external_service_dependencies["indexing_runner_instance"].run.side_effect = DocumentIsPausedError(
            "Document indexing is paused"
        )

        # Act: Execute the task
        document_indexing_task(dataset.id, document_ids)

        # Assert: Verify exception was handled gracefully
        # The task should complete without raising exceptions
        mock_external_service_dependencies["indexing_runner"].assert_called_once()
        mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once()

        # Verify documents were still updated to parsing status before the exception
        for document in documents:
            db.session.refresh(document)
            assert document.indexing_status == "parsing"
            assert document.processing_started_at is not None
@@ -50,8 +50,8 @@ const StrategyDetail: FC<Props> = ({
       res.push({
         name: outputKey,
         type: output.type === 'array'
-          ? `Array[${output.items?.type.slice(0, 1).toLocaleUpperCase()}${output.items?.type.slice(1)}]`
-          : `${output.type.slice(0, 1).toLocaleUpperCase()}${output.type.slice(1)}`,
+          ? `Array[${output.items?.type ? output.items.type.slice(0, 1).toLocaleUpperCase() + output.items.type.slice(1) : 'Unknown'}]`
+          : `${output.type ? output.type.slice(0, 1).toLocaleUpperCase() + output.type.slice(1) : 'Unknown'}`,
         description: output.description,
       })
     })
@@ -411,8 +411,8 @@ const formatItem = (
       outputSchema.push({
         variable: outputKey,
         type: dataType === 'array'
-          ? `array[${output.items?.type.slice(0, 1).toLocaleLowerCase()}${output.items?.type.slice(1)}]`
-          : `${output.type.slice(0, 1).toLocaleLowerCase()}${output.type.slice(1)}`,
+          ? `Array[${output.items?.type ? output.items.type.slice(0, 1).toLocaleLowerCase() + output.items.type.slice(1) : 'Unknown'}]`
+          : `${output.type ? output.type.slice(0, 1).toLocaleLowerCase() + output.type.slice(1) : 'Unknown'}`,
         description: output.description,
         children: output.type === 'object' ? {
           schema: {
@@ -507,8 +507,8 @@ const formatItem = (
       outputs.push({
         variable: outputKey,
         type: output.type === 'array'
-          ? `Array[${output.items?.type.slice(0, 1).toLocaleUpperCase()}${output.items?.type.slice(1)}]` as VarType
-          : `${output.type.slice(0, 1).toLocaleUpperCase()}${output.type.slice(1)}` as VarType,
+          ? `Array[${output.items?.type ? output.items.type.slice(0, 1).toLocaleUpperCase() + output.items.type.slice(1) : 'Unknown'}]` as VarType
+          : `${output.type ? output.type.slice(0, 1).toLocaleUpperCase() + output.type.slice(1) : 'Unknown'}` as VarType,
       })
     })
     res.vars = [
@@ -188,8 +188,8 @@ const useConfig = (id: string, payload: AgentNodeType) => {
       res.push({
         name: outputKey,
         type: output.type === 'array'
-          ? `Array[${output.items?.type.slice(0, 1).toLocaleUpperCase()}${output.items?.type.slice(1)}]`
-          : `${output.type.slice(0, 1).toLocaleUpperCase()}${output.type.slice(1)}`,
+          ? `Array[${output.items?.type ? output.items.type.slice(0, 1).toLocaleUpperCase() + output.items.type.slice(1) : 'Unknown'}]`
+          : `${output.type ? output.type.slice(0, 1).toLocaleUpperCase() + output.type.slice(1) : 'Unknown'}`,
         description: output.description,
       })
     })
@@ -47,14 +47,14 @@ const useConfig = (id: string, payload: LoopNodeType) => {
   })

   const changeErrorResponseMode = useCallback((item: { value: unknown }) => {
-    const newInputs = produce(inputs, (draft) => {
+    const newInputs = produce(inputsRef.current, (draft) => {
       draft.error_handle_mode = item.value as ErrorHandleMode
     })
-    setInputs(newInputs)
-  }, [inputs, setInputs])
+    handleInputsChange(newInputs)
+  }, [inputs, handleInputsChange])

   const handleAddCondition = useCallback<HandleAddCondition>((valueSelector, varItem) => {
-    const newInputs = produce(inputs, (draft) => {
+    const newInputs = produce(inputsRef.current, (draft) => {
       if (!draft.break_conditions)
         draft.break_conditions = []

@@ -66,34 +66,34 @@ const useConfig = (id: string, payload: LoopNodeType) => {
         value: varItem.type === VarType.boolean ? 'false' : '',
       })
     })
-    setInputs(newInputs)
-  }, [getIsVarFileAttribute, inputs, setInputs])
+    handleInputsChange(newInputs)
+  }, [getIsVarFileAttribute, handleInputsChange])

   const handleRemoveCondition = useCallback<HandleRemoveCondition>((conditionId) => {
-    const newInputs = produce(inputs, (draft) => {
+    const newInputs = produce(inputsRef.current, (draft) => {
       draft.break_conditions = draft.break_conditions?.filter(item => item.id !== conditionId)
     })
-    setInputs(newInputs)
-  }, [inputs, setInputs])
+    handleInputsChange(newInputs)
+  }, [handleInputsChange])

   const handleUpdateCondition = useCallback<HandleUpdateCondition>((conditionId, newCondition) => {
-    const newInputs = produce(inputs, (draft) => {
+    const newInputs = produce(inputsRef.current, (draft) => {
       const targetCondition = draft.break_conditions?.find(item => item.id === conditionId)
       if (targetCondition)
         Object.assign(targetCondition, newCondition)
     })
-    setInputs(newInputs)
-  }, [inputs, setInputs])
+    handleInputsChange(newInputs)
+  }, [handleInputsChange])

   const handleToggleConditionLogicalOperator = useCallback<HandleToggleConditionLogicalOperator>(() => {
-    const newInputs = produce(inputs, (draft) => {
+    const newInputs = produce(inputsRef.current, (draft) => {
       draft.logical_operator = draft.logical_operator === LogicalOperator.and ? LogicalOperator.or : LogicalOperator.and
     })
-    setInputs(newInputs)
-  }, [inputs, setInputs])
+    handleInputsChange(newInputs)
+  }, [handleInputsChange])

   const handleAddSubVariableCondition = useCallback<HandleAddSubVariableCondition>((conditionId: string, key?: string) => {
-    const newInputs = produce(inputs, (draft) => {
+    const newInputs = produce(inputsRef.current, (draft) => {
       const condition = draft.break_conditions?.find(item => item.id === conditionId)
       if (!condition)
         return
@@ -119,11 +119,11 @@ const useConfig = (id: string, payload: LoopNodeType) => {
         })
       }
     })
-    setInputs(newInputs)
-  }, [inputs, setInputs])
+    handleInputsChange(newInputs)
+  }, [handleInputsChange])

   const handleRemoveSubVariableCondition = useCallback((conditionId: string, subConditionId: string) => {
-    const newInputs = produce(inputs, (draft) => {
+    const newInputs = produce(inputsRef.current, (draft) => {
       const condition = draft.break_conditions?.find(item => item.id === conditionId)
       if (!condition)
         return
@@ -133,11 +133,11 @@ const useConfig = (id: string, payload: LoopNodeType) => {
       if (subVarCondition)
         subVarCondition.conditions = subVarCondition.conditions.filter(item => item.id !== subConditionId)
     })
-    setInputs(newInputs)
-  }, [inputs, setInputs])
+    handleInputsChange(newInputs)
+  }, [handleInputsChange])

   const handleUpdateSubVariableCondition = useCallback<HandleUpdateSubVariableCondition>((conditionId, subConditionId, newSubCondition) => {
-    const newInputs = produce(inputs, (draft) => {
+    const newInputs = produce(inputsRef.current, (draft) => {
       const targetCondition = draft.break_conditions?.find(item => item.id === conditionId)
       if (targetCondition && targetCondition.sub_variable_condition) {
         const targetSubCondition = targetCondition.sub_variable_condition.conditions.find(item => item.id === subConditionId)
@@ -145,24 +145,24 @@ const useConfig = (id: string, payload: LoopNodeType) => {
           Object.assign(targetSubCondition, newSubCondition)
       }
     })
-    setInputs(newInputs)
-  }, [inputs, setInputs])
+    handleInputsChange(newInputs)
+  }, [handleInputsChange])

   const handleToggleSubVariableConditionLogicalOperator = useCallback<HandleToggleSubVariableConditionLogicalOperator>((conditionId) => {
-    const newInputs = produce(inputs, (draft) => {
+    const newInputs = produce(inputsRef.current, (draft) => {
       const targetCondition = draft.break_conditions?.find(item => item.id === conditionId)
       if (targetCondition && targetCondition.sub_variable_condition)
         targetCondition.sub_variable_condition.logical_operator = targetCondition.sub_variable_condition.logical_operator === LogicalOperator.and ? LogicalOperator.or : LogicalOperator.and
     })
-    setInputs(newInputs)
-  }, [inputs, setInputs])
+    handleInputsChange(newInputs)
+  }, [handleInputsChange])

   const handleUpdateLoopCount = useCallback((value: number) => {
-    const newInputs = produce(inputs, (draft) => {
+    const newInputs = produce(inputsRef.current, (draft) => {
       draft.loop_count = value
     })
-    setInputs(newInputs)
-  }, [inputs, setInputs])
+    handleInputsChange(newInputs)
+  }, [handleInputsChange])

   const handleAddLoopVariable = useCallback(() => {
     const newInputs = produce(inputsRef.current, (draft) => {
@@ -189,8 +189,8 @@ const useConfig = (id: string, payload: ToolNodeType) => {
       res.push({
         name: outputKey,
         type: output.type === 'array'
-          ? `Array[${output.items?.type.slice(0, 1).toLocaleUpperCase()}${output.items?.type.slice(1)}]`
-          : `${output.type.slice(0, 1).toLocaleUpperCase()}${output.type.slice(1)}`,
+          ? `Array[${output.items?.type ? output.items.type.slice(0, 1).toLocaleUpperCase() + output.items.type.slice(1) : 'Unknown'}]`
+          : `${output.type ? output.type.slice(0, 1).toLocaleUpperCase() + output.type.slice(1) : 'Unknown'}`,
         description: output.description,
       })
     }
@@ -249,6 +249,7 @@ const translation = {
     unavailable: 'Nicht verfügbar',
     credentialUnavailable: 'Anmeldeinformationen derzeit nicht verfügbar. Bitte kontaktieren Sie den Administrator.',
     customCredentialUnavailable: 'Benutzerdefinierte Anmeldeinformationen derzeit nicht verfügbar',
+    credentialUnavailableInButton: 'Zugangsdaten nicht verfügbar',
   },
   deprecated: 'Abgelehnt',
   autoUpdate: {
@@ -249,6 +249,7 @@ const translation = {
     customCredentialUnavailable: 'Las credenciales personalizadas no están disponibles actualmente.',
     unavailable: 'No disponible',
    credentialUnavailable: 'Credenciales actualmente no disponibles. Por favor, contacte al administrador.',
+    credentialUnavailableInButton: 'Credencial no disponible',
   },
   deprecated: 'Obsoleto',
   autoUpdate: {
@@ -249,6 +249,7 @@ const translation = {
     unavailable: 'در دسترس نیست',
     credentialUnavailable: 'دسترسی به مدارک در حال حاضر امکانپذیر نیست. لطفاً با مدیر تماس بگیرید.',
     customCredentialUnavailable: 'اعتبارنامههای سفارشی در حال حاضر در دسترس نیستند',
+    credentialUnavailableInButton: 'گواهی در دسترس نیست',
   },
   deprecated: 'منسوخ شده',
   autoUpdate: {
@@ -249,6 +249,7 @@ const translation = {
     customCredentialUnavailable: 'Les identifiants personnalisés ne sont actuellement pas disponibles.',
     credentialUnavailable: 'Les informations d\'identification ne sont actuellement pas disponibles. Veuillez contacter l\'administrateur.',
     unavailable: 'Non disponible',
+    credentialUnavailableInButton: 'Identifiant indisponible',
   },
   deprecated: 'Obsolète',
   autoUpdate: {
@@ -249,6 +249,7 @@ const translation = {
     unavailable: 'अप्राप्त',
     customCredentialUnavailable: 'कस्टम क्रेडेंशियल वर्तमान में उपलब्ध नहीं हैं',
     credentialUnavailable: 'वर्तमान में क्रेडेंशियल्स उपलब्ध नहीं हैं। कृपया प्रशासन से संपर्क करें।',
+    credentialUnavailableInButton: 'प्रमाण पत्र उपलब्ध नहीं है',
   },
   deprecated: 'अनुशंसित नहीं',
   autoUpdate: {
@@ -260,6 +260,7 @@ const translation = {
     authorizationName: 'Nama Otorisasi',
     workspaceDefault: 'Ruang Kerja Default',
     authorization: 'Otorisasi',
+    credentialUnavailableInButton: 'Kredensial tidak tersedia',
   },
   searchInMarketplace: 'Cari di Marketplace',
   findMoreInMarketplace: 'Temukan selengkapnya di Marketplace',
@@ -249,6 +249,7 @@ const translation = {
     unavailable: 'Non disponibile',
     customCredentialUnavailable: 'Le credenziali personalizzate attualmente non sono disponibili',
     credentialUnavailable: 'Credenziali attualmente non disponibili. Si prega di contattare l\'amministratore.',
+    credentialUnavailableInButton: 'Credenziali non disponibili',
   },
   deprecated: 'Deprecato',
   autoUpdate: {
@@ -250,6 +250,7 @@ const translation = {
     unavailable: '利用できません',
     customCredentialUnavailable: 'カスタム資格情報は現在利用できません',
     credentialUnavailable: '現在、資格情報は利用できません。管理者にご連絡ください。',
+    credentialUnavailableInButton: '資格情報が利用できません',
   },
   autoUpdate: {
     strategy: {
@@ -249,6 +249,7 @@ const translation = {
     unavailable: '사용할 수 없음',
     credentialUnavailable: '현재 자격 증명이 사용 불가능합니다. 관리자에게 문의하십시오.',
     customCredentialUnavailable: '현재 사용자 정의 자격 증명이 사용 불가능합니다.',
+    credentialUnavailableInButton: '자격 증명 사용 불가능',
   },
   deprecated: '사용 중단됨',
   autoUpdate: {
@@ -249,6 +249,7 @@ const translation = {
     unavailable: 'Niedostępny',
     customCredentialUnavailable: 'Niestandardowe dane logowania są obecnie niedostępne',
     credentialUnavailable: 'Kredencje są obecnie niedostępne. Proszę skontaktować się z administratorem.',
+    credentialUnavailableInButton: 'Credential niedostępny',
   },
   deprecated: 'Nieaktualny',
   autoUpdate: {
@@ -249,6 +249,7 @@ const translation = {
     customCredentialUnavailable: 'Credenciais personalizadas atualmente indisponíveis',
     unavailable: 'Indisponível',
     credentialUnavailable: 'Credenciais atualmente indisponíveis. Por favor, contate o administrador.',
+    credentialUnavailableInButton: 'Credencial indisponível',
   },
   deprecated: 'Obsoleto',
   autoUpdate: {
@@ -249,6 +249,7 @@ const translation = {
     unavailable: 'Necesar',
     customCredentialUnavailable: 'Credentialele personalizate sunt în prezent indisponibile',
     credentialUnavailable: 'Credențialele nu sunt disponibile în acest moment. Vă rugăm să contactați administratorul.',
+    credentialUnavailableInButton: 'Credential indisponibil',
   },
   deprecated: 'Încetat de a mai fi utilizat',
   autoUpdate: {
@@ -249,6 +249,7 @@ const translation = {
     unavailable: 'Недоступно',
     customCredentialUnavailable: 'Кастомные учетные данные в настоящее время недоступны',
     credentialUnavailable: 'Учетные данные в настоящее время недоступны. Пожалуйста, свяжитесь с администратором.',
+    credentialUnavailableInButton: 'Учетные данные недоступны',
   },
   deprecated: 'Устаревший',
   autoUpdate: {
@@ -249,6 +249,7 @@ const translation = {
     unavailable: 'Nedostopno',
     customCredentialUnavailable: 'Trenutno niso na voljo prilagojene prijave.',
     credentialUnavailable: 'Trenutno niso na voljo poverilnice. Prosimo, kontaktirajte administratorja.',
+    credentialUnavailableInButton: 'Pogodba ni na voljo',
   },
   deprecated: 'Zastaran',
   autoUpdate: {
@@ -249,6 +249,7 @@ const translation = {
     unavailable: 'ไม่มีให้บริการ',
     customCredentialUnavailable: 'ข้อมูลรับรองที่กำหนดเองขณะนี้ไม่สามารถใช้ได้',
     credentialUnavailable: 'ข้อมูลรับรองไม่สามารถใช้งานได้ในขณะนี้ กรุณาติดต่อผู้ดูแลระบบ.',
+    credentialUnavailableInButton: 'ข้อมูลรับรองไม่พร้อมใช้งาน',
   },
   deprecated: 'เลิกใช้',
   autoUpdate: {
@@ -249,6 +249,7 @@ const translation = {
     unavailable: 'Kullanılamıyor',
     customCredentialUnavailable: 'Özel kimlik bilgileri şu anda mevcut değil.',
     credentialUnavailable: 'Kimlik bilgileri şu anda mevcut değil. Lütfen yönetici ile iletişime geçin.',
+    credentialUnavailableInButton: 'Kimlik bilgileri mevcut değil',
   },
   deprecated: 'Kaldırılmış',
   autoUpdate: {
@@ -249,6 +249,7 @@ const translation = {
     unavailable: 'Недоступний',
     customCredentialUnavailable: 'Індивідуальні облікові дані наразі недоступні',
     credentialUnavailable: 'Облікові дані наразі недоступні. Будь ласка, зверніться до адміністратора.',
+    credentialUnavailableInButton: 'Облікові дані недоступні',
   },
   deprecated: 'Застарілий',
   autoUpdate: {
@@ -249,6 +249,7 @@ const translation = {
     unavailable: 'Không có sẵn',
     customCredentialUnavailable: 'Thông tin đăng nhập tùy chỉnh hiện không khả dụng',
     credentialUnavailable: 'Thông tin đăng nhập hiện không khả dụng. Vui lòng liên hệ với quản trị viên.',
+    credentialUnavailableInButton: 'Thông tin xác thực không khả dụng',
   },
   deprecated: 'Đã bị ngưng sử dụng',
   autoUpdate: {
@@ -249,6 +249,7 @@ const translation = {
     unavailable: '無法使用',
     customCredentialUnavailable: '自訂憑證目前無法使用',
     credentialUnavailable: '凭證目前無法使用。請聯繫管理員。',
+    credentialUnavailableInButton: '憑證不可用',
   },
   deprecated: '不推薦使用的',
   autoUpdate: {