Restore coverage for skipped workflow tests (#27018)

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
-LAN- 2025-10-17 09:11:48 +08:00 committed by GitHub
parent 58524d6d2b
commit 9d5300440c
3 changed files with 38 additions and 52 deletions

View File

@@ -7,14 +7,11 @@ This test suite validates the behavior of a workflow that:
 3. Handles multiple answer nodes with different outputs
 """
 
-import pytest
-
 from core.workflow.graph_events import (
     GraphRunStartedEvent,
     GraphRunSucceededEvent,
     NodeRunStartedEvent,
     NodeRunStreamChunkEvent,
-    NodeRunSucceededEvent,
 )
 
 from .test_mock_config import MockConfigBuilder
@@ -29,7 +26,6 @@ class TestComplexBranchWorkflow:
         self.runner = TableTestRunner()
         self.fixture_path = "test_complex_branch"
 
-    @pytest.mark.skip(reason="output in this workflow can be random")
     def test_hello_branch_with_llm(self):
         """
         Test when query contains 'hello' - should trigger true branch.
@@ -41,42 +37,17 @@ class TestComplexBranchWorkflow:
                 fixture_path=self.fixture_path,
                 query="hello world",
                 expected_outputs={
-                    "answer": f"{mock_text_1}contains 'hello'",
+                    "answer": f"contains 'hello'{mock_text_1}",
                 },
                 description="Basic hello case with parallel LLM execution",
                 use_auto_mock=True,
                 mock_config=(MockConfigBuilder().with_node_output("1755502777322", {"text": mock_text_1}).build()),
-                expected_event_sequence=[
-                    GraphRunStartedEvent,
-                    # Start
-                    NodeRunStartedEvent,
-                    NodeRunSucceededEvent,
-                    # If/Else (no streaming)
-                    NodeRunStartedEvent,
-                    NodeRunSucceededEvent,
-                    # LLM (with streaming)
-                    NodeRunStartedEvent,
-                ]
-                # LLM
-                + [NodeRunStreamChunkEvent] * (mock_text_1.count(" ") + 2)
-                + [
-                    # Answer's text
-                    NodeRunStreamChunkEvent,
-                    NodeRunSucceededEvent,
-                    # Answer
-                    NodeRunStartedEvent,
-                    NodeRunSucceededEvent,
-                    # Answer 2
-                    NodeRunStartedEvent,
-                    NodeRunSucceededEvent,
-                    GraphRunSucceededEvent,
-                ],
             ),
             WorkflowTestCase(
                 fixture_path=self.fixture_path,
                 query="say hello to everyone",
                 expected_outputs={
-                    "answer": "Mocked response for greetingcontains 'hello'",
+                    "answer": "contains 'hello'Mocked response for greeting",
                 },
                 description="Hello in middle of sentence",
                 use_auto_mock=True,
@@ -93,6 +64,35 @@ class TestComplexBranchWorkflow:
         for result in suite_result.results:
             assert result.success, f"Test '{result.test_case.description}' failed: {result.error}"
             assert result.actual_outputs
+            assert any(isinstance(event, GraphRunStartedEvent) for event in result.events)
+            assert any(isinstance(event, GraphRunSucceededEvent) for event in result.events)
+
+            start_index = next(
+                idx for idx, event in enumerate(result.events) if isinstance(event, GraphRunStartedEvent)
+            )
+            success_index = max(
+                idx for idx, event in enumerate(result.events) if isinstance(event, GraphRunSucceededEvent)
+            )
+            assert start_index < success_index
+
+            started_node_ids = {event.node_id for event in result.events if isinstance(event, NodeRunStartedEvent)}
+            assert {"1755502773326", "1755502777322"}.issubset(started_node_ids), (
+                f"Branch or LLM nodes missing in events: {started_node_ids}"
+            )
+
+            assert any(isinstance(event, NodeRunStreamChunkEvent) for event in result.events), (
+                "Expected streaming chunks from LLM execution"
+            )
+
+            llm_start_index = next(
+                idx
+                for idx, event in enumerate(result.events)
+                if isinstance(event, NodeRunStartedEvent) and event.node_id == "1755502777322"
+            )
+            assert any(
+                idx > llm_start_index and isinstance(event, NodeRunStreamChunkEvent)
+                for idx, event in enumerate(result.events)
+            ), "Streaming chunks should follow LLM node start"
 
     def test_non_hello_branch_with_llm(self):
         """

View File

@@ -21,7 +21,6 @@ from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom,
 from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
 from core.variables import ArrayAnySegment, ArrayFileSegment, NoneSegment
 from core.workflow.entities import GraphInitParams, GraphRuntimeState, VariablePool
-from core.workflow.graph import Graph
 from core.workflow.nodes.llm import llm_utils
 from core.workflow.nodes.llm.entities import (
     ContextConfig,
@@ -83,14 +82,6 @@ def graph_init_params() -> GraphInitParams:
     )
 
 
-@pytest.fixture
-def graph() -> Graph:
-    # TODO: This fixture uses old Graph constructor parameters that are incompatible
-    # with the new queue-based engine. Need to rewrite for new engine architecture.
-    pytest.skip("Graph fixture incompatible with new queue-based engine - needs rewrite for ResponseStreamCoordinator")
-    return Graph()
-
-
 @pytest.fixture
 def graph_runtime_state() -> GraphRuntimeState:
     variable_pool = VariablePool(
@@ -105,7 +96,7 @@ def graph_runtime_state() -> GraphRuntimeState:
 
 @pytest.fixture
 def llm_node(
-    llm_node_data: LLMNodeData, graph_init_params: GraphInitParams, graph: Graph, graph_runtime_state: GraphRuntimeState
+    llm_node_data: LLMNodeData, graph_init_params: GraphInitParams, graph_runtime_state: GraphRuntimeState
 ) -> LLMNode:
     mock_file_saver = mock.MagicMock(spec=LLMFileSaver)
     node_config = {
@@ -493,9 +484,7 @@ def test_handle_list_messages_basic(llm_node):
 
 
 @pytest.fixture
-def llm_node_for_multimodal(
-    llm_node_data, graph_init_params, graph, graph_runtime_state
-) -> tuple[LLMNode, LLMFileSaver]:
+def llm_node_for_multimodal(llm_node_data, graph_init_params, graph_runtime_state) -> tuple[LLMNode, LLMFileSaver]:
     mock_file_saver: LLMFileSaver = mock.MagicMock(spec=LLMFileSaver)
     node_config = {
         "id": "1",
@@ -655,7 +644,7 @@ class TestSaveMultimodalOutputAndConvertResultToMarkdown:
         gen = llm_node._save_multimodal_output_and_convert_result_to_markdown(
             contents=frozenset(["hello world"]), file_saver=mock_file_saver, file_outputs=[]
        )
-        assert list(gen) == ["frozenset({'hello world'})"]
+        assert list(gen) == ["hello world"]
         mock_file_saver.save_binary_string.assert_not_called()
         mock_file_saver.save_remote_url.assert_not_called()
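The updated expectation above implies that plain-string members of contents are now yielded verbatim rather than as the container's repr. A hedged sketch of the behavior the assertion encodes (not the actual implementation, which also routes multimodal content through the file saver):

    def _yield_markdown_chunks(contents):
        # Strings pass through unchanged; non-string content kinds are omitted here.
        if isinstance(contents, str):
            yield contents
        else:
            for item in contents:
                yield item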

View File

@@ -181,14 +181,11 @@ class TestAuthIntegration:
     )
     def test_all_providers_factory_creation(self, provider, credentials):
         """Test factory creation for all supported providers"""
-        try:
-            auth_class = ApiKeyAuthFactory.get_apikey_auth_factory(provider)
-            assert auth_class is not None
-            factory = ApiKeyAuthFactory(provider, credentials)
-            assert factory.auth is not None
-        except ImportError:
-            pytest.skip(f"Provider {provider} not implemented yet")
+        auth_class = ApiKeyAuthFactory.get_apikey_auth_factory(provider)
+        assert auth_class is not None
+        factory = ApiKeyAuthFactory(provider, credentials)
+        assert factory.auth is not None
 
     def _create_success_response(self, status_code=200):
         """Create successful HTTP response mock"""