test(graph_engine): block response nodes during streaming (#26377)

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
This commit is contained in:
-LAN- 2025-09-28 22:19:11 +08:00 committed by GitHub
parent d00a72a435
commit 2e914808ea
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 250 additions and 0 deletions

View File

@ -0,0 +1,222 @@
app:
description: 'this is a chatflow with 2 answer nodes.
its outputs should look like:
```
--- answer 1 ---
foo
--- answer 2 ---
<llm''s outputs>
```'
icon: 🤖
icon_background: '#FFEAD5'
mode: advanced-chat
name: test-answer-order
use_icon_as_answer_icon: false
dependencies:
- current_identifier: null
type: marketplace
value:
marketplace_plugin_unique_identifier: langgenius/openai:0.2.6@e2665624a156f52160927bceac9e169bd7e5ae6b936ae82575e14c90af390e6e
version: null
kind: app
version: 0.4.0
workflow:
conversation_variables: []
environment_variables: []
features:
file_upload:
allowed_file_extensions:
- .JPG
- .JPEG
- .PNG
- .GIF
- .WEBP
- .SVG
allowed_file_types:
- image
allowed_file_upload_methods:
- local_file
- remote_url
enabled: false
fileUploadConfig:
audio_file_size_limit: 50
batch_count_limit: 5
file_size_limit: 15
image_file_size_limit: 10
video_file_size_limit: 100
workflow_file_upload_limit: 10
image:
enabled: false
number_limits: 3
transfer_methods:
- local_file
- remote_url
number_limits: 3
opening_statement: ''
retriever_resource:
enabled: true
sensitive_word_avoidance:
enabled: false
speech_to_text:
enabled: false
suggested_questions: []
suggested_questions_after_answer:
enabled: false
text_to_speech:
enabled: false
language: ''
voice: ''
graph:
edges:
- data:
isInIteration: false
isInLoop: false
sourceType: answer
targetType: answer
id: 1759052466526-source-1759052469368-target
source: '1759052466526'
sourceHandle: source
target: '1759052469368'
targetHandle: target
type: custom
zIndex: 0
- data:
isInIteration: false
isInLoop: false
sourceType: start
targetType: llm
id: 1759052439553-source-1759052580454-target
source: '1759052439553'
sourceHandle: source
target: '1759052580454'
targetHandle: target
type: custom
zIndex: 0
- data:
isInIteration: false
isInLoop: false
sourceType: llm
targetType: answer
id: 1759052580454-source-1759052466526-target
source: '1759052580454'
sourceHandle: source
target: '1759052466526'
targetHandle: target
type: custom
zIndex: 0
nodes:
- data:
selected: false
title: Start
type: start
variables: []
height: 52
id: '1759052439553'
position:
x: 30
y: 242
positionAbsolute:
x: 30
y: 242
sourcePosition: right
targetPosition: left
type: custom
width: 242
- data:
answer: '--- answer 1 ---
foo
'
selected: false
title: Answer
type: answer
variables: []
height: 100
id: '1759052466526'
position:
x: 632
y: 242
positionAbsolute:
x: 632
y: 242
selected: true
sourcePosition: right
targetPosition: left
type: custom
width: 242
- data:
answer: '--- answer 2 ---
{{#1759052580454.text#}}
'
selected: false
title: Answer 2
type: answer
variables: []
height: 103
id: '1759052469368'
position:
x: 934
y: 242
positionAbsolute:
x: 934
y: 242
selected: false
sourcePosition: right
targetPosition: left
type: custom
width: 242
- data:
context:
enabled: false
variable_selector: []
model:
completion_params:
temperature: 0.7
mode: chat
name: gpt-4o
provider: langgenius/openai/openai
prompt_template:
- id: 5c1d873b-06b2-4dce-939e-672882bbd7c0
role: system
text: ''
- role: user
text: '{{#sys.query#}}'
selected: false
title: LLM
type: llm
vision:
enabled: false
height: 88
id: '1759052580454'
position:
x: 332
y: 242
positionAbsolute:
x: 332
y: 242
selected: false
sourcePosition: right
targetPosition: left
type: custom
width: 242
viewport:
x: 126.2797574512839
y: 289.55932160537446
zoom: 1.0743222672006216
rag_pipeline_variables: []

View File

@ -0,0 +1,28 @@
from .test_mock_config import MockConfigBuilder
from .test_table_runner import TableTestRunner, WorkflowTestCase
# Node id of the LLM node in the `test-answer-order` fixture graph.
LLM_NODE_ID = "1759052580454"


def test_answer_nodes_emit_in_order() -> None:
    """Run the two-answer-node fixture and check the combined answer text.

    The workflow is start -> llm -> answer -> answer2; the LLM node is mocked
    so no real model call happens, and the final streamed answer must contain
    both answer nodes' outputs in graph order.
    """
    # Mock the LLM node's output so the run is deterministic and offline.
    config = (
        MockConfigBuilder()
        .with_llm_response("unused default")
        .with_node_output(LLM_NODE_ID, {"text": "mocked llm text"})
        .build()
    )

    # Expected concatenation: answer 1's static text followed by answer 2,
    # which interpolates the mocked LLM text.
    expected = (
        "--- answer 1 ---\n\n"
        "foo\n"
        "--- answer 2 ---\n\n"
        "mocked llm text\n"
    )

    outcome = TableTestRunner().run_test_case(
        WorkflowTestCase(
            fixture_path="test-answer-order",
            query="",
            expected_outputs={"answer": expected},
            use_auto_mock=True,
            mock_config=config,
        )
    )
    assert outcome.success, outcome.error