From b5cb38641ad12c0e6eed404231690fd2bd6c4e0d Mon Sep 17 00:00:00 2001
From: Yeuoly
Date: Sun, 10 Mar 2024 18:41:01 +0800
Subject: [PATCH] feat: workflow mock test

---
 .github/workflows/api-workflow-tests.yaml    |  30 +++
 api/core/workflow/nodes/code/code_node.py    |  10 +-
 api/tests/integration_tests/.env.example     |   6 +-
 .../integration_tests/workflow/__init__.py   |   0
 .../workflow/nodes/__mock/code_executor.py   |  27 ++
 .../workflow/nodes/test_code.py              | 244 ++++++++++++++++++
 6 files changed, 311 insertions(+), 6 deletions(-)
 create mode 100644 .github/workflows/api-workflow-tests.yaml
 create mode 100644 api/tests/integration_tests/workflow/__init__.py
 create mode 100644 api/tests/integration_tests/workflow/nodes/__mock/code_executor.py
 create mode 100644 api/tests/integration_tests/workflow/nodes/test_code.py

diff --git a/.github/workflows/api-workflow-tests.yaml b/.github/workflows/api-workflow-tests.yaml
new file mode 100644
index 0000000000..e4e35c6c44
--- /dev/null
+++ b/.github/workflows/api-workflow-tests.yaml
@@ -0,0 +1,30 @@
+name: Run Pytest
+
+on:
+  pull_request:
+    branches:
+      - main
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+
+    env:
+      MOCK_SWITCH: true
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.10'
+          cache: 'pip'
+          cache-dependency-path: ./api/requirements.txt
+
+      - name: Install dependencies
+        run: pip install -r ./api/requirements.txt
+
+      - name: Run pytest
+        run: pytest api/tests/integration_tests/workflow
\ No newline at end of file
diff --git a/api/core/workflow/nodes/code/code_node.py b/api/core/workflow/nodes/code/code_node.py
index 9cc5865133..8034f4e55d 100644
--- a/api/core/workflow/nodes/code/code_node.py
+++ b/api/core/workflow/nodes/code/code_node.py
@@ -132,10 +132,10 @@ class CodeNode(BaseNode):
         :return:
         """
         if not isinstance(value, str):
-            raise ValueError(f"{variable} in input form must be a string")
+            raise ValueError(f"{variable} in output form must be a string")
 
         if len(value) > MAX_STRING_LENGTH:
-            raise ValueError(f'{variable} in input form must be less than {MAX_STRING_LENGTH} characters')
+            raise ValueError(f'{variable} in output form must be less than {MAX_STRING_LENGTH} characters')
 
         return value.replace('\x00', '')
 
@@ -147,7 +147,7 @@ class CodeNode(BaseNode):
         :return:
         """
         if not isinstance(value, int | float):
-            raise ValueError(f"{variable} in input form must be a number")
+            raise ValueError(f"{variable} in output form must be a number")
 
         if value > MAX_NUMBER or value < MIN_NUMBER:
             raise ValueError(f'{variable} in input form is out of range.')
@@ -205,7 +205,7 @@
 
                     if len(result[output_name]) > MAX_NUMBER_ARRAY_LENGTH:
                         raise ValueError(
-                            f'{prefix}.{output_name} in input form must be less than {MAX_NUMBER_ARRAY_LENGTH} characters'
+                            f'{prefix}.{output_name} in output form must be less than {MAX_NUMBER_ARRAY_LENGTH} characters'
                         )
 
                     transformed_result[output_name] = [
@@ -224,7 +224,7 @@
 
                     if len(result[output_name]) > MAX_STRING_ARRAY_LENGTH:
                         raise ValueError(
-                            f'{prefix}.{output_name} in input form must be less than {MAX_STRING_ARRAY_LENGTH} characters'
+                            f'{prefix}.{output_name} in output form must be less than {MAX_STRING_ARRAY_LENGTH} characters'
                         )
 
                     transformed_result[output_name] = [
diff --git a/api/tests/integration_tests/.env.example b/api/tests/integration_tests/.env.example
index 04abacf73d..dd1baa79d4 100644
--- a/api/tests/integration_tests/.env.example
+++ b/api/tests/integration_tests/.env.example
@@ -66,4 +66,8 @@ JINA_API_KEY=
 OLLAMA_BASE_URL=
 
 # Mock Switch
-MOCK_SWITCH=false
\ No newline at end of file
+MOCK_SWITCH=false
+
+# CODE EXECUTION CONFIGURATION
+CODE_EXECUTION_ENDPOINT=
+CODE_EXECUTION_API_KEY=
\ No newline at end of file
diff --git a/api/tests/integration_tests/workflow/__init__.py b/api/tests/integration_tests/workflow/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/tests/integration_tests/workflow/nodes/__mock/code_executor.py b/api/tests/integration_tests/workflow/nodes/__mock/code_executor.py
new file mode 100644
index 0000000000..b95c76b133
--- /dev/null
+++ b/api/tests/integration_tests/workflow/nodes/__mock/code_executor.py
@@ -0,0 +1,27 @@
+import os
+import pytest
+
+from typing import Literal
+from _pytest.monkeypatch import MonkeyPatch
+from core.helper.code_executor.code_executor import CodeExecutor
+
+MOCK = os.getenv('MOCK_SWITCH', 'false') == 'true'
+
+class MockedCodeExecutor:
+    @classmethod
+    def invoke(cls, language: Literal['python3', 'javascript', 'jinja2'], code: str, inputs: dict) -> dict:
+        # return a canned result instead of invoking the code execution service
+        if language == 'python3':
+            return {
+                "result": 3
+            }
+
+@pytest.fixture
+def setup_code_executor_mock(request, monkeypatch: MonkeyPatch):
+    if not MOCK:
+        yield
+        return
+
+    monkeypatch.setattr(CodeExecutor, "execute_code", MockedCodeExecutor.invoke)
+    yield
+    monkeypatch.undo()
diff --git a/api/tests/integration_tests/workflow/nodes/test_code.py b/api/tests/integration_tests/workflow/nodes/test_code.py
new file mode 100644
index 0000000000..2885b9f458
--- /dev/null
+++ b/api/tests/integration_tests/workflow/nodes/test_code.py
@@ -0,0 +1,244 @@
+import pytest
+
+from core.workflow.entities.variable_pool import VariablePool
+from core.workflow.nodes.code.code_node import CodeNode
+from models.workflow import WorkflowNodeExecutionStatus, WorkflowRunStatus
+from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock
+
+@pytest.mark.parametrize('setup_code_executor_mock', [['none']], indirect=True)
+def test_execute_code(setup_code_executor_mock):
+    code = '''
+    def main(args1: int, args2: int) -> dict:
+        return {
+            "result": args1 + args2,
+        }
+    '''
+    # trim first 4 spaces at the beginning of each line
+    code = '\n'.join([line[4:] for line in code.split('\n')])
+    node = CodeNode(config={
+        'id': '1',
+        'data': {
+            'outputs': {
+                'result': {
+                    'type': 'number',
+                },
+            },
+            'title': '123',
+            'variables': [
+                {
+                    'variable': 'args1',
+                    'value_selector': ['1', '123', 'args1'],
+                },
+                {
+                    'variable': 'args2',
+                    'value_selector': ['1', '123', 'args2']
+                }
+            ],
+            'answer': '123',
+            'code_language': 'python3',
+            'code': code
+        }
+    })
+
+    # construct variable pool
+    pool = VariablePool(system_variables={}, user_inputs={})
+    pool.append_variable(node_id='1', variable_key_list=['123', 'args1'], value=1)
+    pool.append_variable(node_id='1', variable_key_list=['123', 'args2'], value=2)
+
+    # execute node
+    result = node.run(pool)
+    assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
+    assert result.outputs['result'] == 3
+    assert result.error is None
+
+@pytest.mark.parametrize('setup_code_executor_mock', [['none']], indirect=True)
+def test_execute_code_output_validator(setup_code_executor_mock):
+    code = '''
+    def main(args1: int, args2: int) -> dict:
+        return {
+            "result": args1 + args2,
+        }
+    '''
+    # trim first 4 spaces at the beginning of each line
+    code = '\n'.join([line[4:] for line in code.split('\n')])
+    node = CodeNode(config={
+        'id': '1',
+        'data': {
+            "outputs": {
+                "result": {
+                    "type": "string",
+                },
+            },
+            'title': '123',
+            'variables': [
+                {
+                    'variable': 'args1',
+                    'value_selector': ['1', '123', 'args1'],
+                },
+                {
+                    'variable': 'args2',
+                    'value_selector': ['1', '123', 'args2']
+                }
+            ],
+            'answer': '123',
+            'code_language': 'python3',
+            'code': code
+        }
+    })
+
+    # construct variable pool
+    pool = VariablePool(system_variables={}, user_inputs={})
+    pool.append_variable(node_id='1', variable_key_list=['123', 'args1'], value=1)
+    pool.append_variable(node_id='1', variable_key_list=['123', 'args2'], value=2)
+
+    # execute node
+    result = node.run(pool)
+
+    assert result.status == WorkflowNodeExecutionStatus.FAILED
+    assert result.error == 'result in output form must be a string'
+
+def test_execute_code_output_validator_depth():
+    code = '''
+    def main(args1: int, args2: int) -> dict:
+        return {
+            "result": {
+                "result": args1 + args2,
+            }
+        }
+    '''
+    # trim first 4 spaces at the beginning of each line
+    code = '\n'.join([line[4:] for line in code.split('\n')])
+    node = CodeNode(config={
+        'id': '1',
+        'data': {
+            "outputs": {
+                "string_validator": {
+                    "type": "string",
+                },
+                "number_validator": {
+                    "type": "number",
+                },
+                "number_array_validator": {
+                    "type": "array[number]",
+                },
+                "string_array_validator": {
+                    "type": "array[string]",
+                },
+                "object_validator": {
+                    "type": "object",
+                    "children": {
+                        "result": {
+                            "type": "number",
+                        },
+                        "depth": {
+                            "type": "object",
+                            "children": {
+                                "depth": {
+                                    "type": "object",
+                                    "children": {
+                                        "depth": {
+                                            "type": "number",
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                },
+            },
+            'title': '123',
+            'variables': [
+                {
+                    'variable': 'args1',
+                    'value_selector': ['1', '123', 'args1'],
+                },
+                {
+                    'variable': 'args2',
+                    'value_selector': ['1', '123', 'args2']
+                }
+            ],
+            'answer': '123',
+            'code_language': 'python3',
+            'code': code
+        }
+    })
+
+    # construct result
+    result = {
+        "number_validator": 1,
+        "string_validator": "1",
+        "number_array_validator": [1, 2, 3, 3.333],
+        "string_array_validator": ["1", "2", "3"],
+        "object_validator": {
+            "result": 1,
+            "depth": {
+                "depth": {
+                    "depth": 1
+                }
+            }
+        }
+    }
+
+    # validate
+    node._transform_result(result, node.node_data.outputs)
+
+    # construct result
+    result = {
+        "number_validator": "1",
+        "string_validator": 1,
+        "number_array_validator": ["1", "2", "3", "3.333"],
+        "string_array_validator": [1, 2, 3],
+        "object_validator": {
+            "result": "1",
+            "depth": {
+                "depth": {
+                    "depth": "1"
+                }
+            }
+        }
+    }
+
+    # validate
+    with pytest.raises(ValueError):
+        node._transform_result(result, node.node_data.outputs)
+
+    # construct result
+    result = {
+        "number_validator": 1,
+        "string_validator": "1" * 2000,
+        "number_array_validator": [1, 2, 3, 3.333],
+        "string_array_validator": ["1", "2", "3"],
+        "object_validator": {
+            "result": 1,
+            "depth": {
+                "depth": {
+                    "depth": 1
+                }
+            }
+        }
+    }
+
+    # validate
+    with pytest.raises(ValueError):
+        node._transform_result(result, node.node_data.outputs)
+
+    # construct result
+    result = {
+        "number_validator": 1,
+        "string_validator": "1",
+        "number_array_validator": [1, 2, 3, 3.333] * 2000,
+        "string_array_validator": ["1", "2", "3"],
+        "object_validator": {
+            "result": 1,
+            "depth": {
+                "depth": {
+                    "depth": 1
+                }
+            }
+        }
+    }
+
+    # validate
+    with pytest.raises(ValueError):
+        node._transform_result(result, node.node_data.outputs)
+    
\ No newline at end of file