Plugins/fix backend ci errors (#12615)

Yeuoly 2025-01-10 19:46:59 +08:00 committed by GitHub
parent d56079a549
commit cb8debee3e
9 changed files with 480 additions and 193 deletions

View File

@@ -107,11 +107,46 @@ class LargeLanguageModel(AIModel):
        content_list = []
        usage = LLMUsage.empty_usage()
        system_fingerprint = None
        tools_calls: list[AssistantPromptMessage.ToolCall] = []

        def increase_tool_call(new_tool_calls: list[AssistantPromptMessage.ToolCall]):
            def get_tool_call(tool_name: str):
                if not tool_name:
                    return tools_calls[-1]

                tool_call = next(
                    (tool_call for tool_call in tools_calls if tool_call.function.name == tool_name), None
                )
                if tool_call is None:
                    tool_call = AssistantPromptMessage.ToolCall(
                        id="",
                        type="",
                        function=AssistantPromptMessage.ToolCall.ToolCallFunction(name=tool_name, arguments=""),
                    )
                    tools_calls.append(tool_call)

                return tool_call

            for new_tool_call in new_tool_calls:
                # get tool call
                tool_call = get_tool_call(new_tool_call.function.name)
                # update tool call
                if new_tool_call.id:
                    tool_call.id = new_tool_call.id
                if new_tool_call.type:
                    tool_call.type = new_tool_call.type
                if new_tool_call.function.name:
                    tool_call.function.name = new_tool_call.function.name
                if new_tool_call.function.arguments:
                    tool_call.function.arguments += new_tool_call.function.arguments

        for chunk in result:
            if isinstance(chunk.delta.message.content, str):
                content += chunk.delta.message.content
            elif isinstance(chunk.delta.message.content, list):
                content_list.extend(chunk.delta.message.content)

            if chunk.delta.message.tool_calls:
                increase_tool_call(chunk.delta.message.tool_calls)

            usage = chunk.delta.usage or LLMUsage.empty_usage()
            system_fingerprint = chunk.system_fingerprint
@@ -120,7 +155,10 @@ class LargeLanguageModel(AIModel):
result = LLMResult(
model=model,
prompt_messages=prompt_messages,
message=AssistantPromptMessage(content=content or content_list),
message=AssistantPromptMessage(
content=content or content_list,
tool_calls=tools_calls,
),
usage=usage,
system_fingerprint=system_fingerprint,
)
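The new tool-call handling above merges streamed fragments into complete calls: a delta with an empty function name is treated as a continuation of the most recent call, id/type/name are overwritten whenever a chunk supplies them, and the JSON arguments string is concatenated across chunks. Below is a minimal standalone sketch of that merging behavior; ToolCall and ToolCallFunction here are simplified stand-ins for AssistantPromptMessage.ToolCall, not the real entities.

# Standalone sketch of the merging behavior of increase_tool_call above.
from dataclasses import dataclass, field


@dataclass
class ToolCallFunction:
    name: str = ""
    arguments: str = ""


@dataclass
class ToolCall:
    id: str = ""
    type: str = ""
    function: ToolCallFunction = field(default_factory=ToolCallFunction)


def merge_tool_call_deltas(deltas: list[ToolCall]) -> list[ToolCall]:
    merged: list[ToolCall] = []
    for delta in deltas:
        # An empty name means "continuation of the most recent call";
        # otherwise find (or create) the call with that name.
        if not delta.function.name and merged:
            target = merged[-1]
        else:
            target = next((t for t in merged if t.function.name == delta.function.name), None)
            if target is None:
                target = ToolCall(function=ToolCallFunction(name=delta.function.name))
                merged.append(target)
        # Scalar fields are overwritten when present; arguments accumulate,
        # because providers stream the JSON arguments string in fragments.
        if delta.id:
            target.id = delta.id
        if delta.type:
            target.type = delta.type
        if delta.function.arguments:
            target.function.arguments += delta.function.arguments
    return merged


# Three chunks for a single call: name first, then two argument fragments.
chunks = [
    ToolCall(id="call_1", type="function", function=ToolCallFunction(name="get_weather")),
    ToolCall(function=ToolCallFunction(arguments='{"city": "San')),
    ToolCall(function=ToolCallFunction(arguments=' Francisco"}')),
]
print(merge_tool_call_deltas(chunks))  # one call with arguments '{"city": "San Francisco"}'

Concatenating arguments is the important part: providers stream the arguments JSON in pieces, so only the fully merged call can be parsed.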

View File

@@ -48,6 +48,6 @@ class TimezoneConversionTool(BuiltinTool):
datetime_with_tz = input_timezone.localize(local_time)
# timezone convert
converted_datetime = datetime_with_tz.astimezone(output_timezone)
return converted_datetime.strftime(format=time_format)
return converted_datetime.strftime(format=time_format) # type: ignore
except Exception as e:
raise ToolInvokeError(str(e))

View File

@@ -5,4 +5,7 @@ from core.tools.builtin_tool.provider import BuiltinToolProviderController
class WebscraperProvider(BuiltinToolProviderController):
def _validate_credentials(self, user_id: str, credentials: dict[str, Any]) -> None:
"""
Validate credentials
"""
pass

View File

@@ -0,0 +1,44 @@
import os
from collections.abc import Callable

import pytest

# import monkeypatch
from _pytest.monkeypatch import MonkeyPatch

from core.plugin.manager.model import PluginModelManager
from tests.integration_tests.model_runtime.__mock.plugin_model import MockModelClass


def mock_plugin_daemon(
    monkeypatch: MonkeyPatch,
) -> Callable[[], None]:
    """
    Mock the plugin daemon's PluginModelManager methods.

    :param monkeypatch: pytest monkeypatch fixture
    :return: unpatch function
    """

    def unpatch() -> None:
        monkeypatch.undo()

    monkeypatch.setattr(PluginModelManager, "invoke_llm", MockModelClass.invoke_llm)
    monkeypatch.setattr(PluginModelManager, "fetch_model_providers", MockModelClass.fetch_model_providers)
    monkeypatch.setattr(PluginModelManager, "get_model_schema", MockModelClass.get_model_schema)

    return unpatch


MOCK = os.getenv("MOCK_SWITCH", "false").lower() == "true"


@pytest.fixture
def setup_model_mock(monkeypatch):
    if MOCK:
        unpatch = mock_plugin_daemon(monkeypatch)

    yield

    if MOCK:
        unpatch()
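Illustrative usage of the setup_model_mock fixture above: with MOCK_SWITCH=true, the three PluginModelManager entry points are patched for the duration of the test, so no plugin daemon needs to be running. The test name and assertion below are hypothetical, and the snippet assumes PluginModelManager() can be constructed without arguments in the test environment.

def test_invoke_llm_is_mocked(setup_model_mock):
    # Patching happens at class level, so any instance resolves to the mocks.
    manager = PluginModelManager()  # assumption: constructible with no arguments here
    chunks = manager.invoke_llm(
        tenant_id="test_tenant",
        user_id="test_user",
        plugin_id="langgenius/openai",
        provider="openai",
        model="gpt-3.5-turbo",
        credentials={},
        prompt_messages=[],
    )
    # The mock streams a canned "Hello, world!" answer chunk by chunk.
    assert "".join(chunk.delta.message.content for chunk in chunks)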

View File

@@ -0,0 +1,249 @@
import datetime
import uuid
from collections.abc import Generator, Sequence
from decimal import Decimal
from json import dumps
from typing import Optional
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.message_entities import AssistantPromptMessage, PromptMessage, PromptMessageTool
from core.model_runtime.entities.model_entities import (
AIModelEntity,
FetchFrom,
ModelFeature,
ModelPropertyKey,
ModelType,
)
from core.model_runtime.entities.provider_entities import ConfigurateMethod, ProviderEntity
from core.plugin.entities.plugin_daemon import PluginModelProviderEntity
from core.plugin.manager.model import PluginModelManager
class MockModelClass(PluginModelManager):
def fetch_model_providers(self, tenant_id: str) -> Sequence[PluginModelProviderEntity]:
"""
Fetch model providers for the given tenant.
"""
return [
PluginModelProviderEntity(
id=uuid.uuid4().hex,
created_at=datetime.datetime.now(),
updated_at=datetime.datetime.now(),
provider="openai",
tenant_id=tenant_id,
plugin_unique_identifier="langgenius/openai/openai",
plugin_id="langgenius/openai",
declaration=ProviderEntity(
provider="openai",
label=I18nObject(
en_US="OpenAI",
zh_Hans="OpenAI",
),
description=I18nObject(
en_US="OpenAI",
zh_Hans="OpenAI",
),
icon_small=I18nObject(
en_US="https://example.com/icon_small.png",
zh_Hans="https://example.com/icon_small.png",
),
icon_large=I18nObject(
en_US="https://example.com/icon_large.png",
zh_Hans="https://example.com/icon_large.png",
),
supported_model_types=[ModelType.LLM],
configurate_methods=[ConfigurateMethod.PREDEFINED_MODEL],
models=[
AIModelEntity(
model="gpt-3.5-turbo",
label=I18nObject(
en_US="gpt-3.5-turbo",
zh_Hans="gpt-3.5-turbo",
),
model_type=ModelType.LLM,
fetch_from=FetchFrom.PREDEFINED_MODEL,
model_properties={},
features=[ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL],
),
AIModelEntity(
model="gpt-3.5-turbo-instruct",
label=I18nObject(
en_US="gpt-3.5-turbo-instruct",
zh_Hans="gpt-3.5-turbo-instruct",
),
model_type=ModelType.LLM,
fetch_from=FetchFrom.PREDEFINED_MODEL,
model_properties={
ModelPropertyKey.MODE: LLMMode.COMPLETION,
},
features=[],
),
],
),
)
]
def get_model_schema(
self,
tenant_id: str,
user_id: str,
plugin_id: str,
provider: str,
model_type: str,
model: str,
credentials: dict,
) -> AIModelEntity | None:
"""
Get model schema
"""
return AIModelEntity(
model=model,
label=I18nObject(
en_US="OpenAI",
zh_Hans="OpenAI",
),
model_type=ModelType(model_type),
fetch_from=FetchFrom.PREDEFINED_MODEL,
model_properties={},
features=[ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL] if model == "gpt-3.5-turbo" else [],
)
@staticmethod
def generate_function_call(
tools: Optional[list[PromptMessageTool]],
) -> Optional[AssistantPromptMessage.ToolCall]:
if not tools or len(tools) == 0:
return None
function: PromptMessageTool = tools[0]
function_name = function.name
function_parameters = function.parameters
function_parameters_type = function_parameters["type"]
if function_parameters_type != "object":
return None
function_parameters_properties = function_parameters["properties"]
function_parameters_required = function_parameters["required"]
parameters = {}
for parameter_name, parameter in function_parameters_properties.items():
if parameter_name not in function_parameters_required:
continue
parameter_type = parameter["type"]
if parameter_type == "string":
if "enum" in parameter:
if len(parameter["enum"]) == 0:
continue
parameters[parameter_name] = parameter["enum"][0]
else:
parameters[parameter_name] = "kawaii"
elif parameter_type == "integer":
parameters[parameter_name] = 114514
elif parameter_type == "number":
parameters[parameter_name] = 1919810.0
elif parameter_type == "boolean":
parameters[parameter_name] = True
return AssistantPromptMessage.ToolCall(
id=str(uuid.uuid4()),
type="function",
function=AssistantPromptMessage.ToolCall.ToolCallFunction(
name=function_name,
arguments=dumps(parameters),
),
)
@staticmethod
def mocked_chat_create_sync(
model: str,
prompt_messages: list[PromptMessage],
tools: Optional[list[PromptMessageTool]] = None,
) -> LLMResult:
tool_call = MockModelClass.generate_function_call(tools=tools)
return LLMResult(
id=str(uuid.uuid4()),
model=model,
prompt_messages=prompt_messages,
message=AssistantPromptMessage(content="elaina", tool_calls=[tool_call] if tool_call else []),
usage=LLMUsage(
prompt_tokens=2,
completion_tokens=1,
total_tokens=3,
prompt_unit_price=Decimal(0.0001),
completion_unit_price=Decimal(0.0002),
prompt_price_unit=Decimal(1),
prompt_price=Decimal(0.0001),
completion_price_unit=Decimal(1),
completion_price=Decimal(0.0002),
total_price=Decimal(0.0003),
currency="USD",
latency=0.001,
),
)
@staticmethod
def mocked_chat_create_stream(
model: str,
prompt_messages: list[PromptMessage],
tools: Optional[list[PromptMessageTool]] = None,
) -> Generator[LLMResultChunk, None, None]:
tool_call = MockModelClass.generate_function_call(tools=tools)
full_text = "Hello, world!\n\n```python\nprint('Hello, world!')\n```"
for i in range(0, len(full_text) + 1):
if i == len(full_text):
yield LLMResultChunk(
model=model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(
index=0,
message=AssistantPromptMessage(
content="",
tool_calls=[tool_call] if tool_call else [],
),
),
)
else:
yield LLMResultChunk(
model=model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(
index=0,
message=AssistantPromptMessage(
content=full_text[i],
tool_calls=[tool_call] if tool_call else [],
),
usage=LLMUsage(
prompt_tokens=2,
completion_tokens=17,
total_tokens=19,
prompt_unit_price=Decimal(0.0001),
completion_unit_price=Decimal(0.0002),
prompt_price_unit=Decimal(1),
prompt_price=Decimal(0.0001),
completion_price_unit=Decimal(1),
completion_price=Decimal(0.0002),
total_price=Decimal(0.0003),
currency="USD",
latency=0.001,
),
),
)
def invoke_llm(
self: PluginModelManager,
*,
tenant_id: str,
user_id: str,
plugin_id: str,
provider: str,
model: str,
credentials: dict,
prompt_messages: list[PromptMessage],
model_parameters: Optional[dict] = None,
tools: Optional[list[PromptMessageTool]] = None,
stop: Optional[list[str]] = None,
stream: bool = True,
):
return MockModelClass.mocked_chat_create_stream(model=model, prompt_messages=prompt_messages, tools=tools)
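generate_function_call above fills every required parameter of the first tool with a fixed, predictable value: the first enum member for enum strings, "kawaii" for plain strings, 114514 for integers, 1919810.0 for numbers, and True for booleans. That determinism is what the parameter-extractor tests later assert against. An illustrative check follows; the weather tool is made up, and PromptMessageTool is constructed the same way the entity is used elsewhere in this commit.

from json import loads

from core.model_runtime.entities.message_entities import PromptMessageTool
from tests.integration_tests.model_runtime.__mock.plugin_model import MockModelClass

# A made-up tool with one required string parameter and no enum.
weather_tool = PromptMessageTool(
    name="get_weather",
    description="Get the current weather",
    parameters={
        "type": "object",
        "properties": {"location": {"type": "string", "description": "location"}},
        "required": ["location"],
    },
)

tool_call = MockModelClass.generate_function_call(tools=[weather_tool])
assert tool_call is not None
# Required string parameters without an enum are always filled with "kawaii".
assert loads(tool_call.function.arguments) == {"location": "kawaii"}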

View File

@@ -0,0 +1,50 @@
from unittest.mock import MagicMock

from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from models.provider import ProviderType


def get_mocked_fetch_model_config(
    provider: str,
    model: str,
    mode: str,
    credentials: dict,
):
    model_provider_factory = ModelProviderFactory(tenant_id="test_tenant")
    model_type_instance = model_provider_factory.get_model_type_instance(provider, ModelType.LLM)
    provider_model_bundle = ProviderModelBundle(
        configuration=ProviderConfiguration(
            tenant_id="1",
            provider=model_provider_factory.get_provider_schema(provider),
            preferred_provider_type=ProviderType.CUSTOM,
            using_provider_type=ProviderType.CUSTOM,
            system_configuration=SystemConfiguration(enabled=False),
            custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)),
            model_settings=[],
        ),
        model_type_instance=model_type_instance,
    )
    model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model=model)
    model_schema = model_provider_factory.get_model_schema(
        provider=provider,
        model_type=model_type_instance.model_type,
        model=model,
        credentials=credentials,
    )
    assert model_schema is not None
    model_config = ModelConfigWithCredentialsEntity(
        model=model,
        provider=provider,
        mode=mode,
        credentials=credentials,
        parameters={},
        model_schema=model_schema,
        provider_model_bundle=provider_model_bundle,
    )
    return MagicMock(return_value=(model_instance, model_config))
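The helper returns MagicMock(return_value=...) rather than the tuple itself so the caller can assign it directly over the node's bound _fetch_model_config method: every call yields the prepared (ModelInstance, ModelConfigWithCredentialsEntity) pair, and the mock also records how it was called. A minimal standalone sketch of that pattern, with placeholder values instead of the real objects:

from unittest.mock import MagicMock

prepared = ("model_instance", "model_config")  # placeholders for the real objects
fetch_model_config = MagicMock(return_value=prepared)

instance, config = fetch_model_config()  # behaves like the patched bound method
assert (instance, config) == prepared
fetch_model_config.assert_called_once()  # call bookkeeping comes for free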

View File

@@ -7,12 +7,7 @@ from unittest.mock import MagicMock
import pytest
from core.app.entities.app_invoke_entities import InvokeFrom, ModelConfigWithCredentialsEntity
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers import ModelProviderFactory
from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph import Graph
@@ -22,11 +17,11 @@ from core.workflow.nodes.event import RunCompletedEvent
from core.workflow.nodes.llm.node import LLMNode
from extensions.ext_database import db
from models.enums import UserFrom
from models.provider import ProviderType
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
from tests.integration_tests.workflow.nodes.__mock.model import get_mocked_fetch_model_config
"""FOR MOCK FIXTURES, DO NOT REMOVE"""
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
from tests.integration_tests.model_runtime.__mock.plugin_daemon import setup_model_mock
from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock
@@ -81,15 +76,19 @@ def init_llm_node(config: dict) -> LLMNode:
return node
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_execute_llm(setup_openai_mock):
def test_execute_llm(setup_model_mock):
node = init_llm_node(
config={
"id": "llm",
"data": {
"title": "123",
"type": "llm",
"model": {"provider": "openai", "name": "gpt-3.5-turbo", "mode": "chat", "completion_params": {}},
"model": {
"provider": "langgenius/openai/openai",
"name": "gpt-3.5-turbo",
"mode": "chat",
"completion_params": {},
},
"prompt_template": [
{"role": "system", "text": "you are a helpful assistant.\ntoday's weather is {{#abc.output#}}."},
{"role": "user", "text": "{{#sys.query#}}"},
@@ -103,37 +102,15 @@ def test_execute_llm(setup_openai_mock):
credentials = {"openai_api_key": os.environ.get("OPENAI_API_KEY")}
provider_instance = ModelProviderFactory().get_provider_instance("openai")
model_type_instance = provider_instance.get_model_instance(ModelType.LLM)
provider_model_bundle = ProviderModelBundle(
configuration=ProviderConfiguration(
tenant_id="1",
provider=provider_instance.get_provider_schema(),
preferred_provider_type=ProviderType.CUSTOM,
using_provider_type=ProviderType.CUSTOM,
system_configuration=SystemConfiguration(enabled=False),
custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)),
model_settings=[],
),
model_type_instance=model_type_instance,
)
model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model="gpt-3.5-turbo")
model_schema = model_type_instance.get_model_schema("gpt-3.5-turbo")
assert model_schema is not None
model_config = ModelConfigWithCredentialsEntity(
model="gpt-3.5-turbo",
provider="openai",
mode="chat",
credentials=credentials,
parameters={},
model_schema=model_schema,
provider_model_bundle=provider_model_bundle,
)
# Mock db.session.close()
db.session.close = MagicMock()
node._fetch_model_config = MagicMock(return_value=(model_instance, model_config))
node._fetch_model_config = get_mocked_fetch_model_config(
provider="langgenius/openai/openai",
model="gpt-3.5-turbo",
mode="chat",
credentials=credentials,
)
# execute node
result = node._run()
@@ -149,8 +126,7 @@ def test_execute_llm(setup_openai_mock):
@pytest.mark.parametrize("setup_code_executor_mock", [["none"]], indirect=True)
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_model_mock):
"""
Test execute LLM node with jinja2
"""
@@ -190,38 +166,15 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
credentials = {"openai_api_key": os.environ.get("OPENAI_API_KEY")}
provider_instance = ModelProviderFactory().get_provider_instance("openai")
model_type_instance = provider_instance.get_model_instance(ModelType.LLM)
provider_model_bundle = ProviderModelBundle(
configuration=ProviderConfiguration(
tenant_id="1",
provider=provider_instance.get_provider_schema(),
preferred_provider_type=ProviderType.CUSTOM,
using_provider_type=ProviderType.CUSTOM,
system_configuration=SystemConfiguration(enabled=False),
custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)),
model_settings=[],
),
model_type_instance=model_type_instance,
)
model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model="gpt-3.5-turbo")
model_schema = model_type_instance.get_model_schema("gpt-3.5-turbo")
assert model_schema is not None
model_config = ModelConfigWithCredentialsEntity(
model="gpt-3.5-turbo",
provider="openai",
mode="chat",
credentials=credentials,
parameters={},
model_schema=model_schema,
provider_model_bundle=provider_model_bundle,
)
# Mock db.session.close()
db.session.close = MagicMock()
node._fetch_model_config = MagicMock(return_value=(model_instance, model_config))
node._fetch_model_config = get_mocked_fetch_model_config(
provider="langgenius/openai/openai",
model="gpt-3.5-turbo",
mode="chat",
credentials=credentials,
)
# execute node
result = node._run()

View File

@@ -4,14 +4,7 @@ import uuid
from typing import Optional
from unittest.mock import MagicMock
import pytest
from core.app.entities.app_invoke_entities import InvokeFrom, ModelConfigWithCredentialsEntity
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph import Graph
@@ -20,53 +13,11 @@ from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntime
from core.workflow.nodes.parameter_extractor.parameter_extractor_node import ParameterExtractorNode
from extensions.ext_database import db
from models.enums import UserFrom
from models.provider import ProviderType
from tests.integration_tests.workflow.nodes.__mock.model import get_mocked_fetch_model_config
"""FOR MOCK FIXTURES, DO NOT REMOVE"""
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
from tests.integration_tests.model_runtime.__mock.anthropic import setup_anthropic_mock
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
def get_mocked_fetch_model_config(
provider: str,
model: str,
mode: str,
credentials: dict,
):
model_provider_factory = ModelProviderFactory(tenant_id="test_tenant")
model_type_instance = model_provider_factory.get_model_type_instance(provider, ModelType.LLM)
provider_model_bundle = ProviderModelBundle(
configuration=ProviderConfiguration(
tenant_id="1",
provider=model_provider_factory.get_provider_schema(provider),
preferred_provider_type=ProviderType.CUSTOM,
using_provider_type=ProviderType.CUSTOM,
system_configuration=SystemConfiguration(enabled=False),
custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)),
model_settings=[],
),
model_type_instance=model_type_instance,
)
model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model=model)
model_schema = model_provider_factory.get_model_schema(
provider=provider,
model_type=model_type_instance.model_type,
model=model,
credentials=credentials,
)
assert model_schema is not None
model_config = ModelConfigWithCredentialsEntity(
model=model,
provider=provider,
mode=mode,
credentials=credentials,
parameters={},
model_schema=model_schema,
provider_model_bundle=provider_model_bundle,
)
return MagicMock(return_value=(model_instance, model_config))
from tests.integration_tests.model_runtime.__mock.plugin_daemon import setup_model_mock
def get_mocked_fetch_memory(memory_text: str):
@@ -133,8 +84,7 @@ def init_parameter_extractor_node(config: dict):
)
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_function_calling_parameter_extractor(setup_openai_mock):
def test_function_calling_parameter_extractor(setup_model_mock):
"""
Test function calling for parameter extractor.
"""
@@ -144,7 +94,12 @@ def test_function_calling_parameter_extractor(setup_openai_mock):
"data": {
"title": "123",
"type": "parameter-extractor",
"model": {"provider": "openai", "name": "gpt-3.5-turbo", "mode": "chat", "completion_params": {}},
"model": {
"provider": "langgenius/openai/openai",
"name": "gpt-3.5-turbo",
"mode": "chat",
"completion_params": {},
},
"query": ["sys", "query"],
"parameters": [{"name": "location", "type": "string", "description": "location", "required": True}],
"instruction": "",
@@ -155,25 +110,13 @@ def test_function_calling_parameter_extractor(setup_openai_mock):
)
node._fetch_model_config = get_mocked_fetch_model_config(
provider="openai",
provider="langgenius/openai/openai",
model="gpt-3.5-turbo",
mode="chat",
credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
)
db.session.close = MagicMock()
# construct variable pool
pool = VariablePool(
system_variables={
SystemVariableKey.QUERY: "what's the weather in SF",
SystemVariableKey.FILES: [],
SystemVariableKey.CONVERSATION_ID: "abababa",
SystemVariableKey.USER_ID: "aaa",
},
user_inputs={},
environment_variables=[],
)
result = node._run()
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
@@ -182,8 +125,7 @@ def test_function_calling_parameter_extractor(setup_openai_mock):
assert result.outputs.get("__reason") == None
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_instructions(setup_openai_mock):
def test_instructions(setup_model_mock):
"""
Test chat parameter extractor.
"""
@@ -193,7 +135,12 @@ def test_instructions(setup_openai_mock):
"data": {
"title": "123",
"type": "parameter-extractor",
"model": {"provider": "openai", "name": "gpt-3.5-turbo", "mode": "chat", "completion_params": {}},
"model": {
"provider": "langgenius/openai/openai",
"name": "gpt-3.5-turbo",
"mode": "chat",
"completion_params": {},
},
"query": ["sys", "query"],
"parameters": [{"name": "location", "type": "string", "description": "location", "required": True}],
"reasoning_mode": "function_call",
@@ -204,7 +151,7 @@ def test_instructions(setup_openai_mock):
)
node._fetch_model_config = get_mocked_fetch_model_config(
provider="openai",
provider="langgenius/openai/openai",
model="gpt-3.5-turbo",
mode="chat",
credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
@@ -228,8 +175,7 @@ def test_instructions(setup_openai_mock):
assert "what's the weather in SF" in prompt.get("text")
@pytest.mark.parametrize("setup_anthropic_mock", [["none"]], indirect=True)
def test_chat_parameter_extractor(setup_anthropic_mock):
def test_chat_parameter_extractor(setup_model_mock):
"""
Test chat parameter extractor.
"""
@@ -239,7 +185,12 @@ def test_chat_parameter_extractor(setup_anthropic_mock):
"data": {
"title": "123",
"type": "parameter-extractor",
"model": {"provider": "anthropic", "name": "claude-2", "mode": "chat", "completion_params": {}},
"model": {
"provider": "langgenius/openai/openai",
"name": "gpt-3.5-turbo",
"mode": "chat",
"completion_params": {},
},
"query": ["sys", "query"],
"parameters": [{"name": "location", "type": "string", "description": "location", "required": True}],
"reasoning_mode": "prompt",
@@ -250,10 +201,10 @@ def test_chat_parameter_extractor(setup_anthropic_mock):
)
node._fetch_model_config = get_mocked_fetch_model_config(
provider="anthropic",
model="claude-2",
provider="langgenius/openai/openai",
model="gpt-3.5-turbo",
mode="chat",
credentials={"anthropic_api_key": os.environ.get("ANTHROPIC_API_KEY")},
credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
)
db.session.close = MagicMock()
@@ -275,8 +226,7 @@ def test_chat_parameter_extractor(setup_anthropic_mock):
assert '<structure>\n{"type": "object"' in prompt.get("text")
@pytest.mark.parametrize("setup_openai_mock", [["completion"]], indirect=True)
def test_completion_parameter_extractor(setup_openai_mock):
def test_completion_parameter_extractor(setup_model_mock):
"""
Test completion parameter extractor.
"""
@@ -287,7 +237,7 @@ def test_completion_parameter_extractor(setup_openai_mock):
"title": "123",
"type": "parameter-extractor",
"model": {
"provider": "openai",
"provider": "langgenius/openai/openai",
"name": "gpt-3.5-turbo-instruct",
"mode": "completion",
"completion_params": {},
@@ -302,7 +252,7 @@ def test_completion_parameter_extractor(setup_openai_mock):
)
node._fetch_model_config = get_mocked_fetch_model_config(
provider="openai",
provider="langgenius/openai/openai",
model="gpt-3.5-turbo-instruct",
mode="completion",
credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
@@ -335,7 +285,7 @@ def test_extract_json_response():
"title": "123",
"type": "parameter-extractor",
"model": {
"provider": "openai",
"provider": "langgenius/openai/openai",
"name": "gpt-3.5-turbo-instruct",
"mode": "completion",
"completion_params": {},
@@ -361,8 +311,7 @@ def test_extract_json_response():
assert result["location"] == "kawaii"
@pytest.mark.parametrize("setup_anthropic_mock", [["none"]], indirect=True)
def test_chat_parameter_extractor_with_memory(setup_anthropic_mock):
def test_chat_parameter_extractor_with_memory(setup_model_mock):
"""
Test chat parameter extractor with memory.
"""
@@ -372,7 +321,12 @@ def test_chat_parameter_extractor_with_memory(setup_anthropic_mock):
"data": {
"title": "123",
"type": "parameter-extractor",
"model": {"provider": "anthropic", "name": "claude-2", "mode": "chat", "completion_params": {}},
"model": {
"provider": "langgenius/openai/openai",
"name": "gpt-3.5-turbo",
"mode": "chat",
"completion_params": {},
},
"query": ["sys", "query"],
"parameters": [{"name": "location", "type": "string", "description": "location", "required": True}],
"reasoning_mode": "prompt",
@@ -383,10 +337,10 @@ def test_chat_parameter_extractor_with_memory(setup_anthropic_mock):
)
node._fetch_model_config = get_mocked_fetch_model_config(
provider="anthropic",
model="claude-2",
provider="langgenius/openai/openai",
model="gpt-3.5-turbo",
mode="chat",
credentials={"anthropic_api_key": os.environ.get("ANTHROPIC_API_KEY")},
credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
)
node._fetch_memory = get_mocked_fetch_memory("customized memory")
db.session.close = MagicMock()

View File

@@ -1,13 +1,15 @@
import time
import uuid
from unittest.mock import MagicMock
from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.node_entities import NodeRunResult
from core.tools.utils.configuration import ToolParameterConfigurationManager
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph import Graph
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
from core.workflow.nodes.event.event import RunCompletedEvent
from core.workflow.nodes.tool.tool_node import ToolNode
from models.enums import UserFrom
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
@@ -63,31 +65,28 @@ def test_tool_variable_invoke():
"data": {
"title": "a",
"desc": "a",
"provider_id": "maths",
"provider_id": "time",
"provider_type": "builtin",
"provider_name": "maths",
"tool_name": "eval_expression",
"tool_label": "eval_expression",
"provider_name": "time",
"tool_name": "current_time",
"tool_label": "current_time",
"tool_configurations": {},
"tool_parameters": {
"expression": {
"type": "variable",
"value": ["1", "123", "args1"],
}
},
"tool_parameters": {},
},
}
)
ToolParameterConfigurationManager.decrypt_tool_parameters = MagicMock(return_value={"format": "%Y-%m-%d %H:%M:%S"})
node.graph_runtime_state.variable_pool.add(["1", "123", "args1"], "1+1")
# execute node
result = node._run()
assert isinstance(result, NodeRunResult)
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert result.outputs is not None
assert "2" in result.outputs["text"]
assert result.outputs["files"] == []
for item in result:
if isinstance(item, RunCompletedEvent):
assert item.run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert item.run_result.outputs is not None
assert item.run_result.outputs.get("text") is not None
def test_tool_mixed_invoke():
@@ -97,28 +96,25 @@ def test_tool_mixed_invoke():
"data": {
"title": "a",
"desc": "a",
"provider_id": "maths",
"provider_id": "time",
"provider_type": "builtin",
"provider_name": "maths",
"tool_name": "eval_expression",
"tool_label": "eval_expression",
"tool_configurations": {},
"tool_parameters": {
"expression": {
"type": "mixed",
"value": "{{#1.args1#}}",
}
"provider_name": "time",
"tool_name": "current_time",
"tool_label": "current_time",
"tool_configurations": {
"format": "%Y-%m-%d %H:%M:%S",
},
"tool_parameters": {},
},
}
)
node.graph_runtime_state.variable_pool.add(["1", "args1"], "1+1")
ToolParameterConfigurationManager.decrypt_tool_parameters = MagicMock(return_value={"format": "%Y-%m-%d %H:%M:%S"})
# execute node
result = node._run()
assert isinstance(result, NodeRunResult)
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert result.outputs is not None
assert "2" in result.outputs["text"]
assert result.outputs["files"] == []
for item in result:
if isinstance(item, RunCompletedEvent):
assert item.run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert item.run_result.outputs is not None
assert item.run_result.outputs.get("text") is not None
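The rewritten assertions reflect that the tool node's _run() now yields a stream of events instead of returning a single NodeRunResult, so the tests iterate the generator and inspect the terminal RunCompletedEvent. A standalone sketch of that consumption pattern, with simplified stand-ins for the real core.workflow entities:

from dataclasses import dataclass


@dataclass
class RunResult:
    status: str
    outputs: dict


@dataclass
class RunCompletedEvent:
    run_result: RunResult


def fake_node_run():
    # Intermediate events (stream chunks, logs, ...) would be yielded first;
    # the run ends with a RunCompletedEvent carrying the final result.
    yield RunCompletedEvent(run_result=RunResult(status="succeeded", outputs={"text": "2025-01-10 19:46:59"}))


for item in fake_node_run():
    if isinstance(item, RunCompletedEvent):
        assert item.run_result.status == "succeeded"
        assert item.run_result.outputs.get("text") is not None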