Mirror of https://github.com/langgenius/dify.git (synced 2026-05-13 08:57:28 +08:00)
lint
commit 08d08f0ae3
parent 187e12956a
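
The hunks below are one mechanical formatting pass: one import is re-ordered, one unused import is dropped, and every call that overruns the line-length limit is wrapped. The recurring wrap pattern, excerpted from the hunks that follow (attributing it to a black/ruff-style formatter is an assumption; the commit message does not name the tool):

    # Before: the inner VariablePool.from_bootstrap(...) call sits on one over-long line.
    pipeline._graph_runtime_state = GraphRuntimeState(
        variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(workflow_execution_id="run-id")),
        start_at=0.0,
    )

    # After: the inner call is broken across lines; only the layout changes, not the value.
    pipeline._graph_runtime_state = GraphRuntimeState(
        variable_pool=VariablePool.from_bootstrap(
            system_variables=build_system_variables(workflow_execution_id="run-id")
        ),
        start_at=0.0,
    )
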
@@ -14,13 +14,13 @@ from core.plugin.entities.plugin_daemon import PluginModelProviderEntity
 from core.plugin.impl.asset import PluginAssetManager
 from core.plugin.impl.model import PluginModelClient
 from extensions.ext_redis import redis_client
+from graphon.model_runtime import ModelRuntime
 from graphon.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk
 from graphon.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool
 from graphon.model_runtime.entities.model_entities import AIModelEntity, ModelType
 from graphon.model_runtime.entities.provider_entities import ProviderEntity
 from graphon.model_runtime.entities.rerank_entities import MultimodalRerankInput, RerankResult
 from graphon.model_runtime.entities.text_embedding_entities import EmbeddingInputType, EmbeddingResult
-from graphon.model_runtime import ModelRuntime
 from models.provider_ids import ModelProviderID

 logger = logging.getLogger(__name__)

@@ -17,7 +17,9 @@ def get_mocked_fetch_model_config(
 ):
     model_provider_factory = create_plugin_model_provider_factory(tenant_id="9d2074fc-6f86-45a9-b09d-6ecc63b9056b")
     model_type_instance = create_model_type_instance(
-        factory=model_provider_factory, provider=provider, model_type=ModelType.LLM,
+        factory=model_provider_factory,
+        provider=provider,
+        model_type=ModelType.LLM,
     )
     provider_model_bundle = ProviderModelBundle(
         configuration=ProviderConfiguration(

@@ -132,7 +132,9 @@ class TestAdvancedChatGenerateTaskPipeline:
         pipeline._task_state.answer = "partial answer"
         pipeline._workflow_run_id = "run-id"
         pipeline._graph_runtime_state = GraphRuntimeState(
-            variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(workflow_execution_id="run-id")),
+            variable_pool=VariablePool.from_bootstrap(
+                system_variables=build_system_variables(workflow_execution_id="run-id")
+            ),
             start_at=0.0,
             total_tokens=7,
             node_run_steps=3,

@@ -372,7 +374,9 @@ class TestAdvancedChatGenerateTaskPipeline:
         pipeline = _make_pipeline()
         pipeline._workflow_run_id = "run-id"
         pipeline._graph_runtime_state = GraphRuntimeState(
-            variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(workflow_execution_id="run-id")),
+            variable_pool=VariablePool.from_bootstrap(
+                system_variables=build_system_variables(workflow_execution_id="run-id")
+            ),
             start_at=0.0,
         )
         pipeline._workflow_response_converter.workflow_finish_to_stream_response = lambda **kwargs: "finish"

@@ -583,7 +587,9 @@ class TestAdvancedChatGenerateTaskPipeline:
                 self.items = items

         graph_runtime_state = GraphRuntimeState(
-            variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(workflow_execution_id="run-id")),
+            variable_pool=VariablePool.from_bootstrap(
+                system_variables=build_system_variables(workflow_execution_id="run-id")
+            ),
             start_at=0.0,
         )

@@ -617,7 +623,9 @@ class TestAdvancedChatGenerateTaskPipeline:
     def test_handle_message_end_event_applies_output_moderation(self, monkeypatch):
         pipeline = _make_pipeline()
         pipeline._graph_runtime_state = GraphRuntimeState(
-            variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(workflow_execution_id="run-id")),
+            variable_pool=VariablePool.from_bootstrap(
+                system_variables=build_system_variables(workflow_execution_id="run-id")
+            ),
             start_at=0.0,
         )
         pipeline._base_task_pipeline.handle_output_moderation_when_task_finished = lambda answer: "safe"

@@ -95,7 +95,9 @@ class TestWorkflowGenerateTaskPipeline:
     def test_to_blocking_response_falls_back_to_human_input_required_when_pause_event_missing(self):
         pipeline = _make_pipeline()
         pipeline._graph_runtime_state = GraphRuntimeState(
-            variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(workflow_execution_id="run-id")),
+            variable_pool=VariablePool.from_bootstrap(
+                system_variables=build_system_variables(workflow_execution_id="run-id")
+            ),
             start_at=0.0,
             total_tokens=5,
             node_run_steps=2,

@@ -283,7 +285,9 @@ class TestWorkflowGenerateTaskPipeline:
         pipeline = _make_pipeline()
         pipeline._workflow_execution_id = "run-id"
         pipeline._graph_runtime_state = GraphRuntimeState(
-            variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(workflow_execution_id="run-id")),
+            variable_pool=VariablePool.from_bootstrap(
+                system_variables=build_system_variables(workflow_execution_id="run-id")
+            ),
             start_at=0.0,
         )
         pipeline._workflow_response_converter.workflow_finish_to_stream_response = lambda **kwargs: "finish"

@@ -725,7 +729,9 @@ class TestWorkflowGenerateTaskPipeline:
         pipeline = _make_pipeline()
         pipeline._workflow_execution_id = "run-id"
         pipeline._graph_runtime_state = GraphRuntimeState(
-            variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(workflow_execution_id="run-id")),
+            variable_pool=VariablePool.from_bootstrap(
+                system_variables=build_system_variables(workflow_execution_id="run-id")
+            ),
             start_at=0.0,
         )

@@ -753,7 +759,9 @@ class TestWorkflowGenerateTaskPipeline:
         pipeline = _make_pipeline()
         pipeline._workflow_execution_id = "run-id"
         pipeline._graph_runtime_state = GraphRuntimeState(
-            variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(workflow_execution_id="run-id")),
+            variable_pool=VariablePool.from_bootstrap(
+                system_variables=build_system_variables(workflow_execution_id="run-id")
+            ),
             start_at=0.0,
         )
         pipeline._handle_ping_event = lambda event, **kwargs: iter(["ping"])

@@ -769,7 +777,9 @@ class TestWorkflowGenerateTaskPipeline:
     def test_process_stream_response_main_match_paths_and_cleanup(self):
         pipeline = _make_pipeline()
         pipeline._graph_runtime_state = GraphRuntimeState(
-            variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(workflow_execution_id="run-id")),
+            variable_pool=VariablePool.from_bootstrap(
+                system_variables=build_system_variables(workflow_execution_id="run-id")
+            ),
             start_at=0.0,
         )
         pipeline._base_task_pipeline.queue_manager.listen = lambda: iter(

@@ -21,7 +21,9 @@ class TestTriggerPostLayer:
         )
         runtime_state = SimpleNamespace(
             outputs={"answer": "ok"},
-            variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(workflow_execution_id="run-1")),
+            variable_pool=VariablePool.from_bootstrap(
+                system_variables=build_system_variables(workflow_execution_id="run-1")
+            ),
             total_tokens=12,
         )

@@ -60,7 +62,9 @@ class TestTriggerPostLayer:
     def test_on_event_handles_missing_trigger_log(self):
         runtime_state = SimpleNamespace(
             outputs={},
-            variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(workflow_execution_id="run-1")),
+            variable_pool=VariablePool.from_bootstrap(
+                system_variables=build_system_variables(workflow_execution_id="run-1")
+            ),
             total_tokens=0,
         )

@@ -91,7 +95,9 @@ class TestTriggerPostLayer:
     def test_on_event_ignores_non_status_events(self):
         runtime_state = SimpleNamespace(
             outputs={},
-            variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(workflow_execution_id="run-1")),
+            variable_pool=VariablePool.from_bootstrap(
+                system_variables=build_system_variables(workflow_execution_id="run-1")
+            ),
             total_tokens=0,
         )

@@ -60,7 +60,9 @@ def _make_layer(
         workflow_execution_id="run-id",
         conversation_id="conv-id",
     )
-    runtime_state = GraphRuntimeState(variable_pool=VariablePool.from_bootstrap(system_variables=system_variables), start_at=0.0)
+    runtime_state = GraphRuntimeState(
+        variable_pool=VariablePool.from_bootstrap(system_variables=system_variables), start_at=0.0
+    )
     read_only_state = ReadOnlyGraphRuntimeStateWrapper(runtime_state)

     application_generate_entity = WorkflowAppGenerateEntity.model_construct(

@@ -143,7 +143,8 @@ def test_model_provider_factory_get_provider_schema_delegates_to_provider_lookup

 def test_model_provider_factory_raises_for_unknown_provider() -> None:
     factory = ModelProviderFactory(
-        runtime=_FakeModelRuntime([
+        runtime=_FakeModelRuntime(
+            [
             _build_provider(
                 provider="langgenius/openai/openai",
                 provider_name="openai",

@@ -258,7 +259,8 @@ def test_model_provider_factory_validates_provider_credentials() -> None:

 def test_model_provider_factory_provider_credentials_validate_requires_schema() -> None:
     factory = ModelProviderFactory(
-        runtime=_FakeModelRuntime([
+        runtime=_FakeModelRuntime(
+            [
             _build_provider(
                 provider="langgenius/openai/openai",
                 provider_name="openai",

@@ -313,7 +315,8 @@ def test_model_provider_factory_validates_model_credentials() -> None:

 def test_model_provider_factory_model_credentials_validate_requires_schema() -> None:
     factory = ModelProviderFactory(
-        runtime=_FakeModelRuntime([
+        runtime=_FakeModelRuntime(
+            [
             _build_provider(
                 provider="langgenius/openai/openai",
                 provider_name="openai",

@@ -385,7 +388,8 @@ def test_model_provider_factory_builds_model_type_instances(
     expected_type: type[object],
 ) -> None:
     factory = ModelProviderFactory(
-        runtime=_FakeModelRuntime([
+        runtime=_FakeModelRuntime(
+            [
             _build_provider(
                 provider="langgenius/openai/openai",
                 provider_name="openai",

@@ -402,7 +406,8 @@ def test_model_provider_factory_builds_model_type_instances(

 def test_model_provider_factory_rejects_unsupported_model_type() -> None:
     factory = ModelProviderFactory(
-        runtime=_FakeModelRuntime([
+        runtime=_FakeModelRuntime(
+            [
             _build_provider(
                 provider="langgenius/openai/openai",
                 provider_name="openai",

@@ -110,7 +110,9 @@ def _build_http_node(
         call_depth=0,
     )
     graph_runtime_state = GraphRuntimeState(
-        variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(user_id="user", files=[]), user_inputs={}),
+        variable_pool=VariablePool.from_bootstrap(
+            system_variables=build_system_variables(user_id="user", files=[]), user_inputs={}
+        ),
         start_at=time.perf_counter(),
     )
     return HttpRequestNode(

@@ -51,7 +51,9 @@ def _create_human_input_node(
 def _build_node(form_content: str = "Please enter your name:\n\n{{#$output.name#}}") -> HumanInputNode:
     system_variables = default_system_variables()
     graph_runtime_state = GraphRuntimeState(
-        variable_pool=VariablePool.from_bootstrap(system_variables=system_variables, user_inputs={}, environment_variables=[]),
+        variable_pool=VariablePool.from_bootstrap(
+            system_variables=system_variables, user_inputs={}, environment_variables=[]
+        ),
         start_at=0.0,
     )
     graph_init_params = GraphInitParams(

@@ -114,7 +116,9 @@ def _build_node(form_content: str = "Please enter your name:\n\n{{#$output.name#
 def _build_timeout_node() -> HumanInputNode:
     system_variables = default_system_variables()
     graph_runtime_state = GraphRuntimeState(
-        variable_pool=VariablePool.from_bootstrap(system_variables=system_variables, user_inputs={}, environment_variables=[]),
+        variable_pool=VariablePool.from_bootstrap(
+            system_variables=system_variables, user_inputs={}, environment_variables=[]
+        ),
         start_at=0.0,
     )
     graph_init_params = GraphInitParams(

@@ -244,7 +244,9 @@ def model_config(monkeypatch):
     model_provider_factory = ModelProviderFactory(runtime=create_plugin_model_runtime(tenant_id="test"))
     provider_instance = model_provider_factory.get_model_provider("openai")
     model_type_instance = create_model_type_instance(
-        factory=model_provider_factory, provider="openai", model_type=ModelType.LLM,
+        factory=model_provider_factory,
+        provider="openai",
+        model_type=ModelType.LLM,
    )

     # Create a ProviderModelBundle

@@ -35,7 +35,9 @@ def _build_context(graph_config: Mapping[str, object]) -> tuple[GraphInitParams,
         invoke_from="debugger",
     )
     runtime_state = GraphRuntimeState(
-        variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(user_id="user", files=[]), user_inputs={}),
+        variable_pool=VariablePool.from_bootstrap(
+            system_variables=build_system_variables(user_id="user", files=[]), user_inputs={}
+        ),
         start_at=0.0,
     )
     return init_params, runtime_state

@@ -82,7 +84,9 @@ def test_node_accepts_invoke_from_enum():
         invoke_from=InvokeFrom.DEBUGGER,
     )
     runtime_state = GraphRuntimeState(
-        variable_pool=VariablePool.from_bootstrap(system_variables=build_system_variables(user_id="user", files=[]), user_inputs={}),
+        variable_pool=VariablePool.from_bootstrap(
+            system_variables=build_system_variables(user_id="user", files=[]), user_inputs={}
+        ),
         start_at=0.0,
     )

@@ -189,13 +189,17 @@ def test_run_extract_text(
     if mime_type == "application/pdf":
         mock_pdf_extract = Mock(return_value=expected_text[0])
         if extension:
-            monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_by_file_extension", mock_pdf_extract)
+            monkeypatch.setattr(
+                "graphon.nodes.document_extractor.node._extract_text_by_file_extension", mock_pdf_extract
+            )
         else:
             monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_by_mime_type", mock_pdf_extract)
     elif mime_type.startswith("application/vnd.openxmlformats"):
         mock_docx_extract = Mock(return_value=expected_text[0])
         if extension:
-            monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_by_file_extension", mock_docx_extract)
+            monkeypatch.setattr(
+                "graphon.nodes.document_extractor.node._extract_text_by_file_extension", mock_docx_extract
+            )
         else:
             monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_by_mime_type", mock_docx_extract)

@@ -1,4 +1,3 @@
-import sys
 from types import SimpleNamespace

 import pytest

@@ -644,7 +644,9 @@ class TestWorkflowEntryHelpers:

         with (
             patch.object(workflow_entry, "default_system_variables", return_value=sentinel.system_variables),
-            patch("graphon.runtime.VariablePool.from_bootstrap", return_value=sentinel.variable_pool) as variable_pool_cls,
+            patch(
+                "graphon.runtime.VariablePool.from_bootstrap", return_value=sentinel.variable_pool
+            ) as variable_pool_cls,
             patch.object(workflow_entry, "add_variables_to_pool") as add_variables_to_pool,
             patch.object(
                 workflow_entry, "DifyGraphInitContext", return_value=sentinel.graph_init_context