mirror of https://github.com/langgenius/dify.git
chore: remove unused changes
This commit is contained in:
parent 930c36e757
commit abb2b860f2
@@ -359,6 +359,7 @@ class WorkflowRunNodeExecutionListApi(Resource):
     @login_required
     @account_initialization_required
     @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
+    @marshal_with(workflow_run_node_execution_list_model)
     def get(self, app_model: App, run_id):
         """
         Get workflow run node execution list

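Aside: with marshalling restored on the controller, the endpoint can return ORM models directly and let the response model shape the payload. Below is a minimal sketch of flask_restful-style marshalling; the field names and resource are hypothetical stand-ins, not Dify's actual workflow_run_node_execution_list_model.

    # Minimal, self-contained sketch of marshal_with-based serialization.
    # Field names here are hypothetical, not Dify's real response model.
    from flask import Flask
    from flask_restful import Api, Resource, fields, marshal_with

    node_execution_fields = {
        "id": fields.String,
        "node_id": fields.String,
        "status": fields.String,
        "elapsed_time": fields.Float,
    }

    node_execution_list_fields = {
        "data": fields.List(fields.Nested(node_execution_fields)),
    }

    class FakeNodeExecution:
        # Stand-in for an ORM model; marshal_with reads attributes by name.
        def __init__(self, id, node_id, status, elapsed_time):
            self.id = id
            self.node_id = node_id
            self.status = status
            self.elapsed_time = elapsed_time

    class NodeExecutionListApi(Resource):
        @marshal_with(node_execution_list_fields)
        def get(self):
            executions = [FakeNodeExecution("1", "llm", "succeeded", 0.42)]
            # Returning objects (not dicts) is fine: marshal_with extracts the declared fields.
            return {"data": executions}

    app = Flask(__name__)
    Api(app).add_resource(NodeExecutionListApi, "/node-executions")
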
@@ -111,8 +111,9 @@ class LLMNodeData(BaseNodeData):
         ),
     )
 
-    # Tool support (from Agent V2)
+    # Tool support
     tools: Sequence[ToolMetadata] = Field(default_factory=list)
+    max_iterations: int | None = Field(default=None, description="Maximum number of iterations for the LLM node")
 
     @field_validator("prompt_config", mode="before")
     @classmethod

@@ -1330,7 +1330,7 @@ class LLMNode(Node[LLMNodeData]):
                 model_instance=model_instance,
                 tools=tool_instances,
                 files=prompt_files,
-                max_iterations=10,
+                max_iterations=self._node_data.max_iterations or 10,
                 context=ExecutionContext(user_id=self.user_id, app_id=self.app_id, tenant_id=self.tenant_id),
             )

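Note: the `or 10` fallback pairs with the optional max_iterations field added above; because `or` tests truthiness, both None and an explicit 0 fall back to the default. A minimal standalone sketch of the same pattern, using a stripped-down, hypothetical stand-in for LLMNodeData:

    # Sketch of an optional per-node setting with a hard-coded fallback.
    # LLMNodeDataSketch is a stand-in, not the real LLMNodeData.
    from pydantic import BaseModel, Field

    class LLMNodeDataSketch(BaseModel):
        max_iterations: int | None = Field(default=None, description="Maximum number of iterations for the LLM node")

    def resolve_max_iterations(node_data: LLMNodeDataSketch, default: int = 10) -> int:
        # `or` treats None and 0 the same; use an explicit None check if 0 must be preserved.
        return node_data.max_iterations or default

    assert resolve_max_iterations(LLMNodeDataSketch()) == 10
    assert resolve_max_iterations(LLMNodeDataSketch(max_iterations=3)) == 3
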
@@ -5,13 +5,11 @@ Provides methods to query and attach generation details to workflow node executions
 and messages, avoiding N+1 query problems.
 """
 
-from collections.abc import Sequence
-
 from sqlalchemy import select
 from sqlalchemy.orm import Session
 
 from core.app.entities.llm_generation_entities import LLMGenerationDetailData
-from models import LLMGenerationDetail, WorkflowNodeExecutionModel
+from models import LLMGenerationDetail
 
 
 class LLMGenerationService:

@@ -20,26 +18,6 @@ class LLMGenerationService:
     def __init__(self, session: Session):
         self._session = session
 
-    def get_generation_details_for_workflow_run(
-        self,
-        workflow_run_id: str,
-        *,
-        tenant_id: str | None = None,
-        app_id: str | None = None,
-    ) -> dict[str, LLMGenerationDetailData]:
-        """
-        Batch query generation details for all LLM nodes in a workflow run.
-
-        Returns dict mapping node_id to LLMGenerationDetailData.
-        """
-        stmt = select(LLMGenerationDetail).where(LLMGenerationDetail.workflow_run_id == workflow_run_id)
-        if tenant_id:
-            stmt = stmt.where(LLMGenerationDetail.tenant_id == tenant_id)
-        if app_id:
-            stmt = stmt.where(LLMGenerationDetail.app_id == app_id)
-        details = self._session.scalars(stmt).all()
-        return {detail.node_id: detail.to_domain_model() for detail in details if detail.node_id}
-
     def get_generation_detail_for_message(self, message_id: str) -> LLMGenerationDetailData | None:
         """Query generation detail for a specific message."""
         stmt = select(LLMGenerationDetail).where(LLMGenerationDetail.message_id == message_id)

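For reference, what remains of LLMGenerationService after this commit is the session-scoped constructor and the message-level lookups. A hedged usage sketch, assuming an initialized Dify application context so that db.engine is available; the wrapper function name is hypothetical:

    # Usage sketch: per-message lookup with a short-lived session.
    from sqlalchemy.orm import Session

    from extensions.ext_database import db
    from services.llm_generation_service import LLMGenerationService

    def load_generation_detail_for_message(message_id: str):
        # Returns LLMGenerationDetailData or None, per the method signature above.
        with Session(db.engine) as session:
            return LLMGenerationService(session).get_generation_detail_for_message(message_id)
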
@@ -57,75 +35,3 @@ class LLMGenerationService:
         stmt = select(LLMGenerationDetail).where(LLMGenerationDetail.message_id.in_(message_ids))
         details = self._session.scalars(stmt).all()
         return {detail.message_id: detail.to_domain_model() for detail in details if detail.message_id}
-
-    def attach_generation_details_to_node_executions(
-        self,
-        node_executions: Sequence[WorkflowNodeExecutionModel],
-        workflow_run_id: str,
-        *,
-        tenant_id: str | None = None,
-        app_id: str | None = None,
-    ) -> list[dict]:
-        """
-        Attach generation details to node executions and return as dicts.
-
-        Queries generation details in batch and attaches them to the corresponding
-        node executions, avoiding N+1 queries.
-        """
-        generation_details = self.get_generation_details_for_workflow_run(
-            workflow_run_id, tenant_id=tenant_id, app_id=app_id
-        )
-
-        return [
-            {
-                "id": node.id,
-                "index": node.index,
-                "predecessor_node_id": node.predecessor_node_id,
-                "node_id": node.node_id,
-                "node_type": node.node_type,
-                "title": node.title,
-                "inputs": node.inputs_dict,
-                "process_data": node.process_data_dict,
-                "outputs": node.outputs_dict,
-                "status": node.status,
-                "error": node.error,
-                "elapsed_time": node.elapsed_time,
-                "execution_metadata": node.execution_metadata_dict,
-                "extras": node.extras,
-                "created_at": int(node.created_at.timestamp()) if node.created_at else None,
-                "created_by_role": node.created_by_role,
-                "created_by_account": _serialize_account(node.created_by_account),
-                "created_by_end_user": _serialize_end_user(node.created_by_end_user),
-                "finished_at": int(node.finished_at.timestamp()) if node.finished_at else None,
-                "inputs_truncated": node.inputs_truncated,
-                "outputs_truncated": node.outputs_truncated,
-                "process_data_truncated": node.process_data_truncated,
-                "generation_detail": generation_details[node.node_id].to_response_dict()
-                if node.node_id in generation_details
-                else None,
-            }
-            for node in node_executions
-        ]
-
-
-def _serialize_account(account) -> dict | None:
-    """Serialize Account to dict for API response."""
-    if not account:
-        return None
-    return {
-        "id": account.id,
-        "name": account.name,
-        "email": account.email,
-    }
-
-
-def _serialize_end_user(end_user) -> dict | None:
-    """Serialize EndUser to dict for API response."""
-    if not end_user:
-        return None
-    return {
-        "id": end_user.id,
-        "type": end_user.type,
-        "is_anonymous": end_user.is_anonymous,
-        "session_id": end_user.session_id,
-    }

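Aside: the removed helpers existed to avoid the N+1 query pattern their docstrings mention. A schematic contrast, hypothetical rather than taken from the codebase, between per-node lookups and the single batched query style shown above:

    # Schematic contrast (hypothetical, not from the codebase).
    from sqlalchemy import select
    from sqlalchemy.orm import Session

    from models import LLMGenerationDetail

    def details_n_plus_one(session: Session, workflow_run_id: str, node_ids: list[str]) -> dict:
        # One round trip per node: the pattern the removed helpers were written to avoid.
        return {
            node_id: session.scalars(
                select(LLMGenerationDetail)
                .where(LLMGenerationDetail.workflow_run_id == workflow_run_id)
                .where(LLMGenerationDetail.node_id == node_id)
            ).first()
            for node_id in node_ids
        }

    def details_batched(session: Session, workflow_run_id: str) -> dict:
        # One round trip for the whole run, then an in-memory lookup per node.
        stmt = select(LLMGenerationDetail).where(LLMGenerationDetail.workflow_run_id == workflow_run_id)
        return {d.node_id: d for d in session.scalars(stmt).all() if d.node_id}
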
@@ -1,8 +1,8 @@
 import threading
-from typing import Any
+from collections.abc import Sequence
 
 from sqlalchemy import Engine
-from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy.orm import sessionmaker
 
 import contexts
 from extensions.ext_database import db

@@ -11,12 +11,12 @@ from models import (
     Account,
     App,
     EndUser,
+    WorkflowNodeExecutionModel,
     WorkflowRun,
     WorkflowRunTriggeredFrom,
 )
 from repositories.api_workflow_run_repository import APIWorkflowRunRepository
 from repositories.factory import DifyAPIRepositoryFactory
-from services.llm_generation_service import LLMGenerationService
 
 
 class WorkflowRunService:

@@ -137,9 +137,9 @@ class WorkflowRunService:
         app_model: App,
         run_id: str,
         user: Account | EndUser,
-    ) -> list[dict[str, Any]]:
+    ) -> Sequence[WorkflowNodeExecutionModel]:
         """
-        Get workflow run node execution list with generation details attached.
+        Get workflow run node execution list
         """
         workflow_run = self.get_workflow_run(app_model, run_id)

@@ -154,18 +154,8 @@ class WorkflowRunService:
         if tenant_id is None:
             raise ValueError("User tenant_id cannot be None")
 
-        node_executions = self._node_execution_service_repo.get_executions_by_workflow_run(
+        return self._node_execution_service_repo.get_executions_by_workflow_run(
             tenant_id=tenant_id,
             app_id=app_model.id,
             workflow_run_id=run_id,
         )
-
-        # Attach generation details using batch query
-        with Session(db.engine) as session:
-            generation_service = LLMGenerationService(session)
-            return generation_service.attach_generation_details_to_node_executions(
-                node_executions=node_executions,
-                workflow_run_id=run_id,
-                tenant_id=tenant_id,
-                app_id=app_model.id,
-            )