Mirror of https://github.com/langgenius/dify.git

commit d5cf64f289
Merge branch 'fix/chore-fix' into dev/plugin-deploy
@@ -62,8 +62,8 @@ class AgentChatAppRunner(AppRunner):
             app_record=app_record,
             model_config=application_generate_entity.model_conf,
             prompt_template_entity=app_config.prompt_template,
-            inputs=inputs,
-            files=files,
+            inputs=dict(inputs),
+            files=list(files),
             query=query,
         )
 
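The `dict(inputs)` and `list(files)` conversions that recur throughout this diff follow one pattern: the caller holds a value typed as a read-only `Mapping`/`Sequence`, while the callee's signature expects a concrete `dict`/`list`, so a shallow copy is made at the call site. A minimal sketch of the idea, with hypothetical signatures (not dify's actual ones):

    from collections.abc import Mapping, Sequence

    def organize(inputs: dict[str, str], files: list[str]) -> None:
        # The concrete types may be mutated freely here; the shallow
        # copies below keep changes from leaking back to the caller.
        inputs["normalized"] = "true"
        files.append("summary.txt")

    def run(inputs: Mapping[str, str], files: Sequence[str]) -> None:
        # dict()/list() satisfy the stricter parameter annotations
        # and decouple the callee from the caller's read-only views.
        organize(inputs=dict(inputs), files=list(files))

    run({"query": "hi"}, ["a.txt"])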
@@ -84,8 +84,8 @@ class AgentChatAppRunner(AppRunner):
             app_record=app_record,
             model_config=application_generate_entity.model_conf,
             prompt_template_entity=app_config.prompt_template,
-            inputs=inputs,
-            files=files,
+            inputs=dict(inputs),
+            files=list(files),
             query=query,
             memory=memory,
         )
@@ -97,8 +97,8 @@ class AgentChatAppRunner(AppRunner):
                 app_id=app_record.id,
                 tenant_id=app_config.tenant_id,
                 app_generate_entity=application_generate_entity,
-                inputs=inputs,
-                query=query,
+                inputs=dict(inputs),
+                query=query or "",
                 message_id=message.id,
             )
         except ModerationError as e:
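`query=query or ""` narrows an optional string before it reaches a parameter annotated as plain `str`: when `query` is `None` (or empty), the empty string is passed instead. A small sketch with a hypothetical callee:

    def moderation_for_inputs(query: str) -> None:
        # Annotated as str, so passing None would be a type error.
        print(f"moderating {query!r}")

    query: str | None = None
    # `or ""` substitutes the empty string for None (and leaves any
    # non-empty string untouched), satisfying the annotation.
    moderation_for_inputs(query=query or "")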
@@ -154,9 +154,9 @@ class AgentChatAppRunner(AppRunner):
             app_record=app_record,
             model_config=application_generate_entity.model_conf,
             prompt_template_entity=app_config.prompt_template,
-            inputs=inputs,
-            files=files,
-            query=query,
+            inputs=dict(inputs),
+            files=list(files),
+            query=query or "",
             memory=memory,
         )
 
@@ -171,6 +171,7 @@ class AgentChatAppRunner(AppRunner):
             return
 
         agent_entity = app_config.agent
+        assert agent_entity is not None
 
         # init model instance
         model_instance = ModelInstance(
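The added `assert agent_entity is not None` is a type-narrowing guard: `app_config.agent` is evidently optional, and the assertion both documents the invariant and lets a static checker treat `agent_entity` as non-None below, failing fast if the assumption is ever violated. Illustrated with hypothetical types:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class AgentEntity:
        strategy: str = "chain-of-thought"

    @dataclass
    class AppConfig:
        agent: Optional[AgentEntity] = None

    def run(app_config: AppConfig) -> None:
        agent_entity = app_config.agent
        # Without this assert, a strict checker flags the attribute
        # access below, since agent_entity could still be None.
        assert agent_entity is not None
        agent_entity.strategy = "function-calling"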
@@ -181,15 +182,16 @@ class AgentChatAppRunner(AppRunner):
             app_record=app_record,
             model_config=application_generate_entity.model_conf,
             prompt_template_entity=app_config.prompt_template,
-            inputs=inputs,
-            files=files,
-            query=query,
+            inputs=dict(inputs),
+            files=list(files),
+            query=query or "",
             memory=memory,
         )
 
         # change function call strategy based on LLM model
         llm_model = cast(LargeLanguageModel, model_instance.model_type_instance)
         model_schema = llm_model.get_model_schema(model_instance.model, model_instance.credentials)
+        assert model_schema is not None
 
         if {ModelFeature.MULTI_TOOL_CALL, ModelFeature.TOOL_CALL}.intersection(model_schema.features or []):
             agent_entity.strategy = AgentEntity.Strategy.FUNCTION_CALLING
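The added `assert model_schema is not None` plays the same narrowing role for `get_model_schema`, whose return value is evidently optional. The strategy switch itself is a set-intersection feature check: if the schema advertises either tool-call capability, the agent uses function calling, and `or []` keeps the check safe when `features` is `None`. The pattern in isolation (illustrative feature values):

    from enum import Enum

    class ModelFeature(Enum):
        TOOL_CALL = "tool-call"
        MULTI_TOOL_CALL = "multi-tool-call"
        VISION = "vision"

    features: list[ModelFeature] | None = [ModelFeature.VISION, ModelFeature.TOOL_CALL]

    # The intersection is a non-empty (truthy) set when at least one
    # tool-call feature is present; `or []` tolerates features=None.
    if {ModelFeature.MULTI_TOOL_CALL, ModelFeature.TOOL_CALL}.intersection(features or []):
        print("function-calling strategy")
    else:
        print("chain-of-thought strategy")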
@@ -385,7 +385,7 @@ class WorkflowCycleManage:
                 id=workflow_run.id,
                 workflow_id=workflow_run.workflow_id,
                 sequence_number=workflow_run.sequence_number,
-                inputs=workflow_run.inputs_dict,
+                inputs=dict(workflow_run.inputs_dict),
                 created_at=int(workflow_run.created_at.timestamp()),
             ),
         )
@@ -424,7 +424,7 @@ class WorkflowCycleManage:
                 workflow_id=workflow_run.workflow_id,
                 sequence_number=workflow_run.sequence_number,
                 status=workflow_run.status,
-                outputs=workflow_run.outputs_dict,
+                outputs=dict(workflow_run.outputs_dict),
                 error=workflow_run.error,
                 elapsed_time=workflow_run.elapsed_time,
                 total_tokens=workflow_run.total_tokens,
@@ -432,7 +432,7 @@ class WorkflowCycleManage:
                 created_by=created_by,
                 created_at=int(workflow_run.created_at.timestamp()),
                 finished_at=int(workflow_run.finished_at.timestamp()),
-                files=self._fetch_files_from_node_outputs(workflow_run.outputs_dict),
+                files=self._fetch_files_from_node_outputs(dict(workflow_run.outputs_dict)),
             ),
         )
 
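The `WorkflowCycleManage` changes wrap `inputs_dict`/`outputs_dict` in `dict(...)` before handing them to the stream-response models. Presumably these are properties that deserialize JSON stored on the run record; copying into a plain `dict` matches the response field's declared type and keeps the payload independent of the ORM object. A sketch under that assumption (hypothetical model):

    import json

    class WorkflowRun:
        def __init__(self, inputs: str) -> None:
            self.inputs = inputs  # inputs persisted as a JSON string

        @property
        def inputs_dict(self) -> dict:
            # Re-parses the stored JSON on each access.
            return json.loads(self.inputs) if self.inputs else {}

    run = WorkflowRun('{"query": "hello"}')
    # dict(...) takes a shallow copy of the parsed mapping, so the
    # response payload cannot alias state on the run record.
    payload = {"inputs": dict(run.inputs_dict)}
    print(payload)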