mirror of https://github.com/langgenius/dify.git
fix file render
This commit is contained in:
parent
d5a404236a
commit
a2195c813c
@@ -10,7 +10,7 @@ from core.app.app_config.features.suggested_questions_after_answer.manager import (
     SuggestedQuestionsAfterAnswerConfigManager,
 )
 from core.app.app_config.features.text_to_speech.manager import TextToSpeechConfigManager
-from models.model import AppModelConfig
+from models.model import AppMode, AppModelConfig


 class BaseAppConfigManager:
@@ -33,11 +33,12 @@ class BaseAppConfigManager:
         return config_dict

     @classmethod
-    def convert_features(cls, config_dict: dict) -> AppAdditionalFeatures:
+    def convert_features(cls, config_dict: dict, app_mode: AppMode) -> AppAdditionalFeatures:
         """
         Convert app config to app model config

         :param config_dict: app config
+        :param app_mode: app mode
         """
         config_dict = config_dict.copy()

@@ -47,7 +48,8 @@ class BaseAppConfigManager:
         )

         additional_features.file_upload = FileUploadConfigManager.convert(
-            config=config_dict
+            config=config_dict,
+            is_vision=app_mode in [AppMode.CHAT, AppMode.COMPLETION, AppMode.AGENT_CHAT]
         )

         additional_features.opening_statement, additional_features.suggested_questions = \
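For reference, a minimal sketch of the mode gate that `convert_features` now passes as `is_vision`. The `AppMode` members are the ones referenced in this commit; the string values and the helper name below are placeholders for illustration only.

```python
from enum import Enum


class AppMode(Enum):
    # Members referenced in this commit; the string values here are placeholders.
    CHAT = 'chat'
    COMPLETION = 'completion'
    AGENT_CHAT = 'agent-chat'
    ADVANCED_CHAT = 'advanced-chat'
    WORKFLOW = 'workflow'


def keeps_vision_detail(app_mode: AppMode) -> bool:
    # Same membership test passed as is_vision above: only the classic
    # model-config app modes keep the image `detail` option.
    return app_mode in [AppMode.CHAT, AppMode.COMPLETION, AppMode.AGENT_CHAT]


assert keeps_vision_detail(AppMode.AGENT_CHAT)
assert not keeps_vision_detail(AppMode.WORKFLOW)
```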
@@ -5,22 +5,27 @@ from core.app.app_config.entities import FileExtraConfig

 class FileUploadConfigManager:
     @classmethod
-    def convert(cls, config: dict) -> Optional[FileExtraConfig]:
+    def convert(cls, config: dict, is_vision: bool = True) -> Optional[FileExtraConfig]:
         """
         Convert model config to model config

         :param config: model config args
+        :param is_vision: if True, the feature is vision feature
         """
         file_upload_dict = config.get('file_upload')
         if file_upload_dict:
             if 'image' in file_upload_dict and file_upload_dict['image']:
                 if 'enabled' in file_upload_dict['image'] and file_upload_dict['image']['enabled']:
+                    image_config = {
+                        'number_limits': file_upload_dict['image']['number_limits'],
+                        'transfer_methods': file_upload_dict['image']['transfer_methods']
+                    }
+
+                    if is_vision:
+                        image_config['detail'] = file_upload_dict['image']['detail']
+
                     return FileExtraConfig(
-                        image_config={
-                            'number_limits': file_upload_dict['image']['number_limits'],
-                            'detail': file_upload_dict['image']['detail'],
-                            'transfer_methods': file_upload_dict['image']['transfer_methods']
-                        }
+                        image_config=image_config
                     )

         return None
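A usage sketch for the reworked `convert`. The `file_upload` shape follows the keys read above; the sample values and the import path (inferred from the sibling feature managers) are assumptions, not taken from the real schema.

```python
# Import path inferred from the other feature managers in this commit.
from core.app.app_config.features.file_upload.manager import FileUploadConfigManager

# Hypothetical feature dict shaped like the 'file_upload' block read above.
features = {
    'file_upload': {
        'image': {
            'enabled': True,
            'number_limits': 3,
            'detail': 'high',                                 # sample value
            'transfer_methods': ['remote_url', 'local_file']  # sample values
        }
    }
}

# Vision-capable callers keep the detail setting in image_config...
vision_cfg = FileUploadConfigManager.convert(config=features, is_vision=True)

# ...while workflow-style callers pass is_vision=False, so image_config carries
# only 'number_limits' and 'transfer_methods'.
plain_cfg = FileUploadConfigManager.convert(config=features, is_vision=False)
```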
@@ -28,10 +28,11 @@ class AdvancedChatAppConfigManager(BaseAppConfigManager):
                        workflow: Workflow) -> AdvancedChatAppConfig:
         features_dict = workflow.features_dict

+        app_mode = AppMode.value_of(app_model.mode)
         app_config = AdvancedChatAppConfig(
             tenant_id=app_model.tenant_id,
             app_id=app_model.id,
-            app_mode=AppMode.value_of(app_model.mode),
+            app_mode=app_mode,
             workflow_id=workflow.id,
             sensitive_word_avoidance=SensitiveWordAvoidanceConfigManager.convert(
                 config=features_dict
@@ -39,7 +40,7 @@ class AdvancedChatAppConfigManager(BaseAppConfigManager):
             variables=WorkflowVariablesConfigManager.convert(
                 workflow=workflow
             ),
-            additional_features=cls.convert_features(features_dict)
+            additional_features=cls.convert_features(features_dict, app_mode)
         )

         return app_config
@@ -67,7 +67,7 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
         # parse files
         files = args['files'] if 'files' in args and args['files'] else []
         message_file_parser = MessageFileParser(tenant_id=app_model.tenant_id, app_id=app_model.id)
-        file_extra_config = FileUploadConfigManager.convert(workflow.features_dict)
+        file_extra_config = FileUploadConfigManager.convert(workflow.features_dict, is_vision=False)
         if file_extra_config:
             file_objs = message_file_parser.validate_and_transform_files_arg(
                 files,
@@ -485,63 +485,85 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCycleManage):
                 value_selector = route_chunk.value_selector
                 route_chunk_node_id = value_selector[0]

-                # check chunk node id is before current node id or equal to current node id
-                if route_chunk_node_id not in self._task_state.ran_node_execution_infos:
-                    break
+                if route_chunk_node_id == 'sys':
+                    # system variable
+                    value = self._workflow_system_variables.get(SystemVariable.value_of(value_selector[1]))
+                    # new_value = []
+                    # if isinstance(value, list):
+                    #     for item in value:
+                    #         if isinstance(item, FileVar):
+                    #             new_value.append(item.to_dict())
+                    #
+                    # if new_value:
+                    #     value = new_value
+                else:
+                    # check chunk node id is before current node id or equal to current node id
+                    if route_chunk_node_id not in self._task_state.ran_node_execution_infos:
+                        break

-                latest_node_execution_info = self._task_state.latest_node_execution_info
+                    latest_node_execution_info = self._task_state.latest_node_execution_info

-                # get route chunk node execution info
-                route_chunk_node_execution_info = self._task_state.ran_node_execution_infos[route_chunk_node_id]
-                if (route_chunk_node_execution_info.node_type == NodeType.LLM
-                        and latest_node_execution_info.node_type == NodeType.LLM):
-                    # only LLM support chunk stream output
-                    self._task_state.current_stream_generate_state.current_route_position += 1
-                    continue
+                    # get route chunk node execution info
+                    route_chunk_node_execution_info = self._task_state.ran_node_execution_infos[route_chunk_node_id]
+                    if (route_chunk_node_execution_info.node_type == NodeType.LLM
+                            and latest_node_execution_info.node_type == NodeType.LLM):
+                        # only LLM support chunk stream output
+                        self._task_state.current_stream_generate_state.current_route_position += 1
+                        continue

-                # get route chunk node execution
-                route_chunk_node_execution = db.session.query(WorkflowNodeExecution).filter(
-                    WorkflowNodeExecution.id == route_chunk_node_execution_info.workflow_node_execution_id).first()
+                    # get route chunk node execution
+                    route_chunk_node_execution = db.session.query(WorkflowNodeExecution).filter(
+                        WorkflowNodeExecution.id == route_chunk_node_execution_info.workflow_node_execution_id).first()

-                outputs = route_chunk_node_execution.outputs_dict
+                    outputs = route_chunk_node_execution.outputs_dict

-                # get value from outputs
-                value = None
-                for key in value_selector[1:]:
-                    if not value:
-                        value = outputs.get(key)
-                    else:
-                        value = value.get(key)
+                    # get value from outputs
+                    value = None
+                    for key in value_selector[1:]:
+                        if not value:
+                            value = outputs.get(key)
+                        else:
+                            value = value.get(key)

                 if value:
-                    text = None
+                    text = ''
                     if isinstance(value, str | int | float):
                         text = str(value)
-                    elif isinstance(value, dict | list):
-                        # handle files
-                        file_vars = self._fetch_files_from_variable_value(value)
-                        for file_var in file_vars:
-                            try:
-                                file_var_obj = FileVar(**file_var)
-                            except Exception as e:
-                                logger.error(f'Error creating file var: {e}')
-                                continue
-
-                            # convert file to markdown
-                            text = file_var_obj.to_markdown()
+                    elif isinstance(value, dict):
+                        # other types
+                        text = json.dumps(value, ensure_ascii=False)
+                    elif isinstance(value, FileVar):
+                        # convert file to markdown
+                        text = value.to_markdown()
+                    elif isinstance(value, list):
+                        for item in value:
+                            if isinstance(item, FileVar):
+                                text += item.to_markdown() + ' '
+
+                        text = text.strip()

-                    if not text:
+                    # # handle files
+                    # file_vars = self._fetch_files_from_variable_value(value)
+                    # for file_var in file_vars:
+                    #     try:
+                    #         file_var_obj = FileVar(**file_var)
+                    #     except Exception as e:
+                    #         logger.error(f'Error creating file var: {e}')
+                    #         continue
+                    #
+                    #     # convert file to markdown
+                    #     text = file_var_obj.to_markdown()
+
+                    if not text and value:
                         # other types
                         text = json.dumps(value, ensure_ascii=False)

                     if text:
-                        for token in text:
-                            self._queue_manager.publish(
-                                QueueTextChunkEvent(
-                                    text=token
-                                ), PublishFrom.TASK_PIPELINE
-                            )
-                            time.sleep(0.01)
+                        self._queue_manager.publish(
+                            QueueTextChunkEvent(
+                                text=text
+                            ), PublishFrom.TASK_PIPELINE
+                        )

                 self._task_state.current_stream_generate_state.current_route_position += 1

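The branching above can be read as one small conversion step; the sketch below restates it outside the pipeline, assuming only that `FileVar` exposes `to_markdown()` as shown later in this commit. Publishing the converted text as a single `QueueTextChunkEvent`, instead of the old per-character loop with `time.sleep(0.01)`, appears to be what keeps file markdown (links and images) intact for the client renderer.

```python
import json

from core.file.file_obj import FileVar  # import path as used elsewhere in this commit


def stream_value_to_text(value) -> str:
    # Condensed restatement of the conversion above, in the same branch order.
    text = ''
    if isinstance(value, str | int | float):
        text = str(value)
    elif isinstance(value, dict):
        text = json.dumps(value, ensure_ascii=False)
    elif isinstance(value, FileVar):
        text = value.to_markdown()
    elif isinstance(value, list):
        for item in value:
            if isinstance(item, FileVar):
                text += item.to_markdown() + ' '
        text = text.strip()

    if not text and value:
        # fallback for anything not handled above
        text = json.dumps(value, ensure_ascii=False)

    return text
```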
@@ -58,10 +58,11 @@ class AgentChatAppConfigManager(BaseAppConfigManager):
         else:
             config_dict = override_config_dict

+        app_mode = AppMode.value_of(app_model.mode)
         app_config = AgentChatAppConfig(
             tenant_id=app_model.tenant_id,
             app_id=app_model.id,
-            app_mode=AppMode.value_of(app_model.mode),
+            app_mode=app_mode,
             app_model_config_from=config_from,
             app_model_config_id=app_model_config.id,
             app_model_config_dict=config_dict,
@@ -80,7 +81,7 @@ class AgentChatAppConfigManager(BaseAppConfigManager):
             agent=AgentConfigManager.convert(
                 config=config_dict
             ),
-            additional_features=cls.convert_features(config_dict)
+            additional_features=cls.convert_features(config_dict, app_mode)
         )

         app_config.variables, app_config.external_data_variables = BasicVariablesConfigManager.convert(
@@ -52,10 +52,11 @@ class ChatAppConfigManager(BaseAppConfigManager):
         else:
             config_dict = override_config_dict

+        app_mode = AppMode.value_of(app_model.mode)
         app_config = ChatAppConfig(
             tenant_id=app_model.tenant_id,
             app_id=app_model.id,
-            app_mode=AppMode.value_of(app_model.mode),
+            app_mode=app_mode,
             app_model_config_from=config_from,
             app_model_config_id=app_model_config.id,
             app_model_config_dict=config_dict,
@@ -71,7 +72,7 @@ class ChatAppConfigManager(BaseAppConfigManager):
             dataset=DatasetConfigManager.convert(
                 config=config_dict
             ),
-            additional_features=cls.convert_features(config_dict)
+            additional_features=cls.convert_features(config_dict, app_mode)
         )

         app_config.variables, app_config.external_data_variables = BasicVariablesConfigManager.convert(
@@ -43,10 +43,11 @@ class CompletionAppConfigManager(BaseAppConfigManager):
         else:
             config_dict = override_config_dict

+        app_mode = AppMode.value_of(app_model.mode)
         app_config = CompletionAppConfig(
             tenant_id=app_model.tenant_id,
             app_id=app_model.id,
-            app_mode=AppMode.value_of(app_model.mode),
+            app_mode=app_mode,
             app_model_config_from=config_from,
             app_model_config_id=app_model_config.id,
             app_model_config_dict=config_dict,
@@ -62,7 +63,7 @@ class CompletionAppConfigManager(BaseAppConfigManager):
             dataset=DatasetConfigManager.convert(
                 config=config_dict
             ),
-            additional_features=cls.convert_features(config_dict)
+            additional_features=cls.convert_features(config_dict, app_mode)
         )

         app_config.variables, app_config.external_data_variables = BasicVariablesConfigManager.convert(
@@ -20,10 +20,11 @@ class WorkflowAppConfigManager(BaseAppConfigManager):
     def get_app_config(cls, app_model: App, workflow: Workflow) -> WorkflowAppConfig:
         features_dict = workflow.features_dict

+        app_mode = AppMode.value_of(app_model.mode)
         app_config = WorkflowAppConfig(
             tenant_id=app_model.tenant_id,
             app_id=app_model.id,
-            app_mode=AppMode.value_of(app_model.mode),
+            app_mode=app_mode,
             workflow_id=workflow.id,
             sensitive_word_avoidance=SensitiveWordAvoidanceConfigManager.convert(
                 config=features_dict
@@ -31,7 +32,7 @@ class WorkflowAppConfigManager(BaseAppConfigManager):
             variables=WorkflowVariablesConfigManager.convert(
                 workflow=workflow
             ),
-            additional_features=cls.convert_features(features_dict)
+            additional_features=cls.convert_features(features_dict, app_mode)
         )

         return app_config
@@ -50,7 +50,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
         # parse files
         files = args['files'] if 'files' in args and args['files'] else []
         message_file_parser = MessageFileParser(tenant_id=app_model.tenant_id, app_id=app_model.id)
-        file_extra_config = FileUploadConfigManager.convert(workflow.features_dict)
+        file_extra_config = FileUploadConfigManager.convert(workflow.features_dict, is_vision=False)
        if file_extra_config:
             file_objs = message_file_parser.validate_and_transform_files_arg(
                 files,
@@ -519,7 +519,7 @@ class WorkflowCycleManage:
             return None

         if isinstance(value, dict):
-            if '__variant' in value and value['__variant'] == FileVar.__class__.__name__:
+            if '__variant' in value and value['__variant'] == FileVar.__name__:
                 return value

         return None
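The one-line change above fixes a comparison that could never match: accessed on the class object itself, `__class__` is pydantic's metaclass rather than `FileVar`. A minimal check, with a trimmed stand-in model used purely for demonstration:

```python
from pydantic import BaseModel


class FileVar(BaseModel):
    # Trimmed stand-in for core.file.file_obj.FileVar, only to inspect the names.
    tenant_id: str = ''


print(FileVar.__name__)            # 'FileVar' -> matches what to_dict() stores in '__variant'
print(FileVar.__class__.__name__)  # 'ModelMetaclass' -> why the old comparison never matched
```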
@@ -61,6 +61,7 @@ class FileVar(BaseModel):
     def to_dict(self) -> dict:
         return {
+            '__variant': self.__class__.__name__,
             'tenant_id': self.tenant_id,
             'type': self.type.value,
             'transfer_method': self.transfer_method.value,
             'url': self.preview_url,
@@ -77,9 +78,9 @@ class FileVar(BaseModel):
         """
         preview_url = self.preview_url
         if self.type == FileType.IMAGE:
-            text = f''
+            text = f''
         else:
-            text = f'[{self.filename or self.preview_url}]({self.preview_url})'
+            text = f'[{self.filename or preview_url}]({preview_url})'

         return text

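A small sketch of the non-image branch above, showing the filename/URL fallback the fix now applies consistently; the helper is standalone and illustrative, and the image branch is not reproduced here.

```python
def to_link_markdown(filename, preview_url: str) -> str:
    # Mirrors the else-branch: prefer the filename as link text, fall back to the URL.
    return f'[{filename or preview_url}]({preview_url})'


print(to_link_markdown('report.pdf', 'https://example.com/files/abc'))
# [report.pdf](https://example.com/files/abc)
print(to_link_markdown(None, 'https://example.com/files/abc'))
# [https://example.com/files/abc](https://example.com/files/abc)
```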
@@ -47,7 +47,10 @@ class TokenBufferMemory:
             if self.conversation.mode not in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
                 file_extra_config = FileUploadConfigManager.convert(message.app_model_config.to_dict())
             else:
-                file_extra_config = FileUploadConfigManager.convert(message.workflow_run.workflow.features_dict)
+                file_extra_config = FileUploadConfigManager.convert(
+                    message.workflow_run.workflow.features_dict,
+                    is_vision=False
+                )

             if file_extra_config:
                 file_objs = message_file_parser.transform_message_files(
@@ -45,6 +45,19 @@ class SystemVariable(Enum):
     FILES = 'files'
     CONVERSATION = 'conversation'

+    @classmethod
+    def value_of(cls, value: str) -> 'SystemVariable':
+        """
+        Get value of given system variable.
+
+        :param value: system variable value
+        :return: system variable
+        """
+        for system_variable in cls:
+            if system_variable.value == value:
+                return system_variable
+        raise ValueError(f'invalid system variable value {value}')
+

 class NodeRunMetadataKey(Enum):
     """
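A usage sketch for the new lookup helper, assuming `SystemVariable` is imported from its defining module; `'files'` and `'conversation'` are the enum values visible in this hunk.

```python
assert SystemVariable.value_of('files') is SystemVariable.FILES
assert SystemVariable.value_of('conversation') is SystemVariable.CONVERSATION

try:
    SystemVariable.value_of('not-a-system-variable')
except ValueError as exc:
    print(exc)  # invalid system variable value not-a-system-variable
```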
@@ -1,9 +1,11 @@
+import json
 from typing import cast

+from core.file.file_obj import FileVar
 from core.prompt.utils.prompt_template_parser import PromptTemplateParser
 from core.workflow.entities.base_node_data_entities import BaseNodeData
 from core.workflow.entities.node_entities import NodeRunResult, NodeType
-from core.workflow.entities.variable_pool import ValueType, VariablePool
+from core.workflow.entities.variable_pool import VariablePool
 from core.workflow.nodes.answer.entities import (
     AnswerNodeData,
     GenerateRouteChunk,
@@ -30,44 +32,61 @@ class AnswerNode(BaseNode):
         # generate routes
         generate_routes = self.extract_generate_route_from_node_data(node_data)

-        answer = []
+        answer = ''
         for part in generate_routes:
             if part.type == "var":
                 part = cast(VarGenerateRouteChunk, part)
                 value_selector = part.value_selector
                 value = variable_pool.get_variable_value(
-                    variable_selector=value_selector,
-                    target_value_type=ValueType.STRING
+                    variable_selector=value_selector
                 )

-                answer_part = {
-                    "type": "text",
-                    "text": value
-                }
-                # TODO File
+                text = ''
+                if isinstance(value, str | int | float):
+                    text = str(value)
+                elif isinstance(value, dict):
+                    # other types
+                    text = json.dumps(value, ensure_ascii=False)
+                elif isinstance(value, FileVar):
+                    # convert file to markdown
+                    text = value.to_markdown()
+                elif isinstance(value, list):
+                    for item in value:
+                        if isinstance(item, FileVar):
+                            text += item.to_markdown() + ' '
+
+                    text = text.strip()
+
+                if not text and value:
+                    # other types
+                    text = json.dumps(value, ensure_ascii=False)
+
+                answer += text
             else:
                 part = cast(TextGenerateRouteChunk, part)
-                answer_part = {
-                    "type": "text",
-                    "text": part.text
-                }
-
-            if len(answer) > 0 and answer[-1]["type"] == "text" and answer_part["type"] == "text":
-                answer[-1]["text"] += answer_part["text"]
-            else:
-                answer.append(answer_part)
-
-        if len(answer) == 1 and answer[0]["type"] == "text":
-            answer = answer[0]["text"]
+                answer += part.text

         # re-fetch variable values
         variable_values = {}
         for variable_selector in node_data.variables:
             value = variable_pool.get_variable_value(
-                variable_selector=variable_selector.value_selector,
-                target_value_type=ValueType.STRING
+                variable_selector=variable_selector.value_selector
             )

+            if isinstance(value, str | int | float):
+                value = str(value)
+            elif isinstance(value, FileVar):
+                value = value.to_dict()
+            elif isinstance(value, list):
+                new_value = []
+                for item in value:
+                    if isinstance(item, FileVar):
+                        new_value.append(item.to_dict())
+                    else:
+                        new_value.append(item)
+
+                value = new_value
+
             variable_values[variable_selector.variable] = value

         return NodeRunResult(
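The second loop above normalizes variable values before they are attached to the `NodeRunResult`. Restated as a standalone helper, assuming only `FileVar.to_dict()` from this commit: scalars are stringified, files become dicts, and lists are mapped element-wise.

```python
from core.file.file_obj import FileVar  # import path as added in this commit


def normalize_variable_value(value):
    # Condensed restatement of the re-fetch loop's conversions above.
    if isinstance(value, str | int | float):
        return str(value)
    if isinstance(value, FileVar):
        return value.to_dict()
    if isinstance(value, list):
        return [item.to_dict() if isinstance(item, FileVar) else item
                for item in value]
    return value
```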