mirror of https://github.com/langgenius/dify.git
add app info for workflow convert
This commit is contained in:
parent b5fa68fdfe
commit 1ab3b73c14
@@ -289,11 +289,21 @@ class ConvertToWorkflowApi(Resource):
         Convert expert mode of chatbot app to workflow mode
         Convert Completion App to Workflow App
         """
+        if request.data:
+            parser = reqparse.RequestParser()
+            parser.add_argument('name', type=str, required=False, nullable=True, location='json')
+            parser.add_argument('icon', type=str, required=False, nullable=True, location='json')
+            parser.add_argument('icon_background', type=str, required=False, nullable=True, location='json')
+            args = parser.parse_args()
+        else:
+            args = {}
+
         # convert to workflow mode
         workflow_service = WorkflowService()
         new_app_model = workflow_service.convert_to_workflow(
             app_model=app_model,
-            account=current_user
+            account=current_user,
+            args=args
         )

         # return app id

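With this change the convert endpoint accepts an optional JSON body. A minimal client-side sketch of the new request shape follows; the base URL, route, app id and token are illustrative assumptions, not values defined by this commit:

import requests

# Hypothetical values for illustration only.
BASE_URL = "https://dify.example.com/console/api"  # assumed console API prefix
APP_ID = "<app-id>"
TOKEN = "<console-access-token>"

# All three fields are optional; anything omitted falls back to the source
# app's name/icon inside WorkflowConverter.convert_to_workflow.
payload = {
    "name": "My app (workflow)",
    "icon": "🤖",
    "icon_background": "#FFEAD5",
}

resp = requests.post(
    f"{BASE_URL}/apps/{APP_ID}/convert-to-workflow",  # assumed route for ConvertToWorkflowApi
    json=payload,
    headers={"Authorization": f"Bearer {TOKEN}"},
)
resp.raise_for_status()
print(resp.json())  # expected to contain the new workflow app's id
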
@@ -32,7 +32,11 @@ class WorkflowConverter:
     App Convert to Workflow Mode
     """

-    def convert_to_workflow(self, app_model: App, account: Account) -> App:
+    def convert_to_workflow(self, app_model: App,
+                            account: Account,
+                            name: str,
+                            icon: str,
+                            icon_background: str) -> App:
         """
         Convert app to workflow

@@ -44,6 +48,9 @@ class WorkflowConverter:

         :param app_model: App instance
         :param account: Account
+        :param name: new app name
+        :param icon: new app icon
+        :param icon_background: new app icon background
         :return: new App instance
         """
         # convert app model config

@@ -56,11 +63,11 @@ class WorkflowConverter:
         # create new app
         new_app = App()
         new_app.tenant_id = app_model.tenant_id
-        new_app.name = app_model.name + '(workflow)'
+        new_app.name = name if name else app_model.name + '(workflow)'
         new_app.mode = AppMode.ADVANCED_CHAT.value \
             if app_model.mode == AppMode.CHAT.value else AppMode.WORKFLOW.value
-        new_app.icon = app_model.icon
-        new_app.icon_background = app_model.icon_background
+        new_app.icon = icon if icon else app_model.icon
+        new_app.icon_background = icon_background if icon_background else app_model.icon_background
         new_app.enable_site = app_model.enable_site
         new_app.enable_api = app_model.enable_api
         new_app.api_rpm = app_model.api_rpm

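The new-app fields now prefer the caller-supplied values and only fall back to the source app when nothing is provided. A tiny self-contained illustration of that fallback, using a stand-in object instead of the real App model (an assumption for brevity):

from types import SimpleNamespace

# Stand-in for the real App model; only the fields touched above are mocked.
app_model = SimpleNamespace(name="FAQ bot", icon="💬", icon_background="#E0F2FE")

def pick(new_value, old_value):
    # Mirrors the `x if x else y` pattern above: None or "" falls back.
    return new_value if new_value else old_value

print(pick(None, app_model.name + '(workflow)'))  # -> FAQ bot(workflow)
print(pick("Support flow", app_model.name))       # -> Support flow
print(pick("", app_model.icon_background))        # -> #E0F2FE
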
@@ -143,6 +150,7 @@ class WorkflowConverter:

         # convert to llm node
         llm_node = self._convert_to_llm_node(
+            original_app_mode=AppMode.value_of(app_model.mode),
             new_app_mode=new_app_mode,
             graph=graph,
             model_config=app_config.model,

@@ -400,13 +408,15 @@ class WorkflowConverter:
             }
         }

-    def _convert_to_llm_node(self, new_app_mode: AppMode,
+    def _convert_to_llm_node(self, original_app_mode: AppMode,
+                             new_app_mode: AppMode,
                              graph: dict,
                              model_config: ModelConfigEntity,
                              prompt_template: PromptTemplateEntity,
                              file_upload: Optional[FileExtraConfig] = None) -> dict:
         """
         Convert to LLM Node
+        :param original_app_mode: original app mode
         :param new_app_mode: new app mode
         :param graph: graph
         :param model_config: model config

@@ -428,7 +438,7 @@ class WorkflowConverter:
             # get prompt template
             prompt_transform = SimplePromptTransform()
             prompt_template_config = prompt_transform.get_prompt_template(
-                app_mode=AppMode.WORKFLOW,
+                app_mode=original_app_mode,
                 provider=model_config.provider,
                 model=model_config.model,
                 pre_prompt=prompt_template.simple_prompt_template,

@@ -437,15 +447,18 @@ class WorkflowConverter:
             )

             template = prompt_template_config['prompt_template'].template
-            for v in start_node['data']['variables']:
-                template = template.replace('{{' + v['variable'] + '}}', '{{#start.' + v['variable'] + '#}}')
+            if not template:
+                prompts = []
+            else:
+                for v in start_node['data']['variables']:
+                    template = template.replace('{{' + v['variable'] + '}}', '{{#start.' + v['variable'] + '#}}')

-            prompts = [
-                {
-                    "role": 'user',
-                    "text": template
-                }
-            ]
+                prompts = [
+                    {
+                        "role": 'user',
+                        "text": template
+                    }
+                ]
         else:
             advanced_chat_prompt_template = prompt_template.advanced_chat_prompt_template

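The converter now skips prompt construction when the simple prompt template is empty and otherwise rewrites {{variable}} placeholders into the workflow's {{#start.variable#}} form. A standalone sketch of that rewrite, with made-up variable names and template text:

# Standalone illustration of the placeholder rewrite above; the variable
# names and the template string are hypothetical.
start_node = {"data": {"variables": [{"variable": "topic"}, {"variable": "tone"}]}}
template = "Write about {{topic}} in a {{tone}} voice."

if not template:
    prompts = []
else:
    for v in start_node["data"]["variables"]:
        template = template.replace(
            "{{" + v["variable"] + "}}", "{{#start." + v["variable"] + "#}}"
        )

    prompts = [{"role": "user", "text": template}]

print(prompts)
# [{'role': 'user', 'text': 'Write about {{#start.topic#}} in a {{#start.tone#}} voice.'}]
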
@@ -466,7 +479,7 @@ class WorkflowConverter:
             # get prompt template
             prompt_transform = SimplePromptTransform()
             prompt_template_config = prompt_transform.get_prompt_template(
-                app_mode=AppMode.WORKFLOW,
+                app_mode=original_app_mode,
                 provider=model_config.provider,
                 model=model_config.model,
                 pre_prompt=prompt_template.simple_prompt_template,

@@ -258,13 +258,14 @@ class WorkflowService:

         return workflow_node_execution

-    def convert_to_workflow(self, app_model: App, account: Account) -> App:
+    def convert_to_workflow(self, app_model: App, account: Account, args: dict) -> App:
         """
         Basic mode of chatbot app(expert mode) to workflow
         Completion App to Workflow App

         :param app_model: App instance
         :param account: Account instance
+        :param args: dict
         :return:
         """
         # chatbot convert to workflow mode

@@ -276,7 +277,10 @@ class WorkflowService:
         # convert to workflow
         new_app = workflow_converter.convert_to_workflow(
             app_model=app_model,
-            account=account
+            account=account,
+            name=args.get('name'),
+            icon=args.get('icon'),
+            icon_background=args.get('icon_background'),
         )

         return new_app

@@ -92,7 +92,6 @@ def test__convert_to_http_request_node_for_chatbot(default_variables):

     http_request_node = nodes[0]

-    assert len(http_request_node["data"]["variables"]) == 4  # appended _query variable
     assert http_request_node["data"]["method"] == "post"
     assert http_request_node["data"]["url"] == mock_api_based_extension.api_endpoint
     assert http_request_node["data"]["authorization"]["type"] == "api-key"

@@ -113,7 +112,7 @@ def test__convert_to_http_request_node_for_chatbot(default_variables):
     assert body_params["app_id"] == app_model.id
     assert body_params["tool_variable"] == external_data_variables[0].variable
     assert len(body_params["inputs"]) == 3
-    assert body_params["query"] == "{{_query}}"  # for chatbot
+    assert body_params["query"] == "{{#sys.query#}}"  # for chatbot

     code_node = nodes[1]
     assert code_node["data"]["type"] == "code"

@@ -163,7 +162,6 @@ def test__convert_to_http_request_node_for_workflow_app(default_variables):

     http_request_node = nodes[0]

-    assert len(http_request_node["data"]["variables"]) == 3
     assert http_request_node["data"]["method"] == "post"
     assert http_request_node["data"]["url"] == mock_api_based_extension.api_endpoint
     assert http_request_node["data"]["authorization"]["type"] == "api-key"

@@ -302,6 +300,7 @@ def test__convert_to_llm_node_for_chatbot_simple_chat_model(default_variables):
     )

     llm_node = workflow_converter._convert_to_llm_node(
+        original_app_mode=AppMode.CHAT,
         new_app_mode=new_app_mode,
         model_config=model_config_mock,
         graph=graph,

@@ -345,6 +344,7 @@ def test__convert_to_llm_node_for_chatbot_simple_completion_model(default_variables):
     )

     llm_node = workflow_converter._convert_to_llm_node(
+        original_app_mode=AppMode.CHAT,
         new_app_mode=new_app_mode,
         model_config=model_config_mock,
         graph=graph,

@@ -393,6 +393,7 @@ def test__convert_to_llm_node_for_chatbot_advanced_chat_model(default_variables):
     )

     llm_node = workflow_converter._convert_to_llm_node(
+        original_app_mode=AppMode.CHAT,
         new_app_mode=new_app_mode,
         model_config=model_config_mock,
         graph=graph,

@@ -444,6 +445,7 @@ def test__convert_to_llm_node_for_workflow_advanced_completion_model(default_variables):
     )

     llm_node = workflow_converter._convert_to_llm_node(
+        original_app_mode=AppMode.CHAT,
         new_app_mode=new_app_mode,
         model_config=model_config_mock,
         graph=graph,