refactor(vibe): modularize config and enhance workflow generation

- Extract vibe configuration into dedicated module (api/core/llm_generator/vibe_config/)
  - node_schemas.py: Built-in node parameter definitions
  - fallback_rules.py: Keyword-based fallback rules and aliases
  - responses.py: Off-topic response templates
  - node_definitions.json: Shared schema for frontend/backend sync

- Enhance workflow generation with intent classification
  - Add off-topic detection with helpful suggestions
  - Support regeneration mode with previous workflow context
  - Add tool validation and sanitization
  - Support language preference for generated content

- Extract frontend config to use-workflow-vibe-config.ts
  - NODE_TYPE_ALIASES: Node type inference mapping
  - FIELD_NAME_CORRECTIONS: LLM output field corrections
  - correctFieldName(): Unified field correction helper

- Fix defensive null checks in variable utils and key-value list
- Change verbose debug logs from info to debug level
This commit is contained in:
aqiu 2025-12-27 06:44:17 +08:00
parent 88d377ab87
commit c4eee28fd8
19 changed files with 2571 additions and 86 deletions

View File

@ -1,3 +1,22 @@
<!-- OPENSPEC:START -->
# OpenSpec Instructions
These instructions are for AI assistants working in this project.
Always open `@/openspec/AGENTS.md` when the request:
- Mentions planning or proposals (words like proposal, spec, change, plan)
- Introduces new capabilities, breaking changes, architecture shifts, or big performance/security work
- Sounds ambiguous and you need the authoritative spec before coding
Use `@/openspec/AGENTS.md` to learn:
- How to create and apply change proposals
- Spec format and conventions
- Project structure and guidelines
Keep this managed block so 'openspec update' can refresh the instructions.
<!-- OPENSPEC:END -->
# AGENTS.md
## Project Overview

View File

@ -55,12 +55,28 @@ class InstructionTemplatePayload(BaseModel):
type: str = Field(..., description="Instruction template type")
class PreviousWorkflow(BaseModel):
    """Previous workflow attempt, passed back as context for regeneration.

    Mirrors the shape of a prior generation result (nodes/edges/warnings) so
    the generator can improve on the earlier attempt instead of starting over.
    """

    # Nodes produced by the previous generation attempt.
    nodes: list[dict[str, Any]] = Field(default_factory=list, description="Previously generated nodes")
    # Edges produced by the previous generation attempt.
    edges: list[dict[str, Any]] = Field(default_factory=list, description="Previously generated edges")
    # Validation warnings emitted by the previous attempt.
    warnings: list[str] = Field(default_factory=list, description="Warnings from previous generation")
class FlowchartGeneratePayload(BaseModel):
    """Request payload for LLM-driven workflow flowchart generation."""

    # Natural-language description of the workflow the user wants.
    instruction: str = Field(..., description="Workflow flowchart generation instruction")
    # Accepts the wire field "model_config" via alias; the attribute name
    # differs because "model_config" is reserved by Pydantic v2 models.
    model_config_data: dict[str, Any] = Field(..., alias="model_config", description="Model configuration")
    available_nodes: list[dict[str, Any]] = Field(default_factory=list, description="Available node types")
    existing_nodes: list[dict[str, Any]] = Field(default_factory=list, description="Existing workflow nodes")
    available_tools: list[dict[str, Any]] = Field(default_factory=list, description="Available tools")
    # Nodes the user highlighted in the canvas, used as extra generation context.
    selected_node_ids: list[str] = Field(default_factory=list, description="IDs of selected nodes for context")
    # Phase 10: Regenerate with previous workflow context
    previous_workflow: PreviousWorkflow | None = Field(default=None, description="Previous workflow for regeneration")
    regenerate_mode: bool = Field(default=False, description="Whether this is a regeneration request")
    # Language preference for generated content (node titles, descriptions)
    language: str | None = Field(default=None, description="Preferred language for generated content")
    # Available models that user has configured (for LLM/question-classifier nodes)
    available_models: list[dict[str, Any]] = Field(default_factory=list, description="User's configured models")
def reg(cls: type[BaseModel]):
@ -267,7 +283,7 @@ class InstructionGenerateApi(Resource):
@console_ns.route("/flowchart-generate")
class FlowchartGenerateApi(Resource):
@console_ns.doc("generate_workflow_flowchart")
@console_ns.doc(description="Generate workflow flowchart using LLM")
@console_ns.doc(description="Generate workflow flowchart using LLM with intent classification")
@console_ns.expect(console_ns.models[FlowchartGeneratePayload.__name__])
@console_ns.response(200, "Flowchart generated successfully")
@console_ns.response(400, "Invalid request parameters")
@ -280,6 +296,15 @@ class FlowchartGenerateApi(Resource):
_, current_tenant_id = current_account_with_tenant()
try:
# Convert PreviousWorkflow to dict if present
previous_workflow_dict = None
if args.previous_workflow:
previous_workflow_dict = {
"nodes": args.previous_workflow.nodes,
"edges": args.previous_workflow.edges,
"warnings": args.previous_workflow.warnings,
}
result = LLMGenerator.generate_workflow_flowchart(
tenant_id=current_tenant_id,
instruction=args.instruction,
@ -287,6 +312,11 @@ class FlowchartGenerateApi(Resource):
available_nodes=args.available_nodes,
existing_nodes=args.existing_nodes,
available_tools=args.available_tools,
selected_node_ids=args.selected_node_ids,
previous_workflow=previous_workflow_dict,
regenerate_mode=args.regenerate_mode,
preferred_language=args.language,
available_models=args.available_models,
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)

View File

@ -18,7 +18,6 @@ from core.llm_generator.prompts import (
SUGGESTED_QUESTIONS_MAX_TOKENS,
SUGGESTED_QUESTIONS_TEMPERATURE,
SYSTEM_STRUCTURED_OUTPUT_GENERATE,
WORKFLOW_FLOWCHART_PROMPT_TEMPLATE,
WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE,
)
from core.model_manager import ModelManager
@ -295,20 +294,62 @@ class LLMGenerator:
available_nodes: Sequence[dict[str, object]] | None = None,
existing_nodes: Sequence[dict[str, object]] | None = None,
available_tools: Sequence[dict[str, object]] | None = None,
selected_node_ids: Sequence[str] | None = None,
previous_workflow: dict[str, object] | None = None,
regenerate_mode: bool = False,
preferred_language: str | None = None,
available_models: Sequence[dict[str, object]] | None = None,
):
model_parameters = model_config.get("completion_params", {})
prompt_template = PromptTemplateParser(WORKFLOW_FLOWCHART_PROMPT_TEMPLATE)
prompt_generate = prompt_template.format(
inputs={
"TASK_DESCRIPTION": instruction,
"AVAILABLE_NODES": json.dumps(available_nodes or [], ensure_ascii=False),
"EXISTING_NODES": json.dumps(existing_nodes or [], ensure_ascii=False),
"AVAILABLE_TOOLS": json.dumps(available_tools or [], ensure_ascii=False),
},
remove_template_variables=False,
"""
Generate workflow flowchart with enhanced prompts and inline intent classification.
Returns a dict with:
- intent: "generate" | "off_topic" | "error"
- flowchart: Mermaid syntax string (for generate intent)
- message: User-friendly explanation
- warnings: List of validation warnings
- suggestions: List of workflow suggestions (for off_topic intent)
- error: Error message if generation failed
"""
from core.llm_generator.vibe_prompts import (
build_vibe_enhanced_prompt,
extract_mermaid_from_response,
parse_vibe_response,
sanitize_tool_nodes,
validate_node_parameters,
validate_tool_references,
)
prompt_messages = [UserPromptMessage(content=prompt_generate)]
model_parameters = model_config.get("completion_params", {})
# Build enhanced prompts with context
system_prompt, user_prompt = build_vibe_enhanced_prompt(
instruction=instruction,
available_nodes=list(available_nodes) if available_nodes else None,
available_tools=list(available_tools) if available_tools else None,
existing_nodes=list(existing_nodes) if existing_nodes else None,
selected_node_ids=list(selected_node_ids) if selected_node_ids else None,
previous_workflow=dict(previous_workflow) if previous_workflow else None,
regenerate_mode=regenerate_mode,
preferred_language=preferred_language,
available_models=list(available_models) if available_models else None,
)
prompt_messages: list[PromptMessage] = [
SystemPromptMessage(content=system_prompt),
UserPromptMessage(content=user_prompt),
]
# DEBUG: Log model input
logger.debug("=" * 80)
logger.debug("[VIBE] generate_workflow_flowchart - MODEL INPUT")
logger.debug("=" * 80)
logger.debug("[VIBE] Instruction: %s", instruction)
logger.debug("[VIBE] Model: %s/%s", model_config.get("provider", ""), model_config.get("name", ""))
system_prompt_log = system_prompt[:2000] + "..." if len(system_prompt) > 2000 else system_prompt
logger.debug("[VIBE] System Prompt:\n%s", system_prompt_log)
logger.debug("[VIBE] User Prompt:\n%s", user_prompt)
logger.debug("=" * 80)
model_manager = ModelManager()
model_instance = model_manager.get_model_instance(
@ -318,9 +359,6 @@ class LLMGenerator:
model=model_config.get("name", ""),
)
flowchart = ""
error = ""
try:
response: LLMResult = model_instance.invoke_llm(
prompt_messages=list(prompt_messages),
@ -328,18 +366,110 @@ class LLMGenerator:
stream=False,
)
content = response.message.get_text_content()
# DEBUG: Log model output
logger.debug("=" * 80)
logger.debug("[VIBE] generate_workflow_flowchart - MODEL OUTPUT")
logger.debug("=" * 80)
logger.debug("[VIBE] Raw Response:\n%s", content)
logger.debug("=" * 80)
if not isinstance(content, str):
raise ValueError("Flowchart response is not a string")
match = re.search(r"```(?:mermaid)?\s*([\s\S]+?)```", content, flags=re.IGNORECASE)
flowchart = (match.group(1) if match else content).strip()
# Parse the enhanced response format
parsed = parse_vibe_response(content)
# DEBUG: Log parsed result
logger.debug("[VIBE] Parsed Response:")
logger.debug("[VIBE] intent: %s", parsed.get("intent"))
logger.debug("[VIBE] message: %s", parsed.get("message", "")[:200] if parsed.get("message") else "")
logger.debug("[VIBE] mermaid: %s", parsed.get("mermaid", "")[:500] if parsed.get("mermaid") else "")
logger.debug("[VIBE] warnings: %s", parsed.get("warnings", []))
logger.debug("[VIBE] suggestions: %s", parsed.get("suggestions", []))
if parsed.get("error"):
logger.debug("[VIBE] error: %s", parsed.get("error"))
logger.debug("=" * 80)
# Handle error case from parsing
if parsed.get("intent") == "error":
# Fall back to legacy parsing for backwards compatibility
match = re.search(r"```(?:mermaid)?\s*([\s\S]+?)```", content, flags=re.IGNORECASE)
flowchart = (match.group(1) if match else content).strip()
return {
"intent": "generate",
"flowchart": flowchart,
"message": "",
"warnings": [],
"tool_recommendations": [],
"error": "",
}
# Handle off_topic case
if parsed.get("intent") == "off_topic":
return {
"intent": "off_topic",
"flowchart": "",
"message": parsed.get("message", ""),
"suggestions": parsed.get("suggestions", []),
"warnings": [],
"tool_recommendations": [],
"error": "",
}
# Handle generate case
flowchart = extract_mermaid_from_response(parsed)
# Sanitize tool nodes - replace invalid tools with fallback nodes
original_nodes = parsed.get("nodes", [])
sanitized_nodes, sanitize_warnings = sanitize_tool_nodes(
original_nodes,
list(available_tools) if available_tools else None,
)
# Update parsed nodes with sanitized version
parsed["nodes"] = sanitized_nodes
# Validate tool references and get recommendations for unconfigured tools
validation_warnings, tool_recommendations = validate_tool_references(
sanitized_nodes,
list(available_tools) if available_tools else None,
)
# Validate node parameters are properly filled (Phase 9: Auto-Fill)
param_warnings = validate_node_parameters(sanitized_nodes)
existing_warnings = parsed.get("warnings", [])
all_warnings = existing_warnings + sanitize_warnings + validation_warnings + param_warnings
return {
"intent": "generate",
"flowchart": flowchart,
"nodes": sanitized_nodes, # Include sanitized nodes in response
"edges": parsed.get("edges", []),
"message": parsed.get("message", ""),
"warnings": all_warnings,
"tool_recommendations": tool_recommendations,
"error": "",
}
except InvokeError as e:
error = str(e)
return {
"intent": "error",
"flowchart": "",
"message": "",
"warnings": [],
"tool_recommendations": [],
"error": str(e),
}
except Exception as e:
logger.exception("Failed to generate workflow flowchart, model: %s", model_config.get("name"))
error = str(e)
return {"flowchart": flowchart, "error": error}
return {
"intent": "error",
"flowchart": "",
"message": "",
"warnings": [],
"tool_recommendations": [],
"error": str(e),
}
@classmethod
def generate_code(cls, tenant_id: str, instruction: str, model_config: dict, code_language: str = "javascript"):

View File

@ -147,6 +147,8 @@ WORKFLOW_FLOWCHART_PROMPT_TEMPLATE = """
You are an expert workflow designer. Generate a Mermaid flowchart based on the user's request.
Constraints:
- Detect the language of the user's request. Generate all node titles in the same language as the user's input.
- If the input language cannot be determined, use {{PREFERRED_LANGUAGE}} as the fallback language.
- Use only node types listed in <available_nodes>.
- Use only tools listed in <available_tools>. When using a tool node, set type=tool and tool=<tool_key>.
- Tools may include MCP providers (provider_type=mcp). Tool selection still uses tool_key.

View File

@ -0,0 +1,26 @@
"""
Vibe Workflow Generator Configuration Module.
This module centralizes configuration for the Vibe workflow generation feature,
including node schemas, fallback rules, and response templates.
"""
from core.llm_generator.vibe_config.fallback_rules import (
FALLBACK_RULES,
FIELD_NAME_CORRECTIONS,
NODE_TYPE_ALIASES,
get_corrected_field_name,
)
from core.llm_generator.vibe_config.node_schemas import BUILTIN_NODE_SCHEMAS
from core.llm_generator.vibe_config.responses import DEFAULT_SUGGESTIONS, OFF_TOPIC_RESPONSES
__all__ = [
"BUILTIN_NODE_SCHEMAS",
"DEFAULT_SUGGESTIONS",
"FALLBACK_RULES",
"FIELD_NAME_CORRECTIONS",
"NODE_TYPE_ALIASES",
"OFF_TOPIC_RESPONSES",
"get_corrected_field_name",
]

View File

@ -0,0 +1,138 @@
"""
Fallback Rules for Vibe Workflow Generation.
This module defines keyword-based rules for determining fallback node types
when the LLM generates invalid tool references.
Note: These definitions are mirrored in node_definitions.json for frontend sync.
When updating these values, also update the JSON file.
"""
# Keyword rules for smart fallback detection
# Maps node type to keywords that suggest using that node type as a fallback
# when the LLM generates a tool reference that cannot be resolved, so the
# workflow degrades to a built-in node instead of failing outright.
# Keywords are matched against the invalid node's text by the sanitizer.
FALLBACK_RULES: dict[str, list[str]] = {
    # Networking / web-access vocabulary -> plain HTTP request node.
    "http-request": [
        "http",
        "url",
        "web",
        "scrape",
        "scraper",
        "fetch",
        "api",
        "request",
        "download",
        "upload",
        "webhook",
        "endpoint",
        "rest",
        "get",
        "post",
    ],
    # Data-manipulation / computation vocabulary -> code node.
    "code": [
        "code",
        "script",
        "calculate",
        "compute",
        "process",
        "transform",
        "parse",
        "convert",
        "format",
        "filter",
        "sort",
        "math",
        "logic",
    ],
    # Text understanding / generation vocabulary -> LLM node.
    "llm": [
        "analyze",
        "summarize",
        "summary",
        "extract",
        "classify",
        "translate",
        "generate",
        "write",
        "rewrite",
        "explain",
        "answer",
        "chat",
    ],
}
# Node type aliases for inference from natural language
# Maps common terms to canonical node type names. Identity mappings are kept
# so canonical names resolve through the same single lookup.
NODE_TYPE_ALIASES: dict[str, str] = {
    # Start node aliases
    "start": "start",
    "begin": "start",
    "input": "start",
    # End node aliases
    "end": "end",
    "finish": "end",
    "output": "end",
    # LLM node aliases
    "llm": "llm",
    "ai": "llm",
    "gpt": "llm",
    "model": "llm",
    "chat": "llm",
    # Code node aliases
    "code": "code",
    "script": "code",
    "python": "code",
    "javascript": "code",
    # HTTP request node aliases
    "http-request": "http-request",
    "http": "http-request",
    "request": "http-request",
    "api": "http-request",
    "fetch": "http-request",
    "webhook": "http-request",
    # Conditional node aliases
    "if-else": "if-else",
    "condition": "if-else",
    "branch": "if-else",
    "switch": "if-else",
    # Loop node aliases
    # NOTE(review): "loop" maps to itself while "foreach" maps to "iteration",
    # and "loop" is absent from nodeOutputFields in node_definitions.json —
    # confirm "loop" is a real canonical node type and not a typo for "iteration".
    "iteration": "iteration",
    "loop": "loop",
    "foreach": "iteration",
    # Tool node alias
    "tool": "tool",
}
# Field name corrections for LLM-generated node configs
# Maps incorrect field names to correct ones for specific node types
FIELD_NAME_CORRECTIONS: dict[str, dict[str, str]] = {
    "http-request": {
        "text": "body",  # LLM might use "text" instead of "body"
        "content": "body",
        "response": "body",
    },
    "code": {
        "text": "result",  # LLM might use "text" instead of "result"
        "output": "result",
    },
    "llm": {
        "response": "text",
        "answer": "text",
    },
}


def get_corrected_field_name(node_type: str, field: str) -> str:
    """Map a possibly-wrong LLM-emitted field name to its canonical form.

    Args:
        node_type: Node type the field belongs to (e.g. "http-request", "code").
        field: Field name as produced by the LLM.

    Returns:
        The canonical field name, or ``field`` unchanged when the node type
        has no correction table or the field needs no correction.
    """
    return FIELD_NAME_CORRECTIONS.get(node_type, {}).get(field, field)

View File

@ -0,0 +1,82 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"description": "Vibe Workflow Generator - Node Definitions for Frontend/Backend Sync",
"version": "1.0.0",
"nodeTypeAliases": {
"start": "start",
"begin": "start",
"input": "start",
"end": "end",
"finish": "end",
"output": "end",
"llm": "llm",
"ai": "llm",
"gpt": "llm",
"model": "llm",
"chat": "llm",
"code": "code",
"script": "code",
"python": "code",
"javascript": "code",
"http-request": "http-request",
"http": "http-request",
"request": "http-request",
"api": "http-request",
"fetch": "http-request",
"webhook": "http-request",
"if-else": "if-else",
"condition": "if-else",
"branch": "if-else",
"switch": "if-else",
"iteration": "iteration",
"loop": "loop",
"foreach": "iteration",
"tool": "tool"
},
"fieldNameCorrections": {
"http-request": {
"text": "body",
"content": "body",
"response": "body"
},
"code": {
"text": "result",
"output": "result"
},
"llm": {
"response": "text",
"answer": "text"
}
},
"fallbackKeywords": {
"http-request": [
"http", "url", "web", "scrape", "scraper", "fetch", "api", "request",
"download", "upload", "webhook", "endpoint", "rest", "get", "post"
],
"code": [
"code", "script", "calculate", "compute", "process", "transform",
"parse", "convert", "format", "filter", "sort", "math", "logic"
],
"llm": [
"analyze", "summarize", "summary", "extract", "classify", "translate",
"generate", "write", "rewrite", "explain", "answer", "chat"
]
},
"nodeOutputFields": {
"start": ["All defined variables available as {{#start.variable_name#}}"],
"end": [],
"http-request": ["body", "status_code", "headers"],
"code": ["result"],
"llm": ["text"],
"if-else": [],
"knowledge-retrieval": ["result"],
"template-transform": ["output"],
"variable-aggregator": ["output"],
"iteration": ["item", "index"]
}
}

View File

@ -0,0 +1,211 @@
"""
Built-in Node Schemas for Vibe Workflow Generation.
These schemas define the parameter structures for each node type,
helping the LLM understand what configuration each node requires.
"""
from typing import Any
# Built-in node schemas with parameter definitions
# These help the model understand what config each node type requires
#
# Shape of each entry:
#   description: one-line summary of the node's purpose (shown to the LLM)
#   required:    parameter names that must be present in the node config
#   parameters:  per-parameter spec (type / description / options / example ...)
#   outputs:     human-readable list of fields the node exposes downstream
BUILTIN_NODE_SCHEMAS: dict[str, dict[str, Any]] = {
    "start": {
        "description": "Workflow entry point - defines input variables",
        "required": [],
        "parameters": {
            "variables": {
                "type": "array",
                "description": "Input variables for the workflow",
                "item_schema": {
                    "variable": "string - variable name",
                    "label": "string - display label",
                    "type": "enum: text-input, paragraph, number, select, file, file-list",
                    "required": "boolean",
                    "max_length": "number (optional)",
                },
            },
        },
        "outputs": ["All defined variables are available as {{#start.variable_name#}}"],
    },
    # NOTE(review): "end" is the only entry without an "outputs" key — confirm
    # this is intentional (end nodes produce no downstream-referenceable fields).
    "end": {
        "description": "Workflow exit point - defines output variables",
        "required": ["outputs"],
        "parameters": {
            "outputs": {
                "type": "array",
                "description": "Output variables to return",
                "item_schema": {
                    "variable": "string - output variable name",
                    "type": "enum: string, number, object, array",
                    "value_selector": "array - path to source value, e.g. ['node_id', 'field']",
                },
            },
        },
    },
    "http-request": {
        "description": "Send HTTP requests to external APIs or fetch web content",
        "required": ["url", "method"],
        "parameters": {
            "url": {
                "type": "string",
                "description": "Full URL including protocol (https://...)",
                "example": "{{#start.url#}} or https://api.example.com/data",
            },
            "method": {
                "type": "enum",
                "options": ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD"],
                "description": "HTTP method",
            },
            "headers": {
                "type": "string",
                "description": "HTTP headers as newline-separated 'Key: Value' pairs",
                "example": "Content-Type: application/json\nAuthorization: Bearer {{#start.api_key#}}",
            },
            "params": {
                "type": "string",
                "description": "URL query parameters as newline-separated 'key: value' pairs",
            },
            "body": {
                "type": "object",
                "description": "Request body with type field required",
                "example": {"type": "none", "data": []},
            },
            "authorization": {
                "type": "object",
                "description": "Authorization config",
                "example": {"type": "no-auth"},
            },
            "timeout": {
                "type": "number",
                "description": "Request timeout in seconds",
                "default": 60,
            },
        },
        "outputs": ["body (response content)", "status_code", "headers"],
    },
    "code": {
        "description": "Execute Python or JavaScript code for custom logic",
        "required": ["code", "language"],
        "parameters": {
            "code": {
                "type": "string",
                "description": "Code to execute. Must define a main() function that returns a dict.",
            },
            "language": {
                "type": "enum",
                "options": ["python3", "javascript"],
            },
            "variables": {
                "type": "array",
                "description": "Input variables passed to the code",
                "item_schema": {"variable": "string", "value_selector": "array"},
            },
            "outputs": {
                "type": "object",
                "description": "Output variable definitions",
            },
        },
        "outputs": ["Variables defined in outputs schema"],
    },
    "llm": {
        "description": "Call a large language model for text generation/processing",
        "required": ["prompt_template"],
        "parameters": {
            "model": {
                "type": "object",
                "description": "Model configuration (provider, name, mode)",
            },
            "prompt_template": {
                "type": "array",
                "description": "Messages for the LLM",
                "item_schema": {
                    "role": "enum: system, user, assistant",
                    "text": "string - message content, can include {{#node_id.field#}} references",
                },
            },
            "context": {
                "type": "object",
                "description": "Optional context settings",
            },
            "memory": {
                "type": "object",
                "description": "Optional memory/conversation settings",
            },
        },
        "outputs": ["text (generated response)"],
    },
    "if-else": {
        "description": "Conditional branching based on conditions",
        "required": ["conditions"],
        "parameters": {
            "conditions": {
                "type": "array",
                "description": "List of condition cases",
                "item_schema": {
                    "case_id": "string - unique case identifier",
                    "logical_operator": "enum: and, or",
                    "conditions": "array of {variable_selector, comparison_operator, value}",
                },
            },
        },
        "outputs": ["Branches: true (conditions met), false (else)"],
    },
    "knowledge-retrieval": {
        "description": "Query knowledge base for relevant content",
        "required": ["query_variable_selector", "dataset_ids"],
        "parameters": {
            "query_variable_selector": {
                "type": "array",
                "description": "Path to query variable, e.g. ['start', 'query']",
            },
            "dataset_ids": {
                "type": "array",
                "description": "List of knowledge base IDs to search",
            },
            "retrieval_mode": {
                "type": "enum",
                "options": ["single", "multiple"],
            },
        },
        "outputs": ["result (retrieved documents)"],
    },
    "template-transform": {
        "description": "Transform data using Jinja2 templates",
        "required": ["template"],
        "parameters": {
            "template": {
                "type": "string",
                "description": "Jinja2 template string",
            },
            "variables": {
                "type": "array",
                "description": "Variables to pass to template",
            },
        },
        "outputs": ["output (transformed string)"],
    },
    "variable-aggregator": {
        "description": "Aggregate variables from multiple branches",
        "required": ["variables"],
        "parameters": {
            "variables": {
                "type": "array",
                "description": "List of variable selectors to aggregate",
            },
        },
        "outputs": ["output (aggregated value)"],
    },
    "iteration": {
        "description": "Loop over array items",
        "required": ["iterator_selector"],
        "parameters": {
            "iterator_selector": {
                "type": "array",
                "description": "Path to array variable to iterate",
            },
        },
        "outputs": ["item (current iteration item)", "index (current index)"],
    },
}

View File

@ -0,0 +1,74 @@
"""
Response Templates for Vibe Workflow Generation.
This module defines templates for off-topic responses and default suggestions
to guide users back to workflow-related requests.
"""
# Off-topic response templates for different categories
# Each category has messages in multiple languages, keyed by language code
# ("en", "zh"). The "default" category is the catch-all for uncategorized
# off-topic requests; every message redirects the user toward a workflow task.
OFF_TOPIC_RESPONSES: dict[str, dict[str, str]] = {
    "weather": {
        "en": (
            "I'm the workflow design assistant - I can't check the weather, "
            "but I can help you build AI workflows! For example, I could help you "
            "create a workflow that fetches weather data from an API."
        ),
        # NOTE(review): no punctuation between "助手" and "无法" — confirm the
        # wording is intended before shipping user-facing copy.
        "zh": "我是工作流设计助手无法查询天气。但我可以帮你创建一个从API获取天气数据的工作流",
    },
    "math": {
        "en": (
            "I focus on workflow design rather than calculations. However, "
            "if you need calculations in a workflow, I can help you add a Code node "
            "that handles math operations!"
        ),
        "zh": "我专注于工作流设计而非计算。但如果您需要在工作流中进行计算,我可以帮您添加一个处理数学运算的代码节点!",
    },
    "joke": {
        "en": (
            "While I'd love to share a laugh, I'm specialized in workflow design. "
            "How about we create something fun instead - like a workflow that generates jokes using AI?"
        ),
        "zh": "虽然我很想讲笑话但我专门从事工作流设计。不如我们创建一个有趣的东西——比如使用AI生成笑话的工作流",
    },
    "translation": {
        "en": (
            "I can't translate directly, but I can help you build a translation workflow! "
            "Would you like to create one using an LLM node?"
        ),
        "zh": "我不能直接翻译但我可以帮你构建一个翻译工作流要创建一个使用LLM节点的翻译流程吗",
    },
    "general_coding": {
        "en": (
            "I'm specialized in Dify workflow design rather than general coding help. "
            "But if you want to add code logic to your workflow, I can help you configure a Code node!"
        ),
        "zh": (
            "我专注于Dify工作流设计而非通用编程帮助。"
            "但如果您想在工作流中添加代码逻辑,我可以帮您配置一个代码节点!"
        ),
    },
    # Catch-all for off-topic requests that match no specific category above.
    "default": {
        "en": (
            "I'm the Dify workflow design assistant. I help create AI automation workflows, "
            "but I can't help with general questions. Would you like to create a workflow instead?"
        ),
        "zh": "我是Dify工作流设计助手。我帮助创建AI自动化工作流但无法回答一般性问题。您想创建一个工作流吗",
    },
}
# Default suggestions for off-topic requests
# These help guide users towards valid workflow requests; keyed by language code.
DEFAULT_SUGGESTIONS: dict[str, list[str]] = {
    "en": [
        "Create a chatbot workflow",
        "Build a document summarization pipeline",
        "Add email notification to workflow",
    ],
    "zh": [
        "创建一个聊天机器人工作流",
        "构建文档摘要处理流程",
        "添加邮件通知到工作流",
    ],
}

File diff suppressed because it is too large Load Diff

View File

@ -25,3 +25,4 @@ export * from './use-workflow-search'
export * from './use-workflow-start-run'
export * from './use-workflow-variables'
export * from './use-workflow-vibe'
export * from './use-workflow-vibe-config'

View File

@ -0,0 +1,100 @@
/**
* Vibe Workflow Generator Configuration
*
* This module centralizes configuration for the Vibe workflow generation feature,
* including node type aliases and field name corrections.
*
* Note: These definitions are mirrored in the backend at:
* api/core/llm_generator/vibe_config/node_definitions.json
* When updating these values, also update the backend JSON file.
*/
/**
 * Node type aliases for inference from natural language.
 * Maps common terms to canonical node type names.
 *
 * Identity mappings are kept so canonical names resolve through the same
 * single lookup. Mirrored in the backend at
 * api/core/llm_generator/vibe_config/node_definitions.json.
 */
export const NODE_TYPE_ALIASES: Record<string, string> = {
  // Start node aliases
  start: 'start',
  begin: 'start',
  input: 'start',
  // End node aliases
  end: 'end',
  finish: 'end',
  output: 'end',
  // LLM node aliases
  llm: 'llm',
  ai: 'llm',
  gpt: 'llm',
  model: 'llm',
  chat: 'llm',
  // Code node aliases
  code: 'code',
  script: 'code',
  python: 'code',
  javascript: 'code',
  // HTTP request node aliases
  'http-request': 'http-request',
  http: 'http-request',
  request: 'http-request',
  api: 'http-request',
  fetch: 'http-request',
  webhook: 'http-request',
  // Conditional node aliases
  'if-else': 'if-else',
  condition: 'if-else',
  branch: 'if-else',
  switch: 'if-else',
  // Loop node aliases
  // NOTE(review): 'loop' maps to itself while 'foreach' maps to 'iteration' —
  // confirm 'loop' is a distinct canonical node type and not a typo.
  iteration: 'iteration',
  loop: 'loop',
  foreach: 'iteration',
  // Tool node alias
  tool: 'tool',
}
/**
 * Field name corrections for LLM-generated node configs.
 * Maps incorrect field names to correct ones for specific node types.
 */
export const FIELD_NAME_CORRECTIONS: Record<string, Record<string, string>> = {
  'http-request': {
    text: 'body', // LLM might use "text" instead of "body"
    content: 'body',
    response: 'body',
  },
  code: {
    text: 'result', // LLM might use "text" instead of "result"
    output: 'result',
  },
  llm: {
    response: 'text',
    answer: 'text',
  },
}

/**
 * Correct field names based on node type.
 * LLM sometimes generates wrong field names (e.g., "text" instead of "body" for HTTP nodes).
 *
 * @param field - The field name to correct
 * @param nodeType - The type of the node
 * @returns The corrected field name, or the original if no correction needed
 */
export const correctFieldName = (field: string, nodeType: string): string => {
  // Own-property checks prevent inherited Object.prototype members (e.g. an
  // LLM-emitted nodeType/field of 'toString' or 'constructor') from being
  // treated as correction tables or returned as a "corrected" name.
  if (!Object.prototype.hasOwnProperty.call(FIELD_NAME_CORRECTIONS, nodeType))
    return field
  const corrections = FIELD_NAME_CORRECTIONS[nodeType]
  if (Object.prototype.hasOwnProperty.call(corrections, field))
    return corrections[field]
  return field
}
/**
 * Get the canonical node type from an alias.
 *
 * @param alias - The alias to look up (matched case-insensitively)
 * @returns The canonical node type, or undefined if not found
 */
export const getCanonicalNodeType = (alias: string): string | undefined => {
  const key = alias.toLowerCase()
  // Own-property check prevents inherited Object.prototype members (e.g. an
  // alias of 'constructor') from leaking through the lookup as a non-string.
  return Object.prototype.hasOwnProperty.call(NODE_TYPE_ALIASES, key)
    ? NODE_TYPE_ALIASES[key]
    : undefined
}

View File

@ -13,6 +13,7 @@ import Toast from '@/app/components/base/toast'
import { ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { useModelListAndDefaultModelAndCurrentProviderAndModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
import { useGetLanguage } from '@/context/i18n'
import type { BackendEdgeSpec, BackendNodeSpec } from '@/service/debug'
import { generateFlowchart } from '@/service/debug'
import {
useAllBuiltInTools,
@ -42,6 +43,7 @@ import { useNodesMetaData } from './use-nodes-meta-data'
import { useNodesSyncDraft } from './use-nodes-sync-draft'
import { useNodesReadOnly } from './use-workflow'
import { useWorkflowHistory, WorkflowHistoryEvent } from './use-workflow-history'
import { NODE_TYPE_ALIASES, correctFieldName } from './use-workflow-vibe-config'
type VibeCommandDetail = {
dsl?: string
@ -105,6 +107,74 @@ const normalizeProviderIcon = (icon?: ToolWithProvider['icon']) => {
return icon
}
/**
 * Replace variable references in node data using the nodeIdMap.
 *
 * Recursively walks strings, arrays, and objects, rewriting references from
 * old (LLM-generated) node IDs to the real IDs of the created nodes, and
 * correcting field names per node type via correctFieldName. Handles:
 * - String templates: {{#old_id.field#}} {{#new_id.field#}}
 * - Value selectors: ["old_id", "field"] ["new_id", "field"]
 * - Mixed content objects: {type: "mixed", value: "..."} normalized to string
 *   for url/headers/params fields
 * - Field name correction based on node type
 *
 * @param data - Arbitrary node config value (string, array, object, primitive)
 * @param nodeIdMap - Map from old node ID to the newly created Node
 * @param parentKey - Key under which `data` sits in its parent object (used to
 *   decide whether a mixed-content object should collapse to a plain string)
 * @returns The rewritten value; unmatched references are left untouched
 */
const replaceVariableReferences = (
  data: unknown,
  nodeIdMap: Map<string, Node>,
  parentKey?: string,
): unknown => {
  if (typeof data === 'string') {
    // Replace {{#old_id.field#}} patterns and correct field names
    return data.replace(/\{\{#([^.#]+)\.([^#]+)#\}\}/g, (match, oldId, field) => {
      const newNode = nodeIdMap.get(oldId)
      if (newNode) {
        const nodeType = newNode.data?.type as string || ''
        const correctedField = correctFieldName(field, nodeType)
        return `{{#${newNode.id}.${correctedField}#}}`
      }
      return match // Keep original if no mapping found
    })
  }
  if (Array.isArray(data)) {
    // Check if this is a value_selector array: ["node_id", "field", ...]
    // Heuristic: first two elements are strings AND the first resolves in the
    // nodeIdMap; otherwise the array is treated as a plain list and recursed.
    if (data.length >= 2 && typeof data[0] === 'string' && typeof data[1] === 'string') {
      const potentialNodeId = data[0]
      const newNode = nodeIdMap.get(potentialNodeId)
      if (newNode) {
        const nodeType = newNode.data?.type as string || ''
        const correctedField = correctFieldName(data[1], nodeType)
        // Replace the node ID and correct field name in value_selector
        return [newNode.id, correctedField, ...data.slice(2)]
      }
    }
    // Recursively process array elements
    // (note: element-level recursion does not forward parentKey)
    return data.map(item => replaceVariableReferences(item, nodeIdMap))
  }
  if (data !== null && typeof data === 'object') {
    const obj = data as Record<string, unknown>
    // Handle "mixed content" objects like {type: "mixed", value: "{{#...#}}"}
    // These should be normalized to plain strings for fields like 'url'
    if (obj.type === 'mixed' && typeof obj.value === 'string') {
      const processedValue = replaceVariableReferences(obj.value, nodeIdMap) as string
      // For certain fields (url, headers, params), return just the string value
      if (parentKey && ['url', 'headers', 'params'].includes(parentKey)) {
        return processedValue
      }
      // Otherwise keep the object structure but update the value
      return { ...obj, value: processedValue }
    }
    // Recursively process object properties, passing each key down so nested
    // mixed-content objects know which field they belong to
    const result: Record<string, unknown> = {}
    for (const [key, value] of Object.entries(obj)) {
      result[key] = replaceVariableReferences(value, nodeIdMap, key)
    }
    return result
  }
  return data // Return primitives as-is
}
const parseNodeLabel = (label: string) => {
const tokens = label.split('|').map(token => token.trim()).filter(Boolean)
const info: Record<string, string> = {}
@ -116,8 +186,17 @@ const parseNodeLabel = (label: string) => {
info[rawKey.trim().toLowerCase()] = rest.join('=').trim()
})
// Fallback: if no type= found, try to infer from label text
if (!info.type && tokens.length === 1 && !tokens[0].includes('=')) {
info.type = tokens[0]
const labelLower = tokens[0].toLowerCase()
// Check if label matches a known node type alias
if (NODE_TYPE_ALIASES[labelLower]) {
info.type = NODE_TYPE_ALIASES[labelLower]
info.title = tokens[0] // Use original label as title
}
else {
info.type = tokens[0]
}
}
if (!info.tool && info.tool_key)
@ -356,7 +435,7 @@ export const useWorkflowVibe = () => {
const { handleSyncWorkflowDraft } = useNodesSyncDraft()
const { getNodesReadOnly } = useNodesReadOnly()
const { saveStateToHistory } = useWorkflowHistory()
const { defaultModel } = useModelListAndDefaultModelAndCurrentProviderAndModel(ModelTypeEnum.textGeneration)
const { defaultModel, modelList } = useModelListAndDefaultModelAndCurrentProviderAndModel(ModelTypeEnum.textGeneration)
const { data: buildInTools } = useAllBuiltInTools()
const { data: customTools } = useAllCustomTools()
@ -476,14 +555,24 @@ export const useWorkflowVibe = () => {
const toolLookup = useMemo(() => {
const map = new Map<string, ToolDefaultValue>()
toolOptions.forEach((tool) => {
// Primary key: provider_id/tool_name (e.g., "google/google_search")
const primaryKey = normalizeKey(`${tool.provider_id}/${tool.tool_name}`)
map.set(primaryKey, tool)
// Fallback 1: provider_name/tool_name (e.g., "Google/google_search")
const providerNameKey = normalizeKey(`${tool.provider_name}/${tool.tool_name}`)
map.set(providerNameKey, tool)
// Fallback 2: tool_label (display name)
const labelKey = normalizeKey(tool.tool_label)
map.set(labelKey, tool)
// Fallback 3: tool_name alone (for partial matching when model omits provider)
const toolNameKey = normalizeKey(tool.tool_name)
if (!map.has(toolNameKey)) {
// Only set if not already taken (avoid collisions between providers)
map.set(toolNameKey, tool)
}
})
return map
}, [toolOptions])
@ -502,6 +591,310 @@ export const useWorkflowVibe = () => {
return map
}, [nodesMetaDataMap])
// Build a preview FlowGraph (nodes + edges) from backend-generated node/edge
// specs: maps backend ids to freshly created canvas nodes, merges backend
// config over node-type defaults, rewrites variable references, and lays the
// result out to the right of the existing canvas content.
const createGraphFromBackendNodes = useCallback(async (
  backendNodes: BackendNodeSpec[],
  backendEdges: BackendEdgeSpec[],
): Promise<FlowGraph> => {
  const { getNodes } = store.getState()
  const nodes = getNodes()
  // Node metadata (per-type defaults) must be loaded before nodes can be built.
  if (!nodesMetaDataMap) {
    Toast.notify({ type: 'error', message: t('workflow.vibe.nodesUnavailable') })
    return { nodes: [], edges: [] }
  }
  const existingStartNode = nodes.find(node => node.data.type === BlockEnum.Start)
  const newNodes: Node[] = []
  // Maps backend-spec node ids -> actual canvas Node; used for edge wiring
  // and for rewriting variable references below.
  const nodeIdMap = new Map<string, Node>()
  for (const nodeSpec of backendNodes) {
    // Map string type to BlockEnum
    const typeKey = normalizeKey(nodeSpec.type)
    const nodeType = nodeTypeLookup.get(typeKey)
    if (!nodeType) {
      // Skip unknown node types
      continue
    }
    if (nodeType === BlockEnum.Start && existingStartNode) {
      // A workflow has a single Start node: merge backend variables into the
      // existing Start node instead of creating a duplicate.
      const backendVariables = (nodeSpec.config?.variables as Array<Record<string, unknown>>) || []
      if (backendVariables.length > 0) {
        const existingVariables = (existingStartNode.data.variables as Array<Record<string, unknown>>) || []
        // Add only variables that don't already exist (matched by name)
        for (const backendVar of backendVariables) {
          const varName = backendVar.variable as string
          const exists = existingVariables.some(v => v.variable === varName)
          if (!exists) {
            existingVariables.push(backendVar)
          }
        }
        // NOTE(review): existingVariables is the live array on existingStartNode.data,
        // so the push above mutates the current Start node in place rather than a
        // preview clone — confirm this is intended before accept/reject semantics rely
        // on it. The Start node is not added to newNodes (it already lives in `nodes`);
        // it is only registered in nodeIdMap so edges and refs can resolve to it.
      }
      nodeIdMap.set(nodeSpec.id, existingStartNode)
      continue
    }
    const nodeDefault = nodesMetaDataMap[nodeType]
    if (!nodeDefault)
      continue
    const defaultValue = nodeDefault.defaultValue || {}
    // Title precedence: backend title > type metadata > default value > raw type string.
    const title = nodeSpec.title?.trim() || nodeDefault.metaData.title || defaultValue.title || nodeSpec.type
    // For tool nodes, try to get tool default value from config
    let toolDefaultValue: ToolDefaultValue | undefined
    if (nodeType === BlockEnum.Tool && nodeSpec.config) {
      const toolName = nodeSpec.config.tool_name as string | undefined
      const providerId = nodeSpec.config.provider_id as string | undefined
      if (toolName && providerId) {
        // Primary lookup by "provider/tool"; fall back to bare tool name.
        const toolKey = normalizeKey(`${providerId}/${toolName}`)
        toolDefaultValue = toolLookup.get(toolKey) || toolLookup.get(normalizeKey(toolName))
      }
    }
    const desc = (toolDefaultValue?.tool_description || (defaultValue as { desc?: string }).desc || '') as string
    // Merge backend config into node data.
    // Backend provides e.g.: { url: "{{#start.url#}}", method: "GET", ... }
    const backendConfig = nodeSpec.config || {}
    // Deep merge for nested objects (e.g., body, authorization) to preserve required fields
    const mergedConfig: Record<string, unknown> = { ...backendConfig }
    const defaultValueRecord = defaultValue as Record<string, unknown>
    // For http-request nodes, ensure body has all required fields
    if (nodeType === BlockEnum.HttpRequest) {
      const defaultBody = defaultValueRecord.body as Record<string, unknown> | undefined
      const backendBody = backendConfig.body as Record<string, unknown> | undefined
      if (defaultBody || backendBody) {
        mergedConfig.body = {
          type: 'none',
          data: [],
          ...(defaultBody || {}),
          ...(backendBody || {}),
        }
        // Ensure data is always an array (LLM output may provide a string)
        if (!Array.isArray((mergedConfig.body as Record<string, unknown>).data)) {
          (mergedConfig.body as Record<string, unknown>).data = []
        }
      }
      // Ensure authorization has type
      const defaultAuth = defaultValueRecord.authorization as Record<string, unknown> | undefined
      const backendAuth = backendConfig.authorization as Record<string, unknown> | undefined
      if (defaultAuth || backendAuth) {
        mergedConfig.authorization = {
          type: 'no-auth',
          ...(defaultAuth || {}),
          ...(backendAuth || {}),
        }
      }
    }
    // For any node with model config, ALWAYS use user's default model —
    // the model name the LLM generated may not exist in this workspace.
    if (backendConfig.model && defaultModel) {
      mergedConfig.model = {
        provider: defaultModel.provider.provider,
        name: defaultModel.model,
        mode: 'chat',
      }
    }
    // Layered merge: type defaults < tool defaults < backend config.
    const data = {
      ...(defaultValue as Record<string, unknown>),
      title,
      desc,
      type: nodeType,
      selected: false,
      ...(toolDefaultValue || {}),
      // Apply backend-generated config (url, method, headers, etc.)
      ...mergedConfig,
    }
    const newNode = generateNewNode({
      id: uuid4(),
      type: getNodeCustomTypeByNodeDataType(nodeType),
      data,
      position: nodeSpec.position || { x: 0, y: 0 },
    }).newNode
    newNodes.push(newNode)
    nodeIdMap.set(nodeSpec.id, newNode)
  }
  // Replace variable references in all node configs using the nodeIdMap
  // (backend ids -> real canvas ids).
  for (const node of newNodes) {
    node.data = replaceVariableReferences(node.data, nodeIdMap) as typeof node.data
  }
  if (!newNodes.length) {
    Toast.notify({ type: 'error', message: t('workflow.vibe.invalidFlowchart') })
    return { nodes: [], edges: [] }
  }
  // Construct a canvas edge between two resolved nodes with standard metadata.
  const buildEdge = (
    source: Node,
    target: Node,
    sourceHandle = 'source',
    targetHandle = 'target',
  ): Edge => ({
    id: `${source.id}-${sourceHandle}-${target.id}-${targetHandle}`,
    type: CUSTOM_EDGE,
    source: source.id,
    sourceHandle,
    target: target.id,
    targetHandle,
    data: {
      sourceType: source.data.type,
      targetType: target.data.type,
      isInIteration: false,
      isInLoop: false,
      _connectedNodeIsSelected: false,
    },
    zIndex: 0,
  })
  const newEdges: Edge[] = []
  for (const edgeSpec of backendEdges) {
    const sourceNode = nodeIdMap.get(edgeSpec.source)
    const targetNode = nodeIdMap.get(edgeSpec.target)
    // Drop edges whose endpoints were skipped (unknown type / missing default).
    if (!sourceNode || !targetNode)
      continue
    let sourceHandle = edgeSpec.sourceHandle || 'source'
    // Handle IfElse branch handles
    if (sourceNode.data.type === BlockEnum.IfElse && !edgeSpec.sourceHandle) {
      sourceHandle = 'source'
    }
    newEdges.push(buildEdge(sourceNode, targetNode, sourceHandle, edgeSpec.targetHandle || 'target'))
  }
  // Layout nodes: anchor the new subgraph to the right of existing content.
  const bounds = nodes.reduce(
    (acc, node) => {
      const width = node.width ?? NODE_WIDTH
      acc.maxX = Math.max(acc.maxX, node.position.x + width)
      acc.minY = Math.min(acc.minY, node.position.y)
      return acc
    },
    { maxX: 0, minY: 0 },
  )
  const baseX = nodes.length ? bounds.maxX + NODE_WIDTH_X_OFFSET : 0
  // NOTE(review): minY starts at 0, so Number.isFinite is always true here;
  // the guard only matters if the accumulator seed ever changes.
  const baseY = Number.isFinite(bounds.minY) ? bounds.minY : 0
  const branchOffset = Math.max(120, NODE_WIDTH_X_OFFSET / 2)
  const layoutNodeIds = new Set(newNodes.map(node => node.id))
  // Only edges fully inside the new subgraph participate in layout.
  const layoutEdges = newEdges.filter(edge =>
    layoutNodeIds.has(edge.source) && layoutNodeIds.has(edge.target),
  )
  try {
    // Preferred: dagre auto-layout offset by the computed base position.
    const layout = await getLayoutByDagre(newNodes, layoutEdges)
    const layoutedNodes = newNodes.map((node) => {
      const info = layout.nodes.get(node.id)
      if (!info)
        return node
      return {
        ...node,
        position: {
          x: baseX + info.x,
          y: baseY + info.y,
        },
      }
    })
    newNodes.splice(0, newNodes.length, ...layoutedNodes)
  }
  catch {
    // Fallback: simple 4-column grid if dagre layout fails.
    newNodes.forEach((node, index) => {
      const row = Math.floor(index / 4)
      const col = index % 4
      node.position = {
        x: baseX + col * NODE_WIDTH_X_OFFSET,
        y: baseY + row * branchOffset,
      }
    })
  }
  return {
    nodes: newNodes,
    edges: newEdges,
  }
}, [
  defaultModel,
  nodeTypeLookup,
  nodesMetaDataMap,
  store,
  t,
  toolLookup,
])
// Apply backend-provided node/edge specs straight to the canvas, bypassing
// mermaid parsing: build the graph, wire handle-connection state, persist to
// history/draft, and close the vibe panel.
const applyBackendNodesToWorkflow = useCallback(async (
  backendNodes: BackendNodeSpec[],
  backendEdges: BackendEdgeSpec[],
) => {
  const { getNodes, setNodes, edges, setEdges } = store.getState()
  const existingNodes = getNodes()
  const { setShowVibePanel } = workflowStore.getState()

  const generated = await createGraphFromBackendNodes(backendNodes, backendEdges)
  const generatedNodes = generated.nodes
  const generatedEdges = generated.edges
  if (generatedNodes.length === 0) {
    // Nothing usable came back — just dismiss the panel.
    setShowVibePanel(false)
    return
  }

  const combinedNodes = [...existingNodes, ...generatedNodes]
  // Compute which source/target handles each node is now connected on.
  const connectionMap = getNodesConnectedSourceOrTargetHandleIdsMap(
    generatedEdges.map(edge => ({ type: 'add', edge })),
    combinedNodes,
  )
  const nodesWithHandleState = combinedNodes.map((node) => {
    const connection = connectionMap[node.id]
    if (!connection)
      return node
    const data = {
      ...node.data,
      ...connection,
      _connectedSourceHandleIds: dedupeHandles(connection._connectedSourceHandleIds),
      _connectedTargetHandleIds: dedupeHandles(connection._connectedTargetHandleIds),
    }
    return { ...node, data }
  })

  setNodes(nodesWithHandleState)
  setEdges([...edges, ...generatedEdges])
  saveStateToHistory(WorkflowHistoryEvent.NodeAdd, { nodeId: generatedNodes[0].id })
  handleSyncWorkflowDraft()
  workflowStore.setState(state => ({
    ...state,
    showVibePanel: false,
    vibePanelMermaidCode: '',
  }))
}, [
  createGraphFromBackendNodes,
  handleSyncWorkflowDraft,
  saveStateToHistory,
  store,
])
const flowchartToWorkflowGraph = useCallback(async (mermaidCode: string): Promise<FlowGraph> => {
const { getNodes } = store.getState()
const nodes = getNodes()
@ -699,7 +1092,7 @@ export const useWorkflowVibe = () => {
nodes: updatedNodes,
edges: newEdges,
}
}, [nodeTypeLookup, toolLookup])
}, [nodeTypeLookup, nodesMetaDataMap, store, t, toolLookup])
const applyFlowchartToWorkflow = useCallback(() => {
if (!currentFlowGraph || !currentFlowGraph.nodes || currentFlowGraph.nodes.length === 0) {
@ -724,15 +1117,16 @@ export const useWorkflowVibe = () => {
}, [
currentFlowGraph,
handleSyncWorkflowDraft,
nodeTypeLookup,
nodesMetaDataMap,
saveStateToHistory,
store,
t,
toolLookup,
])
const handleVibeCommand = useCallback(async (dsl?: string, skipPanelPreview = false) => {
const handleVibeCommand = useCallback(async (
dsl?: string,
skipPanelPreview = false,
regenerateMode = false,
) => {
if (getNodesReadOnly()) {
Toast.notify({ type: 'error', message: t('workflow.vibe.readOnly') })
return
@ -768,6 +1162,9 @@ export const useWorkflowVibe = () => {
isVibeGenerating: true,
vibePanelMermaidCode: '',
vibePanelInstruction: trimmed,
vibePanelIntent: '',
vibePanelMessage: '',
vibePanelSuggestions: [],
}))
try {
@ -790,71 +1187,173 @@ export const useWorkflowVibe = () => {
tool_name: tool.tool_name,
tool_label: tool.tool_label,
tool_key: `${tool.provider_id}/${tool.tool_name}`,
tool_description: tool.tool_description,
is_team_authorization: tool.is_team_authorization,
// Include parameter schemas so backend can inform model how to use tools
parameters: tool.paramSchemas,
output_schema: tool.output_schema,
}))
const availableNodesPayload = availableNodesList.map(node => ({
type: node.type,
title: node.title,
description: node.description,
}))
const stream = await generateFlowchart({
instruction: trimmed,
model_config: latestModelConfig!,
existing_nodes: existingNodesPayload,
tools: toolsPayload,
regenerate_mode: regenerateMode,
})
let mermaidCode = trimmed
if (!isMermaidFlowchart(trimmed)) {
const { error, flowchart } = await generateFlowchart({
instruction: trimmed,
model_config: latestModelConfig,
available_nodes: availableNodesPayload,
existing_nodes: existingNodesPayload,
available_tools: toolsPayload,
})
let mermaidCode = ''
let backendNodes: BackendNodeSpec[] | undefined
let backendEdges: BackendEdgeSpec[] | undefined
if (error) {
Toast.notify({ type: 'error', message: error })
setIsVibeGenerating(false)
return
const reader = stream.getReader()
const decoder = new TextDecoder()
while (true) {
const { done, value } = await reader.read()
if (done)
break
const chunk = decoder.decode(value)
const lines = chunk.split('\n')
for (const line of lines) {
if (!line.trim() || !line.startsWith('data: '))
continue
try {
const data = JSON.parse(line.slice(6))
if (data.event === 'message' || data.event === 'workflow_generated') {
if (data.data?.text) {
mermaidCode += data.data.text
workflowStore.setState(state => ({
...state,
vibePanelMermaidCode: mermaidCode,
}))
}
if (data.data?.nodes) {
backendNodes = data.data.nodes
workflowStore.setState(state => ({
...state,
vibePanelBackendNodes: backendNodes,
}))
}
if (data.data?.edges) {
backendEdges = data.data.edges
workflowStore.setState(state => ({
...state,
vibePanelBackendEdges: backendEdges,
}))
}
if (data.data?.intent) {
workflowStore.setState(state => ({
...state,
vibePanelIntent: data.data.intent,
}))
}
if (data.data?.message) {
workflowStore.setState(state => ({
...state,
vibePanelMessage: data.data.message,
}))
}
if (data.data?.suggestions) {
workflowStore.setState(state => ({
...state,
vibePanelSuggestions: data.data.suggestions,
}))
}
}
}
catch (e) {
console.error('Error parsing chunk:', e)
}
}
if (!flowchart) {
Toast.notify({ type: 'error', message: t('workflow.vibe.missingFlowchart') })
setIsVibeGenerating(false)
return
}
mermaidCode = flowchart
}
workflowStore.setState(state => ({
...state,
vibePanelMermaidCode: mermaidCode,
isVibeGenerating: false,
}))
setIsVibeGenerating(false)
const workflowGraph = await flowchartToWorkflowGraph(mermaidCode)
addVersion(workflowGraph)
// Add version for preview
if (backendNodes && backendNodes.length > 0 && backendEdges) {
const graph = await createGraphFromBackendNodes(backendNodes, backendEdges)
addVersion(graph)
}
else if (mermaidCode) {
const graph = await flowchartToWorkflowGraph(mermaidCode)
addVersion(graph)
}
if (skipPanelPreview)
applyFlowchartToWorkflow()
if (skipPanelPreview) {
// Prefer backend nodes (already sanitized) over mermaid re-parsing
if (backendNodes && backendNodes.length > 0 && backendEdges) {
await applyBackendNodesToWorkflow(backendNodes, backendEdges)
}
else {
await applyFlowchartToWorkflow()
}
}
}
catch (error: unknown) {
// Handle API errors (e.g., network errors, server errors)
const { setIsVibeGenerating } = workflowStore.getState()
setIsVibeGenerating(false)
// Extract error message from Response object or Error
let errorMessage = t('workflow.vibe.generateError')
if (error instanceof Response) {
try {
const errorData = await error.json()
errorMessage = errorData?.message || errorMessage
}
catch {
// If we can't parse the response, use the default error message
}
}
else if (error instanceof Error) {
errorMessage = error.message || errorMessage
}
Toast.notify({ type: 'error', message: errorMessage })
}
finally {
isGeneratingRef.current = false
}
}, [
availableNodesList,
addVersion,
applyBackendNodesToWorkflow,
applyFlowchartToWorkflow,
createGraphFromBackendNodes,
flowchartToWorkflowGraph,
getLatestModelConfig,
getNodesReadOnly,
handleSyncWorkflowDraft,
nodeTypeLookup,
nodesMetaDataMap,
saveStateToHistory,
store,
t,
toolLookup,
toolOptions,
getLatestModelConfig,
])
const handleAccept = useCallback(() => {
applyFlowchartToWorkflow()
}, [applyFlowchartToWorkflow])
// Re-run generation for the last submitted instruction, enabling regenerate
// mode so the previous workflow is included as context.
const handleRegenerate = useCallback(async () => {
  const instruction = lastInstructionRef.current
  if (!instruction) {
    Toast.notify({ type: 'error', message: t('workflow.vibe.missingInstruction') })
    return
  }
  await handleVibeCommand(instruction, false, true)
}, [handleVibeCommand, t])
// Accept the current vibe preview and apply it to the canvas.
// Backend-provided node specs (already sanitized server-side) are preferred;
// otherwise fall back to re-applying the parsed mermaid flow graph.
// The parameter is unused (underscore-prefixed per convention) but retained for
// call-site compatibility: the mermaid fallback reads currentFlowGraph, which
// addVersion already populated, rather than re-parsing the code passed in.
const handleAccept = useCallback(async (_vibePanelMermaidCode: string | undefined) => {
  const { vibePanelBackendNodes, vibePanelBackendEdges } = workflowStore.getState()
  if (vibePanelBackendNodes && vibePanelBackendNodes.length > 0 && vibePanelBackendEdges) {
    await applyBackendNodesToWorkflow(vibePanelBackendNodes, vibePanelBackendEdges)
  }
  else {
    // Use applyFlowchartToWorkflow which uses currentFlowGraph (populated by addVersion)
    applyFlowchartToWorkflow()
  }
}, [applyBackendNodesToWorkflow, applyFlowchartToWorkflow])
useEffect(() => {
const handler = (event: CustomEvent<VibeCommandDetail>) => {
@ -862,7 +1361,7 @@ export const useWorkflowVibe = () => {
}
const acceptHandler = () => {
handleAccept()
handleAccept(undefined)
}
document.addEventListener(VIBE_COMMAND_EVENT, handler as EventListener)

View File

@ -1390,9 +1390,9 @@ export const getNodeUsedVars = (node: Node): ValueSelector[] => {
payload.url,
payload.headers,
payload.params,
typeof payload.body.data === 'string'
typeof payload.body?.data === 'string'
? payload.body.data
: payload.body.data.map(d => d.value).join(''),
: (payload.body?.data?.map(d => d.value).join('') ?? ''),
])
break
}

View File

@ -5,6 +5,9 @@ import { useCallback, useEffect, useState } from 'react'
const UNIQUE_ID_PREFIX = 'key-value-'
const strToKeyValueList = (value: string) => {
if (typeof value !== 'string' || !value)
return []
return value.split('\n').map((item) => {
const [key, ...others] = item.split(':')
return {
@ -16,7 +19,7 @@ const strToKeyValueList = (value: string) => {
}
const useKeyValueList = (value: string, onChange: (value: string) => void, noFilter?: boolean) => {
const [list, doSetList] = useState<KeyValue[]>(() => value ? strToKeyValueList(value) : [])
const [list, doSetList] = useState<KeyValue[]>(() => typeof value === 'string' && value ? strToKeyValueList(value) : [])
const setList = (l: KeyValue[]) => {
doSetList(l.map((item) => {
return {

View File

@ -3,7 +3,7 @@
import type { FC } from 'react'
import type { FormValue } from '@/app/components/header/account-setting/model-provider-page/declarations'
import type { CompletionParams, Model } from '@/types/app'
import { RiClipboardLine } from '@remixicon/react'
import { RiCheckLine, RiClipboardLine, RiInformation2Line, RiRefreshLine } from '@remixicon/react'
import copy from 'copy-to-clipboard'
import { useCallback, useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
@ -39,6 +39,14 @@ const VibePanel: FC = () => {
const vibePanelPreviewNodes = currentFlowGraph?.nodes || []
const vibePanelPreviewEdges = currentFlowGraph?.edges || []
const setVibePanelInstruction = useStore(s => s.setVibePanelInstruction)
const vibePanelIntent = useStore(s => s.vibePanelIntent)
const setVibePanelIntent = useStore(s => s.setVibePanelIntent)
const vibePanelMessage = useStore(s => s.vibePanelMessage)
const setVibePanelMessage = useStore(s => s.setVibePanelMessage)
const vibePanelSuggestions = useStore(s => s.vibePanelSuggestions)
const setVibePanelSuggestions = useStore(s => s.setVibePanelSuggestions)
const localModel = localStorage.getItem('auto-gen-model')
? JSON.parse(localStorage.getItem('auto-gen-model') as string) as Model
@ -97,13 +105,13 @@ const VibePanel: FC = () => {
}, [workflowStore])
const handleClose = useCallback(() => {
workflowStore.setState(state => ({
...state,
showVibePanel: false,
vibePanelMermaidCode: '',
isVibeGenerating: false,
}))
}, [workflowStore])
setShowVibePanel(false)
setVibePanelMermaidCode('')
setIsVibeGenerating(false)
setVibePanelIntent('')
setVibePanelMessage('')
setVibePanelSuggestions([])
}, [setShowVibePanel, setVibePanelMermaidCode, setIsVibeGenerating, setVibePanelIntent, setVibePanelMessage, setVibePanelSuggestions])
const handleGenerate = useCallback(() => {
const event = new CustomEvent(VIBE_COMMAND_EVENT, {
@ -124,6 +132,15 @@ const VibePanel: FC = () => {
Toast.notify({ type: 'success', message: t('common.actionMsg.copySuccessfully') })
}, [workflowStore, t])
const handleSuggestionClick = useCallback((suggestion: string) => {
setVibePanelInstruction(suggestion)
// Trigger generation with the suggestion
const event = new CustomEvent(VIBE_COMMAND_EVENT, {
detail: { dsl: suggestion },
})
document.dispatchEvent(event)
}, [setVibePanelInstruction])
if (!showVibePanel)
return null
@ -134,6 +151,40 @@ const VibePanel: FC = () => {
</div>
)
// Rendered when the backend classifies the instruction as off-topic:
// shows an explanatory message plus clickable workflow suggestions.
const renderOffTopic = (
  <div className="flex h-full w-0 grow flex-col items-center justify-center bg-background-default-subtle p-6">
    <div className="flex max-w-[400px] flex-col items-center text-center">
      {/* Warning badge */}
      <div className="mb-4 flex h-12 w-12 items-center justify-center rounded-full bg-state-warning-hover">
        <RiInformation2Line className="h-6 w-6 text-text-warning" />
      </div>
      <div className="mb-2 text-base font-semibold text-text-primary">
        {t('workflow.vibe.offTopicTitle')}
      </div>
      {/* Backend-provided explanation, falling back to a generic default */}
      <div className="mb-6 text-sm text-text-secondary">
        {vibePanelMessage || t('workflow.vibe.offTopicDefault')}
      </div>
      {vibePanelSuggestions.length > 0 && (
        <div className="w-full">
          <div className="mb-3 text-xs font-medium text-text-tertiary">
            {t('workflow.vibe.trySuggestion')}
          </div>
          {/* Each suggestion re-submits itself as a new instruction on click */}
          <div className="flex flex-col gap-2">
            {vibePanelSuggestions.map((suggestion, index) => (
              <button
                key={index}
                onClick={() => handleSuggestionClick(suggestion)}
                className="w-full rounded-lg border border-divider-regular bg-components-panel-bg px-4 py-2.5 text-left text-sm text-text-secondary transition-colors hover:border-components-button-primary-border hover:bg-state-accent-hover"
              >
                {suggestion}
              </button>
            ))}
          </div>
        </div>
      )}
    </div>
  </div>
)
return (
<Modal
isShow={showVibePanel}
@ -184,7 +235,8 @@ const VibePanel: FC = () => {
</div>
</div>
{!isVibeGenerating && vibePanelPreviewNodes.length > 0 && (
{!isVibeGenerating && vibePanelIntent === 'off_topic' && renderOffTopic}
{!isVibeGenerating && vibePanelIntent !== 'off_topic' && (vibePanelPreviewNodes.length > 0 || vibePanelMermaidCode) && (
<div className="h-full w-0 grow bg-background-default-subtle p-6 pb-0">
<div className="flex h-full flex-col">
<div className="mb-3 flex shrink-0 items-center justify-between">
@ -226,7 +278,7 @@ const VibePanel: FC = () => {
</div>
)}
{isVibeGenerating && renderLoading}
{!isVibeGenerating && vibePanelPreviewNodes.length === 0 && <ResPlaceholder />}
{!isVibeGenerating && vibePanelIntent !== 'off_topic' && vibePanelPreviewNodes.length === 0 && !vibePanelMermaidCode && <ResPlaceholder />}
</div>
</Modal>
)

View File

@ -1,5 +1,8 @@
import type { BackendEdgeSpec, BackendNodeSpec } from '@/service/debug'
import type { StateCreator } from 'zustand'
export type VibeIntent = 'generate' | 'off_topic' | 'error' | ''
export type PanelSliceShape = {
panelWidth: number
showFeaturesPanel: boolean
@ -26,6 +29,24 @@ export type PanelSliceShape = {
setInitShowLastRunTab: (initShowLastRunTab: boolean) => void
showVibePanel: boolean
setShowVibePanel: (showVibePanel: boolean) => void
vibePanelMermaidCode: string
setVibePanelMermaidCode: (vibePanelMermaidCode: string) => void
vibePanelBackendNodes?: BackendNodeSpec[]
setVibePanelBackendNodes: (nodes?: BackendNodeSpec[]) => void
vibePanelBackendEdges?: BackendEdgeSpec[]
setVibePanelBackendEdges: (edges?: BackendEdgeSpec[]) => void
isVibeGenerating: boolean
setIsVibeGenerating: (isVibeGenerating: boolean) => void
vibePanelInstruction: string
setVibePanelInstruction: (vibePanelInstruction: string) => void
vibePanelIntent: VibeIntent
setVibePanelIntent: (vibePanelIntent: VibeIntent) => void
vibePanelMessage: string
setVibePanelMessage: (vibePanelMessage: string) => void
vibePanelSuggestions: string[]
setVibePanelSuggestions: (vibePanelSuggestions: string[]) => void
vibePanelLastWarnings: string[]
setVibePanelLastWarnings: (vibePanelLastWarnings: string[]) => void
}
export const createPanelSlice: StateCreator<PanelSliceShape> = set => ({
@ -48,4 +69,22 @@ export const createPanelSlice: StateCreator<PanelSliceShape> = set => ({
setInitShowLastRunTab: initShowLastRunTab => set(() => ({ initShowLastRunTab })),
showVibePanel: false,
setShowVibePanel: showVibePanel => set(() => ({ showVibePanel })),
vibePanelMermaidCode: '',
setVibePanelMermaidCode: vibePanelMermaidCode => set(() => ({ vibePanelMermaidCode })),
vibePanelBackendNodes: undefined,
setVibePanelBackendNodes: vibePanelBackendNodes => set(() => ({ vibePanelBackendNodes })),
vibePanelBackendEdges: undefined,
setVibePanelBackendEdges: vibePanelBackendEdges => set(() => ({ vibePanelBackendEdges })),
isVibeGenerating: false,
setIsVibeGenerating: isVibeGenerating => set(() => ({ isVibeGenerating })),
vibePanelInstruction: '',
setVibePanelInstruction: vibePanelInstruction => set(() => ({ vibePanelInstruction })),
vibePanelIntent: '',
setVibePanelIntent: vibePanelIntent => set(() => ({ vibePanelIntent })),
vibePanelMessage: '',
setVibePanelMessage: vibePanelMessage => set(() => ({ vibePanelMessage })),
vibePanelSuggestions: [],
setVibePanelSuggestions: vibePanelSuggestions => set(() => ({ vibePanelSuggestions })),
vibePanelLastWarnings: [],
setVibePanelLastWarnings: vibePanelLastWarnings => set(() => ({ vibePanelLastWarnings })),
})

View File

@ -140,6 +140,10 @@ const translation = {
regenerate: 'Regenerate',
apply: 'Apply',
noFlowchart: 'No flowchart provided',
offTopicDefault: 'I\'m the Dify workflow design assistant. I can help you create AI automation workflows, but I can\'t answer general questions. Would you like to create a workflow instead?',
offTopicTitle: 'Off-Topic Request',
trySuggestion: 'Try one of these suggestions:',
generateError: 'Failed to generate workflow. Please try again.',
},
publishLimit: {
startNodeTitlePrefix: 'Upgrade to',

View File

@ -19,8 +19,45 @@ export type GenRes = {
error?: string
}
/**
 * Backend guidance when a requested capability maps to tools the user has not
 * configured, including configured alternatives and a textual recommendation.
 */
export type ToolRecommendation = {
  requested_capability: string
  unconfigured_tools: Array<{
    provider_id: string
    tool_name: string
    description: string
  }>
  configured_alternatives: Array<{
    provider_id: string
    tool_name: string
    description: string
  }>
  recommendation: string
}

/**
 * Node spec emitted by the backend workflow generator; `config` carries
 * node-type-specific settings that the frontend merges over node defaults.
 */
export type BackendNodeSpec = {
  id: string
  type: string
  title?: string
  config?: Record<string, any>
  position?: { x: number; y: number }
}

/** Edge spec emitted by the backend; handles default to 'source'/'target' when omitted. */
export type BackendEdgeSpec = {
  source: string
  target: string
  sourceHandle?: string
  targetHandle?: string
}

/**
 * Full flowchart-generation response. `intent` distinguishes a real generation
 * from an off-topic reply (which carries `message`/`suggestions` instead of
 * nodes). `flowchart` is the mermaid fallback when structured nodes are absent.
 */
export type FlowchartGenRes = {
  intent?: 'generate' | 'off_topic' | 'error'
  flowchart: string
  nodes?: BackendNodeSpec[]
  edges?: BackendEdgeSpec[]
  message?: string
  warnings?: string[]
  suggestions?: string[]
  tool_recommendations?: ToolRecommendation[]
  error?: string
}