diff --git a/AGENTS.md b/AGENTS.md
index 782861ad36..a231e6cf95 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -1,3 +1,22 @@
+
+# OpenSpec Instructions
+
+These instructions are for AI assistants working in this project.
+
+Always open `@/openspec/AGENTS.md` when the request:
+- Mentions planning or proposals (words like proposal, spec, change, plan)
+- Introduces new capabilities, breaking changes, architecture shifts, or big performance/security work
+- Sounds ambiguous and you need the authoritative spec before coding
+
+Use `@/openspec/AGENTS.md` to learn:
+- How to create and apply change proposals
+- Spec format and conventions
+- Project structure and guidelines
+
+Keep this managed block so 'openspec update' can refresh the instructions.
+
+
+
 # AGENTS.md
 
 ## Project Overview
diff --git a/api/controllers/console/app/generator.py b/api/controllers/console/app/generator.py
index 1a1cde7329..2f1b3a0db4 100644
--- a/api/controllers/console/app/generator.py
+++ b/api/controllers/console/app/generator.py
@@ -55,12 +55,28 @@ class InstructionTemplatePayload(BaseModel):
     type: str = Field(..., description="Instruction template type")
 
 
+class PreviousWorkflow(BaseModel):
+    """Previous workflow attempt for regeneration context."""
+
+    nodes: list[dict[str, Any]] = Field(default_factory=list, description="Previously generated nodes")
+    edges: list[dict[str, Any]] = Field(default_factory=list, description="Previously generated edges")
+    warnings: list[str] = Field(default_factory=list, description="Warnings from previous generation")
+
+
 class FlowchartGeneratePayload(BaseModel):
     instruction: str = Field(..., description="Workflow flowchart generation instruction")
     model_config_data: dict[str, Any] = Field(..., alias="model_config", description="Model configuration")
     available_nodes: list[dict[str, Any]] = Field(default_factory=list, description="Available node types")
     existing_nodes: list[dict[str, Any]] = Field(default_factory=list, description="Existing workflow nodes")
     available_tools: list[dict[str, Any]] = Field(default_factory=list, description="Available tools")
+    selected_node_ids: list[str] = Field(default_factory=list, description="IDs of selected nodes for context")
+    # Phase 10: Regenerate with previous workflow context
+    previous_workflow: PreviousWorkflow | None = Field(default=None, description="Previous workflow for regeneration")
+    regenerate_mode: bool = Field(default=False, description="Whether this is a regeneration request")
+    # Language preference for generated content (node titles, descriptions)
+    language: str | None = Field(default=None, description="Preferred language for generated content")
+    # Available models that user has configured (for LLM/question-classifier nodes)
+    available_models: list[dict[str, Any]] = Field(default_factory=list, description="User's configured models")
 
 
 def reg(cls: type[BaseModel]):
@@ -267,7 +283,7 @@ class InstructionGenerateApi(Resource):
 @console_ns.route("/flowchart-generate")
 class FlowchartGenerateApi(Resource):
     @console_ns.doc("generate_workflow_flowchart")
-    @console_ns.doc(description="Generate workflow flowchart using LLM")
+    @console_ns.doc(description="Generate workflow flowchart using LLM with intent classification")
     @console_ns.expect(console_ns.models[FlowchartGeneratePayload.__name__])
     @console_ns.response(200, "Flowchart generated successfully")
     @console_ns.response(400, "Invalid request parameters")
@@ -280,6 +296,15 @@ class FlowchartGenerateApi(Resource):
         _, current_tenant_id = current_account_with_tenant()
 
         try:
+            # Convert PreviousWorkflow to dict if present
+            previous_workflow_dict = None
+            if args.previous_workflow:
+                previous_workflow_dict = {
+                    "nodes": args.previous_workflow.nodes,
+                    "edges": args.previous_workflow.edges,
+                    "warnings": args.previous_workflow.warnings,
+                }
+
             result = LLMGenerator.generate_workflow_flowchart(
                 tenant_id=current_tenant_id,
                 instruction=args.instruction,
@@ -287,6 +312,11 @@ class FlowchartGenerateApi(Resource):
                 available_nodes=args.available_nodes,
                 existing_nodes=args.existing_nodes,
                 available_tools=args.available_tools,
+                selected_node_ids=args.selected_node_ids,
+                previous_workflow=previous_workflow_dict,
+                regenerate_mode=args.regenerate_mode,
+                preferred_language=args.language,
+                available_models=args.available_models,
             )
         except ProviderTokenNotInitError as ex:
             raise ProviderNotInitializeError(ex.description)
diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py
index 4cc60a4878..21b4ed38f4 100644
--- a/api/core/llm_generator/llm_generator.py
+++ b/api/core/llm_generator/llm_generator.py
@@ -18,7 +18,6 @@ from core.llm_generator.prompts import (
     SUGGESTED_QUESTIONS_MAX_TOKENS,
     SUGGESTED_QUESTIONS_TEMPERATURE,
     SYSTEM_STRUCTURED_OUTPUT_GENERATE,
-    WORKFLOW_FLOWCHART_PROMPT_TEMPLATE,
     WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE,
 )
 from core.model_manager import ModelManager
@@ -295,20 +294,62 @@ class LLMGenerator:
         available_nodes: Sequence[dict[str, object]] | None = None,
         existing_nodes: Sequence[dict[str, object]] | None = None,
         available_tools: Sequence[dict[str, object]] | None = None,
+        selected_node_ids: Sequence[str] | None = None,
+        previous_workflow: dict[str, object] | None = None,
+        regenerate_mode: bool = False,
+        preferred_language: str | None = None,
+        available_models: Sequence[dict[str, object]] | None = None,
     ):
-        model_parameters = model_config.get("completion_params", {})
-        prompt_template = PromptTemplateParser(WORKFLOW_FLOWCHART_PROMPT_TEMPLATE)
-        prompt_generate = prompt_template.format(
-            inputs={
-                "TASK_DESCRIPTION": instruction,
-                "AVAILABLE_NODES": json.dumps(available_nodes or [], ensure_ascii=False),
-                "EXISTING_NODES": json.dumps(existing_nodes or [], ensure_ascii=False),
-                "AVAILABLE_TOOLS": json.dumps(available_tools or [], ensure_ascii=False),
-            },
-            remove_template_variables=False,
+        """
+        Generate workflow flowchart with enhanced prompts and inline intent classification.
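+
+        Intent classification happens inline in a single LLM call (no separate
+        ReAct loop), so the endpoint stays a single request/response.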
+ + Returns a dict with: + - intent: "generate" | "off_topic" | "error" + - flowchart: Mermaid syntax string (for generate intent) + - message: User-friendly explanation + - warnings: List of validation warnings + - suggestions: List of workflow suggestions (for off_topic intent) + - error: Error message if generation failed + """ + from core.llm_generator.vibe_prompts import ( + build_vibe_enhanced_prompt, + extract_mermaid_from_response, + parse_vibe_response, + sanitize_tool_nodes, + validate_node_parameters, + validate_tool_references, ) - prompt_messages = [UserPromptMessage(content=prompt_generate)] + model_parameters = model_config.get("completion_params", {}) + + # Build enhanced prompts with context + system_prompt, user_prompt = build_vibe_enhanced_prompt( + instruction=instruction, + available_nodes=list(available_nodes) if available_nodes else None, + available_tools=list(available_tools) if available_tools else None, + existing_nodes=list(existing_nodes) if existing_nodes else None, + selected_node_ids=list(selected_node_ids) if selected_node_ids else None, + previous_workflow=dict(previous_workflow) if previous_workflow else None, + regenerate_mode=regenerate_mode, + preferred_language=preferred_language, + available_models=list(available_models) if available_models else None, + ) + + prompt_messages: list[PromptMessage] = [ + SystemPromptMessage(content=system_prompt), + UserPromptMessage(content=user_prompt), + ] + + # DEBUG: Log model input + logger.debug("=" * 80) + logger.debug("[VIBE] generate_workflow_flowchart - MODEL INPUT") + logger.debug("=" * 80) + logger.debug("[VIBE] Instruction: %s", instruction) + logger.debug("[VIBE] Model: %s/%s", model_config.get("provider", ""), model_config.get("name", "")) + system_prompt_log = system_prompt[:2000] + "..." 
if len(system_prompt) > 2000 else system_prompt + logger.debug("[VIBE] System Prompt:\n%s", system_prompt_log) + logger.debug("[VIBE] User Prompt:\n%s", user_prompt) + logger.debug("=" * 80) model_manager = ModelManager() model_instance = model_manager.get_model_instance( @@ -318,9 +359,6 @@ class LLMGenerator: model=model_config.get("name", ""), ) - flowchart = "" - error = "" - try: response: LLMResult = model_instance.invoke_llm( prompt_messages=list(prompt_messages), @@ -328,18 +366,110 @@ class LLMGenerator: stream=False, ) content = response.message.get_text_content() + + # DEBUG: Log model output + logger.debug("=" * 80) + logger.debug("[VIBE] generate_workflow_flowchart - MODEL OUTPUT") + logger.debug("=" * 80) + logger.debug("[VIBE] Raw Response:\n%s", content) + logger.debug("=" * 80) if not isinstance(content, str): raise ValueError("Flowchart response is not a string") - match = re.search(r"```(?:mermaid)?\s*([\s\S]+?)```", content, flags=re.IGNORECASE) - flowchart = (match.group(1) if match else content).strip() + # Parse the enhanced response format + parsed = parse_vibe_response(content) + + # DEBUG: Log parsed result + logger.debug("[VIBE] Parsed Response:") + logger.debug("[VIBE] intent: %s", parsed.get("intent")) + logger.debug("[VIBE] message: %s", parsed.get("message", "")[:200] if parsed.get("message") else "") + logger.debug("[VIBE] mermaid: %s", parsed.get("mermaid", "")[:500] if parsed.get("mermaid") else "") + logger.debug("[VIBE] warnings: %s", parsed.get("warnings", [])) + logger.debug("[VIBE] suggestions: %s", parsed.get("suggestions", [])) + if parsed.get("error"): + logger.debug("[VIBE] error: %s", parsed.get("error")) + logger.debug("=" * 80) + + # Handle error case from parsing + if parsed.get("intent") == "error": + # Fall back to legacy parsing for backwards compatibility + match = re.search(r"```(?:mermaid)?\s*([\s\S]+?)```", content, flags=re.IGNORECASE) + flowchart = (match.group(1) if match else content).strip() + return { + "intent": "generate", + "flowchart": flowchart, + "message": "", + "warnings": [], + "tool_recommendations": [], + "error": "", + } + + # Handle off_topic case + if parsed.get("intent") == "off_topic": + return { + "intent": "off_topic", + "flowchart": "", + "message": parsed.get("message", ""), + "suggestions": parsed.get("suggestions", []), + "warnings": [], + "tool_recommendations": [], + "error": "", + } + + # Handle generate case + flowchart = extract_mermaid_from_response(parsed) + + # Sanitize tool nodes - replace invalid tools with fallback nodes + original_nodes = parsed.get("nodes", []) + sanitized_nodes, sanitize_warnings = sanitize_tool_nodes( + original_nodes, + list(available_tools) if available_tools else None, + ) + # Update parsed nodes with sanitized version + parsed["nodes"] = sanitized_nodes + + # Validate tool references and get recommendations for unconfigured tools + validation_warnings, tool_recommendations = validate_tool_references( + sanitized_nodes, + list(available_tools) if available_tools else None, + ) + + # Validate node parameters are properly filled (Phase 9: Auto-Fill) + param_warnings = validate_node_parameters(sanitized_nodes) + + existing_warnings = parsed.get("warnings", []) + all_warnings = existing_warnings + sanitize_warnings + validation_warnings + param_warnings + + return { + "intent": "generate", + "flowchart": flowchart, + "nodes": sanitized_nodes, # Include sanitized nodes in response + "edges": parsed.get("edges", []), + "message": parsed.get("message", ""), + "warnings": 
all_warnings,
+                "tool_recommendations": tool_recommendations,
+                "error": "",
+            }
+
         except InvokeError as e:
-            error = str(e)
+            return {
+                "intent": "error",
+                "flowchart": "",
+                "message": "",
+                "warnings": [],
+                "tool_recommendations": [],
+                "error": str(e),
+            }
         except Exception as e:
             logger.exception("Failed to generate workflow flowchart, model: %s", model_config.get("name"))
-            error = str(e)
-
-        return {"flowchart": flowchart, "error": error}
+            return {
+                "intent": "error",
+                "flowchart": "",
+                "message": "",
+                "warnings": [],
+                "tool_recommendations": [],
+                "error": str(e),
+            }
 
     @classmethod
     def generate_code(cls, tenant_id: str, instruction: str, model_config: dict, code_language: str = "javascript"):
diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py
index 766ae07231..76d7231b55 100644
--- a/api/core/llm_generator/prompts.py
+++ b/api/core/llm_generator/prompts.py
@@ -147,6 +147,8 @@ WORKFLOW_FLOWCHART_PROMPT_TEMPLATE = """
 You are an expert workflow designer. Generate a Mermaid flowchart based on the user's request.
 
 Constraints:
+- Detect the language of the user's request. Generate all node titles in the same language as the user's input.
+- If the input language cannot be determined, use {{PREFERRED_LANGUAGE}} as the fallback language.
 - Use only node types listed in <AVAILABLE_NODES>.
 - Use only tools listed in <AVAILABLE_TOOLS>. When using a tool node, set type=tool and tool=<tool_key>.
 - Tools may include MCP providers (provider_type=mcp). Tool selection still uses tool_key.
diff --git a/api/core/llm_generator/vibe_config/__init__.py b/api/core/llm_generator/vibe_config/__init__.py
new file mode 100644
index 0000000000..85e8a87d18
--- /dev/null
+++ b/api/core/llm_generator/vibe_config/__init__.py
@@ -0,0 +1,26 @@
+"""
+Vibe Workflow Generator Configuration Module.
+
+This module centralizes configuration for the Vibe workflow generation feature,
+including node schemas, fallback rules, and response templates.
+"""
+
+from core.llm_generator.vibe_config.fallback_rules import (
+    FALLBACK_RULES,
+    FIELD_NAME_CORRECTIONS,
+    NODE_TYPE_ALIASES,
+    get_corrected_field_name,
+)
+from core.llm_generator.vibe_config.node_schemas import BUILTIN_NODE_SCHEMAS
+from core.llm_generator.vibe_config.responses import DEFAULT_SUGGESTIONS, OFF_TOPIC_RESPONSES
+
+__all__ = [
+    "BUILTIN_NODE_SCHEMAS",
+    "DEFAULT_SUGGESTIONS",
+    "FALLBACK_RULES",
+    "FIELD_NAME_CORRECTIONS",
+    "NODE_TYPE_ALIASES",
+    "OFF_TOPIC_RESPONSES",
+    "get_corrected_field_name",
+]
+
diff --git a/api/core/llm_generator/vibe_config/fallback_rules.py b/api/core/llm_generator/vibe_config/fallback_rules.py
new file mode 100644
index 0000000000..6265ac8db3
--- /dev/null
+++ b/api/core/llm_generator/vibe_config/fallback_rules.py
@@ -0,0 +1,138 @@
+"""
+Fallback Rules for Vibe Workflow Generation.
+
+This module defines keyword-based rules for determining fallback node types
+when the LLM generates invalid tool references.
+
+Note: These definitions are mirrored in node_definitions.json for frontend sync.
+When updating these values, also update the JSON file.
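+
+Example (illustrative), using the helper defined at the bottom of this module:
+
+    >>> get_corrected_field_name("http-request", "text")
+    'body'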
+""" + +# Keyword rules for smart fallback detection +# Maps node type to keywords that suggest using that node type as a fallback +FALLBACK_RULES: dict[str, list[str]] = { + "http-request": [ + "http", + "url", + "web", + "scrape", + "scraper", + "fetch", + "api", + "request", + "download", + "upload", + "webhook", + "endpoint", + "rest", + "get", + "post", + ], + "code": [ + "code", + "script", + "calculate", + "compute", + "process", + "transform", + "parse", + "convert", + "format", + "filter", + "sort", + "math", + "logic", + ], + "llm": [ + "analyze", + "summarize", + "summary", + "extract", + "classify", + "translate", + "generate", + "write", + "rewrite", + "explain", + "answer", + "chat", + ], +} + + +# Node type aliases for inference from natural language +# Maps common terms to canonical node type names +NODE_TYPE_ALIASES: dict[str, str] = { + # Start node aliases + "start": "start", + "begin": "start", + "input": "start", + # End node aliases + "end": "end", + "finish": "end", + "output": "end", + # LLM node aliases + "llm": "llm", + "ai": "llm", + "gpt": "llm", + "model": "llm", + "chat": "llm", + # Code node aliases + "code": "code", + "script": "code", + "python": "code", + "javascript": "code", + # HTTP request node aliases + "http-request": "http-request", + "http": "http-request", + "request": "http-request", + "api": "http-request", + "fetch": "http-request", + "webhook": "http-request", + # Conditional node aliases + "if-else": "if-else", + "condition": "if-else", + "branch": "if-else", + "switch": "if-else", + # Loop node aliases + "iteration": "iteration", + "loop": "loop", + "foreach": "iteration", + # Tool node alias + "tool": "tool", +} + + +# Field name corrections for LLM-generated node configs +# Maps incorrect field names to correct ones for specific node types +FIELD_NAME_CORRECTIONS: dict[str, dict[str, str]] = { + "http-request": { + "text": "body", # LLM might use "text" instead of "body" + "content": "body", + "response": "body", + }, + "code": { + "text": "result", # LLM might use "text" instead of "result" + "output": "result", + }, + "llm": { + "response": "text", + "answer": "text", + }, +} + + +def get_corrected_field_name(node_type: str, field: str) -> str: + """ + Get the corrected field name for a node type. 
+ + Args: + node_type: The type of the node (e.g., "http-request", "code") + field: The field name to correct + + Returns: + The corrected field name, or the original if no correction needed + """ + corrections = FIELD_NAME_CORRECTIONS.get(node_type, {}) + return corrections.get(field, field) + diff --git a/api/core/llm_generator/vibe_config/node_definitions.json b/api/core/llm_generator/vibe_config/node_definitions.json new file mode 100644 index 0000000000..73abc61c76 --- /dev/null +++ b/api/core/llm_generator/vibe_config/node_definitions.json @@ -0,0 +1,82 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "description": "Vibe Workflow Generator - Node Definitions for Frontend/Backend Sync", + "version": "1.0.0", + + "nodeTypeAliases": { + "start": "start", + "begin": "start", + "input": "start", + "end": "end", + "finish": "end", + "output": "end", + "llm": "llm", + "ai": "llm", + "gpt": "llm", + "model": "llm", + "chat": "llm", + "code": "code", + "script": "code", + "python": "code", + "javascript": "code", + "http-request": "http-request", + "http": "http-request", + "request": "http-request", + "api": "http-request", + "fetch": "http-request", + "webhook": "http-request", + "if-else": "if-else", + "condition": "if-else", + "branch": "if-else", + "switch": "if-else", + "iteration": "iteration", + "loop": "loop", + "foreach": "iteration", + "tool": "tool" + }, + + "fieldNameCorrections": { + "http-request": { + "text": "body", + "content": "body", + "response": "body" + }, + "code": { + "text": "result", + "output": "result" + }, + "llm": { + "response": "text", + "answer": "text" + } + }, + + "fallbackKeywords": { + "http-request": [ + "http", "url", "web", "scrape", "scraper", "fetch", "api", "request", + "download", "upload", "webhook", "endpoint", "rest", "get", "post" + ], + "code": [ + "code", "script", "calculate", "compute", "process", "transform", + "parse", "convert", "format", "filter", "sort", "math", "logic" + ], + "llm": [ + "analyze", "summarize", "summary", "extract", "classify", "translate", + "generate", "write", "rewrite", "explain", "answer", "chat" + ] + }, + + "nodeOutputFields": { + "start": ["All defined variables available as {{#start.variable_name#}}"], + "end": [], + "http-request": ["body", "status_code", "headers"], + "code": ["result"], + "llm": ["text"], + "if-else": [], + "knowledge-retrieval": ["result"], + "template-transform": ["output"], + "variable-aggregator": ["output"], + "iteration": ["item", "index"] + } +} + diff --git a/api/core/llm_generator/vibe_config/node_schemas.py b/api/core/llm_generator/vibe_config/node_schemas.py new file mode 100644 index 0000000000..779aba2efa --- /dev/null +++ b/api/core/llm_generator/vibe_config/node_schemas.py @@ -0,0 +1,211 @@ +""" +Built-in Node Schemas for Vibe Workflow Generation. + +These schemas define the parameter structures for each node type, +helping the LLM understand what configuration each node requires. 
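+
+Example (illustrative):
+
+    >>> BUILTIN_NODE_SCHEMAS["http-request"]["required"]
+    ['url', 'method']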
+""" + +from typing import Any + +# Built-in node schemas with parameter definitions +# These help the model understand what config each node type requires +BUILTIN_NODE_SCHEMAS: dict[str, dict[str, Any]] = { + "start": { + "description": "Workflow entry point - defines input variables", + "required": [], + "parameters": { + "variables": { + "type": "array", + "description": "Input variables for the workflow", + "item_schema": { + "variable": "string - variable name", + "label": "string - display label", + "type": "enum: text-input, paragraph, number, select, file, file-list", + "required": "boolean", + "max_length": "number (optional)", + }, + }, + }, + "outputs": ["All defined variables are available as {{#start.variable_name#}}"], + }, + "end": { + "description": "Workflow exit point - defines output variables", + "required": ["outputs"], + "parameters": { + "outputs": { + "type": "array", + "description": "Output variables to return", + "item_schema": { + "variable": "string - output variable name", + "type": "enum: string, number, object, array", + "value_selector": "array - path to source value, e.g. ['node_id', 'field']", + }, + }, + }, + }, + "http-request": { + "description": "Send HTTP requests to external APIs or fetch web content", + "required": ["url", "method"], + "parameters": { + "url": { + "type": "string", + "description": "Full URL including protocol (https://...)", + "example": "{{#start.url#}} or https://api.example.com/data", + }, + "method": { + "type": "enum", + "options": ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD"], + "description": "HTTP method", + }, + "headers": { + "type": "string", + "description": "HTTP headers as newline-separated 'Key: Value' pairs", + "example": "Content-Type: application/json\nAuthorization: Bearer {{#start.api_key#}}", + }, + "params": { + "type": "string", + "description": "URL query parameters as newline-separated 'key: value' pairs", + }, + "body": { + "type": "object", + "description": "Request body with type field required", + "example": {"type": "none", "data": []}, + }, + "authorization": { + "type": "object", + "description": "Authorization config", + "example": {"type": "no-auth"}, + }, + "timeout": { + "type": "number", + "description": "Request timeout in seconds", + "default": 60, + }, + }, + "outputs": ["body (response content)", "status_code", "headers"], + }, + "code": { + "description": "Execute Python or JavaScript code for custom logic", + "required": ["code", "language"], + "parameters": { + "code": { + "type": "string", + "description": "Code to execute. 
Must define a main() function that returns a dict.", + }, + "language": { + "type": "enum", + "options": ["python3", "javascript"], + }, + "variables": { + "type": "array", + "description": "Input variables passed to the code", + "item_schema": {"variable": "string", "value_selector": "array"}, + }, + "outputs": { + "type": "object", + "description": "Output variable definitions", + }, + }, + "outputs": ["Variables defined in outputs schema"], + }, + "llm": { + "description": "Call a large language model for text generation/processing", + "required": ["prompt_template"], + "parameters": { + "model": { + "type": "object", + "description": "Model configuration (provider, name, mode)", + }, + "prompt_template": { + "type": "array", + "description": "Messages for the LLM", + "item_schema": { + "role": "enum: system, user, assistant", + "text": "string - message content, can include {{#node_id.field#}} references", + }, + }, + "context": { + "type": "object", + "description": "Optional context settings", + }, + "memory": { + "type": "object", + "description": "Optional memory/conversation settings", + }, + }, + "outputs": ["text (generated response)"], + }, + "if-else": { + "description": "Conditional branching based on conditions", + "required": ["conditions"], + "parameters": { + "conditions": { + "type": "array", + "description": "List of condition cases", + "item_schema": { + "case_id": "string - unique case identifier", + "logical_operator": "enum: and, or", + "conditions": "array of {variable_selector, comparison_operator, value}", + }, + }, + }, + "outputs": ["Branches: true (conditions met), false (else)"], + }, + "knowledge-retrieval": { + "description": "Query knowledge base for relevant content", + "required": ["query_variable_selector", "dataset_ids"], + "parameters": { + "query_variable_selector": { + "type": "array", + "description": "Path to query variable, e.g. ['start', 'query']", + }, + "dataset_ids": { + "type": "array", + "description": "List of knowledge base IDs to search", + }, + "retrieval_mode": { + "type": "enum", + "options": ["single", "multiple"], + }, + }, + "outputs": ["result (retrieved documents)"], + }, + "template-transform": { + "description": "Transform data using Jinja2 templates", + "required": ["template"], + "parameters": { + "template": { + "type": "string", + "description": "Jinja2 template string", + }, + "variables": { + "type": "array", + "description": "Variables to pass to template", + }, + }, + "outputs": ["output (transformed string)"], + }, + "variable-aggregator": { + "description": "Aggregate variables from multiple branches", + "required": ["variables"], + "parameters": { + "variables": { + "type": "array", + "description": "List of variable selectors to aggregate", + }, + }, + "outputs": ["output (aggregated value)"], + }, + "iteration": { + "description": "Loop over array items", + "required": ["iterator_selector"], + "parameters": { + "iterator_selector": { + "type": "array", + "description": "Path to array variable to iterate", + }, + }, + "outputs": ["item (current iteration item)", "index (current index)"], + }, +} + diff --git a/api/core/llm_generator/vibe_config/responses.py b/api/core/llm_generator/vibe_config/responses.py new file mode 100644 index 0000000000..4303fcb709 --- /dev/null +++ b/api/core/llm_generator/vibe_config/responses.py @@ -0,0 +1,74 @@ +""" +Response Templates for Vibe Workflow Generation. 
+ +This module defines templates for off-topic responses and default suggestions +to guide users back to workflow-related requests. +""" + +# Off-topic response templates for different categories +# Each category has messages in multiple languages +OFF_TOPIC_RESPONSES: dict[str, dict[str, str]] = { + "weather": { + "en": ( + "I'm the workflow design assistant - I can't check the weather, " + "but I can help you build AI workflows! For example, I could help you " + "create a workflow that fetches weather data from an API." + ), + "zh": "我是工作流设计助手,无法查询天气。但我可以帮你创建一个从API获取天气数据的工作流!", + }, + "math": { + "en": ( + "I focus on workflow design rather than calculations. However, " + "if you need calculations in a workflow, I can help you add a Code node " + "that handles math operations!" + ), + "zh": "我专注于工作流设计而非计算。但如果您需要在工作流中进行计算,我可以帮您添加一个处理数学运算的代码节点!", + }, + "joke": { + "en": ( + "While I'd love to share a laugh, I'm specialized in workflow design. " + "How about we create something fun instead - like a workflow that generates jokes using AI?" + ), + "zh": "虽然我很想讲笑话,但我专门从事工作流设计。不如我们创建一个有趣的东西——比如使用AI生成笑话的工作流?", + }, + "translation": { + "en": ( + "I can't translate directly, but I can help you build a translation workflow! " + "Would you like to create one using an LLM node?" + ), + "zh": "我不能直接翻译,但我可以帮你构建一个翻译工作流!要创建一个使用LLM节点的翻译流程吗?", + }, + "general_coding": { + "en": ( + "I'm specialized in Dify workflow design rather than general coding help. " + "But if you want to add code logic to your workflow, I can help you configure a Code node!" + ), + "zh": ( + "我专注于Dify工作流设计,而非通用编程帮助。" + "但如果您想在工作流中添加代码逻辑,我可以帮您配置一个代码节点!" + ), + }, + "default": { + "en": ( + "I'm the Dify workflow design assistant. I help create AI automation workflows, " + "but I can't help with general questions. Would you like to create a workflow instead?" + ), + "zh": "我是Dify工作流设计助手。我帮助创建AI自动化工作流,但无法回答一般性问题。您想创建一个工作流吗?", + }, +} + +# Default suggestions for off-topic requests +# These help guide users towards valid workflow requests +DEFAULT_SUGGESTIONS: dict[str, list[str]] = { + "en": [ + "Create a chatbot workflow", + "Build a document summarization pipeline", + "Add email notification to workflow", + ], + "zh": [ + "创建一个聊天机器人工作流", + "构建文档摘要处理流程", + "添加邮件通知到工作流", + ], +} + diff --git a/api/core/llm_generator/vibe_prompts.py b/api/core/llm_generator/vibe_prompts.py new file mode 100644 index 0000000000..59a07f3c71 --- /dev/null +++ b/api/core/llm_generator/vibe_prompts.py @@ -0,0 +1,1038 @@ +""" +Vibe Workflow Generator - Enhanced Prompts with Inline Intent Classification. + +This module provides prompts for the agent-enhanced workflow generation +with inline intent classification (no separate ReAct loop to stay within +single endpoint constraints). +""" + +import json +import re +from typing import Any + +from core.llm_generator.vibe_config import ( + BUILTIN_NODE_SCHEMAS, + DEFAULT_SUGGESTIONS, + FALLBACK_RULES, + OFF_TOPIC_RESPONSES, +) + + +def extract_instruction_values(instruction: str) -> dict[str, Any]: + """ + Extract concrete values from user instruction for auto-fill hints. + + This pre-processes the instruction to find URLs, emails, and other + concrete values that can be used as defaults in the generated workflow. 
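+
+    Example (illustrative):
+
+        >>> extract_instruction_values("Fetch https://api.example.com/v1/users")["urls"]
+        ['https://api.example.com/v1/users']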
+ """ + urls = re.findall(r'https?://[^\s<>"{}|\\^`\[\]]+', instruction) + + return { + "urls": urls, + "emails": re.findall(r'[\w.-]+@[\w.-]+\.\w+', instruction), + "api_endpoints": [u for u in urls if "/api/" in u or "/v1/" in u or "/v2/" in u], + "file_extensions": re.findall(r'\.(json|csv|txt|pdf|docx?)(?:\s|$)', instruction, re.IGNORECASE), + "json_paths": re.findall(r'\.[\w]+(?:\.[\w]+)+', instruction), # e.g., .data.results + } + + +def format_extracted_values(extracted: dict[str, Any]) -> str: + """Format extracted values as XML for prompt inclusion.""" + parts = [] + + if extracted.get("urls"): + urls_str = ", ".join(extracted["urls"]) + parts.append(f" {urls_str}") + + if extracted.get("api_endpoints"): + endpoints_str = ", ".join(extracted["api_endpoints"]) + parts.append(f" {endpoints_str}") + + if extracted.get("emails"): + emails_str = ", ".join(extracted["emails"]) + parts.append(f" {emails_str}") + + if extracted.get("file_extensions"): + exts_str = ", ".join(extracted["file_extensions"]) + parts.append(f" {exts_str}") + + if parts: + return "\n" + "\n".join(parts) + "\n" + return "" + + +VIBE_ENHANCED_SYSTEM_PROMPT = """ +You are a Dify workflow design assistant. +You help users create AI automation workflows by generating workflow configurations. + + + +- Detect the language of the user's request automatically. +- Generate ALL node titles and user-facing text in the SAME language as the user's input. +- If the input language cannot be determined, use {preferred_language} as the fallback language. +- Example: If user writes in Chinese, node titles should be in Chinese (e.g., "获取数据", "处理结果"). +- Example: If user writes in English, node titles should be in English (e.g., "Fetch Data", "Process Results"). + + + +- Generate workflow configurations from natural language descriptions +- Validate tool references against available integrations +- Provide clear, helpful responses +- Reject requests that are not about workflow design + + +{available_nodes_formatted} + +{available_tools_formatted} + +{available_models_formatted} + + + How to reference data from other nodes in your workflow + {{{{#node_id.field_name#}}}} + + {{{{#start.url#}}}} + {{{{#start.query#}}}} + {{{{#llm_node_id.text#}}}} + {{{{#http_node_id.body#}}}} + {{{{#code_node_id.result#}}}} + + + + + + ONLY use tools with status="configured" from available_tools. + NEVER invent tool names like "webscraper", "email_sender", etc. + If no matching tool exists, use http-request or code node as fallback. + + + ALWAYS fill ALL required_params for each node type. + Check the node's params section to know what config is needed. + + + Use {{{{#node_id.field#}}}} syntax to reference outputs from previous nodes. 
+ Start node variables: {{{{#start.variable_name#}}}} + + + + + When user requests capability with NO matching tool in available_tools + + Configure with: url (the URL to fetch), method: GET + + + Configure with: url, method, headers, body as needed + + + Write Python/JavaScript code with main() function + + + Use prompt_template with appropriate system/user messages + + Add warning to response explaining the fallback substitution + + + + +{existing_nodes_formatted} + + +{selected_nodes_formatted} + + + + + +```json +{{{{ + "intent": "generate", + "thinking": "Brief analysis of user request and approach", + "message": "User-friendly explanation of the workflow", + "mermaid": "flowchart TD\\n N1[\\"type=start|title=Start\\"]\\n ...", + "nodes": [ + {{{{ + "id": "node_id", + "type": "node_type", + "title": "Display Name", + "config": {{{{ /* REQUIRED: Fill all required_params from node schema */ }}}} + }}}} + ], + "edges": [{{{{"source": "node1_id", "target": "node2_id"}}}}], + "warnings": ["Any warnings about fallbacks or missing features"] +}}}} +``` + + +```json +{{{{ + "intent": "off_topic", + "message": "Explanation of what you can help with", + "suggestions": ["Workflow suggestion 1", "Suggestion 2"] +}}}} +``` + + + + + Weather queries, math calculations, jokes, general knowledge + Translation requests, general coding help, account/billing questions + Workflow creation, node configuration, automation design + Questions about Dify workflow capabilities + + + + Use `flowchart TD` for top-down flow + Node format: `ID["type=TYPE|title=TITLE"]` or `ID["type=tool|title=TITLE|tool=TOOL_KEY"]` + type= and title= are REQUIRED for EVERY node + Declare all nodes BEFORE edges + Use `-->` for connections, `-->|true|` and `-->|false|` for branches + + N1["type=start|title=Start"] + N2["type=http-request|title=Fetch Data"] + N3["type=tool|title=Search|tool=google/google_search"] + Start[Start] + N1["type=tool|title=Scrape|tool=webscraper"] + + + + + + {{{{ + "id": "fetch", + "type": "http-request", + "title": "Fetch Webpage", + "config": {{{{ + "url": "{{{{#start.url#}}}}", + "method": "GET", + "headers": "", + "params": "", + "body": {{{{"type": "none", "data": []}}}}, + "authorization": {{{{"type": "no-auth"}}}} + }}}} + }}}} + + + {{{{ + "id": "analyze", + "type": "llm", + "title": "Analyze Content", + "config": {{{{ + "prompt_template": [ + {{{{"role": "system", "text": "You are a helpful analyst."}}}}, + {{{{"role": "user", "text": "Analyze this content:\\n\\n{{{{#fetch.body#}}}}"}}}} + ] + }}}} + }}}} + + + {{{{ + "id": "process", + "type": "code", + "title": "Process Data", + "config": {{{{ + "language": "python3", + "code": "def main(data):\\n return {{\\"result\\": data.upper()}}" + }}}} + }}}} + + + + + CRITICAL: Auto-fill all parameters so workflow runs immediately + + + MUST define input variables for ALL data the workflow needs from user + Use extracted values from user instruction as "default" when available + + URL fetching workflow → add "url" variable with type="text-input" + Text processing workflow → add "content" or "query" variable + API integration → add "api_key" variable if authentication needed + + + {{{{ + "variables": [ + {{{{"variable": "url", "label": "Target URL", "type": "text-input", "required": true, "default": "https://..."}}}} + ] + }}}} + + + + + EVERY node parameter that needs data must reference a source: + - User input from start node → {{{{#start.variable_name#}}}} + - Output from previous node → {{{{#node_id.output_field#}}}} + - Or a concrete hardcoded value 
extracted from user instruction + NEVER leave parameters empty - always fill with variable reference or concrete value + + + + url: MUST be {{{{#start.url#}}}} OR concrete URL from instruction - NEVER empty + method: Set based on action (fetch/get → GET, send/post/create → POST, update → PUT, delete → DELETE) + headers: Include Authorization if API key is available + + + + prompt_template MUST reference previous node output for context + Example: {{{{"role": "user", "text": "Analyze this:\\n\\n{{{{#http_node.body#}}}}"}}}} + Include system message to set AI behavior/role + + + + variables array MUST include inputs from previous nodes + Example: {{{{"variable": "data", "value_selector": ["http_node", "body"]}}}} + code must define main() function that returns dict + + + + outputs MUST use value_selector to reference the final processing node's output + Example: {{{{"variable": "result", "value_selector": ["llm_node", "text"]}}}} + + + +{extracted_values_formatted} +""" + +VIBE_ENHANCED_USER_PROMPT = """ +{instruction} + + + + + Is this request about workflow/automation design? + - If NO (weather, jokes, math, translations, general questions) → return off_topic response + - If YES → proceed to Step 2 + + + + - What is the user trying to achieve? + - What inputs are needed (define in start node)? + - What processing steps are required? + - What outputs should be produced (define in end node)? + + + + - Check available_tools - which tools with status="configured" can be used? + - For each required capability, check if a matching tool exists + - If NO matching tool: use fallback node (http-request, code, or llm) + - NEVER invent tool names - only use exact keys from available_tools + + + + - For EACH node, check its required_params in available_nodes + - Fill ALL required config fields with proper values + - Use {{{{#node_id.field#}}}} syntax to reference previous node outputs + - http-request MUST have: url, method + - code MUST have: code, language + - llm MUST have: prompt_template + + + + - Create mermaid flowchart with correct syntax + - Generate nodes array with complete config for each node + - Generate edges array connecting the nodes + - Add warnings if using fallback nodes + + + +{previous_attempt_formatted} + + +Generate your JSON response now. Remember: +1. Fill ALL required_params for each node type +2. Use variable references like {{{{#start.url#}}}} to connect nodes +3. 
Never invent tool names - use fallback nodes instead + +""" + +def format_available_nodes(nodes: list[dict[str, Any]] | None) -> str: + """Format available nodes as XML with parameter schemas.""" + lines = [""] + + # First, add built-in nodes with their schemas + for node_type, schema in BUILTIN_NODE_SCHEMAS.items(): + lines.append(f' ') + lines.append(f" {schema.get('description', '')}") + + required = schema.get("required", []) + if required: + lines.append(f" {', '.join(required)}") + + params = schema.get("parameters", {}) + if params: + lines.append(" ") + for param_name, param_info in params.items(): + param_type = param_info.get("type", "string") + is_required = param_name in required + desc = param_info.get("description", "") + + if param_type == "enum": + options = param_info.get("options", []) + lines.append( + f' ' + f"{desc}" + ) + else: + lines.append( + f' {desc}' + ) + + # Add example if present + if "example" in param_info: + example = param_info["example"] + if isinstance(example, dict): + example = json.dumps(example) + lines.append(f" ") + lines.append(" ") + + outputs = schema.get("outputs", []) + if outputs: + lines.append(f" {', '.join(outputs)}") + + lines.append(" ") + + # Add custom nodes from the provided list (without detailed schemas) + if nodes: + for node in nodes: + node_type = node.get("type", "unknown") + # Skip if already covered by built-in schemas + if node_type in BUILTIN_NODE_SCHEMAS: + continue + description = node.get("description", "No description") + lines.append(f' ') + lines.append(f" {description}") + lines.append(" ") + + lines.append("") + return "\n".join(lines) + + +def format_available_tools(tools: list[dict[str, Any]] | None) -> str: + """Format available tools as XML with parameter schemas.""" + lines = [""] + + if not tools: + lines.append(" ") + lines.append(" ") + lines.append("") + return "\n".join(lines) + + configured_tools: list[dict[str, Any]] = [] + unconfigured_tools: list[dict[str, Any]] = [] + + for tool in tools: + if tool.get("is_team_authorization", False): + configured_tools.append(tool) + else: + unconfigured_tools.append(tool) + + # Configured tools (ready to use) + lines.append(" ") + if configured_tools: + for tool in configured_tools: + tool_key = tool.get("tool_key") or f"{tool.get('provider_id')}/{tool.get('tool_name')}" + description = tool.get("tool_description") or tool.get("description", "") + lines.append(f' ') + lines.append(f" {description}") + + # Add parameter schemas if available + parameters = tool.get("parameters") + if parameters: + lines.append(" ") + for param in parameters: + param_name = param.get("name", "") + param_type = param.get("type", "string") + required = param.get("required", False) + param_desc = param.get("human_description") or param.get("llm_description") or "" + # Handle localized descriptions + if isinstance(param_desc, dict): + param_desc = param_desc.get("en_US") or param_desc.get("zh_Hans") or str(param_desc) + options = param.get("options", []) + + if options: + opt_str = ",".join(str(o.get("value", o)) if isinstance(o, dict) else str(o) for o in options) + lines.append( + f' {param_desc}' + ) + else: + lines.append( + f' {param_desc}' + ) + lines.append(" ") + + lines.append(" ") + else: + lines.append(" ") + + # Unconfigured tools (need setup first) + lines.append("") + lines.append(" ") + if unconfigured_tools: + for tool in unconfigured_tools: + tool_key = tool.get("tool_key") or f"{tool.get('provider_id')}/{tool.get('tool_name')}" + description = tool.get("tool_description") 
or tool.get("description", "") + lines.append(f' ') + lines.append(f" {description}") + lines.append(" ") + lines.append(" ") + else: + lines.append(" ") + + lines.append("") + return "\n".join(lines) + + +def format_existing_nodes(nodes: list[dict[str, Any]] | None) -> str: + """Format existing workflow nodes for context.""" + if not nodes: + return "No existing nodes in workflow (creating from scratch)." + + lines = [] + for node in nodes: + node_id = node.get("id", "unknown") + node_type = node.get("type", "unknown") + title = node.get("title", "Untitled") + lines.append(f"- [{node_id}] {title} ({node_type})") + return "\n".join(lines) + + +def format_selected_nodes( + selected_ids: list[str] | None, + existing_nodes: list[dict[str, Any]] | None, +) -> str: + """Format selected nodes for modification context.""" + if not selected_ids: + return "No nodes selected (generating new workflow)." + + node_map = {n.get("id"): n for n in (existing_nodes or [])} + lines = [] + for node_id in selected_ids: + if node_id in node_map: + node = node_map[node_id] + lines.append(f"- [{node_id}] {node.get('title', 'Untitled')} ({node.get('type', 'unknown')})") + else: + lines.append(f"- [{node_id}] (not found in current workflow)") + return "\n".join(lines) + + +def format_previous_attempt( + previous_workflow: dict[str, Any] | None, + regenerate_mode: bool = False, +) -> str: + """ + Format previous workflow attempt as XML context for regeneration. + + When regenerating, we pass the previous workflow and warnings so the model + can fix specific issues instead of starting from scratch. + """ + if not regenerate_mode or not previous_workflow: + return "" + + nodes = previous_workflow.get("nodes", []) + edges = previous_workflow.get("edges", []) + warnings = previous_workflow.get("warnings", []) + + parts = [""] + parts.append(" ") + parts.append(" Your previous generation had issues. Please fix them while keeping the good parts.") + parts.append(" ") + + if warnings: + parts.append(" ") + for warning in warnings: + parts.append(f" - {warning}") + parts.append(" ") + + if nodes: + # Summarize nodes without full config to save tokens + parts.append(" ") + for node in nodes: + node_id = node.get("id", "unknown") + node_type = node.get("type", "unknown") + title = node.get("title", "Untitled") + config = node.get("config", {}) + + # Show key config issues for debugging + config_summary = "" + if node_type == "http-request": + url = config.get("url", "") + if not url: + config_summary = " (url: EMPTY - needs fix)" + elif url.startswith("{{#"): + config_summary = f" (url: {url})" + elif node_type == "tool": + tool_name = config.get("tool_name", "") + config_summary = f" (tool: {tool_name})" + + parts.append(f" - [{node_id}] {title} ({node_type}){config_summary}") + parts.append(" ") + + if edges: + parts.append(" ") + for edge in edges: + parts.append(f" - {edge.get('source', '?')} → {edge.get('target', '?')}") + parts.append(" ") + + parts.append(" ") + parts.append(" 1. Keep the workflow structure if it makes sense") + parts.append(" 2. Fix any invalid tool references - use http-request or code as fallback") + parts.append(" 3. Fill ALL required parameters (url, method, prompt_template, etc.)") + parts.append(" 4. Use {{#node_id.field#}} syntax for variable references") + parts.append(" 5. 
Define input variables in the Start node") + parts.append(" ") + parts.append("") + + return "\n".join(parts) + + +def format_available_models(models: list[dict[str, Any]] | None) -> str: + """Format available models as XML for prompt inclusion.""" + if not models: + return "\n \n" + + lines = [""] + for model in models: + provider = model.get("provider", "unknown") + model_name = model.get("model", "unknown") + lines.append(f' ') + lines.append("") + + # Add model selection rule + lines.append("") + lines.append("") + lines.append(" CRITICAL: For LLM, question-classifier, and parameter-extractor nodes, you MUST select a model from available_models.") + if len(models) == 1: + first_model = models[0] + lines.append(f' Use provider="{first_model.get("provider")}" and name="{first_model.get("model")}" for all model-dependent nodes.') + else: + lines.append(" Choose the most suitable model for each task from the available options.") + lines.append(" NEVER use models not listed in available_models (e.g., openai/gpt-4o if not listed).") + lines.append("") + + return "\n".join(lines) + + +def build_vibe_enhanced_prompt( + instruction: str, + available_nodes: list[dict[str, Any]] | None = None, + available_tools: list[dict[str, Any]] | None = None, + existing_nodes: list[dict[str, Any]] | None = None, + selected_node_ids: list[str] | None = None, + previous_workflow: dict[str, Any] | None = None, + regenerate_mode: bool = False, + preferred_language: str | None = None, + available_models: list[dict[str, Any]] | None = None, +) -> tuple[str, str]: + """Build the complete system and user prompts.""" + # Extract concrete values from user instruction for auto-fill hints + extracted = extract_instruction_values(instruction) + extracted_values_xml = format_extracted_values(extracted) + + # Format previous attempt context for regeneration + previous_attempt_xml = format_previous_attempt(previous_workflow, regenerate_mode) + + # Default to English if no preferred language specified + language_hint = preferred_language or "English" + + system_prompt = VIBE_ENHANCED_SYSTEM_PROMPT.format( + preferred_language=language_hint, + available_nodes_formatted=format_available_nodes(available_nodes), + available_tools_formatted=format_available_tools(available_tools), + existing_nodes_formatted=format_existing_nodes(existing_nodes), + selected_nodes_formatted=format_selected_nodes(selected_node_ids, existing_nodes), + extracted_values_formatted=extracted_values_xml, + previous_attempt_formatted=previous_attempt_xml, + available_models_formatted=format_available_models(available_models), + ) + + user_prompt = VIBE_ENHANCED_USER_PROMPT.format( + instruction=instruction, + previous_attempt_formatted=previous_attempt_xml, + ) + + return system_prompt, user_prompt + + +def parse_vibe_response(content: str) -> dict[str, Any]: + """Parse LLM response into structured format.""" + # Extract JSON from markdown code block if present + json_match = re.search(r"```(?:json)?\s*([\s\S]+?)```", content) + if json_match: + content = json_match.group(1).strip() + + # Try parsing JSON + try: + data = json.loads(content) + except json.JSONDecodeError: + # Attempt simple repair: remove trailing commas + cleaned = re.sub(r",\s*([}\]])", r"\1", content) + try: + data = json.loads(cleaned) + except json.JSONDecodeError: + # Return error format + return { + "intent": "error", + "error": "Failed to parse LLM response as JSON", + "raw_content": content[:500], # First 500 chars for debugging + } + + # Validate and normalize + if "intent" not in 
data: + data["intent"] = "generate" # Default assumption + + # Ensure required fields for generate intent + if data["intent"] == "generate": + data.setdefault("mermaid", "") + data.setdefault("nodes", []) + data.setdefault("edges", []) + data.setdefault("message", "") + data.setdefault("warnings", []) + + # Ensure required fields for off_topic intent + if data["intent"] == "off_topic": + data.setdefault("message", OFF_TOPIC_RESPONSES["default"]["en"]) + data.setdefault("suggestions", DEFAULT_SUGGESTIONS["en"]) + + return data + + +def validate_tool_references( + nodes: list[dict[str, Any]], + available_tools: list[dict[str, Any]] | None, +) -> tuple[list[str], list[dict[str, Any]]]: + """ + Validate tool references and return warnings and recommendations. + + Returns: + tuple of (warnings, tool_recommendations) + """ + if not available_tools: + return [], [] + + # Build lookup sets for configured and unconfigured tools + configured_keys: set[str] = set() + unconfigured_keys: set[str] = set() + tool_info_map: dict[str, dict[str, Any]] = {} + + for tool in available_tools: + provider = tool.get("provider_id") or tool.get("provider", "") + tool_key = tool.get("tool_key") or tool.get("tool_name", "") + is_authorized = tool.get("is_team_authorization", False) + + full_key = f"{provider}/{tool_key}" if provider else tool_key + tool_info_map[full_key] = { + "provider_id": provider, + "tool_name": tool_key, + "description": tool.get("tool_description") or tool.get("description", ""), + } + + if is_authorized: + configured_keys.add(full_key) + if tool_key: + configured_keys.add(tool_key) + else: + unconfigured_keys.add(full_key) + if tool_key: + unconfigured_keys.add(tool_key) + + warnings: list[str] = [] + recommendations: list[dict[str, Any]] = [] + seen_recommendations: set[str] = set() + + for node in nodes: + if node.get("type") == "tool": + config = node.get("config", {}) + tool_ref = config.get("tool_key") or config.get("tool") or node.get("tool_name") + + if not tool_ref: + continue + + # Check if tool is configured + if tool_ref in configured_keys: + continue + + # Check if tool exists but is unconfigured + if tool_ref in unconfigured_keys: + if tool_ref not in seen_recommendations: + seen_recommendations.add(tool_ref) + warnings.append(f"Tool '{tool_ref}' requires configuration") + tool_info = tool_info_map.get(tool_ref, {}) + recommendations.append({ + "requested_capability": f"Use {tool_ref}", + "unconfigured_tools": [tool_info] if tool_info else [], + "configured_alternatives": [], + "recommendation": f"Configure '{tool_ref}' in Tools settings to enable this functionality", + }) + else: + # Tool doesn't exist at all + warnings.append(f"Tool '{tool_ref}' not found in available tools") + + return warnings, recommendations + + +def determine_fallback_type(tool_ref: str, node_title: str) -> str | None: + """ + Determine the best fallback node type based on tool name/title semantics. 
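+
+    For example, a hallucinated "web_scraper" tool in a node titled "Scrape Page"
+    matches http-request keywords ("web", "scrape") and falls back to http-request.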
+ + Returns: + - "http-request" for web/API related tools + - "code" for logic/calculation related tools + - "llm" for text/AI analysis related tools + - None if no appropriate fallback can be determined + """ + combined = f"{tool_ref} {node_title}".lower() + + for fallback_type, keywords in FALLBACK_RULES.items(): + if any(kw in combined for kw in keywords): + return fallback_type + + # No matching rule - don't force a fallback + return None + + +def create_http_request_fallback(original_node: dict[str, Any]) -> dict[str, Any]: + """Create http-request fallback node, preserving original URL if present.""" + config = original_node.get("config", {}) + tool_params = config.get("tool_parameters", {}) + # Also check "params" - LLM may put tool parameters there + params = config.get("params", {}) + if isinstance(params, str): + # params might be a string (query params), not tool params + params = {} + + # Try to preserve URL from original config (check multiple locations) + original_url = ( + config.get("url") + or tool_params.get("url") + or params.get("url") + or "" + ) + + # Headers should be a string (newline separated key: value pairs) + headers = config.get("headers") or tool_params.get("headers") or params.get("headers") or "" + if isinstance(headers, dict): + # Convert dict to string format + headers = "\n".join(f"{k}: {v}" for k, v in headers.items()) if headers else "" + + # Body should have a type field - use "none" as default + body = config.get("body") or tool_params.get("body") or params.get("body") or {} + if not isinstance(body, dict) or "type" not in body: + body = {"type": "none", "data": []} + + # Method - check multiple locations + method = config.get("method") or tool_params.get("method") or params.get("method") or "GET" + + return { + "id": original_node.get("id", ""), + "type": "http-request", + "title": f"{original_node.get('title', 'Request')} (fallback)", + "config": { + "method": method, + "url": original_url, + "headers": headers, + "params": "", + "body": body, + "authorization": {"type": "no-auth"}, + }, + } + + +def create_code_fallback(original_node: dict[str, Any]) -> dict[str, Any]: + """Create code fallback node with placeholder implementation.""" + title = original_node.get("title", "Process") + return { + "id": original_node.get("id", ""), + "type": "code", + "title": f"{title} (fallback)", + "config": { + "language": "python3", + "code": f'def main():\n # TODO: Implement "{title}" logic\n return {{"result": "placeholder"}}', + }, + } + + +def create_llm_fallback(original_node: dict[str, Any]) -> dict[str, Any]: + """Create LLM fallback node for text analysis tasks.""" + title = original_node.get("title", "Analyze") + return { + "id": original_node.get("id", ""), + "type": "llm", + "title": f"{title} (fallback)", + "config": { + "prompt_template": [ + {"role": "system", "text": "You are a helpful assistant."}, + {"role": "user", "text": f"Please help with: {title}"}, + ], + }, + } + + +def sanitize_tool_nodes( + nodes: list[dict[str, Any]], + available_tools: list[dict[str, Any]] | None, +) -> tuple[list[dict[str, Any]], list[str]]: + """ + Replace invalid tool nodes with fallback nodes (http-request or code). + + This is a safety net for when the LLM hallucinates tool names despite prompt instructions. 
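+
+    For example, a node referencing a nonexistent "email_sender" tool is either
+    swapped for a fallback node (per FALLBACK_RULES) or kept with a warning when
+    no suitable fallback exists.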
+ + Returns: + tuple of (sanitized_nodes, warnings) + """ + if not nodes: + return [], [] + + # Build set of valid tool keys + valid_tool_keys: set[str] = set() + if available_tools: + for tool in available_tools: + provider = tool.get("provider_id") or tool.get("provider", "") + tool_key = tool.get("tool_key") or tool.get("tool_name", "") + if provider and tool_key: + valid_tool_keys.add(f"{provider}/{tool_key}") + if tool_key: + valid_tool_keys.add(tool_key) + + sanitized: list[dict[str, Any]] = [] + warnings: list[str] = [] + + for node in nodes: + if node.get("type") != "tool": + sanitized.append(node) + continue + + # Check if tool reference is valid + config = node.get("config", {}) + tool_ref = ( + config.get("tool_key") + or config.get("tool_name") + or config.get("provider_id", "") + "/" + config.get("tool_name", "") + ) + + # Normalize and check validity + normalized_refs = [tool_ref] + if "/" in tool_ref: + # Also check just the tool name part + normalized_refs.append(tool_ref.split("/")[-1]) + + is_valid = any(ref in valid_tool_keys for ref in normalized_refs if ref) + + if is_valid: + sanitized.append(node) + else: + # Determine the best fallback type based on tool semantics + node_title = node.get("title", "") + fallback_type = determine_fallback_type(tool_ref, node_title) + + if fallback_type == "http-request": + fallback_node = create_http_request_fallback(node) + sanitized.append(fallback_node) + warnings.append( + f"Tool '{tool_ref}' not found. Replaced with http-request node. " + "Please configure the URL if not set." + ) + elif fallback_type == "code": + fallback_node = create_code_fallback(node) + sanitized.append(fallback_node) + warnings.append( + f"Tool '{tool_ref}' not found. Replaced with code node. " + "Please implement the logic in the code editor." + ) + elif fallback_type == "llm": + fallback_node = create_llm_fallback(node) + sanitized.append(fallback_node) + warnings.append( + f"Tool '{tool_ref}' not found. Replaced with LLM node. " + "Please configure the prompt template." + ) + else: + # No appropriate fallback - keep original node and warn + sanitized.append(node) + warnings.append( + f"Tool '{tool_ref}' not found and no suitable fallback determined. " + "Please configure a valid tool or replace this node manually." + ) + + return sanitized, warnings + + +def validate_node_parameters(nodes: list[dict[str, Any]]) -> list[str]: + """ + Validate that all required parameters are properly filled in generated nodes. + + Returns a list of warnings for nodes with missing or empty parameters. 
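+
+    Example (illustrative):
+
+        >>> validate_node_parameters([{"id": "n1", "type": "http-request", "config": {"url": "{{#start.url#}}"}}])
+        ["Node 'n1': http-request should have 'method' (GET, POST, etc.)"]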
+ """ + warnings: list[str] = [] + + for node in nodes: + node_id = node.get("id", "unknown") + node_type = node.get("type", "") + config = node.get("config", {}) + + if node_type == "http-request": + url = config.get("url", "") + if not url: + warnings.append(f"Node '{node_id}': http-request is missing required 'url' parameter") + elif url == "": + warnings.append(f"Node '{node_id}': http-request has empty 'url' - please configure") + method = config.get("method", "") + if not method: + warnings.append(f"Node '{node_id}': http-request should have 'method' (GET, POST, etc.)") + + elif node_type == "llm": + prompt_template = config.get("prompt_template", []) + if not prompt_template: + warnings.append(f"Node '{node_id}': LLM node is missing 'prompt_template'") + else: + # Check if any prompt references previous node output + has_reference = any("{{#" in p.get("text", "") for p in prompt_template if isinstance(p, dict)) + if not has_reference: + warnings.append( + f"Node '{node_id}': LLM prompt should reference previous node output " + "using {{#node_id.field#}} syntax" + ) + + elif node_type == "code": + code = config.get("code", "") + if not code: + warnings.append(f"Node '{node_id}': code node is missing 'code' parameter") + language = config.get("language", "") + if not language: + warnings.append(f"Node '{node_id}': code node should specify 'language' (python3 or javascript)") + + elif node_type == "start": + variables = config.get("variables", []) + if not variables: + warnings.append( + "Start node should define input variables for user data (e.g., url, query, content)" + ) + + elif node_type == "end": + outputs = config.get("outputs", []) + if not outputs: + warnings.append( + "End node should define output variables to return workflow results" + ) + + return warnings + + +def extract_mermaid_from_response(data: dict[str, Any]) -> str: + """Extract mermaid flowchart from parsed response.""" + mermaid = data.get("mermaid", "") + if not mermaid: + return "" + + # Clean up mermaid code + mermaid = mermaid.strip() + # Remove code fence if present + if mermaid.startswith("```"): + match = re.search(r"```(?:mermaid)?\s*([\s\S]+?)```", mermaid) + if match: + mermaid = match.group(1).strip() + + return mermaid + diff --git a/web/app/components/workflow/hooks/index.ts b/web/app/components/workflow/hooks/index.ts index df54065dea..f3765b92ed 100644 --- a/web/app/components/workflow/hooks/index.ts +++ b/web/app/components/workflow/hooks/index.ts @@ -25,3 +25,4 @@ export * from './use-workflow-search' export * from './use-workflow-start-run' export * from './use-workflow-variables' export * from './use-workflow-vibe' +export * from './use-workflow-vibe-config' diff --git a/web/app/components/workflow/hooks/use-workflow-vibe-config.ts b/web/app/components/workflow/hooks/use-workflow-vibe-config.ts new file mode 100644 index 0000000000..b62fdc96dc --- /dev/null +++ b/web/app/components/workflow/hooks/use-workflow-vibe-config.ts @@ -0,0 +1,100 @@ +/** + * Vibe Workflow Generator Configuration + * + * This module centralizes configuration for the Vibe workflow generation feature, + * including node type aliases and field name corrections. + * + * Note: These definitions are mirrored in the backend at: + * api/core/llm_generator/vibe_config/node_definitions.json + * When updating these values, also update the backend JSON file. + */ + +/** + * Node type aliases for inference from natural language. + * Maps common terms to canonical node type names. 
+ */
+export const NODE_TYPE_ALIASES: Record<string, string> = {
+  // Start node aliases
+  start: 'start',
+  begin: 'start',
+  input: 'start',
+  // End node aliases
+  end: 'end',
+  finish: 'end',
+  output: 'end',
+  // LLM node aliases
+  llm: 'llm',
+  ai: 'llm',
+  gpt: 'llm',
+  model: 'llm',
+  chat: 'llm',
+  // Code node aliases
+  code: 'code',
+  script: 'code',
+  python: 'code',
+  javascript: 'code',
+  // HTTP request node aliases
+  'http-request': 'http-request',
+  http: 'http-request',
+  request: 'http-request',
+  api: 'http-request',
+  fetch: 'http-request',
+  webhook: 'http-request',
+  // Conditional node aliases
+  'if-else': 'if-else',
+  condition: 'if-else',
+  branch: 'if-else',
+  switch: 'if-else',
+  // Loop node aliases
+  iteration: 'iteration',
+  loop: 'loop',
+  foreach: 'iteration',
+  // Tool node alias
+  tool: 'tool',
+}
+
+/**
+ * Field name corrections for LLM-generated node configs.
+ * Maps incorrect field names to correct ones for specific node types.
+ */
+export const FIELD_NAME_CORRECTIONS: Record<string, Record<string, string>> = {
+  'http-request': {
+    text: 'body', // LLM might use "text" instead of "body"
+    content: 'body',
+    response: 'body',
+  },
+  code: {
+    text: 'result', // LLM might use "text" instead of "result"
+    output: 'result',
+  },
+  llm: {
+    response: 'text',
+    answer: 'text',
+  },
+}
+
+/**
+ * Correct field names based on node type.
+ * LLM sometimes generates wrong field names (e.g., "text" instead of "body" for HTTP nodes).
+ *
+ * @param field - The field name to correct
+ * @param nodeType - The type of the node
+ * @returns The corrected field name, or the original if no correction needed
+ */
+export const correctFieldName = (field: string, nodeType: string): string => {
+  const corrections = FIELD_NAME_CORRECTIONS[nodeType]
+  if (corrections && corrections[field])
+    return corrections[field]
+  return field
+}
+
+/**
+ * Get the canonical node type from an alias.
+ *
+ * @param alias - The alias to look up
+ * @returns The canonical node type, or undefined if not found
+ */
+export const getCanonicalNodeType = (alias: string): string | undefined => {
+  return NODE_TYPE_ALIASES[alias.toLowerCase()]
+}
+
diff --git a/web/app/components/workflow/hooks/use-workflow-vibe.tsx b/web/app/components/workflow/hooks/use-workflow-vibe.tsx
index 010e5ca53c..72e98301b8 100644
--- a/web/app/components/workflow/hooks/use-workflow-vibe.tsx
+++ b/web/app/components/workflow/hooks/use-workflow-vibe.tsx
@@ -13,6 +13,7 @@ import Toast from '@/app/components/base/toast'
 import { ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
 import { useModelListAndDefaultModelAndCurrentProviderAndModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
 import { useGetLanguage } from '@/context/i18n'
+import type { BackendEdgeSpec, BackendNodeSpec } from '@/service/debug'
 import { generateFlowchart } from '@/service/debug'
 import {
   useAllBuiltInTools,
@@ -42,6 +43,7 @@ import { useNodesMetaData } from './use-nodes-meta-data'
 import { useNodesSyncDraft } from './use-nodes-sync-draft'
 import { useNodesReadOnly } from './use-workflow'
 import { useWorkflowHistory, WorkflowHistoryEvent } from './use-workflow-history'
+import { NODE_TYPE_ALIASES, correctFieldName } from './use-workflow-vibe-config'
 
 type VibeCommandDetail = {
   dsl?: string
@@ -105,6 +107,74 @@ const normalizeProviderIcon = (icon?: ToolWithProvider['icon']) => {
   return icon
 }
 
+/**
+ * Replace variable references in node data using the nodeIdMap.
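+ * For example, "{{#llm_1.response#}}" becomes "{{#<uuid>.text#}}" when
+ * nodeIdMap maps 'llm_1' to a new LLM node ('response' is corrected to 'text').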
+ * Handles:
+ * - String templates: {{#old_id.field#}} → {{#new_id.field#}}
+ * - Value selectors: ["old_id", "field"] → ["new_id", "field"]
+ * - Mixed content objects: {type: "mixed", value: "..."} → normalized to string
+ * - Field name correction based on node type
+ */
+const replaceVariableReferences = (
+  data: unknown,
+  nodeIdMap: Map<string, Node>,
+  parentKey?: string,
+): unknown => {
+  if (typeof data === 'string') {
+    // Replace {{#old_id.field#}} patterns and correct field names
+    return data.replace(/\{\{#([^.#]+)\.([^#]+)#\}\}/g, (match, oldId, field) => {
+      const newNode = nodeIdMap.get(oldId)
+      if (newNode) {
+        const nodeType = newNode.data?.type as string || ''
+        const correctedField = correctFieldName(field, nodeType)
+        return `{{#${newNode.id}.${correctedField}#}}`
+      }
+      return match // Keep original if no mapping found
+    })
+  }
+
+  if (Array.isArray(data)) {
+    // Check if this is a value_selector array: ["node_id", "field", ...]
+    if (data.length >= 2 && typeof data[0] === 'string' && typeof data[1] === 'string') {
+      const potentialNodeId = data[0]
+      const newNode = nodeIdMap.get(potentialNodeId)
+      if (newNode) {
+        const nodeType = newNode.data?.type as string || ''
+        const correctedField = correctFieldName(data[1], nodeType)
+        // Replace the node ID and correct field name in value_selector
+        return [newNode.id, correctedField, ...data.slice(2)]
+      }
+    }
+    // Recursively process array elements
+    return data.map(item => replaceVariableReferences(item, nodeIdMap))
+  }
+
+  if (data !== null && typeof data === 'object') {
+    const obj = data as Record<string, unknown>
+
+    // Handle "mixed content" objects like {type: "mixed", value: "{{#...#}}"}
+    // These should be normalized to plain strings for fields like 'url'
+    if (obj.type === 'mixed' && typeof obj.value === 'string') {
+      const processedValue = replaceVariableReferences(obj.value, nodeIdMap) as string
+      // For certain fields (url, headers, params), return just the string value
+      if (parentKey && ['url', 'headers', 'params'].includes(parentKey)) {
+        return processedValue
+      }
+      // Otherwise keep the object structure but update the value
+      return { ...obj, value: processedValue }
+    }
+
+    // Recursively process object properties
+    const result: Record<string, unknown> = {}
+    for (const [key, value] of Object.entries(obj)) {
+      result[key] = replaceVariableReferences(value, nodeIdMap, key)
+    }
+    return result
+  }
+
+  return data // Return primitives as-is
+}
+
 const parseNodeLabel = (label: string) => {
   const tokens = label.split('|').map(token => token.trim()).filter(Boolean)
   const info: Record<string, string> = {}
@@ -116,8 +186,17 @@ const parseNodeLabel = (label: string) => {
     info[rawKey.trim().toLowerCase()] = rest.join('=').trim()
   })
 
+  // Fallback: if no type= found, try to infer from label text
   if (!info.type && tokens.length === 1 && !tokens[0].includes('=')) {
-    info.type = tokens[0]
+    const labelLower = tokens[0].toLowerCase()
+    // Check if label matches a known node type alias
+    if (NODE_TYPE_ALIASES[labelLower]) {
+      info.type = NODE_TYPE_ALIASES[labelLower]
+      info.title = tokens[0] // Use original label as title
+    }
+    else {
+      info.type = tokens[0]
+    }
   }
 
   if (!info.tool && info.tool_key)
@@ -356,7 +435,7 @@ export const useWorkflowVibe = () => {
   const { handleSyncWorkflowDraft } = useNodesSyncDraft()
   const { getNodesReadOnly } = useNodesReadOnly()
   const { saveStateToHistory } = useWorkflowHistory()
-  const { defaultModel } = useModelListAndDefaultModelAndCurrentProviderAndModel(ModelTypeEnum.textGeneration)
+  const { defaultModel, modelList } = useModelListAndDefaultModelAndCurrentProviderAndModel(ModelTypeEnum.textGeneration)
 
   const { data: buildInTools } = useAllBuiltInTools()
   const { data: customTools } = useAllCustomTools()
@@ -476,14 +555,24 @@ export const useWorkflowVibe = () => {
   const toolLookup = useMemo(() => {
     const map = new Map()
     toolOptions.forEach((tool) => {
+      // Primary key: provider_id/tool_name (e.g., "google/google_search")
       const primaryKey = normalizeKey(`${tool.provider_id}/${tool.tool_name}`)
       map.set(primaryKey, tool)
+      // Fallback 1: provider_name/tool_name (e.g., "Google/google_search")
       const providerNameKey = normalizeKey(`${tool.provider_name}/${tool.tool_name}`)
       map.set(providerNameKey, tool)
+      // Fallback 2: tool_label (display name)
       const labelKey = normalizeKey(tool.tool_label)
       map.set(labelKey, tool)
+
+      // Fallback 3: tool_name alone (for partial matching when model omits provider)
+      const toolNameKey = normalizeKey(tool.tool_name)
+      if (!map.has(toolNameKey)) {
+        // Only set if not already taken (avoid collisions between providers)
+        map.set(toolNameKey, tool)
+      }
     })
     return map
   }, [toolOptions])
@@ -502,6 +591,310 @@ export const useWorkflowVibe = () => {
     return map
   }, [nodesMetaDataMap])
 
+  const createGraphFromBackendNodes = useCallback(async (
+    backendNodes: BackendNodeSpec[],
+    backendEdges: BackendEdgeSpec[],
+  ): Promise<{ nodes: Node[]; edges: Edge[] }> => {
+    const { getNodes } = store.getState()
+    const nodes = getNodes()
+
+    if (!nodesMetaDataMap) {
+      Toast.notify({ type: 'error', message: t('workflow.vibe.nodesUnavailable') })
+      return { nodes: [], edges: [] }
+    }
+
+    const existingStartNode = nodes.find(node => node.data.type === BlockEnum.Start)
+    const newNodes: Node[] = []
+    const nodeIdMap = new Map<string, Node>()
+
+    for (const nodeSpec of backendNodes) {
+      // Map string type to BlockEnum
+      const typeKey = normalizeKey(nodeSpec.type)
+      const nodeType = nodeTypeLookup.get(typeKey)
+      if (!nodeType) {
+        // Skip unknown node types
+        continue
+      }
+
+      if (nodeType === BlockEnum.Start && existingStartNode) {
+        // Merge backend variables into existing Start node
+        const backendVariables = (nodeSpec.config?.variables as Array<Record<string, unknown>>) || []
+        if (backendVariables.length > 0) {
+          const existingVariables = (existingStartNode.data.variables as Array<Record<string, unknown>>) || []
+          // Add new variables that don't already exist
+          for (const backendVar of backendVariables) {
+            const varName = backendVar.variable as string
+            const exists = existingVariables.some(v => v.variable === varName)
+            if (!exists) {
+              existingVariables.push(backendVar)
+            }
+          }
+          // Note: existingStartNode is already on the canvas, so it is not added
+          // to newNodes; mapping its id below lets generated edges and variable
+          // references resolve to the real Start node.
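+          // e.g. a backend variable spec { variable: 'url', label: 'URL',
+          // type: 'text-input' } (hypothetical shape) is appended only when the
+          // Start node has no 'url' variable yet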
+        }
+
+        nodeIdMap.set(nodeSpec.id, existingStartNode)
+        continue
+      }
+
+      const nodeDefault = nodesMetaDataMap[nodeType]
+      if (!nodeDefault)
+        continue
+
+      const defaultValue = nodeDefault.defaultValue || {}
+      const title = nodeSpec.title?.trim() || nodeDefault.metaData.title || defaultValue.title || nodeSpec.type
+
+      // For tool nodes, try to get tool default value from config
+      let toolDefaultValue: ToolDefaultValue | undefined
+      if (nodeType === BlockEnum.Tool && nodeSpec.config) {
+        const toolName = nodeSpec.config.tool_name as string | undefined
+        const providerId = nodeSpec.config.provider_id as string | undefined
+        if (toolName && providerId) {
+          const toolKey = normalizeKey(`${providerId}/${toolName}`)
+          toolDefaultValue = toolLookup.get(toolKey) || toolLookup.get(normalizeKey(toolName))
+        }
+      }
+
+      const desc = (toolDefaultValue?.tool_description || (defaultValue as { desc?: string }).desc || '') as string
+
+      // Merge backend config into node data
+      // Backend provides: { url: "{{#start.url#}}", method: "GET", ... }
+      const backendConfig = nodeSpec.config || {}
+
+      // Deep merge for nested objects (e.g., body, authorization) to preserve required fields
+      const mergedConfig: Record<string, unknown> = { ...backendConfig }
+      const defaultValueRecord = defaultValue as Record<string, unknown>
+
+      // For http-request nodes, ensure body has all required fields
+      if (nodeType === BlockEnum.HttpRequest) {
+        const defaultBody = defaultValueRecord.body as Record<string, unknown> | undefined
+        const backendBody = backendConfig.body as Record<string, unknown> | undefined
+        if (defaultBody || backendBody) {
+          mergedConfig.body = {
+            type: 'none',
+            data: [],
+            ...(defaultBody || {}),
+            ...(backendBody || {}),
+          }
+          // Ensure data is always an array
+          if (!Array.isArray((mergedConfig.body as Record<string, unknown>).data)) {
+            (mergedConfig.body as Record<string, unknown>).data = []
+          }
+        }
+
+        // Ensure authorization has type
+        const defaultAuth = defaultValueRecord.authorization as Record<string, unknown> | undefined
+        const backendAuth = backendConfig.authorization as Record<string, unknown> | undefined
+        if (defaultAuth || backendAuth) {
+          mergedConfig.authorization = {
+            type: 'no-auth',
+            ...(defaultAuth || {}),
+            ...(backendAuth || {}),
+          }
+        }
+      }
+
+      // For any node with model config, ALWAYS use user's default model
+      if (backendConfig.model && defaultModel) {
+        mergedConfig.model = {
+          provider: defaultModel.provider.provider,
+          name: defaultModel.model,
+          mode: 'chat',
+        }
+      }
+
+      const data = {
+        ...(defaultValue as Record<string, unknown>),
+        title,
+        desc,
+        type: nodeType,
+        selected: false,
+        ...(toolDefaultValue || {}),
+        // Apply backend-generated config (url, method, headers, etc.)
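+        // e.g. { url: '{{#start.url#}}', method: 'GET' } (illustrative values)
+        // from mergedConfig override the defaults spread above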
+        ...mergedConfig,
+      }
+
+      const newNode = generateNewNode({
+        id: uuid4(),
+        type: getNodeCustomTypeByNodeDataType(nodeType),
+        data,
+        position: nodeSpec.position || { x: 0, y: 0 },
+      }).newNode
+
+      newNodes.push(newNode)
+      nodeIdMap.set(nodeSpec.id, newNode)
+    }
+
+    // Replace variable references in all node configs using the nodeIdMap
+    for (const node of newNodes) {
+      node.data = replaceVariableReferences(node.data, nodeIdMap) as typeof node.data
+    }
+
+    if (!newNodes.length) {
+      Toast.notify({ type: 'error', message: t('workflow.vibe.invalidFlowchart') })
+      return { nodes: [], edges: [] }
+    }
+
+    const buildEdge = (
+      source: Node,
+      target: Node,
+      sourceHandle = 'source',
+      targetHandle = 'target',
+    ): Edge => ({
+      id: `${source.id}-${sourceHandle}-${target.id}-${targetHandle}`,
+      type: CUSTOM_EDGE,
+      source: source.id,
+      sourceHandle,
+      target: target.id,
+      targetHandle,
+      data: {
+        sourceType: source.data.type,
+        targetType: target.data.type,
+        isInIteration: false,
+        isInLoop: false,
+        _connectedNodeIsSelected: false,
+      },
+      zIndex: 0,
+    })
+
+    const newEdges: Edge[] = []
+    for (const edgeSpec of backendEdges) {
+      const sourceNode = nodeIdMap.get(edgeSpec.source)
+      const targetNode = nodeIdMap.get(edgeSpec.target)
+      if (!sourceNode || !targetNode)
+        continue
+
+      // Default to the generic 'source' handle; if-else branch edges keep the
+      // branch handle (e.g. 'true'/'false') provided by the backend spec
+      const sourceHandle = edgeSpec.sourceHandle || 'source'
+
+      newEdges.push(buildEdge(sourceNode, targetNode, sourceHandle, edgeSpec.targetHandle || 'target'))
+    }
+
+    // Layout nodes
+    const bounds = nodes.reduce(
+      (acc, node) => {
+        const width = node.width ?? NODE_WIDTH
+        acc.maxX = Math.max(acc.maxX, node.position.x + width)
+        acc.minY = Math.min(acc.minY, node.position.y)
+        return acc
+      },
+      { maxX: 0, minY: 0 },
+    )
+
+    const baseX = nodes.length ? bounds.maxX + NODE_WIDTH_X_OFFSET : 0
+    const baseY = Number.isFinite(bounds.minY) ? bounds.minY : 0
+    const branchOffset = Math.max(120, NODE_WIDTH_X_OFFSET / 2)
+
+    const layoutNodeIds = new Set(newNodes.map(node => node.id))
+    const layoutEdges = newEdges.filter(edge =>
+      layoutNodeIds.has(edge.source) && layoutNodeIds.has(edge.target),
+    )
+
+    try {
+      const layout = await getLayoutByDagre(newNodes, layoutEdges)
+      const layoutedNodes = newNodes.map((node) => {
+        const info = layout.nodes.get(node.id)
+        if (!info)
+          return node
+        return {
+          ...node,
+          position: {
+            x: baseX + info.x,
+            y: baseY + info.y,
+          },
+        }
+      })
+      newNodes.splice(0, newNodes.length, ...layoutedNodes)
+    }
+    catch {
+      newNodes.forEach((node, index) => {
+        const row = Math.floor(index / 4)
+        const col = index % 4
+        node.position = {
+          x: baseX + col * NODE_WIDTH_X_OFFSET,
+          y: baseY + row * branchOffset,
+        }
+      })
+    }
+
+    return {
+      nodes: newNodes,
+      edges: newEdges,
+    }
+  }, [
+    defaultModel,
+    nodeTypeLookup,
+    nodesMetaDataMap,
+    store,
+    t,
+    toolLookup,
+  ])
+
+  // Apply backend-provided nodes directly (bypasses mermaid parsing)
+  const applyBackendNodesToWorkflow = useCallback(async (
+    backendNodes: BackendNodeSpec[],
+    backendEdges: BackendEdgeSpec[],
+  ) => {
+    const { getNodes, setNodes, edges, setEdges } = store.getState()
+    const nodes = getNodes()
+    const {
+      setShowVibePanel,
+    } = workflowStore.getState()
+
+    const { nodes: newNodes, edges: newEdges } = await createGraphFromBackendNodes(backendNodes, backendEdges)
+
+    if (newNodes.length === 0) {
+      setShowVibePanel(false)
+      return
+    }
+
+    const allNodes = [...nodes, ...newNodes]
+    const nodesConnectedMap = getNodesConnectedSourceOrTargetHandleIdsMap(
+      newEdges.map(edge => ({ type: 'add', edge })),
+      allNodes,
+    )
+
+    const updatedNodes = allNodes.map((node) => {
+      const connected = nodesConnectedMap[node.id]
+      if (!connected)
+        return node
+
+      return {
+        ...node,
+        data: {
+          ...node.data,
+          ...connected,
+          _connectedSourceHandleIds: dedupeHandles(connected._connectedSourceHandleIds),
+          _connectedTargetHandleIds: dedupeHandles(connected._connectedTargetHandleIds),
+        },
+      }
+    })
+
+    setNodes(updatedNodes)
+    setEdges([...edges, ...newEdges])
+    saveStateToHistory(WorkflowHistoryEvent.NodeAdd, { nodeId: newNodes[0].id })
+    handleSyncWorkflowDraft()
+
+    workflowStore.setState(state => ({
+      ...state,
+      showVibePanel: false,
+      vibePanelMermaidCode: '',
+    }))
+  }, [
+    createGraphFromBackendNodes,
+    handleSyncWorkflowDraft,
+    saveStateToHistory,
+    store,
+  ])
+
   const flowchartToWorkflowGraph = useCallback(async (mermaidCode: string): Promise<{ nodes: Node[]; edges: Edge[] }> => {
     const { getNodes } = store.getState()
     const nodes = getNodes()
@@ -699,7 +1092,7 @@
       nodes: updatedNodes,
       edges: newEdges,
     }
-  }, [nodeTypeLookup, toolLookup])
+  }, [nodeTypeLookup, nodesMetaDataMap, store, t, toolLookup])
 
   const applyFlowchartToWorkflow = useCallback(() => {
     if (!currentFlowGraph || !currentFlowGraph.nodes || currentFlowGraph.nodes.length === 0) {
@@ -724,15 +1117,16 @@
   }, [
     currentFlowGraph,
     handleSyncWorkflowDraft,
-    nodeTypeLookup,
-    nodesMetaDataMap,
    saveStateToHistory,
     store,
     t,
-    toolLookup,
   ])
 
-  const handleVibeCommand = useCallback(async (dsl?: string, skipPanelPreview = false) => {
+  const handleVibeCommand = useCallback(async (
+    dsl?: string,
+    skipPanelPreview = false,
+    regenerateMode = false,
+  ) => {
     if (getNodesReadOnly()) {
       Toast.notify({ type: 'error', message: t('workflow.vibe.readOnly') })
       return
@@ -768,6 +1162,9 @@
       isVibeGenerating: true,
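+      // Reset streamed output from any previous run before generating anew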
       vibePanelMermaidCode: '',
       vibePanelInstruction: trimmed,
+      vibePanelIntent: '',
+      vibePanelMessage: '',
+      vibePanelSuggestions: [],
     }))
 
     try {
@@ -790,71 +1187,173 @@
         tool_name: tool.tool_name,
         tool_label: tool.tool_label,
         tool_key: `${tool.provider_id}/${tool.tool_name}`,
+        tool_description: tool.tool_description,
+        is_team_authorization: tool.is_team_authorization,
+        // Include parameter schemas so backend can inform model how to use tools
+        parameters: tool.paramSchemas,
+        output_schema: tool.output_schema,
       }))
 
-      const availableNodesPayload = availableNodesList.map(node => ({
-        type: node.type,
-        title: node.title,
-        description: node.description,
-      }))
+      const stream = await generateFlowchart({
+        instruction: trimmed,
+        model_config: latestModelConfig!,
+        existing_nodes: existingNodesPayload,
+        tools: toolsPayload,
+        regenerate_mode: regenerateMode,
+      })
 
-      let mermaidCode = trimmed
-      if (!isMermaidFlowchart(trimmed)) {
-        const { error, flowchart } = await generateFlowchart({
-          instruction: trimmed,
-          model_config: latestModelConfig,
-          available_nodes: availableNodesPayload,
-          existing_nodes: existingNodesPayload,
-          available_tools: toolsPayload,
-        })
+      let mermaidCode = ''
+      let backendNodes: BackendNodeSpec[] | undefined
+      let backendEdges: BackendEdgeSpec[] | undefined
 
-        if (error) {
-          Toast.notify({ type: 'error', message: error })
-          setIsVibeGenerating(false)
-          return
-        }
+      const reader = stream.getReader()
+      const decoder = new TextDecoder()
+      // Buffer partial SSE lines: a frame can be split across stream chunks
+      let sseBuffer = ''
+
+      while (true) {
+        const { done, value } = await reader.read()
+        if (done)
+          break
+
+        sseBuffer += decoder.decode(value, { stream: true })
+        const lines = sseBuffer.split('\n')
+        // Keep the trailing (possibly incomplete) line for the next chunk
+        sseBuffer = lines.pop() ?? ''
+
+        for (const line of lines) {
+          if (!line.trim() || !line.startsWith('data: '))
+            continue
+
+          try {
+            const data = JSON.parse(line.slice(6))
+            if (data.event === 'message' || data.event === 'workflow_generated') {
+              if (data.data?.text) {
+                mermaidCode += data.data.text
+                workflowStore.setState(state => ({
+                  ...state,
+                  vibePanelMermaidCode: mermaidCode,
+                }))
+              }
+              if (data.data?.nodes) {
+                backendNodes = data.data.nodes
+                workflowStore.setState(state => ({
+                  ...state,
+                  vibePanelBackendNodes: backendNodes,
+                }))
+              }
+              if (data.data?.edges) {
+                backendEdges = data.data.edges
+                workflowStore.setState(state => ({
+                  ...state,
+                  vibePanelBackendEdges: backendEdges,
+                }))
+              }
+              if (data.data?.intent) {
+                workflowStore.setState(state => ({
+                  ...state,
+                  vibePanelIntent: data.data.intent,
+                }))
+              }
+              if (data.data?.message) {
+                workflowStore.setState(state => ({
+                  ...state,
+                  vibePanelMessage: data.data.message,
+                }))
+              }
+              if (data.data?.suggestions) {
+                workflowStore.setState(state => ({
+                  ...state,
+                  vibePanelSuggestions: data.data.suggestions,
+                }))
+              }
+            }
+          }
+          catch (e) {
+            console.error('Error parsing chunk:', e)
+          }
+        }
+      }
 
-        if (!flowchart) {
-          Toast.notify({ type: 'error', message: t('workflow.vibe.missingFlowchart') })
-          setIsVibeGenerating(false)
-          return
-        }
-
-        mermaidCode = flowchart
-      }
-
-      workflowStore.setState(state => ({
-        ...state,
-        vibePanelMermaidCode: mermaidCode,
-        isVibeGenerating: false,
-      }))
+      setIsVibeGenerating(false)
 
-      const workflowGraph = await flowchartToWorkflowGraph(mermaidCode)
-      addVersion(workflowGraph)
+      // Add version for preview
+      if (backendNodes && backendNodes.length > 0 && backendEdges) {
+        const graph = await createGraphFromBackendNodes(backendNodes, backendEdges)
+        addVersion(graph)
+      }
+      else if (mermaidCode) {
+        const graph = await flowchartToWorkflowGraph(mermaidCode)
+        addVersion(graph)
+      }
 
-      if (skipPanelPreview)
-        applyFlowchartToWorkflow()
+      if (skipPanelPreview) {
+        // Prefer backend nodes (already sanitized) over mermaid re-parsing
+        if (backendNodes && backendNodes.length > 0 && backendEdges) {
+          await applyBackendNodesToWorkflow(backendNodes, backendEdges)
+        }
+        else {
+          await applyFlowchartToWorkflow()
+        }
+      }
+    }
+    catch (error: unknown) {
+      // Handle API errors (e.g., network errors, server errors)
+      const { setIsVibeGenerating } = workflowStore.getState()
+      setIsVibeGenerating(false)
+
+      // Extract error message from Response object or Error
+      let errorMessage = t('workflow.vibe.generateError')
+      if (error instanceof Response) {
+        try {
+          const errorData = await error.json()
+          errorMessage = errorData?.message || errorMessage
+        }
+        catch {
+          // If we can't parse the response, use the default error message
+        }
+      }
+      else if (error instanceof Error) {
+        errorMessage = error.message || errorMessage
+      }
+
+      Toast.notify({ type: 'error', message: errorMessage })
    }
     finally {
       isGeneratingRef.current = false
     }
   }, [
-    availableNodesList,
+    addVersion,
+    applyBackendNodesToWorkflow,
+    applyFlowchartToWorkflow,
+    createGraphFromBackendNodes,
+    flowchartToWorkflowGraph,
+    getLatestModelConfig,
     getNodesReadOnly,
-    handleSyncWorkflowDraft,
     nodeTypeLookup,
     nodesMetaDataMap,
-    saveStateToHistory,
     store,
     t,
-    toolLookup,
     toolOptions,
-    getLatestModelConfig,
   ])
 
-  const handleAccept = useCallback(() => {
-    applyFlowchartToWorkflow()
-  }, [applyFlowchartToWorkflow])
+  const handleRegenerate = useCallback(async () => {
+    if (!lastInstructionRef.current) {
+      Toast.notify({ type: 'error', message: t('workflow.vibe.missingInstruction') })
+      return
+    }
+
+    // Pass regenerateMode=true to include previous workflow context
+    await handleVibeCommand(lastInstructionRef.current, false, true)
+  }, [handleVibeCommand, t])
+
+  const handleAccept = useCallback(async (_vibePanelMermaidCode: string | undefined) => {
+    // Parameter unused: backend nodes / mermaid code are read from the store
+    // Prefer backend nodes (already sanitized) over mermaid re-parsing
+    const { vibePanelBackendNodes, vibePanelBackendEdges } = workflowStore.getState()
+    if (vibePanelBackendNodes && vibePanelBackendNodes.length > 0 && vibePanelBackendEdges) {
+      await applyBackendNodesToWorkflow(vibePanelBackendNodes, vibePanelBackendEdges)
+    }
+    else {
+      // Use applyFlowchartToWorkflow which uses currentFlowGraph (populated by addVersion)
+      applyFlowchartToWorkflow()
+    }
+  }, [applyBackendNodesToWorkflow, applyFlowchartToWorkflow])
 
   useEffect(() => {
     const handler = (event: CustomEvent) => {
@@ -862,7 +1361,7 @@
     }
 
     const acceptHandler = () => {
-      handleAccept()
+      handleAccept(undefined)
     }
 
     document.addEventListener(VIBE_COMMAND_EVENT, handler as EventListener)
diff --git a/web/app/components/workflow/nodes/_base/components/variable/utils.ts b/web/app/components/workflow/nodes/_base/components/variable/utils.ts
index a7dc04e571..b7c7125ca6 100644
--- a/web/app/components/workflow/nodes/_base/components/variable/utils.ts
+++ b/web/app/components/workflow/nodes/_base/components/variable/utils.ts
@@ -1390,9 +1390,9 @@ export const getNodeUsedVars = (node: Node): ValueSelector[] => {
       payload.url,
       payload.headers,
       payload.params,
-      typeof payload.body.data === 'string'
+      typeof payload.body?.data === 'string'
         ? payload.body.data
-        : payload.body.data.map(d => d.value).join(''),
+        : (payload.body?.data?.map(d => d.value).join('') ??
''), ]) break } diff --git a/web/app/components/workflow/nodes/http/hooks/use-key-value-list.ts b/web/app/components/workflow/nodes/http/hooks/use-key-value-list.ts index 650ae47156..d5a4f3d872 100644 --- a/web/app/components/workflow/nodes/http/hooks/use-key-value-list.ts +++ b/web/app/components/workflow/nodes/http/hooks/use-key-value-list.ts @@ -5,6 +5,9 @@ import { useCallback, useEffect, useState } from 'react' const UNIQUE_ID_PREFIX = 'key-value-' const strToKeyValueList = (value: string) => { + if (typeof value !== 'string' || !value) + return [] + return value.split('\n').map((item) => { const [key, ...others] = item.split(':') return { @@ -16,7 +19,7 @@ const strToKeyValueList = (value: string) => { } const useKeyValueList = (value: string, onChange: (value: string) => void, noFilter?: boolean) => { - const [list, doSetList] = useState(() => value ? strToKeyValueList(value) : []) + const [list, doSetList] = useState(() => typeof value === 'string' && value ? strToKeyValueList(value) : []) const setList = (l: KeyValue[]) => { doSetList(l.map((item) => { return { diff --git a/web/app/components/workflow/panel/vibe-panel/index.tsx b/web/app/components/workflow/panel/vibe-panel/index.tsx index 966172518c..87579b454a 100644 --- a/web/app/components/workflow/panel/vibe-panel/index.tsx +++ b/web/app/components/workflow/panel/vibe-panel/index.tsx @@ -3,7 +3,7 @@ import type { FC } from 'react' import type { FormValue } from '@/app/components/header/account-setting/model-provider-page/declarations' import type { CompletionParams, Model } from '@/types/app' -import { RiClipboardLine } from '@remixicon/react' +import { RiCheckLine, RiClipboardLine, RiInformation2Line, RiRefreshLine } from '@remixicon/react' import copy from 'copy-to-clipboard' import { useCallback, useEffect, useState } from 'react' import { useTranslation } from 'react-i18next' @@ -39,6 +39,14 @@ const VibePanel: FC = () => { const vibePanelPreviewNodes = currentFlowGraph?.nodes || [] const vibePanelPreviewEdges = currentFlowGraph?.edges || [] + + const setVibePanelInstruction = useStore(s => s.setVibePanelInstruction) + const vibePanelIntent = useStore(s => s.vibePanelIntent) + const setVibePanelIntent = useStore(s => s.setVibePanelIntent) + const vibePanelMessage = useStore(s => s.vibePanelMessage) + const setVibePanelMessage = useStore(s => s.setVibePanelMessage) + const vibePanelSuggestions = useStore(s => s.vibePanelSuggestions) + const setVibePanelSuggestions = useStore(s => s.setVibePanelSuggestions) const localModel = localStorage.getItem('auto-gen-model') ? 
      JSON.parse(localStorage.getItem('auto-gen-model') as string) as Model
@@ -97,13 +105,13 @@
   }, [workflowStore])
 
   const handleClose = useCallback(() => {
-    workflowStore.setState(state => ({
-      ...state,
-      showVibePanel: false,
-      vibePanelMermaidCode: '',
-      isVibeGenerating: false,
-    }))
-  }, [workflowStore])
+    setShowVibePanel(false)
+    setVibePanelMermaidCode('')
+    setIsVibeGenerating(false)
+    setVibePanelIntent('')
+    setVibePanelMessage('')
+    setVibePanelSuggestions([])
+  }, [setShowVibePanel, setVibePanelMermaidCode, setIsVibeGenerating, setVibePanelIntent, setVibePanelMessage, setVibePanelSuggestions])
 
   const handleGenerate = useCallback(() => {
     const event = new CustomEvent(VIBE_COMMAND_EVENT, {
@@ -124,6 +132,15 @@
     Toast.notify({ type: 'success', message: t('common.actionMsg.copySuccessfully') })
   }, [workflowStore, t])
 
+  const handleSuggestionClick = useCallback((suggestion: string) => {
+    setVibePanelInstruction(suggestion)
+    // Trigger generation with the suggestion
+    const event = new CustomEvent(VIBE_COMMAND_EVENT, {
+      detail: { dsl: suggestion },
+    })
+    document.dispatchEvent(event)
+  }, [setVibePanelInstruction])
+
   if (!showVibePanel)
     return null
 
@@ -134,6 +151,40 @@
   )
 
+  const renderOffTopic = (
+    <div>
+      <RiInformation2Line />
+      <div>
+        {t('workflow.vibe.offTopicTitle')}
+      </div>
+      <div>
+        {vibePanelMessage || t('workflow.vibe.offTopicDefault')}
+      </div>
+      {vibePanelSuggestions.length > 0 && (
+        <div>
+          <div>
+            {t('workflow.vibe.trySuggestion')}
+          </div>
+          {vibePanelSuggestions.map((suggestion, index) => (
+            <button key={index} type='button' onClick={() => handleSuggestionClick(suggestion)}>
+              {suggestion}
+            </button>
+          ))}
+        </div>
+      )}
+    </div>
+ ) + return ( { - {!isVibeGenerating && vibePanelPreviewNodes.length > 0 && ( + {!isVibeGenerating && vibePanelIntent === 'off_topic' && renderOffTopic} + {!isVibeGenerating && vibePanelIntent !== 'off_topic' && (vibePanelPreviewNodes.length > 0 || vibePanelMermaidCode) && (
@@ -226,7 +278,7 @@ const VibePanel: FC = () => {
)} {isVibeGenerating && renderLoading} - {!isVibeGenerating && vibePanelPreviewNodes.length === 0 && } + {!isVibeGenerating && vibePanelIntent !== 'off_topic' && vibePanelPreviewNodes.length === 0 && !vibePanelMermaidCode && }
   )
diff --git a/web/app/components/workflow/store/workflow/panel-slice.ts b/web/app/components/workflow/store/workflow/panel-slice.ts
index e90418823a..5cb2f8193f 100644
--- a/web/app/components/workflow/store/workflow/panel-slice.ts
+++ b/web/app/components/workflow/store/workflow/panel-slice.ts
@@ -1,5 +1,8 @@
+import type { BackendEdgeSpec, BackendNodeSpec } from '@/service/debug'
 import type { StateCreator } from 'zustand'
 
+export type VibeIntent = 'generate' | 'off_topic' | 'error' | ''
+
 export type PanelSliceShape = {
   panelWidth: number
   showFeaturesPanel: boolean
@@ -26,6 +29,24 @@ export type PanelSliceShape = {
   setInitShowLastRunTab: (initShowLastRunTab: boolean) => void
   showVibePanel: boolean
   setShowVibePanel: (showVibePanel: boolean) => void
+  vibePanelMermaidCode: string
+  setVibePanelMermaidCode: (vibePanelMermaidCode: string) => void
+  vibePanelBackendNodes?: BackendNodeSpec[]
+  setVibePanelBackendNodes: (nodes?: BackendNodeSpec[]) => void
+  vibePanelBackendEdges?: BackendEdgeSpec[]
+  setVibePanelBackendEdges: (edges?: BackendEdgeSpec[]) => void
+  isVibeGenerating: boolean
+  setIsVibeGenerating: (isVibeGenerating: boolean) => void
+  vibePanelInstruction: string
+  setVibePanelInstruction: (vibePanelInstruction: string) => void
+  vibePanelIntent: VibeIntent
+  setVibePanelIntent: (vibePanelIntent: VibeIntent) => void
+  vibePanelMessage: string
+  setVibePanelMessage: (vibePanelMessage: string) => void
+  vibePanelSuggestions: string[]
+  setVibePanelSuggestions: (vibePanelSuggestions: string[]) => void
+  vibePanelLastWarnings: string[]
+  setVibePanelLastWarnings: (vibePanelLastWarnings: string[]) => void
 }
 
 export const createPanelSlice: StateCreator<PanelSliceShape> = set => ({
@@ -48,4 +69,22 @@
   setInitShowLastRunTab: initShowLastRunTab => set(() => ({ initShowLastRunTab })),
   showVibePanel: false,
   setShowVibePanel: showVibePanel => set(() => ({ showVibePanel })),
+  vibePanelMermaidCode: '',
+  setVibePanelMermaidCode: vibePanelMermaidCode => set(() => ({ vibePanelMermaidCode })),
+  vibePanelBackendNodes: undefined,
+  setVibePanelBackendNodes: vibePanelBackendNodes => set(() => ({ vibePanelBackendNodes })),
+  vibePanelBackendEdges: undefined,
+  setVibePanelBackendEdges: vibePanelBackendEdges => set(() => ({ vibePanelBackendEdges })),
+  isVibeGenerating: false,
+  setIsVibeGenerating: isVibeGenerating => set(() => ({ isVibeGenerating })),
+  vibePanelInstruction: '',
+  setVibePanelInstruction: vibePanelInstruction => set(() => ({ vibePanelInstruction })),
+  vibePanelIntent: '',
+  setVibePanelIntent: vibePanelIntent => set(() => ({ vibePanelIntent })),
+  vibePanelMessage: '',
+  setVibePanelMessage: vibePanelMessage => set(() => ({ vibePanelMessage })),
+  vibePanelSuggestions: [],
+  setVibePanelSuggestions: vibePanelSuggestions => set(() => ({ vibePanelSuggestions })),
+  vibePanelLastWarnings: [],
+  setVibePanelLastWarnings: vibePanelLastWarnings => set(() => ({ vibePanelLastWarnings })),
 })
diff --git a/web/i18n/en-US/workflow.ts b/web/i18n/en-US/workflow.ts
index 9d00be30c7..203b3197a4 100644
--- a/web/i18n/en-US/workflow.ts
+++ b/web/i18n/en-US/workflow.ts
@@ -140,6 +140,10 @@ const translation = {
     regenerate: 'Regenerate',
     apply: 'Apply',
     noFlowchart: 'No flowchart provided',
+    offTopicDefault: 'I\'m the Dify workflow design assistant. I can help you create AI automation workflows, but I can\'t answer general questions. Would you like to create a workflow instead?',
+    offTopicTitle: 'Off-Topic Request',
+    trySuggestion: 'Try one of these suggestions:',
+    generateError: 'Failed to generate workflow. Please try again.',
   },
   publishLimit: {
     startNodeTitlePrefix: 'Upgrade to',
diff --git a/web/service/debug.ts b/web/service/debug.ts
index 40aa8c2173..7e69fb5e29 100644
--- a/web/service/debug.ts
+++ b/web/service/debug.ts
@@ -19,8 +19,45 @@ export type GenRes = {
   error?: string
 }
 
+export type ToolRecommendation = {
+  requested_capability: string
+  unconfigured_tools: Array<{
+    provider_id: string
+    tool_name: string
+    description: string
+  }>
+  configured_alternatives: Array<{
+    provider_id: string
+    tool_name: string
+    description: string
+  }>
+  recommendation: string
+}
+
+export type BackendNodeSpec = {
+  id: string
+  type: string
+  title?: string
+  config?: Record<string, unknown>
+  position?: { x: number; y: number }
+}
+
+export type BackendEdgeSpec = {
+  source: string
+  target: string
+  sourceHandle?: string
+  targetHandle?: string
+}
+
 export type FlowchartGenRes = {
+  intent?: 'generate' | 'off_topic' | 'error'
   flowchart: string
+  nodes?: BackendNodeSpec[]
+  edges?: BackendEdgeSpec[]
+  message?: string
+  warnings?: string[]
+  suggestions?: string[]
+  tool_recommendations?: ToolRecommendation[]
   error?: string
 }
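+
+// Illustrative FlowchartGenRes shape (hypothetical values):
+// {
+//   intent: 'generate',
+//   flowchart: 'flowchart TD\n  start_1 --> llm_1',
+//   nodes: [
+//     { id: 'start_1', type: 'start' },
+//     { id: 'llm_1', type: 'llm', config: { prompt_template: [] } },
+//   ],
+//   edges: [{ source: 'start_1', target: 'llm_1' }],
+//   warnings: [],
+// }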