diff --git a/api/controllers/console/apikey.py b/api/controllers/console/apikey.py
index efab1bc03c..7c39b04464 100644
--- a/api/controllers/console/apikey.py
+++ b/api/controllers/console/apikey.py
@@ -1,5 +1,4 @@
import flask_restx
-from flask import Response
from flask_restx import Resource, fields, marshal_with
from flask_restx._http import HTTPStatus
from sqlalchemy import select
@@ -156,11 +155,6 @@ class AppApiKeyListResource(BaseApiKeyListResource):
"""Create a new API key for an app"""
return super().post(resource_id)
- def after_request(self, resp: Response):
- resp.headers["Access-Control-Allow-Origin"] = "*"
- resp.headers["Access-Control-Allow-Credentials"] = "true"
- return resp
-
resource_type = "app"
resource_model = App
resource_id_field = "app_id"
@@ -177,11 +171,6 @@ class AppApiKeyResource(BaseApiKeyResource):
"""Delete an API key for an app"""
return super().delete(resource_id, api_key_id)
- def after_request(self, resp):
- resp.headers["Access-Control-Allow-Origin"] = "*"
- resp.headers["Access-Control-Allow-Credentials"] = "true"
- return resp
-
resource_type = "app"
resource_model = App
resource_id_field = "app_id"
@@ -206,11 +195,6 @@ class DatasetApiKeyListResource(BaseApiKeyListResource):
"""Create a new API key for a dataset"""
return super().post(resource_id)
- def after_request(self, resp: Response):
- resp.headers["Access-Control-Allow-Origin"] = "*"
- resp.headers["Access-Control-Allow-Credentials"] = "true"
- return resp
-
resource_type = "dataset"
resource_model = Dataset
resource_id_field = "dataset_id"
@@ -227,11 +211,6 @@ class DatasetApiKeyResource(BaseApiKeyResource):
"""Delete an API key for a dataset"""
return super().delete(resource_id, api_key_id)
- def after_request(self, resp: Response):
- resp.headers["Access-Control-Allow-Origin"] = "*"
- resp.headers["Access-Control-Allow-Credentials"] = "true"
- return resp
-
resource_type = "dataset"
resource_model = Dataset
resource_id_field = "dataset_id"
diff --git a/api/core/rag/extractor/firecrawl/firecrawl_app.py b/api/core/rag/extractor/firecrawl/firecrawl_app.py
index c20ecd2b89..789ac8557d 100644
--- a/api/core/rag/extractor/firecrawl/firecrawl_app.py
+++ b/api/core/rag/extractor/firecrawl/firecrawl_app.py
@@ -25,7 +25,7 @@ class FirecrawlApp:
}
if params:
json_data.update(params)
- response = self._post_request(f"{self.base_url}/v1/scrape", json_data, headers)
+ response = self._post_request(f"{self.base_url}/v2/scrape", json_data, headers)
if response.status_code == 200:
response_data = response.json()
data = response_data["data"]
@@ -42,7 +42,7 @@ class FirecrawlApp:
json_data = {"url": url}
if params:
json_data.update(params)
- response = self._post_request(f"{self.base_url}/v1/crawl", json_data, headers)
+ response = self._post_request(f"{self.base_url}/v2/crawl", json_data, headers)
if response.status_code == 200:
# There are also two other fields in the response: "success" (bool) and "url" (str)
job_id = response.json().get("id")
@@ -51,9 +51,25 @@ class FirecrawlApp:
self._handle_error(response, "start crawl job")
return "" # unreachable
+ def map(self, url: str, params: dict[str, Any] | None = None) -> dict[str, Any]:
+ # Documentation: https://docs.firecrawl.dev/api-reference/endpoint/map
+ headers = self._prepare_headers()
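+ # Tag the request as coming from the Dify integration before applying any caller-supplied params.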
+ json_data: dict[str, Any] = {"url": url, "integration": "dify"}
+ if params:
+ # Pass through provided params, including optional "sitemap": "only" | "include" | "skip"
+ json_data.update(params)
+ response = self._post_request(f"{self.base_url}/v2/map", json_data, headers)
+ if response.status_code == 200:
+ return cast(dict[str, Any], response.json())
+ elif response.status_code in {402, 409, 500, 429, 408}:
+ self._handle_error(response, "start map job")
+ return {}
+ else:
+ raise Exception(f"Failed to start map job. Status code: {response.status_code}")
+
def check_crawl_status(self, job_id) -> dict[str, Any]:
headers = self._prepare_headers()
- response = self._get_request(f"{self.base_url}/v1/crawl/{job_id}", headers)
+ response = self._get_request(f"{self.base_url}/v2/crawl/{job_id}", headers)
if response.status_code == 200:
crawl_status_response = response.json()
if crawl_status_response.get("status") == "completed":
@@ -135,12 +151,16 @@ class FirecrawlApp:
"lang": "en",
"country": "us",
"timeout": 60000,
- "ignoreInvalidURLs": False,
+ "ignoreInvalidURLs": True,
"scrapeOptions": {},
+ "sources": [
+ {"type": "web"},
+ ],
+ "integration": "dify",
}
if params:
json_data.update(params)
- response = self._post_request(f"{self.base_url}/v1/search", json_data, headers)
+ response = self._post_request(f"{self.base_url}/v2/search", json_data, headers)
if response.status_code == 200:
response_data = response.json()
if not response_data.get("success"):
diff --git a/api/core/tools/entities/tool_entities.py b/api/core/tools/entities/tool_entities.py
index 62e3aa8b5d..15a4f0aafd 100644
--- a/api/core/tools/entities/tool_entities.py
+++ b/api/core/tools/entities/tool_entities.py
@@ -189,6 +189,11 @@ class ToolInvokeMessage(BaseModel):
data: Mapping[str, Any] = Field(..., description="Detailed log data")
metadata: Mapping[str, Any] = Field(default_factory=dict, description="The metadata of the log")
+ @field_validator("metadata", mode="before")
+ @classmethod
+ def _normalize_metadata(cls, value: Mapping[str, Any] | None) -> Mapping[str, Any]:
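+ # Treat an explicit None the same as an omitted value so metadata is always a mapping.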
+ return value or {}
+
class RetrieverResourceMessage(BaseModel):
retriever_resources: list[RetrievalSourceMetadata] = Field(..., description="retriever resources")
context: str = Field(..., description="context")
@@ -376,6 +381,11 @@ class ToolEntity(BaseModel):
def set_parameters(cls, v, validation_info: ValidationInfo) -> list[ToolParameter]:
return v or []
+ @field_validator("output_schema", mode="before")
+ @classmethod
+ def _normalize_output_schema(cls, value: Mapping[str, object] | None) -> Mapping[str, object]:
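+ # Mirror set_parameters above: a None output_schema is normalized to an empty mapping.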
+ return value or {}
+
class OAuthSchema(BaseModel):
client_schema: list[ProviderConfig] = Field(
diff --git a/api/core/workflow/graph_engine/command_channels/redis_channel.py b/api/core/workflow/graph_engine/command_channels/redis_channel.py
index c841459170..527647ae3b 100644
--- a/api/core/workflow/graph_engine/command_channels/redis_channel.py
+++ b/api/core/workflow/graph_engine/command_channels/redis_channel.py
@@ -41,6 +41,7 @@ class RedisChannel:
self._redis = redis_client
self._key = channel_key
self._command_ttl = command_ttl
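+ # Marker key set on every send so fetch_commands can skip reading an empty command list.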
+ self._pending_key = f"{channel_key}:pending"
def fetch_commands(self) -> list[GraphEngineCommand]:
"""
@@ -49,6 +50,9 @@ class RedisChannel:
Returns:
List of pending commands (drains the Redis list)
"""
+ if not self._has_pending_commands():
+ return []
+
commands: list[GraphEngineCommand] = []
# Use pipeline for atomic operations
@@ -85,6 +89,7 @@ class RedisChannel:
with self._redis.pipeline() as pipe:
pipe.rpush(self._key, command_json)
pipe.expire(self._key, self._command_ttl)
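+ # Flag that a command is waiting; the marker shares the command TTL.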
+ pipe.set(self._pending_key, "1", ex=self._command_ttl)
pipe.execute()
def _deserialize_command(self, data: dict[str, Any]) -> GraphEngineCommand | None:
@@ -112,3 +117,17 @@ class RedisChannel:
except (ValueError, TypeError):
return None
+
+ def _has_pending_commands(self) -> bool:
+ """
+ Check and consume the pending marker to avoid unnecessary list reads.
+
+ Returns:
+ True if commands should be fetched from Redis.
+ """
+ with self._redis.pipeline() as pipe:
+ pipe.get(self._pending_key)
+ pipe.delete(self._pending_key)
+ pending_value, _ = pipe.execute()
+
+ return pending_value is not None
diff --git a/api/core/workflow/graph_engine/event_management/event_handlers.py b/api/core/workflow/graph_engine/event_management/event_handlers.py
index 7247b17967..1cb5851ab1 100644
--- a/api/core/workflow/graph_engine/event_management/event_handlers.py
+++ b/api/core/workflow/graph_engine/event_management/event_handlers.py
@@ -7,6 +7,7 @@ from collections.abc import Mapping
from functools import singledispatchmethod
from typing import TYPE_CHECKING, final
+from core.model_runtime.entities.llm_entities import LLMUsage
from core.workflow.entities import GraphRuntimeState
from core.workflow.enums import ErrorStrategy, NodeExecutionType
from core.workflow.graph import Graph
@@ -125,6 +126,7 @@ class EventHandler:
node_execution = self._graph_execution.get_or_create_node_execution(event.node_id)
is_initial_attempt = node_execution.retry_count == 0
node_execution.mark_started(event.id)
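+ # Every node start, including retries, counts toward the run's step total.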
+ self._graph_runtime_state.increment_node_run_steps()
# Track in response coordinator for stream ordering
self._response_coordinator.track_node_execution(event.node_id, event.id)
@@ -163,6 +165,8 @@ class EventHandler:
node_execution = self._graph_execution.get_or_create_node_execution(event.node_id)
node_execution.mark_taken()
+ self._accumulate_node_usage(event.node_run_result.llm_usage)
+
# Store outputs in variable pool
self._store_node_outputs(event.node_id, event.node_run_result.outputs)
@@ -212,6 +216,8 @@ class EventHandler:
node_execution.mark_failed(event.error)
self._graph_execution.record_node_failure()
+ self._accumulate_node_usage(event.node_run_result.llm_usage)
+
result = self._error_handler.handle_node_failure(event)
if result:
@@ -235,6 +241,8 @@ class EventHandler:
node_execution = self._graph_execution.get_or_create_node_execution(event.node_id)
node_execution.mark_taken()
+ self._accumulate_node_usage(event.node_run_result.llm_usage)
+
# Persist outputs produced by the exception strategy (e.g. default values)
self._store_node_outputs(event.node_id, event.node_run_result.outputs)
@@ -286,6 +294,19 @@ class EventHandler:
self._state_manager.enqueue_node(event.node_id)
self._state_manager.start_execution(event.node_id)
+ def _accumulate_node_usage(self, usage: LLMUsage) -> None:
+ """Accumulate token usage into the shared runtime state."""
+ if usage.total_tokens <= 0:
+ return
+
+ self._graph_runtime_state.add_tokens(usage.total_tokens)
+
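+ # First non-zero usage replaces the zero-valued default; later usage is merged via plus().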
+ current_usage = self._graph_runtime_state.llm_usage
+ if current_usage.total_tokens == 0:
+ self._graph_runtime_state.llm_usage = usage
+ else:
+ self._graph_runtime_state.llm_usage = current_usage.plus(usage)
+
def _store_node_outputs(self, node_id: str, outputs: Mapping[str, object]) -> None:
"""
Store node outputs in the variable pool.
diff --git a/api/core/workflow/graph_engine/orchestration/dispatcher.py b/api/core/workflow/graph_engine/orchestration/dispatcher.py
index a7229ce4e8..8340c10b49 100644
--- a/api/core/workflow/graph_engine/orchestration/dispatcher.py
+++ b/api/core/workflow/graph_engine/orchestration/dispatcher.py
@@ -8,7 +8,12 @@ import threading
import time
from typing import TYPE_CHECKING, final
-from core.workflow.graph_events.base import GraphNodeEventBase
+from core.workflow.graph_events import (
+ GraphNodeEventBase,
+ NodeRunExceptionEvent,
+ NodeRunFailedEvent,
+ NodeRunSucceededEvent,
+)
from ..event_management import EventManager
from .execution_coordinator import ExecutionCoordinator
@@ -72,13 +77,16 @@ class Dispatcher:
if self._thread and self._thread.is_alive():
self._thread.join(timeout=10.0)
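+ # External commands are polled only after these node-completion events instead of on every loop tick.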
+ _COMMAND_TRIGGER_EVENTS = (
+ NodeRunSucceededEvent,
+ NodeRunFailedEvent,
+ NodeRunExceptionEvent,
+ )
+
def _dispatcher_loop(self) -> None:
"""Main dispatcher loop."""
try:
while not self._stop_event.is_set():
- # Check for commands
- self._execution_coordinator.check_commands()
-
# Check for scaling
self._execution_coordinator.check_scaling()
@@ -87,6 +95,8 @@ class Dispatcher:
event = self._event_queue.get(timeout=0.1)
# Route to the event handler
self._event_handler.dispatch(event)
+ if self._should_check_commands(event):
+ self._execution_coordinator.check_commands()
self._event_queue.task_done()
except queue.Empty:
# Check if execution is complete
@@ -102,3 +112,7 @@ class Dispatcher:
# Signal the event emitter that execution is complete
if self._event_emitter:
self._event_emitter.mark_complete()
+
+ def _should_check_commands(self, event: GraphNodeEventBase) -> bool:
+ """Return True if the event represents a node completion."""
+ return isinstance(event, self._COMMAND_TRIGGER_EVENTS)
diff --git a/api/migrations/versions/2025_10_14_1618-d98acf217d43_add_app_mode_for_messsage.py b/api/migrations/versions/2025_10_14_1618-d98acf217d43_add_app_mode_for_messsage.py
index 0c65e7b7bf..a4a28924a1 100644
--- a/api/migrations/versions/2025_10_14_1618-d98acf217d43_add_app_mode_for_messsage.py
+++ b/api/migrations/versions/2025_10_14_1618-d98acf217d43_add_app_mode_for_messsage.py
@@ -23,43 +23,52 @@ def upgrade():
batch_op.create_index('message_app_mode_idx', ['app_mode'], unique=False)
conn = op.get_bind()
-
+
# Strategy: Update in batches to minimize lock time
# For large tables (millions of rows), this prevents long-running transactions
batch_size = 10000
-
+
print("Starting backfill of app_mode from conversations...")
-
+
# Use a more efficient UPDATE with JOIN
# This query updates messages.app_mode from conversations.mode
- update_query = """
+ # batch_size is a trusted constant, so interpolating it into LIMIT with an f-string is safe
+ update_query = f"""
UPDATE messages m
SET app_mode = c.mode
FROM conversations c
WHERE m.conversation_id = c.id
AND m.app_mode IS NULL
AND m.id IN (
- SELECT id FROM messages
- WHERE app_mode IS NULL
- LIMIT :batch_size
+ SELECT id FROM messages
+ WHERE app_mode IS NULL
+ LIMIT {batch_size}
)
"""
-
+
# Execute batched updates
total_updated = 0
+ iteration = 0
while True:
- result = conn.execute(sa.text(update_query), {"batch_size": batch_size})
- rows_updated = result.rowcount
+ iteration += 1
+ result = conn.execute(sa.text(update_query))
+
+ # Check if result is None or has no rowcount
+ if result is None:
+ print("Warning: Query returned None, stopping backfill")
+ break
+
+ rows_updated = result.rowcount if hasattr(result, 'rowcount') else 0
total_updated += rows_updated
-
+
if rows_updated == 0:
break
-
- print(f"Updated {rows_updated} messages (total: {total_updated})")
-
- # Commit each batch to release locks
- # Note: Alembic auto-commits in upgrade() by default
-
+
+ print(f"Iteration {iteration}: Updated {rows_updated} messages (total: {total_updated})")
+
+ # For very large tables, add a small delay to reduce load
+ # Uncomment if needed: import time; time.sleep(0.1)
+
print(f"Backfill completed. Total messages updated: {total_updated}")
# ### end Alembic commands ###
diff --git a/api/schedule/clean_workflow_runlogs_precise.py b/api/schedule/clean_workflow_runlogs_precise.py
index 485a79782c..db4198720d 100644
--- a/api/schedule/clean_workflow_runlogs_precise.py
+++ b/api/schedule/clean_workflow_runlogs_precise.py
@@ -1,8 +1,11 @@
import datetime
import logging
import time
+from collections.abc import Sequence
import click
+from sqlalchemy import select
+from sqlalchemy.orm import Session, sessionmaker
import app
from configs import dify_config
@@ -35,50 +38,53 @@ def clean_workflow_runlogs_precise():
retention_days = dify_config.WORKFLOW_LOG_RETENTION_DAYS
cutoff_date = datetime.datetime.now() - datetime.timedelta(days=retention_days)
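+ # Each batch gets its own short-lived transaction instead of reusing the long-lived db.session.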
+ session_factory = sessionmaker(db.engine, expire_on_commit=False)
try:
- total_workflow_runs = db.session.query(WorkflowRun).where(WorkflowRun.created_at < cutoff_date).count()
- if total_workflow_runs == 0:
- logger.info("No expired workflow run logs found")
- return
- logger.info("Found %s expired workflow run logs to clean", total_workflow_runs)
+ with session_factory.begin() as session:
+ total_workflow_runs = session.query(WorkflowRun).where(WorkflowRun.created_at < cutoff_date).count()
+ if total_workflow_runs == 0:
+ logger.info("No expired workflow run logs found")
+ return
+ logger.info("Found %s expired workflow run logs to clean", total_workflow_runs)
total_deleted = 0
failed_batches = 0
batch_count = 0
-
while True:
- workflow_runs = (
- db.session.query(WorkflowRun.id).where(WorkflowRun.created_at < cutoff_date).limit(BATCH_SIZE).all()
- )
+ with session_factory.begin() as session:
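+ # Select the next batch of expired run ids in a deterministic order.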
+ workflow_run_ids = session.scalars(
+ select(WorkflowRun.id)
+ .where(WorkflowRun.created_at < cutoff_date)
+ .order_by(WorkflowRun.created_at, WorkflowRun.id)
+ .limit(BATCH_SIZE)
+ ).all()
- if not workflow_runs:
- break
-
- workflow_run_ids = [run.id for run in workflow_runs]
- batch_count += 1
-
- success = _delete_batch_with_retry(workflow_run_ids, failed_batches)
-
- if success:
- total_deleted += len(workflow_run_ids)
- failed_batches = 0
- else:
- failed_batches += 1
- if failed_batches >= MAX_RETRIES:
- logger.error("Failed to delete batch after %s retries, aborting cleanup for today", MAX_RETRIES)
+ if not workflow_run_ids:
break
+
+ batch_count += 1
+
+ success = _delete_batch(session, workflow_run_ids, failed_batches)
+
+ if success:
+ total_deleted += len(workflow_run_ids)
+ failed_batches = 0
else:
- # Calculate incremental delay times: 5, 10, 15 minutes
- retry_delay_minutes = failed_batches * 5
- logger.warning("Batch deletion failed, retrying in %s minutes...", retry_delay_minutes)
- time.sleep(retry_delay_minutes * 60)
- continue
+ failed_batches += 1
+ if failed_batches >= MAX_RETRIES:
+ logger.error("Failed to delete batch after %s retries, aborting cleanup for today", MAX_RETRIES)
+ break
+ else:
+ # Calculate incremental delay times: 5, 10, 15 minutes
+ retry_delay_minutes = failed_batches * 5
+ logger.warning("Batch deletion failed, retrying in %s minutes...", retry_delay_minutes)
+ time.sleep(retry_delay_minutes * 60)
+ continue
logger.info("Cleanup completed: %s expired workflow run logs deleted", total_deleted)
except Exception:
- db.session.rollback()
logger.exception("Unexpected error in workflow log cleanup")
raise
@@ -87,69 +93,56 @@ def clean_workflow_runlogs_precise():
click.echo(click.style(f"Cleaned workflow run logs from db success latency: {execution_time:.2f}s", fg="green"))
-def _delete_batch_with_retry(workflow_run_ids: list[str], attempt_count: int) -> bool:
- """Delete a single batch with a retry mechanism and complete cascading deletion"""
+def _delete_batch(session: Session, workflow_run_ids: Sequence[str], attempt_count: int) -> bool:
+ """Delete a single batch of workflow runs and all related data within a nested transaction."""
try:
- with db.session.begin_nested():
+ with session.begin_nested():
message_data = (
- db.session.query(Message.id, Message.conversation_id)
+ session.query(Message.id, Message.conversation_id)
.where(Message.workflow_run_id.in_(workflow_run_ids))
.all()
)
message_id_list = [msg.id for msg in message_data]
conversation_id_list = list({msg.conversation_id for msg in message_data if msg.conversation_id})
if message_id_list:
- db.session.query(AppAnnotationHitHistory).where(
- AppAnnotationHitHistory.message_id.in_(message_id_list)
- ).delete(synchronize_session=False)
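+ # Collect the message-scoped tables so they can be purged in a single loop below.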
+ message_related_models = [
+ AppAnnotationHitHistory,
+ MessageAgentThought,
+ MessageChain,
+ MessageFile,
+ MessageAnnotation,
+ MessageFeedback,
+ ]
+ for model in message_related_models:
+ # The type checker flags model.message_id ("DeclarativeAttributeIntercept" has no attribute
+ # "message_id"), but all six models above define a message_id column, so the ignore is safe.
+ session.query(model).where(model.message_id.in_(message_id_list)).delete(synchronize_session=False) # type: ignore
- db.session.query(MessageAgentThought).where(MessageAgentThought.message_id.in_(message_id_list)).delete(
+ session.query(Message).where(Message.workflow_run_id.in_(workflow_run_ids)).delete(
synchronize_session=False
)
- db.session.query(MessageChain).where(MessageChain.message_id.in_(message_id_list)).delete(
- synchronize_session=False
- )
-
- db.session.query(MessageFile).where(MessageFile.message_id.in_(message_id_list)).delete(
- synchronize_session=False
- )
-
- db.session.query(MessageAnnotation).where(MessageAnnotation.message_id.in_(message_id_list)).delete(
- synchronize_session=False
- )
-
- db.session.query(MessageFeedback).where(MessageFeedback.message_id.in_(message_id_list)).delete(
- synchronize_session=False
- )
-
- db.session.query(Message).where(Message.workflow_run_id.in_(workflow_run_ids)).delete(
- synchronize_session=False
- )
-
- db.session.query(WorkflowAppLog).where(WorkflowAppLog.workflow_run_id.in_(workflow_run_ids)).delete(
+ session.query(WorkflowAppLog).where(WorkflowAppLog.workflow_run_id.in_(workflow_run_ids)).delete(
synchronize_session=False
)
- db.session.query(WorkflowNodeExecutionModel).where(
+ session.query(WorkflowNodeExecutionModel).where(
WorkflowNodeExecutionModel.workflow_run_id.in_(workflow_run_ids)
).delete(synchronize_session=False)
if conversation_id_list:
- db.session.query(ConversationVariable).where(
+ session.query(ConversationVariable).where(
ConversationVariable.conversation_id.in_(conversation_id_list)
).delete(synchronize_session=False)
- db.session.query(Conversation).where(Conversation.id.in_(conversation_id_list)).delete(
+ session.query(Conversation).where(Conversation.id.in_(conversation_id_list)).delete(
synchronize_session=False
)
- db.session.query(WorkflowRun).where(WorkflowRun.id.in_(workflow_run_ids)).delete(synchronize_session=False)
+ session.query(WorkflowRun).where(WorkflowRun.id.in_(workflow_run_ids)).delete(synchronize_session=False)
- db.session.commit()
- return True
+ return True
except Exception:
- db.session.rollback()
logger.exception("Batch deletion failed (attempt %s)", attempt_count + 1)
return False
diff --git a/api/services/website_service.py b/api/services/website_service.py
index 37588d6ba5..a23f01ec71 100644
--- a/api/services/website_service.py
+++ b/api/services/website_service.py
@@ -23,6 +23,7 @@ class CrawlOptions:
only_main_content: bool = False
includes: str | None = None
excludes: str | None = None
+ prompt: str | None = None
max_depth: int | None = None
use_sitemap: bool = True
@@ -70,6 +71,7 @@ class WebsiteCrawlApiRequest:
only_main_content=self.options.get("only_main_content", False),
includes=self.options.get("includes"),
excludes=self.options.get("excludes"),
+ prompt=self.options.get("prompt"),
max_depth=self.options.get("max_depth"),
use_sitemap=self.options.get("use_sitemap", True),
)
@@ -174,6 +176,7 @@ class WebsiteService:
def _crawl_with_firecrawl(cls, request: CrawlRequest, api_key: str, config: dict) -> dict[str, Any]:
firecrawl_app = FirecrawlApp(api_key=api_key, base_url=config.get("base_url"))
+ params: dict[str, Any]
if not request.options.crawl_sub_pages:
params = {
"includePaths": [],
@@ -188,8 +191,10 @@ class WebsiteService:
"limit": request.options.limit,
"scrapeOptions": {"onlyMainContent": request.options.only_main_content},
}
- if request.options.max_depth:
- params["maxDepth"] = request.options.max_depth
+
+ # Add optional prompt for Firecrawl v2 crawl-params compatibility
+ if request.options.prompt:
+ params["prompt"] = request.options.prompt
job_id = firecrawl_app.crawl_url(request.url, params)
website_crawl_time_cache_key = f"website_crawl_{job_id}"
diff --git a/api/templates/without-brand/invite_member_mail_template_en-US.html b/api/templates/without-brand/invite_member_mail_template_en-US.html
index fc7f3679ba..b78a6a0760 100644
--- a/api/templates/without-brand/invite_member_mail_template_en-US.html
+++ b/api/templates/without-brand/invite_member_mail_template_en-US.html
@@ -75,10 +75,7 @@
-
-
-
-
+
Dear {{ to }},
{{ inviter_name }} is pleased to invite you to join our workspace on {{application_title}}, a platform specifically designed for LLM application development. On {{application_title}}, you can explore, create, and collaborate to build and operate AI applications.
diff --git a/web/app/components/develop/template/template_workflow.en.mdx b/web/app/components/develop/template/template_workflow.en.mdx
index f286773685..a820516e88 100644
--- a/web/app/components/develop/template/template_workflow.en.mdx
+++ b/web/app/components/develop/template/template_workflow.en.mdx
@@ -1025,8 +1025,8 @@ Workflow applications offers non-session support and is ideal for translation, a