From 57c4fc6bf88fcd4f3b6a422c1c3906eb5ebf00f3 Mon Sep 17 00:00:00 2001
From: lyzno1 <92089059+lyzno1@users.noreply.github.com>
Date: Fri, 8 Aug 2025 14:47:15 +0800
Subject: [PATCH 01/21] Fix X button animation glitches in secret key modals
 (#23614)

---
 web/app/components/develop/secret-key/secret-key-generate.tsx | 4 +++-
 web/app/components/develop/secret-key/secret-key-modal.tsx    | 4 +++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/web/app/components/develop/secret-key/secret-key-generate.tsx b/web/app/components/develop/secret-key/secret-key-generate.tsx
index 3d15b045ef..4ec6f52b3f 100644
--- a/web/app/components/develop/secret-key/secret-key-generate.tsx
+++ b/web/app/components/develop/secret-key/secret-key-generate.tsx
@@ -23,7 +23,9 @@ const SecretKeyGenerateModal = ({
   const { t } = useTranslation()
   return (
  [hunk body unrecoverable: the JSX element tags were stripped during extraction; the surviving text node is {t('appApi.apiKeyModal.generateTips')}]
diff --git a/web/app/components/develop/secret-key/secret-key-modal.tsx b/web/app/components/develop/secret-key/secret-key-modal.tsx
index b61e2314c5..bde1811d05 100644
--- a/web/app/components/develop/secret-key/secret-key-modal.tsx
+++ b/web/app/components/develop/secret-key/secret-key-modal.tsx
@@ -84,7 +84,9 @@ const SecretKeyModal = ({
   return (
  [hunk body unrecoverable: the JSX element tags were stripped during extraction; the surviving text nodes are {t('appApi.apiKeyModal.apiSecretKeyTips')} and {!apiKeysList && …}]
From 5cf55fcbab6c30d2bcd786f0bbad44ba95d36206 Mon Sep 17 00:00:00 2001
From: GuanMu
Date: Fri, 8 Aug 2025 15:20:24 +0800
Subject: [PATCH 02/21] feat: Enhance the alignment logic of the nodes in the
 context menu (#23617)

---
 .../workflow/selection-contextmenu.tsx        | 25 ++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/web/app/components/workflow/selection-contextmenu.tsx b/web/app/components/workflow/selection-contextmenu.tsx
index 71c8e97ab7..6cc348ad2b 100644
--- a/web/app/components/workflow/selection-contextmenu.tsx
+++ b/web/app/components/workflow/selection-contextmenu.tsx
@@ -260,7 +260,30 @@ const SelectionContextmenu = () => {
     // Get all selected nodes
     const selectedNodeIds = selectedNodes.map(node => node.id)
-    const nodesToAlign = nodes.filter(node => selectedNodeIds.includes(node.id))
+
+    // Find container nodes and their children
+    // Container nodes (like Iteration and Loop) have child nodes that should not be aligned independently
+    // when the container is selected. This prevents child nodes from being moved outside their containers.
+    const childNodeIds = new Set<string>()
+
+    nodes.forEach((node) => {
+      // Check if this is a container node (Iteration or Loop)
+      if (node.data._children && node.data._children.length > 0) {
+        // If container node is selected, add its children to the exclusion set
+        if (selectedNodeIds.includes(node.id)) {
+          // Add all its children to the childNodeIds set
+          node.data._children.forEach((child: { nodeId: string; nodeType: string }) => {
+            childNodeIds.add(child.nodeId)
+          })
+        }
+      }
+    })
+
+    // Filter out child nodes from the alignment operation
+    // Only align nodes that are selected AND are not children of container nodes
+    // This ensures container nodes can be aligned while their children stay in the same relative position
+    const nodesToAlign = nodes.filter(node =>
+      selectedNodeIds.includes(node.id) && !childNodeIds.has(node.id))
 
     if (nodesToAlign.length <= 1) {
       handleSelectionContextmenuCancel()
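The exclusion filter PATCH 02 adds is easiest to verify in isolation. Below is a minimal TypeScript sketch under simplified assumptions: the WorkflowNode/ChildRef shapes, the nodesEligibleForAlignment helper, and the sample data are illustrative stand-ins, not the actual Dify workflow or ReactFlow types.

// Minimal sketch of the container-child exclusion from PATCH 02.
// Types here are simplified stand-ins for the real workflow node shapes.
type ChildRef = { nodeId: string; nodeType: string }
type WorkflowNode = { id: string; data: { _children?: ChildRef[] } }

function nodesEligibleForAlignment(nodes: WorkflowNode[], selectedNodeIds: string[]): WorkflowNode[] {
  const childNodeIds = new Set<string>()
  nodes.forEach((node) => {
    // When a selected node is a container (Iteration/Loop), collect its
    // children so they are excluded from independent alignment.
    if (node.data._children && node.data._children.length > 0 && selectedNodeIds.includes(node.id))
      node.data._children.forEach(child => childNodeIds.add(child.nodeId))
  })
  // Align only selected nodes that are not children of a selected container.
  return nodes.filter(node => selectedNodeIds.includes(node.id) && !childNodeIds.has(node.id))
}

// Example: selecting a loop container together with its child aligns only the container.
const sampleNodes: WorkflowNode[] = [
  { id: 'loop-1', data: { _children: [{ nodeId: 'child-1', nodeType: 'llm' }] } },
  { id: 'child-1', data: {} },
  { id: 'plain-1', data: {} },
]
console.log(nodesEligibleForAlignment(sampleNodes, ['loop-1', 'child-1', 'plain-1']).map(n => n.id))
// -> ['loop-1', 'plain-1']

The design point: children move with their container, so aligning them independently would tear them out of the container's coordinate space.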
From b32b7712e2087364809de685bce12edc8596dfdc Mon Sep 17 00:00:00 2001
From: Joel
Date: Fri, 8 Aug 2025 18:34:12 +0800
Subject: [PATCH 03/21] fix: i18n options too long breaks plugin setting ui
 (#23640)

---
 web/app/components/plugins/reference-setting-modal/modal.tsx | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/web/app/components/plugins/reference-setting-modal/modal.tsx b/web/app/components/plugins/reference-setting-modal/modal.tsx
index 9fefbdbb55..d47e78ecd2 100644
--- a/web/app/components/plugins/reference-setting-modal/modal.tsx
+++ b/web/app/components/plugins/reference-setting-modal/modal.tsx
@@ -50,9 +50,9 @@ const PluginSettingModal: FC = ({
       isShow
       onClose={onHide}
       closable
-      className='w-[480px] !p-0'
+      className='w-[620px] max-w-[620px] !p-0'
     >
  [second changed line pair unrecoverable: the JSX element tags were stripped during extraction; the surviving text node is {t(`${i18nPrefix}.title`)}]
From 14e1c16cf29ceec789b46626e9976dc271f384f8 Mon Sep 17 00:00:00 2001 From: yunqiqiliang <132561395+yunqiqiliang@users.noreply.github.com> Date: Fri, 8 Aug 2025 22:57:47 +0800 Subject: [PATCH 04/21] Fix ClickZetta stability and reduce logging noise (#23632) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .../rag/datasource/vdb/clickzetta/README.md | 6 +- .../vdb/clickzetta/clickzetta_vector.py | 745 ++++++++++++------ api/tasks/clean_dataset_task.py | 9 +- 3 files changed, 505 insertions(+), 255 deletions(-) diff --git a/api/core/rag/datasource/vdb/clickzetta/README.md b/api/core/rag/datasource/vdb/clickzetta/README.md index 40229f8d44..2ee3e657d3 100644 --- a/api/core/rag/datasource/vdb/clickzetta/README.md +++ b/api/core/rag/datasource/vdb/clickzetta/README.md @@ -185,6 +185,6 @@ Clickzetta supports advanced full-text search with multiple analyzers: ## References -- [Clickzetta Vector Search Documentation](../../../../../../../yunqidoc/cn_markdown_20250526/vector-search.md) -- [Clickzetta Inverted Index Documentation](../../../../../../../yunqidoc/cn_markdown_20250526/inverted-index.md) -- [Clickzetta SQL Functions](../../../../../../../yunqidoc/cn_markdown_20250526/sql_functions/) +- [Clickzetta Vector Search Documentation](https://yunqi.tech/documents/vector-search) +- [Clickzetta Inverted Index Documentation](https://yunqi.tech/documents/inverted-index) +- [Clickzetta SQL Functions](https://yunqi.tech/documents/sql-reference) diff --git a/api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py b/api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py index 50a395a373..1059b855a2 100644 --- a/api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py +++ b/api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py @@ -1,7 +1,9 @@ import json import logging import queue +import re import threading +import time import uuid from typing import TYPE_CHECKING, Any, Optional @@ -67,6 +69,243 @@ class ClickzettaConfig(BaseModel): return values +class ClickzettaConnectionPool: + """ + Global connection pool for ClickZetta connections. + Manages connection reuse across ClickzettaVector instances. 
+ """ + + _instance: Optional["ClickzettaConnectionPool"] = None + _lock = threading.Lock() + + def __init__(self): + self._pools: dict[str, list[tuple[Connection, float]]] = {} # config_key -> [(connection, last_used_time)] + self._pool_locks: dict[str, threading.Lock] = {} + self._max_pool_size = 5 # Maximum connections per configuration + self._connection_timeout = 300 # 5 minutes timeout + self._cleanup_thread: Optional[threading.Thread] = None + self._shutdown = False + self._start_cleanup_thread() + + @classmethod + def get_instance(cls) -> "ClickzettaConnectionPool": + """Get singleton instance of connection pool.""" + if cls._instance is None: + with cls._lock: + if cls._instance is None: + cls._instance = cls() + return cls._instance + + def _get_config_key(self, config: ClickzettaConfig) -> str: + """Generate unique key for connection configuration.""" + return ( + f"{config.username}:{config.instance}:{config.service}:" + f"{config.workspace}:{config.vcluster}:{config.schema_name}" + ) + + def _create_connection(self, config: ClickzettaConfig) -> "Connection": + """Create a new ClickZetta connection.""" + max_retries = 3 + retry_delay = 1.0 + + for attempt in range(max_retries): + try: + connection = clickzetta.connect( + username=config.username, + password=config.password, + instance=config.instance, + service=config.service, + workspace=config.workspace, + vcluster=config.vcluster, + schema=config.schema_name, + ) + + # Configure connection session settings + self._configure_connection(connection) + logger.debug("Created new ClickZetta connection (attempt %d/%d)", attempt + 1, max_retries) + return connection + except Exception: + logger.exception("ClickZetta connection attempt %d/%d failed", attempt + 1, max_retries) + if attempt < max_retries - 1: + time.sleep(retry_delay * (2**attempt)) + else: + raise + + raise RuntimeError(f"Failed to create ClickZetta connection after {max_retries} attempts") + + def _configure_connection(self, connection: "Connection") -> None: + """Configure connection session settings.""" + try: + with connection.cursor() as cursor: + # Temporarily suppress ClickZetta client logging to reduce noise + clickzetta_logger = logging.getLogger("clickzetta") + original_level = clickzetta_logger.level + clickzetta_logger.setLevel(logging.WARNING) + + try: + # Use quote mode for string literal escaping + cursor.execute("SET cz.sql.string.literal.escape.mode = 'quote'") + + # Apply performance optimization hints + performance_hints = [ + # Vector index optimization + "SET cz.storage.parquet.vector.index.read.memory.cache = true", + "SET cz.storage.parquet.vector.index.read.local.cache = false", + # Query optimization + "SET cz.sql.table.scan.push.down.filter = true", + "SET cz.sql.table.scan.enable.ensure.filter = true", + "SET cz.storage.always.prefetch.internal = true", + "SET cz.optimizer.generate.columns.always.valid = true", + "SET cz.sql.index.prewhere.enabled = true", + # Storage optimization + "SET cz.storage.parquet.enable.io.prefetch = false", + "SET cz.optimizer.enable.mv.rewrite = false", + "SET cz.sql.dump.as.lz4 = true", + "SET cz.optimizer.limited.optimization.naive.query = true", + "SET cz.sql.table.scan.enable.push.down.log = false", + "SET cz.storage.use.file.format.local.stats = false", + "SET cz.storage.local.file.object.cache.level = all", + # Job execution optimization + "SET cz.sql.job.fast.mode = true", + "SET cz.storage.parquet.non.contiguous.read = true", + "SET cz.sql.compaction.after.commit = true", + ] + + for hint in 
performance_hints: + cursor.execute(hint) + finally: + # Restore original logging level + clickzetta_logger.setLevel(original_level) + + except Exception: + logger.exception("Failed to configure connection, continuing with defaults") + + def _is_connection_valid(self, connection: "Connection") -> bool: + """Check if connection is still valid.""" + try: + with connection.cursor() as cursor: + cursor.execute("SELECT 1") + return True + except Exception: + return False + + def get_connection(self, config: ClickzettaConfig) -> "Connection": + """Get a connection from the pool or create a new one.""" + config_key = self._get_config_key(config) + + # Ensure pool lock exists + if config_key not in self._pool_locks: + with self._lock: + if config_key not in self._pool_locks: + self._pool_locks[config_key] = threading.Lock() + self._pools[config_key] = [] + + with self._pool_locks[config_key]: + pool = self._pools[config_key] + current_time = time.time() + + # Try to reuse existing connection + while pool: + connection, last_used = pool.pop(0) + + # Check if connection is not expired and still valid + if current_time - last_used < self._connection_timeout and self._is_connection_valid(connection): + logger.debug("Reusing ClickZetta connection from pool") + return connection + else: + # Connection expired or invalid, close it + try: + connection.close() + except Exception: + pass + + # No valid connection found, create new one + return self._create_connection(config) + + def return_connection(self, config: ClickzettaConfig, connection: "Connection") -> None: + """Return a connection to the pool.""" + config_key = self._get_config_key(config) + + if config_key not in self._pool_locks: + # Pool was cleaned up, just close the connection + try: + connection.close() + except Exception: + pass + return + + with self._pool_locks[config_key]: + pool = self._pools[config_key] + + # Only return to pool if not at capacity and connection is valid + if len(pool) < self._max_pool_size and self._is_connection_valid(connection): + pool.append((connection, time.time())) + logger.debug("Returned ClickZetta connection to pool") + else: + # Pool full or connection invalid, close it + try: + connection.close() + except Exception: + pass + + def _cleanup_expired_connections(self) -> None: + """Clean up expired connections from all pools.""" + current_time = time.time() + + with self._lock: + for config_key in list(self._pools.keys()): + if config_key not in self._pool_locks: + continue + + with self._pool_locks[config_key]: + pool = self._pools[config_key] + valid_connections = [] + + for connection, last_used in pool: + if current_time - last_used < self._connection_timeout: + valid_connections.append((connection, last_used)) + else: + try: + connection.close() + except Exception: + pass + + self._pools[config_key] = valid_connections + + def _start_cleanup_thread(self) -> None: + """Start background thread for connection cleanup.""" + + def cleanup_worker(): + while not self._shutdown: + try: + time.sleep(60) # Cleanup every minute + if not self._shutdown: + self._cleanup_expired_connections() + except Exception: + logger.exception("Error in connection pool cleanup") + + self._cleanup_thread = threading.Thread(target=cleanup_worker, daemon=True) + self._cleanup_thread.start() + + def shutdown(self) -> None: + """Shutdown connection pool and close all connections.""" + self._shutdown = True + + with self._lock: + for config_key in list(self._pools.keys()): + if config_key not in self._pool_locks: + continue + + with 
self._pool_locks[config_key]: + pool = self._pools[config_key] + for connection, _ in pool: + try: + connection.close() + except Exception: + pass + pool.clear() + + class ClickzettaVector(BaseVector): """ Clickzetta vector storage implementation. @@ -82,70 +321,74 @@ class ClickzettaVector(BaseVector): super().__init__(collection_name) self._config = config self._table_name = collection_name.replace("-", "_").lower() # Ensure valid table name - self._connection: Optional[Connection] = None - self._init_connection() + self._connection_pool = ClickzettaConnectionPool.get_instance() self._init_write_queue() - def _init_connection(self): - """Initialize Clickzetta connection.""" - self._connection = clickzetta.connect( - username=self._config.username, - password=self._config.password, - instance=self._config.instance, - service=self._config.service, - workspace=self._config.workspace, - vcluster=self._config.vcluster, - schema=self._config.schema_name, - ) + def _get_connection(self) -> "Connection": + """Get a connection from the pool.""" + return self._connection_pool.get_connection(self._config) - # Set session parameters for better string handling and performance optimization - if self._connection is not None: - with self._connection.cursor() as cursor: - # Use quote mode for string literal escaping to handle quotes better - cursor.execute("SET cz.sql.string.literal.escape.mode = 'quote'") - logger.info("Set string literal escape mode to 'quote' for better quote handling") + def _return_connection(self, connection: "Connection") -> None: + """Return a connection to the pool.""" + self._connection_pool.return_connection(self._config, connection) - # Performance optimization hints for vector operations - self._set_performance_hints(cursor) + class ConnectionContext: + """Context manager for borrowing and returning connections.""" - def _set_performance_hints(self, cursor): - """Set ClickZetta performance optimization hints for vector operations.""" + def __init__(self, vector_instance: "ClickzettaVector"): + self.vector = vector_instance + self.connection: Optional[Connection] = None + + def __enter__(self) -> "Connection": + self.connection = self.vector._get_connection() + return self.connection + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.connection: + self.vector._return_connection(self.connection) + + def get_connection_context(self) -> "ClickzettaVector.ConnectionContext": + """Get a connection context manager.""" + return self.ConnectionContext(self) + + def _parse_metadata(self, raw_metadata: str, row_id: str) -> dict: + """ + Parse metadata from JSON string with proper error handling and fallback. 
+ + Args: + raw_metadata: Raw JSON string from database + row_id: Row ID for fallback document_id + + Returns: + Parsed metadata dict with guaranteed required fields + """ try: - # Performance optimization hints for vector operations and query processing - performance_hints = [ - # Vector index optimization - "SET cz.storage.parquet.vector.index.read.memory.cache = true", - "SET cz.storage.parquet.vector.index.read.local.cache = false", - # Query optimization - "SET cz.sql.table.scan.push.down.filter = true", - "SET cz.sql.table.scan.enable.ensure.filter = true", - "SET cz.storage.always.prefetch.internal = true", - "SET cz.optimizer.generate.columns.always.valid = true", - "SET cz.sql.index.prewhere.enabled = true", - # Storage optimization - "SET cz.storage.parquet.enable.io.prefetch = false", - "SET cz.optimizer.enable.mv.rewrite = false", - "SET cz.sql.dump.as.lz4 = true", - "SET cz.optimizer.limited.optimization.naive.query = true", - "SET cz.sql.table.scan.enable.push.down.log = false", - "SET cz.storage.use.file.format.local.stats = false", - "SET cz.storage.local.file.object.cache.level = all", - # Job execution optimization - "SET cz.sql.job.fast.mode = true", - "SET cz.storage.parquet.non.contiguous.read = true", - "SET cz.sql.compaction.after.commit = true", - ] + if raw_metadata: + metadata = json.loads(raw_metadata) - for hint in performance_hints: - cursor.execute(hint) + # Handle double-encoded JSON + if isinstance(metadata, str): + metadata = json.loads(metadata) - logger.info( - "Applied %d performance optimization hints for ClickZetta vector operations", len(performance_hints) - ) + # Ensure we have a dict + if not isinstance(metadata, dict): + metadata = {} + else: + metadata = {} + except (json.JSONDecodeError, TypeError): + logger.exception("JSON parsing failed for metadata") + # Fallback: extract document_id with regex + doc_id_match = re.search(r'"document_id":\s*"([^"]+)"', raw_metadata or "") + metadata = {"document_id": doc_id_match.group(1)} if doc_id_match else {} - except Exception: - # Catch any errors setting performance hints but continue with defaults - logger.exception("Failed to set some performance hints, continuing with default settings") + # Ensure required fields are set + metadata["doc_id"] = row_id # segment id + + # Ensure document_id exists (critical for Dify's format_retrieval_documents) + if "document_id" not in metadata: + metadata["document_id"] = row_id # fallback to segment id + + return metadata @classmethod def _init_write_queue(cls): @@ -204,24 +447,33 @@ class ClickzettaVector(BaseVector): return "clickzetta" def _ensure_connection(self) -> "Connection": - """Ensure connection is available and return it.""" - if self._connection is None: - raise RuntimeError("Database connection not initialized") - return self._connection + """Get a connection from the pool.""" + return self._get_connection() def _table_exists(self) -> bool: """Check if the table exists.""" try: - connection = self._ensure_connection() - with connection.cursor() as cursor: - cursor.execute(f"DESC {self._config.schema_name}.{self._table_name}") - return True - except (RuntimeError, ValueError) as e: - if "table or view not found" in str(e).lower(): + with self.get_connection_context() as connection: + with connection.cursor() as cursor: + cursor.execute(f"DESC {self._config.schema_name}.{self._table_name}") + return True + except Exception as e: + error_message = str(e).lower() + # Handle ClickZetta specific "table or view not found" errors + if any( + phrase in 
error_message + for phrase in ["table or view not found", "czlh-42000", "semantic analysis exception"] + ): + logger.debug("Table %s.%s does not exist", self._config.schema_name, self._table_name) return False else: - # Re-raise if it's a different error - raise + # For other connection/permission errors, log warning but return False to avoid blocking cleanup + logger.exception( + "Table existence check failed for %s.%s, assuming it doesn't exist", + self._config.schema_name, + self._table_name, + ) + return False def create(self, texts: list[Document], embeddings: list[list[float]], **kwargs): """Create the collection and add initial documents.""" @@ -253,17 +505,17 @@ class ClickzettaVector(BaseVector): ) COMMENT 'Dify RAG knowledge base vector storage table for document embeddings and content' """ - connection = self._ensure_connection() - with connection.cursor() as cursor: - cursor.execute(create_table_sql) - logger.info("Created table %s.%s", self._config.schema_name, self._table_name) + with self.get_connection_context() as connection: + with connection.cursor() as cursor: + cursor.execute(create_table_sql) + logger.info("Created table %s.%s", self._config.schema_name, self._table_name) - # Create vector index - self._create_vector_index(cursor) + # Create vector index + self._create_vector_index(cursor) - # Create inverted index for full-text search if enabled - if self._config.enable_inverted_index: - self._create_inverted_index(cursor) + # Create inverted index for full-text search if enabled + if self._config.enable_inverted_index: + self._create_inverted_index(cursor) def _create_vector_index(self, cursor): """Create HNSW vector index for similarity search.""" @@ -432,39 +684,53 @@ class ClickzettaVector(BaseVector): f"VALUES (?, ?, CAST(? AS JSON), CAST(? 
AS VECTOR({vector_dimension})))" ) - connection = self._ensure_connection() - with connection.cursor() as cursor: - try: - # Set session-level hints for batch insert operations - # Note: executemany doesn't support hints parameter, so we set them as session variables - cursor.execute("SET cz.sql.job.fast.mode = true") - cursor.execute("SET cz.sql.compaction.after.commit = true") - cursor.execute("SET cz.storage.always.prefetch.internal = true") + with self.get_connection_context() as connection: + with connection.cursor() as cursor: + try: + # Set session-level hints for batch insert operations + # Note: executemany doesn't support hints parameter, so we set them as session variables + # Temporarily suppress ClickZetta client logging to reduce noise + clickzetta_logger = logging.getLogger("clickzetta") + original_level = clickzetta_logger.level + clickzetta_logger.setLevel(logging.WARNING) - cursor.executemany(insert_sql, data_rows) - logger.info( - "Inserted batch %d/%d (%d valid docs using parameterized query with VECTOR(%d) cast)", - batch_index // batch_size + 1, - total_batches, - len(data_rows), - vector_dimension, - ) - except (RuntimeError, ValueError, TypeError, ConnectionError) as e: - logger.exception("Parameterized SQL execution failed for %d documents", len(data_rows)) - logger.exception("SQL template: %s", insert_sql) - logger.exception("Sample data row: %s", data_rows[0] if data_rows else "None") - raise + try: + cursor.execute("SET cz.sql.job.fast.mode = true") + cursor.execute("SET cz.sql.compaction.after.commit = true") + cursor.execute("SET cz.storage.always.prefetch.internal = true") + finally: + # Restore original logging level + clickzetta_logger.setLevel(original_level) + + cursor.executemany(insert_sql, data_rows) + logger.info( + "Inserted batch %d/%d (%d valid docs using parameterized query with VECTOR(%d) cast)", + batch_index // batch_size + 1, + total_batches, + len(data_rows), + vector_dimension, + ) + except (RuntimeError, ValueError, TypeError, ConnectionError) as e: + logger.exception("Parameterized SQL execution failed for %d documents", len(data_rows)) + logger.exception("SQL template: %s", insert_sql) + logger.exception("Sample data row: %s", data_rows[0] if data_rows else "None") + raise def text_exists(self, id: str) -> bool: """Check if a document exists by ID.""" + # Check if table exists first + if not self._table_exists(): + return False + safe_id = self._safe_doc_id(id) - connection = self._ensure_connection() - with connection.cursor() as cursor: - cursor.execute( - f"SELECT COUNT(*) FROM {self._config.schema_name}.{self._table_name} WHERE id = ?", [safe_id] - ) - result = cursor.fetchone() - return result[0] > 0 if result else False + with self.get_connection_context() as connection: + with connection.cursor() as cursor: + cursor.execute( + f"SELECT COUNT(*) FROM {self._config.schema_name}.{self._table_name} WHERE id = ?", + binding_params=[safe_id], + ) + result = cursor.fetchone() + return result[0] > 0 if result else False def delete_by_ids(self, ids: list[str]) -> None: """Delete documents by IDs.""" @@ -482,13 +748,14 @@ class ClickzettaVector(BaseVector): def _delete_by_ids_impl(self, ids: list[str]) -> None: """Implementation of delete by IDs (executed in write worker thread).""" safe_ids = [self._safe_doc_id(id) for id in ids] - # Create properly escaped string literals for SQL - id_list = ",".join(f"'{id}'" for id in safe_ids) - sql = f"DELETE FROM {self._config.schema_name}.{self._table_name} WHERE id IN ({id_list})" - connection = 
self._ensure_connection() - with connection.cursor() as cursor: - cursor.execute(sql) + # Use parameterized query to prevent SQL injection + placeholders = ",".join("?" for _ in safe_ids) + sql = f"DELETE FROM {self._config.schema_name}.{self._table_name} WHERE id IN ({placeholders})" + + with self.get_connection_context() as connection: + with connection.cursor() as cursor: + cursor.execute(sql, binding_params=safe_ids) def delete_by_metadata_field(self, key: str, value: str) -> None: """Delete documents by metadata field.""" @@ -502,19 +769,28 @@ class ClickzettaVector(BaseVector): def _delete_by_metadata_field_impl(self, key: str, value: str) -> None: """Implementation of delete by metadata field (executed in write worker thread).""" - connection = self._ensure_connection() - with connection.cursor() as cursor: - # Using JSON path to filter with parameterized query - # Note: JSON path requires literal key name, cannot be parameterized - # Use json_extract_string function for ClickZetta compatibility - sql = ( - f"DELETE FROM {self._config.schema_name}.{self._table_name} " - f"WHERE json_extract_string({Field.METADATA_KEY.value}, '$.{key}') = ?" - ) - cursor.execute(sql, [value]) + with self.get_connection_context() as connection: + with connection.cursor() as cursor: + # Using JSON path to filter with parameterized query + # Note: JSON path requires literal key name, cannot be parameterized + # Use json_extract_string function for ClickZetta compatibility + sql = ( + f"DELETE FROM {self._config.schema_name}.{self._table_name} " + f"WHERE json_extract_string({Field.METADATA_KEY.value}, '$.{key}') = ?" + ) + cursor.execute(sql, binding_params=[value]) def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]: """Search for documents by vector similarity.""" + # Check if table exists first + if not self._table_exists(): + logger.warning( + "Table %s.%s does not exist, returning empty results", + self._config.schema_name, + self._table_name, + ) + return [] + top_k = kwargs.get("top_k", 10) score_threshold = kwargs.get("score_threshold", 0.0) document_ids_filter = kwargs.get("document_ids_filter") @@ -565,56 +841,31 @@ class ClickzettaVector(BaseVector): """ documents = [] - connection = self._ensure_connection() - with connection.cursor() as cursor: - # Use hints parameter for vector search optimization - search_hints = { - "hints": { - "sdk.job.timeout": 60, # Increase timeout for vector search - "cz.sql.job.fast.mode": True, - "cz.storage.parquet.vector.index.read.memory.cache": True, + with self.get_connection_context() as connection: + with connection.cursor() as cursor: + # Use hints parameter for vector search optimization + search_hints = { + "hints": { + "sdk.job.timeout": 60, # Increase timeout for vector search + "cz.sql.job.fast.mode": True, + "cz.storage.parquet.vector.index.read.memory.cache": True, + } } - } - cursor.execute(search_sql, parameters=search_hints) - results = cursor.fetchall() + cursor.execute(search_sql, search_hints) + results = cursor.fetchall() - for row in results: - # Parse metadata from JSON string (may be double-encoded) - try: - if row[2]: - metadata = json.loads(row[2]) + for row in results: + # Parse metadata using centralized method + metadata = self._parse_metadata(row[2], row[0]) - # If result is a string, it's double-encoded JSON - parse again - if isinstance(metadata, str): - metadata = json.loads(metadata) - - if not isinstance(metadata, dict): - metadata = {} + # Add score based on distance + if 
self._config.vector_distance_function == "cosine_distance": + metadata["score"] = 1 - (row[3] / 2) else: - metadata = {} - except (json.JSONDecodeError, TypeError) as e: - logger.exception("JSON parsing failed") - # Fallback: extract document_id with regex - import re + metadata["score"] = 1 / (1 + row[3]) - doc_id_match = re.search(r'"document_id":\s*"([^"]+)"', str(row[2] or "")) - metadata = {"document_id": doc_id_match.group(1)} if doc_id_match else {} - - # Ensure required fields are set - metadata["doc_id"] = row[0] # segment id - - # Ensure document_id exists (critical for Dify's format_retrieval_documents) - if "document_id" not in metadata: - metadata["document_id"] = row[0] # fallback to segment id - - # Add score based on distance - if self._config.vector_distance_function == "cosine_distance": - metadata["score"] = 1 - (row[3] / 2) - else: - metadata["score"] = 1 / (1 + row[3]) - - doc = Document(page_content=row[1], metadata=metadata) - documents.append(doc) + doc = Document(page_content=row[1], metadata=metadata) + documents.append(doc) return documents @@ -624,6 +875,15 @@ class ClickzettaVector(BaseVector): logger.warning("Full-text search is not enabled. Enable inverted index in config.") return [] + # Check if table exists first + if not self._table_exists(): + logger.warning( + "Table %s.%s does not exist, returning empty results", + self._config.schema_name, + self._table_name, + ) + return [] + top_k = kwargs.get("top_k", 10) document_ids_filter = kwargs.get("document_ids_filter") @@ -659,62 +919,70 @@ class ClickzettaVector(BaseVector): """ documents = [] - connection = self._ensure_connection() - with connection.cursor() as cursor: - try: - # Use hints parameter for full-text search optimization - fulltext_hints = { - "hints": { - "sdk.job.timeout": 30, # Timeout for full-text search - "cz.sql.job.fast.mode": True, - "cz.sql.index.prewhere.enabled": True, + with self.get_connection_context() as connection: + with connection.cursor() as cursor: + try: + # Use hints parameter for full-text search optimization + fulltext_hints = { + "hints": { + "sdk.job.timeout": 30, # Timeout for full-text search + "cz.sql.job.fast.mode": True, + "cz.sql.index.prewhere.enabled": True, + } } - } - cursor.execute(search_sql, parameters=fulltext_hints) - results = cursor.fetchall() + cursor.execute(search_sql, fulltext_hints) + results = cursor.fetchall() - for row in results: - # Parse metadata from JSON string (may be double-encoded) - try: - if row[2]: - metadata = json.loads(row[2]) + for row in results: + # Parse metadata from JSON string (may be double-encoded) + try: + if row[2]: + metadata = json.loads(row[2]) - # If result is a string, it's double-encoded JSON - parse again - if isinstance(metadata, str): - metadata = json.loads(metadata) + # If result is a string, it's double-encoded JSON - parse again + if isinstance(metadata, str): + metadata = json.loads(metadata) - if not isinstance(metadata, dict): + if not isinstance(metadata, dict): + metadata = {} + else: metadata = {} - else: - metadata = {} - except (json.JSONDecodeError, TypeError) as e: - logger.exception("JSON parsing failed") - # Fallback: extract document_id with regex - import re + except (json.JSONDecodeError, TypeError) as e: + logger.exception("JSON parsing failed") + # Fallback: extract document_id with regex - doc_id_match = re.search(r'"document_id":\s*"([^"]+)"', str(row[2] or "")) - metadata = {"document_id": doc_id_match.group(1)} if doc_id_match else {} + doc_id_match = 
re.search(r'"document_id":\s*"([^"]+)"', str(row[2] or "")) + metadata = {"document_id": doc_id_match.group(1)} if doc_id_match else {} - # Ensure required fields are set - metadata["doc_id"] = row[0] # segment id + # Ensure required fields are set + metadata["doc_id"] = row[0] # segment id - # Ensure document_id exists (critical for Dify's format_retrieval_documents) - if "document_id" not in metadata: - metadata["document_id"] = row[0] # fallback to segment id + # Ensure document_id exists (critical for Dify's format_retrieval_documents) + if "document_id" not in metadata: + metadata["document_id"] = row[0] # fallback to segment id - # Add a relevance score for full-text search - metadata["score"] = 1.0 # Clickzetta doesn't provide relevance scores - doc = Document(page_content=row[1], metadata=metadata) - documents.append(doc) - except (RuntimeError, ValueError, TypeError, ConnectionError) as e: - logger.exception("Full-text search failed") - # Fallback to LIKE search if full-text search fails - return self._search_by_like(query, **kwargs) + # Add a relevance score for full-text search + metadata["score"] = 1.0 # Clickzetta doesn't provide relevance scores + doc = Document(page_content=row[1], metadata=metadata) + documents.append(doc) + except (RuntimeError, ValueError, TypeError, ConnectionError) as e: + logger.exception("Full-text search failed") + # Fallback to LIKE search if full-text search fails + return self._search_by_like(query, **kwargs) return documents def _search_by_like(self, query: str, **kwargs: Any) -> list[Document]: """Fallback search using LIKE operator.""" + # Check if table exists first + if not self._table_exists(): + logger.warning( + "Table %s.%s does not exist, returning empty results", + self._config.schema_name, + self._table_name, + ) + return [] + top_k = kwargs.get("top_k", 10) document_ids_filter = kwargs.get("document_ids_filter") @@ -746,58 +1014,33 @@ class ClickzettaVector(BaseVector): """ documents = [] - connection = self._ensure_connection() - with connection.cursor() as cursor: - # Use hints parameter for LIKE search optimization - like_hints = { - "hints": { - "sdk.job.timeout": 20, # Timeout for LIKE search - "cz.sql.job.fast.mode": True, + with self.get_connection_context() as connection: + with connection.cursor() as cursor: + # Use hints parameter for LIKE search optimization + like_hints = { + "hints": { + "sdk.job.timeout": 20, # Timeout for LIKE search + "cz.sql.job.fast.mode": True, + } } - } - cursor.execute(search_sql, parameters=like_hints) - results = cursor.fetchall() + cursor.execute(search_sql, like_hints) + results = cursor.fetchall() - for row in results: - # Parse metadata from JSON string (may be double-encoded) - try: - if row[2]: - metadata = json.loads(row[2]) + for row in results: + # Parse metadata using centralized method + metadata = self._parse_metadata(row[2], row[0]) - # If result is a string, it's double-encoded JSON - parse again - if isinstance(metadata, str): - metadata = json.loads(metadata) - - if not isinstance(metadata, dict): - metadata = {} - else: - metadata = {} - except (json.JSONDecodeError, TypeError) as e: - logger.exception("JSON parsing failed") - # Fallback: extract document_id with regex - import re - - doc_id_match = re.search(r'"document_id":\s*"([^"]+)"', str(row[2] or "")) - metadata = {"document_id": doc_id_match.group(1)} if doc_id_match else {} - - # Ensure required fields are set - metadata["doc_id"] = row[0] # segment id - - # Ensure document_id exists (critical for Dify's 
format_retrieval_documents) - if "document_id" not in metadata: - metadata["document_id"] = row[0] # fallback to segment id - - metadata["score"] = 0.5 # Lower score for LIKE search - doc = Document(page_content=row[1], metadata=metadata) - documents.append(doc) + metadata["score"] = 0.5 # Lower score for LIKE search + doc = Document(page_content=row[1], metadata=metadata) + documents.append(doc) return documents def delete(self) -> None: """Delete the entire collection.""" - connection = self._ensure_connection() - with connection.cursor() as cursor: - cursor.execute(f"DROP TABLE IF EXISTS {self._config.schema_name}.{self._table_name}") + with self.get_connection_context() as connection: + with connection.cursor() as cursor: + cursor.execute(f"DROP TABLE IF EXISTS {self._config.schema_name}.{self._table_name}") def _format_vector_simple(self, vector: list[float]) -> str: """Simple vector formatting for SQL queries.""" diff --git a/api/tasks/clean_dataset_task.py b/api/tasks/clean_dataset_task.py index c769446ed5..69e5df0253 100644 --- a/api/tasks/clean_dataset_task.py +++ b/api/tasks/clean_dataset_task.py @@ -59,7 +59,14 @@ def clean_dataset_task( # Fix: Always clean vector database resources regardless of document existence # This ensures all 33 vector databases properly drop tables/collections/indices if doc_form is None: - raise ValueError("Index type must be specified.") + # Use default paragraph index type for empty datasets to enable vector database cleanup + from core.rag.index_processor.constant.index_type import IndexType + + doc_form = IndexType.PARAGRAPH_INDEX + logging.info( + click.style(f"No documents found, using default index type for cleanup: {doc_form}", fg="yellow") + ) + index_processor = IndexProcessorFactory(doc_form).init_index_processor() index_processor.clean(dataset, None, with_keywords=True, delete_child_chunks=True) From 8362365eae5ade8f35a153debc9f2671167d3083 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Fri, 8 Aug 2025 22:58:52 +0800 Subject: [PATCH 05/21] Fix file type misclassification in logs interface (#23641) --- .../base/file-uploader/utils.spec.ts | 234 +++++++++++++++++- .../components/base/file-uploader/utils.ts | 24 +- 2 files changed, 250 insertions(+), 8 deletions(-) diff --git a/web/app/components/base/file-uploader/utils.spec.ts b/web/app/components/base/file-uploader/utils.spec.ts index 4a3408ef00..774c38eb53 100644 --- a/web/app/components/base/file-uploader/utils.spec.ts +++ b/web/app/components/base/file-uploader/utils.spec.ts @@ -36,7 +36,7 @@ describe('file-uploader utils', () => { }) describe('fileUpload', () => { - it('should handle successful file upload', async () => { + it('should handle successful file upload', () => { const mockFile = new File(['test'], 'test.txt') const mockCallbacks = { onProgressCallback: jest.fn(), @@ -46,13 +46,12 @@ describe('file-uploader utils', () => { jest.mocked(upload).mockResolvedValue({ id: '123' }) - await fileUpload({ + fileUpload({ file: mockFile, ...mockCallbacks, }) expect(upload).toHaveBeenCalled() - expect(mockCallbacks.onSuccessCallback).toHaveBeenCalledWith({ id: '123' }) }) }) @@ -284,7 +283,23 @@ describe('file-uploader utils', () => { }) describe('getProcessedFilesFromResponse', () => { - it('should process files correctly', () => { + beforeEach(() => { + jest.mocked(mime.getAllExtensions).mockImplementation((mimeType: string) => { + const mimeMap: Record> = { + 'image/jpeg': new Set(['jpg', 'jpeg']), + 'image/png': new Set(['png']), + 'image/gif': 
new Set(['gif']), + 'video/mp4': new Set(['mp4']), + 'audio/mp3': new Set(['mp3']), + 'application/pdf': new Set(['pdf']), + 'text/plain': new Set(['txt']), + 'application/json': new Set(['json']), + } + return mimeMap[mimeType] || new Set() + }) + }) + + it('should process files correctly without type correction', () => { const files = [{ related_id: '2a38e2ca-1295-415d-a51d-65d4ff9912d9', extension: '.jpeg', @@ -294,6 +309,8 @@ describe('file-uploader utils', () => { transfer_method: TransferMethod.local_file, type: 'image', url: 'https://upload.dify.dev/files/xxx/file-preview', + upload_file_id: '2a38e2ca-1295-415d-a51d-65d4ff9912d9', + remote_url: '', }] const result = getProcessedFilesFromResponse(files) @@ -309,6 +326,215 @@ describe('file-uploader utils', () => { url: 'https://upload.dify.dev/files/xxx/file-preview', }) }) + + it('should correct image file misclassified as document', () => { + const files = [{ + related_id: '123', + extension: '.jpg', + filename: 'image.jpg', + size: 1024, + mime_type: 'image/jpeg', + transfer_method: TransferMethod.local_file, + type: 'document', + url: 'https://example.com/image.jpg', + upload_file_id: '123', + remote_url: '', + }] + + const result = getProcessedFilesFromResponse(files) + expect(result[0].supportFileType).toBe('image') + }) + + it('should correct video file misclassified as document', () => { + const files = [{ + related_id: '123', + extension: '.mp4', + filename: 'video.mp4', + size: 1024, + mime_type: 'video/mp4', + transfer_method: TransferMethod.local_file, + type: 'document', + url: 'https://example.com/video.mp4', + upload_file_id: '123', + remote_url: '', + }] + + const result = getProcessedFilesFromResponse(files) + expect(result[0].supportFileType).toBe('video') + }) + + it('should correct audio file misclassified as document', () => { + const files = [{ + related_id: '123', + extension: '.mp3', + filename: 'audio.mp3', + size: 1024, + mime_type: 'audio/mp3', + transfer_method: TransferMethod.local_file, + type: 'document', + url: 'https://example.com/audio.mp3', + upload_file_id: '123', + remote_url: '', + }] + + const result = getProcessedFilesFromResponse(files) + expect(result[0].supportFileType).toBe('audio') + }) + + it('should correct document file misclassified as image', () => { + const files = [{ + related_id: '123', + extension: '.pdf', + filename: 'document.pdf', + size: 1024, + mime_type: 'application/pdf', + transfer_method: TransferMethod.local_file, + type: 'image', + url: 'https://example.com/document.pdf', + upload_file_id: '123', + remote_url: '', + }] + + const result = getProcessedFilesFromResponse(files) + expect(result[0].supportFileType).toBe('document') + }) + + it('should NOT correct when filename and MIME type conflict', () => { + const files = [{ + related_id: '123', + extension: '.pdf', + filename: 'document.pdf', + size: 1024, + mime_type: 'image/jpeg', + transfer_method: TransferMethod.local_file, + type: 'document', + url: 'https://example.com/document.pdf', + upload_file_id: '123', + remote_url: '', + }] + + const result = getProcessedFilesFromResponse(files) + expect(result[0].supportFileType).toBe('document') + }) + + it('should NOT correct when filename and MIME type both point to wrong type', () => { + const files = [{ + related_id: '123', + extension: '.jpg', + filename: 'image.jpg', + size: 1024, + mime_type: 'image/jpeg', + transfer_method: TransferMethod.local_file, + type: 'image', + url: 'https://example.com/image.jpg', + upload_file_id: '123', + remote_url: '', + }] + + const 
result = getProcessedFilesFromResponse(files) + expect(result[0].supportFileType).toBe('image') + }) + + it('should handle files with missing filename', () => { + const files = [{ + related_id: '123', + extension: '', + filename: '', + size: 1024, + mime_type: 'image/jpeg', + transfer_method: TransferMethod.local_file, + type: 'document', + url: 'https://example.com/file', + upload_file_id: '123', + remote_url: '', + }] + + const result = getProcessedFilesFromResponse(files) + expect(result[0].supportFileType).toBe('document') + }) + + it('should handle files with missing MIME type', () => { + const files = [{ + related_id: '123', + extension: '.jpg', + filename: 'image.jpg', + size: 1024, + mime_type: '', + transfer_method: TransferMethod.local_file, + type: 'document', + url: 'https://example.com/image.jpg', + upload_file_id: '123', + remote_url: '', + }] + + const result = getProcessedFilesFromResponse(files) + expect(result[0].supportFileType).toBe('document') + }) + + it('should handle files with unknown extensions', () => { + const files = [{ + related_id: '123', + extension: '.unknown', + filename: 'file.unknown', + size: 1024, + mime_type: 'application/unknown', + transfer_method: TransferMethod.local_file, + type: 'document', + url: 'https://example.com/file.unknown', + upload_file_id: '123', + remote_url: '', + }] + + const result = getProcessedFilesFromResponse(files) + expect(result[0].supportFileType).toBe('document') + }) + + it('should handle multiple different file types correctly', () => { + const files = [ + { + related_id: '1', + extension: '.jpg', + filename: 'correct-image.jpg', + mime_type: 'image/jpeg', + type: 'image', + size: 1024, + transfer_method: TransferMethod.local_file, + url: 'https://example.com/correct-image.jpg', + upload_file_id: '1', + remote_url: '', + }, + { + related_id: '2', + extension: '.png', + filename: 'misclassified-image.png', + mime_type: 'image/png', + type: 'document', + size: 2048, + transfer_method: TransferMethod.local_file, + url: 'https://example.com/misclassified-image.png', + upload_file_id: '2', + remote_url: '', + }, + { + related_id: '3', + extension: '.pdf', + filename: 'conflicted.pdf', + mime_type: 'image/jpeg', + type: 'document', + size: 3072, + transfer_method: TransferMethod.local_file, + url: 'https://example.com/conflicted.pdf', + upload_file_id: '3', + remote_url: '', + }, + ] + + const result = getProcessedFilesFromResponse(files) + + expect(result[0].supportFileType).toBe('image') // correct, no change + expect(result[1].supportFileType).toBe('image') // corrected from document to image + expect(result[2].supportFileType).toBe('document') // conflict, no change + }) }) describe('getFileNameFromUrl', () => { diff --git a/web/app/components/base/file-uploader/utils.ts b/web/app/components/base/file-uploader/utils.ts index e870f9edab..9c217646ca 100644 --- a/web/app/components/base/file-uploader/utils.ts +++ b/web/app/components/base/file-uploader/utils.ts @@ -70,10 +70,13 @@ export const getFileExtension = (fileName: string, fileMimetype: string, isRemot } } if (!extension) { - if (extensions.size > 0) - extension = extensions.values().next().value.toLowerCase() - else + if (extensions.size > 0) { + const firstExtension = extensions.values().next().value + extension = firstExtension ? 
firstExtension.toLowerCase() : '' + } + else { extension = extensionInFileName + } } if (isRemote) @@ -145,6 +148,19 @@ export const getProcessedFiles = (files: FileEntity[]) => { export const getProcessedFilesFromResponse = (files: FileResponse[]) => { return files.map((fileItem) => { + let supportFileType = fileItem.type + + if (fileItem.filename && fileItem.mime_type) { + const detectedTypeFromFileName = getSupportFileType(fileItem.filename, '') + const detectedTypeFromMime = getSupportFileType('', fileItem.mime_type) + + if (detectedTypeFromFileName + && detectedTypeFromMime + && detectedTypeFromFileName === detectedTypeFromMime + && detectedTypeFromFileName !== fileItem.type) + supportFileType = detectedTypeFromFileName + } + return { id: fileItem.related_id, name: fileItem.filename, @@ -152,7 +168,7 @@ export const getProcessedFilesFromResponse = (files: FileResponse[]) => { type: fileItem.mime_type, progress: 100, transferMethod: fileItem.transfer_method, - supportFileType: fileItem.type, + supportFileType, uploadedId: fileItem.upload_file_id || fileItem.related_id, url: fileItem.url || fileItem.remote_url, } From 41345199d83c50ce3af84ef1dab032373a44a062 Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Sat, 9 Aug 2025 12:57:24 +0800 Subject: [PATCH 06/21] Feat add testcontainers test for api base extendsion service (#23652) --- .../test_api_based_extension_service.py | 487 ++++++++++++++++++ 1 file changed, 487 insertions(+) create mode 100644 api/tests/test_containers_integration_tests/services/test_api_based_extension_service.py diff --git a/api/tests/test_containers_integration_tests/services/test_api_based_extension_service.py b/api/tests/test_containers_integration_tests/services/test_api_based_extension_service.py new file mode 100644 index 0000000000..38f532fd64 --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/test_api_based_extension_service.py @@ -0,0 +1,487 @@ +from unittest.mock import patch + +import pytest +from faker import Faker + +from models.api_based_extension import APIBasedExtension +from services.account_service import AccountService, TenantService +from services.api_based_extension_service import APIBasedExtensionService + + +class TestAPIBasedExtensionService: + """Integration tests for APIBasedExtensionService using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.account_service.FeatureService") as mock_account_feature_service, + patch("services.api_based_extension_service.APIBasedExtensionRequestor") as mock_requestor, + ): + # Setup default mock returns + mock_account_feature_service.get_features.return_value.billing.enabled = False + + # Mock successful ping response + mock_requestor_instance = mock_requestor.return_value + mock_requestor_instance.request.return_value = {"result": "pong"} + + yield { + "account_feature_service": mock_account_feature_service, + "requestor": mock_requestor, + "requestor_instance": mock_requestor_instance, + } + + def _create_test_account_and_tenant(self, db_session_with_containers, mock_external_service_dependencies): + """ + Helper method to create a test account and tenant for testing. 
+ + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + + Returns: + tuple: (account, tenant) - Created account and tenant instances + """ + fake = Faker() + + # Setup mocks for account creation + mock_external_service_dependencies[ + "account_feature_service" + ].get_system_features.return_value.is_allow_register = True + + # Create account and tenant + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + return account, tenant + + def test_save_extension_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful saving of API-based extension. + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Setup extension data + extension_data = APIBasedExtension() + extension_data.tenant_id = tenant.id + extension_data.name = fake.company() + extension_data.api_endpoint = f"https://{fake.domain_name()}/api" + extension_data.api_key = fake.password(length=20) + + # Save extension + saved_extension = APIBasedExtensionService.save(extension_data) + + # Verify extension was saved correctly + assert saved_extension.id is not None + assert saved_extension.tenant_id == tenant.id + assert saved_extension.name == extension_data.name + assert saved_extension.api_endpoint == extension_data.api_endpoint + assert saved_extension.api_key == extension_data.api_key # Should be decrypted when retrieved + assert saved_extension.created_at is not None + + # Verify extension was saved to database + from extensions.ext_database import db + + db.session.refresh(saved_extension) + assert saved_extension.id is not None + + # Verify ping connection was called + mock_external_service_dependencies["requestor_instance"].request.assert_called_once() + + def test_save_extension_validation_errors(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test validation errors when saving extension with invalid data. + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Test empty name + extension_data = APIBasedExtension() + extension_data.tenant_id = tenant.id + extension_data.name = "" + extension_data.api_endpoint = f"https://{fake.domain_name()}/api" + extension_data.api_key = fake.password(length=20) + + with pytest.raises(ValueError, match="name must not be empty"): + APIBasedExtensionService.save(extension_data) + + # Test empty api_endpoint + extension_data.name = fake.company() + extension_data.api_endpoint = "" + + with pytest.raises(ValueError, match="api_endpoint must not be empty"): + APIBasedExtensionService.save(extension_data) + + # Test empty api_key + extension_data.api_endpoint = f"https://{fake.domain_name()}/api" + extension_data.api_key = "" + + with pytest.raises(ValueError, match="api_key must not be empty"): + APIBasedExtensionService.save(extension_data) + + def test_get_all_by_tenant_id_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful retrieval of all extensions by tenant ID. 
+ """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create multiple extensions + extensions = [] + for i in range(3): + extension_data = APIBasedExtension() + extension_data.tenant_id = tenant.id + extension_data.name = f"Extension {i}: {fake.company()}" + extension_data.api_endpoint = f"https://{fake.domain_name()}/api" + extension_data.api_key = fake.password(length=20) + + saved_extension = APIBasedExtensionService.save(extension_data) + extensions.append(saved_extension) + + # Get all extensions for tenant + extension_list = APIBasedExtensionService.get_all_by_tenant_id(tenant.id) + + # Verify results + assert len(extension_list) == 3 + + # Verify all extensions belong to the correct tenant and are ordered by created_at desc + for i, extension in enumerate(extension_list): + assert extension.tenant_id == tenant.id + assert extension.api_key is not None # Should be decrypted + if i > 0: + # Verify descending order (newer first) + assert extension.created_at <= extension_list[i - 1].created_at + + def test_get_with_tenant_id_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful retrieval of extension by tenant ID and extension ID. + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create an extension + extension_data = APIBasedExtension() + extension_data.tenant_id = tenant.id + extension_data.name = fake.company() + extension_data.api_endpoint = f"https://{fake.domain_name()}/api" + extension_data.api_key = fake.password(length=20) + + created_extension = APIBasedExtensionService.save(extension_data) + + # Get extension by ID + retrieved_extension = APIBasedExtensionService.get_with_tenant_id(tenant.id, created_extension.id) + + # Verify extension was retrieved correctly + assert retrieved_extension is not None + assert retrieved_extension.id == created_extension.id + assert retrieved_extension.tenant_id == tenant.id + assert retrieved_extension.name == extension_data.name + assert retrieved_extension.api_endpoint == extension_data.api_endpoint + assert retrieved_extension.api_key == extension_data.api_key # Should be decrypted + assert retrieved_extension.created_at is not None + + def test_get_with_tenant_id_not_found(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test retrieval of extension when extension is not found. + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + non_existent_extension_id = fake.uuid4() + + # Try to get non-existent extension + with pytest.raises(ValueError, match="API based extension is not found"): + APIBasedExtensionService.get_with_tenant_id(tenant.id, non_existent_extension_id) + + def test_delete_extension_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful deletion of extension. 
+ """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create an extension first + extension_data = APIBasedExtension() + extension_data.tenant_id = tenant.id + extension_data.name = fake.company() + extension_data.api_endpoint = f"https://{fake.domain_name()}/api" + extension_data.api_key = fake.password(length=20) + + created_extension = APIBasedExtensionService.save(extension_data) + extension_id = created_extension.id + + # Delete the extension + APIBasedExtensionService.delete(created_extension) + + # Verify extension was deleted + from extensions.ext_database import db + + deleted_extension = db.session.query(APIBasedExtension).filter(APIBasedExtension.id == extension_id).first() + assert deleted_extension is None + + def test_save_extension_duplicate_name(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test validation error when saving extension with duplicate name. + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create first extension + extension_data1 = APIBasedExtension() + extension_data1.tenant_id = tenant.id + extension_data1.name = "Test Extension" + extension_data1.api_endpoint = f"https://{fake.domain_name()}/api" + extension_data1.api_key = fake.password(length=20) + + APIBasedExtensionService.save(extension_data1) + + # Try to create second extension with same name + extension_data2 = APIBasedExtension() + extension_data2.tenant_id = tenant.id + extension_data2.name = "Test Extension" # Same name + extension_data2.api_endpoint = f"https://{fake.domain_name()}/api" + extension_data2.api_key = fake.password(length=20) + + with pytest.raises(ValueError, match="name must be unique, it is already existed"): + APIBasedExtensionService.save(extension_data2) + + def test_save_extension_update_existing(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful update of existing extension. 
+ """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create initial extension + extension_data = APIBasedExtension() + extension_data.tenant_id = tenant.id + extension_data.name = fake.company() + extension_data.api_endpoint = f"https://{fake.domain_name()}/api" + extension_data.api_key = fake.password(length=20) + + created_extension = APIBasedExtensionService.save(extension_data) + + # Save original values for later comparison + original_name = created_extension.name + original_endpoint = created_extension.api_endpoint + + # Update the extension + new_name = fake.company() + new_endpoint = f"https://{fake.domain_name()}/api" + new_api_key = fake.password(length=20) + + created_extension.name = new_name + created_extension.api_endpoint = new_endpoint + created_extension.api_key = new_api_key + + updated_extension = APIBasedExtensionService.save(created_extension) + + # Verify extension was updated correctly + assert updated_extension.id == created_extension.id + assert updated_extension.tenant_id == tenant.id + assert updated_extension.name == new_name + assert updated_extension.api_endpoint == new_endpoint + + # Verify original values were changed + assert updated_extension.name != original_name + assert updated_extension.api_endpoint != original_endpoint + + # Verify ping connection was called for both create and update + assert mock_external_service_dependencies["requestor_instance"].request.call_count == 2 + + # Verify the update by retrieving the extension again + retrieved_extension = APIBasedExtensionService.get_with_tenant_id(tenant.id, created_extension.id) + assert retrieved_extension.name == new_name + assert retrieved_extension.api_endpoint == new_endpoint + assert retrieved_extension.api_key == new_api_key # Should be decrypted when retrieved + + def test_save_extension_connection_error(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test connection error when saving extension with invalid endpoint. + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Mock connection error + mock_external_service_dependencies["requestor_instance"].request.side_effect = ValueError( + "connection error: request timeout" + ) + + # Setup extension data + extension_data = APIBasedExtension() + extension_data.tenant_id = tenant.id + extension_data.name = fake.company() + extension_data.api_endpoint = "https://invalid-endpoint.com/api" + extension_data.api_key = fake.password(length=20) + + # Try to save extension with connection error + with pytest.raises(ValueError, match="connection error: request timeout"): + APIBasedExtensionService.save(extension_data) + + def test_save_extension_invalid_api_key_length( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test validation error when saving extension with API key that is too short. 
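+        A four-character key ("1234") is used to trip the five-character minimum.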
+ """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Setup extension data with short API key + extension_data = APIBasedExtension() + extension_data.tenant_id = tenant.id + extension_data.name = fake.company() + extension_data.api_endpoint = f"https://{fake.domain_name()}/api" + extension_data.api_key = "1234" # Less than 5 characters + + # Try to save extension with short API key + with pytest.raises(ValueError, match="api_key must be at least 5 characters"): + APIBasedExtensionService.save(extension_data) + + def test_save_extension_empty_fields(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test validation errors when saving extension with empty required fields. + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Test with None values + extension_data = APIBasedExtension() + extension_data.tenant_id = tenant.id + extension_data.name = None + extension_data.api_endpoint = f"https://{fake.domain_name()}/api" + extension_data.api_key = fake.password(length=20) + + with pytest.raises(ValueError, match="name must not be empty"): + APIBasedExtensionService.save(extension_data) + + # Test with None api_endpoint + extension_data.name = fake.company() + extension_data.api_endpoint = None + + with pytest.raises(ValueError, match="api_endpoint must not be empty"): + APIBasedExtensionService.save(extension_data) + + # Test with None api_key + extension_data.api_endpoint = f"https://{fake.domain_name()}/api" + extension_data.api_key = None + + with pytest.raises(ValueError, match="api_key must not be empty"): + APIBasedExtensionService.save(extension_data) + + def test_get_all_by_tenant_id_empty_list(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test retrieval of extensions when no extensions exist for tenant. + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Get all extensions for tenant (none exist) + extension_list = APIBasedExtensionService.get_all_by_tenant_id(tenant.id) + + # Verify empty list is returned + assert len(extension_list) == 0 + assert extension_list == [] + + def test_save_extension_invalid_ping_response(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test validation error when ping response is invalid. + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Mock invalid ping response + mock_external_service_dependencies["requestor_instance"].request.return_value = {"result": "invalid"} + + # Setup extension data + extension_data = APIBasedExtension() + extension_data.tenant_id = tenant.id + extension_data.name = fake.company() + extension_data.api_endpoint = f"https://{fake.domain_name()}/api" + extension_data.api_key = fake.password(length=20) + + # Try to save extension with invalid ping response + with pytest.raises(ValueError, match="{'result': 'invalid'}"): + APIBasedExtensionService.save(extension_data) + + def test_save_extension_missing_ping_result(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test validation error when ping response is missing result field. 
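+        The service embeds the raw ping payload in the raised error, so the
+        test matches on the repr of the response dict rather than a fixed message.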
+ """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Mock ping response without result field + mock_external_service_dependencies["requestor_instance"].request.return_value = {"status": "ok"} + + # Setup extension data + extension_data = APIBasedExtension() + extension_data.tenant_id = tenant.id + extension_data.name = fake.company() + extension_data.api_endpoint = f"https://{fake.domain_name()}/api" + extension_data.api_key = fake.password(length=20) + + # Try to save extension with missing ping result + with pytest.raises(ValueError, match="{'status': 'ok'}"): + APIBasedExtensionService.save(extension_data) + + def test_get_with_tenant_id_wrong_tenant(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test retrieval of extension when tenant ID doesn't match. + """ + fake = Faker() + account1, tenant1 = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create second account and tenant + account2, tenant2 = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create extension in first tenant + extension_data = APIBasedExtension() + extension_data.tenant_id = tenant1.id + extension_data.name = fake.company() + extension_data.api_endpoint = f"https://{fake.domain_name()}/api" + extension_data.api_key = fake.password(length=20) + + created_extension = APIBasedExtensionService.save(extension_data) + + # Try to get extension with wrong tenant ID + with pytest.raises(ValueError, match="API based extension is not found"): + APIBasedExtensionService.get_with_tenant_id(tenant2.id, created_extension.id) From 5a0a2b7e44b7a0adc81910014b415e15822ab7ad Mon Sep 17 00:00:00 2001 From: Ganondorf <364776488@qq.com> Date: Sat, 9 Aug 2025 15:31:32 +0800 Subject: [PATCH 07/21] Allow to export full screen image of workflow (#23655) --- .../workflow/operator/export-image.tsx | 258 +++++++++++++----- web/i18n/en-US/workflow.ts | 2 + web/i18n/ja-JP/workflow.ts | 2 + web/i18n/zh-Hans/workflow.ts | 2 + 4 files changed, 202 insertions(+), 62 deletions(-) diff --git a/web/app/components/workflow/operator/export-image.tsx b/web/app/components/workflow/operator/export-image.tsx index f59f0cd92b..546c702d64 100644 --- a/web/app/components/workflow/operator/export-image.tsx +++ b/web/app/components/workflow/operator/export-image.tsx @@ -16,15 +16,20 @@ import { PortalToFollowElemContent, PortalToFollowElemTrigger, } from '@/app/components/base/portal-to-follow-elem' +import { getNodesBounds, useReactFlow } from 'reactflow' +import ImagePreview from '@/app/components/base/image-uploader/image-preview' const ExportImage: FC = () => { const { t } = useTranslation() const { getNodesReadOnly } = useNodesReadOnly() + const reactFlow = useReactFlow() const appDetail = useAppStore(s => s.appDetail) const [open, setOpen] = useState(false) + const [previewUrl, setPreviewUrl] = useState('') + const [previewTitle, setPreviewTitle] = useState('') - const handleExportImage = useCallback(async (type: 'png' | 'jpeg' | 'svg') => { + const handleExportImage = useCallback(async (type: 'png' | 'jpeg' | 'svg', currentWorkflow = false) => { if (!appDetail) return @@ -44,31 +49,123 @@ const ExportImage: FC = () => { } let dataUrl - switch (type) { - case 'png': - dataUrl = await toPng(flowElement, { filter }) - break - case 'jpeg': - dataUrl = await toJpeg(flowElement, { filter }) - break - case 
'svg': - dataUrl = await toSvg(flowElement, { filter }) - break - default: - dataUrl = await toPng(flowElement, { filter }) + let filename = `${appDetail.name}` + + if (currentWorkflow) { + // Get all nodes and their bounds + const nodes = reactFlow.getNodes() + const nodesBounds = getNodesBounds(nodes) + + // Save current viewport + const currentViewport = reactFlow.getViewport() + + // Calculate the required zoom to fit all nodes + const viewportWidth = window.innerWidth + const viewportHeight = window.innerHeight + const zoom = Math.min( + viewportWidth / (nodesBounds.width + 100), + viewportHeight / (nodesBounds.height + 100), + 1, + ) + + // Calculate center position + const centerX = nodesBounds.x + nodesBounds.width / 2 + const centerY = nodesBounds.y + nodesBounds.height / 2 + + // Set viewport to show all nodes + reactFlow.setViewport({ + x: viewportWidth / 2 - centerX * zoom, + y: viewportHeight / 2 - centerY * zoom, + zoom, + }) + + // Wait for the transition to complete + await new Promise(resolve => setTimeout(resolve, 300)) + + // Calculate actual content size with padding + const padding = 50 // More padding for better visualization + const contentWidth = nodesBounds.width + padding * 2 + const contentHeight = nodesBounds.height + padding * 2 + + // Export with higher quality for whole workflow + const exportOptions = { + filter, + backgroundColor: '#1a1a1a', // Dark background to match previous style + pixelRatio: 2, // Higher resolution for better zoom + width: contentWidth, + height: contentHeight, + style: { + width: `${contentWidth}px`, + height: `${contentHeight}px`, + transform: `translate(${padding - nodesBounds.x}px, ${padding - nodesBounds.y}px) scale(${zoom})`, + }, + } + + switch (type) { + case 'png': + dataUrl = await toPng(flowElement, exportOptions) + break + case 'jpeg': + dataUrl = await toJpeg(flowElement, exportOptions) + break + case 'svg': + dataUrl = await toSvg(flowElement, { filter }) + break + default: + dataUrl = await toPng(flowElement, exportOptions) + } + + filename += '-whole-workflow' + + // Restore original viewport after a delay + setTimeout(() => { + reactFlow.setViewport(currentViewport) + }, 500) + } + else { + // Current viewport export (existing functionality) + switch (type) { + case 'png': + dataUrl = await toPng(flowElement, { filter }) + break + case 'jpeg': + dataUrl = await toJpeg(flowElement, { filter }) + break + case 'svg': + dataUrl = await toSvg(flowElement, { filter }) + break + default: + dataUrl = await toPng(flowElement, { filter }) + } } - const link = document.createElement('a') - link.href = dataUrl - link.download = `${appDetail.name}.${type}` - document.body.appendChild(link) - link.click() - document.body.removeChild(link) + if (currentWorkflow) { + // For whole workflow, show preview first + setPreviewUrl(dataUrl) + setPreviewTitle(`${filename}.${type}`) + + // Also auto-download + const link = document.createElement('a') + link.href = dataUrl + link.download = `${filename}.${type}` + document.body.appendChild(link) + link.click() + document.body.removeChild(link) + } + else { + // For current view, just download + const link = document.createElement('a') + link.href = dataUrl + link.download = `${filename}.${type}` + document.body.appendChild(link) + link.click() + document.body.removeChild(link) + } } catch (error) { console.error('Export image failed:', error) } - }, [getNodesReadOnly, appDetail]) + }, [getNodesReadOnly, appDetail, reactFlow]) const handleTrigger = useCallback(() => { if (getNodesReadOnly()) @@ 
-78,53 +175,90 @@ const ExportImage: FC = () => {
   }, [getNodesReadOnly])
 
   return (
-    <PortalToFollowElem open={open} onOpenChange={setOpen}>
-      <PortalToFollowElemTrigger onClick={handleTrigger}>
-        <div>
-          <RiImage2Line className='h-4 w-4' />
-        </div>
-      </PortalToFollowElemTrigger>
-      <PortalToFollowElemContent className='z-50'>
-        <div>
-          <div onClick={() => handleExportImage('png')}>
-            {t('workflow.common.exportPNG')}
-          </div>
-          <div onClick={() => handleExportImage('jpeg')}>
-            {t('workflow.common.exportJPEG')}
-          </div>
-          <div onClick={() => handleExportImage('svg')}>
-            {t('workflow.common.exportSVG')}
-          </div>
-        </div>
-      </PortalToFollowElemContent>
-    </PortalToFollowElem>
+    <>
+      <PortalToFollowElem open={open} onOpenChange={setOpen}>
+        <PortalToFollowElemTrigger>
+          <div
+            className={cn(
+              'flex h-8 w-8 cursor-pointer items-center justify-center rounded-lg hover:bg-state-base-hover hover:text-text-secondary',
+              `${getNodesReadOnly() && 'cursor-not-allowed text-text-disabled hover:bg-transparent hover:text-text-disabled'}`,
+            )}
+            onClick={handleTrigger}
+          >
+            <RiImage2Line className='h-4 w-4' />
+          </div>
+        </PortalToFollowElemTrigger>
+        <PortalToFollowElemContent className='z-50'>
+          <div>
+            <div>
+              {t('workflow.common.currentView')}
+            </div>
+            <div onClick={() => handleExportImage('png')}>
+              {t('workflow.common.exportPNG')}
+            </div>
+            <div onClick={() => handleExportImage('jpeg')}>
+              {t('workflow.common.exportJPEG')}
+            </div>
+            <div onClick={() => handleExportImage('svg')}>
+              {t('workflow.common.exportSVG')}
+            </div>
+            <div>
+              {t('workflow.common.currentWorkflow')}
+            </div>
+            <div onClick={() => handleExportImage('png', true)}>
+              {t('workflow.common.exportPNG')}
+            </div>
+            <div onClick={() => handleExportImage('jpeg', true)}>
+              {t('workflow.common.exportJPEG')}
+            </div>
+            <div onClick={() => handleExportImage('svg', true)}>
+              {t('workflow.common.exportSVG')}
+            </div>
+          </div>
+        </PortalToFollowElemContent>
+      </PortalToFollowElem>
- + + + + {previewUrl && ( + setPreviewUrl('')} + /> + )} + ) } diff --git a/web/i18n/en-US/workflow.ts b/web/i18n/en-US/workflow.ts index 2653303e63..467044d89a 100644 --- a/web/i18n/en-US/workflow.ts +++ b/web/i18n/en-US/workflow.ts @@ -74,6 +74,8 @@ const translation = { exportPNG: 'Export as PNG', exportJPEG: 'Export as JPEG', exportSVG: 'Export as SVG', + currentView: 'Current View', + currentWorkflow: 'Current Workflow', model: 'Model', workflowAsTool: 'Workflow as Tool', configureRequired: 'Configure Required', diff --git a/web/i18n/ja-JP/workflow.ts b/web/i18n/ja-JP/workflow.ts index b447bff2b5..30b914da3f 100644 --- a/web/i18n/ja-JP/workflow.ts +++ b/web/i18n/ja-JP/workflow.ts @@ -74,6 +74,8 @@ const translation = { exportPNG: 'PNG で出力', exportJPEG: 'JPEG で出力', exportSVG: 'SVG で出力', + currentView: '現在のビュー', + currentWorkflow: '現在のワークフロー', model: 'モデル', workflowAsTool: 'ワークフローをツールとして公開する', configureRequired: '設定が必要', diff --git a/web/i18n/zh-Hans/workflow.ts b/web/i18n/zh-Hans/workflow.ts index e18c597306..6a74dc7e0a 100644 --- a/web/i18n/zh-Hans/workflow.ts +++ b/web/i18n/zh-Hans/workflow.ts @@ -73,6 +73,8 @@ const translation = { exportPNG: '导出为 PNG', exportJPEG: '导出为 JPEG', exportSVG: '导出为 SVG', + currentView: '当前视图', + currentWorkflow: '整个工作流', model: '模型', workflowAsTool: '发布为工具', configureRequired: '需要进行配置', From f9abcfd78936c31ac052227e937544c3dcd4126a Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Sat, 9 Aug 2025 16:31:24 +0800 Subject: [PATCH 08/21] fix: change i18n workflow trigger mechanism to run in main repository (#23662) --- .../translate-i18n-base-on-english.yml | 18 +++++++++++++----- web/i18n-config/auto-gen-i18n.js | 8 +++++--- web/i18n/de-DE/workflow.ts | 2 ++ web/i18n/es-ES/workflow.ts | 2 ++ web/i18n/fa-IR/workflow.ts | 2 ++ web/i18n/fr-FR/workflow.ts | 2 ++ web/i18n/hi-IN/workflow.ts | 2 ++ web/i18n/it-IT/workflow.ts | 2 ++ web/i18n/ko-KR/workflow.ts | 2 ++ web/i18n/pl-PL/workflow.ts | 2 ++ web/i18n/pt-BR/workflow.ts | 2 ++ web/i18n/ro-RO/workflow.ts | 2 ++ web/i18n/ru-RU/workflow.ts | 2 ++ web/i18n/sl-SI/workflow.ts | 2 ++ web/i18n/th-TH/workflow.ts | 2 ++ web/i18n/tr-TR/workflow.ts | 2 ++ web/i18n/uk-UA/workflow.ts | 2 ++ web/i18n/vi-VN/workflow.ts | 2 ++ web/i18n/zh-Hant/workflow.ts | 2 ++ 19 files changed, 52 insertions(+), 8 deletions(-) diff --git a/.github/workflows/translate-i18n-base-on-english.yml b/.github/workflows/translate-i18n-base-on-english.yml index 1cb9c0967b..4b06174ee1 100644 --- a/.github/workflows/translate-i18n-base-on-english.yml +++ b/.github/workflows/translate-i18n-base-on-english.yml @@ -1,9 +1,10 @@ name: Check i18n Files and Create PR on: - pull_request: - types: [closed] + push: branches: [main] + paths: + - 'web/i18n/en-US/*.ts' permissions: contents: write @@ -11,7 +12,7 @@ permissions: jobs: check-and-update: - if: github.event.pull_request.merged == true + if: github.repository == 'langgenius/dify' runs-on: ubuntu-latest defaults: run: @@ -19,7 +20,7 @@ jobs: steps: - uses: actions/checkout@v4 with: - fetch-depth: 2 # last 2 commits + fetch-depth: 2 token: ${{ secrets.GITHUB_TOKEN }} - name: Check for file changes in i18n/en-US @@ -31,6 +32,13 @@ jobs: echo "Changed files: $changed_files" if [ -n "$changed_files" ]; then echo "FILES_CHANGED=true" >> $GITHUB_ENV + file_args="" + for file in $changed_files; do + filename=$(basename "$file" .ts) + file_args="$file_args --file=$filename" + done + echo "FILE_ARGS=$file_args" >> $GITHUB_ENV + echo "File arguments: $file_args" else echo 
"FILES_CHANGED=false" >> $GITHUB_ENV fi @@ -55,7 +63,7 @@ jobs: - name: Generate i18n translations if: env.FILES_CHANGED == 'true' - run: pnpm run auto-gen-i18n + run: pnpm run auto-gen-i18n ${{ env.FILE_ARGS }} - name: Create Pull Request if: env.FILES_CHANGED == 'true' diff --git a/web/i18n-config/auto-gen-i18n.js b/web/i18n-config/auto-gen-i18n.js index 9a8e741063..8949a15e52 100644 --- a/web/i18n-config/auto-gen-i18n.js +++ b/web/i18n-config/auto-gen-i18n.js @@ -212,7 +212,9 @@ export default translation // Add command line argument support const isDryRun = process.argv.includes('--dry-run') -const targetFile = process.argv.find(arg => arg.startsWith('--file='))?.split('=')[1] +const targetFiles = process.argv + .filter(arg => arg.startsWith('--file=')) + .map(arg => arg.split('=')[1]) const targetLang = process.argv.find(arg => arg.startsWith('--lang='))?.split('=')[1] // Rate limiting helper @@ -230,8 +232,8 @@ async function main() { .map(file => file.replace(/\.ts$/, '')) // Removed app-debug exclusion, now only skip specific problematic keys - // Filter by target file if specified - const filesToProcess = targetFile ? files.filter(f => f === targetFile) : files + // Filter by target files if specified + const filesToProcess = targetFiles.length > 0 ? files.filter(f => targetFiles.includes(f)) : files const languagesToProcess = targetLang ? [targetLang] : Object.keys(languageKeyMap) console.log(`📁 Files to process: ${filesToProcess.join(', ')}`) diff --git a/web/i18n/de-DE/workflow.ts b/web/i18n/de-DE/workflow.ts index 639e47aa0a..7dada39329 100644 --- a/web/i18n/de-DE/workflow.ts +++ b/web/i18n/de-DE/workflow.ts @@ -114,6 +114,8 @@ const translation = { needEndNode: 'Der Endknoten muss hinzugefügt werden.', needAnswerNode: 'Der Antwortknoten muss hinzugefügt werden.', tagBound: 'Anzahl der Apps, die dieses Tag verwenden', + currentWorkflow: 'Aktueller Arbeitsablauf', + currentView: 'Aktuelle Ansicht', }, env: { envPanelTitle: 'Umgebungsvariablen', diff --git a/web/i18n/es-ES/workflow.ts b/web/i18n/es-ES/workflow.ts index e5fd23158c..cca0bd3e55 100644 --- a/web/i18n/es-ES/workflow.ts +++ b/web/i18n/es-ES/workflow.ts @@ -114,6 +114,8 @@ const translation = { needEndNode: 'Se debe agregar el nodo Final', addBlock: 'Agregar nodo', tagBound: 'Número de aplicaciones que utilizan esta etiqueta', + currentView: 'Vista actual', + currentWorkflow: 'Flujo de trabajo actual', }, env: { envPanelTitle: 'Variables de Entorno', diff --git a/web/i18n/fa-IR/workflow.ts b/web/i18n/fa-IR/workflow.ts index 982b32b010..04db159c3f 100644 --- a/web/i18n/fa-IR/workflow.ts +++ b/web/i18n/fa-IR/workflow.ts @@ -114,6 +114,8 @@ const translation = { needAnswerNode: 'باید گره پاسخ اضافه شود', addBlock: 'نود اضافه کنید', tagBound: 'تعداد برنامه‌هایی که از این برچسب استفاده می‌کنند', + currentView: 'نمای فعلی', + currentWorkflow: 'گردش کار فعلی', }, env: { envPanelTitle: 'متغیرهای محیطی', diff --git a/web/i18n/fr-FR/workflow.ts b/web/i18n/fr-FR/workflow.ts index f75b11a804..ff414dd8d4 100644 --- a/web/i18n/fr-FR/workflow.ts +++ b/web/i18n/fr-FR/workflow.ts @@ -114,6 +114,8 @@ const translation = { needAnswerNode: 'Le nœud de réponse doit être ajouté.', addBlock: 'Ajouter un nœud', tagBound: 'Nombre d\'applications utilisant cette étiquette', + currentView: 'Vue actuelle', + currentWorkflow: 'Flux de travail actuel', }, env: { envPanelTitle: 'Variables d\'Environnement', diff --git a/web/i18n/hi-IN/workflow.ts b/web/i18n/hi-IN/workflow.ts index 95a013057b..1b113504cb 100644 --- a/web/i18n/hi-IN/workflow.ts +++ 
b/web/i18n/hi-IN/workflow.ts @@ -117,6 +117,8 @@ const translation = { addBlock: 'नोड जोड़ें', needEndNode: 'अंत नोड जोड़ा जाना चाहिए', tagBound: 'इस टैग का उपयोग करने वाले ऐप्स की संख्या', + currentView: 'वर्तमान दृश्य', + currentWorkflow: 'वर्तमान कार्यप्रवाह', }, env: { envPanelTitle: 'पर्यावरण चर', diff --git a/web/i18n/it-IT/workflow.ts b/web/i18n/it-IT/workflow.ts index 98f7bd4264..e939f65afa 100644 --- a/web/i18n/it-IT/workflow.ts +++ b/web/i18n/it-IT/workflow.ts @@ -118,6 +118,8 @@ const translation = { addBlock: 'Aggiungi nodo', needAnswerNode: 'Deve essere aggiunto il nodo di risposta', tagBound: 'Numero di app che utilizzano questo tag', + currentWorkflow: 'Flusso di lavoro corrente', + currentView: 'Vista corrente', }, env: { envPanelTitle: 'Variabili d\'Ambiente', diff --git a/web/i18n/ko-KR/workflow.ts b/web/i18n/ko-KR/workflow.ts index 2afbc4bfc3..bbe4c1de2f 100644 --- a/web/i18n/ko-KR/workflow.ts +++ b/web/i18n/ko-KR/workflow.ts @@ -118,6 +118,8 @@ const translation = { needAnswerNode: '답변 노드를 추가해야 합니다.', needEndNode: '종단 노드를 추가해야 합니다.', tagBound: '이 태그를 사용하는 앱 수', + currentView: '현재 보기', + currentWorkflow: '현재 워크플로', }, env: { envPanelTitle: '환경 변수', diff --git a/web/i18n/pl-PL/workflow.ts b/web/i18n/pl-PL/workflow.ts index 468260bc9e..c2de2786c9 100644 --- a/web/i18n/pl-PL/workflow.ts +++ b/web/i18n/pl-PL/workflow.ts @@ -114,6 +114,8 @@ const translation = { needEndNode: 'Należy dodać węzeł końcowy', needAnswerNode: 'Węzeł odpowiedzi musi zostać dodany', tagBound: 'Liczba aplikacji korzystających z tego tagu', + currentWorkflow: 'Bieżący przepływ pracy', + currentView: 'Bieżący widok', }, env: { envPanelTitle: 'Zmienne Środowiskowe', diff --git a/web/i18n/pt-BR/workflow.ts b/web/i18n/pt-BR/workflow.ts index cc8c14c3b7..9e217dd768 100644 --- a/web/i18n/pt-BR/workflow.ts +++ b/web/i18n/pt-BR/workflow.ts @@ -114,6 +114,8 @@ const translation = { needEndNode: 'O nó de Fim deve ser adicionado', needAnswerNode: 'O nó de resposta deve ser adicionado', tagBound: 'Número de aplicativos usando esta tag', + currentView: 'Visualização atual', + currentWorkflow: 'Fluxo de trabalho atual', }, env: { envPanelTitle: 'Variáveis de Ambiente', diff --git a/web/i18n/ro-RO/workflow.ts b/web/i18n/ro-RO/workflow.ts index e95b6e66c9..07a2992ed3 100644 --- a/web/i18n/ro-RO/workflow.ts +++ b/web/i18n/ro-RO/workflow.ts @@ -114,6 +114,8 @@ const translation = { needAnswerNode: 'Nodul de răspuns trebuie adăugat', needEndNode: 'Nodul de sfârșit trebuie adăugat', tagBound: 'Numărul de aplicații care folosesc acest tag', + currentView: 'Vizualizare curentă', + currentWorkflow: 'Flux de lucru curent', }, env: { envPanelTitle: 'Variabile de Mediu', diff --git a/web/i18n/ru-RU/workflow.ts b/web/i18n/ru-RU/workflow.ts index 0b36e680c0..a9bd141c1d 100644 --- a/web/i18n/ru-RU/workflow.ts +++ b/web/i18n/ru-RU/workflow.ts @@ -114,6 +114,8 @@ const translation = { needAnswerNode: 'В узел ответа необходимо добавить', needEndNode: 'Узел конца должен быть добавлен', tagBound: 'Количество приложений, использующих этот тег', + currentView: 'Текущий вид', + currentWorkflow: 'Текущий рабочий процесс', }, env: { envPanelTitle: 'Переменные среды', diff --git a/web/i18n/sl-SI/workflow.ts b/web/i18n/sl-SI/workflow.ts index df2b7b5159..a77336575c 100644 --- a/web/i18n/sl-SI/workflow.ts +++ b/web/i18n/sl-SI/workflow.ts @@ -114,6 +114,8 @@ const translation = { configure: 'Konfiguriraj', inRunMode: 'V načinu izvajanja', tagBound: 'Število aplikacij, ki uporabljajo to oznako', + currentView: 'Trenutni pogled', + 
currentWorkflow: 'Trenutni potek dela', }, env: { modal: { diff --git a/web/i18n/th-TH/workflow.ts b/web/i18n/th-TH/workflow.ts index 58f889e6b9..b5ed9b53e9 100644 --- a/web/i18n/th-TH/workflow.ts +++ b/web/i18n/th-TH/workflow.ts @@ -114,6 +114,8 @@ const translation = { addBlock: 'เพิ่มโนด', needEndNode: 'ต้องเพิ่มโหนดจบ', tagBound: 'จำนวนแอปพลิเคชันที่ใช้แท็กนี้', + currentWorkflow: 'เวิร์กโฟลว์ปัจจุบัน', + currentView: 'ปัจจุบัน View', }, env: { envPanelTitle: 'ตัวแปรสภาพแวดล้อม', diff --git a/web/i18n/tr-TR/workflow.ts b/web/i18n/tr-TR/workflow.ts index e66cf35561..3b36788a83 100644 --- a/web/i18n/tr-TR/workflow.ts +++ b/web/i18n/tr-TR/workflow.ts @@ -114,6 +114,8 @@ const translation = { needAnswerNode: 'Cevap düğümü eklenmelidir.', needEndNode: 'Son düğüm eklenmelidir', tagBound: 'Bu etiketi kullanan uygulama sayısı', + currentView: 'Geçerli Görünüm', + currentWorkflow: 'Mevcut İş Akışı', }, env: { envPanelTitle: 'Çevre Değişkenleri', diff --git a/web/i18n/uk-UA/workflow.ts b/web/i18n/uk-UA/workflow.ts index c0aee379d0..5effad6b16 100644 --- a/web/i18n/uk-UA/workflow.ts +++ b/web/i18n/uk-UA/workflow.ts @@ -114,6 +114,8 @@ const translation = { needEndNode: 'Необхідно додати кінцевий вузол', needAnswerNode: 'Вузол Відповіді повинен бути доданий', tagBound: 'Кількість додатків, що використовують цей тег', + currentView: 'Поточний вигляд', + currentWorkflow: 'Поточний робочий процес', }, env: { envPanelTitle: 'Змінні середовища', diff --git a/web/i18n/vi-VN/workflow.ts b/web/i18n/vi-VN/workflow.ts index 0d34d7af31..cd33142587 100644 --- a/web/i18n/vi-VN/workflow.ts +++ b/web/i18n/vi-VN/workflow.ts @@ -114,6 +114,8 @@ const translation = { addBlock: 'Thêm Node', needEndNode: 'Nút Kết thúc phải được thêm vào', tagBound: 'Số lượng ứng dụng sử dụng thẻ này', + currentWorkflow: 'Quy trình làm việc hiện tại', + currentView: 'Hiện tại View', }, env: { envPanelTitle: 'Biến Môi Trường', diff --git a/web/i18n/zh-Hant/workflow.ts b/web/i18n/zh-Hant/workflow.ts index 311997769a..fe703c7bc2 100644 --- a/web/i18n/zh-Hant/workflow.ts +++ b/web/i18n/zh-Hant/workflow.ts @@ -114,6 +114,8 @@ const translation = { exportImage: '匯出圖像', exportJPEG: '匯出為 JPEG', tagBound: '使用此標籤的應用程式數量', + currentView: '當前檢視', + currentWorkflow: '當前工作流程', }, env: { envPanelTitle: '環境變數', From cbe0d9d0536387be608e137443a30e1d27d2ac9b Mon Sep 17 00:00:00 2001 From: -LAN- Date: Sat, 9 Aug 2025 22:40:28 +0800 Subject: [PATCH 09/21] fix: conversation pinned filter returns incorrect results when no conversations are pinned (#23670) --- api/services/conversation_service.py | 14 +- .../services/test_conversation_service.py | 127 ++++++++++++++++++ 2 files changed, 136 insertions(+), 5 deletions(-) create mode 100644 api/tests/unit_tests/services/test_conversation_service.py diff --git a/api/services/conversation_service.py b/api/services/conversation_service.py index 692a3639cd..713c4c6782 100644 --- a/api/services/conversation_service.py +++ b/api/services/conversation_service.py @@ -50,12 +50,16 @@ class ConversationService: Conversation.from_account_id == (user.id if isinstance(user, Account) else None), or_(Conversation.invoke_from.is_(None), Conversation.invoke_from == invoke_from.value), ) - # Check if include_ids is not None and not empty to avoid WHERE false condition - if include_ids is not None and len(include_ids) > 0: + # Check if include_ids is not None to apply filter + if include_ids is not None: + if len(include_ids) == 0: + # If include_ids is empty, return empty result + return InfiniteScrollPagination(data=[], 
limit=limit, has_more=False) stmt = stmt.where(Conversation.id.in_(include_ids)) - # Check if exclude_ids is not None and not empty to avoid WHERE false condition - if exclude_ids is not None and len(exclude_ids) > 0: - stmt = stmt.where(~Conversation.id.in_(exclude_ids)) + # Check if exclude_ids is not None to apply filter + if exclude_ids is not None: + if len(exclude_ids) > 0: + stmt = stmt.where(~Conversation.id.in_(exclude_ids)) # define sort fields and directions sort_field, sort_direction = cls._get_sort_params(sort_by) diff --git a/api/tests/unit_tests/services/test_conversation_service.py b/api/tests/unit_tests/services/test_conversation_service.py new file mode 100644 index 0000000000..9c1c044f03 --- /dev/null +++ b/api/tests/unit_tests/services/test_conversation_service.py @@ -0,0 +1,127 @@ +import uuid +from unittest.mock import MagicMock, patch + +from core.app.entities.app_invoke_entities import InvokeFrom +from services.conversation_service import ConversationService + + +class TestConversationService: + def test_pagination_with_empty_include_ids(self): + """Test that empty include_ids returns empty result""" + mock_session = MagicMock() + mock_app_model = MagicMock(id=str(uuid.uuid4())) + mock_user = MagicMock(id=str(uuid.uuid4())) + + result = ConversationService.pagination_by_last_id( + session=mock_session, + app_model=mock_app_model, + user=mock_user, + last_id=None, + limit=20, + invoke_from=InvokeFrom.WEB_APP, + include_ids=[], # Empty include_ids should return empty result + exclude_ids=None, + ) + + assert result.data == [] + assert result.has_more is False + assert result.limit == 20 + + def test_pagination_with_non_empty_include_ids(self): + """Test that non-empty include_ids filters properly""" + mock_session = MagicMock() + mock_app_model = MagicMock(id=str(uuid.uuid4())) + mock_user = MagicMock(id=str(uuid.uuid4())) + + # Mock the query results + mock_conversations = [MagicMock(id=str(uuid.uuid4())) for _ in range(3)] + mock_session.scalars.return_value.all.return_value = mock_conversations + mock_session.scalar.return_value = 0 + + with patch("services.conversation_service.select") as mock_select: + mock_stmt = MagicMock() + mock_select.return_value = mock_stmt + mock_stmt.where.return_value = mock_stmt + mock_stmt.order_by.return_value = mock_stmt + mock_stmt.limit.return_value = mock_stmt + mock_stmt.subquery.return_value = MagicMock() + + result = ConversationService.pagination_by_last_id( + session=mock_session, + app_model=mock_app_model, + user=mock_user, + last_id=None, + limit=20, + invoke_from=InvokeFrom.WEB_APP, + include_ids=["conv1", "conv2"], # Non-empty include_ids + exclude_ids=None, + ) + + # Verify the where clause was called with id.in_ + assert mock_stmt.where.called + + def test_pagination_with_empty_exclude_ids(self): + """Test that empty exclude_ids doesn't filter""" + mock_session = MagicMock() + mock_app_model = MagicMock(id=str(uuid.uuid4())) + mock_user = MagicMock(id=str(uuid.uuid4())) + + # Mock the query results + mock_conversations = [MagicMock(id=str(uuid.uuid4())) for _ in range(5)] + mock_session.scalars.return_value.all.return_value = mock_conversations + mock_session.scalar.return_value = 0 + + with patch("services.conversation_service.select") as mock_select: + mock_stmt = MagicMock() + mock_select.return_value = mock_stmt + mock_stmt.where.return_value = mock_stmt + mock_stmt.order_by.return_value = mock_stmt + mock_stmt.limit.return_value = mock_stmt + mock_stmt.subquery.return_value = MagicMock() + + result = 
ConversationService.pagination_by_last_id( + session=mock_session, + app_model=mock_app_model, + user=mock_user, + last_id=None, + limit=20, + invoke_from=InvokeFrom.WEB_APP, + include_ids=None, + exclude_ids=[], # Empty exclude_ids should not filter + ) + + # Result should contain the mocked conversations + assert len(result.data) == 5 + + def test_pagination_with_non_empty_exclude_ids(self): + """Test that non-empty exclude_ids filters properly""" + mock_session = MagicMock() + mock_app_model = MagicMock(id=str(uuid.uuid4())) + mock_user = MagicMock(id=str(uuid.uuid4())) + + # Mock the query results + mock_conversations = [MagicMock(id=str(uuid.uuid4())) for _ in range(3)] + mock_session.scalars.return_value.all.return_value = mock_conversations + mock_session.scalar.return_value = 0 + + with patch("services.conversation_service.select") as mock_select: + mock_stmt = MagicMock() + mock_select.return_value = mock_stmt + mock_stmt.where.return_value = mock_stmt + mock_stmt.order_by.return_value = mock_stmt + mock_stmt.limit.return_value = mock_stmt + mock_stmt.subquery.return_value = MagicMock() + + result = ConversationService.pagination_by_last_id( + session=mock_session, + app_model=mock_app_model, + user=mock_user, + last_id=None, + limit=20, + invoke_from=InvokeFrom.WEB_APP, + include_ids=None, + exclude_ids=["conv1", "conv2"], # Non-empty exclude_ids + ) + + # Verify the where clause was called for exclusion + assert mock_stmt.where.called From 431e0105de6b15ebfef833e9136459bda1e037ea Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Sat, 9 Aug 2025 23:40:55 +0900 Subject: [PATCH 10/21] Fix bare raise in if blocks (#23671) Signed-off-by: Yongtao Huang --- api/controllers/web/app.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/api/controllers/web/app.py b/api/controllers/web/app.py index 94a525a75d..197859e8f3 100644 --- a/api/controllers/web/app.py +++ b/api/controllers/web/app.py @@ -1,5 +1,6 @@ from flask import request from flask_restful import Resource, marshal_with, reqparse +from werkzeug.exceptions import Unauthorized from controllers.common import fields from controllers.web import api @@ -75,14 +76,14 @@ class AppWebAuthPermission(Resource): try: auth_header = request.headers.get("Authorization") if auth_header is None: - raise + raise Unauthorized("Authorization header is missing.") if " " not in auth_header: - raise + raise Unauthorized("Invalid Authorization header format. 
Expected 'Bearer <api-key>' format.")
             auth_scheme, tk = auth_header.split(None, 1)
             auth_scheme = auth_scheme.lower()
             if auth_scheme != "bearer":
-                raise
+                raise Unauthorized("Authorization scheme must be 'Bearer'")
 
             decoded = PassportService().verify(tk)
             user_id = decoded.get("user_id", "visitor")

From dc641348f69578e4381a1652d70a9c0b9ce2fd69 Mon Sep 17 00:00:00 2001
From: lyzno1 <92089059+lyzno1@users.noreply.github.com>
Date: Sat, 9 Aug 2025 22:41:42 +0800
Subject: [PATCH 11/21] fix: resolve datasets container rounded corners disappearing during scroll (#23667)

---
 web/app/(commonLayout)/datasets/container.tsx    | 4 ++--
 web/app/(commonLayout)/datasets/doc.tsx          | 2 +-
 web/app/components/base/tab-slider-new/index.tsx | 4 ++--
 web/app/components/header/header-wrapper.tsx     | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/web/app/(commonLayout)/datasets/container.tsx b/web/app/(commonLayout)/datasets/container.tsx
index 444119332b..5328fd03aa 100644
--- a/web/app/(commonLayout)/datasets/container.tsx
+++ b/web/app/(commonLayout)/datasets/container.tsx
@@ -86,8 +86,8 @@ const Container = () => {
   }, [currentWorkspace, router])
 
   return (
-
-
+
+
setActiveTab(newActiveTab)} diff --git a/web/app/(commonLayout)/datasets/doc.tsx b/web/app/(commonLayout)/datasets/doc.tsx index b31e0a4161..c31dad3c00 100644 --- a/web/app/(commonLayout)/datasets/doc.tsx +++ b/web/app/(commonLayout)/datasets/doc.tsx @@ -193,7 +193,7 @@ const Doc = ({ apiBaseUrl }: DocProps) => { )}
-
+
{Template}
diff --git a/web/app/components/base/tab-slider-new/index.tsx b/web/app/components/base/tab-slider-new/index.tsx index fc0bc1c987..cf68abff1d 100644 --- a/web/app/components/base/tab-slider-new/index.tsx +++ b/web/app/components/base/tab-slider-new/index.tsx @@ -25,8 +25,8 @@ const TabSliderNew: FC = ({ key={option.value} onClick={() => onChange(option.value)} className={cn( - 'mr-1 flex h-[32px] cursor-pointer items-center rounded-lg border-[0.5px] border-transparent px-3 py-[7px] text-[13px] font-medium leading-[18px] text-text-tertiary hover:bg-components-main-nav-nav-button-bg-active', - value === option.value && 'border-components-main-nav-nav-button-border bg-components-main-nav-nav-button-bg-active text-components-main-nav-nav-button-text-active shadow-xs', + 'mr-1 flex h-[32px] cursor-pointer items-center rounded-lg border-[0.5px] border-transparent px-3 py-[7px] text-[13px] font-medium leading-[18px] text-text-tertiary hover:bg-state-base-hover', + value === option.value && 'border-components-main-nav-nav-button-border bg-state-base-hover text-components-main-nav-nav-button-text-active shadow-xs', )} > {option.icon} diff --git a/web/app/components/header/header-wrapper.tsx b/web/app/components/header/header-wrapper.tsx index 6486e3707a..17026d46bf 100644 --- a/web/app/components/header/header-wrapper.tsx +++ b/web/app/components/header/header-wrapper.tsx @@ -13,7 +13,7 @@ const HeaderWrapper = ({ children, }: HeaderWrapperProps) => { const pathname = usePathname() - const isBordered = ['/apps', '/datasets', '/datasets/create', '/tools'].includes(pathname) + const isBordered = ['/apps', '/datasets/create', '/tools'].includes(pathname) // Check if the current path is a workflow canvas & fullscreen const inWorkflowCanvas = pathname.endsWith('/workflow') const workflowCanvasMaximize = localStorage.getItem('workflow-canvas-maximize') === 'true' From 6900b0813486cd82f0e71541883cf285b12b8e12 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Sat, 9 Aug 2025 22:42:18 +0800 Subject: [PATCH 12/21] fix: sync missing conversation variables for existing conversations (#23649) --- api/core/app/apps/advanced_chat/app_runner.py | 119 ++++- .../test_app_runner_conversation_variables.py | 419 ++++++++++++++++++ 2 files changed, 518 insertions(+), 20 deletions(-) create mode 100644 api/tests/unit_tests/core/app/apps/advanced_chat/test_app_runner_conversation_variables.py diff --git a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py index a75e17af64..3de2f5ca9e 100644 --- a/api/core/app/apps/advanced_chat/app_runner.py +++ b/api/core/app/apps/advanced_chat/app_runner.py @@ -118,26 +118,8 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): ): return - # Init conversation variables - stmt = select(ConversationVariable).where( - ConversationVariable.app_id == self.conversation.app_id, - ConversationVariable.conversation_id == self.conversation.id, - ) - with Session(db.engine) as session: - db_conversation_variables = session.scalars(stmt).all() - if not db_conversation_variables: - # Create conversation variables if they don't exist. - db_conversation_variables = [ - ConversationVariable.from_variable( - app_id=self.conversation.app_id, conversation_id=self.conversation.id, variable=variable - ) - for variable in self._workflow.conversation_variables - ] - session.add_all(db_conversation_variables) - # Convert database entities to variables. 
- conversation_variables = [item.to_variable() for item in db_conversation_variables] - - session.commit() + # Initialize conversation variables + conversation_variables = self._initialize_conversation_variables() # Create a variable pool. system_inputs = SystemVariable( @@ -292,3 +274,100 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): message_id=message_id, trace_manager=app_generate_entity.trace_manager, ) + + def _initialize_conversation_variables(self) -> list[VariableUnion]: + """ + Initialize conversation variables for the current conversation. + + This method: + 1. Loads existing variables from the database + 2. Creates new variables if none exist + 3. Syncs missing variables from the workflow definition + + :return: List of conversation variables ready for use + """ + with Session(db.engine) as session: + existing_variables = self._load_existing_conversation_variables(session) + + if not existing_variables: + # First time initialization - create all variables + existing_variables = self._create_all_conversation_variables(session) + else: + # Check and add any missing variables from the workflow + existing_variables = self._sync_missing_conversation_variables(session, existing_variables) + + # Convert to Variable objects for use in the workflow + conversation_variables = [var.to_variable() for var in existing_variables] + + session.commit() + return cast(list[VariableUnion], conversation_variables) + + def _load_existing_conversation_variables(self, session: Session) -> list[ConversationVariable]: + """ + Load existing conversation variables from the database. + + :param session: Database session + :return: List of existing conversation variables + """ + stmt = select(ConversationVariable).where( + ConversationVariable.app_id == self.conversation.app_id, + ConversationVariable.conversation_id == self.conversation.id, + ) + return list(session.scalars(stmt).all()) + + def _create_all_conversation_variables(self, session: Session) -> list[ConversationVariable]: + """ + Create all conversation variables for a new conversation. + + :param session: Database session + :return: List of created conversation variables + """ + new_variables = [ + ConversationVariable.from_variable( + app_id=self.conversation.app_id, conversation_id=self.conversation.id, variable=variable + ) + for variable in self._workflow.conversation_variables + ] + + if new_variables: + session.add_all(new_variables) + + return new_variables + + def _sync_missing_conversation_variables( + self, session: Session, existing_variables: list[ConversationVariable] + ) -> list[ConversationVariable]: + """ + Sync missing conversation variables from the workflow definition. + + This handles the case where new variables are added to a workflow + after conversations have already been created. 
+ + :param session: Database session + :param existing_variables: List of existing conversation variables + :return: Updated list including any newly created variables + """ + # Get IDs of existing and workflow variables + existing_ids = {var.id for var in existing_variables} + workflow_variables = {var.id: var for var in self._workflow.conversation_variables} + + # Find missing variable IDs + missing_ids = set(workflow_variables.keys()) - existing_ids + + if not missing_ids: + return existing_variables + + # Create missing variables with their default values + new_variables = [ + ConversationVariable.from_variable( + app_id=self.conversation.app_id, + conversation_id=self.conversation.id, + variable=workflow_variables[var_id], + ) + for var_id in missing_ids + ] + + session.add_all(new_variables) + + # Return combined list + return existing_variables + new_variables diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_runner_conversation_variables.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_runner_conversation_variables.py new file mode 100644 index 0000000000..da175e7ccd --- /dev/null +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_runner_conversation_variables.py @@ -0,0 +1,419 @@ +"""Test conversation variable handling in AdvancedChatAppRunner.""" + +from unittest.mock import MagicMock, patch +from uuid import uuid4 + +from sqlalchemy.orm import Session + +from core.app.apps.advanced_chat.app_runner import AdvancedChatAppRunner +from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, InvokeFrom +from core.variables import SegmentType +from factories import variable_factory +from models import ConversationVariable, Workflow + + +class TestAdvancedChatAppRunnerConversationVariables: + """Test that AdvancedChatAppRunner correctly handles conversation variables.""" + + def test_missing_conversation_variables_are_added(self): + """Test that new conversation variables added to workflow are created for existing conversations.""" + # Setup + app_id = str(uuid4()) + conversation_id = str(uuid4()) + workflow_id = str(uuid4()) + + # Create workflow with two conversation variables + workflow_vars = [ + variable_factory.build_conversation_variable_from_mapping( + { + "id": "var1", + "name": "existing_var", + "value_type": SegmentType.STRING, + "value": "default1", + } + ), + variable_factory.build_conversation_variable_from_mapping( + { + "id": "var2", + "name": "new_var", + "value_type": SegmentType.STRING, + "value": "default2", + } + ), + ] + + # Mock workflow with conversation variables + mock_workflow = MagicMock(spec=Workflow) + mock_workflow.conversation_variables = workflow_vars + mock_workflow.tenant_id = str(uuid4()) + mock_workflow.app_id = app_id + mock_workflow.id = workflow_id + mock_workflow.type = "chat" + mock_workflow.graph_dict = {} + mock_workflow.environment_variables = [] + + # Create existing conversation variable (only var1 exists in DB) + existing_db_var = MagicMock(spec=ConversationVariable) + existing_db_var.id = "var1" + existing_db_var.app_id = app_id + existing_db_var.conversation_id = conversation_id + existing_db_var.to_variable = MagicMock(return_value=workflow_vars[0]) + + # Mock conversation and message + mock_conversation = MagicMock() + mock_conversation.app_id = app_id + mock_conversation.id = conversation_id + + mock_message = MagicMock() + mock_message.id = str(uuid4()) + + # Mock app config + mock_app_config = MagicMock() + mock_app_config.app_id = app_id + 
mock_app_config.workflow_id = workflow_id + mock_app_config.tenant_id = str(uuid4()) + + # Mock app generate entity + mock_app_generate_entity = MagicMock(spec=AdvancedChatAppGenerateEntity) + mock_app_generate_entity.app_config = mock_app_config + mock_app_generate_entity.inputs = {} + mock_app_generate_entity.query = "test query" + mock_app_generate_entity.files = [] + mock_app_generate_entity.user_id = str(uuid4()) + mock_app_generate_entity.invoke_from = InvokeFrom.SERVICE_API + mock_app_generate_entity.workflow_run_id = str(uuid4()) + mock_app_generate_entity.call_depth = 0 + mock_app_generate_entity.single_iteration_run = None + mock_app_generate_entity.single_loop_run = None + mock_app_generate_entity.trace_manager = None + + # Create runner + runner = AdvancedChatAppRunner( + application_generate_entity=mock_app_generate_entity, + queue_manager=MagicMock(), + conversation=mock_conversation, + message=mock_message, + dialogue_count=1, + variable_loader=MagicMock(), + workflow=mock_workflow, + system_user_id=str(uuid4()), + app=MagicMock(), + ) + + # Mock database session + mock_session = MagicMock(spec=Session) + + # First query returns only existing variable + mock_scalars_result = MagicMock() + mock_scalars_result.all.return_value = [existing_db_var] + mock_session.scalars.return_value = mock_scalars_result + + # Track what gets added to session + added_items = [] + + def track_add_all(items): + added_items.extend(items) + + mock_session.add_all.side_effect = track_add_all + + # Patch the necessary components + with ( + patch("core.app.apps.advanced_chat.app_runner.Session") as mock_session_class, + patch("core.app.apps.advanced_chat.app_runner.select") as mock_select, + patch("core.app.apps.advanced_chat.app_runner.db") as mock_db, + patch.object(runner, "_init_graph") as mock_init_graph, + patch.object(runner, "handle_input_moderation", return_value=False), + patch.object(runner, "handle_annotation_reply", return_value=False), + patch("core.app.apps.advanced_chat.app_runner.WorkflowEntry") as mock_workflow_entry_class, + patch("core.app.apps.advanced_chat.app_runner.VariablePool") as mock_variable_pool_class, + ): + # Setup mocks + mock_session_class.return_value.__enter__.return_value = mock_session + mock_db.session.query.return_value.where.return_value.first.return_value = MagicMock() # App exists + mock_db.engine = MagicMock() + + # Mock graph initialization + mock_init_graph.return_value = MagicMock() + + # Mock workflow entry + mock_workflow_entry = MagicMock() + mock_workflow_entry.run.return_value = iter([]) # Empty generator + mock_workflow_entry_class.return_value = mock_workflow_entry + + # Run the method + runner.run() + + # Verify that the missing variable was added + assert len(added_items) == 1, "Should have added exactly one missing variable" + + # Check that the added item is the missing variable (var2) + added_var = added_items[0] + assert hasattr(added_var, "id"), "Added item should be a ConversationVariable" + # Note: Since we're mocking ConversationVariable.from_variable, + # we can't directly check the id, but we can verify add_all was called + assert mock_session.add_all.called, "Session add_all should have been called" + assert mock_session.commit.called, "Session commit should have been called" + + def test_no_variables_creates_all(self): + """Test that all conversation variables are created when none exist in DB.""" + # Setup + app_id = str(uuid4()) + conversation_id = str(uuid4()) + workflow_id = str(uuid4()) + + # Create workflow with conversation 
variables + workflow_vars = [ + variable_factory.build_conversation_variable_from_mapping( + { + "id": "var1", + "name": "var1", + "value_type": SegmentType.STRING, + "value": "default1", + } + ), + variable_factory.build_conversation_variable_from_mapping( + { + "id": "var2", + "name": "var2", + "value_type": SegmentType.STRING, + "value": "default2", + } + ), + ] + + # Mock workflow + mock_workflow = MagicMock(spec=Workflow) + mock_workflow.conversation_variables = workflow_vars + mock_workflow.tenant_id = str(uuid4()) + mock_workflow.app_id = app_id + mock_workflow.id = workflow_id + mock_workflow.type = "chat" + mock_workflow.graph_dict = {} + mock_workflow.environment_variables = [] + + # Mock conversation and message + mock_conversation = MagicMock() + mock_conversation.app_id = app_id + mock_conversation.id = conversation_id + + mock_message = MagicMock() + mock_message.id = str(uuid4()) + + # Mock app config + mock_app_config = MagicMock() + mock_app_config.app_id = app_id + mock_app_config.workflow_id = workflow_id + mock_app_config.tenant_id = str(uuid4()) + + # Mock app generate entity + mock_app_generate_entity = MagicMock(spec=AdvancedChatAppGenerateEntity) + mock_app_generate_entity.app_config = mock_app_config + mock_app_generate_entity.inputs = {} + mock_app_generate_entity.query = "test query" + mock_app_generate_entity.files = [] + mock_app_generate_entity.user_id = str(uuid4()) + mock_app_generate_entity.invoke_from = InvokeFrom.SERVICE_API + mock_app_generate_entity.workflow_run_id = str(uuid4()) + mock_app_generate_entity.call_depth = 0 + mock_app_generate_entity.single_iteration_run = None + mock_app_generate_entity.single_loop_run = None + mock_app_generate_entity.trace_manager = None + + # Create runner + runner = AdvancedChatAppRunner( + application_generate_entity=mock_app_generate_entity, + queue_manager=MagicMock(), + conversation=mock_conversation, + message=mock_message, + dialogue_count=1, + variable_loader=MagicMock(), + workflow=mock_workflow, + system_user_id=str(uuid4()), + app=MagicMock(), + ) + + # Mock database session + mock_session = MagicMock(spec=Session) + + # Query returns empty list (no existing variables) + mock_scalars_result = MagicMock() + mock_scalars_result.all.return_value = [] + mock_session.scalars.return_value = mock_scalars_result + + # Track what gets added to session + added_items = [] + + def track_add_all(items): + added_items.extend(items) + + mock_session.add_all.side_effect = track_add_all + + # Patch the necessary components + with ( + patch("core.app.apps.advanced_chat.app_runner.Session") as mock_session_class, + patch("core.app.apps.advanced_chat.app_runner.select") as mock_select, + patch("core.app.apps.advanced_chat.app_runner.db") as mock_db, + patch.object(runner, "_init_graph") as mock_init_graph, + patch.object(runner, "handle_input_moderation", return_value=False), + patch.object(runner, "handle_annotation_reply", return_value=False), + patch("core.app.apps.advanced_chat.app_runner.WorkflowEntry") as mock_workflow_entry_class, + patch("core.app.apps.advanced_chat.app_runner.VariablePool") as mock_variable_pool_class, + patch("core.app.apps.advanced_chat.app_runner.ConversationVariable") as mock_conv_var_class, + ): + # Setup mocks + mock_session_class.return_value.__enter__.return_value = mock_session + mock_db.session.query.return_value.where.return_value.first.return_value = MagicMock() # App exists + mock_db.engine = MagicMock() + + # Mock ConversationVariable.from_variable to return mock objects + mock_conv_vars 
= [] + for var in workflow_vars: + mock_cv = MagicMock() + mock_cv.id = var.id + mock_cv.to_variable.return_value = var + mock_conv_vars.append(mock_cv) + + mock_conv_var_class.from_variable.side_effect = mock_conv_vars + + # Mock graph initialization + mock_init_graph.return_value = MagicMock() + + # Mock workflow entry + mock_workflow_entry = MagicMock() + mock_workflow_entry.run.return_value = iter([]) # Empty generator + mock_workflow_entry_class.return_value = mock_workflow_entry + + # Run the method + runner.run() + + # Verify that all variables were created + assert len(added_items) == 2, "Should have added both variables" + assert mock_session.add_all.called, "Session add_all should have been called" + assert mock_session.commit.called, "Session commit should have been called" + + def test_all_variables_exist_no_changes(self): + """Test that no changes are made when all variables already exist in DB.""" + # Setup + app_id = str(uuid4()) + conversation_id = str(uuid4()) + workflow_id = str(uuid4()) + + # Create workflow with conversation variables + workflow_vars = [ + variable_factory.build_conversation_variable_from_mapping( + { + "id": "var1", + "name": "var1", + "value_type": SegmentType.STRING, + "value": "default1", + } + ), + variable_factory.build_conversation_variable_from_mapping( + { + "id": "var2", + "name": "var2", + "value_type": SegmentType.STRING, + "value": "default2", + } + ), + ] + + # Mock workflow + mock_workflow = MagicMock(spec=Workflow) + mock_workflow.conversation_variables = workflow_vars + mock_workflow.tenant_id = str(uuid4()) + mock_workflow.app_id = app_id + mock_workflow.id = workflow_id + mock_workflow.type = "chat" + mock_workflow.graph_dict = {} + mock_workflow.environment_variables = [] + + # Create existing conversation variables (both exist in DB) + existing_db_vars = [] + for var in workflow_vars: + db_var = MagicMock(spec=ConversationVariable) + db_var.id = var.id + db_var.app_id = app_id + db_var.conversation_id = conversation_id + db_var.to_variable = MagicMock(return_value=var) + existing_db_vars.append(db_var) + + # Mock conversation and message + mock_conversation = MagicMock() + mock_conversation.app_id = app_id + mock_conversation.id = conversation_id + + mock_message = MagicMock() + mock_message.id = str(uuid4()) + + # Mock app config + mock_app_config = MagicMock() + mock_app_config.app_id = app_id + mock_app_config.workflow_id = workflow_id + mock_app_config.tenant_id = str(uuid4()) + + # Mock app generate entity + mock_app_generate_entity = MagicMock(spec=AdvancedChatAppGenerateEntity) + mock_app_generate_entity.app_config = mock_app_config + mock_app_generate_entity.inputs = {} + mock_app_generate_entity.query = "test query" + mock_app_generate_entity.files = [] + mock_app_generate_entity.user_id = str(uuid4()) + mock_app_generate_entity.invoke_from = InvokeFrom.SERVICE_API + mock_app_generate_entity.workflow_run_id = str(uuid4()) + mock_app_generate_entity.call_depth = 0 + mock_app_generate_entity.single_iteration_run = None + mock_app_generate_entity.single_loop_run = None + mock_app_generate_entity.trace_manager = None + + # Create runner + runner = AdvancedChatAppRunner( + application_generate_entity=mock_app_generate_entity, + queue_manager=MagicMock(), + conversation=mock_conversation, + message=mock_message, + dialogue_count=1, + variable_loader=MagicMock(), + workflow=mock_workflow, + system_user_id=str(uuid4()), + app=MagicMock(), + ) + + # Mock database session + mock_session = MagicMock(spec=Session) + + # Query returns 
all existing variables + mock_scalars_result = MagicMock() + mock_scalars_result.all.return_value = existing_db_vars + mock_session.scalars.return_value = mock_scalars_result + + # Patch the necessary components + with ( + patch("core.app.apps.advanced_chat.app_runner.Session") as mock_session_class, + patch("core.app.apps.advanced_chat.app_runner.select") as mock_select, + patch("core.app.apps.advanced_chat.app_runner.db") as mock_db, + patch.object(runner, "_init_graph") as mock_init_graph, + patch.object(runner, "handle_input_moderation", return_value=False), + patch.object(runner, "handle_annotation_reply", return_value=False), + patch("core.app.apps.advanced_chat.app_runner.WorkflowEntry") as mock_workflow_entry_class, + patch("core.app.apps.advanced_chat.app_runner.VariablePool") as mock_variable_pool_class, + ): + # Setup mocks + mock_session_class.return_value.__enter__.return_value = mock_session + mock_db.session.query.return_value.where.return_value.first.return_value = MagicMock() # App exists + mock_db.engine = MagicMock() + + # Mock graph initialization + mock_init_graph.return_value = MagicMock() + + # Mock workflow entry + mock_workflow_entry = MagicMock() + mock_workflow_entry.run.return_value = iter([]) # Empty generator + mock_workflow_entry_class.return_value = mock_workflow_entry + + # Run the method + runner.run() + + # Verify that no variables were added + assert not mock_session.add_all.called, "Session add_all should not have been called" + assert mock_session.commit.called, "Session commit should still be called" From 5f8967918e94223fba098bf6903c30fd8f3e12ee Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Sun, 10 Aug 2025 11:03:46 +0800 Subject: [PATCH 13/21] Feat add testcontainers test for app dsl service (#23675) --- .../services/test_app_dsl_service.py | 473 ++++++++++++++++++ 1 file changed, 473 insertions(+) create mode 100644 api/tests/test_containers_integration_tests/services/test_app_dsl_service.py diff --git a/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py b/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py new file mode 100644 index 0000000000..f2bd9f8084 --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py @@ -0,0 +1,473 @@ +import json +from unittest.mock import MagicMock, patch + +import pytest +import yaml +from faker import Faker + +from models.model import App, AppModelConfig +from services.account_service import AccountService, TenantService +from services.app_dsl_service import AppDslService, ImportMode, ImportStatus +from services.app_service import AppService + + +class TestAppDslService: + """Integration tests for AppDslService using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.app_dsl_service.WorkflowService") as mock_workflow_service, + patch("services.app_dsl_service.DependenciesAnalysisService") as mock_dependencies_service, + patch("services.app_dsl_service.WorkflowDraftVariableService") as mock_draft_variable_service, + patch("services.app_dsl_service.ssrf_proxy") as mock_ssrf_proxy, + patch("services.app_dsl_service.redis_client") as mock_redis_client, + patch("services.app_dsl_service.app_was_created") as mock_app_was_created, + patch("services.app_dsl_service.app_model_config_was_updated") as mock_app_model_config_was_updated, + 
patch("services.app_service.ModelManager") as mock_model_manager, + patch("services.app_service.FeatureService") as mock_feature_service, + patch("services.app_service.EnterpriseService") as mock_enterprise_service, + ): + # Setup default mock returns + mock_workflow_service.return_value.get_draft_workflow.return_value = None + mock_workflow_service.return_value.sync_draft_workflow.return_value = MagicMock() + mock_dependencies_service.generate_latest_dependencies.return_value = [] + mock_dependencies_service.get_leaked_dependencies.return_value = [] + mock_dependencies_service.generate_dependencies.return_value = [] + mock_draft_variable_service.return_value.delete_workflow_variables.return_value = None + mock_ssrf_proxy.get.return_value.content = b"test content" + mock_ssrf_proxy.get.return_value.raise_for_status.return_value = None + mock_redis_client.setex.return_value = None + mock_redis_client.get.return_value = None + mock_redis_client.delete.return_value = None + mock_app_was_created.send.return_value = None + mock_app_model_config_was_updated.send.return_value = None + + # Mock ModelManager for app service + mock_model_instance = mock_model_manager.return_value + mock_model_instance.get_default_model_instance.return_value = None + mock_model_instance.get_default_provider_model_name.return_value = ("openai", "gpt-3.5-turbo") + + # Mock FeatureService and EnterpriseService + mock_feature_service.get_system_features.return_value.webapp_auth.enabled = False + mock_enterprise_service.WebAppAuth.update_app_access_mode.return_value = None + mock_enterprise_service.WebAppAuth.cleanup_webapp.return_value = None + + yield { + "workflow_service": mock_workflow_service, + "dependencies_service": mock_dependencies_service, + "draft_variable_service": mock_draft_variable_service, + "ssrf_proxy": mock_ssrf_proxy, + "redis_client": mock_redis_client, + "app_was_created": mock_app_was_created, + "app_model_config_was_updated": mock_app_model_config_was_updated, + "model_manager": mock_model_manager, + "feature_service": mock_feature_service, + "enterprise_service": mock_enterprise_service, + } + + def _create_test_app_and_account(self, db_session_with_containers, mock_external_service_dependencies): + """ + Helper method to create a test app and account for testing. 
+ + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + + Returns: + tuple: (app, account) - Created app and account instances + """ + fake = Faker() + + # Setup mocks for account creation + with patch("services.account_service.FeatureService") as mock_account_feature_service: + mock_account_feature_service.get_system_features.return_value.is_allow_register = True + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Setup app creation arguments + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "🤖", + "icon_background": "#FF6B6B", + "api_rph": 100, + "api_rpm": 10, + } + + # Create app + app_service = AppService() + app = app_service.create_app(tenant.id, app_args, account) + + return app, account + + def _create_simple_yaml_content(self, app_name="Test App", app_mode="chat"): + """ + Helper method to create simple YAML content for testing. + """ + yaml_data = { + "version": "0.3.0", + "kind": "app", + "app": { + "name": app_name, + "mode": app_mode, + "icon": "🤖", + "icon_background": "#FFEAD5", + "description": "Test app description", + "use_icon_as_answer_icon": False, + }, + "model_config": { + "model": { + "provider": "openai", + "name": "gpt-3.5-turbo", + "mode": "chat", + "completion_params": { + "max_tokens": 1000, + "temperature": 0.7, + "top_p": 1.0, + }, + }, + "pre_prompt": "You are a helpful assistant.", + "prompt_type": "simple", + }, + } + return yaml.dump(yaml_data, allow_unicode=True) + + def test_import_app_yaml_content_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful app import from YAML content. 
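+        Covers the happy path: the YAML is parsed, a new app row is created
+        for the caller's tenant, and the model config is persisted with it.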
+ """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create YAML content + yaml_content = self._create_simple_yaml_content(fake.company(), "chat") + + # Import app + dsl_service = AppDslService(db_session_with_containers) + result = dsl_service.import_app( + account=account, + import_mode=ImportMode.YAML_CONTENT, + yaml_content=yaml_content, + name="Imported App", + description="Imported app description", + ) + + # Verify import result + assert result.status == ImportStatus.COMPLETED + assert result.app_id is not None + assert result.app_mode == "chat" + assert result.imported_dsl_version == "0.3.0" + assert result.error == "" + + # Verify app was created in database + imported_app = db_session_with_containers.query(App).filter(App.id == result.app_id).first() + assert imported_app is not None + assert imported_app.name == "Imported App" + assert imported_app.description == "Imported app description" + assert imported_app.mode == "chat" + assert imported_app.tenant_id == account.current_tenant_id + assert imported_app.created_by == account.id + + # Verify model config was created + model_config = ( + db_session_with_containers.query(AppModelConfig).filter(AppModelConfig.app_id == result.app_id).first() + ) + assert model_config is not None + # The provider and model_id are stored in the model field as JSON + model_dict = model_config.model_dict + assert model_dict["provider"] == "openai" + assert model_dict["name"] == "gpt-3.5-turbo" + + def test_import_app_yaml_url_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful app import from YAML URL. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create YAML content for mock response + yaml_content = self._create_simple_yaml_content(fake.company(), "chat") + + # Setup mock response + mock_response = MagicMock() + mock_response.content = yaml_content.encode("utf-8") + mock_response.raise_for_status.return_value = None + mock_external_service_dependencies["ssrf_proxy"].get.return_value = mock_response + + # Import app from URL + dsl_service = AppDslService(db_session_with_containers) + result = dsl_service.import_app( + account=account, + import_mode=ImportMode.YAML_URL, + yaml_url="https://example.com/app.yaml", + name="URL Imported App", + description="App imported from URL", + ) + + # Verify import result + assert result.status == ImportStatus.COMPLETED + assert result.app_id is not None + assert result.app_mode == "chat" + assert result.imported_dsl_version == "0.3.0" + assert result.error == "" + + # Verify app was created in database + imported_app = db_session_with_containers.query(App).filter(App.id == result.app_id).first() + assert imported_app is not None + assert imported_app.name == "URL Imported App" + assert imported_app.description == "App imported from URL" + assert imported_app.mode == "chat" + assert imported_app.tenant_id == account.current_tenant_id + + # Verify ssrf_proxy was called + mock_external_service_dependencies["ssrf_proxy"].get.assert_called_once_with( + "https://example.com/app.yaml", follow_redirects=True, timeout=(10, 10) + ) + + def test_import_app_invalid_yaml_format(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test app import with invalid YAML format. 
+ """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create invalid YAML content + invalid_yaml = "invalid: yaml: content: [" + + # Import app with invalid YAML + dsl_service = AppDslService(db_session_with_containers) + result = dsl_service.import_app( + account=account, + import_mode=ImportMode.YAML_CONTENT, + yaml_content=invalid_yaml, + name="Invalid App", + ) + + # Verify import failed + assert result.status == ImportStatus.FAILED + assert result.app_id is None + assert "Invalid YAML format" in result.error + assert result.imported_dsl_version == "" + + # Verify no app was created in database + apps_count = db_session_with_containers.query(App).filter(App.tenant_id == account.current_tenant_id).count() + assert apps_count == 1 # Only the original test app + + def test_import_app_missing_yaml_content(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test app import with missing YAML content. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Import app without YAML content + dsl_service = AppDslService(db_session_with_containers) + result = dsl_service.import_app( + account=account, + import_mode=ImportMode.YAML_CONTENT, + name="Missing Content App", + ) + + # Verify import failed + assert result.status == ImportStatus.FAILED + assert result.app_id is None + assert "yaml_content is required" in result.error + assert result.imported_dsl_version == "" + + # Verify no app was created in database + apps_count = db_session_with_containers.query(App).filter(App.tenant_id == account.current_tenant_id).count() + assert apps_count == 1 # Only the original test app + + def test_import_app_missing_yaml_url(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test app import with missing YAML URL. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Import app without YAML URL + dsl_service = AppDslService(db_session_with_containers) + result = dsl_service.import_app( + account=account, + import_mode=ImportMode.YAML_URL, + name="Missing URL App", + ) + + # Verify import failed + assert result.status == ImportStatus.FAILED + assert result.app_id is None + assert "yaml_url is required" in result.error + assert result.imported_dsl_version == "" + + # Verify no app was created in database + apps_count = db_session_with_containers.query(App).filter(App.tenant_id == account.current_tenant_id).count() + assert apps_count == 1 # Only the original test app + + def test_import_app_invalid_import_mode(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test app import with invalid import mode. 
+ """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create YAML content + yaml_content = self._create_simple_yaml_content(fake.company(), "chat") + + # Import app with invalid mode should raise ValueError + dsl_service = AppDslService(db_session_with_containers) + with pytest.raises(ValueError, match="Invalid import_mode: invalid-mode"): + dsl_service.import_app( + account=account, + import_mode="invalid-mode", + yaml_content=yaml_content, + name="Invalid Mode App", + ) + + # Verify no app was created in database + apps_count = db_session_with_containers.query(App).filter(App.tenant_id == account.current_tenant_id).count() + assert apps_count == 1 # Only the original test app + + def test_export_dsl_chat_app_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful DSL export for chat app. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create model config for the app + model_config = AppModelConfig() + model_config.id = fake.uuid4() + model_config.app_id = app.id + model_config.provider = "openai" + model_config.model_id = "gpt-3.5-turbo" + model_config.model = json.dumps( + { + "provider": "openai", + "name": "gpt-3.5-turbo", + "mode": "chat", + "completion_params": { + "max_tokens": 1000, + "temperature": 0.7, + }, + } + ) + model_config.pre_prompt = "You are a helpful assistant." + model_config.prompt_type = "simple" + model_config.created_by = account.id + model_config.updated_by = account.id + + # Set the app_model_config_id to link the config + app.app_model_config_id = model_config.id + + db_session_with_containers.add(model_config) + db_session_with_containers.commit() + + # Export DSL + exported_dsl = AppDslService.export_dsl(app, include_secret=False) + + # Parse exported YAML + exported_data = yaml.safe_load(exported_dsl) + + # Verify exported data structure + assert exported_data["kind"] == "app" + assert exported_data["app"]["name"] == app.name + assert exported_data["app"]["mode"] == app.mode + assert exported_data["app"]["icon"] == app.icon + assert exported_data["app"]["icon_background"] == app.icon_background + assert exported_data["app"]["description"] == app.description + + # Verify model config was exported + assert "model_config" in exported_data + # The exported model_config structure may be different from the database structure + # Check that the model config exists and has the expected content + assert exported_data["model_config"] is not None + + # Verify dependencies were exported + assert "dependencies" in exported_data + assert isinstance(exported_data["dependencies"], list) + + def test_export_dsl_workflow_app_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful DSL export for workflow app. 
+ """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Update app to workflow mode + app.mode = "workflow" + db_session_with_containers.commit() + + # Mock workflow service to return a workflow + mock_workflow = MagicMock() + mock_workflow.to_dict.return_value = { + "graph": {"nodes": [{"id": "start", "type": "start", "data": {"type": "start"}}], "edges": []}, + "features": {}, + "environment_variables": [], + "conversation_variables": [], + } + mock_external_service_dependencies[ + "workflow_service" + ].return_value.get_draft_workflow.return_value = mock_workflow + + # Export DSL + exported_dsl = AppDslService.export_dsl(app, include_secret=False) + + # Parse exported YAML + exported_data = yaml.safe_load(exported_dsl) + + # Verify exported data structure + assert exported_data["kind"] == "app" + assert exported_data["app"]["name"] == app.name + assert exported_data["app"]["mode"] == "workflow" + + # Verify workflow was exported + assert "workflow" in exported_data + assert "graph" in exported_data["workflow"] + assert "nodes" in exported_data["workflow"]["graph"] + + # Verify dependencies were exported + assert "dependencies" in exported_data + assert isinstance(exported_data["dependencies"], list) + + # Verify workflow service was called + mock_external_service_dependencies["workflow_service"].return_value.get_draft_workflow.assert_called_once_with( + app + ) + + def test_check_dependencies_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful dependency checking. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Mock Redis to return dependencies + mock_dependencies_json = '{"app_id": "' + app.id + '", "dependencies": []}' + mock_external_service_dependencies["redis_client"].get.return_value = mock_dependencies_json + + # Check dependencies + dsl_service = AppDslService(db_session_with_containers) + result = dsl_service.check_dependencies(app_model=app) + + # Verify result + assert result.leaked_dependencies == [] + + # Verify Redis was queried + mock_external_service_dependencies["redis_client"].get.assert_called_once_with( + f"app_check_dependencies:{app.id}" + ) + + # Verify dependencies service was called + mock_external_service_dependencies["dependencies_service"].get_leaked_dependencies.assert_called_once() From 0be3b4e7a64c0c4b81f6bc6849549d8d7e53669c Mon Sep 17 00:00:00 2001 From: baonudesifeizhai <85092850+baonudesifeizhai@users.noreply.github.com> Date: Sat, 9 Aug 2025 23:05:55 -0400 Subject: [PATCH 14/21] fix: Add internationalization support for date input formatting Fixes #23266 (#23678) --- .../date-picker/index.tsx | 3 +- .../base/date-and-time-picker/utils/dayjs.ts | 46 +++++++++++++++++++ .../components/base/markdown-blocks/form.tsx | 17 ++++++- web/i18n/en-US/time.ts | 8 ++++ web/i18n/ja-JP/time.ts | 8 ++++ web/i18n/zh-Hans/time.ts | 8 ++++ 6 files changed, 86 insertions(+), 4 deletions(-) diff --git a/web/app/components/base/date-and-time-picker/date-picker/index.tsx b/web/app/components/base/date-and-time-picker/date-picker/index.tsx index f4fc86101e..f99b8257c1 100644 --- a/web/app/components/base/date-and-time-picker/date-picker/index.tsx +++ b/web/app/components/base/date-and-time-picker/date-picker/index.tsx @@ -78,7 +78,6 @@ const DatePicker = ({ setCurrentDate(prev => getDateWithTimezone({ date: prev, timezone })) 
setSelectedDate(prev => prev ? getDateWithTimezone({ date: prev, timezone }) : undefined) } - // eslint-disable-next-line react-hooks/exhaustive-deps }, [timezone]) const handleClickTrigger = (e: React.MouseEvent) => { @@ -192,7 +191,7 @@ const DatePicker = ({ setView(ViewType.date) } - const timeFormat = needTimePicker ? 'MMMM D, YYYY hh:mm A' : 'MMMM D, YYYY' + const timeFormat = needTimePicker ? t('time.dateFormats.displayWithTime') : t('time.dateFormats.display') const displayValue = value?.format(timeFormat) || '' const displayTime = selectedDate?.format('hh:mm A') || '--:-- --' const placeholderDate = isOpen && selectedDate ? selectedDate.format(timeFormat) : (placeholder || t('time.defaultPlaceholder')) diff --git a/web/app/components/base/date-and-time-picker/utils/dayjs.ts b/web/app/components/base/date-and-time-picker/utils/dayjs.ts index cdc3924194..80dc0f780c 100644 --- a/web/app/components/base/date-and-time-picker/utils/dayjs.ts +++ b/web/app/components/base/date-and-time-picker/utils/dayjs.ts @@ -90,3 +90,49 @@ export const convertTimezoneToOffsetStr = (timezone?: string) => { return DEFAULT_OFFSET_STR return `UTC${tzItem.name.charAt(0)}${tzItem.name.charAt(2)}` } + +// Parse date with multiple format support +export const parseDateWithFormat = (dateString: string, format?: string): Dayjs | null => { + if (!dateString) return null + + // If format is specified, use it directly + if (format) { + const parsed = dayjs(dateString, format, true) + return parsed.isValid() ? parsed : null + } + + // Try common date formats + const formats = [ + 'YYYY-MM-DD', // Standard format + 'YYYY/MM/DD', // Slash format + 'DD-MM-YYYY', // European format + 'DD/MM/YYYY', // European slash format + 'MM-DD-YYYY', // US format + 'MM/DD/YYYY', // US slash format + 'YYYY-MM-DDTHH:mm:ss.SSSZ', // ISO format + 'YYYY-MM-DDTHH:mm:ssZ', // ISO format (no milliseconds) + 'YYYY-MM-DD HH:mm:ss', // Standard datetime format + ] + + for (const fmt of formats) { + const parsed = dayjs(dateString, fmt, true) + if (parsed.isValid()) + return parsed + } + + return null +} + +// Format date output with localization support +export const formatDateForOutput = (date: Dayjs, includeTime: boolean = false, locale: string = 'en-US'): string => { + if (!date || !date.isValid()) return '' + + if (includeTime) { + // Output format with time + return date.format('YYYY-MM-DDTHH:mm:ss.SSSZ') + } + else { + // Date-only output format without timezone + return date.format('YYYY-MM-DD') + } +} diff --git a/web/app/components/base/markdown-blocks/form.tsx b/web/app/components/base/markdown-blocks/form.tsx index b71193d8f9..5e0b118d36 100644 --- a/web/app/components/base/markdown-blocks/form.tsx +++ b/web/app/components/base/markdown-blocks/form.tsx @@ -7,6 +7,7 @@ import TimePicker from '@/app/components/base/date-and-time-picker/time-picker' import Checkbox from '@/app/components/base/checkbox' import Select from '@/app/components/base/select' import { useChatContext } from '@/app/components/base/chat/chat/context' +import { formatDateForOutput } from '@/app/components/base/date-and-time-picker/utils/dayjs' enum DATA_FORMAT { TEXT = 'text', @@ -51,8 +52,20 @@ const MarkdownForm = ({ node }: any) => { const getFormValues = (children: any) => { const values: { [key: string]: any } = {} children.forEach((child: any) => { - if ([SUPPORTED_TAGS.INPUT, SUPPORTED_TAGS.TEXTAREA].includes(child.tagName)) - values[child.properties.name] = formValues[child.properties.name] + if ([SUPPORTED_TAGS.INPUT, 
SUPPORTED_TAGS.TEXTAREA].includes(child.tagName)) { + let value = formValues[child.properties.name] + + if (child.tagName === SUPPORTED_TAGS.INPUT + && (child.properties.type === SUPPORTED_TYPES.DATE || child.properties.type === SUPPORTED_TYPES.DATETIME)) { + if (value && typeof value.format === 'function') { + // Format date output consistently + const includeTime = child.properties.type === SUPPORTED_TYPES.DATETIME + value = formatDateForOutput(value, includeTime) + } + } + + values[child.properties.name] = value + } }) return values } diff --git a/web/i18n/en-US/time.ts b/web/i18n/en-US/time.ts index 40adad0231..9d72637d84 100644 --- a/web/i18n/en-US/time.ts +++ b/web/i18n/en-US/time.ts @@ -32,6 +32,14 @@ const translation = { pickTime: 'Pick Time', }, defaultPlaceholder: 'Pick a time...', + // Date format configurations + dateFormats: { + display: 'MMMM D, YYYY', + displayWithTime: 'MMMM D, YYYY hh:mm A', + input: 'YYYY-MM-DD', + output: 'YYYY-MM-DD', + outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ', + }, } export default translation diff --git a/web/i18n/ja-JP/time.ts b/web/i18n/ja-JP/time.ts index 6594533b2b..5a5d61748c 100644 --- a/web/i18n/ja-JP/time.ts +++ b/web/i18n/ja-JP/time.ts @@ -32,6 +32,14 @@ const translation = { pickTime: 'ピックタイム', }, defaultPlaceholder: '時間を選んでください...', + // Date format configurations + dateFormats: { + display: 'YYYY年MM月DD日', + displayWithTime: 'YYYY年MM月DD日 HH:mm', + input: 'YYYY-MM-DD', + output: 'YYYY-MM-DD', + outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ', + }, } export default translation diff --git a/web/i18n/zh-Hans/time.ts b/web/i18n/zh-Hans/time.ts index a7a1c6e574..74b68da1b7 100644 --- a/web/i18n/zh-Hans/time.ts +++ b/web/i18n/zh-Hans/time.ts @@ -32,6 +32,14 @@ const translation = { pickTime: '选择时间', }, defaultPlaceholder: '请选择时间...', + // Date format configurations + dateFormats: { + display: 'YYYY年MM月DD日', + displayWithTime: 'YYYY年MM月DD日 HH:mm', + input: 'YYYY-MM-DD', + output: 'YYYY-MM-DD', + outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ', + }, } export default translation From 79a3c1618f96d024d4fe9dc70632c67364023d02 Mon Sep 17 00:00:00 2001 From: Guangdong Liu Date: Sun, 10 Aug 2025 11:09:47 +0800 Subject: [PATCH 15/21] fix: support custom file types in workflow Start node (#23672) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .../workflow/nodes/document_extractor/node.py | 54 +++++++++++++++++-- .../components/before-run-form/form-item.tsx | 8 +-- 2 files changed, 55 insertions(+), 7 deletions(-) diff --git a/api/core/workflow/nodes/document_extractor/node.py b/api/core/workflow/nodes/document_extractor/node.py index 23512c8ce4..a61e6ba4ac 100644 --- a/api/core/workflow/nodes/document_extractor/node.py +++ b/api/core/workflow/nodes/document_extractor/node.py @@ -168,7 +168,57 @@ def _extract_text_by_mime_type(*, file_content: bytes, mime_type: str) -> str: def _extract_text_by_file_extension(*, file_content: bytes, file_extension: str) -> str: """Extract text from a file based on its file extension.""" match file_extension: - case ".txt" | ".markdown" | ".md" | ".html" | ".htm" | ".xml": + case ( + ".txt" + | ".markdown" + | ".md" + | ".html" + | ".htm" + | ".xml" + | ".c" + | ".h" + | ".cpp" + | ".hpp" + | ".cc" + | ".cxx" + | ".c++" + | ".py" + | ".js" + | ".ts" + | ".jsx" + | ".tsx" + | ".java" + | ".php" + | ".rb" + | ".go" + | ".rs" + | ".swift" + | ".kt" + | ".scala" + | ".sh" + | ".bash" + | ".bat" + | ".ps1" + | ".sql" + | ".r" + | ".m" + | ".pl" + | ".lua" + | ".vim" + | ".asm" + | ".s" + | ".css" 
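+            # every extension in this arm is decoded as plain text below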
+ | ".scss" + | ".less" + | ".sass" + | ".ini" + | ".cfg" + | ".conf" + | ".toml" + | ".env" + | ".log" + | ".vtt" + ): return _extract_text_from_plain_text(file_content) case ".json": return _extract_text_from_json(file_content) @@ -194,8 +244,6 @@ def _extract_text_by_file_extension(*, file_content: bytes, file_extension: str) return _extract_text_from_eml(file_content) case ".msg": return _extract_text_from_msg(file_content) - case ".vtt": - return _extract_text_from_vtt(file_content) case ".properties": return _extract_text_from_properties(file_content) case _: diff --git a/web/app/components/workflow/nodes/_base/components/before-run-form/form-item.tsx b/web/app/components/workflow/nodes/_base/components/before-run-form/form-item.tsx index 430359b845..c3f4f31ea3 100644 --- a/web/app/components/workflow/nodes/_base/components/before-run-form/form-item.tsx +++ b/web/app/components/workflow/nodes/_base/components/before-run-form/form-item.tsx @@ -181,7 +181,7 @@ const FormItem: FC = ({ value={singleFileValue} onChange={handleSingleFileChange} fileConfig={{ - allowed_file_types: inStepRun + allowed_file_types: inStepRun && (!payload.allowed_file_types || payload.allowed_file_types.length === 0) ? [ SupportUploadFileTypes.image, SupportUploadFileTypes.document, @@ -189,7 +189,7 @@ const FormItem: FC = ({ SupportUploadFileTypes.video, ] : payload.allowed_file_types, - allowed_file_extensions: inStepRun + allowed_file_extensions: inStepRun && (!payload.allowed_file_extensions || payload.allowed_file_extensions.length === 0) ? [ ...FILE_EXTS[SupportUploadFileTypes.image], ...FILE_EXTS[SupportUploadFileTypes.document], @@ -208,7 +208,7 @@ const FormItem: FC = ({ value={value} onChange={files => onChange(files)} fileConfig={{ - allowed_file_types: (inStepRun || isIteratorItemFile) + allowed_file_types: (inStepRun || isIteratorItemFile) && (!payload.allowed_file_types || payload.allowed_file_types.length === 0) ? [ SupportUploadFileTypes.image, SupportUploadFileTypes.document, @@ -216,7 +216,7 @@ const FormItem: FC = ({ SupportUploadFileTypes.video, ] : payload.allowed_file_types, - allowed_file_extensions: (inStepRun || isIteratorItemFile) + allowed_file_extensions: (inStepRun || isIteratorItemFile) && (!payload.allowed_file_extensions || payload.allowed_file_extensions.length === 0) ? 
[ ...FILE_EXTS[SupportUploadFileTypes.image], ...FILE_EXTS[SupportUploadFileTypes.document], From ea502d36a986e5afec7511921e5d2185a1ee57c6 Mon Sep 17 00:00:00 2001 From: Matri Qi Date: Sun, 10 Aug 2025 11:16:43 +0800 Subject: [PATCH 16/21] lint: fix sonarjs/no-dead-store issues (#23653) --- .../[datasetId]/layout-main.tsx | 6 ----- .../dataset-config/settings-modal/index.tsx | 2 -- .../base/markdown-blocks/code-block.tsx | 1 - web/app/components/base/mermaid/index.tsx | 12 +--------- .../external-api/external-api-modal/Form.tsx | 4 +--- .../datasets/rename-modal/index.tsx | 4 ++-- web/app/components/explore/app-list/index.tsx | 24 +------------------ web/app/components/explore/sidebar/index.tsx | 1 - .../invited-modal/invitation-link.tsx | 4 +--- .../model-parameter-modal/parameter-item.tsx | 4 ---- .../marketplace/search-box/tags-filter.tsx | 3 +-- .../components/plugins/marketplace/utils.ts | 2 +- .../share/text-generation/index.tsx | 5 +--- .../block-selector/use-sticky-scroll.ts | 2 +- .../components/workflow/hooks/use-workflow.ts | 9 ++++--- .../nodes/_base/components/node-control.tsx | 2 -- .../_base/components/workflow-panel/index.tsx | 1 - .../nodes/list-operator/use-config.ts | 5 ++-- web/eslint.config.mjs | 2 +- 19 files changed, 17 insertions(+), 76 deletions(-) diff --git a/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout-main.tsx b/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout-main.tsx index d70179266a..f8189b0c8a 100644 --- a/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout-main.tsx +++ b/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout-main.tsx @@ -4,7 +4,6 @@ import React, { useEffect, useMemo } from 'react' import { usePathname } from 'next/navigation' import useSWR from 'swr' import { useTranslation } from 'react-i18next' -import { useBoolean } from 'ahooks' import { RiEqualizer2Fill, RiEqualizer2Line, @@ -44,17 +43,12 @@ type IExtraInfoProps = { } const ExtraInfo = ({ isMobile, relatedApps, expand }: IExtraInfoProps) => { - const [isShowTips, { toggle: toggleTips, set: setShowTips }] = useBoolean(!isMobile) const { t } = useTranslation() const docLink = useDocLink() const hasRelatedApps = relatedApps?.data && relatedApps?.data?.length > 0 const relatedAppsTotal = relatedApps?.data?.length || 0 - useEffect(() => { - setShowTips(!isMobile) - }, [isMobile, setShowTips]) - return
{/* Related apps for desktop */}
= ({ const { data: embeddingsModelList } = useModelList(ModelTypeEnum.textEmbedding) const { modelList: rerankModelList, - defaultModel: rerankDefaultModel, - currentModel: isRerankDefaultModelValid, } = useModelListAndDefaultModelAndCurrentProviderAndModel(ModelTypeEnum.rerank) const { t } = useTranslation() const docLink = useDocLink() diff --git a/web/app/components/base/markdown-blocks/code-block.tsx b/web/app/components/base/markdown-blocks/code-block.tsx index c88cfde9e6..48de8bf4ab 100644 --- a/web/app/components/base/markdown-blocks/code-block.tsx +++ b/web/app/components/base/markdown-blocks/code-block.tsx @@ -81,7 +81,6 @@ const CodeBlock: any = memo(({ inline, className, children = '', ...props }: any const echartsRef = useRef(null) const contentRef = useRef('') const processedRef = useRef(false) // Track if content was successfully processed - const instanceIdRef = useRef(`chart-${Date.now()}-${Math.random().toString(36).substring(2, 9)}`) // Unique ID for logging const isInitialRenderRef = useRef(true) // Track if this is initial render const chartInstanceRef = useRef(null) // Direct reference to ECharts instance const resizeTimerRef = useRef(null) // For debounce handling diff --git a/web/app/components/base/mermaid/index.tsx b/web/app/components/base/mermaid/index.tsx index a953ef15a8..80271bb29b 100644 --- a/web/app/components/base/mermaid/index.tsx +++ b/web/app/components/base/mermaid/index.tsx @@ -1,4 +1,4 @@ -import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react' +import React, { useCallback, useEffect, useRef, useState } from 'react' import mermaid, { type MermaidConfig } from 'mermaid' import { useTranslation } from 'react-i18next' import { ExclamationTriangleIcon } from '@heroicons/react/24/outline' @@ -122,14 +122,6 @@ const Flowchart = React.forwardRef((props: { const renderTimeoutRef = useRef() const [errMsg, setErrMsg] = useState('') const [imagePreviewUrl, setImagePreviewUrl] = useState('') - const [isCodeComplete, setIsCodeComplete] = useState(false) - const codeCompletionCheckRef = useRef() - const prevCodeRef = useRef() - - // Create cache key from code, style and theme - const cacheKey = useMemo(() => { - return `${props.PrimitiveCode}-${look}-${currentTheme}` - }, [props.PrimitiveCode, look, currentTheme]) /** * Renders Mermaid chart @@ -537,11 +529,9 @@ const Flowchart = React.forwardRef((props: { {isLoading && !svgString && (
- {!isCodeComplete && (
{t('common.wait_for_completion', 'Waiting for diagram code to complete...')}
- )}
)} diff --git a/web/app/components/datasets/external-api/external-api-modal/Form.tsx b/web/app/components/datasets/external-api/external-api-modal/Form.tsx index 8884cb787f..5479f8147c 100644 --- a/web/app/components/datasets/external-api/external-api-modal/Form.tsx +++ b/web/app/components/datasets/external-api/external-api-modal/Form.tsx @@ -1,4 +1,4 @@ -import React, { useState } from 'react' +import React from 'react' import type { FC } from 'react' import { useTranslation } from 'react-i18next' import { RiBookOpenLine } from '@remixicon/react' @@ -28,10 +28,8 @@ const Form: FC = React.memo(({ }) => { const { t, i18n } = useTranslation() const docLink = useDocLink() - const [changeKey, setChangeKey] = useState('') const handleFormChange = (key: string, val: string) => { - setChangeKey(key) if (key === 'name') { onChange({ ...value, [key]: val }) } diff --git a/web/app/components/datasets/rename-modal/index.tsx b/web/app/components/datasets/rename-modal/index.tsx index dd53bafdd9..c497dbb957 100644 --- a/web/app/components/datasets/rename-modal/index.tsx +++ b/web/app/components/datasets/rename-modal/index.tsx @@ -28,8 +28,8 @@ const RenameDatasetModal = ({ show, dataset, onSuccess, onClose }: RenameDataset const [loading, setLoading] = useState(false) const [name, setName] = useState(dataset.name) const [description, setDescription] = useState(dataset.description) - const [externalKnowledgeId, setExternalKnowledgeId] = useState(dataset.external_knowledge_info.external_knowledge_id) - const [externalKnowledgeApiId, setExternalKnowledgeApiId] = useState(dataset.external_knowledge_info.external_knowledge_api_id) + const externalKnowledgeId = dataset.external_knowledge_info.external_knowledge_id + const externalKnowledgeApiId = dataset.external_knowledge_info.external_knowledge_api_id const onConfirm: MouseEventHandler = async () => { if (!name.trim()) { diff --git a/web/app/components/explore/app-list/index.tsx b/web/app/components/explore/app-list/index.tsx index 7e2d990bc8..79cbff81c8 100644 --- a/web/app/components/explore/app-list/index.tsx +++ b/web/app/components/explore/app-list/index.tsx @@ -51,7 +51,6 @@ const Apps = ({ handleSearch() } - const [currentType, setCurrentType] = useState('') const [currCategory, setCurrCategory] = useTabSearchParams({ defaultTab: allCategoriesEn, disableSearchParams: false, @@ -74,28 +73,7 @@ const Apps = ({ }, ) - const filteredList = useMemo(() => { - if (currCategory === allCategoriesEn) { - if (!currentType) - return allList - else if (currentType === 'chatbot') - return allList.filter(item => (item.app.mode === 'chat' || item.app.mode === 'advanced-chat')) - else if (currentType === 'agent') - return allList.filter(item => (item.app.mode === 'agent-chat')) - else - return allList.filter(item => (item.app.mode === 'workflow')) - } - else { - if (!currentType) - return allList.filter(item => item.category === currCategory) - else if (currentType === 'chatbot') - return allList.filter(item => (item.app.mode === 'chat' || item.app.mode === 'advanced-chat') && item.category === currCategory) - else if (currentType === 'agent') - return allList.filter(item => (item.app.mode === 'agent-chat') && item.category === currCategory) - else - return allList.filter(item => (item.app.mode === 'workflow') && item.category === currCategory) - } - }, [currentType, currCategory, allCategoriesEn, allList]) + const filteredList = allList.filter(item => currCategory === allCategoriesEn || item.category === currCategory) const searchFilteredList = useMemo(() => { if 
(!searchKeywords || !filteredList || filteredList.length === 0) diff --git a/web/app/components/explore/sidebar/index.tsx b/web/app/components/explore/sidebar/index.tsx index 74c397f4fd..c5866c31d4 100644 --- a/web/app/components/explore/sidebar/index.tsx +++ b/web/app/components/explore/sidebar/index.tsx @@ -49,7 +49,6 @@ const SideBar: FC = ({ const segments = useSelectedLayoutSegments() const lastSegment = segments.slice(-1)[0] const isDiscoverySelected = lastSegment === 'apps' - const isChatSelected = lastSegment === 'chat' const { installedApps, setInstalledApps, setIsFetchingInstalledApps } = useContext(ExploreContext) const { isFetching: isFetchingInstalledApps, data: ret, refetch: fetchInstalledAppList } = useGetInstalledApps() const { mutateAsync: uninstallApp } = useUninstallApp() diff --git a/web/app/components/header/account-setting/members-page/invited-modal/invitation-link.tsx b/web/app/components/header/account-setting/members-page/invited-modal/invitation-link.tsx index 68a575f503..a94ef4584f 100644 --- a/web/app/components/header/account-setting/members-page/invited-modal/invitation-link.tsx +++ b/web/app/components/header/account-setting/members-page/invited-modal/invitation-link.tsx @@ -1,11 +1,10 @@ 'use client' -import React, { useCallback, useEffect, useRef, useState } from 'react' +import React, { useCallback, useEffect, useState } from 'react' import { t } from 'i18next' import copy from 'copy-to-clipboard' import s from './index.module.css' import type { SuccessInvitationResult } from '.' import Tooltip from '@/app/components/base/tooltip' -import { randomString } from '@/utils' type IInvitationLinkProps = { value: SuccessInvitationResult @@ -15,7 +14,6 @@ const InvitationLink = ({ value, }: IInvitationLinkProps) => { const [isCopied, setIsCopied] = useState(false) - const selector = useRef(`invite-link-${randomString(4)}`) const copyHandle = useCallback(() => { // No prefix is needed here because the backend has already processed it diff --git a/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/parameter-item.tsx b/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/parameter-item.tsx index 3e969d708b..7cfb906206 100644 --- a/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/parameter-item.tsx +++ b/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/parameter-item.tsx @@ -1,7 +1,6 @@ import type { FC } from 'react' import { useEffect, useRef, useState } from 'react' import type { ModelParameterRule } from '../declarations' -import { useLanguage } from '../hooks' import { isNullOrUndefined } from '../utils' import cn from '@/utils/classnames' import Switch from '@/app/components/base/switch' @@ -10,7 +9,6 @@ import Slider from '@/app/components/base/slider' import Radio from '@/app/components/base/radio' import { SimpleSelect } from '@/app/components/base/select' import TagInput from '@/app/components/base/tag-input' -import { useTranslation } from 'react-i18next' export type ParameterValue = number | string | string[] | boolean | undefined @@ -28,8 +26,6 @@ const ParameterItem: FC = ({ onSwitch, isInWorkflow, }) => { - const { t } = useTranslation() - const language = useLanguage() const [localValue, setLocalValue] = useState(value) const numberInputRef = useRef(null) diff --git a/web/app/components/plugins/marketplace/search-box/tags-filter.tsx b/web/app/components/plugins/marketplace/search-box/tags-filter.tsx index 
bae6491727..12b84490a4 100644 --- a/web/app/components/plugins/marketplace/search-box/tags-filter.tsx +++ b/web/app/components/plugins/marketplace/search-box/tags-filter.tsx @@ -30,7 +30,7 @@ const TagsFilter = ({ const { t } = useMixedTranslation(locale) const [open, setOpen] = useState(false) const [searchText, setSearchText] = useState('') - const { tags: options, tagsMap } = useTags(t) + const { tags: options } = useTags(t) const filteredOptions = options.filter(option => option.label.toLowerCase().includes(searchText.toLowerCase())) const handleCheck = (id: string) => { if (tags.includes(id)) @@ -38,7 +38,6 @@ const TagsFilter = ({ else onTagsChange([...tags, id]) } - const selectedTagsLength = tags.length return ( { } export const getMarketplacePluginsByCollectionId = async (collectionId: string, query?: CollectionsAndPluginsSearchParams) => { - let plugins = [] as Plugin[] + let plugins: Plugin[] try { const url = `${MARKETPLACE_API_PREFIX}/collections/${collectionId}/plugins` diff --git a/web/app/components/share/text-generation/index.tsx b/web/app/components/share/text-generation/index.tsx index 4a6d1c9965..da5b09b065 100644 --- a/web/app/components/share/text-generation/index.tsx +++ b/web/app/components/share/text-generation/index.tsx @@ -151,10 +151,9 @@ const TextGeneration: FC = ({ const pendingTaskList = allTaskList.filter(task => task.status === TaskStatus.pending) const noPendingTask = pendingTaskList.length === 0 const showTaskList = allTaskList.filter(task => task.status !== TaskStatus.pending) - const [currGroupNum, doSetCurrGroupNum] = useState(0) const currGroupNumRef = useRef(0) + const setCurrGroupNum = (num: number) => { - doSetCurrGroupNum(num) currGroupNumRef.current = num } const getCurrGroupNum = () => { @@ -164,10 +163,8 @@ const TextGeneration: FC = ({ const allFailedTaskList = allTaskList.filter(task => task.status === TaskStatus.failed) const allTasksFinished = allTaskList.every(task => task.status === TaskStatus.completed) const allTasksRun = allTaskList.every(task => [TaskStatus.completed, TaskStatus.failed].includes(task.status)) - const [batchCompletionRes, doSetBatchCompletionRes] = useState>({}) const batchCompletionResRef = useRef>({}) const setBatchCompletionRes = (res: Record) => { - doSetBatchCompletionRes(res) batchCompletionResRef.current = res } const getBatchCompletionRes = () => batchCompletionResRef.current diff --git a/web/app/components/workflow/block-selector/use-sticky-scroll.ts b/web/app/components/workflow/block-selector/use-sticky-scroll.ts index 405ecdba7e..c828e9ce92 100644 --- a/web/app/components/workflow/block-selector/use-sticky-scroll.ts +++ b/web/app/components/workflow/block-selector/use-sticky-scroll.ts @@ -23,7 +23,7 @@ const useStickyScroll = ({ return const { height: wrapHeight, top: wrapTop } = wrapDom.getBoundingClientRect() const { top: nextToStickyTop } = stickyDOM.getBoundingClientRect() - let scrollPositionNew = ScrollPosition.belowTheWrap + let scrollPositionNew: ScrollPosition if (nextToStickyTop - wrapTop >= wrapHeight) scrollPositionNew = ScrollPosition.belowTheWrap diff --git a/web/app/components/workflow/hooks/use-workflow.ts b/web/app/components/workflow/hooks/use-workflow.ts index f9120f45b1..a6435ec632 100644 --- a/web/app/components/workflow/hooks/use-workflow.ts +++ b/web/app/components/workflow/hooks/use-workflow.ts @@ -444,7 +444,7 @@ export const useFetchToolsData = () => { workflowTools: workflowTools || [], }) } - if(type === 'mcp') { + if (type === 'mcp') { const mcpTools = await fetchAllMCPTools() 
workflowStore.setState({ @@ -500,18 +500,17 @@ export const useToolIcon = (data: Node['data']) => { const mcpTools = useStore(s => s.mcpTools) const toolIcon = useMemo(() => { - if(!data) + if (!data) return '' if (data.type === BlockEnum.Tool) { - let targetTools = buildInTools + let targetTools = workflowTools if (data.provider_type === CollectionType.builtIn) targetTools = buildInTools else if (data.provider_type === CollectionType.custom) targetTools = customTools else if (data.provider_type === CollectionType.mcp) targetTools = mcpTools - else - targetTools = workflowTools + return targetTools.find(toolWithProvider => canFindTool(toolWithProvider.id, data.provider_id))?.icon } }, [data, buildInTools, customTools, mcpTools, workflowTools]) diff --git a/web/app/components/workflow/nodes/_base/components/node-control.tsx b/web/app/components/workflow/nodes/_base/components/node-control.tsx index 5b92b7b6b4..0e3f54f108 100644 --- a/web/app/components/workflow/nodes/_base/components/node-control.tsx +++ b/web/app/components/workflow/nodes/_base/components/node-control.tsx @@ -11,7 +11,6 @@ import { import { useNodeDataUpdate, useNodesInteractions, - useNodesSyncDraft, } from '../../../hooks' import { type Node, NodeRunningStatus } from '../../../types' import { canRunBySingle } from '../../../utils' @@ -30,7 +29,6 @@ const NodeControl: FC = ({ const [open, setOpen] = useState(false) const { handleNodeDataUpdate } = useNodeDataUpdate() const { handleNodeSelect } = useNodesInteractions() - const { handleSyncWorkflowDraft } = useNodesSyncDraft() const isSingleRunning = data._singleRunningStatus === NodeRunningStatus.Running const handleOpenChange = useCallback((newOpen: boolean) => { setOpen(newOpen) diff --git a/web/app/components/workflow/nodes/_base/components/workflow-panel/index.tsx b/web/app/components/workflow/nodes/_base/components/workflow-panel/index.tsx index 93fab83172..4723b2dce7 100644 --- a/web/app/components/workflow/nodes/_base/components/workflow-panel/index.tsx +++ b/web/app/components/workflow/nodes/_base/components/workflow-panel/index.tsx @@ -198,7 +198,6 @@ const BasePanel: FC = ({ isShowSingleRun, hideSingleRun, runningStatus, - handleStop, runInputData, runInputDataRef, runResult, diff --git a/web/app/components/workflow/nodes/list-operator/use-config.ts b/web/app/components/workflow/nodes/list-operator/use-config.ts index efbf32b8c7..21e9761725 100644 --- a/web/app/components/workflow/nodes/list-operator/use-config.ts +++ b/web/app/components/workflow/nodes/list-operator/use-config.ts @@ -36,6 +36,7 @@ const useConfig = (id: string, payload: ListFilterNodeType) => { const { inputs, setInputs } = useNodeCrud(id, payload) const { getCurrentVariableType } = useWorkflowVariables() + const getType = useCallback((variable?: ValueSelector) => { const varType = getCurrentVariableType({ parentNode: isInIteration ? 
iterationNode : loopNode,
@@ -44,7 +45,7 @@ const useConfig = (id: string, payload: ListFilterNodeType) => {
       isChatMode,
       isConstant: false,
     })
-    let itemVarType = VarType.string
+    let itemVarType = varType
     switch (varType) {
       case VarType.arrayNumber:
         itemVarType = VarType.number
@@ -58,8 +59,6 @@ const useConfig = (id: string, payload: ListFilterNodeType) => {
       case VarType.arrayObject:
         itemVarType = VarType.object
         break
-      default:
-        itemVarType = varType
     }
     return { varType, itemVarType }
   }, [availableNodes, getCurrentVariableType, inputs.variable, isChatMode, isInIteration, iterationNode, loopNode])
diff --git a/web/eslint.config.mjs b/web/eslint.config.mjs
index dda2beff02..747c14ad67 100644
--- a/web/eslint.config.mjs
+++ b/web/eslint.config.mjs
@@ -163,7 +163,7 @@ export default combine(
     'sonarjs/single-char-in-character-classes': 'off',
     'sonarjs/anchor-precedence': 'warn',
     'sonarjs/updated-loop-counter': 'off',
-    'sonarjs/no-dead-store': 'warn',
+    'sonarjs/no-dead-store': 'error',
     'sonarjs/no-duplicated-branches': 'warn',
     'sonarjs/max-lines': 'warn', // max 1000 lines
     'sonarjs/no-variable-usage-before-declaration': 'error',
From dc65a72d934124e5ace67077b1e60bdea394a826 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Sun, 10 Aug 2025 11:17:14 +0800
Subject: [PATCH 17/21] chore: translate i18n files (#23679)

Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
---
 web/i18n/de-DE/time.ts   | 7 +++++++
 web/i18n/es-ES/time.ts   | 7 +++++++
 web/i18n/fa-IR/time.ts   | 7 +++++++
 web/i18n/fr-FR/time.ts   | 7 +++++++
 web/i18n/hi-IN/time.ts   | 7 +++++++
 web/i18n/it-IT/time.ts   | 7 +++++++
 web/i18n/ko-KR/time.ts   | 7 +++++++
 web/i18n/pl-PL/time.ts   | 7 +++++++
 web/i18n/pt-BR/time.ts   | 7 +++++++
 web/i18n/ro-RO/time.ts   | 7 +++++++
 web/i18n/ru-RU/time.ts   | 7 +++++++
 web/i18n/sl-SI/time.ts   | 7 +++++++
 web/i18n/th-TH/time.ts   | 7 +++++++
 web/i18n/tr-TR/time.ts   | 7 +++++++
 web/i18n/uk-UA/time.ts   | 6 ++++++
 web/i18n/vi-VN/time.ts   | 7 +++++++
 web/i18n/zh-Hant/time.ts | 7 +++++++
 17 files changed, 118 insertions(+)

diff --git a/web/i18n/de-DE/time.ts b/web/i18n/de-DE/time.ts
index 16f5bc8475..8e443881ef 100644
--- a/web/i18n/de-DE/time.ts
+++ b/web/i18n/de-DE/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: 'Wähle Zeit',
   },
   defaultPlaceholder: 'Wähle eine Zeit...',
+  dateFormats: {
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+    output: 'YYYY-MM-DD',
+    display: 'MMMM D, YYYY',
+    input: 'YYYY-MM-DD',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+  },
 }
 
 export default translation
diff --git a/web/i18n/es-ES/time.ts b/web/i18n/es-ES/time.ts
index 920c80eea3..25b0961a8d 100644
--- a/web/i18n/es-ES/time.ts
+++ b/web/i18n/es-ES/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: 'Elegir hora',
   },
   defaultPlaceholder: 'Elige una hora...',
+  dateFormats: {
+    input: 'YYYY-MM-DD',
+    output: 'YYYY-MM-DD',
+    display: 'MMMM D, YYYY',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+  },
 }
 
 export default translation
diff --git a/web/i18n/fa-IR/time.ts b/web/i18n/fa-IR/time.ts
index 2e4ffea784..1f0452d172 100644
--- a/web/i18n/fa-IR/time.ts
+++ b/web/i18n/fa-IR/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: 'زمان انتخاب کنید',
   },
   defaultPlaceholder: 'زمانی را انتخاب کنید...',
+  dateFormats: {
+    output: 'YYYY-MM-DD',
+    input: 'YYYY-MM-DD',
+    display: 'MMMM D, YYYY',
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+  },
 }
 
 export default translation
diff --git a/web/i18n/fr-FR/time.ts b/web/i18n/fr-FR/time.ts
index e05e6dc6b3..7ca19407df 100644
--- a/web/i18n/fr-FR/time.ts
+++ b/web/i18n/fr-FR/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: 'Choisir le temps',
   },
   defaultPlaceholder: 'Choisissez un moment...',
+  dateFormats: {
+    display: 'MMMM D, YYYY',
+    output: 'YYYY-MM-DD',
+    input: 'YYYY-MM-DD',
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+  },
 }
 
 export default translation
diff --git a/web/i18n/hi-IN/time.ts b/web/i18n/hi-IN/time.ts
index 72f6cd56c4..c0c9ee6a6f 100644
--- a/web/i18n/hi-IN/time.ts
+++ b/web/i18n/hi-IN/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: 'समय चुनें',
   },
   defaultPlaceholder: 'एक समय चुनें...',
+  dateFormats: {
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+    input: 'YYYY-MM-DD',
+    output: 'YYYY-MM-DD',
+    display: 'MMMM D, YYYY',
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+  },
 }
 
 export default translation
diff --git a/web/i18n/it-IT/time.ts b/web/i18n/it-IT/time.ts
index f330e8fd6c..0ee85bf280 100644
--- a/web/i18n/it-IT/time.ts
+++ b/web/i18n/it-IT/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: 'Scegli Tempo',
   },
   defaultPlaceholder: 'Scegli un orario...',
+  dateFormats: {
+    display: 'MMMM D, YYYY',
+    input: 'YYYY-MM-DD',
+    output: 'YYYY-MM-DD',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+  },
 }
 
 export default translation
diff --git a/web/i18n/ko-KR/time.ts b/web/i18n/ko-KR/time.ts
index 1233dbf979..1f2540581a 100644
--- a/web/i18n/ko-KR/time.ts
+++ b/web/i18n/ko-KR/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: '시간 선택',
   },
   defaultPlaceholder: '시간을 선택하세요...',
+  dateFormats: {
+    input: 'YYYY-MM-DD',
+    display: 'MMMM D, YYYY',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+    output: 'YYYY-MM-DD',
+  },
 }
 
 export default translation
diff --git a/web/i18n/pl-PL/time.ts b/web/i18n/pl-PL/time.ts
index e98ebddfcd..6cb300a162 100644
--- a/web/i18n/pl-PL/time.ts
+++ b/web/i18n/pl-PL/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: 'Wybierz czas',
   },
   defaultPlaceholder: 'Wybierz czas...',
+  dateFormats: {
+    output: 'YYYY-MM-DD',
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+    display: 'MMMM D, YYYY',
+    input: 'YYYY-MM-DD',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+  },
 }
 
 export default translation
diff --git a/web/i18n/pt-BR/time.ts b/web/i18n/pt-BR/time.ts
index fcf25cab1e..a0d634eb6a 100644
--- a/web/i18n/pt-BR/time.ts
+++ b/web/i18n/pt-BR/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: 'Escolha o Horário',
   },
   defaultPlaceholder: 'Escolha um horário...',
+  dateFormats: {
+    input: 'YYYY-MM-DD',
+    output: 'YYYY-MM-DD',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+    display: 'MMMM D, YYYY',
+  },
 }
 
 export default translation
diff --git a/web/i18n/ro-RO/time.ts b/web/i18n/ro-RO/time.ts
index 2b40803081..6fffd78d4a 100644
--- a/web/i18n/ro-RO/time.ts
+++ b/web/i18n/ro-RO/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: 'Alegeți timpul',
   },
   defaultPlaceholder: 'Alege o oră...',
+  dateFormats: {
+    display: 'MMMM D, YYYY',
+    input: 'YYYY-MM-DD',
+    output: 'YYYY-MM-DD',
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+  },
 }
 
 export default translation
diff --git a/web/i18n/ru-RU/time.ts b/web/i18n/ru-RU/time.ts
index be9e38f4db..1a8325ce17 100644
--- a/web/i18n/ru-RU/time.ts
+++ b/web/i18n/ru-RU/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: 'Выберите время',
   },
   defaultPlaceholder: 'Выберите время...',
+  dateFormats: {
+    display: 'MMMM D, YYYY',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+    output: 'YYYY-MM-DD',
+    input: 'YYYY-MM-DD',
+  },
 }
 
 export default translation
diff --git a/web/i18n/sl-SI/time.ts b/web/i18n/sl-SI/time.ts
index b88a33b675..6e2e93357e 100644
--- a/web/i18n/sl-SI/time.ts
+++ b/web/i18n/sl-SI/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: 'Izberi čas',
   },
   defaultPlaceholder: 'Izberi čas...',
+  dateFormats: {
+    display: 'MMMM D, YYYY',
+    input: 'YYYY-MM-DD',
+    output: 'YYYY-MM-DD',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+  },
 }
 
 export default translation
diff --git a/web/i18n/th-TH/time.ts b/web/i18n/th-TH/time.ts
index 03897dd863..73b2a4fed9 100644
--- a/web/i18n/th-TH/time.ts
+++ b/web/i18n/th-TH/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: 'เลือกเวลา',
   },
   defaultPlaceholder: 'เลือกเวลา...',
+  dateFormats: {
+    input: 'YYYY-MM-DD',
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+    display: 'MMMM D, YYYY',
+    output: 'YYYY-MM-DD',
+  },
 }
 
 export default translation
diff --git a/web/i18n/tr-TR/time.ts b/web/i18n/tr-TR/time.ts
index f4cded0998..a8f78094fd 100644
--- a/web/i18n/tr-TR/time.ts
+++ b/web/i18n/tr-TR/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: 'Zamanı Seç',
   },
   defaultPlaceholder: 'Bir zaman seç...',
+  dateFormats: {
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+    output: 'YYYY-MM-DD',
+    display: 'MMMM D, YYYY',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+    input: 'YYYY-MM-DD',
+  },
 }
 
 export default translation
diff --git a/web/i18n/uk-UA/time.ts b/web/i18n/uk-UA/time.ts
index 1ea08cb4ed..942af7d125 100644
--- a/web/i18n/uk-UA/time.ts
+++ b/web/i18n/uk-UA/time.ts
@@ -32,6 +32,12 @@ const translation = {
    pickTime: 'Виберіть час',
   },
   defaultPlaceholder: 'Виберіть час...',
+  dateFormats: {
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+    output: 'YYYY-MM-DD',
+    display: 'MMMM D, YYYY',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+  },
 }
 
 export default translation
diff --git a/web/i18n/vi-VN/time.ts b/web/i18n/vi-VN/time.ts
index 9c07eceb4d..635b81e150 100644
--- a/web/i18n/vi-VN/time.ts
+++ b/web/i18n/vi-VN/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: 'Chọn Thời Gian',
   },
   defaultPlaceholder: 'Chọn một thời gian...',
+  dateFormats: {
+    input: 'YYYY-MM-DD',
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+    display: 'MMMM D, YYYY',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+    output: 'YYYY-MM-DD',
+  },
 }
 
 export default translation
diff --git a/web/i18n/zh-Hant/time.ts b/web/i18n/zh-Hant/time.ts
index ddb402c422..1c8660161a 100644
--- a/web/i18n/zh-Hant/time.ts
+++ b/web/i18n/zh-Hant/time.ts
@@ -32,6 +32,13 @@ const translation = {
     pickTime: '選擇時間',
   },
   defaultPlaceholder: '選擇一個時間...',
+  dateFormats: {
+    display: 'MMMM D, YYYY',
+    output: 'YYYY-MM-DD',
+    displayWithTime: 'MMMM D, YYYY hh:mm A',
+    input: 'YYYY-MM-DD',
+    outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ',
+  },
 }
 
 export default translation
From a17b7b3d89c98cbbb530255d85d77f3bbb4dbcd2 Mon Sep 17 00:00:00 2001
From: lyzno1 <92089059+lyzno1@users.noreply.github.com>
Date: Sun, 10 Aug 2025 11:17:38 +0800
Subject: [PATCH 18/21] fix: correct File Preview API position in Japanese advanced chat template (#23645)

---
 .../template/template_advanced_chat.ja.mdx    | 160 +++++++++---------
 1 file changed, 80 insertions(+), 80 deletions(-)

diff --git 
From a17b7b3d89c98cbbb530255d85d77f3bbb4dbcd2 Mon Sep 17 00:00:00 2001
From: lyzno1 <92089059+lyzno1@users.noreply.github.com>
Date: Sun, 10 Aug 2025 11:17:38 +0800
Subject: [PATCH 18/21] fix: correct File Preview API position in Japanese advanced chat template (#23645)

---
 .../template/template_advanced_chat.ja.mdx | 160 +++++-----
 1 file changed, 80 insertions(+), 80 deletions(-)

diff --git a/web/app/components/develop/template/template_advanced_chat.ja.mdx b/web/app/components/develop/template/template_advanced_chat.ja.mdx
index 65a9daa194..bbe0f29b7a 100644
--- a/web/app/components/develop/template/template_advanced_chat.ja.mdx
+++ b/web/app/components/develop/template/template_advanced_chat.ja.mdx
@@ -392,6 +392,86 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from
 
 ---
 
+
+
+
+    アップロードされたファイルをプレビューまたはダウンロードします。このエンドポイントを使用すると、以前にファイルアップロード API でアップロードされたファイルにアクセスできます。
+
+    ファイルは、リクエストしているアプリケーションのメッセージ範囲内にある場合のみアクセス可能です。
+
+    ### パスパラメータ
+    - `file_id` (string) 必須
+      プレビューするファイルの一意識別子。ファイルアップロード API レスポンスから取得します。
+
+    ### クエリパラメータ
+    - `as_attachment` (boolean) オプション
+      ファイルを添付ファイルとして強制ダウンロードするかどうか。デフォルトは `false`(ブラウザでプレビュー)。
+
+    ### レスポンス
+    ブラウザ表示またはダウンロード用の適切なヘッダー付きでファイル内容を返します。
+    - `Content-Type` ファイル MIME タイプに基づいて設定
+    - `Content-Length` ファイルサイズ(バイト、利用可能な場合)
+    - `Content-Disposition` `as_attachment=true` の場合は "attachment" に設定
+    - `Cache-Control` パフォーマンス向上のためのキャッシュヘッダー
+    - `Accept-Ranges` 音声/動画ファイルの場合は "bytes" に設定
+
+    ### エラー
+    - 400, `invalid_param`, パラメータ入力異常
+    - 403, `file_access_denied`, ファイルアクセス拒否またはファイルが現在のアプリケーションに属していません
+    - 404, `file_not_found`, ファイルが見つからないか削除されています
+    - 500, サーバー内部エラー
+
+
+
+    ### リクエスト例
+
+
+    ```bash {{ title: 'cURL - ブラウザプレビュー' }}
+    curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview' \
+    --header 'Authorization: Bearer {api_key}'
+    ```
+
+
+
+    ### 添付ファイルとしてダウンロード
+
+
+    ```bash {{ title: 'cURL' }}
+    curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview?as_attachment=true' \
+    --header 'Authorization: Bearer {api_key}' \
+    --output downloaded_file.png
+    ```
+
+
+
+    ### レスポンスヘッダー例
+
+    ```http {{ title: 'ヘッダー - 画像プレビュー' }}
+    Content-Type: image/png
+    Content-Length: 1024
+    Cache-Control: public, max-age=3600
+    ```
+
+
+    ### ダウンロードレスポンスヘッダー
+
+    ```http {{ title: 'ヘッダー - ファイルダウンロード' }}
+    Content-Type: image/png
+    Content-Length: 1024
+    Content-Disposition: attachment; filename*=UTF-8''example.png
+    Cache-Control: public, max-age=3600
+    ```
+
+
+
+
+---
+
 ---
 
-
-
-
-    アップロードされたファイルをプレビューまたはダウンロードします。このエンドポイントを使用すると、以前にファイルアップロード API でアップロードされたファイルにアクセスできます。
-
-    ファイルは、リクエストしているアプリケーションのメッセージ範囲内にある場合のみアクセス可能です。
-
-    ### パスパラメータ
-    - `file_id` (string) 必須
-      プレビューするファイルの一意識別子。ファイルアップロード API レスポンスから取得します。
-
-    ### クエリパラメータ
-    - `as_attachment` (boolean) オプション
-      ファイルを添付ファイルとして強制ダウンロードするかどうか。デフォルトは `false`(ブラウザでプレビュー)。
-
-    ### レスポンス
-    ブラウザ表示またはダウンロード用の適切なヘッダー付きでファイル内容を返します。
-    - `Content-Type` ファイル MIME タイプに基づいて設定
-    - `Content-Length` ファイルサイズ(バイト、利用可能な場合)
-    - `Content-Disposition` `as_attachment=true` の場合は "attachment" に設定
-    - `Cache-Control` パフォーマンス向上のためのキャッシュヘッダー
-    - `Accept-Ranges` 音声/動画ファイルの場合は "bytes" に設定
-
-    ### エラー
-    - 400, `invalid_param`, パラメータ入力異常
-    - 403, `file_access_denied`, ファイルアクセス拒否またはファイルが現在のアプリケーションに属していません
-    - 404, `file_not_found`, ファイルが見つからないか削除されています
-    - 500, サーバー内部エラー
-
-
-
-    ### リクエスト例
-
-
-    ```bash {{ title: 'cURL - ブラウザプレビュー' }}
-    curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview' \
-    --header 'Authorization: Bearer {api_key}'
-    ```
-
-
-
-    ### 添付ファイルとしてダウンロード
-
-
-    ```bash {{ title: 'cURL' }}
-    curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview?as_attachment=true' \
-    --header 'Authorization: Bearer {api_key}' \
-    --output downloaded_file.png
-    ```
-
-
-
-    ### レスポンスヘッダー例
-
-    ```http {{ title: 'ヘッダー - 画像プレビュー' }}
-    Content-Type: image/png
-    Content-Length: 1024
-    Cache-Control: public, max-age=3600
-    ```
-
-
-    ### ダウンロードレスポンスヘッダー
-
-    ```http {{ title: 'ヘッダー - ファイルダウンロード' }}
-    Content-Type: image/png
-    Content-Length: 1024
-    Content-Disposition: attachment; filename*=UTF-8''example.png
-    Cache-Control: public, max-age=3600
-    ```
-
-
-
-
----
-

Date: Sun, 10 Aug 2025 17:21:05 +0800
Subject: [PATCH 19/21] fix: improve dark mode UI consistency in signin page (#23684)

---
 .../components/base/select/locale-signin.tsx    | 61 +++++++++++++++++++
 web/app/signin/_header.tsx                      |  4 +-
 web/app/signin/assets/github-dark.svg           | 17 ++++++
 .../signin/{normalForm.tsx => normal-form.tsx}  |  9 ++-
 .../{oneMoreStep.tsx => one-more-step.tsx}      |  0
 web/app/signin/page.module.css                  |  4 ++
 web/app/signin/page.tsx                         |  4 +-
 7 files changed, 90 insertions(+), 9 deletions(-)
 create mode 100644 web/app/components/base/select/locale-signin.tsx
 create mode 100644 web/app/signin/assets/github-dark.svg
 rename web/app/signin/{normalForm.tsx => normal-form.tsx} (96%)
 rename web/app/signin/{oneMoreStep.tsx => one-more-step.tsx} (100%)

diff --git a/web/app/components/base/select/locale-signin.tsx b/web/app/components/base/select/locale-signin.tsx
new file mode 100644
index 0000000000..48dbee1ca3
--- /dev/null
+++ b/web/app/components/base/select/locale-signin.tsx
@@ -0,0 +1,61 @@
+'use client'
+import { Menu, MenuButton, MenuItem, MenuItems, Transition } from '@headlessui/react'
+import { Fragment } from 'react'
+import { GlobeAltIcon } from '@heroicons/react/24/outline'
+
+type ISelectProps = {
+  items: Array<{ value: string; name: string }>
+  value?: string
+  className?: string
+  onChange?: (value: string) => void
+}
+
+export default function LocaleSigninSelect({
+  items,
+  value,
+  onChange,
+}: ISelectProps) {
+  const item = items.filter(item => item.value === value)[0]
+
+  return (
+    <Menu as="div" className="relative inline-block text-left">
+      <div>
+        <MenuButton
+          className="inline-flex w-full items-center justify-center rounded-lg px-[10px] py-[6px] text-[13px] font-medium text-text-secondary hover:bg-state-base-hover"
+        >
+          <GlobeAltIcon className="mr-1 h-[18px] w-[18px]" aria-hidden="true" />
+          {item?.name}
+        </MenuButton>
+      </div>
+      <Transition
+        as={Fragment}
+        enter="transition ease-out duration-100"
+        enterFrom="transform opacity-0 scale-95"
+        enterTo="transform opacity-100 scale-100"
+        leave="transition ease-in duration-75"
+        leaveFrom="transform opacity-100 scale-100"
+        leaveTo="transform opacity-0 scale-95"
+      >
+        <MenuItems
+          className="absolute right-0 mt-2 w-[120px] origin-top-right rounded-md bg-components-panel-bg shadow-lg ring-1 ring-black/5 focus:outline-none"
+        >
+          <div className="px-1 py-1">
+            {items.map((item) => {
+              return <MenuItem key={item.value}>
+                <button
+                  className="group flex w-full items-center rounded-lg px-3 py-2 text-sm text-text-secondary hover:bg-state-base-hover"
+                  onClick={(evt) => {
+                    evt.preventDefault()
+                    onChange && onChange(item.value)
+                  }}
+                >
+                  {item.name}
+                </button>
+              </MenuItem>
+            })}
+          </div>
+        </MenuItems>
+      </Transition>
+    </Menu>
+  )
+}
diff --git a/web/app/signin/_header.tsx b/web/app/signin/_header.tsx
index 03e05924b8..731a229b8e 100644
--- a/web/app/signin/_header.tsx
+++ b/web/app/signin/_header.tsx
@@ -1,7 +1,7 @@
 'use client'
 import React from 'react'
 import { useContext } from 'use-context-selector'
-import Select from '@/app/components/base/select/locale'
+import LocaleSigninSelect from '@/app/components/base/select/locale-signin'
 import Divider from '@/app/components/base/divider'
 import { languages } from '@/i18n-config/language'
 import type { Locale } from '@/i18n-config'
@@ -33,7 +33,7 @@ const Header = () => {
         />
         : }
-