diff --git a/api/configs/feature/__init__.py b/api/configs/feature/__init__.py index cf855b1cc0..99ac618bcb 100644 --- a/api/configs/feature/__init__.py +++ b/api/configs/feature/__init__.py @@ -959,6 +959,16 @@ class MailConfig(BaseSettings): default=None, ) + ENABLE_TRIAL_APP: bool = Field( + description="Enable trial app", + default=False, + ) + + ENABLE_EXPLORE_BANNER: bool = Field( + description="Enable explore banner", + default=False, + ) + class RagEtlConfig(BaseSettings): """ diff --git a/api/controllers/console/__init__.py b/api/controllers/console/__init__.py index ad878fc266..fdc9aabc83 100644 --- a/api/controllers/console/__init__.py +++ b/api/controllers/console/__init__.py @@ -107,10 +107,12 @@ from .datasets.rag_pipeline import ( # Import explore controllers from .explore import ( + banner, installed_app, parameter, recommended_app, saved_message, + trial, ) # Import tag controllers @@ -145,6 +147,7 @@ __all__ = [ "apikey", "app", "audio", + "banner", "billing", "bp", "completion", @@ -198,6 +201,7 @@ __all__ = [ "statistic", "tags", "tool_providers", + "trial", "trigger_providers", "version", "website", diff --git a/api/controllers/console/admin.py b/api/controllers/console/admin.py index a25ca5ef51..978df15cf1 100644 --- a/api/controllers/console/admin.py +++ b/api/controllers/console/admin.py @@ -15,7 +15,7 @@ from controllers.console.wraps import only_edition_cloud from core.db.session_factory import session_factory from extensions.ext_database import db from libs.token import extract_access_token -from models.model import App, InstalledApp, RecommendedApp +from models.model import App, ExporleBanner, InstalledApp, RecommendedApp, TrialApp P = ParamSpec("P") R = TypeVar("R") @@ -32,6 +32,8 @@ class InsertExploreAppPayload(BaseModel): language: str = Field(...) category: str = Field(...) position: int = Field(...) 
+ can_trial: bool = Field(default=False) + trial_limit: int = Field(default=0) @field_validator("language") @classmethod @@ -39,11 +41,33 @@ class InsertExploreAppPayload(BaseModel): return supported_language(value) +class InsertExploreBannerPayload(BaseModel): + category: str = Field(...) + title: str = Field(...) + description: str = Field(...) + img_src: str = Field(..., alias="img-src") + language: str = Field(default="en-US") + link: str = Field(...) + sort: int = Field(...) + + @field_validator("language") + @classmethod + def validate_language(cls, value: str) -> str: + return supported_language(value) + + model_config = {"populate_by_name": True} + + console_ns.schema_model( InsertExploreAppPayload.__name__, InsertExploreAppPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), ) +console_ns.schema_model( + InsertExploreBannerPayload.__name__, + InsertExploreBannerPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), +) + def admin_required(view: Callable[P, R]): @wraps(view) @@ -109,6 +133,20 @@ class InsertExploreAppListApi(Resource): ) db.session.add(recommended_app) + if payload.can_trial: + trial_app = db.session.execute( + select(TrialApp).where(TrialApp.app_id == payload.app_id) + ).scalar_one_or_none() + if not trial_app: + db.session.add( + TrialApp( + app_id=payload.app_id, + tenant_id=app.tenant_id, + trial_limit=payload.trial_limit, + ) + ) + else: + trial_app.trial_limit = payload.trial_limit app.is_public = True db.session.commit() @@ -123,6 +161,20 @@ class InsertExploreAppListApi(Resource): recommended_app.category = payload.category recommended_app.position = payload.position + if payload.can_trial: + trial_app = db.session.execute( + select(TrialApp).where(TrialApp.app_id == payload.app_id) + ).scalar_one_or_none() + if not trial_app: + db.session.add( + TrialApp( + app_id=payload.app_id, + tenant_id=app.tenant_id, + trial_limit=payload.trial_limit, + ) + ) + else: + trial_app.trial_limit = 
payload.trial_limit app.is_public = True db.session.commit() @@ -168,7 +220,62 @@ class InsertExploreAppApi(Resource): for installed_app in installed_apps: session.delete(installed_app) + trial_app = session.execute( + select(TrialApp).where(TrialApp.app_id == recommended_app.app_id) + ).scalar_one_or_none() + if trial_app: + session.delete(trial_app) + db.session.delete(recommended_app) db.session.commit() return {"result": "success"}, 204 + + +@console_ns.route("/admin/insert-explore-banner") +class InsertExploreBannerApi(Resource): + @console_ns.doc("insert_explore_banner") + @console_ns.doc(description="Insert an explore banner") + @console_ns.expect(console_ns.models[InsertExploreBannerPayload.__name__]) + @console_ns.response(201, "Banner inserted successfully") + @only_edition_cloud + @admin_required + def post(self): + payload = InsertExploreBannerPayload.model_validate(console_ns.payload) + + content = { + "category": payload.category, + "title": payload.title, + "description": payload.description, + "img-src": payload.img_src, + } + + banner = ExporleBanner( + content=content, + link=payload.link, + sort=payload.sort, + language=payload.language, + ) + db.session.add(banner) + db.session.commit() + + return {"result": "success"}, 201 + + +@console_ns.route("/admin/insert-explore-banner/") +class DeleteExploreBannerApi(Resource): + @console_ns.doc("delete_explore_banner") + @console_ns.doc(description="Delete an explore banner") + @console_ns.doc(params={"banner_id": "Banner ID to delete"}) + @console_ns.response(204, "Banner deleted successfully") + @only_edition_cloud + @admin_required + def delete(self, banner_id): + banner = db.session.execute(select(ExporleBanner).where(ExporleBanner.id == banner_id)).scalar_one_or_none() + if not banner: + raise NotFound(f"Banner '{banner_id}' is not found") + + db.session.delete(banner) + db.session.commit() + + return {"result": "success"}, 204 diff --git a/api/controllers/console/app/annotation.py 
b/api/controllers/console/app/annotation.py index 6a4c1528b0..388820e20a 100644 --- a/api/controllers/console/app/annotation.py +++ b/api/controllers/console/app/annotation.py @@ -272,6 +272,7 @@ class AnnotationExportApi(Resource): @account_initialization_required @edit_permission_required def get(self, app_id): + app_id = str(app_id) annotation_list = AppAnnotationService.export_annotation_list_by_app_id(app_id) response_data = {"data": marshal(annotation_list, annotation_fields)} @@ -359,7 +360,6 @@ class AnnotationBatchImportApi(Resource): file.seek(0, 2) # Seek to end of file file_size = file.tell() file.seek(0) # Reset to beginning - max_size_bytes = dify_config.ANNOTATION_IMPORT_FILE_SIZE_LIMIT * 1024 * 1024 if file_size > max_size_bytes: abort( diff --git a/api/controllers/console/app/error.py b/api/controllers/console/app/error.py index fbd7901646..6b4bd6755a 100644 --- a/api/controllers/console/app/error.py +++ b/api/controllers/console/app/error.py @@ -115,3 +115,9 @@ class InvokeRateLimitError(BaseHTTPException): error_code = "rate_limit_error" description = "Rate Limit Error" code = 429 + + +class NeedAddIdsError(BaseHTTPException): + error_code = "need_add_ids" + description = "Need to add ids." 
+ code = 400 diff --git a/api/controllers/console/app/message.py b/api/controllers/console/app/message.py index 12ada8b798..66f4524156 100644 --- a/api/controllers/console/app/message.py +++ b/api/controllers/console/app/message.py @@ -202,6 +202,7 @@ message_detail_model = console_ns.model( "status": fields.String, "error": fields.String, "parent_message_id": fields.String, + "generation_detail": fields.Raw, }, ) diff --git a/api/controllers/console/app/wraps.py b/api/controllers/console/app/wraps.py index 9bb2718f89..e687d980fa 100644 --- a/api/controllers/console/app/wraps.py +++ b/api/controllers/console/app/wraps.py @@ -23,6 +23,11 @@ def _load_app_model(app_id: str) -> App | None: return app_model +def _load_app_model_with_trial(app_id: str) -> App | None: + app_model = db.session.query(App).where(App.id == app_id, App.status == "normal").first() + return app_model + + def get_app_model(view: Callable[P, R] | None = None, *, mode: Union[AppMode, list[AppMode], None] = None): def decorator(view_func: Callable[P1, R1]): @wraps(view_func) @@ -62,3 +67,44 @@ def get_app_model(view: Callable[P, R] | None = None, *, mode: Union[AppMode, li return decorator else: return decorator(view) + + +def get_app_model_with_trial(view: Callable[P, R] | None = None, *, mode: Union[AppMode, list[AppMode], None] = None): + def decorator(view_func: Callable[P, R]): + @wraps(view_func) + def decorated_view(*args: P.args, **kwargs: P.kwargs): + if not kwargs.get("app_id"): + raise ValueError("missing app_id in path parameters") + + app_id = kwargs.get("app_id") + app_id = str(app_id) + + del kwargs["app_id"] + + app_model = _load_app_model_with_trial(app_id) + + if not app_model: + raise AppNotFoundError() + + app_mode = AppMode.value_of(app_model.mode) + + if mode is not None: + if isinstance(mode, list): + modes = mode + else: + modes = [mode] + + if app_mode not in modes: + mode_values = {m.value for m in modes} + raise AppNotFoundError(f"App mode is not in the supported list: 
{mode_values}") + + kwargs["app_model"] = app_model + + return view_func(*args, **kwargs) + + return decorated_view + + if view is None: + return decorator + else: + return decorator(view) diff --git a/api/controllers/console/auth/oauth.py b/api/controllers/console/auth/oauth.py index 112e152432..3943379573 100644 --- a/api/controllers/console/auth/oauth.py +++ b/api/controllers/console/auth/oauth.py @@ -161,10 +161,7 @@ class OAuthCallback(Resource): ip_address=extract_remote_ip(request), ) - base_url = dify_config.CONSOLE_WEB_URL - query_char = "&" if "?" in base_url else "?" - target_url = f"{base_url}{query_char}oauth_new_user={str(oauth_new_user).lower()}" - response = redirect(target_url) + response = redirect(f"{dify_config.CONSOLE_WEB_URL}?oauth_new_user={str(oauth_new_user).lower()}") set_access_token_to_cookie(request, response, token_pair.access_token) set_refresh_token_to_cookie(request, response, token_pair.refresh_token) diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index 8ceb896d4f..979ab63a58 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -146,6 +146,7 @@ class DatasetUpdatePayload(BaseModel): embedding_model: str | None = None embedding_model_provider: str | None = None retrieval_model: dict[str, Any] | None = None + summary_index_setting: dict[str, Any] | None = None partial_member_list: list[dict[str, str]] | None = None external_retrieval_model: dict[str, Any] | None = None external_knowledge_id: str | None = None diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py index ac78d3854b..5982822713 100644 --- a/api/controllers/console/datasets/datasets_document.py +++ b/api/controllers/console/datasets/datasets_document.py @@ -39,9 +39,10 @@ from fields.document_fields import ( from libs.datetime_utils import naive_utc_now from libs.login import 
current_account_with_tenant, login_required from models import DatasetProcessRule, Document, DocumentSegment, UploadFile -from models.dataset import DocumentPipelineExecutionLog +from models.dataset import DocumentPipelineExecutionLog, DocumentSegmentSummary from services.dataset_service import DatasetService, DocumentService from services.entities.knowledge_entities.knowledge_entities import KnowledgeConfig, ProcessRule, RetrievalModel +from tasks.generate_summary_index_task import generate_summary_index_task from ..app.error import ( ProviderModelCurrentlyNotSupportError, @@ -104,6 +105,10 @@ class DocumentRenamePayload(BaseModel): name: str +class GenerateSummaryPayload(BaseModel): + document_list: list[str] + + register_schema_models( console_ns, KnowledgeConfig, @@ -111,6 +116,7 @@ register_schema_models( RetrievalModel, DocumentRetryPayload, DocumentRenamePayload, + GenerateSummaryPayload, ) @@ -295,6 +301,97 @@ class DatasetDocumentListApi(Resource): paginated_documents = db.paginate(select=query, page=page, per_page=limit, max_per_page=100, error_out=False) documents = paginated_documents.items + + # Check if dataset has summary index enabled + has_summary_index = ( + dataset.summary_index_setting + and dataset.summary_index_setting.get("enable") is True + ) + + # Filter documents that need summary calculation + documents_need_summary = [doc for doc in documents if doc.need_summary is True] + document_ids_need_summary = [str(doc.id) for doc in documents_need_summary] + + # Calculate summary_index_status for documents that need summary (only if dataset summary index is enabled) + summary_status_map = {} + if has_summary_index and document_ids_need_summary: + # Get all segments for these documents (excluding qa_model and re_segment) + segments = ( + db.session.query(DocumentSegment.id, DocumentSegment.document_id) + .where( + DocumentSegment.document_id.in_(document_ids_need_summary), + DocumentSegment.status != "re_segment", + DocumentSegment.tenant_id == 
current_tenant_id, + ) + .all() + ) + + # Group segments by document_id + document_segments_map = {} + for segment in segments: + doc_id = str(segment.document_id) + if doc_id not in document_segments_map: + document_segments_map[doc_id] = [] + document_segments_map[doc_id].append(segment.id) + + # Get all summary records for these segments + all_segment_ids = [seg.id for seg in segments] + summaries = {} + if all_segment_ids: + summary_records = ( + db.session.query(DocumentSegmentSummary) + .where( + DocumentSegmentSummary.chunk_id.in_(all_segment_ids), + DocumentSegmentSummary.dataset_id == dataset_id, + DocumentSegmentSummary.enabled == True, # Only count enabled summaries + ) + .all() + ) + summaries = {summary.chunk_id: summary.status for summary in summary_records} + + # Calculate summary_index_status for each document + for doc_id in document_ids_need_summary: + segment_ids = document_segments_map.get(doc_id, []) + if not segment_ids: + # No segments, status is "GENERATING" (waiting to generate) + summary_status_map[doc_id] = "GENERATING" + continue + + # Count summary statuses for this document's segments + status_counts = {"completed": 0, "generating": 0, "error": 0, "not_started": 0} + for segment_id in segment_ids: + status = summaries.get(segment_id, "not_started") + if status in status_counts: + status_counts[status] += 1 + else: + status_counts["not_started"] += 1 + + total_segments = len(segment_ids) + completed_count = status_counts["completed"] + generating_count = status_counts["generating"] + error_count = status_counts["error"] + + # Determine overall status (only three states: GENERATING, COMPLETED, ERROR) + if completed_count == total_segments: + summary_status_map[doc_id] = "COMPLETED" + elif error_count > 0: + # Has errors (even if some are completed or generating) + summary_status_map[doc_id] = "ERROR" + elif generating_count > 0 or status_counts["not_started"] > 0: + # Still generating or not started + summary_status_map[doc_id] = 
"GENERATING" + else: + # Default to generating + summary_status_map[doc_id] = "GENERATING" + + # Add summary_index_status to each document + for document in documents: + if has_summary_index and document.need_summary is True: + document.summary_index_status = summary_status_map.get(str(document.id), "GENERATING") + else: + # Return null if summary index is not enabled or document doesn't need summary + document.summary_index_status = None + if fetch: for document in documents: completed_segments = ( @@ -393,6 +490,7 @@ class DatasetDocumentListApi(Resource): return {"result": "success"}, 204 + @console_ns.route("/datasets/init") class DatasetInitApi(Resource): @console_ns.doc("init_dataset") @@ -780,6 +878,7 @@ class DocumentApi(DocumentResource): "display_status": document.display_status, "doc_form": document.doc_form, "doc_language": document.doc_language, + "need_summary": document.need_summary if document.need_summary is not None else False, } else: dataset_process_rules = DatasetService.get_process_rules(dataset_id) @@ -815,6 +914,7 @@ class DocumentApi(DocumentResource): "display_status": document.display_status, "doc_form": document.doc_form, "doc_language": document.doc_language, + "need_summary": document.need_summary if document.need_summary is not None else False, } return response, 200 @@ -1182,3 +1282,211 @@ class DocumentPipelineExecutionLogApi(DocumentResource): "input_data": log.input_data, "datasource_node_id": log.datasource_node_id, }, 200 + + +@console_ns.route("/datasets//documents/generate-summary") +class DocumentGenerateSummaryApi(Resource): + @console_ns.doc("generate_summary_for_documents") + @console_ns.doc(description="Generate summary index for documents") + @console_ns.doc(params={"dataset_id": "Dataset ID"}) + @console_ns.expect(console_ns.models[GenerateSummaryPayload.__name__]) + @console_ns.response(200, "Summary generation started successfully") + @console_ns.response(400, "Invalid request or dataset configuration") + 
@console_ns.response(403, "Permission denied") + @console_ns.response(404, "Dataset not found") + @setup_required + @login_required + @account_initialization_required + @cloud_edition_billing_rate_limit_check("knowledge") + def post(self, dataset_id): + """ + Generate summary index for specified documents. + + This endpoint checks if the dataset configuration supports summary generation + (indexing_technique must be 'high_quality' and summary_index_setting.enable must be true), + then asynchronously generates summary indexes for the provided documents. + """ + current_user, _ = current_account_with_tenant() + dataset_id = str(dataset_id) + + # Get dataset + dataset = DatasetService.get_dataset(dataset_id) + if not dataset: + raise NotFound("Dataset not found.") + + # Check permissions + if not current_user.is_dataset_editor: + raise Forbidden() + + try: + DatasetService.check_dataset_permission(dataset, current_user) + except services.errors.account.NoPermissionError as e: + raise Forbidden(str(e)) + + # Validate request payload + payload = GenerateSummaryPayload.model_validate(console_ns.payload or {}) + document_list = payload.document_list + + if not document_list: + raise ValueError("document_list cannot be empty.") + + # Check if dataset configuration supports summary generation + if dataset.indexing_technique != "high_quality": + raise ValueError( + f"Summary generation is only available for 'high_quality' indexing technique. " + f"Current indexing technique: {dataset.indexing_technique}" + ) + + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + raise ValueError( + "Summary index is not enabled for this dataset. " + "Please enable it in the dataset settings." 
+ ) + + # Verify all documents exist and belong to the dataset + documents = ( + db.session.query(Document) + .filter( + Document.id.in_(document_list), + Document.dataset_id == dataset_id, + ) + .all() + ) + + if len(documents) != len(document_list): + found_ids = {doc.id for doc in documents} + missing_ids = set(document_list) - found_ids + raise NotFound(f"Some documents not found: {list(missing_ids)}") + + # Dispatch async tasks for each document + for document in documents: + # Skip qa_model documents as they don't generate summaries + if document.doc_form == "qa_model": + logger.info( + f"Skipping summary generation for qa_model document {document.id}" + ) + continue + + # Dispatch async task + generate_summary_index_task(dataset_id, document.id) + logger.info( + f"Dispatched summary generation task for document {document.id} in dataset {dataset_id}" + ) + + return {"result": "success"}, 200 + + +@console_ns.route("/datasets//documents//summary-status") +class DocumentSummaryStatusApi(DocumentResource): + @console_ns.doc("get_document_summary_status") + @console_ns.doc(description="Get summary index generation status for a document") + @console_ns.doc(params={"dataset_id": "Dataset ID", "document_id": "Document ID"}) + @console_ns.response(200, "Summary status retrieved successfully") + @console_ns.response(404, "Document not found") + @setup_required + @login_required + @account_initialization_required + def get(self, dataset_id, document_id): + """ + Get summary index generation status for a document. 
+ + Returns: + - total_segments: Total number of segments in the document + - summary_status: Dictionary with status counts + - completed: Number of summaries completed + - generating: Number of summaries being generated + - error: Number of summaries with errors + - not_started: Number of segments without summary records + - summaries: List of summary records with status and content preview + """ + current_user, _ = current_account_with_tenant() + dataset_id = str(dataset_id) + document_id = str(document_id) + + # Get document + document = self.get_document(dataset_id, document_id) + + # Get dataset + dataset = DatasetService.get_dataset(dataset_id) + if not dataset: + raise NotFound("Dataset not found.") + + # Check permissions + try: + DatasetService.check_dataset_permission(dataset, current_user) + except services.errors.account.NoPermissionError as e: + raise Forbidden(str(e)) + + # Get all segments for this document + segments = ( + db.session.query(DocumentSegment) + .filter( + DocumentSegment.document_id == document_id, + DocumentSegment.dataset_id == dataset_id, + DocumentSegment.status == "completed", + DocumentSegment.enabled == True, + ) + .all() + ) + + total_segments = len(segments) + + # Get all summary records for these segments + segment_ids = [segment.id for segment in segments] + summaries = [] + if segment_ids: + summaries = ( + db.session.query(DocumentSegmentSummary) + .filter( + DocumentSegmentSummary.document_id == document_id, + DocumentSegmentSummary.dataset_id == dataset_id, + DocumentSegmentSummary.chunk_id.in_(segment_ids), + DocumentSegmentSummary.enabled == True, # Only return enabled summaries + ) + .all() + ) + + # Create a mapping of chunk_id to summary + summary_map = {summary.chunk_id: summary for summary in summaries} + + # Count statuses + status_counts = { + "completed": 0, + "generating": 0, + "error": 0, + "not_started": 0, + } + + summary_list = [] + for segment in segments: + summary = summary_map.get(segment.id) + if 
summary: + status = summary.status + status_counts[status] = status_counts.get(status, 0) + 1 + summary_list.append({ + "segment_id": segment.id, + "segment_position": segment.position, + "status": summary.status, + "summary_preview": summary.summary_content[:100] + "..." if summary.summary_content and len(summary.summary_content) > 100 else summary.summary_content, + "error": summary.error, + "created_at": int(summary.created_at.timestamp()) if summary.created_at else None, + "updated_at": int(summary.updated_at.timestamp()) if summary.updated_at else None, + }) + else: + status_counts["not_started"] += 1 + summary_list.append({ + "segment_id": segment.id, + "segment_position": segment.position, + "status": "not_started", + "summary_preview": None, + "error": None, + "created_at": None, + "updated_at": None, + }) + + return { + "total_segments": total_segments, + "summary_status": status_counts, + "summaries": summary_list, + }, 200 diff --git a/api/controllers/console/datasets/datasets_segments.py b/api/controllers/console/datasets/datasets_segments.py index 16fecb41c6..423462f966 100644 --- a/api/controllers/console/datasets/datasets_segments.py +++ b/api/controllers/console/datasets/datasets_segments.py @@ -32,7 +32,7 @@ from extensions.ext_redis import redis_client from fields.segment_fields import child_chunk_fields, segment_fields from libs.helper import escape_like_pattern from libs.login import current_account_with_tenant, login_required -from models.dataset import ChildChunk, DocumentSegment +from models.dataset import ChildChunk, DocumentSegment, DocumentSegmentSummary from models.model import UploadFile from services.dataset_service import DatasetService, DocumentService, SegmentService from services.entities.knowledge_entities.knowledge_entities import ChildChunkUpdateArgs, SegmentUpdateArgs @@ -41,6 +41,23 @@ from services.errors.chunk import ChildChunkIndexingError as ChildChunkIndexingS from tasks.batch_create_segment_to_index_task import 
batch_create_segment_to_index_task +def _get_segment_with_summary(segment, dataset_id): + """Helper function to marshal segment and add summary information.""" + segment_dict = marshal(segment, segment_fields) + # Query summary for this segment (only enabled summaries) + summary = ( + db.session.query(DocumentSegmentSummary) + .where( + DocumentSegmentSummary.chunk_id == segment.id, + DocumentSegmentSummary.dataset_id == dataset_id, + DocumentSegmentSummary.enabled == True, # Only return enabled summaries + ) + .first() + ) + segment_dict["summary"] = summary.summary_content if summary else None + return segment_dict + + class SegmentListQuery(BaseModel): limit: int = Field(default=20, ge=1, le=100) status: list[str] = Field(default_factory=list) @@ -63,6 +80,7 @@ class SegmentUpdatePayload(BaseModel): keywords: list[str] | None = None regenerate_child_chunks: bool = False attachment_ids: list[str] | None = None + summary: str | None = None # Summary content for summary index class BatchImportPayload(BaseModel): @@ -180,8 +198,34 @@ class DatasetDocumentSegmentListApi(Resource): segments = db.paginate(select=query, page=page, per_page=limit, max_per_page=100, error_out=False) + # Query summaries for all segments in this page (batch query for efficiency) + segment_ids = [segment.id for segment in segments.items] + summaries = {} + if segment_ids: + summary_records = ( + db.session.query(DocumentSegmentSummary) + .where( + DocumentSegmentSummary.chunk_id.in_(segment_ids), + DocumentSegmentSummary.dataset_id == dataset_id, + ) + .all() + ) + # Only include enabled summaries + summaries = { + summary.chunk_id: summary.summary_content + for summary in summary_records + if summary.enabled is True + } + + # Add summary to each segment + segments_with_summary = [] + for segment in segments.items: + segment_dict = marshal(segment, segment_fields) + segment_dict["summary"] = summaries.get(segment.id) + segments_with_summary.append(segment_dict) + response = { - "data": 
marshal(segments.items, segment_fields), + "data": segments_with_summary, "limit": limit, "total": segments.total, "total_pages": segments.pages, @@ -327,7 +371,7 @@ class DatasetDocumentSegmentAddApi(Resource): payload_dict = payload.model_dump(exclude_none=True) SegmentService.segment_create_args_validate(payload_dict, document) segment = SegmentService.create_segment(payload_dict, document, dataset) - return {"data": marshal(segment, segment_fields), "doc_form": document.doc_form}, 200 + return {"data": _get_segment_with_summary(segment, dataset_id), "doc_form": document.doc_form}, 200 @console_ns.route("/datasets//documents//segments/") @@ -389,10 +433,12 @@ class DatasetDocumentSegmentUpdateApi(Resource): payload = SegmentUpdatePayload.model_validate(console_ns.payload or {}) payload_dict = payload.model_dump(exclude_none=True) SegmentService.segment_create_args_validate(payload_dict, document) + + # Update segment (summary update with change detection is handled in SegmentService.update_segment) segment = SegmentService.update_segment( SegmentUpdateArgs.model_validate(payload.model_dump(exclude_none=True)), segment, document, dataset ) - return {"data": marshal(segment, segment_fields), "doc_form": document.doc_form}, 200 + return {"data": _get_segment_with_summary(segment, dataset_id), "doc_form": document.doc_form}, 200 @setup_required @login_required diff --git a/api/controllers/console/datasets/hit_testing.py b/api/controllers/console/datasets/hit_testing.py index 932cb4fcce..c947132070 100644 --- a/api/controllers/console/datasets/hit_testing.py +++ b/api/controllers/console/datasets/hit_testing.py @@ -1,4 +1,4 @@ -from flask_restx import Resource +from flask_restx import Resource, fields from controllers.common.schema import register_schema_model from libs.login import login_required @@ -10,17 +10,56 @@ from ..wraps import ( cloud_edition_billing_rate_limit_check, setup_required, ) +from fields.hit_testing_fields import ( + child_chunk_fields, + 
document_fields, + files_fields, + hit_testing_record_fields, + segment_fields, +) register_schema_model(console_ns, HitTestingPayload) +def _get_or_create_model(model_name: str, field_def): + """Get or create a flask_restx model to avoid dict type issues in Swagger.""" + existing = console_ns.models.get(model_name) + if existing is None: + existing = console_ns.model(model_name, field_def) + return existing + + +# Register models for flask_restx to avoid dict type issues in Swagger +document_model = _get_or_create_model("HitTestingDocument", document_fields) + +segment_fields_copy = segment_fields.copy() +segment_fields_copy["document"] = fields.Nested(document_model) +segment_model = _get_or_create_model("HitTestingSegment", segment_fields_copy) + +child_chunk_model = _get_or_create_model("HitTestingChildChunk", child_chunk_fields) +files_model = _get_or_create_model("HitTestingFile", files_fields) + +hit_testing_record_fields_copy = hit_testing_record_fields.copy() +hit_testing_record_fields_copy["segment"] = fields.Nested(segment_model) +hit_testing_record_fields_copy["child_chunks"] = fields.List(fields.Nested(child_chunk_model)) +hit_testing_record_fields_copy["files"] = fields.List(fields.Nested(files_model)) +hit_testing_record_model = _get_or_create_model("HitTestingRecord", hit_testing_record_fields_copy) + +# Response model for hit testing API +hit_testing_response_fields = { + "query": fields.String, + "records": fields.List(fields.Nested(hit_testing_record_model)), +} +hit_testing_response_model = _get_or_create_model("HitTestingResponse", hit_testing_response_fields) + + @console_ns.route("/datasets//hit-testing") class HitTestingApi(Resource, DatasetsHitTestingBase): @console_ns.doc("test_dataset_retrieval") @console_ns.doc(description="Test dataset knowledge retrieval") @console_ns.doc(params={"dataset_id": "Dataset ID"}) @console_ns.expect(console_ns.models[HitTestingPayload.__name__]) - @console_ns.response(200, "Hit testing completed 
successfully") + @console_ns.response(200, "Hit testing completed successfully", model=hit_testing_response_model) @console_ns.response(404, "Dataset not found") @console_ns.response(400, "Invalid parameters") @setup_required diff --git a/api/controllers/console/explore/banner.py b/api/controllers/console/explore/banner.py new file mode 100644 index 0000000000..da306fbc9d --- /dev/null +++ b/api/controllers/console/explore/banner.py @@ -0,0 +1,43 @@ +from flask import request +from flask_restx import Resource + +from controllers.console import api +from controllers.console.explore.wraps import explore_banner_enabled +from extensions.ext_database import db +from models.model import ExporleBanner + + +class BannerApi(Resource): + """Resource for banner list.""" + + @explore_banner_enabled + def get(self): + """Get banner list.""" + language = request.args.get("language", "en-US") + + # Build base query for enabled banners + base_query = db.session.query(ExporleBanner).where(ExporleBanner.status == "enabled") + + # Try to get banners in the requested language + banners = base_query.where(ExporleBanner.language == language).order_by(ExporleBanner.sort).all() + + # Fallback to en-US if no banners found and language is not en-US + if not banners and language != "en-US": + banners = base_query.where(ExporleBanner.language == "en-US").order_by(ExporleBanner.sort).all() + # Convert banners to serializable format + result = [] + for banner in banners: + banner_data = { + "id": banner.id, + "content": banner.content, # Already parsed as JSON by SQLAlchemy + "link": banner.link, + "sort": banner.sort, + "status": banner.status, + "created_at": banner.created_at.isoformat() if banner.created_at else None, + } + result.append(banner_data) + + return result + + +api.add_resource(BannerApi, "/explore/banners") diff --git a/api/controllers/console/explore/error.py b/api/controllers/console/explore/error.py index 1e05ff4206..e96fa64f84 100644 --- 
a/api/controllers/console/explore/error.py +++ b/api/controllers/console/explore/error.py @@ -29,3 +29,25 @@ class AppAccessDeniedError(BaseHTTPException): error_code = "access_denied" description = "App access denied." code = 403 + + +class TrialAppNotAllowed(BaseHTTPException): + """*403* `Trial App Not Allowed` + + Raise if the user has reached the trial app limit. + """ + + error_code = "trial_app_not_allowed" + code = 403 + description = "the app is not allowed to be trial." + + +class TrialAppLimitExceeded(BaseHTTPException): + """*403* `Trial App Limit Exceeded` + + Raise if the user has exceeded the trial app limit. + """ + + error_code = "trial_app_limit_exceeded" + code = 403 + description = "The user has exceeded the trial app limit." diff --git a/api/controllers/console/explore/recommended_app.py b/api/controllers/console/explore/recommended_app.py index 2b2f807694..362513ec1c 100644 --- a/api/controllers/console/explore/recommended_app.py +++ b/api/controllers/console/explore/recommended_app.py @@ -29,6 +29,7 @@ recommended_app_fields = { "category": fields.String, "position": fields.Integer, "is_listed": fields.Boolean, + "can_trial": fields.Boolean, } recommended_app_list_fields = { diff --git a/api/controllers/console/explore/trial.py b/api/controllers/console/explore/trial.py new file mode 100644 index 0000000000..97d856bebe --- /dev/null +++ b/api/controllers/console/explore/trial.py @@ -0,0 +1,512 @@ +import logging +from typing import Any, cast + +from flask import request +from flask_restx import Resource, marshal, marshal_with, reqparse +from werkzeug.exceptions import Forbidden, InternalServerError, NotFound + +import services +from controllers.common.fields import Parameters as ParametersResponse +from controllers.common.fields import Site as SiteResponse +from controllers.console import api +from controllers.console.app.error import ( + AppUnavailableError, + AudioTooLargeError, + CompletionRequestError, + ConversationCompletedError, + 
NeedAddIdsError, + NoAudioUploadedError, + ProviderModelCurrentlyNotSupportError, + ProviderNotInitializeError, + ProviderNotSupportSpeechToTextError, + ProviderQuotaExceededError, + UnsupportedAudioTypeError, +) +from controllers.console.app.wraps import get_app_model_with_trial +from controllers.console.explore.error import ( + AppSuggestedQuestionsAfterAnswerDisabledError, + NotChatAppError, + NotCompletionAppError, + NotWorkflowAppError, +) +from controllers.console.explore.wraps import TrialAppResource, trial_feature_enable +from controllers.web.error import InvokeRateLimitError as InvokeRateLimitHttpError +from core.app.app_config.common.parameters_mapping import get_parameters_from_feature_dict +from core.app.apps.base_app_queue_manager import AppQueueManager +from core.app.entities.app_invoke_entities import InvokeFrom +from core.errors.error import ( + ModelCurrentlyNotSupportError, + ProviderTokenNotInitError, + QuotaExceededError, +) +from core.model_runtime.errors.invoke import InvokeError +from core.workflow.graph_engine.manager import GraphEngineManager +from extensions.ext_database import db +from fields.app_fields import app_detail_fields_with_site +from fields.dataset_fields import dataset_fields +from fields.workflow_fields import workflow_fields +from libs import helper +from libs.helper import uuid_value +from libs.login import current_user +from models import Account +from models.account import TenantStatus +from models.model import AppMode, Site +from models.workflow import Workflow +from services.app_generate_service import AppGenerateService +from services.app_service import AppService +from services.audio_service import AudioService +from services.dataset_service import DatasetService +from services.errors.audio import ( + AudioTooLargeServiceError, + NoAudioUploadedServiceError, + ProviderNotSupportSpeechToTextServiceError, + UnsupportedAudioTypeServiceError, +) +from services.errors.conversation import ConversationNotExistsError +from 
services.errors.llm import InvokeRateLimitError +from services.errors.message import ( + MessageNotExistsError, + SuggestedQuestionsAfterAnswerDisabledError, +) +from services.message_service import MessageService +from services.recommended_app_service import RecommendedAppService + +logger = logging.getLogger(__name__) + + +class TrialAppWorkflowRunApi(TrialAppResource): + def post(self, trial_app): + """ + Run workflow + """ + app_model = trial_app + if not app_model: + raise NotWorkflowAppError() + app_mode = AppMode.value_of(app_model.mode) + if app_mode != AppMode.WORKFLOW: + raise NotWorkflowAppError() + + parser = reqparse.RequestParser() + parser.add_argument("inputs", type=dict, required=True, nullable=False, location="json") + parser.add_argument("files", type=list, required=False, location="json") + args = parser.parse_args() + assert current_user is not None + try: + app_id = app_model.id + user_id = current_user.id + response = AppGenerateService.generate( + app_model=app_model, user=current_user, args=args, invoke_from=InvokeFrom.EXPLORE, streaming=True + ) + RecommendedAppService.add_trial_app_record(app_id, user_id) + return helper.compact_generate_response(response) + except ProviderTokenNotInitError as ex: + raise ProviderNotInitializeError(ex.description) + except QuotaExceededError: + raise ProviderQuotaExceededError() + except ModelCurrentlyNotSupportError: + raise ProviderModelCurrentlyNotSupportError() + except InvokeError as e: + raise CompletionRequestError(e.description) + except InvokeRateLimitError as ex: + raise InvokeRateLimitHttpError(ex.description) + except ValueError as e: + raise e + except Exception: + logger.exception("internal server error.") + raise InternalServerError() + + +class TrialAppWorkflowTaskStopApi(TrialAppResource): + def post(self, trial_app, task_id: str): + """ + Stop workflow task + """ + app_model = trial_app + if not app_model: + raise NotWorkflowAppError() + app_mode = AppMode.value_of(app_model.mode) + if 
app_mode != AppMode.WORKFLOW: + raise NotWorkflowAppError() + assert current_user is not None + + # Stop using both mechanisms for backward compatibility + # Legacy stop flag mechanism (without user check) + AppQueueManager.set_stop_flag_no_user_check(task_id) + + # New graph engine command channel mechanism + GraphEngineManager.send_stop_command(task_id) + + return {"result": "success"} + + +class TrialChatApi(TrialAppResource): + @trial_feature_enable + def post(self, trial_app): + app_model = trial_app + app_mode = AppMode.value_of(app_model.mode) + if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}: + raise NotChatAppError() + + parser = reqparse.RequestParser() + parser.add_argument("inputs", type=dict, required=True, location="json") + parser.add_argument("query", type=str, required=True, location="json") + parser.add_argument("files", type=list, required=False, location="json") + parser.add_argument("conversation_id", type=uuid_value, location="json") + parser.add_argument("parent_message_id", type=uuid_value, required=False, location="json") + parser.add_argument("retriever_from", type=str, required=False, default="explore_app", location="json") + args = parser.parse_args() + + args["auto_generate_name"] = False + + try: + if not isinstance(current_user, Account): + raise ValueError("current_user must be an Account instance") + + # Get IDs before they might be detached from session + app_id = app_model.id + user_id = current_user.id + + response = AppGenerateService.generate( + app_model=app_model, user=current_user, args=args, invoke_from=InvokeFrom.EXPLORE, streaming=True + ) + RecommendedAppService.add_trial_app_record(app_id, user_id) + return helper.compact_generate_response(response) + except services.errors.conversation.ConversationNotExistsError: + raise NotFound("Conversation Not Exists.") + except services.errors.conversation.ConversationCompletedError: + raise ConversationCompletedError() + except 
services.errors.app_model_config.AppModelConfigBrokenError: + logger.exception("App model config broken.") + raise AppUnavailableError() + except ProviderTokenNotInitError as ex: + raise ProviderNotInitializeError(ex.description) + except QuotaExceededError: + raise ProviderQuotaExceededError() + except ModelCurrentlyNotSupportError: + raise ProviderModelCurrentlyNotSupportError() + except InvokeError as e: + raise CompletionRequestError(e.description) + except InvokeRateLimitError as ex: + raise InvokeRateLimitHttpError(ex.description) + except ValueError as e: + raise e + except Exception: + logger.exception("internal server error.") + raise InternalServerError() + + +class TrialMessageSuggestedQuestionApi(TrialAppResource): + @trial_feature_enable + def get(self, trial_app, message_id): + app_model = trial_app + app_mode = AppMode.value_of(app_model.mode) + if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}: + raise NotChatAppError() + + message_id = str(message_id) + + try: + if not isinstance(current_user, Account): + raise ValueError("current_user must be an Account instance") + questions = MessageService.get_suggested_questions_after_answer( + app_model=app_model, user=current_user, message_id=message_id, invoke_from=InvokeFrom.EXPLORE + ) + except MessageNotExistsError: + raise NotFound("Message not found") + except ConversationNotExistsError: + raise NotFound("Conversation not found") + except SuggestedQuestionsAfterAnswerDisabledError: + raise AppSuggestedQuestionsAfterAnswerDisabledError() + except ProviderTokenNotInitError as ex: + raise ProviderNotInitializeError(ex.description) + except QuotaExceededError: + raise ProviderQuotaExceededError() + except ModelCurrentlyNotSupportError: + raise ProviderModelCurrentlyNotSupportError() + except InvokeError as e: + raise CompletionRequestError(e.description) + except Exception: + logger.exception("internal server error.") + raise InternalServerError() + + return {"data": questions} + 
+ +class TrialChatAudioApi(TrialAppResource): + @trial_feature_enable + def post(self, trial_app): + app_model = trial_app + + file = request.files["file"] + + try: + if not isinstance(current_user, Account): + raise ValueError("current_user must be an Account instance") + + # Get IDs before they might be detached from session + app_id = app_model.id + user_id = current_user.id + + response = AudioService.transcript_asr(app_model=app_model, file=file, end_user=None) + RecommendedAppService.add_trial_app_record(app_id, user_id) + return response + except services.errors.app_model_config.AppModelConfigBrokenError: + logger.exception("App model config broken.") + raise AppUnavailableError() + except NoAudioUploadedServiceError: + raise NoAudioUploadedError() + except AudioTooLargeServiceError as e: + raise AudioTooLargeError(str(e)) + except UnsupportedAudioTypeServiceError: + raise UnsupportedAudioTypeError() + except ProviderNotSupportSpeechToTextServiceError: + raise ProviderNotSupportSpeechToTextError() + except ProviderTokenNotInitError as ex: + raise ProviderNotInitializeError(ex.description) + except QuotaExceededError: + raise ProviderQuotaExceededError() + except ModelCurrentlyNotSupportError: + raise ProviderModelCurrentlyNotSupportError() + except InvokeError as e: + raise CompletionRequestError(e.description) + except ValueError as e: + raise e + except Exception as e: + logger.exception("internal server error.") + raise InternalServerError() + + +class TrialChatTextApi(TrialAppResource): + @trial_feature_enable + def post(self, trial_app): + app_model = trial_app + try: + parser = reqparse.RequestParser() + parser.add_argument("message_id", type=str, required=False, location="json") + parser.add_argument("voice", type=str, location="json") + parser.add_argument("text", type=str, location="json") + parser.add_argument("streaming", type=bool, location="json") + args = parser.parse_args() + + message_id = args.get("message_id", None) + text = 
args.get("text", None) + voice = args.get("voice", None) + if not isinstance(current_user, Account): + raise ValueError("current_user must be an Account instance") + + # Get IDs before they might be detached from session + app_id = app_model.id + user_id = current_user.id + + response = AudioService.transcript_tts(app_model=app_model, text=text, voice=voice, message_id=message_id) + RecommendedAppService.add_trial_app_record(app_id, user_id) + return response + except services.errors.app_model_config.AppModelConfigBrokenError: + logger.exception("App model config broken.") + raise AppUnavailableError() + except NoAudioUploadedServiceError: + raise NoAudioUploadedError() + except AudioTooLargeServiceError as e: + raise AudioTooLargeError(str(e)) + except UnsupportedAudioTypeServiceError: + raise UnsupportedAudioTypeError() + except ProviderNotSupportSpeechToTextServiceError: + raise ProviderNotSupportSpeechToTextError() + except ProviderTokenNotInitError as ex: + raise ProviderNotInitializeError(ex.description) + except QuotaExceededError: + raise ProviderQuotaExceededError() + except ModelCurrentlyNotSupportError: + raise ProviderModelCurrentlyNotSupportError() + except InvokeError as e: + raise CompletionRequestError(e.description) + except ValueError as e: + raise e + except Exception as e: + logger.exception("internal server error.") + raise InternalServerError() + + +class TrialCompletionApi(TrialAppResource): + @trial_feature_enable + def post(self, trial_app): + app_model = trial_app + if app_model.mode != "completion": + raise NotCompletionAppError() + + parser = reqparse.RequestParser() + parser.add_argument("inputs", type=dict, required=True, location="json") + parser.add_argument("query", type=str, location="json", default="") + parser.add_argument("files", type=list, required=False, location="json") + parser.add_argument("response_mode", type=str, choices=["blocking", "streaming"], location="json") + parser.add_argument("retriever_from", type=str, 
required=False, default="explore_app", location="json") + args = parser.parse_args() + + streaming = args["response_mode"] == "streaming" + args["auto_generate_name"] = False + + try: + if not isinstance(current_user, Account): + raise ValueError("current_user must be an Account instance") + + # Get IDs before they might be detached from session + app_id = app_model.id + user_id = current_user.id + + response = AppGenerateService.generate( + app_model=app_model, user=current_user, args=args, invoke_from=InvokeFrom.EXPLORE, streaming=streaming + ) + + RecommendedAppService.add_trial_app_record(app_id, user_id) + return helper.compact_generate_response(response) + except services.errors.conversation.ConversationNotExistsError: + raise NotFound("Conversation Not Exists.") + except services.errors.conversation.ConversationCompletedError: + raise ConversationCompletedError() + except services.errors.app_model_config.AppModelConfigBrokenError: + logger.exception("App model config broken.") + raise AppUnavailableError() + except ProviderTokenNotInitError as ex: + raise ProviderNotInitializeError(ex.description) + except QuotaExceededError: + raise ProviderQuotaExceededError() + except ModelCurrentlyNotSupportError: + raise ProviderModelCurrentlyNotSupportError() + except InvokeError as e: + raise CompletionRequestError(e.description) + except ValueError as e: + raise e + except Exception: + logger.exception("internal server error.") + raise InternalServerError() + + +class TrialSitApi(Resource): + """Resource for trial app sites.""" + + @trial_feature_enable + @get_app_model_with_trial + def get(self, app_model): + """Retrieve app site info. + + Returns the site configuration for the application including theme, icons, and text. 
+ """ + site = db.session.query(Site).where(Site.app_id == app_model.id).first() + + if not site: + raise Forbidden() + + assert app_model.tenant + if app_model.tenant.status == TenantStatus.ARCHIVE: + raise Forbidden() + + return SiteResponse.model_validate(site).model_dump(mode="json") + + +class TrialAppParameterApi(Resource): + """Resource for app variables.""" + + @trial_feature_enable + @get_app_model_with_trial + def get(self, app_model): + """Retrieve app parameters.""" + + if app_model is None: + raise AppUnavailableError() + + if app_model.mode in {AppMode.ADVANCED_CHAT, AppMode.WORKFLOW}: + workflow = app_model.workflow + if workflow is None: + raise AppUnavailableError() + + features_dict = workflow.features_dict + user_input_form = workflow.user_input_form(to_old_structure=True) + else: + app_model_config = app_model.app_model_config + if app_model_config is None: + raise AppUnavailableError() + + features_dict = app_model_config.to_dict() + + user_input_form = features_dict.get("user_input_form", []) + + parameters = get_parameters_from_feature_dict(features_dict=features_dict, user_input_form=user_input_form) + return ParametersResponse.model_validate(parameters).model_dump(mode="json") + + +class AppApi(Resource): + @trial_feature_enable + @get_app_model_with_trial + @marshal_with(app_detail_fields_with_site) + def get(self, app_model): + """Get app detail""" + + app_service = AppService() + app_model = app_service.get_app(app_model) + + return app_model + + +class AppWorkflowApi(Resource): + @trial_feature_enable + @get_app_model_with_trial + @marshal_with(workflow_fields) + def get(self, app_model): + """Get workflow detail""" + if not app_model.workflow_id: + raise AppUnavailableError() + + workflow = ( + db.session.query(Workflow) + .where( + Workflow.id == app_model.workflow_id, + ) + .first() + ) + return workflow + + +class DatasetListApi(Resource): + @trial_feature_enable + @get_app_model_with_trial + def get(self, app_model): + page = 
request.args.get("page", default=1, type=int) + limit = request.args.get("limit", default=20, type=int) + ids = request.args.getlist("ids") + + tenant_id = app_model.tenant_id + if ids: + datasets, total = DatasetService.get_datasets_by_ids(ids, tenant_id) + else: + raise NeedAddIdsError() + + data = cast(list[dict[str, Any]], marshal(datasets, dataset_fields)) + + response = {"data": data, "has_more": len(datasets) == limit, "limit": limit, "total": total, "page": page} + return response + + +api.add_resource(TrialChatApi, "/trial-apps//chat-messages", endpoint="trial_app_chat_completion") + +api.add_resource( + TrialMessageSuggestedQuestionApi, + "/trial-apps//messages//suggested-questions", + endpoint="trial_app_suggested_question", +) + +api.add_resource(TrialChatAudioApi, "/trial-apps//audio-to-text", endpoint="trial_app_audio") +api.add_resource(TrialChatTextApi, "/trial-apps//text-to-audio", endpoint="trial_app_text") + +api.add_resource(TrialCompletionApi, "/trial-apps//completion-messages", endpoint="trial_app_completion") + +api.add_resource(TrialSitApi, "/trial-apps//site") + +api.add_resource(TrialAppParameterApi, "/trial-apps//parameters", endpoint="trial_app_parameters") + +api.add_resource(AppApi, "/trial-apps/", endpoint="trial_app") + +api.add_resource(TrialAppWorkflowRunApi, "/trial-apps//workflows/run", endpoint="trial_app_workflow_run") +api.add_resource(TrialAppWorkflowTaskStopApi, "/trial-apps//workflows/tasks//stop") + +api.add_resource(AppWorkflowApi, "/trial-apps//workflows", endpoint="trial_app_workflow") +api.add_resource(DatasetListApi, "/trial-apps//datasets", endpoint="trial_app_datasets") diff --git a/api/controllers/console/explore/wraps.py b/api/controllers/console/explore/wraps.py index 2a97d312aa..38f0a04904 100644 --- a/api/controllers/console/explore/wraps.py +++ b/api/controllers/console/explore/wraps.py @@ -2,14 +2,15 @@ from collections.abc import Callable from functools import wraps from typing import Concatenate, ParamSpec, 
TypeVar +from flask import abort from flask_restx import Resource from werkzeug.exceptions import NotFound -from controllers.console.explore.error import AppAccessDeniedError +from controllers.console.explore.error import AppAccessDeniedError, TrialAppLimitExceeded, TrialAppNotAllowed from controllers.console.wraps import account_initialization_required from extensions.ext_database import db from libs.login import current_account_with_tenant, login_required -from models import InstalledApp +from models import AccountTrialAppRecord, App, InstalledApp, TrialApp from services.enterprise.enterprise_service import EnterpriseService from services.feature_service import FeatureService @@ -71,6 +72,61 @@ def user_allowed_to_access_app(view: Callable[Concatenate[InstalledApp, P], R] | return decorator +def trial_app_required(view: Callable[Concatenate[App, P], R] | None = None): + def decorator(view: Callable[Concatenate[App, P], R]): + @wraps(view) + def decorated(app_id: str, *args: P.args, **kwargs: P.kwargs): + current_user, _ = current_account_with_tenant() + + trial_app = db.session.query(TrialApp).where(TrialApp.app_id == str(app_id)).first() + + if trial_app is None: + raise TrialAppNotAllowed() + app = trial_app.app + + if app is None: + raise TrialAppNotAllowed() + + account_trial_app_record = ( + db.session.query(AccountTrialAppRecord) + .where(AccountTrialAppRecord.account_id == current_user.id, AccountTrialAppRecord.app_id == app_id) + .first() + ) + if account_trial_app_record: + if account_trial_app_record.count >= trial_app.trial_limit: + raise TrialAppLimitExceeded() + + return view(app, *args, **kwargs) + + return decorated + + if view: + return decorator(view) + return decorator + + +def trial_feature_enable(view: Callable[..., R]) -> Callable[..., R]: + @wraps(view) + def decorated(*args, **kwargs): + features = FeatureService.get_system_features() + if not features.enable_trial_app: + abort(403, "Trial app feature is not enabled.") + return view(*args, 
**kwargs) + + return decorated + + +def explore_banner_enabled(view: Callable[..., R]) -> Callable[..., R]: + @wraps(view) + def decorated(*args, **kwargs): + features = FeatureService.get_system_features() + if not features.enable_explore_banner: + abort(403, "Explore banner feature is not enabled.") + return view(*args, **kwargs) + + return decorated + + class InstalledAppResource(Resource): # must be reversed if there are multiple decorators @@ -80,3 +136,13 @@ class InstalledAppResource(Resource): account_initialization_required, login_required, ] + + +class TrialAppResource(Resource): + # must be reversed if there are multiple decorators + + method_decorators = [ + trial_app_required, + account_initialization_required, + login_required, + ] diff --git a/api/controllers/console/wraps.py b/api/controllers/console/wraps.py index 95fc006a12..781941caa0 100644 --- a/api/controllers/console/wraps.py +++ b/api/controllers/console/wraps.py @@ -358,14 +358,12 @@ def annotation_import_rate_limit(view: Callable[P, R]): def decorated(*args: P.args, **kwargs: P.kwargs): _, current_tenant_id = current_account_with_tenant() current_time = int(time.time() * 1000) - # Check per-minute rate limit minute_key = f"annotation_import_rate_limit:{current_tenant_id}:1min" redis_client.zadd(minute_key, {current_time: current_time}) redis_client.zremrangebyscore(minute_key, 0, current_time - 60000) minute_count = redis_client.zcard(minute_key) redis_client.expire(minute_key, 120) # 2 minutes TTL - if minute_count > dify_config.ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE: abort( 429, @@ -379,7 +377,6 @@ def annotation_import_rate_limit(view: Callable[P, R]): redis_client.zremrangebyscore(hour_key, 0, current_time - 3600000) hour_count = redis_client.zcard(hour_key) redis_client.expire(hour_key, 7200) # 2 hours TTL - if hour_count > dify_config.ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR: abort( 429, diff --git a/api/core/agent/agent_app_runner.py b/api/core/agent/agent_app_runner.py new file mode 
100644 index 0000000000..2ee0a23aab --- /dev/null +++ b/api/core/agent/agent_app_runner.py @@ -0,0 +1,380 @@ +import logging +from collections.abc import Generator +from copy import deepcopy +from typing import Any + +from core.agent.base_agent_runner import BaseAgentRunner +from core.agent.entities import AgentEntity, AgentLog, AgentResult +from core.agent.patterns.strategy_factory import StrategyFactory +from core.app.apps.base_app_queue_manager import PublishFrom +from core.app.entities.queue_entities import QueueAgentThoughtEvent, QueueMessageEndEvent, QueueMessageFileEvent +from core.file import file_manager +from core.model_runtime.entities import ( + AssistantPromptMessage, + LLMResult, + LLMResultChunk, + LLMUsage, + PromptMessage, + PromptMessageContentType, + SystemPromptMessage, + TextPromptMessageContent, + UserPromptMessage, +) +from core.model_runtime.entities.message_entities import ImagePromptMessageContent, PromptMessageContentUnionTypes +from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform +from core.tools.__base.tool import Tool +from core.tools.entities.tool_entities import ToolInvokeMeta +from core.tools.tool_engine import ToolEngine +from models.model import Message + +logger = logging.getLogger(__name__) + + +class AgentAppRunner(BaseAgentRunner): + def _create_tool_invoke_hook(self, message: Message): + """ + Create a tool invoke hook that uses ToolEngine.agent_invoke. + This hook handles file creation and returns proper meta information. 
+ """ + # Get trace manager from app generate entity + trace_manager = self.application_generate_entity.trace_manager + + def tool_invoke_hook( + tool: Tool, tool_args: dict[str, Any], tool_name: str + ) -> tuple[str, list[str], ToolInvokeMeta]: + """Hook that uses agent_invoke for proper file and meta handling.""" + tool_invoke_response, message_files, tool_invoke_meta = ToolEngine.agent_invoke( + tool=tool, + tool_parameters=tool_args, + user_id=self.user_id, + tenant_id=self.tenant_id, + message=message, + invoke_from=self.application_generate_entity.invoke_from, + agent_tool_callback=self.agent_callback, + trace_manager=trace_manager, + app_id=self.application_generate_entity.app_config.app_id, + message_id=message.id, + conversation_id=self.conversation.id, + ) + + # Publish files and track IDs + for message_file_id in message_files: + self.queue_manager.publish( + QueueMessageFileEvent(message_file_id=message_file_id), + PublishFrom.APPLICATION_MANAGER, + ) + self._current_message_file_ids.append(message_file_id) + + return tool_invoke_response, message_files, tool_invoke_meta + + return tool_invoke_hook + + def run(self, message: Message, query: str, **kwargs: Any) -> Generator[LLMResultChunk, None, None]: + """ + Run Agent application + """ + self.query = query + app_generate_entity = self.application_generate_entity + + app_config = self.app_config + assert app_config is not None, "app_config is required" + assert app_config.agent is not None, "app_config.agent is required" + + # convert tools into ModelRuntime Tool format + tool_instances, _ = self._init_prompt_tools() + + assert app_config.agent + + # Create tool invoke hook for agent_invoke + tool_invoke_hook = self._create_tool_invoke_hook(message) + + # Get instruction for ReAct strategy + instruction = self.app_config.prompt_template.simple_prompt_template or "" + + # Use factory to create appropriate strategy + strategy = StrategyFactory.create_strategy( + model_features=self.model_features, + 
model_instance=self.model_instance, + tools=list(tool_instances.values()), + files=list(self.files), + max_iterations=app_config.agent.max_iteration, + context=self.build_execution_context(), + agent_strategy=self.config.strategy, + tool_invoke_hook=tool_invoke_hook, + instruction=instruction, + ) + + # Initialize state variables + current_agent_thought_id = None + has_published_thought = False + current_tool_name: str | None = None + self._current_message_file_ids: list[str] = [] + + # organize prompt messages + prompt_messages = self._organize_prompt_messages() + + # Run strategy + generator = strategy.run( + prompt_messages=prompt_messages, + model_parameters=app_generate_entity.model_conf.parameters, + stop=app_generate_entity.model_conf.stop, + stream=True, + ) + + # Consume generator and collect result + result: AgentResult | None = None + try: + while True: + try: + output = next(generator) + except StopIteration as e: + # Generator finished, get the return value + result = e.value + break + + if isinstance(output, LLMResultChunk): + # Handle LLM chunk + if current_agent_thought_id and not has_published_thought: + self.queue_manager.publish( + QueueAgentThoughtEvent(agent_thought_id=current_agent_thought_id), + PublishFrom.APPLICATION_MANAGER, + ) + has_published_thought = True + + yield output + + elif isinstance(output, AgentLog): + # Handle Agent Log using log_type for type-safe dispatch + if output.status == AgentLog.LogStatus.START: + if output.log_type == AgentLog.LogType.ROUND: + # Start of a new round + message_file_ids: list[str] = [] + current_agent_thought_id = self.create_agent_thought( + message_id=message.id, + message="", + tool_name="", + tool_input="", + messages_ids=message_file_ids, + ) + has_published_thought = False + + elif output.log_type == AgentLog.LogType.TOOL_CALL: + if current_agent_thought_id is None: + continue + + # Tool call start - extract data from structured fields + current_tool_name = output.data.get("tool_name", "") + 
tool_input = output.data.get("tool_args", {}) + + self.save_agent_thought( + agent_thought_id=current_agent_thought_id, + tool_name=current_tool_name, + tool_input=tool_input, + thought=None, + observation=None, + tool_invoke_meta=None, + answer=None, + messages_ids=[], + ) + self.queue_manager.publish( + QueueAgentThoughtEvent(agent_thought_id=current_agent_thought_id), + PublishFrom.APPLICATION_MANAGER, + ) + + elif output.status == AgentLog.LogStatus.SUCCESS: + if output.log_type == AgentLog.LogType.THOUGHT: + if current_agent_thought_id is None: + continue + + thought_text = output.data.get("thought") + self.save_agent_thought( + agent_thought_id=current_agent_thought_id, + tool_name=None, + tool_input=None, + thought=thought_text, + observation=None, + tool_invoke_meta=None, + answer=None, + messages_ids=[], + ) + self.queue_manager.publish( + QueueAgentThoughtEvent(agent_thought_id=current_agent_thought_id), + PublishFrom.APPLICATION_MANAGER, + ) + + elif output.log_type == AgentLog.LogType.TOOL_CALL: + if current_agent_thought_id is None: + continue + + # Tool call finished + tool_output = output.data.get("output") + # Get meta from strategy output (now properly populated) + tool_meta = output.data.get("meta") + + # Wrap tool_meta with tool_name as key (required by agent_service) + if tool_meta and current_tool_name: + tool_meta = {current_tool_name: tool_meta} + + self.save_agent_thought( + agent_thought_id=current_agent_thought_id, + tool_name=None, + tool_input=None, + thought=None, + observation=tool_output, + tool_invoke_meta=tool_meta, + answer=None, + messages_ids=self._current_message_file_ids, + ) + # Clear message file ids after saving + self._current_message_file_ids = [] + current_tool_name = None + + self.queue_manager.publish( + QueueAgentThoughtEvent(agent_thought_id=current_agent_thought_id), + PublishFrom.APPLICATION_MANAGER, + ) + + elif output.log_type == AgentLog.LogType.ROUND: + if current_agent_thought_id is None: + continue + + # Round 
finished - save LLM usage and answer + llm_usage = output.metadata.get(AgentLog.LogMetadata.LLM_USAGE) + llm_result = output.data.get("llm_result") + final_answer = output.data.get("final_answer") + + self.save_agent_thought( + agent_thought_id=current_agent_thought_id, + tool_name=None, + tool_input=None, + thought=llm_result, + observation=None, + tool_invoke_meta=None, + answer=final_answer, + messages_ids=[], + llm_usage=llm_usage, + ) + self.queue_manager.publish( + QueueAgentThoughtEvent(agent_thought_id=current_agent_thought_id), + PublishFrom.APPLICATION_MANAGER, + ) + + except Exception: + # Re-raise any other exceptions + raise + + # Process final result + if isinstance(result, AgentResult): + final_answer = result.text + usage = result.usage or LLMUsage.empty_usage() + + # Publish end event + self.queue_manager.publish( + QueueMessageEndEvent( + llm_result=LLMResult( + model=self.model_instance.model, + prompt_messages=prompt_messages, + message=AssistantPromptMessage(content=final_answer), + usage=usage, + system_fingerprint="", + ) + ), + PublishFrom.APPLICATION_MANAGER, + ) + + def _init_system_message(self, prompt_template: str, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: + """ + Initialize system message + """ + if not prompt_template: + return prompt_messages or [] + + prompt_messages = prompt_messages or [] + + if prompt_messages and isinstance(prompt_messages[0], SystemPromptMessage): + prompt_messages[0] = SystemPromptMessage(content=prompt_template) + return prompt_messages + + if not prompt_messages: + return [SystemPromptMessage(content=prompt_template)] + + prompt_messages.insert(0, SystemPromptMessage(content=prompt_template)) + return prompt_messages + + def _organize_user_query(self, query: str, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: + """ + Organize user query + """ + if self.files: + # get image detail config + image_detail_config = ( + 
self.application_generate_entity.file_upload_config.image_config.detail + if ( + self.application_generate_entity.file_upload_config + and self.application_generate_entity.file_upload_config.image_config + ) + else None + ) + image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW + + prompt_message_contents: list[PromptMessageContentUnionTypes] = [] + for file in self.files: + prompt_message_contents.append( + file_manager.to_prompt_message_content( + file, + image_detail_config=image_detail_config, + ) + ) + prompt_message_contents.append(TextPromptMessageContent(data=query)) + + prompt_messages.append(UserPromptMessage(content=prompt_message_contents)) + else: + prompt_messages.append(UserPromptMessage(content=query)) + + return prompt_messages + + def _clear_user_prompt_image_messages(self, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: + """ + As for now, gpt supports both fc and vision at the first iteration. + We need to remove the image messages from the prompt messages at the first iteration. 
+ """ + prompt_messages = deepcopy(prompt_messages) + + for prompt_message in prompt_messages: + if isinstance(prompt_message, UserPromptMessage): + if isinstance(prompt_message.content, list): + prompt_message.content = "\n".join( + [ + content.data + if content.type == PromptMessageContentType.TEXT + else "[image]" + if content.type == PromptMessageContentType.IMAGE + else "[file]" + for content in prompt_message.content + ] + ) + + return prompt_messages + + def _organize_prompt_messages(self): + # For ReAct strategy, use the agent prompt template + if self.config.strategy == AgentEntity.Strategy.CHAIN_OF_THOUGHT and self.config.prompt: + prompt_template = self.config.prompt.first_prompt + else: + prompt_template = self.app_config.prompt_template.simple_prompt_template or "" + + self.history_prompt_messages = self._init_system_message(prompt_template, self.history_prompt_messages) + query_prompt_messages = self._organize_user_query(self.query or "", []) + + self.history_prompt_messages = AgentHistoryPromptTransform( + model_config=self.model_config, + prompt_messages=[*query_prompt_messages, *self._current_thoughts], + history_messages=self.history_prompt_messages, + memory=self.memory, + ).get_prompt() + + prompt_messages = [*self.history_prompt_messages, *query_prompt_messages, *self._current_thoughts] + if len(self._current_thoughts) != 0: + # clear messages after the first iteration + prompt_messages = self._clear_user_prompt_image_messages(prompt_messages) + return prompt_messages diff --git a/api/core/agent/base_agent_runner.py b/api/core/agent/base_agent_runner.py index 3c6d36afe4..b5459611b1 100644 --- a/api/core/agent/base_agent_runner.py +++ b/api/core/agent/base_agent_runner.py @@ -6,7 +6,7 @@ from typing import Union, cast from sqlalchemy import select -from core.agent.entities import AgentEntity, AgentToolEntity +from core.agent.entities import AgentEntity, AgentToolEntity, ExecutionContext from core.app.app_config.features.file_upload.manager 
import FileUploadConfigManager from core.app.apps.agent_chat.app_config_manager import AgentChatAppConfig from core.app.apps.base_app_queue_manager import AppQueueManager @@ -116,9 +116,20 @@ class BaseAgentRunner(AppRunner): features = model_schema.features if model_schema and model_schema.features else [] self.stream_tool_call = ModelFeature.STREAM_TOOL_CALL in features self.files = application_generate_entity.files if ModelFeature.VISION in features else [] + self.model_features = features self.query: str | None = "" self._current_thoughts: list[PromptMessage] = [] + def build_execution_context(self) -> ExecutionContext: + """Build execution context.""" + return ExecutionContext( + user_id=self.user_id, + app_id=self.app_config.app_id, + conversation_id=self.conversation.id, + message_id=self.message.id, + tenant_id=self.tenant_id, + ) + def _repack_app_generate_entity( self, app_generate_entity: AgentChatAppGenerateEntity ) -> AgentChatAppGenerateEntity: diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py deleted file mode 100644 index a55f2d0f5f..0000000000 --- a/api/core/agent/cot_agent_runner.py +++ /dev/null @@ -1,437 +0,0 @@ -import json -import logging -from abc import ABC, abstractmethod -from collections.abc import Generator, Mapping, Sequence -from typing import Any - -from core.agent.base_agent_runner import BaseAgentRunner -from core.agent.entities import AgentScratchpadUnit -from core.agent.output_parser.cot_output_parser import CotAgentOutputParser -from core.app.apps.base_app_queue_manager import PublishFrom -from core.app.entities.queue_entities import QueueAgentThoughtEvent, QueueMessageEndEvent, QueueMessageFileEvent -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - PromptMessageTool, - ToolPromptMessage, - UserPromptMessage, -) -from 
core.ops.ops_trace_manager import TraceQueueManager -from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform -from core.tools.__base.tool import Tool -from core.tools.entities.tool_entities import ToolInvokeMeta -from core.tools.tool_engine import ToolEngine -from core.workflow.nodes.agent.exc import AgentMaxIterationError -from models.model import Message - -logger = logging.getLogger(__name__) - - -class CotAgentRunner(BaseAgentRunner, ABC): - _is_first_iteration = True - _ignore_observation_providers = ["wenxin"] - _historic_prompt_messages: list[PromptMessage] - _agent_scratchpad: list[AgentScratchpadUnit] - _instruction: str - _query: str - _prompt_messages_tools: Sequence[PromptMessageTool] - - def run( - self, - message: Message, - query: str, - inputs: Mapping[str, str], - ) -> Generator: - """ - Run Cot agent application - """ - - app_generate_entity = self.application_generate_entity - self._repack_app_generate_entity(app_generate_entity) - self._init_react_state(query) - - trace_manager = app_generate_entity.trace_manager - - # check model mode - if "Observation" not in app_generate_entity.model_conf.stop: - if app_generate_entity.model_conf.provider not in self._ignore_observation_providers: - app_generate_entity.model_conf.stop.append("Observation") - - app_config = self.app_config - assert app_config.agent - - # init instruction - inputs = inputs or {} - instruction = app_config.prompt_template.simple_prompt_template or "" - self._instruction = self._fill_in_inputs_from_external_data_tools(instruction, inputs) - - iteration_step = 1 - max_iteration_steps = min(app_config.agent.max_iteration, 99) + 1 - - # convert tools into ModelRuntime Tool format - tool_instances, prompt_messages_tools = self._init_prompt_tools() - self._prompt_messages_tools = prompt_messages_tools - - function_call_state = True - llm_usage: dict[str, LLMUsage | None] = {"usage": None} - final_answer = "" - prompt_messages: list = [] # Initialize 
prompt_messages - agent_thought_id = "" # Initialize agent_thought_id - - def increase_usage(final_llm_usage_dict: dict[str, LLMUsage | None], usage: LLMUsage): - if not final_llm_usage_dict["usage"]: - final_llm_usage_dict["usage"] = usage - else: - llm_usage = final_llm_usage_dict["usage"] - llm_usage.prompt_tokens += usage.prompt_tokens - llm_usage.completion_tokens += usage.completion_tokens - llm_usage.total_tokens += usage.total_tokens - llm_usage.prompt_price += usage.prompt_price - llm_usage.completion_price += usage.completion_price - llm_usage.total_price += usage.total_price - - model_instance = self.model_instance - - while function_call_state and iteration_step <= max_iteration_steps: - # continue to run until there is not any tool call - function_call_state = False - - if iteration_step == max_iteration_steps: - # the last iteration, remove all tools - self._prompt_messages_tools = [] - - message_file_ids: list[str] = [] - - agent_thought_id = self.create_agent_thought( - message_id=message.id, message="", tool_name="", tool_input="", messages_ids=message_file_ids - ) - - if iteration_step > 1: - self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER - ) - - # recalc llm max tokens - prompt_messages = self._organize_prompt_messages() - self.recalc_llm_max_tokens(self.model_config, prompt_messages) - # invoke model - chunks = model_instance.invoke_llm( - prompt_messages=prompt_messages, - model_parameters=app_generate_entity.model_conf.parameters, - tools=[], - stop=app_generate_entity.model_conf.stop, - stream=True, - user=self.user_id, - callbacks=[], - ) - - usage_dict: dict[str, LLMUsage | None] = {} - react_chunks = CotAgentOutputParser.handle_react_stream_output(chunks, usage_dict) - scratchpad = AgentScratchpadUnit( - agent_response="", - thought="", - action_str="", - observation="", - action=None, - ) - - # publish agent thought if it's first iteration - if iteration_step == 1: 
- self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER - ) - - for chunk in react_chunks: - if isinstance(chunk, AgentScratchpadUnit.Action): - action = chunk - # detect action - assert scratchpad.agent_response is not None - scratchpad.agent_response += json.dumps(chunk.model_dump()) - scratchpad.action_str = json.dumps(chunk.model_dump()) - scratchpad.action = action - else: - assert scratchpad.agent_response is not None - scratchpad.agent_response += chunk - assert scratchpad.thought is not None - scratchpad.thought += chunk - yield LLMResultChunk( - model=self.model_config.model, - prompt_messages=prompt_messages, - system_fingerprint="", - delta=LLMResultChunkDelta(index=0, message=AssistantPromptMessage(content=chunk), usage=None), - ) - - assert scratchpad.thought is not None - scratchpad.thought = scratchpad.thought.strip() or "I am thinking about how to help you" - self._agent_scratchpad.append(scratchpad) - - # Check if max iteration is reached and model still wants to call tools - if iteration_step == max_iteration_steps and scratchpad.action: - if scratchpad.action.action_name.lower() != "final answer": - raise AgentMaxIterationError(app_config.agent.max_iteration) - - # get llm usage - if "usage" in usage_dict: - if usage_dict["usage"] is not None: - increase_usage(llm_usage, usage_dict["usage"]) - else: - usage_dict["usage"] = LLMUsage.empty_usage() - - self.save_agent_thought( - agent_thought_id=agent_thought_id, - tool_name=(scratchpad.action.action_name if scratchpad.action and not scratchpad.is_final() else ""), - tool_input={scratchpad.action.action_name: scratchpad.action.action_input} if scratchpad.action else {}, - tool_invoke_meta={}, - thought=scratchpad.thought or "", - observation="", - answer=scratchpad.agent_response or "", - messages_ids=[], - llm_usage=usage_dict["usage"], - ) - - if not scratchpad.is_final(): - self.queue_manager.publish( - 
QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER - ) - - if not scratchpad.action: - # failed to extract action, return final answer directly - final_answer = "" - else: - if scratchpad.action.action_name.lower() == "final answer": - # action is final answer, return final answer directly - try: - if isinstance(scratchpad.action.action_input, dict): - final_answer = json.dumps(scratchpad.action.action_input, ensure_ascii=False) - elif isinstance(scratchpad.action.action_input, str): - final_answer = scratchpad.action.action_input - else: - final_answer = f"{scratchpad.action.action_input}" - except TypeError: - final_answer = f"{scratchpad.action.action_input}" - else: - function_call_state = True - # action is tool call, invoke tool - tool_invoke_response, tool_invoke_meta = self._handle_invoke_action( - action=scratchpad.action, - tool_instances=tool_instances, - message_file_ids=message_file_ids, - trace_manager=trace_manager, - ) - scratchpad.observation = tool_invoke_response - scratchpad.agent_response = tool_invoke_response - - self.save_agent_thought( - agent_thought_id=agent_thought_id, - tool_name=scratchpad.action.action_name, - tool_input={scratchpad.action.action_name: scratchpad.action.action_input}, - thought=scratchpad.thought or "", - observation={scratchpad.action.action_name: tool_invoke_response}, - tool_invoke_meta={scratchpad.action.action_name: tool_invoke_meta.to_dict()}, - answer=scratchpad.agent_response, - messages_ids=message_file_ids, - llm_usage=usage_dict["usage"], - ) - - self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER - ) - - # update prompt tool message - for prompt_tool in self._prompt_messages_tools: - self.update_prompt_message_tool(tool_instances[prompt_tool.name], prompt_tool) - - iteration_step += 1 - - yield LLMResultChunk( - model=model_instance.model, - prompt_messages=prompt_messages, - 
delta=LLMResultChunkDelta( - index=0, message=AssistantPromptMessage(content=final_answer), usage=llm_usage["usage"] - ), - system_fingerprint="", - ) - - # save agent thought - self.save_agent_thought( - agent_thought_id=agent_thought_id, - tool_name="", - tool_input={}, - tool_invoke_meta={}, - thought=final_answer, - observation={}, - answer=final_answer, - messages_ids=[], - ) - # publish end event - self.queue_manager.publish( - QueueMessageEndEvent( - llm_result=LLMResult( - model=model_instance.model, - prompt_messages=prompt_messages, - message=AssistantPromptMessage(content=final_answer), - usage=llm_usage["usage"] or LLMUsage.empty_usage(), - system_fingerprint="", - ) - ), - PublishFrom.APPLICATION_MANAGER, - ) - - def _handle_invoke_action( - self, - action: AgentScratchpadUnit.Action, - tool_instances: Mapping[str, Tool], - message_file_ids: list[str], - trace_manager: TraceQueueManager | None = None, - ) -> tuple[str, ToolInvokeMeta]: - """ - handle invoke action - :param action: action - :param tool_instances: tool instances - :param message_file_ids: message file ids - :param trace_manager: trace manager - :return: observation, meta - """ - # action is tool call, invoke tool - tool_call_name = action.action_name - tool_call_args = action.action_input - tool_instance = tool_instances.get(tool_call_name) - - if not tool_instance: - answer = f"there is not a tool named {tool_call_name}" - return answer, ToolInvokeMeta.error_instance(answer) - - if isinstance(tool_call_args, str): - try: - tool_call_args = json.loads(tool_call_args) - except json.JSONDecodeError: - pass - - # invoke tool - tool_invoke_response, message_files, tool_invoke_meta = ToolEngine.agent_invoke( - tool=tool_instance, - tool_parameters=tool_call_args, - user_id=self.user_id, - tenant_id=self.tenant_id, - message=self.message, - invoke_from=self.application_generate_entity.invoke_from, - agent_tool_callback=self.agent_callback, - trace_manager=trace_manager, - ) - - # publish files 
- for message_file_id in message_files: - # publish message file - self.queue_manager.publish( - QueueMessageFileEvent(message_file_id=message_file_id), PublishFrom.APPLICATION_MANAGER - ) - # add message file ids - message_file_ids.append(message_file_id) - - return tool_invoke_response, tool_invoke_meta - - def _convert_dict_to_action(self, action: dict) -> AgentScratchpadUnit.Action: - """ - convert dict to action - """ - return AgentScratchpadUnit.Action(action_name=action["action"], action_input=action["action_input"]) - - def _fill_in_inputs_from_external_data_tools(self, instruction: str, inputs: Mapping[str, Any]) -> str: - """ - fill in inputs from external data tools - """ - for key, value in inputs.items(): - try: - instruction = instruction.replace(f"{{{{{key}}}}}", str(value)) - except Exception: - continue - - return instruction - - def _init_react_state(self, query): - """ - init agent scratchpad - """ - self._query = query - self._agent_scratchpad = [] - self._historic_prompt_messages = self._organize_historic_prompt_messages() - - @abstractmethod - def _organize_prompt_messages(self) -> list[PromptMessage]: - """ - organize prompt messages - """ - - def _format_assistant_message(self, agent_scratchpad: list[AgentScratchpadUnit]) -> str: - """ - format assistant message - """ - message = "" - for scratchpad in agent_scratchpad: - if scratchpad.is_final(): - message += f"Final Answer: {scratchpad.agent_response}" - else: - message += f"Thought: {scratchpad.thought}\n\n" - if scratchpad.action_str: - message += f"Action: {scratchpad.action_str}\n\n" - if scratchpad.observation: - message += f"Observation: {scratchpad.observation}\n\n" - - return message - - def _organize_historic_prompt_messages( - self, current_session_messages: list[PromptMessage] | None = None - ) -> list[PromptMessage]: - """ - organize historic prompt messages - """ - result: list[PromptMessage] = [] - scratchpads: list[AgentScratchpadUnit] = [] - current_scratchpad: 
AgentScratchpadUnit | None = None - - for message in self.history_prompt_messages: - if isinstance(message, AssistantPromptMessage): - if not current_scratchpad: - assert isinstance(message.content, str) - current_scratchpad = AgentScratchpadUnit( - agent_response=message.content, - thought=message.content or "I am thinking about how to help you", - action_str="", - action=None, - observation=None, - ) - scratchpads.append(current_scratchpad) - if message.tool_calls: - try: - current_scratchpad.action = AgentScratchpadUnit.Action( - action_name=message.tool_calls[0].function.name, - action_input=json.loads(message.tool_calls[0].function.arguments), - ) - current_scratchpad.action_str = json.dumps(current_scratchpad.action.to_dict()) - except Exception: - logger.exception("Failed to parse tool call from assistant message") - elif isinstance(message, ToolPromptMessage): - if current_scratchpad: - assert isinstance(message.content, str) - current_scratchpad.observation = message.content - else: - raise NotImplementedError("expected str type") - elif isinstance(message, UserPromptMessage): - if scratchpads: - result.append(AssistantPromptMessage(content=self._format_assistant_message(scratchpads))) - scratchpads = [] - current_scratchpad = None - - result.append(message) - - if scratchpads: - result.append(AssistantPromptMessage(content=self._format_assistant_message(scratchpads))) - - historic_prompts = AgentHistoryPromptTransform( - model_config=self.model_config, - prompt_messages=current_session_messages or [], - history_messages=result, - memory=self.memory, - ).get_prompt() - return historic_prompts diff --git a/api/core/agent/cot_chat_agent_runner.py b/api/core/agent/cot_chat_agent_runner.py deleted file mode 100644 index 4d1d94eadc..0000000000 --- a/api/core/agent/cot_chat_agent_runner.py +++ /dev/null @@ -1,118 +0,0 @@ -import json - -from core.agent.cot_agent_runner import CotAgentRunner -from core.file import file_manager -from core.model_runtime.entities 
import ( - AssistantPromptMessage, - PromptMessage, - SystemPromptMessage, - TextPromptMessageContent, - UserPromptMessage, -) -from core.model_runtime.entities.message_entities import ImagePromptMessageContent, PromptMessageContentUnionTypes -from core.model_runtime.utils.encoders import jsonable_encoder - - -class CotChatAgentRunner(CotAgentRunner): - def _organize_system_prompt(self) -> SystemPromptMessage: - """ - Organize system prompt - """ - assert self.app_config.agent - assert self.app_config.agent.prompt - - prompt_entity = self.app_config.agent.prompt - if not prompt_entity: - raise ValueError("Agent prompt configuration is not set") - first_prompt = prompt_entity.first_prompt - - system_prompt = ( - first_prompt.replace("{{instruction}}", self._instruction) - .replace("{{tools}}", json.dumps(jsonable_encoder(self._prompt_messages_tools))) - .replace("{{tool_names}}", ", ".join([tool.name for tool in self._prompt_messages_tools])) - ) - - return SystemPromptMessage(content=system_prompt) - - def _organize_user_query(self, query, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: - """ - Organize user query - """ - if self.files: - # get image detail config - image_detail_config = ( - self.application_generate_entity.file_upload_config.image_config.detail - if ( - self.application_generate_entity.file_upload_config - and self.application_generate_entity.file_upload_config.image_config - ) - else None - ) - image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW - - prompt_message_contents: list[PromptMessageContentUnionTypes] = [] - for file in self.files: - prompt_message_contents.append( - file_manager.to_prompt_message_content( - file, - image_detail_config=image_detail_config, - ) - ) - prompt_message_contents.append(TextPromptMessageContent(data=query)) - - prompt_messages.append(UserPromptMessage(content=prompt_message_contents)) - else: - prompt_messages.append(UserPromptMessage(content=query)) - - return 
prompt_messages - - def _organize_prompt_messages(self) -> list[PromptMessage]: - """ - Organize - """ - # organize system prompt - system_message = self._organize_system_prompt() - - # organize current assistant messages - agent_scratchpad = self._agent_scratchpad - if not agent_scratchpad: - assistant_messages = [] - else: - assistant_message = AssistantPromptMessage(content="") - assistant_message.content = "" # FIXME: type check tell mypy that assistant_message.content is str - for unit in agent_scratchpad: - if unit.is_final(): - assert isinstance(assistant_message.content, str) - assistant_message.content += f"Final Answer: {unit.agent_response}" - else: - assert isinstance(assistant_message.content, str) - assistant_message.content += f"Thought: {unit.thought}\n\n" - if unit.action_str: - assistant_message.content += f"Action: {unit.action_str}\n\n" - if unit.observation: - assistant_message.content += f"Observation: {unit.observation}\n\n" - - assistant_messages = [assistant_message] - - # query messages - query_messages = self._organize_user_query(self._query, []) - - if assistant_messages: - # organize historic prompt messages - historic_messages = self._organize_historic_prompt_messages( - [system_message, *query_messages, *assistant_messages, UserPromptMessage(content="continue")] - ) - messages = [ - system_message, - *historic_messages, - *query_messages, - *assistant_messages, - UserPromptMessage(content="continue"), - ] - else: - # organize historic prompt messages - historic_messages = self._organize_historic_prompt_messages([system_message, *query_messages]) - messages = [system_message, *historic_messages, *query_messages] - - # join all messages - return messages diff --git a/api/core/agent/cot_completion_agent_runner.py b/api/core/agent/cot_completion_agent_runner.py deleted file mode 100644 index da9a001d84..0000000000 --- a/api/core/agent/cot_completion_agent_runner.py +++ /dev/null @@ -1,87 +0,0 @@ -import json - -from 
core.agent.cot_agent_runner import CotAgentRunner -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - TextPromptMessageContent, - UserPromptMessage, -) -from core.model_runtime.utils.encoders import jsonable_encoder - - -class CotCompletionAgentRunner(CotAgentRunner): - def _organize_instruction_prompt(self) -> str: - """ - Organize instruction prompt - """ - if self.app_config.agent is None: - raise ValueError("Agent configuration is not set") - prompt_entity = self.app_config.agent.prompt - if prompt_entity is None: - raise ValueError("prompt entity is not set") - first_prompt = prompt_entity.first_prompt - - system_prompt = ( - first_prompt.replace("{{instruction}}", self._instruction) - .replace("{{tools}}", json.dumps(jsonable_encoder(self._prompt_messages_tools))) - .replace("{{tool_names}}", ", ".join([tool.name for tool in self._prompt_messages_tools])) - ) - - return system_prompt - - def _organize_historic_prompt(self, current_session_messages: list[PromptMessage] | None = None) -> str: - """ - Organize historic prompt - """ - historic_prompt_messages = self._organize_historic_prompt_messages(current_session_messages) - historic_prompt = "" - - for message in historic_prompt_messages: - if isinstance(message, UserPromptMessage): - historic_prompt += f"Question: {message.content}\n\n" - elif isinstance(message, AssistantPromptMessage): - if isinstance(message.content, str): - historic_prompt += message.content + "\n\n" - elif isinstance(message.content, list): - for content in message.content: - if not isinstance(content, TextPromptMessageContent): - continue - historic_prompt += content.data - - return historic_prompt - - def _organize_prompt_messages(self) -> list[PromptMessage]: - """ - Organize prompt messages - """ - # organize system prompt - system_prompt = self._organize_instruction_prompt() - - # organize historic prompt messages - historic_prompt = self._organize_historic_prompt() - - # 
organize current assistant messages - agent_scratchpad = self._agent_scratchpad - assistant_prompt = "" - for unit in agent_scratchpad or []: - if unit.is_final(): - assistant_prompt += f"Final Answer: {unit.agent_response}" - else: - assistant_prompt += f"Thought: {unit.thought}\n\n" - if unit.action_str: - assistant_prompt += f"Action: {unit.action_str}\n\n" - if unit.observation: - assistant_prompt += f"Observation: {unit.observation}\n\n" - - # query messages - query_prompt = f"Question: {self._query}" - - # join all messages - prompt = ( - system_prompt.replace("{{historic_messages}}", historic_prompt) - .replace("{{agent_scratchpad}}", assistant_prompt) - .replace("{{query}}", query_prompt) - ) - - return [UserPromptMessage(content=prompt)] diff --git a/api/core/agent/entities.py b/api/core/agent/entities.py index 220feced1d..46af4d2d72 100644 --- a/api/core/agent/entities.py +++ b/api/core/agent/entities.py @@ -1,3 +1,5 @@ +import uuid +from collections.abc import Mapping from enum import StrEnum from typing import Any, Union @@ -92,3 +94,96 @@ class AgentInvokeMessage(ToolInvokeMessage): """ pass + + +class ExecutionContext(BaseModel): + """Execution context containing trace and audit information. + + This context carries all the IDs and metadata that are not part of + the core business logic but needed for tracing, auditing, and + correlation purposes. 
+ """ + + user_id: str | None = None + app_id: str | None = None + conversation_id: str | None = None + message_id: str | None = None + tenant_id: str | None = None + + @classmethod + def create_minimal(cls, user_id: str | None = None) -> "ExecutionContext": + """Create a minimal context with only essential fields.""" + return cls(user_id=user_id) + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary for passing to legacy code.""" + return { + "user_id": self.user_id, + "app_id": self.app_id, + "conversation_id": self.conversation_id, + "message_id": self.message_id, + "tenant_id": self.tenant_id, + } + + def with_updates(self, **kwargs) -> "ExecutionContext": + """Create a new context with updated fields.""" + data = self.to_dict() + data.update(kwargs) + + return ExecutionContext( + user_id=data.get("user_id"), + app_id=data.get("app_id"), + conversation_id=data.get("conversation_id"), + message_id=data.get("message_id"), + tenant_id=data.get("tenant_id"), + ) + + +class AgentLog(BaseModel): + """ + Agent Log. 
+ """ + + class LogType(StrEnum): + """Type of agent log entry.""" + + ROUND = "round" # A complete iteration round + THOUGHT = "thought" # LLM thinking/reasoning + TOOL_CALL = "tool_call" # Tool invocation + + class LogMetadata(StrEnum): + STARTED_AT = "started_at" + FINISHED_AT = "finished_at" + ELAPSED_TIME = "elapsed_time" + TOTAL_PRICE = "total_price" + TOTAL_TOKENS = "total_tokens" + PROVIDER = "provider" + CURRENCY = "currency" + LLM_USAGE = "llm_usage" + ICON = "icon" + ICON_DARK = "icon_dark" + + class LogStatus(StrEnum): + START = "start" + ERROR = "error" + SUCCESS = "success" + + id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="The id of the log") + label: str = Field(..., description="The label of the log") + log_type: LogType = Field(..., description="The type of the log") + parent_id: str | None = Field(default=None, description="Leave empty for root log") + error: str | None = Field(default=None, description="The error message") + status: LogStatus = Field(..., description="The status of the log") + data: Mapping[str, Any] = Field(..., description="Detailed log data") + metadata: Mapping[LogMetadata, Any] = Field(default={}, description="The metadata of the log") + + +class AgentResult(BaseModel): + """ + Agent execution result. + """ + + text: str = Field(default="", description="The generated text") + files: list[Any] = Field(default_factory=list, description="Files produced during execution") + usage: Any | None = Field(default=None, description="LLM usage statistics") + finish_reason: str | None = Field(default=None, description="Reason for completion") diff --git a/api/core/agent/patterns/README.md b/api/core/agent/patterns/README.md new file mode 100644 index 0000000000..95b1bf87fa --- /dev/null +++ b/api/core/agent/patterns/README.md @@ -0,0 +1,55 @@ +# Agent Patterns + +A unified agent pattern module that powers both Agent V2 workflow nodes and agent applications. 
Strategies share a common execution contract while adapting to model capabilities and tool availability. + +## Overview + +The module applies a strategy pattern around LLM/tool orchestration. `StrategyFactory` auto-selects the best implementation based on model features or an explicit agent strategy, and each strategy streams logs and usage consistently. + +## Key Features + +- **Dual strategies** + - `FunctionCallStrategy`: uses native LLM function/tool calling when the model exposes `TOOL_CALL`, `MULTI_TOOL_CALL`, or `STREAM_TOOL_CALL`. + - `ReActStrategy`: ReAct (reasoning + acting) flow driven by `CotAgentOutputParser`, used when function calling is unavailable or explicitly requested. +- **Explicit or auto selection** + - `StrategyFactory.create_strategy` prefers an explicit `AgentEntity.Strategy` (FUNCTION_CALLING or CHAIN_OF_THOUGHT). + - Otherwise it falls back to function calling when tool-call features exist, or ReAct when they do not. +- **Unified execution contract** + - `AgentPattern.run` yields streaming `AgentLog` entries and `LLMResultChunk` data, returning an `AgentResult` with text, files, usage, and `finish_reason`. + - Iterations are configurable and hard-capped at 99 rounds; the last round forces a final answer by withholding tools. +- **Tool handling and hooks** + - Tools convert to `PromptMessageTool` objects before invocation. + - Optional `tool_invoke_hook` lets callers override tool execution (e.g., agent apps) while workflow runs use `ToolEngine.generic_invoke`. + - Tool outputs support text, links, JSON, variables, blobs, retriever resources, and file attachments; `target=="self"` files are reloaded into model context, others are returned as outputs. +- **File-aware arguments** + - Tool args accept `[File: ]` or `[Files: ]` placeholders that resolve to `File` objects before invocation, enabling models to reference uploaded files safely. 
+- **ReAct prompt shaping** + - System prompts replace `{{instruction}}`, `{{tools}}`, and `{{tool_names}}` placeholders. + - Adds `Observation` to stop sequences and appends scratchpad text so the model sees prior Thought/Action/Observation history. +- **Observability and accounting** + - Standardized `AgentLog` entries for rounds, model thoughts, and tool calls, including usage aggregation (`LLMUsage`) across streaming and non-streaming paths. + +## Architecture + +``` +agent/patterns/ +├── base.py # Shared utilities: logging, usage, tool invocation, file handling +├── function_call.py # Native function-calling loop with tool execution +├── react.py # ReAct loop with CoT parsing and scratchpad wiring +└── strategy_factory.py # Strategy selection by model features or explicit override +``` + +## Usage + +- For auto-selection: + - Call `StrategyFactory.create_strategy(model_features, model_instance, context, tools, files, ...)` and run the returned strategy with prompt messages and model params. +- For explicit behavior: + - Pass `agent_strategy=AgentEntity.Strategy.FUNCTION_CALLING` to force native calls (falls back to ReAct if unsupported), or `CHAIN_OF_THOUGHT` to force ReAct. +- Both strategies stream chunks and logs; collect the generator output until it returns an `AgentResult`. + +## Integration Points + +- **Model runtime**: delegates to `ModelInstance.invoke_llm` for both streaming and non-streaming calls. +- **Tool system**: defaults to `ToolEngine.generic_invoke`, with `tool_invoke_hook` for custom callers. +- **Files**: flows through `File` objects for tool inputs/outputs and model-context attachments. +- **Execution context**: `ExecutionContext` fields (user/app/conversation/message) propagate to tool invocations and logging. 
diff --git a/api/core/agent/patterns/__init__.py b/api/core/agent/patterns/__init__.py new file mode 100644 index 0000000000..8a3b125533 --- /dev/null +++ b/api/core/agent/patterns/__init__.py @@ -0,0 +1,19 @@ +"""Agent patterns module. + +This module provides different strategies for agent execution: +- FunctionCallStrategy: Uses native function/tool calling +- ReActStrategy: Uses ReAct (Reasoning + Acting) approach +- StrategyFactory: Factory for creating strategies based on model features +""" + +from .base import AgentPattern +from .function_call import FunctionCallStrategy +from .react import ReActStrategy +from .strategy_factory import StrategyFactory + +__all__ = [ + "AgentPattern", + "FunctionCallStrategy", + "ReActStrategy", + "StrategyFactory", +] diff --git a/api/core/agent/patterns/base.py b/api/core/agent/patterns/base.py new file mode 100644 index 0000000000..d98fa005a3 --- /dev/null +++ b/api/core/agent/patterns/base.py @@ -0,0 +1,474 @@ +"""Base class for agent strategies.""" + +from __future__ import annotations + +import json +import re +import time +from abc import ABC, abstractmethod +from collections.abc import Callable, Generator +from typing import TYPE_CHECKING, Any + +from core.agent.entities import AgentLog, AgentResult, ExecutionContext +from core.file import File +from core.model_manager import ModelInstance +from core.model_runtime.entities import ( + AssistantPromptMessage, + LLMResult, + LLMResultChunk, + LLMResultChunkDelta, + PromptMessage, + PromptMessageTool, +) +from core.model_runtime.entities.llm_entities import LLMUsage +from core.model_runtime.entities.message_entities import TextPromptMessageContent +from core.tools.entities.tool_entities import ToolInvokeMessage, ToolInvokeMeta + +if TYPE_CHECKING: + from core.tools.__base.tool import Tool + +# Type alias for tool invoke hook +# Returns: (response_content, message_file_ids, tool_invoke_meta) +ToolInvokeHook = Callable[["Tool", dict[str, Any], str], tuple[str, list[str], 
ToolInvokeMeta]] + + +class AgentPattern(ABC): + """Base class for agent execution strategies.""" + + def __init__( + self, + model_instance: ModelInstance, + tools: list[Tool], + context: ExecutionContext, + max_iterations: int = 10, + workflow_call_depth: int = 0, + files: list[File] = [], + tool_invoke_hook: ToolInvokeHook | None = None, + ): + """Initialize the agent strategy.""" + self.model_instance = model_instance + self.tools = tools + self.context = context + self.max_iterations = min(max_iterations, 99) # Cap at 99 iterations + self.workflow_call_depth = workflow_call_depth + self.files: list[File] = files + self.tool_invoke_hook = tool_invoke_hook + + @abstractmethod + def run( + self, + prompt_messages: list[PromptMessage], + model_parameters: dict[str, Any], + stop: list[str] = [], + stream: bool = True, + ) -> Generator[LLMResultChunk | AgentLog, None, AgentResult]: + """Execute the agent strategy.""" + pass + + def _accumulate_usage(self, total_usage: dict[str, Any], delta_usage: LLMUsage) -> None: + """Accumulate LLM usage statistics.""" + if not total_usage.get("usage"): + # Create a copy to avoid modifying the original + total_usage["usage"] = LLMUsage( + prompt_tokens=delta_usage.prompt_tokens, + prompt_unit_price=delta_usage.prompt_unit_price, + prompt_price_unit=delta_usage.prompt_price_unit, + prompt_price=delta_usage.prompt_price, + completion_tokens=delta_usage.completion_tokens, + completion_unit_price=delta_usage.completion_unit_price, + completion_price_unit=delta_usage.completion_price_unit, + completion_price=delta_usage.completion_price, + total_tokens=delta_usage.total_tokens, + total_price=delta_usage.total_price, + currency=delta_usage.currency, + latency=delta_usage.latency, + ) + else: + current: LLMUsage = total_usage["usage"] + current.prompt_tokens += delta_usage.prompt_tokens + current.completion_tokens += delta_usage.completion_tokens + current.total_tokens += delta_usage.total_tokens + current.prompt_price += 
delta_usage.prompt_price + current.completion_price += delta_usage.completion_price + current.total_price += delta_usage.total_price + + def _extract_content(self, content: Any) -> str: + """Extract text content from message content.""" + if isinstance(content, list): + # Content items are PromptMessageContentUnionTypes + text_parts = [] + for c in content: + # Check if it's a TextPromptMessageContent (which has data attribute) + if isinstance(c, TextPromptMessageContent): + text_parts.append(c.data) + return "".join(text_parts) + return str(content) + + def _has_tool_calls(self, chunk: LLMResultChunk) -> bool: + """Check if chunk contains tool calls.""" + # LLMResultChunk always has delta attribute + return bool(chunk.delta.message and chunk.delta.message.tool_calls) + + def _has_tool_calls_result(self, result: LLMResult) -> bool: + """Check if result contains tool calls (non-streaming).""" + # LLMResult always has message attribute + return bool(result.message and result.message.tool_calls) + + def _extract_tool_calls(self, chunk: LLMResultChunk) -> list[tuple[str, str, dict[str, Any]]]: + """Extract tool calls from streaming chunk.""" + tool_calls: list[tuple[str, str, dict[str, Any]]] = [] + if chunk.delta.message and chunk.delta.message.tool_calls: + for tool_call in chunk.delta.message.tool_calls: + if tool_call.function: + try: + args = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {} + except json.JSONDecodeError: + args = {} + tool_calls.append((tool_call.id or "", tool_call.function.name, args)) + return tool_calls + + def _extract_tool_calls_result(self, result: LLMResult) -> list[tuple[str, str, dict[str, Any]]]: + """Extract tool calls from non-streaming result.""" + tool_calls = [] + if result.message and result.message.tool_calls: + for tool_call in result.message.tool_calls: + if tool_call.function: + try: + args = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {} + except 
json.JSONDecodeError: + args = {} + tool_calls.append((tool_call.id or "", tool_call.function.name, args)) + return tool_calls + + def _extract_text_from_message(self, message: PromptMessage) -> str: + """Extract text content from a prompt message.""" + # PromptMessage always has content attribute + content = message.content + if isinstance(content, str): + return content + elif isinstance(content, list): + # Extract text from content list + text_parts = [] + for item in content: + if isinstance(item, TextPromptMessageContent): + text_parts.append(item.data) + return " ".join(text_parts) + return "" + + def _get_tool_metadata(self, tool_instance: Tool) -> dict[AgentLog.LogMetadata, Any]: + """Get metadata for a tool including provider and icon info.""" + from core.tools.tool_manager import ToolManager + + metadata: dict[AgentLog.LogMetadata, Any] = {} + if tool_instance.entity and tool_instance.entity.identity: + identity = tool_instance.entity.identity + if identity.provider: + metadata[AgentLog.LogMetadata.PROVIDER] = identity.provider + + # Get icon using ToolManager for proper URL generation + tenant_id = self.context.tenant_id + if tenant_id and identity.provider: + try: + provider_type = tool_instance.tool_provider_type() + icon = ToolManager.get_tool_icon(tenant_id, provider_type, identity.provider) + if isinstance(icon, str): + metadata[AgentLog.LogMetadata.ICON] = icon + elif isinstance(icon, dict): + # Handle icon dict with background/content or light/dark variants + metadata[AgentLog.LogMetadata.ICON] = icon + except Exception: + # Fallback to identity.icon if ToolManager fails + if identity.icon: + metadata[AgentLog.LogMetadata.ICON] = identity.icon + elif identity.icon: + metadata[AgentLog.LogMetadata.ICON] = identity.icon + return metadata + + def _create_log( + self, + label: str, + log_type: AgentLog.LogType, + status: AgentLog.LogStatus, + data: dict[str, Any] | None = None, + parent_id: str | None = None, + extra_metadata: 
dict[AgentLog.LogMetadata, Any] | None = None, + ) -> AgentLog: + """Create a new AgentLog with standard metadata.""" + metadata: dict[AgentLog.LogMetadata, Any] = { + AgentLog.LogMetadata.STARTED_AT: time.perf_counter(), + } + if extra_metadata: + metadata.update(extra_metadata) + + return AgentLog( + label=label, + log_type=log_type, + status=status, + data=data or {}, + parent_id=parent_id, + metadata=metadata, + ) + + def _finish_log( + self, + log: AgentLog, + data: dict[str, Any] | None = None, + usage: LLMUsage | None = None, + ) -> AgentLog: + """Finish an AgentLog by updating its status and metadata.""" + log.status = AgentLog.LogStatus.SUCCESS + + if data is not None: + log.data = data + + # Calculate elapsed time + started_at = log.metadata.get(AgentLog.LogMetadata.STARTED_AT, time.perf_counter()) + finished_at = time.perf_counter() + + # Update metadata + log.metadata = { + **log.metadata, + AgentLog.LogMetadata.FINISHED_AT: finished_at, + # Calculate elapsed time in seconds + AgentLog.LogMetadata.ELAPSED_TIME: round(finished_at - started_at, 4), + } + + # Add usage information if provided + if usage: + log.metadata.update( + { + AgentLog.LogMetadata.TOTAL_PRICE: usage.total_price, + AgentLog.LogMetadata.CURRENCY: usage.currency, + AgentLog.LogMetadata.TOTAL_TOKENS: usage.total_tokens, + AgentLog.LogMetadata.LLM_USAGE: usage, + } + ) + + return log + + def _replace_file_references(self, tool_args: dict[str, Any]) -> dict[str, Any]: + """ + Replace file references in tool arguments with actual File objects. + + Args: + tool_args: Dictionary of tool arguments + + Returns: + Updated tool arguments with file references replaced + """ + # Process each argument in the dictionary + processed_args: dict[str, Any] = {} + for key, value in tool_args.items(): + processed_args[key] = self._process_file_reference(value) + return processed_args + + def _process_file_reference(self, data: Any) -> Any: + """ + Recursively process data to replace file references. 
+ Supports both single file [File: file_id] and multiple files [Files: file_id1, file_id2, ...]. + + Args: + data: The data to process (can be dict, list, str, or other types) + + Returns: + Processed data with file references replaced + """ + single_file_pattern = re.compile(r"^\[File:\s*([^\]]+)\]$") + multiple_files_pattern = re.compile(r"^\[Files:\s*([^\]]+)\]$") + + if isinstance(data, dict): + # Process dictionary recursively + return {key: self._process_file_reference(value) for key, value in data.items()} + elif isinstance(data, list): + # Process list recursively + return [self._process_file_reference(item) for item in data] + elif isinstance(data, str): + # Check for single file pattern [File: file_id] + single_match = single_file_pattern.match(data.strip()) + if single_match: + file_id = single_match.group(1).strip() + # Find the file in self.files + for file in self.files: + if file.id and str(file.id) == file_id: + return file + # If file not found, return original value + return data + + # Check for multiple files pattern [Files: file_id1, file_id2, ...] 
+ multiple_match = multiple_files_pattern.match(data.strip()) + if multiple_match: + file_ids_str = multiple_match.group(1).strip() + # Split by comma and strip whitespace + file_ids = [fid.strip() for fid in file_ids_str.split(",")] + + # Find all matching files + matched_files: list[File] = [] + for file_id in file_ids: + for file in self.files: + if file.id and str(file.id) == file_id: + matched_files.append(file) + break + + # Return list of files if any were found, otherwise return original + return matched_files or data + + return data + else: + # Return other types as-is + return data + + def _create_text_chunk(self, text: str, prompt_messages: list[PromptMessage]) -> LLMResultChunk: + """Create a text chunk for streaming.""" + return LLMResultChunk( + model=self.model_instance.model, + prompt_messages=prompt_messages, + delta=LLMResultChunkDelta( + index=0, + message=AssistantPromptMessage(content=text), + usage=None, + ), + system_fingerprint="", + ) + + def _invoke_tool( + self, + tool_instance: Tool, + tool_args: dict[str, Any], + tool_name: str, + ) -> tuple[str, list[File], ToolInvokeMeta | None]: + """ + Invoke a tool and collect its response. 
+ + Args: + tool_instance: The tool instance to invoke + tool_args: Tool arguments + tool_name: Name of the tool + + Returns: + Tuple of (response_content, tool_files, tool_invoke_meta) + """ + # Process tool_args to replace file references with actual File objects + tool_args = self._replace_file_references(tool_args) + + # If a tool invoke hook is set, use it instead of generic_invoke + if self.tool_invoke_hook: + response_content, _, tool_invoke_meta = self.tool_invoke_hook(tool_instance, tool_args, tool_name) + # Note: message_file_ids are stored in DB, we don't convert them to File objects here + # The caller (AgentAppRunner) handles file publishing + return response_content, [], tool_invoke_meta + + # Default: use generic_invoke for workflow scenarios + # Import here to avoid circular import + from core.tools.tool_engine import DifyWorkflowCallbackHandler, ToolEngine + + tool_response = ToolEngine().generic_invoke( + tool=tool_instance, + tool_parameters=tool_args, + user_id=self.context.user_id or "", + workflow_tool_callback=DifyWorkflowCallbackHandler(), + workflow_call_depth=self.workflow_call_depth, + app_id=self.context.app_id, + conversation_id=self.context.conversation_id, + message_id=self.context.message_id, + ) + + # Collect response and files + response_content = "" + tool_files: list[File] = [] + + for response in tool_response: + if response.type == ToolInvokeMessage.MessageType.TEXT: + assert isinstance(response.message, ToolInvokeMessage.TextMessage) + response_content += response.message.text + + elif response.type == ToolInvokeMessage.MessageType.LINK: + # Handle link messages + if isinstance(response.message, ToolInvokeMessage.TextMessage): + response_content += f"[Link: {response.message.text}]" + + elif response.type == ToolInvokeMessage.MessageType.IMAGE: + # Handle image URL messages + if isinstance(response.message, ToolInvokeMessage.TextMessage): + response_content += f"[Image: {response.message.text}]" + + elif response.type == 
ToolInvokeMessage.MessageType.IMAGE_LINK: + # Handle image link messages + if isinstance(response.message, ToolInvokeMessage.TextMessage): + response_content += f"[Image: {response.message.text}]" + + elif response.type == ToolInvokeMessage.MessageType.BINARY_LINK: + # Handle binary file link messages + if isinstance(response.message, ToolInvokeMessage.TextMessage): + filename = response.meta.get("filename", "file") if response.meta else "file" + response_content += f"[File: {filename} - {response.message.text}]" + + elif response.type == ToolInvokeMessage.MessageType.JSON: + # Handle JSON messages + if isinstance(response.message, ToolInvokeMessage.JsonMessage): + response_content += json.dumps(response.message.json_object, ensure_ascii=False, indent=2) + + elif response.type == ToolInvokeMessage.MessageType.BLOB: + # Handle blob messages - convert to text representation + if isinstance(response.message, ToolInvokeMessage.BlobMessage): + mime_type = ( + response.meta.get("mime_type", "application/octet-stream") + if response.meta + else "application/octet-stream" + ) + size = len(response.message.blob) + response_content += f"[Binary data: {mime_type}, size: {size} bytes]" + + elif response.type == ToolInvokeMessage.MessageType.VARIABLE: + # Handle variable messages + if isinstance(response.message, ToolInvokeMessage.VariableMessage): + var_name = response.message.variable_name + var_value = response.message.variable_value + if isinstance(var_value, str): + response_content += var_value + else: + response_content += f"[Variable {var_name}: {json.dumps(var_value, ensure_ascii=False)}]" + + elif response.type == ToolInvokeMessage.MessageType.BLOB_CHUNK: + # Handle blob chunk messages - these are parts of a larger blob + if isinstance(response.message, ToolInvokeMessage.BlobChunkMessage): + response_content += f"[Blob chunk {response.message.sequence}: {len(response.message.blob)} bytes]" + + elif response.type == ToolInvokeMessage.MessageType.RETRIEVER_RESOURCES: + 
# Handle retriever resources messages + if isinstance(response.message, ToolInvokeMessage.RetrieverResourceMessage): + response_content += response.message.context + + elif response.type == ToolInvokeMessage.MessageType.FILE: + # Extract file from meta + if response.meta and "file" in response.meta: + file = response.meta["file"] + if isinstance(file, File): + # Check if file is for model or tool output + if response.meta.get("target") == "self": + # File is for model - add to files for next prompt + self.files.append(file) + response_content += f"File '{file.filename}' has been loaded into your context." + else: + # File is tool output + tool_files.append(file) + + return response_content, tool_files, None + + def _find_tool_by_name(self, tool_name: str) -> Tool | None: + """Find a tool instance by its name.""" + for tool in self.tools: + if tool.entity.identity.name == tool_name: + return tool + return None + + def _convert_tools_to_prompt_format(self) -> list[PromptMessageTool]: + """Convert tools to prompt message format.""" + prompt_tools: list[PromptMessageTool] = [] + for tool in self.tools: + prompt_tools.append(tool.to_prompt_message_tool()) + return prompt_tools + + def _update_usage_with_empty(self, llm_usage: dict[str, Any]) -> None: + """Initialize usage tracking with empty usage if not set.""" + if "usage" not in llm_usage or llm_usage["usage"] is None: + llm_usage["usage"] = LLMUsage.empty_usage() diff --git a/api/core/agent/patterns/function_call.py b/api/core/agent/patterns/function_call.py new file mode 100644 index 0000000000..a0f0d30ab3 --- /dev/null +++ b/api/core/agent/patterns/function_call.py @@ -0,0 +1,299 @@ +"""Function Call strategy implementation.""" + +import json +from collections.abc import Generator +from typing import Any, Union + +from core.agent.entities import AgentLog, AgentResult +from core.file import File +from core.model_runtime.entities import ( + AssistantPromptMessage, + LLMResult, + LLMResultChunk, + 
LLMResultChunkDelta, + LLMUsage, + PromptMessage, + PromptMessageTool, + ToolPromptMessage, +) +from core.tools.entities.tool_entities import ToolInvokeMeta + +from .base import AgentPattern + + +class FunctionCallStrategy(AgentPattern): + """Function Call strategy using model's native tool calling capability.""" + + def run( + self, + prompt_messages: list[PromptMessage], + model_parameters: dict[str, Any], + stop: list[str] = [], + stream: bool = True, + ) -> Generator[LLMResultChunk | AgentLog, None, AgentResult]: + """Execute the function call agent strategy.""" + # Convert tools to prompt format + prompt_tools: list[PromptMessageTool] = self._convert_tools_to_prompt_format() + + # Initialize tracking + iteration_step: int = 1 + max_iterations: int = self.max_iterations + 1 + function_call_state: bool = True + total_usage: dict[str, LLMUsage | None] = {"usage": None} + messages: list[PromptMessage] = list(prompt_messages) # Create mutable copy + final_text: str = "" + finish_reason: str | None = None + output_files: list[File] = [] # Track files produced by tools + + while function_call_state and iteration_step <= max_iterations: + function_call_state = False + round_log = self._create_log( + label=f"ROUND {iteration_step}", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={}, + ) + yield round_log + # On last iteration, remove tools to force final answer + current_tools: list[PromptMessageTool] = [] if iteration_step == max_iterations else prompt_tools + model_log = self._create_log( + label=f"{self.model_instance.model} Thought", + log_type=AgentLog.LogType.THOUGHT, + status=AgentLog.LogStatus.START, + data={}, + parent_id=round_log.id, + extra_metadata={ + AgentLog.LogMetadata.PROVIDER: self.model_instance.provider, + }, + ) + yield model_log + + # Track usage for this round only + round_usage: dict[str, LLMUsage | None] = {"usage": None} + + # Invoke model + chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = 
self.model_instance.invoke_llm( + prompt_messages=messages, + model_parameters=model_parameters, + tools=current_tools, + stop=stop, + stream=stream, + user=self.context.user_id, + callbacks=[], + ) + + # Process response + tool_calls, response_content, chunk_finish_reason = yield from self._handle_chunks( + chunks, round_usage, model_log + ) + messages.append(self._create_assistant_message(response_content, tool_calls)) + + # Accumulate to total usage + round_usage_value = round_usage.get("usage") + if round_usage_value: + self._accumulate_usage(total_usage, round_usage_value) + + # Update final text if no tool calls (this is likely the final answer) + if not tool_calls: + final_text = response_content + + # Update finish reason + if chunk_finish_reason: + finish_reason = chunk_finish_reason + + # Process tool calls + tool_outputs: dict[str, str] = {} + if tool_calls: + function_call_state = True + # Execute tools + for tool_call_id, tool_name, tool_args in tool_calls: + tool_response, tool_files, _ = yield from self._handle_tool_call( + tool_name, tool_args, tool_call_id, messages, round_log + ) + tool_outputs[tool_name] = tool_response + # Track files produced by tools + output_files.extend(tool_files) + yield self._finish_log( + round_log, + data={ + "llm_result": response_content, + "tool_calls": [ + {"name": tc[1], "args": tc[2], "output": tool_outputs.get(tc[1], "")} for tc in tool_calls + ] + if tool_calls + else [], + "final_answer": final_text if not function_call_state else None, + }, + usage=round_usage.get("usage"), + ) + iteration_step += 1 + + # Return final result + from core.agent.entities import AgentResult + + return AgentResult( + text=final_text, + files=output_files, + usage=total_usage.get("usage") or LLMUsage.empty_usage(), + finish_reason=finish_reason, + ) + + def _handle_chunks( + self, + chunks: Union[Generator[LLMResultChunk, None, None], LLMResult], + llm_usage: dict[str, LLMUsage | None], + start_log: AgentLog, + ) -> Generator[ + 
LLMResultChunk | AgentLog, + None, + tuple[list[tuple[str, str, dict[str, Any]]], str, str | None], + ]: + """Handle LLM response chunks and extract tool calls and content. + + Returns a tuple of (tool_calls, response_content, finish_reason). + """ + tool_calls: list[tuple[str, str, dict[str, Any]]] = [] + response_content: str = "" + finish_reason: str | None = None + if isinstance(chunks, Generator): + # Streaming response + for chunk in chunks: + # Extract tool calls + if self._has_tool_calls(chunk): + tool_calls.extend(self._extract_tool_calls(chunk)) + + # Extract content + if chunk.delta.message and chunk.delta.message.content: + response_content += self._extract_content(chunk.delta.message.content) + + # Track usage + if chunk.delta.usage: + self._accumulate_usage(llm_usage, chunk.delta.usage) + + # Capture finish reason + if chunk.delta.finish_reason: + finish_reason = chunk.delta.finish_reason + + yield chunk + else: + # Non-streaming response + result: LLMResult = chunks + + if self._has_tool_calls_result(result): + tool_calls.extend(self._extract_tool_calls_result(result)) + + if result.message and result.message.content: + response_content += self._extract_content(result.message.content) + + if result.usage: + self._accumulate_usage(llm_usage, result.usage) + + # Convert to streaming format + yield LLMResultChunk( + model=result.model, + prompt_messages=result.prompt_messages, + delta=LLMResultChunkDelta(index=0, message=result.message, usage=result.usage), + ) + yield self._finish_log( + start_log, + data={ + "result": response_content, + }, + usage=llm_usage.get("usage"), + ) + return tool_calls, response_content, finish_reason + + def _create_assistant_message( + self, content: str, tool_calls: list[tuple[str, str, dict[str, Any]]] | None = None + ) -> AssistantPromptMessage: + """Create assistant message with tool calls.""" + if tool_calls is None: + return AssistantPromptMessage(content=content) + return AssistantPromptMessage( + content=content or 
"", + tool_calls=[ + AssistantPromptMessage.ToolCall( + id=tc[0], + type="function", + function=AssistantPromptMessage.ToolCall.ToolCallFunction(name=tc[1], arguments=json.dumps(tc[2])), + ) + for tc in tool_calls + ], + ) + + def _handle_tool_call( + self, + tool_name: str, + tool_args: dict[str, Any], + tool_call_id: str, + messages: list[PromptMessage], + round_log: AgentLog, + ) -> Generator[AgentLog, None, tuple[str, list[File], ToolInvokeMeta | None]]: + """Handle a single tool call and return response with files and meta.""" + # Find tool + tool_instance = self._find_tool_by_name(tool_name) + if not tool_instance: + raise ValueError(f"Tool {tool_name} not found") + + # Get tool metadata (provider, icon, etc.) + tool_metadata = self._get_tool_metadata(tool_instance) + + # Create tool call log + tool_call_log = self._create_log( + label=f"CALL {tool_name}", + log_type=AgentLog.LogType.TOOL_CALL, + status=AgentLog.LogStatus.START, + data={ + "tool_call_id": tool_call_id, + "tool_name": tool_name, + "tool_args": tool_args, + }, + parent_id=round_log.id, + extra_metadata=tool_metadata, + ) + yield tool_call_log + + # Invoke tool using base class method with error handling + try: + response_content, tool_files, tool_invoke_meta = self._invoke_tool(tool_instance, tool_args, tool_name) + + yield self._finish_log( + tool_call_log, + data={ + **tool_call_log.data, + "output": response_content, + "files": len(tool_files), + "meta": tool_invoke_meta.to_dict() if tool_invoke_meta else None, + }, + ) + final_content = response_content or "Tool executed successfully" + # Add tool response to messages + messages.append( + ToolPromptMessage( + content=final_content, + tool_call_id=tool_call_id, + name=tool_name, + ) + ) + return response_content, tool_files, tool_invoke_meta + except Exception as e: + # Tool invocation failed, yield error log + error_message = str(e) + tool_call_log.status = AgentLog.LogStatus.ERROR + tool_call_log.error = error_message + tool_call_log.data 
= { + **tool_call_log.data, + "error": error_message, + } + yield tool_call_log + + # Add error message to conversation + error_content = f"Tool execution failed: {error_message}" + messages.append( + ToolPromptMessage( + content=error_content, + tool_call_id=tool_call_id, + name=tool_name, + ) + ) + return error_content, [], None diff --git a/api/core/agent/patterns/react.py b/api/core/agent/patterns/react.py new file mode 100644 index 0000000000..87a9fa9b65 --- /dev/null +++ b/api/core/agent/patterns/react.py @@ -0,0 +1,418 @@ +"""ReAct strategy implementation.""" + +from __future__ import annotations + +import json +from collections.abc import Generator +from typing import TYPE_CHECKING, Any, Union + +from core.agent.entities import AgentLog, AgentResult, AgentScratchpadUnit, ExecutionContext +from core.agent.output_parser.cot_output_parser import CotAgentOutputParser +from core.file import File +from core.model_manager import ModelInstance +from core.model_runtime.entities import ( + AssistantPromptMessage, + LLMResult, + LLMResultChunk, + LLMResultChunkDelta, + PromptMessage, + SystemPromptMessage, +) + +from .base import AgentPattern, ToolInvokeHook + +if TYPE_CHECKING: + from core.tools.__base.tool import Tool + + +class ReActStrategy(AgentPattern): + """ReAct strategy using reasoning and acting approach.""" + + def __init__( + self, + model_instance: ModelInstance, + tools: list[Tool], + context: ExecutionContext, + max_iterations: int = 10, + workflow_call_depth: int = 0, + files: list[File] = [], + tool_invoke_hook: ToolInvokeHook | None = None, + instruction: str = "", + ): + """Initialize the ReAct strategy with instruction support.""" + super().__init__( + model_instance=model_instance, + tools=tools, + context=context, + max_iterations=max_iterations, + workflow_call_depth=workflow_call_depth, + files=files, + tool_invoke_hook=tool_invoke_hook, + ) + self.instruction = instruction + + def run( + self, + prompt_messages: list[PromptMessage], + 
model_parameters: dict[str, Any], + stop: list[str] = [], + stream: bool = True, + ) -> Generator[LLMResultChunk | AgentLog, None, AgentResult]: + """Execute the ReAct agent strategy.""" + # Initialize tracking + agent_scratchpad: list[AgentScratchpadUnit] = [] + iteration_step: int = 1 + max_iterations: int = self.max_iterations + 1 + react_state: bool = True + total_usage: dict[str, Any] = {"usage": None} + output_files: list[File] = [] # Track files produced by tools + final_text: str = "" + finish_reason: str | None = None + + # Add "Observation" to stop sequences + if "Observation" not in stop: + stop = stop.copy() + stop.append("Observation") + + while react_state and iteration_step <= max_iterations: + react_state = False + round_log = self._create_log( + label=f"ROUND {iteration_step}", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={}, + ) + yield round_log + + # Build prompt with/without tools based on iteration + include_tools = iteration_step < max_iterations + current_messages = self._build_prompt_with_react_format( + prompt_messages, agent_scratchpad, include_tools, self.instruction + ) + + model_log = self._create_log( + label=f"{self.model_instance.model} Thought", + log_type=AgentLog.LogType.THOUGHT, + status=AgentLog.LogStatus.START, + data={}, + parent_id=round_log.id, + extra_metadata={ + AgentLog.LogMetadata.PROVIDER: self.model_instance.provider, + }, + ) + yield model_log + + # Track usage for this round only + round_usage: dict[str, Any] = {"usage": None} + + # Use current messages directly (files are handled by base class if needed) + messages_to_use = current_messages + + # Invoke model + chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = self.model_instance.invoke_llm( + prompt_messages=messages_to_use, + model_parameters=model_parameters, + stop=stop, + stream=stream, + user=self.context.user_id or "", + callbacks=[], + ) + + # Process response + scratchpad, chunk_finish_reason = yield from 
self._handle_chunks( + chunks, round_usage, model_log, current_messages + ) + agent_scratchpad.append(scratchpad) + + # Accumulate to total usage + round_usage_value = round_usage.get("usage") + if round_usage_value: + self._accumulate_usage(total_usage, round_usage_value) + + # Update finish reason + if chunk_finish_reason: + finish_reason = chunk_finish_reason + + # Check if we have an action to execute + if scratchpad.action and scratchpad.action.action_name.lower() != "final answer": + react_state = True + # Execute tool + observation, tool_files = yield from self._handle_tool_call( + scratchpad.action, current_messages, round_log + ) + scratchpad.observation = observation + # Track files produced by tools + output_files.extend(tool_files) + + # Add observation to scratchpad for display + yield self._create_text_chunk(f"\nObservation: {observation}\n", current_messages) + else: + # Extract final answer + if scratchpad.action and scratchpad.action.action_input: + final_answer = scratchpad.action.action_input + if isinstance(final_answer, dict): + final_answer = json.dumps(final_answer, ensure_ascii=False) + final_text = str(final_answer) + elif scratchpad.thought: + # If no action but we have thought, use thought as final answer + final_text = scratchpad.thought + + yield self._finish_log( + round_log, + data={ + "thought": scratchpad.thought, + "action": scratchpad.action_str if scratchpad.action else None, + "observation": scratchpad.observation or None, + "final_answer": final_text if not react_state else None, + }, + usage=round_usage.get("usage"), + ) + iteration_step += 1 + + # Return final result + + from core.agent.entities import AgentResult + + return AgentResult( + text=final_text, files=output_files, usage=total_usage.get("usage"), finish_reason=finish_reason + ) + + def _build_prompt_with_react_format( + self, + original_messages: list[PromptMessage], + agent_scratchpad: list[AgentScratchpadUnit], + include_tools: bool = True, + instruction: str = 
"", + ) -> list[PromptMessage]: + """Build prompt messages with ReAct format.""" + # Copy messages to avoid modifying original + messages = list(original_messages) + + # Find and update the system prompt that should already exist + system_prompt_found = False + for i, msg in enumerate(messages): + if isinstance(msg, SystemPromptMessage): + system_prompt_found = True + # The system prompt from frontend already has the template, just replace placeholders + + # Format tools + tools_str = "" + tool_names = [] + if include_tools and self.tools: + # Convert tools to prompt message tools format + prompt_tools = [tool.to_prompt_message_tool() for tool in self.tools] + tool_names = [tool.name for tool in prompt_tools] + + # Format tools as JSON for comprehensive information + from core.model_runtime.utils.encoders import jsonable_encoder + + tools_str = json.dumps(jsonable_encoder(prompt_tools), indent=2) + tool_names_str = ", ".join(f'"{name}"' for name in tool_names) + else: + tools_str = "No tools available" + tool_names_str = "" + + # Replace placeholders in the existing system prompt + updated_content = msg.content + assert isinstance(updated_content, str) + updated_content = updated_content.replace("{{instruction}}", instruction) + updated_content = updated_content.replace("{{tools}}", tools_str) + updated_content = updated_content.replace("{{tool_names}}", tool_names_str) + + # Create new SystemPromptMessage with updated content + messages[i] = SystemPromptMessage(content=updated_content) + break + + # If no system prompt found, that's unexpected but add scratchpad anyway + if not system_prompt_found: + # This shouldn't happen if frontend is working correctly + pass + + # Format agent scratchpad + scratchpad_str = "" + if agent_scratchpad: + scratchpad_parts: list[str] = [] + for unit in agent_scratchpad: + if unit.thought: + scratchpad_parts.append(f"Thought: {unit.thought}") + if unit.action_str: + scratchpad_parts.append(f"Action:\n```\n{unit.action_str}\n```") + 
if unit.observation: + scratchpad_parts.append(f"Observation: {unit.observation}") + scratchpad_str = "\n".join(scratchpad_parts) + + # If there's a scratchpad, append it to the last message + if scratchpad_str: + messages.append(AssistantPromptMessage(content=scratchpad_str)) + + return messages + + def _handle_chunks( + self, + chunks: Union[Generator[LLMResultChunk, None, None], LLMResult], + llm_usage: dict[str, Any], + model_log: AgentLog, + current_messages: list[PromptMessage], + ) -> Generator[ + LLMResultChunk | AgentLog, + None, + tuple[AgentScratchpadUnit, str | None], + ]: + """Handle LLM response chunks and extract action/thought. + + Returns a tuple of (scratchpad_unit, finish_reason). + """ + usage_dict: dict[str, Any] = {} + + # Convert non-streaming to streaming format if needed + if isinstance(chunks, LLMResult): + # Create a generator from the LLMResult + def result_to_chunks() -> Generator[LLMResultChunk, None, None]: + yield LLMResultChunk( + model=chunks.model, + prompt_messages=chunks.prompt_messages, + delta=LLMResultChunkDelta( + index=0, + message=chunks.message, + usage=chunks.usage, + finish_reason=None, # LLMResult doesn't have finish_reason, only streaming chunks do + ), + system_fingerprint=chunks.system_fingerprint or "", + ) + + streaming_chunks = result_to_chunks() + else: + streaming_chunks = chunks + + react_chunks = CotAgentOutputParser.handle_react_stream_output(streaming_chunks, usage_dict) + + # Initialize scratchpad unit + scratchpad = AgentScratchpadUnit( + agent_response="", + thought="", + action_str="", + observation="", + action=None, + ) + + finish_reason: str | None = None + + # Process chunks + for chunk in react_chunks: + if isinstance(chunk, AgentScratchpadUnit.Action): + # Action detected + action_str = json.dumps(chunk.model_dump()) + scratchpad.agent_response = (scratchpad.agent_response or "") + action_str + scratchpad.action_str = action_str + scratchpad.action = chunk + + yield 
self._create_text_chunk(json.dumps(chunk.model_dump()), current_messages) + else: + # Text chunk + chunk_text = str(chunk) + scratchpad.agent_response = (scratchpad.agent_response or "") + chunk_text + scratchpad.thought = (scratchpad.thought or "") + chunk_text + + yield self._create_text_chunk(chunk_text, current_messages) + + # Update usage + if usage_dict.get("usage"): + if llm_usage.get("usage"): + self._accumulate_usage(llm_usage, usage_dict["usage"]) + else: + llm_usage["usage"] = usage_dict["usage"] + + # Clean up thought + scratchpad.thought = (scratchpad.thought or "").strip() or "I am thinking about how to help you" + + # Finish model log + yield self._finish_log( + model_log, + data={ + "thought": scratchpad.thought, + "action": scratchpad.action_str if scratchpad.action else None, + }, + usage=llm_usage.get("usage"), + ) + + return scratchpad, finish_reason + + def _handle_tool_call( + self, + action: AgentScratchpadUnit.Action, + prompt_messages: list[PromptMessage], + round_log: AgentLog, + ) -> Generator[AgentLog, None, tuple[str, list[File]]]: + """Handle tool call and return observation with files.""" + tool_name = action.action_name + tool_args: dict[str, Any] | str = action.action_input + + # Find tool instance first to get metadata + tool_instance = self._find_tool_by_name(tool_name) + tool_metadata = self._get_tool_metadata(tool_instance) if tool_instance else {} + + # Start tool log with tool metadata + tool_log = self._create_log( + label=f"CALL {tool_name}", + log_type=AgentLog.LogType.TOOL_CALL, + status=AgentLog.LogStatus.START, + data={ + "tool_name": tool_name, + "tool_args": tool_args, + }, + parent_id=round_log.id, + extra_metadata=tool_metadata, + ) + yield tool_log + + if not tool_instance: + # Finish tool log with error + yield self._finish_log( + tool_log, + data={ + **tool_log.data, + "error": f"Tool {tool_name} not found", + }, + ) + return f"Tool {tool_name} not found", [] + + # Ensure tool_args is a dict + tool_args_dict: 
dict[str, Any] + if isinstance(tool_args, str): + try: + tool_args_dict = json.loads(tool_args) + except json.JSONDecodeError: + tool_args_dict = {"input": tool_args} + elif not isinstance(tool_args, dict): + tool_args_dict = {"input": str(tool_args)} + else: + tool_args_dict = tool_args + + # Invoke tool using base class method with error handling + try: + response_content, tool_files, tool_invoke_meta = self._invoke_tool(tool_instance, tool_args_dict, tool_name) + + # Finish tool log + yield self._finish_log( + tool_log, + data={ + **tool_log.data, + "output": response_content, + "files": len(tool_files), + "meta": tool_invoke_meta.to_dict() if tool_invoke_meta else None, + }, + ) + + return response_content or "Tool executed successfully", tool_files + except Exception as e: + # Tool invocation failed, yield error log + error_message = str(e) + tool_log.status = AgentLog.LogStatus.ERROR + tool_log.error = error_message + tool_log.data = { + **tool_log.data, + "error": error_message, + } + yield tool_log + + return f"Tool execution failed: {error_message}", [] diff --git a/api/core/agent/patterns/strategy_factory.py b/api/core/agent/patterns/strategy_factory.py new file mode 100644 index 0000000000..ad26075291 --- /dev/null +++ b/api/core/agent/patterns/strategy_factory.py @@ -0,0 +1,107 @@ +"""Strategy factory for creating agent strategies.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from core.agent.entities import AgentEntity, ExecutionContext +from core.file.models import File +from core.model_manager import ModelInstance +from core.model_runtime.entities.model_entities import ModelFeature + +from .base import AgentPattern, ToolInvokeHook +from .function_call import FunctionCallStrategy +from .react import ReActStrategy + +if TYPE_CHECKING: + from core.tools.__base.tool import Tool + + +class StrategyFactory: + """Factory for creating agent strategies based on model features.""" + + # Tool calling related features + 
TOOL_CALL_FEATURES = {ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL, ModelFeature.STREAM_TOOL_CALL} + + @staticmethod + def create_strategy( + model_features: list[ModelFeature], + model_instance: ModelInstance, + context: ExecutionContext, + tools: list[Tool], + files: list[File], + max_iterations: int = 10, + workflow_call_depth: int = 0, + agent_strategy: AgentEntity.Strategy | None = None, + tool_invoke_hook: ToolInvokeHook | None = None, + instruction: str = "", + ) -> AgentPattern: + """ + Create an appropriate strategy based on model features. + + Args: + model_features: List of model features/capabilities + model_instance: Model instance to use + context: Execution context containing trace/audit information + tools: Available tools + files: Available files + max_iterations: Maximum iterations for the strategy + workflow_call_depth: Depth of workflow calls + agent_strategy: Optional explicit strategy override + tool_invoke_hook: Optional hook for custom tool invocation (e.g., agent_invoke) + instruction: Optional instruction for ReAct strategy + + Returns: + AgentStrategy instance + """ + # If explicit strategy is provided and it's Function Calling, try to use it if supported + if agent_strategy == AgentEntity.Strategy.FUNCTION_CALLING: + if set(model_features) & StrategyFactory.TOOL_CALL_FEATURES: + return FunctionCallStrategy( + model_instance=model_instance, + context=context, + tools=tools, + files=files, + max_iterations=max_iterations, + workflow_call_depth=workflow_call_depth, + tool_invoke_hook=tool_invoke_hook, + ) + # Fallback to ReAct if FC is requested but not supported + + # If explicit strategy is Chain of Thought (ReAct) + if agent_strategy == AgentEntity.Strategy.CHAIN_OF_THOUGHT: + return ReActStrategy( + model_instance=model_instance, + context=context, + tools=tools, + files=files, + max_iterations=max_iterations, + workflow_call_depth=workflow_call_depth, + tool_invoke_hook=tool_invoke_hook, + instruction=instruction, + ) + + # 
Default auto-selection logic + if set(model_features) & StrategyFactory.TOOL_CALL_FEATURES: + # Model supports native function calling + return FunctionCallStrategy( + model_instance=model_instance, + context=context, + tools=tools, + files=files, + max_iterations=max_iterations, + workflow_call_depth=workflow_call_depth, + tool_invoke_hook=tool_invoke_hook, + ) + else: + # Use ReAct strategy for models without function calling + return ReActStrategy( + model_instance=model_instance, + context=context, + tools=tools, + files=files, + max_iterations=max_iterations, + workflow_call_depth=workflow_call_depth, + tool_invoke_hook=tool_invoke_hook, + instruction=instruction, + ) diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index da1e9f19b6..c4d89b8b2f 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -4,6 +4,7 @@ import re import time from collections.abc import Callable, Generator, Mapping from contextlib import contextmanager +from dataclasses import dataclass, field from threading import Thread from typing import Any, Union @@ -19,6 +20,7 @@ from core.app.entities.app_invoke_entities import ( InvokeFrom, ) from core.app.entities.queue_entities import ( + ChunkType, MessageQueueMessage, QueueAdvancedChatMessageEndEvent, QueueAgentLogEvent, @@ -70,13 +72,122 @@ from core.workflow.runtime import GraphRuntimeState from core.workflow.system_variable import SystemVariable from extensions.ext_database import db from libs.datetime_utils import naive_utc_now -from models import Account, Conversation, EndUser, Message, MessageFile +from models import Account, Conversation, EndUser, LLMGenerationDetail, Message, MessageFile from models.enums import CreatorUserRole from models.workflow import Workflow logger = logging.getLogger(__name__) +@dataclass +class StreamEventBuffer: + """ + Buffer for recording 
stream events in order to reconstruct the generation sequence. + Records the exact order of text chunks, thoughts, and tool calls as they stream. + """ + + # Accumulated reasoning content (each thought block is a separate element) + reasoning_content: list[str] = field(default_factory=list) + # Current reasoning buffer (accumulates until we see a different event type) + _current_reasoning: str = "" + # Tool calls with their details + tool_calls: list[dict] = field(default_factory=list) + # Tool call ID to index mapping for updating results + _tool_call_id_map: dict[str, int] = field(default_factory=dict) + # Sequence of events in stream order + sequence: list[dict] = field(default_factory=list) + # Current position in answer text + _content_position: int = 0 + # Track last event type to detect transitions + _last_event_type: str | None = None + + def _flush_current_reasoning(self) -> None: + """Flush accumulated reasoning to the list and add to sequence.""" + if self._current_reasoning.strip(): + self.reasoning_content.append(self._current_reasoning.strip()) + self.sequence.append({"type": "reasoning", "index": len(self.reasoning_content) - 1}) + self._current_reasoning = "" + + def record_text_chunk(self, text: str) -> None: + """Record a text chunk event.""" + if not text: + return + + # Flush any pending reasoning first + if self._last_event_type == "thought": + self._flush_current_reasoning() + + text_len = len(text) + start_pos = self._content_position + + # If last event was also content, extend it; otherwise create new + if self.sequence and self.sequence[-1].get("type") == "content": + self.sequence[-1]["end"] = start_pos + text_len + else: + self.sequence.append({"type": "content", "start": start_pos, "end": start_pos + text_len}) + + self._content_position += text_len + self._last_event_type = "content" + + def record_thought_chunk(self, text: str) -> None: + """Record a thought/reasoning chunk event.""" + if not text: + return + + # Accumulate thought 
content + self._current_reasoning += text + self._last_event_type = "thought" + + def record_tool_call(self, tool_call_id: str, tool_name: str, tool_arguments: str) -> None: + """Record a tool call event.""" + if not tool_call_id: + return + + # Flush any pending reasoning first + if self._last_event_type == "thought": + self._flush_current_reasoning() + + # Check if this tool call already exists (we might get multiple chunks) + if tool_call_id in self._tool_call_id_map: + idx = self._tool_call_id_map[tool_call_id] + # Update arguments if provided + if tool_arguments: + self.tool_calls[idx]["arguments"] = tool_arguments + else: + # New tool call + tool_call = { + "id": tool_call_id or "", + "name": tool_name or "", + "arguments": tool_arguments or "", + "result": "", + "elapsed_time": None, + } + self.tool_calls.append(tool_call) + idx = len(self.tool_calls) - 1 + self._tool_call_id_map[tool_call_id] = idx + self.sequence.append({"type": "tool_call", "index": idx}) + + self._last_event_type = "tool_call" + + def record_tool_result(self, tool_call_id: str, result: str, tool_elapsed_time: float | None = None) -> None: + """Record a tool result event (update existing tool call).""" + if not tool_call_id: + return + if tool_call_id in self._tool_call_id_map: + idx = self._tool_call_id_map[tool_call_id] + self.tool_calls[idx]["result"] = result + self.tool_calls[idx]["elapsed_time"] = tool_elapsed_time + + def finalize(self) -> None: + """Finalize the buffer, flushing any pending data.""" + if self._last_event_type == "thought": + self._flush_current_reasoning() + + def has_data(self) -> bool: + """Check if there's any meaningful data recorded.""" + return bool(self.reasoning_content or self.tool_calls or self.sequence) + + class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): """ AdvancedChatAppGenerateTaskPipeline is a class that generate stream output and state management for Application. 
@@ -144,6 +255,8 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): self._workflow_run_id: str = "" self._draft_var_saver_factory = draft_var_saver_factory self._graph_runtime_state: GraphRuntimeState | None = None + # Stream event buffer for recording generation sequence + self._stream_buffer = StreamEventBuffer() self._seed_graph_runtime_state_from_queue_manager() def process(self) -> Union[ChatbotAppBlockingResponse, Generator[ChatbotAppStreamResponse, None, None]]: @@ -383,7 +496,7 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): queue_message: Union[WorkflowQueueMessage, MessageQueueMessage] | None = None, **kwargs, ) -> Generator[StreamResponse, None, None]: - """Handle text chunk events.""" + """Handle text chunk events and record to stream buffer for sequence reconstruction.""" delta_text = event.text if delta_text is None: return @@ -405,9 +518,52 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): if tts_publisher and queue_message: tts_publisher.publish(queue_message) - self._task_state.answer += delta_text + tool_call = event.tool_call + tool_result = event.tool_result + tool_payload = tool_call or tool_result + tool_call_id = tool_payload.id if tool_payload and tool_payload.id else "" + tool_name = tool_payload.name if tool_payload and tool_payload.name else "" + tool_arguments = tool_call.arguments if tool_call and tool_call.arguments else "" + tool_files = tool_result.files if tool_result else [] + tool_elapsed_time = tool_result.elapsed_time if tool_result else None + tool_icon = tool_payload.icon if tool_payload else None + tool_icon_dark = tool_payload.icon_dark if tool_payload else None + # Record stream event based on chunk type + chunk_type = event.chunk_type or ChunkType.TEXT + match chunk_type: + case ChunkType.TEXT: + self._stream_buffer.record_text_chunk(delta_text) + self._task_state.answer += delta_text + case ChunkType.THOUGHT: + # Reasoning should not be part of final 
answer text + self._stream_buffer.record_thought_chunk(delta_text) + case ChunkType.TOOL_CALL: + self._stream_buffer.record_tool_call( + tool_call_id=tool_call_id, + tool_name=tool_name, + tool_arguments=tool_arguments, + ) + case ChunkType.TOOL_RESULT: + self._stream_buffer.record_tool_result( + tool_call_id=tool_call_id, + result=delta_text, + tool_elapsed_time=tool_elapsed_time, + ) + self._task_state.answer += delta_text + case _: + pass yield self._message_cycle_manager.message_to_stream_response( - answer=delta_text, message_id=self._message_id, from_variable_selector=event.from_variable_selector + answer=delta_text, + message_id=self._message_id, + from_variable_selector=event.from_variable_selector, + chunk_type=event.chunk_type.value if event.chunk_type else None, + tool_call_id=tool_call_id or None, + tool_name=tool_name or None, + tool_arguments=tool_arguments or None, + tool_files=tool_files, + tool_elapsed_time=tool_elapsed_time, + tool_icon=tool_icon, + tool_icon_dark=tool_icon_dark, ) def _handle_iteration_start_event( @@ -775,6 +931,7 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): # If there are assistant files, remove markdown image links from answer answer_text = self._task_state.answer + answer_text = self._strip_think_blocks(answer_text) if self._recorded_files: # Remove markdown image links since we're storing files separately answer_text = re.sub(r"!\[.*?\]\(.*?\)", "", answer_text).strip() @@ -826,6 +983,54 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): ] session.add_all(message_files) + # Save generation detail (reasoning/tool calls/sequence) from stream buffer + self._save_generation_detail(session=session, message=message) + + @staticmethod + def _strip_think_blocks(text: str) -> str: + """Remove ... 
blocks (including their content) from text.""" + if not text or "]*>.*?", "", text, flags=re.IGNORECASE | re.DOTALL) + clean_text = re.sub(r"\n\s*\n", "\n\n", clean_text).strip() + return clean_text + + def _save_generation_detail(self, *, session: Session, message: Message) -> None: + """ + Save LLM generation detail for Chatflow using stream event buffer. + The buffer records the exact order of events as they streamed, + allowing accurate reconstruction of the generation sequence. + """ + # Finalize the stream buffer to flush any pending data + self._stream_buffer.finalize() + + # Only save if there's meaningful data + if not self._stream_buffer.has_data(): + return + + reasoning_content = self._stream_buffer.reasoning_content + tool_calls = self._stream_buffer.tool_calls + sequence = self._stream_buffer.sequence + + # Check if generation detail already exists for this message + existing = session.query(LLMGenerationDetail).filter_by(message_id=message.id).first() + + if existing: + existing.reasoning_content = json.dumps(reasoning_content) if reasoning_content else None + existing.tool_calls = json.dumps(tool_calls) if tool_calls else None + existing.sequence = json.dumps(sequence) if sequence else None + else: + generation_detail = LLMGenerationDetail( + tenant_id=self._application_generate_entity.app_config.tenant_id, + app_id=self._application_generate_entity.app_config.app_id, + message_id=message.id, + reasoning_content=json.dumps(reasoning_content) if reasoning_content else None, + tool_calls=json.dumps(tool_calls) if tool_calls else None, + sequence=json.dumps(sequence) if sequence else None, + ) + session.add(generation_detail) + def _seed_graph_runtime_state_from_queue_manager(self) -> None: """Bootstrap the cached runtime state from the queue manager when present.""" candidate = self._base_task_pipeline.queue_manager.graph_runtime_state diff --git a/api/core/app/apps/agent_chat/app_runner.py b/api/core/app/apps/agent_chat/app_runner.py index 
2760466a3b..f5cf7a2c56 100644 --- a/api/core/app/apps/agent_chat/app_runner.py +++ b/api/core/app/apps/agent_chat/app_runner.py @@ -3,10 +3,8 @@ from typing import cast from sqlalchemy import select -from core.agent.cot_chat_agent_runner import CotChatAgentRunner -from core.agent.cot_completion_agent_runner import CotCompletionAgentRunner +from core.agent.agent_app_runner import AgentAppRunner from core.agent.entities import AgentEntity -from core.agent.fc_agent_runner import FunctionCallAgentRunner from core.app.apps.agent_chat.app_config_manager import AgentChatAppConfig from core.app.apps.base_app_queue_manager import AppQueueManager, PublishFrom from core.app.apps.base_app_runner import AppRunner @@ -14,8 +12,7 @@ from core.app.entities.app_invoke_entities import AgentChatAppGenerateEntity from core.app.entities.queue_entities import QueueAnnotationReplyEvent from core.memory.token_buffer_memory import TokenBufferMemory from core.model_manager import ModelInstance -from core.model_runtime.entities.llm_entities import LLMMode -from core.model_runtime.entities.model_entities import ModelFeature, ModelPropertyKey +from core.model_runtime.entities.model_entities import ModelFeature from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel from core.moderation.base import ModerationError from extensions.ext_database import db @@ -194,22 +191,7 @@ class AgentChatAppRunner(AppRunner): raise ValueError("Message not found") db.session.close() - runner_cls: type[FunctionCallAgentRunner] | type[CotChatAgentRunner] | type[CotCompletionAgentRunner] - # start agent runner - if agent_entity.strategy == AgentEntity.Strategy.CHAIN_OF_THOUGHT: - # check LLM mode - if model_schema.model_properties.get(ModelPropertyKey.MODE) == LLMMode.CHAT: - runner_cls = CotChatAgentRunner - elif model_schema.model_properties.get(ModelPropertyKey.MODE) == LLMMode.COMPLETION: - runner_cls = CotCompletionAgentRunner - else: - raise ValueError(f"Invalid LLM mode: 
{model_schema.model_properties.get(ModelPropertyKey.MODE)}") - elif agent_entity.strategy == AgentEntity.Strategy.FUNCTION_CALLING: - runner_cls = FunctionCallAgentRunner - else: - raise ValueError(f"Invalid agent strategy: {agent_entity.strategy}") - - runner = runner_cls( + runner = AgentAppRunner( tenant_id=app_config.tenant_id, application_generate_entity=application_generate_entity, conversation=conversation_result, diff --git a/api/core/app/apps/common/workflow_response_converter.py b/api/core/app/apps/common/workflow_response_converter.py index 38ecec5d30..0f3f9972c3 100644 --- a/api/core/app/apps/common/workflow_response_converter.py +++ b/api/core/app/apps/common/workflow_response_converter.py @@ -671,7 +671,7 @@ class WorkflowResponseConverter: task_id=task_id, data=AgentLogStreamResponse.Data( node_execution_id=event.node_execution_id, - id=event.id, + message_id=event.id, parent_id=event.parent_id, label=event.label, error=event.error, diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py index 842ad545ad..2b8ed38c63 100644 --- a/api/core/app/apps/workflow/generate_task_pipeline.py +++ b/api/core/app/apps/workflow/generate_task_pipeline.py @@ -13,6 +13,7 @@ from core.app.apps.common.workflow_response_converter import WorkflowResponseCon from core.app.entities.app_invoke_entities import InvokeFrom, WorkflowAppGenerateEntity from core.app.entities.queue_entities import ( AppQueueEvent, + ChunkType, MessageQueueMessage, QueueAgentLogEvent, QueueErrorEvent, @@ -483,11 +484,33 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport): if delta_text is None: return + tool_call = event.tool_call + tool_result = event.tool_result + tool_payload = tool_call or tool_result + tool_call_id = tool_payload.id if tool_payload and tool_payload.id else None + tool_name = tool_payload.name if tool_payload and tool_payload.name else None + tool_arguments = tool_call.arguments if tool_call else 
None + tool_elapsed_time = tool_result.elapsed_time if tool_result else None + tool_files = tool_result.files if tool_result else [] + tool_icon = tool_payload.icon if tool_payload else None + tool_icon_dark = tool_payload.icon_dark if tool_payload else None + # only publish tts message at text chunk streaming if tts_publisher and queue_message: tts_publisher.publish(queue_message) - yield self._text_chunk_to_stream_response(delta_text, from_variable_selector=event.from_variable_selector) + yield self._text_chunk_to_stream_response( + text=delta_text, + from_variable_selector=event.from_variable_selector, + chunk_type=event.chunk_type, + tool_call_id=tool_call_id, + tool_name=tool_name, + tool_arguments=tool_arguments, + tool_files=tool_files, + tool_elapsed_time=tool_elapsed_time, + tool_icon=tool_icon, + tool_icon_dark=tool_icon_dark, + ) def _handle_agent_log_event(self, event: QueueAgentLogEvent, **kwargs) -> Generator[StreamResponse, None, None]: """Handle agent log events.""" @@ -650,16 +673,61 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport): session.add(workflow_app_log) def _text_chunk_to_stream_response( - self, text: str, from_variable_selector: list[str] | None = None + self, + text: str, + from_variable_selector: list[str] | None = None, + chunk_type: ChunkType | None = None, + tool_call_id: str | None = None, + tool_name: str | None = None, + tool_arguments: str | None = None, + tool_files: list[str] | None = None, + tool_error: str | None = None, + tool_elapsed_time: float | None = None, + tool_icon: str | dict | None = None, + tool_icon_dark: str | dict | None = None, ) -> TextChunkStreamResponse: """ Handle completed event. 
:param text: text :return: """ + from core.app.entities.task_entities import ChunkType as ResponseChunkType + + response_chunk_type = ResponseChunkType(chunk_type.value) if chunk_type else ResponseChunkType.TEXT + + data = TextChunkStreamResponse.Data( + text=text, + from_variable_selector=from_variable_selector, + chunk_type=response_chunk_type, + ) + + if response_chunk_type == ResponseChunkType.TOOL_CALL: + data = data.model_copy( + update={ + "tool_call_id": tool_call_id, + "tool_name": tool_name, + "tool_arguments": tool_arguments, + "tool_icon": tool_icon, + "tool_icon_dark": tool_icon_dark, + } + ) + elif response_chunk_type == ResponseChunkType.TOOL_RESULT: + data = data.model_copy( + update={ + "tool_call_id": tool_call_id, + "tool_name": tool_name, + "tool_arguments": tool_arguments, + "tool_files": tool_files, + "tool_error": tool_error, + "tool_elapsed_time": tool_elapsed_time, + "tool_icon": tool_icon, + "tool_icon_dark": tool_icon_dark, + } + ) + response = TextChunkStreamResponse( task_id=self._application_generate_entity.task_id, - data=TextChunkStreamResponse.Data(text=text, from_variable_selector=from_variable_selector), + data=data, ) return response diff --git a/api/core/app/apps/workflow_app_runner.py b/api/core/app/apps/workflow_app_runner.py index 7adf3504ac..b306a0376e 100644 --- a/api/core/app/apps/workflow_app_runner.py +++ b/api/core/app/apps/workflow_app_runner.py @@ -463,12 +463,20 @@ class WorkflowBasedAppRunner: ) ) elif isinstance(event, NodeRunStreamChunkEvent): + from core.app.entities.queue_entities import ChunkType as QueueChunkType + + if event.is_final and not event.chunk: + return + self._publish_event( QueueTextChunkEvent( text=event.chunk, from_variable_selector=list(event.selector), in_iteration_id=event.in_iteration_id, in_loop_id=event.in_loop_id, + chunk_type=QueueChunkType(event.chunk_type.value), + tool_call=event.tool_call, + tool_result=event.tool_result, ) ) elif isinstance(event, NodeRunRetrieverResourceEvent): 
diff --git a/api/core/app/entities/llm_generation_entities.py b/api/core/app/entities/llm_generation_entities.py new file mode 100644 index 0000000000..33e97e3299 --- /dev/null +++ b/api/core/app/entities/llm_generation_entities.py @@ -0,0 +1,70 @@ +""" +LLM Generation Detail entities. + +Defines the structure for storing and transmitting LLM generation details +including reasoning content, tool calls, and their sequence. +""" + +from typing import Literal + +from pydantic import BaseModel, Field + + +class ContentSegment(BaseModel): + """Represents a content segment in the generation sequence.""" + + type: Literal["content"] = "content" + start: int = Field(..., description="Start position in the text") + end: int = Field(..., description="End position in the text") + + +class ReasoningSegment(BaseModel): + """Represents a reasoning segment in the generation sequence.""" + + type: Literal["reasoning"] = "reasoning" + index: int = Field(..., description="Index into reasoning_content array") + + +class ToolCallSegment(BaseModel): + """Represents a tool call segment in the generation sequence.""" + + type: Literal["tool_call"] = "tool_call" + index: int = Field(..., description="Index into tool_calls array") + + +SequenceSegment = ContentSegment | ReasoningSegment | ToolCallSegment + + +class ToolCallDetail(BaseModel): + """Represents a tool call with its arguments and result.""" + + id: str = Field(default="", description="Unique identifier for the tool call") + name: str = Field(..., description="Name of the tool") + arguments: str = Field(default="", description="JSON string of tool arguments") + result: str = Field(default="", description="Result from the tool execution") + elapsed_time: float | None = Field(default=None, description="Elapsed time in seconds") + + +class LLMGenerationDetailData(BaseModel): + """ + Domain model for LLM generation detail. + + Contains the structured data for reasoning content, tool calls, + and their display sequence. 
+ """ + + reasoning_content: list[str] = Field(default_factory=list, description="List of reasoning segments") + tool_calls: list[ToolCallDetail] = Field(default_factory=list, description="List of tool call details") + sequence: list[SequenceSegment] = Field(default_factory=list, description="Display order of segments") + + def is_empty(self) -> bool: + """Check if there's any meaningful generation detail.""" + return not self.reasoning_content and not self.tool_calls + + def to_response_dict(self) -> dict: + """Convert to dictionary for API response.""" + return { + "reasoning_content": self.reasoning_content, + "tool_calls": [tc.model_dump() for tc in self.tool_calls], + "sequence": [seg.model_dump() for seg in self.sequence], + } diff --git a/api/core/app/entities/queue_entities.py b/api/core/app/entities/queue_entities.py index 77d6bf03b4..fdc4014caa 100644 --- a/api/core/app/entities/queue_entities.py +++ b/api/core/app/entities/queue_entities.py @@ -7,7 +7,7 @@ from pydantic import BaseModel, ConfigDict, Field from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk from core.rag.entities.citation_metadata import RetrievalSourceMetadata -from core.workflow.entities import AgentNodeStrategyInit +from core.workflow.entities import AgentNodeStrategyInit, ToolCall, ToolResult from core.workflow.enums import WorkflowNodeExecutionMetadataKey from core.workflow.nodes import NodeType @@ -177,6 +177,17 @@ class QueueLoopCompletedEvent(AppQueueEvent): error: str | None = None +class ChunkType(StrEnum): + """Stream chunk type for LLM-related events.""" + + TEXT = "text" # Normal text streaming + TOOL_CALL = "tool_call" # Tool call arguments streaming + TOOL_RESULT = "tool_result" # Tool execution result + THOUGHT = "thought" # Agent thinking process (ReAct) + THOUGHT_START = "thought_start" # Agent thought start + THOUGHT_END = "thought_end" # Agent thought end + + class QueueTextChunkEvent(AppQueueEvent): """ QueueTextChunkEvent entity @@ -191,6 
+202,16 @@ class QueueTextChunkEvent(AppQueueEvent): in_loop_id: str | None = None """loop id if node is in loop""" + # Extended fields for Agent/Tool streaming + chunk_type: ChunkType = ChunkType.TEXT + """type of the chunk""" + + # Tool streaming payloads + tool_call: ToolCall | None = None + """structured tool call info""" + tool_result: ToolResult | None = None + """structured tool result info""" + class QueueAgentMessageEvent(AppQueueEvent): """ diff --git a/api/core/app/entities/task_entities.py b/api/core/app/entities/task_entities.py index 79a5e657b3..0998510b60 100644 --- a/api/core/app/entities/task_entities.py +++ b/api/core/app/entities/task_entities.py @@ -113,6 +113,38 @@ class MessageStreamResponse(StreamResponse): answer: str from_variable_selector: list[str] | None = None + # Extended fields for Agent/Tool streaming (imported at runtime to avoid circular import) + chunk_type: str | None = None + """type of the chunk: text, tool_call, tool_result, thought""" + + # Tool call fields (when chunk_type == "tool_call") + tool_call_id: str | None = None + """unique identifier for this tool call""" + tool_name: str | None = None + """name of the tool being called""" + tool_arguments: str | None = None + """accumulated tool arguments JSON""" + + # Tool result fields (when chunk_type == "tool_result") + tool_files: list[str] | None = None + """file IDs produced by tool""" + tool_error: str | None = None + """error message if tool failed""" + tool_elapsed_time: float | None = None + """elapsed time spent executing the tool""" + tool_icon: str | dict | None = None + """icon of the tool""" + tool_icon_dark: str | dict | None = None + """dark theme icon of the tool""" + + def model_dump(self, *args, **kwargs) -> dict[str, object]: + kwargs.setdefault("exclude_none", True) + return super().model_dump(*args, **kwargs) + + def model_dump_json(self, *args, **kwargs) -> str: + kwargs.setdefault("exclude_none", True) + return super().model_dump_json(*args, **kwargs) + 
class MessageAudioStreamResponse(StreamResponse): """ @@ -582,6 +614,17 @@ class LoopNodeCompletedStreamResponse(StreamResponse): data: Data +class ChunkType(StrEnum): + """Stream chunk type for LLM-related events.""" + + TEXT = "text" # Normal text streaming + TOOL_CALL = "tool_call" # Tool call arguments streaming + TOOL_RESULT = "tool_result" # Tool execution result + THOUGHT = "thought" # Agent thinking process (ReAct) + THOUGHT_START = "thought_start" # Agent thought start + THOUGHT_END = "thought_end" # Agent thought end + + class TextChunkStreamResponse(StreamResponse): """ TextChunkStreamResponse entity @@ -595,6 +638,36 @@ class TextChunkStreamResponse(StreamResponse): text: str from_variable_selector: list[str] | None = None + # Extended fields for Agent/Tool streaming + chunk_type: ChunkType = ChunkType.TEXT + """type of the chunk""" + + # Tool call fields (when chunk_type == TOOL_CALL) + tool_call_id: str | None = None + """unique identifier for this tool call""" + tool_name: str | None = None + """name of the tool being called""" + tool_arguments: str | None = None + """accumulated tool arguments JSON""" + + # Tool result fields (when chunk_type == TOOL_RESULT) + tool_files: list[str] | None = None + """file IDs produced by tool""" + tool_error: str | None = None + """error message if tool failed""" + + # Tool elapsed time fields (when chunk_type == TOOL_RESULT) + tool_elapsed_time: float | None = None + """elapsed time spent executing the tool""" + + def model_dump(self, *args, **kwargs) -> dict[str, object]: + kwargs.setdefault("exclude_none", True) + return super().model_dump(*args, **kwargs) + + def model_dump_json(self, *args, **kwargs) -> str: + kwargs.setdefault("exclude_none", True) + return super().model_dump_json(*args, **kwargs) + event: StreamEvent = StreamEvent.TEXT_CHUNK data: Data @@ -743,7 +816,7 @@ class AgentLogStreamResponse(StreamResponse): """ node_execution_id: str - id: str + message_id: str label: str parent_id: str | None = 
None error: str | None = None diff --git a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py index 5bb93fa44a..a1852ffe19 100644 --- a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py +++ b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py @@ -1,4 +1,5 @@ import logging +import re import time from collections.abc import Generator from threading import Thread @@ -58,7 +59,7 @@ from core.prompt.utils.prompt_template_parser import PromptTemplateParser from events.message_event import message_was_created from extensions.ext_database import db from libs.datetime_utils import naive_utc_now -from models.model import AppMode, Conversation, Message, MessageAgentThought +from models.model import AppMode, Conversation, LLMGenerationDetail, Message, MessageAgentThought logger = logging.getLogger(__name__) @@ -68,6 +69,8 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline): EasyUIBasedGenerateTaskPipeline is a class that generate stream output and state management for Application. """ + _THINK_PATTERN = re.compile(r"<think[^>]*>(.*?)</think>", re.IGNORECASE | re.DOTALL) + _task_state: EasyUITaskState _application_generate_entity: Union[ChatAppGenerateEntity, CompletionAppGenerateEntity, AgentChatAppGenerateEntity] @@ -409,11 +412,136 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline): ) ) + # Save LLM generation detail if there's reasoning_content + self._save_generation_detail(session=session, message=message, llm_result=llm_result) + message_was_created.send( message, application_generate_entity=self._application_generate_entity, ) + def _save_generation_detail(self, *, session: Session, message: Message, llm_result: LLMResult) -> None: + """ + Save LLM generation detail for Completion/Chat/Agent-Chat applications. + For Agent-Chat, also merges MessageAgentThought records.
+ """ + import json + + reasoning_list: list[str] = [] + tool_calls_list: list[dict] = [] + sequence: list[dict] = [] + answer = message.answer or "" + + # Check if this is Agent-Chat mode by looking for agent thoughts + agent_thoughts = ( + session.query(MessageAgentThought) + .filter_by(message_id=message.id) + .order_by(MessageAgentThought.position.asc()) + .all() + ) + + if agent_thoughts: + # Agent-Chat mode: merge MessageAgentThought records + content_pos = 0 + cleaned_answer_parts: list[str] = [] + for thought in agent_thoughts: + # Add thought/reasoning + if thought.thought: + reasoning_text = thought.thought + if " blocks and clean the final answer + clean_answer, reasoning_content = self._split_reasoning_from_answer(answer) + if reasoning_content: + answer = clean_answer + llm_result.message.content = clean_answer + llm_result.reasoning_content = reasoning_content + message.answer = clean_answer + if reasoning_content: + reasoning_list = [reasoning_content] + # Content comes first, then reasoning + if answer: + sequence.append({"type": "content", "start": 0, "end": len(answer)}) + sequence.append({"type": "reasoning", "index": 0}) + + # Only save if there's meaningful generation detail + if not reasoning_list and not tool_calls_list: + return + + # Check if generation detail already exists + existing = session.query(LLMGenerationDetail).filter_by(message_id=message.id).first() + + if existing: + existing.reasoning_content = json.dumps(reasoning_list) if reasoning_list else None + existing.tool_calls = json.dumps(tool_calls_list) if tool_calls_list else None + existing.sequence = json.dumps(sequence) if sequence else None + else: + generation_detail = LLMGenerationDetail( + tenant_id=self._application_generate_entity.app_config.tenant_id, + app_id=self._application_generate_entity.app_config.app_id, + message_id=message.id, + reasoning_content=json.dumps(reasoning_list) if reasoning_list else None, + tool_calls=json.dumps(tool_calls_list) if 
tool_calls_list else None, + sequence=json.dumps(sequence) if sequence else None, + ) + session.add(generation_detail) + + @classmethod + def _split_reasoning_from_answer(cls, text: str) -> tuple[str, str]: + """ + Extract reasoning segments from <think> blocks and return (clean_text, reasoning). + """ + matches = cls._THINK_PATTERN.findall(text) + reasoning_content = "\n".join(match.strip() for match in matches) if matches else "" + + clean_text = cls._THINK_PATTERN.sub("", text) + clean_text = re.sub(r"\n\s*\n", "\n\n", clean_text).strip() + + return clean_text, reasoning_content or "" + def _handle_stop(self, event: QueueStopEvent): """ Handle stop. diff --git a/api/core/app/task_pipeline/message_cycle_manager.py b/api/core/app/task_pipeline/message_cycle_manager.py index 0e7f300cee..f9f341fcea 100644 --- a/api/core/app/task_pipeline/message_cycle_manager.py +++ b/api/core/app/task_pipeline/message_cycle_manager.py @@ -232,15 +232,31 @@ class MessageCycleManager: answer: str, message_id: str, from_variable_selector: list[str] | None = None, + chunk_type: str | None = None, + tool_call_id: str | None = None, + tool_name: str | None = None, + tool_arguments: str | None = None, + tool_files: list[str] | None = None, + tool_error: str | None = None, + tool_elapsed_time: float | None = None, + tool_icon: str | dict | None = None, + tool_icon_dark: str | dict | None = None, event_type: StreamEvent | None = None, ) -> MessageStreamResponse: """ Message to stream response.
:param answer: answer :param message_id: message id + :param from_variable_selector: from variable selector + :param chunk_type: type of the chunk (text, tool_call, tool_result, thought) + :param tool_call_id: unique identifier for this tool call + :param tool_name: name of the tool being called + :param tool_arguments: accumulated tool arguments JSON + :param tool_files: file IDs produced by tool + :param tool_error: error message if tool failed :return: """ - return MessageStreamResponse( + response = MessageStreamResponse( task_id=self._application_generate_entity.task_id, id=message_id, answer=answer, @@ -248,6 +264,35 @@ class MessageCycleManager: event=event_type or StreamEvent.MESSAGE, ) + if chunk_type: + response = response.model_copy(update={"chunk_type": chunk_type}) + + if chunk_type == "tool_call": + response = response.model_copy( + update={ + "tool_call_id": tool_call_id, + "tool_name": tool_name, + "tool_arguments": tool_arguments, + "tool_icon": tool_icon, + "tool_icon_dark": tool_icon_dark, + } + ) + elif chunk_type == "tool_result": + response = response.model_copy( + update={ + "tool_call_id": tool_call_id, + "tool_name": tool_name, + "tool_arguments": tool_arguments, + "tool_files": tool_files, + "tool_error": tool_error, + "tool_elapsed_time": tool_elapsed_time, + "tool_icon": tool_icon, + "tool_icon_dark": tool_icon_dark, + } + ) + + return response + def message_replace_to_stream_response(self, answer: str, reason: str = "") -> MessageReplaceStreamResponse: """ Message replace to stream response.
diff --git a/api/core/callback_handler/index_tool_callback_handler.py b/api/core/callback_handler/index_tool_callback_handler.py index d0279349ca..5249fea8cd 100644 --- a/api/core/callback_handler/index_tool_callback_handler.py +++ b/api/core/callback_handler/index_tool_callback_handler.py @@ -5,7 +5,6 @@ from sqlalchemy import select from core.app.apps.base_app_queue_manager import AppQueueManager, PublishFrom from core.app.entities.app_invoke_entities import InvokeFrom -from core.app.entities.queue_entities import QueueRetrieverResourcesEvent from core.rag.entities.citation_metadata import RetrievalSourceMetadata from core.rag.index_processor.constant.index_type import IndexStructureType from core.rag.models.document import Document @@ -90,6 +89,8 @@ class DatasetIndexToolCallbackHandler: # TODO(-LAN-): Improve type check def return_retriever_resource_info(self, resource: Sequence[RetrievalSourceMetadata]): """Handle return_retriever_resource_info.""" + from core.app.entities.queue_entities import QueueRetrieverResourcesEvent + self._queue_manager.publish( QueueRetrieverResourcesEvent(retriever_resources=resource), PublishFrom.APPLICATION_MANAGER ) diff --git a/api/core/entities/knowledge_entities.py b/api/core/entities/knowledge_entities.py index d4093b5245..b1ba3c3e2a 100644 --- a/api/core/entities/knowledge_entities.py +++ b/api/core/entities/knowledge_entities.py @@ -3,6 +3,7 @@ from pydantic import BaseModel, Field, field_validator class PreviewDetail(BaseModel): content: str + summary: str | None = None child_chunks: list[str] | None = None diff --git a/api/core/indexing_runner.py b/api/core/indexing_runner.py index f1b50f360b..599a655ab9 100644 --- a/api/core/indexing_runner.py +++ b/api/core/indexing_runner.py @@ -311,14 +311,18 @@ class IndexingRunner: qa_preview_texts: list[QAPreviewDetail] = [] total_segments = 0 + # doc_form represents the segmentation method (general, parent-child, QA) index_type = doc_form index_processor = 
IndexProcessorFactory(index_type).init_index_processor() + # one extract_setting is one source document for extract_setting in extract_settings: # extract processing_rule = DatasetProcessRule( mode=tmp_processing_rule["mode"], rules=json.dumps(tmp_processing_rule["rules"]) ) + # Extract document content text_docs = index_processor.extract(extract_setting, process_rule_mode=tmp_processing_rule["mode"]) + # Cleaning and segmentation documents = index_processor.transform( text_docs, current_user=None, @@ -361,6 +365,12 @@ class IndexingRunner: if doc_form and doc_form == "qa_model": return IndexingEstimate(total_segments=total_segments * 20, qa_preview=qa_preview_texts, preview=[]) + + # Generate summary preview + summary_index_setting = tmp_processing_rule["summary_index_setting"] if "summary_index_setting" in tmp_processing_rule else None + if summary_index_setting and summary_index_setting.get('enable') and preview_texts: + preview_texts = index_processor.generate_summary_preview(tenant_id, preview_texts, summary_index_setting) + return IndexingEstimate(total_segments=total_segments, preview=preview_texts) def _extract( diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py index ec2b7f2d44..1fbf279309 100644 --- a/api/core/llm_generator/prompts.py +++ b/api/core/llm_generator/prompts.py @@ -434,3 +434,6 @@ INSTRUCTION_GENERATE_TEMPLATE_PROMPT = """The output of this prompt is not as ex You should edit the prompt according to the IDEAL OUTPUT.""" INSTRUCTION_GENERATE_TEMPLATE_CODE = """Please fix the errors in the {{#error_message#}}.""" + +DEFAULT_GENERATOR_SUMMARY_PROMPT = """ +You are a helpful assistant that summarizes long pieces of text into concise summaries. Given the following text, generate a brief summary that captures the main points and key information. The summary should be clear, concise, and written in complete sentences. 
""" diff --git a/api/core/rag/datasource/retrieval_service.py b/api/core/rag/datasource/retrieval_service.py index 8ec1ce6242..65437e97b6 100644 --- a/api/core/rag/datasource/retrieval_service.py +++ b/api/core/rag/datasource/retrieval_service.py @@ -392,6 +392,69 @@ class RetrievalService: records = [] include_segment_ids = set() segment_child_map = {} + segment_file_map = {} + segment_summary_map = {} # Map segment_id to summary content + summary_segment_ids = set() # Track segments retrieved via summary + with Session(bind=db.engine, expire_on_commit=False) as session: + # Process documents + for document in documents: + segment_id = None + attachment_info = None + child_chunk = None + document_id = document.metadata.get("document_id") + if document_id not in dataset_documents: + continue + + dataset_document = dataset_documents[document_id] + if not dataset_document: + continue + + if dataset_document.doc_form == IndexStructureType.PARENT_CHILD_INDEX: + # Handle parent-child documents + if document.metadata.get("doc_type") == DocType.IMAGE: + attachment_info_dict = cls.get_segment_attachment_info( + dataset_document.dataset_id, + dataset_document.tenant_id, + document.metadata.get("doc_id") or "", + session, + ) + if attachment_info_dict: + attachment_info = attachment_info_dict["attachment_info"] + segment_id = attachment_info_dict["segment_id"] + else: + # Check if this is a summary document + is_summary = document.metadata.get("is_summary", False) + if is_summary: + # For summary documents, find the original chunk via original_chunk_id + original_chunk_id = document.metadata.get("original_chunk_id") + if not original_chunk_id: + continue + segment_id = original_chunk_id + # Track that this segment was retrieved via summary + summary_segment_ids.add(segment_id) + else: + # For normal documents, find by child chunk index_node_id + child_index_node_id = document.metadata.get("doc_id") + child_chunk_stmt = select(ChildChunk).where(ChildChunk.index_node_id == 
child_index_node_id) + child_chunk = session.scalar(child_chunk_stmt) + + if not child_chunk: + continue + segment_id = child_chunk.segment_id + + if not segment_id: + continue + + segment = ( + session.query(DocumentSegment) + .where( + DocumentSegment.dataset_id == dataset_document.dataset_id, + DocumentSegment.enabled == True, + DocumentSegment.status == "completed", + DocumentSegment.id == segment_id, + ) + .first() + ) valid_dataset_documents = {} image_doc_ids: list[Any] = [] @@ -507,7 +570,47 @@ class RetrievalService: max_score = max( max_score, file_document.metadata.get("score", 0.0) if file_document else 0.0 ) + segment = session.scalar(document_segment_stmt) + if segment: + segment_file_map[segment.id] = [attachment_info] + else: + # Check if this is a summary document + is_summary = document.metadata.get("is_summary", False) + if is_summary: + # For summary documents, find the original chunk via original_chunk_id + original_chunk_id = document.metadata.get("original_chunk_id") + if not original_chunk_id: + continue + # Track that this segment was retrieved via summary + summary_segment_ids.add(original_chunk_id) + document_segment_stmt = select(DocumentSegment).where( + DocumentSegment.dataset_id == dataset_document.dataset_id, + DocumentSegment.enabled == True, + DocumentSegment.status == "completed", + DocumentSegment.id == original_chunk_id, + ) + segment = session.scalar(document_segment_stmt) + else: + # For normal documents, find by index_node_id + index_node_id = document.metadata.get("doc_id") + if not index_node_id: + continue + document_segment_stmt = select(DocumentSegment).where( + DocumentSegment.dataset_id == dataset_document.dataset_id, + DocumentSegment.enabled == True, + DocumentSegment.status == "completed", + DocumentSegment.index_node_id == index_node_id, + ) + segment = session.scalar(document_segment_stmt) + if not segment: + continue + if segment.id not in include_segment_ids: + include_segment_ids.add(segment.id) + record = { + 
"segment": segment, + "score": document.metadata.get("score"), # type: ignore + } map_detail = { "max_score": max_score, "child_chunks": child_chunk_details, @@ -542,6 +645,23 @@ class RetrievalService: if record["segment"].id in attachment_map: record["files"] = attachment_map[record["segment"].id] # type: ignore[assignment] + # Batch query summaries for segments retrieved via summary (only enabled summaries) + if summary_segment_ids: + from models.dataset import DocumentSegmentSummary + + summaries = ( + session.query(DocumentSegmentSummary) + .filter( + DocumentSegmentSummary.chunk_id.in_(summary_segment_ids), + DocumentSegmentSummary.status == "completed", + DocumentSegmentSummary.enabled == True, # Only retrieve enabled summaries + ) + .all() + ) + for summary in summaries: + if summary.summary_content: + segment_summary_map[summary.chunk_id] = summary.summary_content + result: list[RetrievalSegments] = [] for record in records: # Extract segment @@ -576,9 +696,16 @@ class RetrievalService: else None ) + # Extract summary if this segment was retrieved via summary + summary_content = segment_summary_map.get(segment.id) + # Create RetrievalSegments object retrieval_segment = RetrievalSegments( - segment=segment, child_chunks=child_chunks_list, score=score, files=files + segment=segment, + child_chunks=child_chunks_list, + score=score, + files=files, + summary=summary_content ) result.append(retrieval_segment) diff --git a/api/core/rag/embedding/retrieval.py b/api/core/rag/embedding/retrieval.py index b54a37b49e..f6834ab87b 100644 --- a/api/core/rag/embedding/retrieval.py +++ b/api/core/rag/embedding/retrieval.py @@ -20,3 +20,4 @@ class RetrievalSegments(BaseModel): child_chunks: list[RetrievalChildChunk] | None = None score: float | None = None files: list[dict[str, str | int]] | None = None + summary: str | None = None # Summary content if retrieved via summary index diff --git a/api/core/rag/index_processor/index_processor_base.py 
b/api/core/rag/index_processor/index_processor_base.py index e36b54eedd..8bbdf8ba39 100644 --- a/api/core/rag/index_processor/index_processor_base.py +++ b/api/core/rag/index_processor/index_processor_base.py @@ -13,6 +13,7 @@ from urllib.parse import unquote, urlparse import httpx from configs import dify_config +from core.entities.knowledge_entities import PreviewDetail from core.helper import ssrf_proxy from core.rag.extractor.entity.extract_setting import ExtractSetting from core.rag.index_processor.constant.doc_type import DocType @@ -45,6 +46,15 @@ class BaseIndexProcessor(ABC): def transform(self, documents: list[Document], current_user: Account | None = None, **kwargs) -> list[Document]: raise NotImplementedError + @abstractmethod + def generate_summary_preview(self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict) -> list[PreviewDetail]: + """ + For each segment in preview_texts, generate a summary using LLM and attach it to the segment. + The summary can be stored in a new attribute, e.g., summary. + This method should be implemented by subclasses. 
+ """ + raise NotImplementedError + @abstractmethod def load( self, diff --git a/api/core/rag/index_processor/processor/paragraph_index_processor.py b/api/core/rag/index_processor/processor/paragraph_index_processor.py index cf68cff7dc..89a6d80306 100644 --- a/api/core/rag/index_processor/processor/paragraph_index_processor.py +++ b/api/core/rag/index_processor/processor/paragraph_index_processor.py @@ -1,9 +1,13 @@ """Paragraph index processor.""" +import logging import uuid from collections.abc import Mapping from typing import Any +logger = logging.getLogger(__name__) + +from core.entities.knowledge_entities import PreviewDetail from core.rag.cleaner.clean_processor import CleanProcessor from core.rag.datasource.keyword.keyword_factory import Keyword from core.rag.datasource.retrieval_service import RetrievalService @@ -17,12 +21,19 @@ from core.rag.index_processor.index_processor_base import BaseIndexProcessor from core.rag.models.document import AttachmentDocument, Document, MultimodalGeneralStructureChunk from core.rag.retrieval.retrieval_methods import RetrievalMethod from core.tools.utils.text_processing_utils import remove_leading_symbols +from extensions.ext_database import db from libs import helper from models.account import Account -from models.dataset import Dataset, DatasetProcessRule +from models.dataset import Dataset, DatasetProcessRule, DocumentSegment from models.dataset import Document as DatasetDocument from services.account_service import AccountService from services.entities.knowledge_entities.knowledge_entities import Rule +from services.summary_index_service import SummaryIndexService +from core.llm_generator.prompts import DEFAULT_GENERATOR_SUMMARY_PROMPT +from core.model_runtime.entities.message_entities import UserPromptMessage +from core.model_runtime.entities.model_entities import ModelType +from core.provider_manager import ProviderManager +from core.model_manager import ModelInstance class 
ParagraphIndexProcessor(BaseIndexProcessor): @@ -108,6 +119,29 @@ class ParagraphIndexProcessor(BaseIndexProcessor): keyword.add_texts(documents) def clean(self, dataset: Dataset, node_ids: list[str] | None, with_keywords: bool = True, **kwargs): + # Note: Summary indexes are now disabled (not deleted) when segments are disabled. + # This method is called for actual deletion scenarios (e.g., when segment is deleted). + # For disable operations, disable_summaries_for_segments is called directly in the task. + # Only delete summaries if explicitly requested (e.g., when segment is actually deleted) + delete_summaries = kwargs.get("delete_summaries", False) + if delete_summaries: + if node_ids: + # Find segments by index_node_id + segments = ( + db.session.query(DocumentSegment) + .filter( + DocumentSegment.dataset_id == dataset.id, + DocumentSegment.index_node_id.in_(node_ids), + ) + .all() + ) + segment_ids = [segment.id for segment in segments] + if segment_ids: + SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids) + else: + # Delete all summaries for the dataset + SummaryIndexService.delete_summaries_for_segments(dataset, None) + if dataset.indexing_technique == "high_quality": vector = Vector(dataset) if node_ids: @@ -227,3 +261,70 @@ class ParagraphIndexProcessor(BaseIndexProcessor): } else: raise ValueError("Chunks is not a list") + + def generate_summary_preview(self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict) -> list[PreviewDetail]: + """ + For each segment, concurrently call generate_summary to generate a summary + and write it to the summary attribute of PreviewDetail. 
+ """ + import concurrent.futures + from flask import current_app + + # Capture Flask app context for worker threads + flask_app = None + try: + flask_app = current_app._get_current_object() # type: ignore + except RuntimeError: + logger.warning("No Flask application context available, summary generation may fail") + + def process(preview: PreviewDetail) -> None: + """Generate summary for a single preview item.""" + try: + if flask_app: + # Ensure Flask app context in worker thread + with flask_app.app_context(): + summary = self.generate_summary(tenant_id, preview.content, summary_index_setting) + preview.summary = summary + else: + # Fallback: try without app context (may fail) + summary = self.generate_summary(tenant_id, preview.content, summary_index_setting) + preview.summary = summary + except Exception as e: + logger.error(f"Failed to generate summary for preview: {str(e)}") + # Don't fail the entire preview if summary generation fails + preview.summary = None + + with concurrent.futures.ThreadPoolExecutor() as executor: + list(executor.map(process, preview_texts)) + return preview_texts + + @staticmethod + def generate_summary(tenant_id: str, text: str, summary_index_setting: dict = None) -> str: + """ + Generate summary for the given text using ModelInstance.invoke_llm and the default or custom summary prompt. 
+ """ + if not summary_index_setting or not summary_index_setting.get("enable"): + raise ValueError("summary_index_setting is required and must be enabled to generate summary.") + + model_name = summary_index_setting.get("model_name") + model_provider_name = summary_index_setting.get("model_provider_name") + summary_prompt = summary_index_setting.get("summary_prompt") + + # Import default summary prompt + if not summary_prompt: + summary_prompt = DEFAULT_GENERATOR_SUMMARY_PROMPT + + prompt = f"{summary_prompt}\n{text}" + + provider_manager = ProviderManager() + provider_model_bundle = provider_manager.get_provider_model_bundle(tenant_id, model_provider_name, ModelType.LLM) + model_instance = ModelInstance(provider_model_bundle, model_name) + prompt_messages = [UserPromptMessage(content=prompt)] + + result = model_instance.invoke_llm( + prompt_messages=prompt_messages, + model_parameters={}, + stream=False + ) + + return getattr(result.message, "content", "") diff --git a/api/core/rag/index_processor/processor/parent_child_index_processor.py b/api/core/rag/index_processor/processor/parent_child_index_processor.py index 0366f3259f..7e33ef9c02 100644 --- a/api/core/rag/index_processor/processor/parent_child_index_processor.py +++ b/api/core/rag/index_processor/processor/parent_child_index_processor.py @@ -25,6 +25,7 @@ from models.dataset import ChildChunk, Dataset, DatasetProcessRule, DocumentSegm from models.dataset import Document as DatasetDocument from services.account_service import AccountService from services.entities.knowledge_entities.knowledge_entities import ParentMode, Rule +from services.summary_index_service import SummaryIndexService class ParentChildIndexProcessor(BaseIndexProcessor): @@ -135,6 +136,29 @@ class ParentChildIndexProcessor(BaseIndexProcessor): def clean(self, dataset: Dataset, node_ids: list[str] | None, with_keywords: bool = True, **kwargs): # node_ids is segment's node_ids + # Note: Summary indexes are now disabled (not deleted) when 
segments are disabled. + # This method is called for actual deletion scenarios (e.g., when segment is deleted). + # For disable operations, disable_summaries_for_segments is called directly in the task. + # Only delete summaries if explicitly requested (e.g., when segment is actually deleted) + delete_summaries = kwargs.get("delete_summaries", False) + if delete_summaries: + if node_ids: + # Find segments by index_node_id + segments = ( + db.session.query(DocumentSegment) + .filter( + DocumentSegment.dataset_id == dataset.id, + DocumentSegment.index_node_id.in_(node_ids), + ) + .all() + ) + segment_ids = [segment.id for segment in segments] + if segment_ids: + SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids) + else: + # Delete all summaries for the dataset + SummaryIndexService.delete_summaries_for_segments(dataset, None) + if dataset.indexing_technique == "high_quality": delete_child_chunks = kwargs.get("delete_child_chunks") or False precomputed_child_node_ids = kwargs.get("precomputed_child_node_ids") diff --git a/api/core/rag/index_processor/processor/qa_index_processor.py b/api/core/rag/index_processor/processor/qa_index_processor.py index 1183d5fbd7..b38af0cacb 100644 --- a/api/core/rag/index_processor/processor/qa_index_processor.py +++ b/api/core/rag/index_processor/processor/qa_index_processor.py @@ -25,9 +25,10 @@ from core.rag.retrieval.retrieval_methods import RetrievalMethod from core.tools.utils.text_processing_utils import remove_leading_symbols from libs import helper from models.account import Account -from models.dataset import Dataset +from models.dataset import Dataset, DocumentSegment from models.dataset import Document as DatasetDocument from services.entities.knowledge_entities.knowledge_entities import Rule +from services.summary_index_service import SummaryIndexService logger = logging.getLogger(__name__) @@ -144,6 +145,30 @@ class QAIndexProcessor(BaseIndexProcessor): vector.create_multimodal(multimodal_documents) 
def clean(self, dataset: Dataset, node_ids: list[str] | None, with_keywords: bool = True, **kwargs): + # Note: Summary indexes are now disabled (not deleted) when segments are disabled. + # This method is called for actual deletion scenarios (e.g., when segment is deleted). + # For disable operations, disable_summaries_for_segments is called directly in the task. + # Note: qa_model doesn't generate summaries, but we clean them for completeness + # Only delete summaries if explicitly requested (e.g., when segment is actually deleted) + delete_summaries = kwargs.get("delete_summaries", False) + if delete_summaries: + if node_ids: + # Find segments by index_node_id + segments = ( + db.session.query(DocumentSegment) + .filter( + DocumentSegment.dataset_id == dataset.id, + DocumentSegment.index_node_id.in_(node_ids), + ) + .all() + ) + segment_ids = [segment.id for segment in segments] + if segment_ids: + SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids) + else: + # Delete all summaries for the dataset + SummaryIndexService.delete_summaries_for_segments(dataset, None) + vector = Vector(dataset) if node_ids: vector.delete_by_ids(node_ids) diff --git a/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py b/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py index 4436773d25..a45d1d1046 100644 --- a/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py +++ b/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py @@ -29,6 +29,7 @@ from models import ( Account, CreatorUserRole, EndUser, + LLMGenerationDetail, WorkflowNodeExecutionModel, WorkflowNodeExecutionTriggeredFrom, ) @@ -457,6 +458,113 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) session.merge(db_model) session.flush() + # Save LLMGenerationDetail for LLM nodes with successful execution + if ( + domain_model.node_type == NodeType.LLM + and domain_model.status == 
WorkflowNodeExecutionStatus.SUCCEEDED + and domain_model.outputs is not None + ): + self._save_llm_generation_detail(session, domain_model) + + def _save_llm_generation_detail(self, session, execution: WorkflowNodeExecution) -> None: + """ + Save LLM generation detail for LLM nodes. + Extracts reasoning_content, tool_calls, and sequence from outputs and metadata. + """ + outputs = execution.outputs or {} + metadata = execution.metadata or {} + + reasoning_list = self._extract_reasoning(outputs) + tool_calls_list = self._extract_tool_calls(metadata.get(WorkflowNodeExecutionMetadataKey.AGENT_LOG)) + + if not reasoning_list and not tool_calls_list: + return + + sequence = self._build_generation_sequence(outputs.get("text", ""), reasoning_list, tool_calls_list) + self._upsert_generation_detail(session, execution, reasoning_list, tool_calls_list, sequence) + + def _extract_reasoning(self, outputs: Mapping[str, Any]) -> list[str]: + """Extract reasoning_content as a clean list of non-empty strings.""" + reasoning_content = outputs.get("reasoning_content") + if isinstance(reasoning_content, str): + trimmed = reasoning_content.strip() + return [trimmed] if trimmed else [] + if isinstance(reasoning_content, list): + return [item.strip() for item in reasoning_content if isinstance(item, str) and item.strip()] + return [] + + def _extract_tool_calls(self, agent_log: Any) -> list[dict[str, str]]: + """Extract tool call records from agent logs.""" + if not agent_log or not isinstance(agent_log, list): + return [] + + tool_calls: list[dict[str, str]] = [] + for log in agent_log: + log_data = log.data if hasattr(log, "data") else (log.get("data", {}) if isinstance(log, dict) else {}) + tool_name = log_data.get("tool_name") + if tool_name and str(tool_name).strip(): + tool_calls.append( + { + "id": log_data.get("tool_call_id", ""), + "name": tool_name, + "arguments": json.dumps(log_data.get("tool_args", {})), + "result": str(log_data.get("output", "")), + } + ) + return tool_calls 
+ + def _build_generation_sequence( + self, text: str, reasoning_list: list[str], tool_calls_list: list[dict[str, str]] + ) -> list[dict[str, Any]]: + """Build a simple content/reasoning/tool_call sequence.""" + sequence: list[dict[str, Any]] = [] + if text: + sequence.append({"type": "content", "start": 0, "end": len(text)}) + for index in range(len(reasoning_list)): + sequence.append({"type": "reasoning", "index": index}) + for index in range(len(tool_calls_list)): + sequence.append({"type": "tool_call", "index": index}) + return sequence + + def _upsert_generation_detail( + self, + session, + execution: WorkflowNodeExecution, + reasoning_list: list[str], + tool_calls_list: list[dict[str, str]], + sequence: list[dict[str, Any]], + ) -> None: + """Insert or update LLMGenerationDetail with serialized fields.""" + existing = ( + session.query(LLMGenerationDetail) + .filter_by( + workflow_run_id=execution.workflow_execution_id, + node_id=execution.node_id, + ) + .first() + ) + + reasoning_json = json.dumps(reasoning_list) if reasoning_list else None + tool_calls_json = json.dumps(tool_calls_list) if tool_calls_list else None + sequence_json = json.dumps(sequence) if sequence else None + + if existing: + existing.reasoning_content = reasoning_json + existing.tool_calls = tool_calls_json + existing.sequence = sequence_json + return + + generation_detail = LLMGenerationDetail( + tenant_id=self._tenant_id, + app_id=self._app_id, + workflow_run_id=execution.workflow_execution_id, + node_id=execution.node_id, + reasoning_content=reasoning_json, + tool_calls=tool_calls_json, + sequence=sequence_json, + ) + session.add(generation_detail) + def get_db_models_by_workflow_run( self, workflow_run_id: str, diff --git a/api/core/tools/__base/tool.py b/api/core/tools/__base/tool.py index ebd200a822..24fc11aefc 100644 --- a/api/core/tools/__base/tool.py +++ b/api/core/tools/__base/tool.py @@ -8,6 +8,7 @@ from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from models.model 
import File +from core.model_runtime.entities.message_entities import PromptMessageTool from core.tools.__base.tool_runtime import ToolRuntime from core.tools.entities.tool_entities import ( ToolEntity, @@ -154,6 +155,60 @@ class Tool(ABC): return parameters + def to_prompt_message_tool(self) -> PromptMessageTool: + message_tool = PromptMessageTool( + name=self.entity.identity.name, + description=self.entity.description.llm if self.entity.description else "", + parameters={ + "type": "object", + "properties": {}, + "required": [], + }, + ) + + parameters = self.get_merged_runtime_parameters() + for parameter in parameters: + if parameter.form != ToolParameter.ToolParameterForm.LLM: + continue + + parameter_type = parameter.type.as_normal_type() + if parameter.type in { + ToolParameter.ToolParameterType.SYSTEM_FILES, + ToolParameter.ToolParameterType.FILE, + ToolParameter.ToolParameterType.FILES, + }: + # Determine the description based on parameter type + if parameter.type == ToolParameter.ToolParameterType.FILE: + file_format_desc = " Input the file id with format: [File: file_id]." + else: + file_format_desc = "Input the file id with format: [Files: file_id1, file_id2, ...]. 
" + + message_tool.parameters["properties"][parameter.name] = { + "type": "string", + "description": (parameter.llm_description or "") + file_format_desc, + } + continue + enum = [] + if parameter.type == ToolParameter.ToolParameterType.SELECT: + enum = [option.value for option in parameter.options] if parameter.options else [] + + message_tool.parameters["properties"][parameter.name] = ( + { + "type": parameter_type, + "description": parameter.llm_description or "", + } + if parameter.input_schema is None + else parameter.input_schema + ) + + if len(enum) > 0: + message_tool.parameters["properties"][parameter.name]["enum"] = enum + + if parameter.required: + message_tool.parameters["required"].append(parameter.name) + + return message_tool + def create_image_message( self, image: str, diff --git a/api/core/workflow/entities/__init__.py b/api/core/workflow/entities/__init__.py index be70e467a0..0f3b9a5239 100644 --- a/api/core/workflow/entities/__init__.py +++ b/api/core/workflow/entities/__init__.py @@ -1,11 +1,16 @@ from .agent import AgentNodeStrategyInit from .graph_init_params import GraphInitParams +from .tool_entities import ToolCall, ToolCallResult, ToolResult, ToolResultStatus from .workflow_execution import WorkflowExecution from .workflow_node_execution import WorkflowNodeExecution __all__ = [ "AgentNodeStrategyInit", "GraphInitParams", + "ToolCall", + "ToolCallResult", + "ToolResult", + "ToolResultStatus", "WorkflowExecution", "WorkflowNodeExecution", ] diff --git a/api/core/workflow/entities/tool_entities.py b/api/core/workflow/entities/tool_entities.py new file mode 100644 index 0000000000..7e71a86849 --- /dev/null +++ b/api/core/workflow/entities/tool_entities.py @@ -0,0 +1,39 @@ +from enum import StrEnum + +from pydantic import BaseModel, Field + +from core.file import File + + +class ToolResultStatus(StrEnum): + SUCCESS = "success" + ERROR = "error" + + +class ToolCall(BaseModel): + id: str | None = Field(default=None, description="Unique 
class ToolResultStatus(StrEnum):
    """Terminal status of a tool execution."""

    SUCCESS = "success"
    ERROR = "error"


class ToolCall(BaseModel):
    """Streaming payload describing a tool invocation request."""

    id: str | None = Field(default=None, description="Unique identifier for this tool call")
    name: str | None = Field(default=None, description="Name of the tool being called")
    arguments: str | None = Field(default=None, description="Accumulated tool arguments JSON")
    icon: str | dict | None = Field(default=None, description="Icon of the tool")
    icon_dark: str | dict | None = Field(default=None, description="Dark theme icon of the tool")


class ToolResult(BaseModel):
    """Streaming payload describing the outcome of a tool invocation."""

    id: str | None = Field(default=None, description="Identifier of the tool call this result belongs to")
    name: str | None = Field(default=None, description="Name of the tool")
    output: str | None = Field(default=None, description="Tool output text, error or success message")
    files: list[str] = Field(default_factory=list, description="File produced by tool")
    status: ToolResultStatus | None = Field(default=ToolResultStatus.SUCCESS, description="Tool execution status")
    elapsed_time: float | None = Field(default=None, description="Elapsed seconds spent executing the tool")
    icon: str | dict | None = Field(default=None, description="Icon of the tool")
    icon_dark: str | dict | None = Field(default=None, description="Dark theme icon of the tool")


class ToolCallResult(BaseModel):
    """Combined call-plus-result record for a completed tool invocation."""

    id: str | None = Field(default=None, description="Identifier for the tool call")
    name: str | None = Field(default=None, description="Name of the tool")
    arguments: str | None = Field(default=None, description="Accumulated tool arguments JSON")
    output: str | None = Field(default=None, description="Tool output text, error or success message")
    files: list[File] = Field(default_factory=list, description="File produced by tool")
    status: ToolResultStatus = Field(default=ToolResultStatus.SUCCESS, description="Tool execution status")
    elapsed_time: float | None = Field(default=None, description="Elapsed seconds spent executing the tool")
a/api/core/workflow/enums.py +++ b/api/core/workflow/enums.py @@ -251,6 +251,8 @@ class WorkflowNodeExecutionMetadataKey(StrEnum): ERROR_STRATEGY = "error_strategy" # node in continue on error mode return the field LOOP_VARIABLE_MAP = "loop_variable_map" # single loop variable output DATASOURCE_INFO = "datasource_info" + LLM_CONTENT_SEQUENCE = "llm_content_sequence" + LLM_TRACE = "llm_trace" COMPLETED_REASON = "completed_reason" # completed reason for loop node diff --git a/api/core/workflow/graph_engine/response_coordinator/coordinator.py b/api/core/workflow/graph_engine/response_coordinator/coordinator.py index 98e0ea91ef..c5ea94ba80 100644 --- a/api/core/workflow/graph_engine/response_coordinator/coordinator.py +++ b/api/core/workflow/graph_engine/response_coordinator/coordinator.py @@ -16,7 +16,13 @@ from pydantic import BaseModel, Field from core.workflow.enums import NodeExecutionType, NodeState from core.workflow.graph import Graph -from core.workflow.graph_events import NodeRunStreamChunkEvent, NodeRunSucceededEvent +from core.workflow.graph_events import ( + ChunkType, + NodeRunStreamChunkEvent, + NodeRunSucceededEvent, + ToolCall, + ToolResult, +) from core.workflow.nodes.base.template import TextSegment, VariableSegment from core.workflow.runtime import VariablePool @@ -321,11 +327,24 @@ class ResponseStreamCoordinator: selector: Sequence[str], chunk: str, is_final: bool = False, + chunk_type: ChunkType = ChunkType.TEXT, + tool_call: ToolCall | None = None, + tool_result: ToolResult | None = None, ) -> NodeRunStreamChunkEvent: """Create a stream chunk event with consistent structure. For selectors with special prefixes (sys, env, conversation), we use the active response node's information since these are not actual node IDs. 
+ + Args: + node_id: The node ID to attribute the event to + execution_id: The execution ID for this node + selector: The variable selector + chunk: The chunk content + is_final: Whether this is the final chunk + chunk_type: The semantic type of the chunk being streamed + tool_call: Structured data for tool_call chunks + tool_result: Structured data for tool_result chunks """ # Check if this is a special selector that doesn't correspond to a node if selector and selector[0] not in self._graph.nodes and self._active_session: @@ -338,6 +357,9 @@ class ResponseStreamCoordinator: selector=selector, chunk=chunk, is_final=is_final, + chunk_type=chunk_type, + tool_call=tool_call, + tool_result=tool_result, ) # Standard case: selector refers to an actual node @@ -349,6 +371,9 @@ class ResponseStreamCoordinator: selector=selector, chunk=chunk, is_final=is_final, + chunk_type=chunk_type, + tool_call=tool_call, + tool_result=tool_result, ) def _process_variable_segment(self, segment: VariableSegment) -> tuple[Sequence[NodeRunStreamChunkEvent], bool]: @@ -356,6 +381,8 @@ class ResponseStreamCoordinator: Handles both regular node selectors and special system selectors (sys, env, conversation). For special selectors, we attribute the output to the active response node. + + For object-type variables, automatically streams all child fields that have stream events. 
""" events: list[NodeRunStreamChunkEvent] = [] source_selector_prefix = segment.selector[0] if segment.selector else "" @@ -364,60 +391,81 @@ class ResponseStreamCoordinator: # Determine which node to attribute the output to # For special selectors (sys, env, conversation), use the active response node # For regular selectors, use the source node - if self._active_session and source_selector_prefix not in self._graph.nodes: - # Special selector - use active response node - output_node_id = self._active_session.node_id - else: - # Regular node selector - output_node_id = source_selector_prefix + active_session = self._active_session + special_selector = bool(active_session and source_selector_prefix not in self._graph.nodes) + output_node_id = active_session.node_id if special_selector and active_session else source_selector_prefix execution_id = self._get_or_create_execution_id(output_node_id) - # Stream all available chunks - while self._has_unread_stream(segment.selector): - if event := self._pop_stream_chunk(segment.selector): - # For special selectors, we need to update the event to use - # the active response node's information - if self._active_session and source_selector_prefix not in self._graph.nodes: - response_node = self._graph.nodes[self._active_session.node_id] - # Create a new event with the response node's information - # but keep the original selector - updated_event = NodeRunStreamChunkEvent( - id=execution_id, - node_id=response_node.id, - node_type=response_node.node_type, - selector=event.selector, # Keep original selector - chunk=event.chunk, - is_final=event.is_final, - ) - events.append(updated_event) - else: - # Regular node selector - use event as is - events.append(event) + # Check if there's a direct stream for this selector + has_direct_stream = ( + tuple(segment.selector) in self._stream_buffers or tuple(segment.selector) in self._closed_streams + ) - # Check if this is the last chunk by looking ahead - stream_closed = 
self._is_stream_closed(segment.selector) - # Check if stream is closed to determine if segment is complete - if stream_closed: - is_complete = True + stream_targets = [segment.selector] if has_direct_stream else sorted(self._find_child_streams(segment.selector)) - elif value := self._variable_pool.get(segment.selector): - # Process scalar value - is_last_segment = bool( - self._active_session and self._active_session.index == len(self._active_session.template.segments) - 1 - ) - events.append( - self._create_stream_chunk_event( - node_id=output_node_id, - execution_id=execution_id, - selector=segment.selector, - chunk=value.markdown, - is_final=is_last_segment, + if stream_targets: + all_complete = True + + for target_selector in stream_targets: + while self._has_unread_stream(target_selector): + if event := self._pop_stream_chunk(target_selector): + events.append( + self._rewrite_stream_event( + event=event, + output_node_id=output_node_id, + execution_id=execution_id, + special_selector=bool(special_selector), + ) + ) + + if not self._is_stream_closed(target_selector): + all_complete = False + + is_complete = all_complete + + # Fallback: check if scalar value exists in variable pool + if not is_complete and not has_direct_stream: + if value := self._variable_pool.get(segment.selector): + # Process scalar value + is_last_segment = bool( + self._active_session + and self._active_session.index == len(self._active_session.template.segments) - 1 ) - ) - is_complete = True + events.append( + self._create_stream_chunk_event( + node_id=output_node_id, + execution_id=execution_id, + selector=segment.selector, + chunk=value.markdown, + is_final=is_last_segment, + ) + ) + is_complete = True return events, is_complete + def _rewrite_stream_event( + self, + event: NodeRunStreamChunkEvent, + output_node_id: str, + execution_id: str, + special_selector: bool, + ) -> NodeRunStreamChunkEvent: + """Rewrite event to attribute to active response node when selector is special.""" + 
if not special_selector: + return event + + return self._create_stream_chunk_event( + node_id=output_node_id, + execution_id=execution_id, + selector=event.selector, + chunk=event.chunk, + is_final=event.is_final, + chunk_type=event.chunk_type, + tool_call=event.tool_call, + tool_result=event.tool_result, + ) + def _process_text_segment(self, segment: TextSegment) -> Sequence[NodeRunStreamChunkEvent]: """Process a text segment. Returns (events, is_complete).""" assert self._active_session is not None @@ -513,6 +561,36 @@ class ResponseStreamCoordinator: # ============= Internal Stream Management Methods ============= + def _find_child_streams(self, parent_selector: Sequence[str]) -> list[tuple[str, ...]]: + """Find all child stream selectors that are descendants of the parent selector. + + For example, if parent_selector is ['llm', 'generation'], this will find: + - ['llm', 'generation', 'content'] + - ['llm', 'generation', 'tool_calls'] + - ['llm', 'generation', 'tool_results'] + - ['llm', 'generation', 'thought'] + + Args: + parent_selector: The parent selector to search for children + + Returns: + List of child selector tuples found in stream buffers or closed streams + """ + parent_key = tuple(parent_selector) + parent_len = len(parent_key) + child_streams: set[tuple[str, ...]] = set() + + # Search in both active buffers and closed streams + all_selectors = set(self._stream_buffers.keys()) | self._closed_streams + + for selector_key in all_selectors: + # Check if this selector is a direct child of the parent + # Direct child means: len(child) == len(parent) + 1 and child starts with parent + if len(selector_key) == parent_len + 1 and selector_key[:parent_len] == parent_key: + child_streams.add(selector_key) + + return sorted(child_streams) + def _append_stream_chunk(self, selector: Sequence[str], event: NodeRunStreamChunkEvent) -> None: """ Append a stream chunk to the internal buffer. 
diff --git a/api/core/workflow/graph_events/__init__.py b/api/core/workflow/graph_events/__init__.py index 7a5edbb331..4ee0ec94d2 100644 --- a/api/core/workflow/graph_events/__init__.py +++ b/api/core/workflow/graph_events/__init__.py @@ -36,6 +36,7 @@ from .loop import ( # Node events from .node import ( + ChunkType, NodeRunExceptionEvent, NodeRunFailedEvent, NodeRunPauseRequestedEvent, @@ -44,10 +45,13 @@ from .node import ( NodeRunStartedEvent, NodeRunStreamChunkEvent, NodeRunSucceededEvent, + ToolCall, + ToolResult, ) __all__ = [ "BaseGraphEvent", + "ChunkType", "GraphEngineEvent", "GraphNodeEventBase", "GraphRunAbortedEvent", @@ -73,4 +77,6 @@ __all__ = [ "NodeRunStartedEvent", "NodeRunStreamChunkEvent", "NodeRunSucceededEvent", + "ToolCall", + "ToolResult", ] diff --git a/api/core/workflow/graph_events/node.py b/api/core/workflow/graph_events/node.py index f225798d41..2ae4fcd919 100644 --- a/api/core/workflow/graph_events/node.py +++ b/api/core/workflow/graph_events/node.py @@ -1,10 +1,11 @@ from collections.abc import Sequence from datetime import datetime +from enum import StrEnum from pydantic import Field from core.rag.entities.citation_metadata import RetrievalSourceMetadata -from core.workflow.entities import AgentNodeStrategyInit +from core.workflow.entities import AgentNodeStrategyInit, ToolCall, ToolResult from core.workflow.entities.pause_reason import PauseReason from .base import GraphNodeEventBase @@ -21,13 +22,39 @@ class NodeRunStartedEvent(GraphNodeEventBase): provider_id: str = "" +class ChunkType(StrEnum): + """Stream chunk type for LLM-related events.""" + + TEXT = "text" # Normal text streaming + TOOL_CALL = "tool_call" # Tool call arguments streaming + TOOL_RESULT = "tool_result" # Tool execution result + THOUGHT = "thought" # Agent thinking process (ReAct) + THOUGHT_START = "thought_start" # Agent thought start + THOUGHT_END = "thought_end" # Agent thought end + + class NodeRunStreamChunkEvent(GraphNodeEventBase): - # Spec-compliant 
class NodeRunStreamChunkEvent(GraphNodeEventBase):
    """Stream chunk event for workflow node execution."""

    # Where the chunk belongs and what it carries.
    selector: Sequence[str] = Field(..., description="selector identifying the output location (e.g., ['nodeA', 'text'])")
    chunk: str = Field(..., description="the actual chunk content")
    is_final: bool = Field(default=False, description="indicates if this is the last chunk")
    chunk_type: ChunkType = Field(default=ChunkType.TEXT, description="type of the chunk")

    # Structured payloads, populated only for their matching chunk_type.
    tool_call: ToolCall | None = Field(default=None, description="structured payload for tool_call chunks")
    tool_result: ToolResult | None = Field(default=None, description="structured payload for tool_result chunks")
class ChunkType(StrEnum):
    """Stream chunk type for LLM-related events.

    NOTE(review): mirrors the enum of the same name in
    core.workflow.graph_events.node — keep the member sets in sync.
    """

    TEXT = "text"  # Normal text streaming
    TOOL_CALL = "tool_call"  # Tool call arguments streaming
    TOOL_RESULT = "tool_result"  # Tool execution result
    THOUGHT = "thought"  # Agent thinking process (ReAct)
    THOUGHT_START = "thought_start"  # Agent thought start
    THOUGHT_END = "thought_end"  # Agent thought end


class StreamChunkEvent(NodeEventBase):
    """Base stream chunk event - normal text streaming output."""

    selector: Sequence[str] = Field(..., description="selector identifying the output location (e.g., ['nodeA', 'text'])")
    chunk: str = Field(..., description="the actual chunk content")
    is_final: bool = Field(default=False, description="indicates if this is the last chunk")
    chunk_type: ChunkType = Field(default=ChunkType.TEXT, description="type of the chunk")
    tool_call: ToolCall | None = Field(default=None, description="structured payload for tool_call chunks")
    tool_result: ToolResult | None = Field(default=None, description="structured payload for tool_result chunks")


class ToolCallChunkEvent(StreamChunkEvent):
    """Tool call streaming event - tool call arguments streaming output."""

    # chunk_type is pinned for this subclass.
    chunk_type: ChunkType = Field(default=ChunkType.TOOL_CALL, frozen=True)
    tool_call: ToolCall | None = Field(default=None, description="structured tool call payload")


class ToolResultChunkEvent(StreamChunkEvent):
    """Tool result event - tool execution result."""

    # chunk_type is pinned for this subclass.
    chunk_type: ChunkType = Field(default=ChunkType.TOOL_RESULT, frozen=True)
    tool_result: ToolResult | None = Field(default=None, description="structured tool result payload")
class ThoughtStartChunkEvent(StreamChunkEvent):
    """Agent thought start streaming event - Agent thinking process (ReAct)."""

    # chunk_type is pinned for this subclass.
    chunk_type: ChunkType = Field(default=ChunkType.THOUGHT_START, frozen=True)


class ThoughtEndChunkEvent(StreamChunkEvent):
    """Agent thought end streaming event - Agent thinking process (ReAct)."""

    # chunk_type is pinned for this subclass.
    chunk_type: ChunkType = Field(default=ChunkType.THOUGHT_END, frozen=True)


class ThoughtChunkEvent(StreamChunkEvent):
    """Agent thought streaming event - Agent thinking process (ReAct).

    NOTE(review): ThoughtStartChunkEvent / ThoughtEndChunkEvent are not listed
    in the package __all__ while this class is — confirm whether that is
    intentional.
    """

    # chunk_type is pinned for this subclass.
    chunk_type: ChunkType = Field(default=ChunkType.THOUGHT, frozen=True)
id=self._node_execution_id, + node_id=self._node_id, + node_type=self.node_type, + selector=event.selector, + chunk=event.chunk, + is_final=event.is_final, + chunk_type=ChunkType.TOOL_CALL, + tool_call=event.tool_call, + ) + + @_dispatch.register + def _(self, event: ToolResultChunkEvent) -> NodeRunStreamChunkEvent: + from core.workflow.entities import ToolResult, ToolResultStatus + from core.workflow.graph_events import ChunkType + + tool_result = event.tool_result or ToolResult() + status: ToolResultStatus = tool_result.status or ToolResultStatus.SUCCESS + tool_result = tool_result.model_copy( + update={"status": status, "files": tool_result.files or []}, + ) + + return NodeRunStreamChunkEvent( + id=self._node_execution_id, + node_id=self._node_id, + node_type=self.node_type, + selector=event.selector, + chunk=event.chunk, + is_final=event.is_final, + chunk_type=ChunkType.TOOL_RESULT, + tool_result=tool_result, + ) + + @_dispatch.register + def _(self, event: ThoughtChunkEvent) -> NodeRunStreamChunkEvent: + from core.workflow.graph_events import ChunkType + + return NodeRunStreamChunkEvent( + id=self._node_execution_id, + node_id=self._node_id, + node_type=self.node_type, + selector=event.selector, + chunk=event.chunk, + is_final=event.is_final, + chunk_type=ChunkType.THOUGHT, ) @_dispatch.register diff --git a/api/core/workflow/nodes/document_extractor/node.py b/api/core/workflow/nodes/document_extractor/node.py index 14ebd1f9ae..2cbd7952ba 100644 --- a/api/core/workflow/nodes/document_extractor/node.py +++ b/api/core/workflow/nodes/document_extractor/node.py @@ -62,6 +62,21 @@ class DocumentExtractorNode(Node[DocumentExtractorNodeData]): inputs = {"variable_selector": variable_selector} process_data = {"documents": value if isinstance(value, list) else [value]} + # Ensure storage_key is loaded for File objects + files_to_check = value if isinstance(value, list) else [value] + files_needing_storage_key = [ + f for f in files_to_check + if isinstance(f, File) and 
not f.storage_key and f.related_id + ] + if files_needing_storage_key: + from factories.file_factory import StorageKeyLoader + from extensions.ext_database import db + from sqlalchemy.orm import Session + + with Session(bind=db.engine) as session: + storage_key_loader = StorageKeyLoader(session, tenant_id=self.tenant_id) + storage_key_loader.load_storage_keys(files_needing_storage_key) + try: if isinstance(value, list): extracted_text_list = list(map(_extract_text_from_file, value)) @@ -415,6 +430,15 @@ def _download_file_content(file: File) -> bytes: response.raise_for_status() return response.content else: + # Check if storage_key is set + if not file.storage_key: + raise FileDownloadError(f"File storage_key is missing for file: {file.filename}") + + # Check if file exists before downloading + from extensions.ext_storage import storage + if not storage.exists(file.storage_key): + raise FileDownloadError(f"File not found in storage: {file.storage_key}") + return file_manager.download(file) except Exception as e: raise FileDownloadError(f"Error downloading file: {str(e)}") from e diff --git a/api/core/workflow/nodes/knowledge_index/entities.py b/api/core/workflow/nodes/knowledge_index/entities.py index 3daca90b9b..bfeb9b5b79 100644 --- a/api/core/workflow/nodes/knowledge_index/entities.py +++ b/api/core/workflow/nodes/knowledge_index/entities.py @@ -158,3 +158,5 @@ class KnowledgeIndexNodeData(BaseNodeData): type: str = "knowledge-index" chunk_structure: str index_chunk_variable_selector: list[str] + indexing_technique: str | None = None + summary_index_setting: dict | None = None diff --git a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py index 17ca4bef7b..e056893cb8 100644 --- a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py +++ b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py @@ -1,9 +1,11 @@ +import concurrent.futures import datetime import 
logging import time from collections.abc import Mapping from typing import Any +from flask import current_app from sqlalchemy import func, select from core.app.entities.app_invoke_entities import InvokeFrom @@ -16,7 +18,9 @@ from core.workflow.nodes.base.node import Node from core.workflow.nodes.base.template import Template from core.workflow.runtime import VariablePool from extensions.ext_database import db -from models.dataset import Dataset, Document, DocumentSegment +from models.dataset import Dataset, Document, DocumentSegment, DocumentSegmentSummary +from services.summary_index_service import SummaryIndexService +from tasks.generate_summary_index_task import generate_summary_index_task from .entities import KnowledgeIndexNodeData from .exc import ( @@ -67,7 +71,18 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): # index knowledge try: if is_preview: - outputs = self._get_preview_output(node_data.chunk_structure, chunks) + # Preview mode: generate summaries for chunks directly without saving to database + # Format preview and generate summaries on-the-fly + # Get indexing_technique and summary_index_setting from node_data (workflow graph config) + # or fallback to dataset if not available in node_data + indexing_technique = node_data.indexing_technique or dataset.indexing_technique + summary_index_setting = node_data.summary_index_setting or dataset.summary_index_setting + + outputs = self._get_preview_output_with_summaries( + node_data.chunk_structure, chunks, dataset=dataset, + indexing_technique=indexing_technique, + summary_index_setting=summary_index_setting + ) return NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, inputs=variables, @@ -163,6 +178,9 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): db.session.commit() + # Generate summary index if enabled + self._handle_summary_index_generation(dataset, document, variable_pool) + return { "dataset_id": ds_id_value, "dataset_name": dataset_name_value, @@ -173,9 +191,269 @@ 
class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): "display_status": "completed", } - def _get_preview_output(self, chunk_structure: str, chunks: Any) -> Mapping[str, Any]: + def _handle_summary_index_generation( + self, + dataset: Dataset, + document: Document, + variable_pool: VariablePool, + ) -> None: + """ + Handle summary index generation based on mode (debug/preview or production). + + Args: + dataset: Dataset containing the document + document: Document to generate summaries for + variable_pool: Variable pool to check invoke_from + """ + # Only generate summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + return + + # Check if summary index is enabled + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + return + + # Skip qa_model documents + if document.doc_form == "qa_model": + return + + # Determine if in preview/debug mode + invoke_from = variable_pool.get(["sys", SystemVariableKey.INVOKE_FROM]) + is_preview = invoke_from and invoke_from.value == InvokeFrom.DEBUGGER + + # Determine if only parent chunks should be processed + only_parent_chunks = dataset.chunk_structure == "parent_child_index" + + if is_preview: + try: + # Query segments that need summary generation + query = db.session.query(DocumentSegment).filter_by( + dataset_id=dataset.id, + document_id=document.id, + status="completed", + enabled=True, + ) + segments = query.all() + + if not segments: + logger.info(f"No segments found for document {document.id}") + return + + # Filter segments based on mode + segments_to_process = [] + for segment in segments: + # Skip if summary already exists + existing_summary = ( + db.session.query(DocumentSegmentSummary) + .filter_by(chunk_id=segment.id, dataset_id=dataset.id, status="completed") + .first() + ) + if existing_summary: + continue + + # For parent-child mode, all segments are parent chunks, so process all + 
segments_to_process.append(segment) + + if not segments_to_process: + logger.info(f"No segments need summary generation for document {document.id}") + return + + # Use ThreadPoolExecutor for concurrent generation + flask_app = current_app._get_current_object() # type: ignore + max_workers = min(10, len(segments_to_process)) # Limit to 10 workers + + def process_segment(segment: DocumentSegment) -> None: + """Process a single segment in a thread with Flask app context.""" + with flask_app.app_context(): + try: + SummaryIndexService.generate_and_vectorize_summary( + segment, dataset, summary_index_setting + ) + except Exception as e: + logger.error(f"Failed to generate summary for segment {segment.id}: {str(e)}") + # Continue processing other segments + + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = [ + executor.submit(process_segment, segment) for segment in segments_to_process + ] + # Wait for all tasks to complete + concurrent.futures.wait(futures, timeout=300) + + logger.info( + f"Successfully generated summary index for {len(segments_to_process)} segments " + f"in document {document.id}" + ) + except Exception as e: + logger.exception(f"Failed to generate summary index for document {document.id}: {str(e)}") + # Don't fail the entire indexing process if summary generation fails + else: + # Production mode: asynchronous generation + logger.info(f"Queuing summary index generation task for document {document.id} (production mode)") + try: + generate_summary_index_task.delay(dataset.id, document.id, None) + logger.info(f"Summary index generation task queued for document {document.id}") + except Exception as e: + logger.exception(f"Failed to queue summary index generation task for document {document.id}: {str(e)}") + # Don't fail the entire indexing process if task queuing fails + + def _get_preview_output_with_summaries( + self, chunk_structure: str, chunks: Any, dataset: Dataset, + indexing_technique: str | None = 
None, + summary_index_setting: dict | None = None + ) -> Mapping[str, Any]: + """ + Generate preview output with summaries for chunks in preview mode. + This method generates summaries on-the-fly without saving to database. + + Args: + chunk_structure: Chunk structure type + chunks: Chunks to generate preview for + dataset: Dataset object (for tenant_id) + indexing_technique: Indexing technique from node config or dataset + summary_index_setting: Summary index setting from node config or dataset + """ index_processor = IndexProcessorFactory(chunk_structure).init_index_processor() - return index_processor.format_preview(chunks) + preview_output = index_processor.format_preview(chunks) + + # Check if summary index is enabled + if indexing_technique != "high_quality": + return preview_output + + if not summary_index_setting or not summary_index_setting.get("enable"): + return preview_output + + # Generate summaries for chunks + if "preview" in preview_output and isinstance(preview_output["preview"], list): + chunk_count = len(preview_output["preview"]) + logger.info( + f"Generating summaries for {chunk_count} chunks in preview mode " + f"(dataset: {dataset.id})" + ) + # Use ParagraphIndexProcessor's generate_summary method + from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor + + # Get Flask app for application context in worker threads + flask_app = None + try: + flask_app = current_app._get_current_object() # type: ignore + except RuntimeError: + logger.warning("No Flask application context available, summary generation may fail") + + def generate_summary_for_chunk(preview_item: dict) -> None: + """Generate summary for a single chunk.""" + if "content" in preview_item: + try: + # Set Flask application context in worker thread + if flask_app: + with flask_app.app_context(): + summary = ParagraphIndexProcessor.generate_summary( + tenant_id=dataset.tenant_id, + text=preview_item["content"], + 
summary_index_setting=summary_index_setting, + ) + if summary: + preview_item["summary"] = summary + else: + # Fallback: try without app context (may fail) + summary = ParagraphIndexProcessor.generate_summary( + tenant_id=dataset.tenant_id, + text=preview_item["content"], + summary_index_setting=summary_index_setting, + ) + if summary: + preview_item["summary"] = summary + except Exception as e: + logger.error(f"Failed to generate summary for chunk: {str(e)}") + # Don't fail the entire preview if summary generation fails + + # Generate summaries concurrently using ThreadPoolExecutor + # Set a reasonable timeout to prevent hanging (60 seconds per chunk, max 5 minutes total) + timeout_seconds = min(300, 60 * len(preview_output["preview"])) + with concurrent.futures.ThreadPoolExecutor(max_workers=min(10, len(preview_output["preview"]))) as executor: + futures = [ + executor.submit(generate_summary_for_chunk, preview_item) + for preview_item in preview_output["preview"] + ] + # Wait for all tasks to complete with timeout + done, not_done = concurrent.futures.wait(futures, timeout=timeout_seconds) + + # Cancel tasks that didn't complete in time + if not_done: + logger.warning( + f"Summary generation timeout: {len(not_done)} chunks did not complete within {timeout_seconds}s. " + "Cancelling remaining tasks..." 
+ ) + for future in not_done: + future.cancel() + # Wait a bit for cancellation to take effect + concurrent.futures.wait(not_done, timeout=5) + + completed_count = sum(1 for item in preview_output["preview"] if item.get("summary") is not None) + logger.info( + f"Completed summary generation for preview chunks: {completed_count}/{len(preview_output['preview'])} succeeded" + ) + + return preview_output + + def _get_preview_output( + self, chunk_structure: str, chunks: Any, dataset: Dataset | None = None, variable_pool: VariablePool | None = None + ) -> Mapping[str, Any]: + index_processor = IndexProcessorFactory(chunk_structure).init_index_processor() + preview_output = index_processor.format_preview(chunks) + + # If dataset is provided, try to enrich preview with summaries + if dataset and variable_pool: + document_id = variable_pool.get(["sys", SystemVariableKey.DOCUMENT_ID]) + if document_id: + document = db.session.query(Document).filter_by(id=document_id.value).first() + if document: + # Query summaries for this document + summaries = ( + db.session.query(DocumentSegmentSummary) + .filter_by( + dataset_id=dataset.id, + document_id=document.id, + status="completed", + enabled=True, + ) + .all() + ) + + if summaries: + # Create a map of segment content to summary for matching + # Use content matching as chunks in preview might not be indexed yet + summary_by_content = {} + for summary in summaries: + segment = ( + db.session.query(DocumentSegment) + .filter_by(id=summary.chunk_id, dataset_id=dataset.id) + .first() + ) + if segment: + # Normalize content for matching (strip whitespace) + normalized_content = segment.content.strip() + summary_by_content[normalized_content] = summary.summary_content + + # Enrich preview with summaries by content matching + if "preview" in preview_output and isinstance(preview_output["preview"], list): + matched_count = 0 + for preview_item in preview_output["preview"]: + if "content" in preview_item: + # Normalize content for 
matching + normalized_chunk_content = preview_item["content"].strip() + if normalized_chunk_content in summary_by_content: + preview_item["summary"] = summary_by_content[normalized_chunk_content] + matched_count += 1 + + if matched_count > 0: + logger.info( + f"Enriched preview with {matched_count} existing summaries " + f"(dataset: {dataset.id}, document: {document.id})" + ) + + return preview_output @classmethod def version(cls) -> str: diff --git a/api/core/workflow/nodes/llm/__init__.py b/api/core/workflow/nodes/llm/__init__.py index f7bc713f63..edd0d3d581 100644 --- a/api/core/workflow/nodes/llm/__init__.py +++ b/api/core/workflow/nodes/llm/__init__.py @@ -3,6 +3,7 @@ from .entities import ( LLMNodeCompletionModelPromptTemplate, LLMNodeData, ModelConfig, + ToolMetadata, VisionConfig, ) from .node import LLMNode @@ -13,5 +14,6 @@ __all__ = [ "LLMNodeCompletionModelPromptTemplate", "LLMNodeData", "ModelConfig", + "ToolMetadata", "VisionConfig", ] diff --git a/api/core/workflow/nodes/llm/entities.py b/api/core/workflow/nodes/llm/entities.py index fe6f2290aa..0d61e45003 100644 --- a/api/core/workflow/nodes/llm/entities.py +++ b/api/core/workflow/nodes/llm/entities.py @@ -1,10 +1,17 @@ +import re from collections.abc import Mapping, Sequence from typing import Any, Literal -from pydantic import BaseModel, Field, field_validator +from pydantic import BaseModel, ConfigDict, Field, field_validator +from core.agent.entities import AgentLog, AgentResult +from core.file import File from core.model_runtime.entities import ImagePromptMessageContent, LLMMode +from core.model_runtime.entities.llm_entities import LLMUsage from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig +from core.tools.entities.tool_entities import ToolProviderType +from core.workflow.entities import ToolCall, ToolCallResult +from core.workflow.node_events import AgentLogEvent from core.workflow.nodes.base import BaseNodeData from 
core.workflow.nodes.base.entities import VariableSelector @@ -58,6 +65,268 @@ class LLMNodeCompletionModelPromptTemplate(CompletionModelPromptTemplate): jinja2_text: str | None = None +class ToolMetadata(BaseModel): + """ + Tool metadata for LLM node with tool support. + + Defines the essential fields needed for tool configuration, + particularly the 'type' field to identify tool provider type. + """ + + # Core fields + enabled: bool = True + type: ToolProviderType = Field(..., description="Tool provider type: builtin, api, mcp, workflow") + provider_name: str = Field(..., description="Tool provider name/identifier") + tool_name: str = Field(..., description="Tool name") + + # Optional fields + plugin_unique_identifier: str | None = Field(None, description="Plugin unique identifier for plugin tools") + credential_id: str | None = Field(None, description="Credential ID for tools requiring authentication") + + # Configuration fields + parameters: dict[str, Any] = Field(default_factory=dict, description="Tool parameters") + settings: dict[str, Any] = Field(default_factory=dict, description="Tool settings configuration") + extra: dict[str, Any] = Field(default_factory=dict, description="Extra tool configuration like custom description") + + +class ModelTraceSegment(BaseModel): + """Model invocation trace segment with token usage and output.""" + + text: str | None = Field(None, description="Model output text content") + reasoning: str | None = Field(None, description="Reasoning/thought content from model") + tool_calls: list[ToolCall] = Field(default_factory=list, description="Tool calls made by the model") + + +class ToolTraceSegment(BaseModel): + """Tool invocation trace segment with call details and result.""" + + id: str | None = Field(default=None, description="Unique identifier for this tool call") + name: str | None = Field(default=None, description="Name of the tool being called") + arguments: str | None = Field(default=None, description="Accumulated tool 
arguments JSON") + output: str | None = Field(default=None, description="Tool call result") + + +class LLMTraceSegment(BaseModel): + """ + Streaming trace segment for LLM tool-enabled runs. + + Represents alternating model and tool invocations in sequence: + model -> tool -> model -> tool -> ... + + Each segment records its execution duration. + """ + + type: Literal["model", "tool"] + duration: float = Field(..., description="Execution duration in seconds") + usage: LLMUsage | None = Field(default=None, description="Token usage statistics for this model call") + output: ModelTraceSegment | ToolTraceSegment = Field(..., description="Output of the segment") + + # Common metadata for both model and tool segments + provider: str | None = Field(default=None, description="Model or tool provider identifier") + name: str | None = Field(default=None, description="Name of the model or tool") + icon: str | None = Field(default=None, description="Icon for the provider") + icon_dark: str | None = Field(default=None, description="Dark theme icon for the provider") + error: str | None = Field(default=None, description="Error message if segment failed") + status: Literal["success", "error"] | None = Field(default=None, description="Tool execution status") + + +class LLMGenerationData(BaseModel): + """Generation data from LLM invocation with tools. + + For multi-turn tool calls like: thought1 -> text1 -> tool_call1 -> thought2 -> text2 -> tool_call2 + - reasoning_contents: [thought1, thought2, ...] - one element per turn + - tool_calls: [{id, name, arguments, result}, ...] 
- all tool calls with results + """ + + text: str = Field(..., description="Accumulated text content from all turns") + reasoning_contents: list[str] = Field(default_factory=list, description="Reasoning content per turn") + tool_calls: list[ToolCallResult] = Field(default_factory=list, description="Tool calls with results") + sequence: list[dict[str, Any]] = Field(default_factory=list, description="Ordered segments for rendering") + usage: LLMUsage = Field(..., description="LLM usage statistics") + finish_reason: str | None = Field(None, description="Finish reason from LLM") + files: list[File] = Field(default_factory=list, description="Generated files") + trace: list[LLMTraceSegment] = Field(default_factory=list, description="Streaming trace in emitted order") + + +class ThinkTagStreamParser: + """Lightweight state machine to split streaming chunks by tags.""" + + _START_PATTERN = re.compile(r"]*)?>", re.IGNORECASE) + _END_PATTERN = re.compile(r"", re.IGNORECASE) + _START_PREFIX = " int: + """Return length of the longest suffix of `text` that is a prefix of `prefix`.""" + max_len = min(len(text), len(prefix) - 1) + for i in range(max_len, 0, -1): + if text[-i:].lower() == prefix[:i].lower(): + return i + return 0 + + def process(self, chunk: str) -> list[tuple[str, str]]: + """ + Split incoming chunk into ('thought' | 'text', content) tuples. + Content excludes the tags themselves and handles split tags across chunks. 
+ """ + parts: list[tuple[str, str]] = [] + self._buffer += chunk + + while self._buffer: + if self._in_think: + end_match = self._END_PATTERN.search(self._buffer) + if end_match: + thought_text = self._buffer[: end_match.start()] + if thought_text: + parts.append(("thought", thought_text)) + parts.append(("thought_end", "")) + self._buffer = self._buffer[end_match.end() :] + self._in_think = False + continue + + hold_len = self._suffix_prefix_len(self._buffer, self._END_PREFIX) + emit = self._buffer[: len(self._buffer) - hold_len] + if emit: + parts.append(("thought", emit)) + self._buffer = self._buffer[-hold_len:] if hold_len > 0 else "" + break + + start_match = self._START_PATTERN.search(self._buffer) + if start_match: + prefix = self._buffer[: start_match.start()] + if prefix: + parts.append(("text", prefix)) + self._buffer = self._buffer[start_match.end() :] + parts.append(("thought_start", "")) + self._in_think = True + continue + + hold_len = self._suffix_prefix_len(self._buffer, self._START_PREFIX) + emit = self._buffer[: len(self._buffer) - hold_len] + if emit: + parts.append(("text", emit)) + self._buffer = self._buffer[-hold_len:] if hold_len > 0 else "" + break + + cleaned_parts: list[tuple[str, str]] = [] + for kind, content in parts: + # Extra safeguard: strip any stray tags that slipped through. 
+ content = self._START_PATTERN.sub("", content) + content = self._END_PATTERN.sub("", content) + if content or kind in {"thought_start", "thought_end"}: + cleaned_parts.append((kind, content)) + + return cleaned_parts + + def flush(self) -> list[tuple[str, str]]: + """Flush remaining buffer when the stream ends.""" + if not self._buffer: + return [] + kind = "thought" if self._in_think else "text" + content = self._buffer + # Drop dangling partial tags instead of emitting them + if content.lower().startswith(self._START_PREFIX) or content.lower().startswith(self._END_PREFIX): + content = "" + self._buffer = "" + if not content and not self._in_think: + return [] + # Strip any complete tags that might still be present. + content = self._START_PATTERN.sub("", content) + content = self._END_PATTERN.sub("", content) + + result: list[tuple[str, str]] = [] + if content: + result.append((kind, content)) + if self._in_think: + result.append(("thought_end", "")) + self._in_think = False + return result + + +class StreamBuffers(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + + think_parser: ThinkTagStreamParser = Field(default_factory=ThinkTagStreamParser) + pending_thought: list[str] = Field(default_factory=list) + pending_content: list[str] = Field(default_factory=list) + pending_tool_calls: list[ToolCall] = Field(default_factory=list) + current_turn_reasoning: list[str] = Field(default_factory=list) + reasoning_per_turn: list[str] = Field(default_factory=list) + + +class TraceState(BaseModel): + trace_segments: list[LLMTraceSegment] = Field(default_factory=list) + tool_trace_map: dict[str, LLMTraceSegment] = Field(default_factory=dict) + tool_call_index_map: dict[str, int] = Field(default_factory=dict) + model_segment_start_time: float | None = Field(default=None, description="Start time for current model segment") + pending_usage: LLMUsage | None = Field(default=None, description="Pending usage for current model segment") + + +class 
AggregatedResult(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + + text: str = "" + files: list[File] = Field(default_factory=list) + usage: LLMUsage = Field(default_factory=LLMUsage.empty_usage) + finish_reason: str | None = None + + +class AgentContext(BaseModel): + agent_logs: list[AgentLogEvent] = Field(default_factory=list) + agent_result: AgentResult | None = None + + +class ToolOutputState(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + + stream: StreamBuffers = Field(default_factory=StreamBuffers) + trace: TraceState = Field(default_factory=TraceState) + aggregate: AggregatedResult = Field(default_factory=AggregatedResult) + agent: AgentContext = Field(default_factory=AgentContext) + + +class ToolLogPayload(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + + tool_name: str = "" + tool_call_id: str = "" + tool_args: dict[str, Any] = Field(default_factory=dict) + tool_output: Any = None + tool_error: Any = None + files: list[Any] = Field(default_factory=list) + meta: dict[str, Any] = Field(default_factory=dict) + + @classmethod + def from_log(cls, log: AgentLog) -> "ToolLogPayload": + data = log.data or {} + return cls( + tool_name=data.get("tool_name", ""), + tool_call_id=data.get("tool_call_id", ""), + tool_args=data.get("tool_args") or {}, + tool_output=data.get("output"), + tool_error=data.get("error"), + files=data.get("files") or [], + meta=data.get("meta") or {}, + ) + + @classmethod + def from_mapping(cls, data: Mapping[str, Any]) -> "ToolLogPayload": + return cls( + tool_name=data.get("tool_name", ""), + tool_call_id=data.get("tool_call_id", ""), + tool_args=data.get("tool_args") or {}, + tool_output=data.get("output"), + tool_error=data.get("error"), + files=data.get("files") or [], + meta=data.get("meta") or {}, + ) + + class LLMNodeData(BaseNodeData): model: ModelConfig prompt_template: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate @@ -86,6 +355,10 @@ 
class LLMNodeData(BaseNodeData): ), ) + # Tool support + tools: Sequence[ToolMetadata] = Field(default_factory=list) + max_iterations: int | None = Field(default=None, description="Maximum number of iterations for the LLM node") + @field_validator("prompt_config", mode="before") @classmethod def convert_none_prompt_config(cls, v: Any): diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index dfb55dcd80..b4caa28ee0 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -11,6 +11,8 @@ from typing import TYPE_CHECKING, Any, Literal from sqlalchemy import select +from core.agent.entities import AgentLog, AgentResult, AgentToolEntity, ExecutionContext +from core.agent.patterns import StrategyFactory from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity from core.file import File, FileTransferMethod, FileType, file_manager from core.helper.code_executor import CodeExecutor, CodeLanguage @@ -48,7 +50,9 @@ from core.model_runtime.utils.encoders import jsonable_encoder from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig from core.prompt.utils.prompt_message_util import PromptMessageUtil from core.rag.entities.citation_metadata import RetrievalSourceMetadata +from core.tools.__base.tool import Tool from core.tools.signature import sign_upload_file +from core.tools.tool_manager import ToolManager from core.variables import ( ArrayFileSegment, ArraySegment, @@ -58,7 +62,8 @@ from core.variables import ( StringSegment, ) from core.workflow.constants import SYSTEM_VARIABLE_NODE_ID -from core.workflow.entities import GraphInitParams +from core.workflow.entities import GraphInitParams, ToolCall, ToolResult, ToolResultStatus +from core.workflow.entities.tool_entities import ToolCallResult from core.workflow.enums import ( NodeType, SystemVariableKey, @@ -66,13 +71,18 @@ from core.workflow.enums import ( 
WorkflowNodeExecutionStatus, ) from core.workflow.node_events import ( + AgentLogEvent, ModelInvokeCompletedEvent, NodeEventBase, NodeRunResult, RunRetrieverResourceEvent, StreamChunkEvent, StreamCompletedEvent, + ThoughtChunkEvent, + ToolCallChunkEvent, + ToolResultChunkEvent, ) +from core.workflow.node_events.node import ThoughtEndChunkEvent, ThoughtStartChunkEvent from core.workflow.nodes.base.entities import VariableSelector from core.workflow.nodes.base.node import Node from core.workflow.nodes.base.variable_template_parser import VariableTemplateParser @@ -83,10 +93,21 @@ from models.model import UploadFile from . import llm_utils from .entities import ( + AgentContext, + AggregatedResult, + LLMGenerationData, LLMNodeChatModelMessage, LLMNodeCompletionModelPromptTemplate, LLMNodeData, + LLMTraceSegment, ModelConfig, + ModelTraceSegment, + StreamBuffers, + ThinkTagStreamParser, + ToolLogPayload, + ToolOutputState, + ToolTraceSegment, + TraceState, ) from .exc import ( InvalidContextStructureError, @@ -151,11 +172,11 @@ class LLMNode(Node[LLMNodeData]): def _run(self) -> Generator: node_inputs: dict[str, Any] = {} process_data: dict[str, Any] = {} - result_text = "" clean_text = "" usage = LLMUsage.empty_usage() finish_reason = None - reasoning_content = None + reasoning_content = "" # Initialize as empty string for consistency + clean_text = "" # Initialize clean_text to avoid UnboundLocalError variable_pool = self.graph_runtime_state.variable_pool try: @@ -236,55 +257,58 @@ class LLMNode(Node[LLMNodeData]): context_files=context_files, ) - # handle invoke result - generator = LLMNode.invoke_llm( - node_data_model=self.node_data.model, - model_instance=model_instance, - prompt_messages=prompt_messages, - stop=stop, - user_id=self.user_id, - structured_output_enabled=self.node_data.structured_output_enabled, - structured_output=self.node_data.structured_output, - file_saver=self._llm_file_saver, - file_outputs=self._file_outputs, - node_id=self._node_id, - 
node_type=self.node_type, - reasoning_format=self.node_data.reasoning_format, - ) - + # Variables for outputs + generation_data: LLMGenerationData | None = None structured_output: LLMStructuredOutput | None = None - for event in generator: - if isinstance(event, StreamChunkEvent): - yield event - elif isinstance(event, ModelInvokeCompletedEvent): - # Raw text - result_text = event.text - usage = event.usage - finish_reason = event.finish_reason - reasoning_content = event.reasoning_content or "" + # Check if tools are configured + if self.tool_call_enabled: + # Use tool-enabled invocation (Agent V2 style) + generator = self._invoke_llm_with_tools( + model_instance=model_instance, + prompt_messages=prompt_messages, + stop=stop, + files=files, + variable_pool=variable_pool, + node_inputs=node_inputs, + process_data=process_data, + ) + else: + # Use traditional LLM invocation + generator = LLMNode.invoke_llm( + node_data_model=self._node_data.model, + model_instance=model_instance, + prompt_messages=prompt_messages, + stop=stop, + user_id=self.user_id, + structured_output_enabled=self._node_data.structured_output_enabled, + structured_output=self._node_data.structured_output, + file_saver=self._llm_file_saver, + file_outputs=self._file_outputs, + node_id=self._node_id, + node_type=self.node_type, + reasoning_format=self._node_data.reasoning_format, + ) - # For downstream nodes, determine clean text based on reasoning_format - if self.node_data.reasoning_format == "tagged": - # Keep tags for backward compatibility - clean_text = result_text - else: - # Extract clean text from tags - clean_text, _ = LLMNode._split_reasoning(result_text, self.node_data.reasoning_format) + ( + clean_text, + reasoning_content, + generation_reasoning_content, + generation_clean_content, + usage, + finish_reason, + structured_output, + generation_data, + ) = yield from self._stream_llm_events(generator, model_instance=model_instance) - # Process structured output if available from the event. 
- structured_output = ( - LLMStructuredOutput(structured_output=event.structured_output) - if event.structured_output - else None - ) - - # deduct quota - llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage) - break - elif isinstance(event, LLMStructuredOutput): - structured_output = event + # Extract variables from generation_data if available + if generation_data: + clean_text = generation_data.text + reasoning_content = "" + usage = generation_data.usage + finish_reason = generation_data.finish_reason + # Unified process_data building process_data = { "model_mode": model_config.mode, "prompts": PromptMessageUtil.prompt_messages_to_prompt_for_saving( @@ -295,24 +319,88 @@ class LLMNode(Node[LLMNodeData]): "model_provider": model_config.provider, "model_name": model_config.model, } + if self.tool_call_enabled and self._node_data.tools: + process_data["tools"] = [ + { + "type": tool.type.value if hasattr(tool.type, "value") else tool.type, + "provider_name": tool.provider_name, + "tool_name": tool.tool_name, + } + for tool in self._node_data.tools + if tool.enabled + ] + # Unified outputs building outputs = { "text": clean_text, "reasoning_content": reasoning_content, "usage": jsonable_encoder(usage), "finish_reason": finish_reason, } + + # Build generation field + if generation_data: + # Use generation_data from tool invocation (supports multi-turn) + generation = { + "content": generation_data.text, + "reasoning_content": generation_data.reasoning_contents, # [thought1, thought2, ...] 
+ "tool_calls": [self._serialize_tool_call(item) for item in generation_data.tool_calls], + "sequence": generation_data.sequence, + } + files_to_output = generation_data.files + else: + # Traditional LLM invocation + generation_reasoning = generation_reasoning_content or reasoning_content + generation_content = generation_clean_content or clean_text + sequence: list[dict[str, Any]] = [] + if generation_reasoning: + sequence = [ + {"type": "reasoning", "index": 0}, + {"type": "content", "start": 0, "end": len(generation_content)}, + ] + generation = { + "content": generation_content, + "reasoning_content": [generation_reasoning] if generation_reasoning else [], + "tool_calls": [], + "sequence": sequence, + } + files_to_output = self._file_outputs + + outputs["generation"] = generation + if files_to_output: + outputs["files"] = ArrayFileSegment(value=files_to_output) if structured_output: outputs["structured_output"] = structured_output.structured_output - if self._file_outputs: - outputs["files"] = ArrayFileSegment(value=self._file_outputs) # Send final chunk event to indicate streaming is complete - yield StreamChunkEvent( - selector=[self._node_id, "text"], - chunk="", - is_final=True, - ) + if not self.tool_call_enabled: + # For tool calls, final events are already sent in _process_tool_outputs + yield StreamChunkEvent( + selector=[self._node_id, "text"], + chunk="", + is_final=True, + ) + yield StreamChunkEvent( + selector=[self._node_id, "generation", "content"], + chunk="", + is_final=True, + ) + yield ThoughtChunkEvent( + selector=[self._node_id, "generation", "thought"], + chunk="", + is_final=True, + ) + + metadata: dict[WorkflowNodeExecutionMetadataKey, Any] = { + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: usage.total_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: usage.total_price, + WorkflowNodeExecutionMetadataKey.CURRENCY: usage.currency, + } + + if generation_data and generation_data.trace: + 
metadata[WorkflowNodeExecutionMetadataKey.LLM_TRACE] = [ + segment.model_dump() for segment in generation_data.trace + ] yield StreamCompletedEvent( node_run_result=NodeRunResult( @@ -320,11 +408,7 @@ class LLMNode(Node[LLMNodeData]): inputs=node_inputs, process_data=process_data, outputs=outputs, - metadata={ - WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: usage.total_tokens, - WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: usage.total_price, - WorkflowNodeExecutionMetadataKey.CURRENCY: usage.currency, - }, + metadata=metadata, llm_usage=usage, ) ) @@ -446,6 +530,8 @@ class LLMNode(Node[LLMNodeData]): usage = LLMUsage.empty_usage() finish_reason = None full_text_buffer = io.StringIO() + think_parser = ThinkTagStreamParser() + reasoning_chunks: list[str] = [] # Initialize streaming metrics tracking start_time = request_start_time if request_start_time is not None else time.perf_counter() @@ -474,12 +560,45 @@ class LLMNode(Node[LLMNodeData]): has_content = True full_text_buffer.write(text_part) + # Text output: always forward raw chunk (keep tags intact) yield StreamChunkEvent( selector=[node_id, "text"], chunk=text_part, is_final=False, ) + # Generation output: split out thoughts, forward only non-thought content chunks + for kind, segment in think_parser.process(text_part): + if not segment: + if kind not in {"thought_start", "thought_end"}: + continue + + if kind == "thought_start": + yield ThoughtStartChunkEvent( + selector=[node_id, "generation", "thought"], + chunk="", + is_final=False, + ) + elif kind == "thought": + reasoning_chunks.append(segment) + yield ThoughtChunkEvent( + selector=[node_id, "generation", "thought"], + chunk=segment, + is_final=False, + ) + elif kind == "thought_end": + yield ThoughtEndChunkEvent( + selector=[node_id, "generation", "thought"], + chunk="", + is_final=False, + ) + else: + yield StreamChunkEvent( + selector=[node_id, "generation", "content"], + chunk=segment, + is_final=False, + ) + # Update the whole metadata if not model 
    @property
    def tool_call_enabled(self) -> bool:
        """Whether this LLM node should run in tool-calling (agent) mode.

        True only when tools are configured AND every configured tool is enabled.
        NOTE(review): requiring *all* tools enabled means one disabled tool turns
        off tool calling entirely instead of just excluding that tool; `any(...)`
        may be the intent (process_data filters per-tool on `tool.enabled`) —
        confirm against the node editor semantics.
        """
        return (
            self.node_data.tools is not None
            and len(self.node_data.tools) > 0
            and all(tool.enabled for tool in self.node_data.tools)
        )

    def _stream_llm_events(
        self,
        generator: Generator[NodeEventBase | LLMStructuredOutput, None, LLMGenerationData | None],
        *,
        model_instance: ModelInstance,
    ) -> Generator[
        NodeEventBase,
        None,
        tuple[
            str,
            str,
            str,
            str,
            LLMUsage,
            str | None,
            LLMStructuredOutput | None,
            LLMGenerationData | None,
        ],
    ]:
        """
        Stream events and capture generator return value in one place.
        Uses generator delegation so _run stays concise while still emitting events.

        Returns (clean_text, reasoning_content, generation_reasoning_content,
        generation_clean_content, usage, finish_reason, structured_output,
        generation_data); generation_data is non-None only for the tool path,
        whose generator returns an LLMGenerationData via StopIteration.value.
        """
        clean_text = ""
        reasoning_content = ""
        generation_reasoning_content = ""
        generation_clean_content = ""
        usage = LLMUsage.empty_usage()
        finish_reason: str | None = None
        structured_output: LLMStructuredOutput | None = None
        generation_data: LLMGenerationData | None = None
        completed = False

        # Manual next()-loop (not `for`) so StopIteration.value — the generator's
        # return value — can be captured (a for-loop would discard it).
        while True:
            try:
                event = next(generator)
            except StopIteration as exc:
                if isinstance(exc.value, LLMGenerationData):
                    generation_data = exc.value
                break

            if completed:
                # After completion we still drain to reach StopIteration.value
                continue

            match event:
                case StreamChunkEvent() | ThoughtChunkEvent():
                    # Forward streaming chunks to the caller unchanged.
                    # NOTE(review): any other event type falls through to the
                    # default case below and is silently dropped — confirm no
                    # other event kinds are expected on this path.
                    yield event

                case ModelInvokeCompletedEvent(
                    text=text,
                    usage=usage_event,
                    finish_reason=finish_reason_event,
                    reasoning_content=reasoning_event,
                    structured_output=structured_raw,
                ):
                    clean_text = text
                    usage = usage_event
                    finish_reason = finish_reason_event
                    reasoning_content = reasoning_event or ""
                    generation_reasoning_content = reasoning_content
                    generation_clean_content = clean_text

                    if self.node_data.reasoning_format == "tagged":
                        # Keep tagged text for output; also extract reasoning for generation field
                        generation_clean_content, generation_reasoning_content = LLMNode._split_reasoning(
                            clean_text, reasoning_format="separated"
                        )
                    else:
                        clean_text, generation_reasoning_content = LLMNode._split_reasoning(
                            clean_text, self.node_data.reasoning_format
                        )
                        generation_clean_content = clean_text

                    structured_output = (
                        LLMStructuredOutput(structured_output=structured_raw) if structured_raw else None
                    )

                    # Quota is deducted exactly once, on the completion event.
                    llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
                    completed = True

                case LLMStructuredOutput():
                    structured_output = event

                case _:
                    continue

        return (
            clean_text,
            reasoning_content,
            generation_reasoning_content,
            generation_clean_content,
            usage,
            finish_reason,
            structured_output,
            generation_data,
        )

    def _invoke_llm_with_tools(
        self,
        model_instance: ModelInstance,
        prompt_messages: Sequence[PromptMessage],
        stop: Sequence[str] | None,
        files: Sequence[File],
        variable_pool: VariablePool,
        node_inputs: dict[str, Any],
        process_data: dict[str, Any],
    ) -> Generator[NodeEventBase, None, LLMGenerationData]:
        """Invoke LLM with tools support (from Agent V2).

        Returns LLMGenerationData with text, reasoning_contents, tool_calls, usage, finish_reason, files

        NOTE(review): `files`, `node_inputs` and `process_data` are accepted but
        not used in this body — confirm whether they are vestigial or reserved
        for a follow-up change.
        """
        # Get model features to determine strategy
        model_features = self._get_model_features(model_instance)

        # Prepare tool instances
        tool_instances = self._prepare_tool_instances(variable_pool)

        # Prepare prompt files (files that come from prompt variables, not vision files)
        prompt_files = self._extract_prompt_files(variable_pool)

        # Use factory to create appropriate strategy
        strategy = StrategyFactory.create_strategy(
            model_features=model_features,
            model_instance=model_instance,
            tools=tool_instances,
            files=prompt_files,
            max_iterations=self._node_data.max_iterations or 10,
            context=ExecutionContext(user_id=self.user_id, app_id=self.app_id, tenant_id=self.tenant_id),
        )

        # Run strategy
        outputs = strategy.run(
            prompt_messages=list(prompt_messages),
            model_parameters=self._node_data.model.completion_params,
            stop=list(stop or []),
            stream=True,
        )

        # Process outputs and return generation result
        result = yield from self._process_tool_outputs(outputs)
        return result
    def _get_model_features(self, model_instance: ModelInstance) -> list[ModelFeature]:
        """Get model schema to determine features.

        Returns [] when the schema is missing or lookup fails; any exception is
        swallowed deliberately so the caller falls back to a strategy that
        assumes no special model features.
        """
        try:
            model_type_instance = model_instance.model_type_instance
            model_schema = model_type_instance.get_model_schema(
                model_instance.model,
                model_instance.credentials,
            )
            return model_schema.features if model_schema and model_schema.features else []
        except Exception:
            logger.warning("Failed to get model schema, assuming no special features")
            return []

    def _prepare_tool_instances(self, variable_pool: VariablePool) -> list[Tool]:
        """Prepare tool instances from configuration.

        Each configured tool is resolved to a runtime via ToolManager; tools
        that fail to load are logged and skipped (best effort, no raise).
        NOTE(review): `tool.enabled` is not checked here, so disabled tools are
        still instantiated — confirm whether filtering is intended.
        """
        tool_instances = []

        if self._node_data.tools:
            for tool in self._node_data.tools:
                try:
                    # Process settings to extract the correct structure
                    processed_settings = {}
                    for key, value in tool.settings.items():
                        if isinstance(value, dict) and "value" in value and isinstance(value["value"], dict):
                            # Extract the nested value if it has the ToolInput structure
                            if "type" in value["value"] and "value" in value["value"]:
                                processed_settings[key] = value["value"]
                            else:
                                processed_settings[key] = value
                        else:
                            processed_settings[key] = value

                    # Merge parameters with processed settings (similar to Agent Node logic);
                    # settings take precedence over parameters on key collision.
                    merged_parameters = {**tool.parameters, **processed_settings}

                    # Create AgentToolEntity from ToolMetadata
                    agent_tool = AgentToolEntity(
                        provider_id=tool.provider_name,
                        provider_type=tool.type,
                        tool_name=tool.tool_name,
                        tool_parameters=merged_parameters,
                        plugin_unique_identifier=tool.plugin_unique_identifier,
                        credential_id=tool.credential_id,
                    )

                    # Get tool runtime from ToolManager
                    tool_runtime = ToolManager.get_agent_tool_runtime(
                        tenant_id=self.tenant_id,
                        app_id=self.app_id,
                        agent_tool=agent_tool,
                        invoke_from=self.invoke_from,
                        variable_pool=variable_pool,
                    )

                    # Apply custom description from extra field if available
                    if tool.extra.get("description") and tool_runtime.entity.description:
                        tool_runtime.entity.description.llm = (
                            tool.extra.get("description") or tool_runtime.entity.description.llm
                        )

                    tool_instances.append(tool_runtime)
                except Exception as e:
                    logger.warning("Failed to load tool %s: %s", tool, str(e))
                    continue

        return tool_instances

    def _extract_prompt_files(self, variable_pool: VariablePool) -> list[File]:
        """Extract files from prompt template variables.

        Scans every chat-message template's text for variable selectors and
        collects File / ArrayFile variable values. Completion-style (non-list)
        templates yield no files.
        """
        from core.variables import ArrayFileVariable, FileVariable

        files: list[File] = []

        # Extract variables from prompt template
        if isinstance(self._node_data.prompt_template, list):
            for message in self._node_data.prompt_template:
                if message.text:
                    parser = VariableTemplateParser(message.text)
                    variable_selectors = parser.extract_variable_selectors()

                    for variable_selector in variable_selectors:
                        variable = variable_pool.get(variable_selector.value_selector)
                        if isinstance(variable, FileVariable) and variable.value:
                            files.append(variable.value)
                        elif isinstance(variable, ArrayFileVariable) and variable.value:
                            files.extend(variable.value)

        return files

    @staticmethod
    def _serialize_tool_call(tool_call: ToolCallResult) -> dict[str, Any]:
        """Convert ToolCallResult into JSON-friendly dict."""

        def _file_to_ref(file: File) -> str | None:
            # Align with streamed tool result events which carry file IDs
            return file.id or file.related_id

        # Keep only files that resolve to an ID; drop the rest silently.
        files = []
        for file in tool_call.files or []:
            ref = _file_to_ref(file)
            if ref:
                files.append(ref)

        return {
            "id": tool_call.id,
            "name": tool_call.name,
            "arguments": tool_call.arguments,
            "output": tool_call.output,
            "files": files,
            # `status` may be an enum or a plain string; emit the string form.
            "status": tool_call.status.value if hasattr(tool_call.status, "value") else tool_call.status,
            "elapsed_time": tool_call.elapsed_time,
        }
icon_type + / "en_US" + ) + except Exception: + return None + + def _flush_model_segment( + self, + buffers: StreamBuffers, + trace_state: TraceState, + error: str | None = None, + ) -> None: + """Flush pending thought/content buffers into a single model trace segment.""" + if not buffers.pending_thought and not buffers.pending_content and not buffers.pending_tool_calls: + return + + now = time.perf_counter() + duration = now - trace_state.model_segment_start_time if trace_state.model_segment_start_time else 0.0 + + # Use pending_usage from trace_state (captured from THOUGHT log) + usage = trace_state.pending_usage + + # Generate model provider icon URL + provider = self._node_data.model.provider + model_name = self._node_data.model.name + model_icon = self._generate_model_provider_icon_url(provider) + model_icon_dark = self._generate_model_provider_icon_url(provider, dark=True) + + trace_state.trace_segments.append( + LLMTraceSegment( + type="model", + duration=duration, + usage=usage, + output=ModelTraceSegment( + text="".join(buffers.pending_content) if buffers.pending_content else None, + reasoning="".join(buffers.pending_thought) if buffers.pending_thought else None, + tool_calls=list(buffers.pending_tool_calls), + ), + provider=provider, + name=model_name, + icon=model_icon, + icon_dark=model_icon_dark, + error=error, + status="error" if error else "success", + ) + ) + buffers.pending_thought.clear() + buffers.pending_content.clear() + buffers.pending_tool_calls.clear() + trace_state.model_segment_start_time = None + trace_state.pending_usage = None + + def _handle_agent_log_output( + self, output: AgentLog, buffers: StreamBuffers, trace_state: TraceState, agent_context: AgentContext + ) -> Generator[NodeEventBase, None, None]: + payload = ToolLogPayload.from_log(output) + agent_log_event = AgentLogEvent( + message_id=output.id, + label=output.label, + node_execution_id=self.id, + parent_id=output.parent_id, + error=output.error, + status=output.status.value, 
    def _handle_agent_log_output(
        self, output: AgentLog, buffers: StreamBuffers, trace_state: TraceState, agent_context: AgentContext
    ) -> Generator[NodeEventBase, None, None]:
        """Translate one AgentLog into node events and trace/buffer updates.

        Upserts the log into agent_context.agent_logs (keyed by message_id),
        captures LLM usage from completed THOUGHT logs, and for TOOL_CALL logs
        emits ToolCallChunkEvent (START) or ToolResultChunkEvent (non-START).
        """
        payload = ToolLogPayload.from_log(output)
        agent_log_event = AgentLogEvent(
            message_id=output.id,
            label=output.label,
            node_execution_id=self.id,
            parent_id=output.parent_id,
            error=output.error,
            status=output.status.value,
            data=output.data,
            # AgentLogEvent metadata uses plain string keys (enum .value).
            metadata={k.value: v for k, v in output.metadata.items()},
            node_id=self._node_id,
        )
        # Update an existing log entry in place, or append a new one (for-else).
        for log in agent_context.agent_logs:
            if log.message_id == agent_log_event.message_id:
                log.data = agent_log_event.data
                log.status = agent_log_event.status
                log.error = agent_log_event.error
                log.label = agent_log_event.label
                log.metadata = agent_log_event.metadata
                break
        else:
            agent_context.agent_logs.append(agent_log_event)

        # Handle THOUGHT log completion - capture usage for model segment
        if output.log_type == AgentLog.LogType.THOUGHT and output.status == AgentLog.LogStatus.SUCCESS:
            llm_usage = output.metadata.get(AgentLog.LogMetadata.LLM_USAGE) if output.metadata else None
            if llm_usage:
                trace_state.pending_usage = llm_usage

        if output.log_type == AgentLog.LogType.TOOL_CALL and output.status == AgentLog.LogStatus.START:
            tool_name = payload.tool_name
            tool_call_id = payload.tool_call_id
            tool_arguments = json.dumps(payload.tool_args) if payload.tool_args else ""

            # Get icon from metadata (available at START)
            tool_icon = output.metadata.get(AgentLog.LogMetadata.ICON) if output.metadata else None
            tool_icon_dark = output.metadata.get(AgentLog.LogMetadata.ICON_DARK) if output.metadata else None

            # Record first-seen ordering of tool calls for later sorting.
            if tool_call_id and tool_call_id not in trace_state.tool_call_index_map:
                trace_state.tool_call_index_map[tool_call_id] = len(trace_state.tool_call_index_map)

            # Add tool call to pending list for model segment
            buffers.pending_tool_calls.append(ToolCall(id=tool_call_id, name=tool_name, arguments=tool_arguments))

            yield ToolCallChunkEvent(
                selector=[self._node_id, "generation", "tool_calls"],
                chunk=tool_arguments,
                tool_call=ToolCall(
                    id=tool_call_id,
                    name=tool_name,
                    arguments=tool_arguments,
                    icon=tool_icon,
                    icon_dark=tool_icon_dark,
                ),
                is_final=False,
            )

        if output.log_type == AgentLog.LogType.TOOL_CALL and output.status != AgentLog.LogStatus.START:
            tool_name = payload.tool_name
            tool_output = payload.tool_output
            tool_call_id = payload.tool_call_id
            tool_files = payload.files if isinstance(payload.files, list) else []
            tool_error = payload.tool_error
            tool_arguments = json.dumps(payload.tool_args) if payload.tool_args else ""

            if tool_call_id and tool_call_id not in trace_state.tool_call_index_map:
                trace_state.tool_call_index_map[tool_call_id] = len(trace_state.tool_call_index_map)

            # Flush model segment before tool result processing
            self._flush_model_segment(buffers, trace_state)

            # Resolve the error message: prefer the log's own error, then the
            # payload's, then a meta-carried error (best-effort fallbacks).
            if output.status == AgentLog.LogStatus.ERROR:
                tool_error = output.error or payload.tool_error
                if not tool_error and payload.meta:
                    tool_error = payload.meta.get("error")
            else:
                if payload.meta:
                    meta_error = payload.meta.get("error")
                    if meta_error:
                        tool_error = meta_error

            elapsed_time = output.metadata.get(AgentLog.LogMetadata.ELAPSED_TIME) if output.metadata else None
            tool_provider = output.metadata.get(AgentLog.LogMetadata.PROVIDER) if output.metadata else None
            tool_icon = output.metadata.get(AgentLog.LogMetadata.ICON) if output.metadata else None
            tool_icon_dark = output.metadata.get(AgentLog.LogMetadata.ICON_DARK) if output.metadata else None
            result_str = str(tool_output) if tool_output is not None else None

            tool_status: Literal["success", "error"] = "error" if tool_error else "success"
            tool_call_segment = LLMTraceSegment(
                type="tool",
                duration=elapsed_time or 0.0,
                usage=None,
                output=ToolTraceSegment(
                    id=tool_call_id,
                    name=tool_name,
                    arguments=tool_arguments,
                    output=result_str,
                ),
                provider=tool_provider,
                name=tool_name,
                icon=tool_icon,
                icon_dark=tool_icon_dark,
                error=str(tool_error) if tool_error else None,
                status=tool_status,
            )
            trace_state.trace_segments.append(tool_call_segment)
            if tool_call_id:
                trace_state.tool_trace_map[tool_call_id] = tool_call_segment

            # Start new model segment tracking
            trace_state.model_segment_start_time = time.perf_counter()

            yield ToolResultChunkEvent(
                selector=[self._node_id, "generation", "tool_results"],
                chunk=result_str or "",
                tool_result=ToolResult(
                    id=tool_call_id,
                    name=tool_name,
                    output=result_str,
                    files=tool_files,
                    status=ToolResultStatus.ERROR if tool_error else ToolResultStatus.SUCCESS,
                    elapsed_time=elapsed_time,
                    icon=tool_icon,
                    icon_dark=tool_icon_dark,
                ),
                is_final=False,
            )

            # A tool result closes the current reasoning "turn".
            if buffers.current_turn_reasoning:
                buffers.reasoning_per_turn.append("".join(buffers.current_turn_reasoning))
                buffers.current_turn_reasoning.clear()

    def _handle_llm_chunk_output(
        self, output: LLMResultChunk, buffers: StreamBuffers, trace_state: TraceState, aggregate: AggregatedResult
    ) -> Generator[NodeEventBase, None, None]:
        """Translate one LLM result chunk into stream events and buffer updates.

        Splits the chunk via the think-tag parser: thought segments go to the
        thought stream and pending buffers; content segments go to both the
        "text" and "generation.content" streams and the aggregate text.
        """
        message = output.delta.message

        if message and message.content:
            chunk_text = message.content
            # Content may be a list of content parts; join their data fields.
            if isinstance(chunk_text, list):
                chunk_text = "".join(getattr(content, "data", str(content)) for content in chunk_text)
            else:
                chunk_text = str(chunk_text)

            for kind, segment in buffers.think_parser.process(chunk_text):
                # Empty segments are meaningful only for start/end markers.
                if not segment and kind not in {"thought_start", "thought_end"}:
                    continue

                # Start tracking model segment time on first output
                if trace_state.model_segment_start_time is None:
                    trace_state.model_segment_start_time = time.perf_counter()

                if kind == "thought_start":
                    yield ThoughtStartChunkEvent(
                        selector=[self._node_id, "generation", "thought"],
                        chunk="",
                        is_final=False,
                    )
                elif kind == "thought":
                    buffers.current_turn_reasoning.append(segment)
                    buffers.pending_thought.append(segment)
                    yield ThoughtChunkEvent(
                        selector=[self._node_id, "generation", "thought"],
                        chunk=segment,
                        is_final=False,
                    )
                elif kind == "thought_end":
                    yield ThoughtEndChunkEvent(
                        selector=[self._node_id, "generation", "thought"],
                        chunk="",
                        is_final=False,
                    )
                else:
                    aggregate.text += segment
                    buffers.pending_content.append(segment)
                    yield StreamChunkEvent(
                        selector=[self._node_id, "text"],
                        chunk=segment,
                        is_final=False,
                    )
                    yield StreamChunkEvent(
                        selector=[self._node_id, "generation", "content"],
                        chunk=segment,
                        is_final=False,
                    )

        if output.delta.usage:
            self._accumulate_usage(aggregate.usage, output.delta.usage)

        if output.delta.finish_reason:
            aggregate.finish_reason = output.delta.finish_reason
trace_state) + + def _close_streams(self) -> Generator[NodeEventBase, None, None]: + yield StreamChunkEvent( + selector=[self._node_id, "text"], + chunk="", + is_final=True, + ) + yield StreamChunkEvent( + selector=[self._node_id, "generation", "content"], + chunk="", + is_final=True, + ) + yield ThoughtChunkEvent( + selector=[self._node_id, "generation", "thought"], + chunk="", + is_final=True, + ) + yield ToolCallChunkEvent( + selector=[self._node_id, "generation", "tool_calls"], + chunk="", + tool_call=ToolCall( + id="", + name="", + arguments="", + ), + is_final=True, + ) + yield ToolResultChunkEvent( + selector=[self._node_id, "generation", "tool_results"], + chunk="", + tool_result=ToolResult( + id="", + name="", + output="", + files=[], + status=ToolResultStatus.SUCCESS, + ), + is_final=True, + ) + + def _build_generation_data( + self, + trace_state: TraceState, + agent_context: AgentContext, + aggregate: AggregatedResult, + buffers: StreamBuffers, + ) -> LLMGenerationData: + sequence: list[dict[str, Any]] = [] + reasoning_index = 0 + content_position = 0 + tool_call_seen_index: dict[str, int] = {} + for trace_segment in trace_state.trace_segments: + if trace_segment.type == "thought": + sequence.append({"type": "reasoning", "index": reasoning_index}) + reasoning_index += 1 + elif trace_segment.type == "content": + segment_text = trace_segment.text or "" + start = content_position + end = start + len(segment_text) + sequence.append({"type": "content", "start": start, "end": end}) + content_position = end + elif trace_segment.type == "tool_call": + tool_id = trace_segment.tool_call.id if trace_segment.tool_call and trace_segment.tool_call.id else "" + if tool_id not in tool_call_seen_index: + tool_call_seen_index[tool_id] = len(tool_call_seen_index) + sequence.append({"type": "tool_call", "index": tool_call_seen_index[tool_id]}) + + tool_calls_for_generation: list[ToolCallResult] = [] + for log in agent_context.agent_logs: + payload = 
ToolLogPayload.from_mapping(log.data or {}) + tool_call_id = payload.tool_call_id + if not tool_call_id or log.status == AgentLog.LogStatus.START.value: + continue + + tool_args = payload.tool_args + log_error = payload.tool_error + log_output = payload.tool_output + result_text = log_output or log_error or "" + status = ToolResultStatus.ERROR if log_error else ToolResultStatus.SUCCESS + tool_calls_for_generation.append( + ToolCallResult( + id=tool_call_id, + name=payload.tool_name, + arguments=json.dumps(tool_args) if tool_args else "", + output=result_text, + status=status, + elapsed_time=log.metadata.get(AgentLog.LogMetadata.ELAPSED_TIME) if log.metadata else None, + ) + ) + + tool_calls_for_generation.sort( + key=lambda item: trace_state.tool_call_index_map.get(item.id or "", len(trace_state.tool_call_index_map)) + ) + + return LLMGenerationData( + text=aggregate.text, + reasoning_contents=buffers.reasoning_per_turn, + tool_calls=tool_calls_for_generation, + sequence=sequence, + usage=aggregate.usage, + finish_reason=aggregate.finish_reason, + files=aggregate.files, + trace=trace_state.trace_segments, + ) + + def _process_tool_outputs( + self, + outputs: Generator[LLMResultChunk | AgentLog, None, AgentResult], + ) -> Generator[NodeEventBase, None, LLMGenerationData]: + """Process strategy outputs and convert to node events.""" + state = ToolOutputState() + + try: + for output in outputs: + if isinstance(output, AgentLog): + yield from self._handle_agent_log_output(output, state.stream, state.trace, state.agent) + else: + yield from self._handle_llm_chunk_output(output, state.stream, state.trace, state.aggregate) + except StopIteration as exception: + if isinstance(getattr(exception, "value", None), AgentResult): + state.agent.agent_result = exception.value + + if state.agent.agent_result: + state.aggregate.text = state.agent.agent_result.text or state.aggregate.text + state.aggregate.files = state.agent.agent_result.files + if state.agent.agent_result.usage: + 
state.aggregate.usage = state.agent.agent_result.usage + if state.agent.agent_result.finish_reason: + state.aggregate.finish_reason = state.agent.agent_result.finish_reason + + yield from self._flush_remaining_stream(state.stream, state.trace, state.aggregate) + yield from self._close_streams() + + return self._build_generation_data(state.trace, state.agent, state.aggregate, state.stream) + + def _accumulate_usage(self, total_usage: LLMUsage, delta_usage: LLMUsage) -> None: + """Accumulate LLM usage statistics.""" + total_usage.prompt_tokens += delta_usage.prompt_tokens + total_usage.completion_tokens += delta_usage.completion_tokens + total_usage.total_tokens += delta_usage.total_tokens + total_usage.prompt_price += delta_usage.prompt_price + total_usage.completion_price += delta_usage.completion_price + total_usage.total_price += delta_usage.total_price + def _combine_message_content_with_role( *, contents: str | list[PromptMessageContentUnionTypes] | None = None, role: PromptMessageRole diff --git a/api/extensions/ext_celery.py b/api/extensions/ext_celery.py index 08cf96c1c1..af983f6d87 100644 --- a/api/extensions/ext_celery.py +++ b/api/extensions/ext_celery.py @@ -102,6 +102,8 @@ def init_app(app: DifyApp) -> Celery: imports = [ "tasks.async_workflow_tasks", # trigger workers "tasks.trigger_processing_tasks", # async trigger processing + "tasks.generate_summary_index_task", # summary index generation + "tasks.regenerate_summary_index_task", # summary index regeneration ] day = dify_config.CELERY_BEAT_SCHEDULER_TIME diff --git a/api/fields/conversation_fields.py b/api/fields/conversation_fields.py index d8ae0ad8b8..fe59cdcbb4 100644 --- a/api/fields/conversation_fields.py +++ b/api/fields/conversation_fields.py @@ -169,6 +169,7 @@ class MessageDetail(ResponseModel): status: str error: str | None = None parent_message_id: str | None = None + generation_detail: JSONValue | None = Field(default=None, validation_alias="generation_detail_dict") 
@field_validator("inputs", mode="before") @classmethod diff --git a/api/fields/dataset_fields.py b/api/fields/dataset_fields.py index 1e5ec7d200..ff6578098b 100644 --- a/api/fields/dataset_fields.py +++ b/api/fields/dataset_fields.py @@ -39,6 +39,14 @@ dataset_retrieval_model_fields = { "score_threshold_enabled": fields.Boolean, "score_threshold": fields.Float, } + +dataset_summary_index_fields = { + "enable": fields.Boolean, + "model_name": fields.String, + "model_provider_name": fields.String, + "summary_prompt": fields.String, +} + external_retrieval_model_fields = { "top_k": fields.Integer, "score_threshold": fields.Float, @@ -83,6 +91,7 @@ dataset_detail_fields = { "embedding_model_provider": fields.String, "embedding_available": fields.Boolean, "retrieval_model_dict": fields.Nested(dataset_retrieval_model_fields), + "summary_index_setting": fields.Nested(dataset_summary_index_fields), "tags": fields.List(fields.Nested(tag_fields)), "doc_form": fields.String, "external_knowledge_info": fields.Nested(external_knowledge_info_fields), diff --git a/api/fields/document_fields.py b/api/fields/document_fields.py index 9be59f7454..62f5e19e25 100644 --- a/api/fields/document_fields.py +++ b/api/fields/document_fields.py @@ -33,6 +33,8 @@ document_fields = { "hit_count": fields.Integer, "doc_form": fields.String, "doc_metadata": fields.List(fields.Nested(document_metadata_fields), attribute="doc_metadata_details"), + "summary_index_status": fields.String, # Summary index generation status: "waiting", "generating", "completed", "partial_error", or null if not enabled + "need_summary": fields.Boolean, # Whether this document needs summary index generation } document_with_segments_fields = { @@ -60,6 +62,8 @@ document_with_segments_fields = { "completed_segments": fields.Integer, "total_segments": fields.Integer, "doc_metadata": fields.List(fields.Nested(document_metadata_fields), attribute="doc_metadata_details"), + "summary_index_status": fields.String, # Summary index 
generation status: "waiting", "generating", "completed", "partial_error", or null if not enabled + "need_summary": fields.Boolean, # Whether this document needs summary index generation } dataset_and_document_fields = { diff --git a/api/fields/hit_testing_fields.py b/api/fields/hit_testing_fields.py index e70f9fa722..0b54992835 100644 --- a/api/fields/hit_testing_fields.py +++ b/api/fields/hit_testing_fields.py @@ -58,4 +58,5 @@ hit_testing_record_fields = { "score": fields.Float, "tsne_position": fields.Raw, "files": fields.List(fields.Nested(files_fields)), + "summary": fields.String, # Summary content if retrieved via summary index } diff --git a/api/fields/message_fields.py b/api/fields/message_fields.py index 2bba198fa8..797f01c00c 100644 --- a/api/fields/message_fields.py +++ b/api/fields/message_fields.py @@ -59,6 +59,7 @@ class MessageListItem(ResponseModel): message_files: list[MessageFile] status: str error: str | None = None + generation_detail: JSONValueType | None = Field(default=None, validation_alias="generation_detail_dict") @field_validator("inputs", mode="before") @classmethod diff --git a/api/fields/segment_fields.py b/api/fields/segment_fields.py index 56d6b68378..2ce9fb154c 100644 --- a/api/fields/segment_fields.py +++ b/api/fields/segment_fields.py @@ -49,4 +49,5 @@ segment_fields = { "stopped_at": TimestampField, "child_chunks": fields.List(fields.Nested(child_chunk_fields)), "attachments": fields.List(fields.Nested(attachment_fields)), + "summary": fields.String, # Summary content for the segment } diff --git a/api/fields/workflow_run_fields.py b/api/fields/workflow_run_fields.py index 476025064f..1b2948811b 100644 --- a/api/fields/workflow_run_fields.py +++ b/api/fields/workflow_run_fields.py @@ -81,6 +81,7 @@ workflow_run_detail_fields = { "inputs": fields.Raw(attribute="inputs_dict"), "status": fields.String, "outputs": fields.Raw(attribute="outputs_dict"), + "outputs_as_generation": fields.Boolean, "error": fields.String, "elapsed_time": 
fields.Float, "total_tokens": fields.Integer, @@ -129,6 +130,7 @@ workflow_run_node_execution_fields = { "inputs_truncated": fields.Boolean, "outputs_truncated": fields.Boolean, "process_data_truncated": fields.Boolean, + "generation_detail": fields.Raw, } workflow_run_node_execution_list_fields = { diff --git a/api/migrations/versions/2025_12_10_1504-8a7f2ad7c23e_add_workflow_runs_created_at_idx.py b/api/migrations/versions/2025_12_10_1504-8a7f2ad7c23e_add_workflow_runs_created_at_idx.py new file mode 100644 index 0000000000..7968429ca8 --- /dev/null +++ b/api/migrations/versions/2025_12_10_1504-8a7f2ad7c23e_add_workflow_runs_created_at_idx.py @@ -0,0 +1,29 @@ +"""Add index on workflow_runs.created_at + +Revision ID: 8a7f2ad7c23e +Revises: d57accd375ae +Create Date: 2025-12-10 15:04:00.000000 +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = "8a7f2ad7c23e" +down_revision = "d57accd375ae" +branch_labels = None +depends_on = None + + +def upgrade(): + with op.batch_alter_table("workflow_runs", schema=None) as batch_op: + batch_op.create_index( + batch_op.f("workflow_runs_created_at_idx"), + ["created_at"], + unique=False, + ) + + +def downgrade(): + with op.batch_alter_table("workflow_runs", schema=None) as batch_op: + batch_op.drop_index(batch_op.f("workflow_runs_created_at_idx")) diff --git a/api/migrations/versions/2025_12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py b/api/migrations/versions/2025_12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py new file mode 100644 index 0000000000..40bbbded1d --- /dev/null +++ b/api/migrations/versions/2025_12_15_1614-6bb0832495f0_alter_table_pipeline_recommended_.py @@ -0,0 +1,64 @@ +"""Alter table pipeline_recommended_plugins add column type + +Revision ID: 6bb0832495f0 +Revises: 7bb281b7a422 +Create Date: 2025-12-15 16:14:38.482072 + +""" +from alembic import op +import models as models +import sqlalchemy as sa +from sqlalchemy.dialects 
import postgresql + +# revision identifiers, used by Alembic. +revision = '6bb0832495f0' +down_revision = '7bb281b7a422' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('app_triggers', schema=None) as batch_op: + batch_op.alter_column('provider_name', + existing_type=sa.VARCHAR(length=255), + nullable=False, + existing_server_default=sa.text("''::character varying")) + + with op.batch_alter_table('operation_logs', schema=None) as batch_op: + batch_op.alter_column('content', + existing_type=postgresql.JSON(astext_type=sa.Text()), + nullable=False) + + with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: + batch_op.add_column(sa.Column('type', sa.String(length=50), nullable=True)) + + with op.batch_alter_table('providers', schema=None) as batch_op: + batch_op.alter_column('quota_used', + existing_type=sa.BIGINT(), + nullable=False) + + # ### end Alembic commands ### + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table('providers', schema=None) as batch_op: + batch_op.alter_column('quota_used', + existing_type=sa.BIGINT(), + nullable=True) + + with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: + batch_op.drop_column('type') + + with op.batch_alter_table('operation_logs', schema=None) as batch_op: + batch_op.alter_column('content', + existing_type=postgresql.JSON(astext_type=sa.Text()), + nullable=True) + + with op.batch_alter_table('app_triggers', schema=None) as batch_op: + batch_op.alter_column('provider_name', + existing_type=sa.VARCHAR(length=255), + nullable=True, + existing_server_default=sa.text("''::character varying")) + + # ### end Alembic commands ### diff --git a/api/migrations/versions/2025_12_16_1424-2536f83803a8_add_type_column_not_null_default_tool.py b/api/migrations/versions/2025_12_16_1424-2536f83803a8_add_type_column_not_null_default_tool.py new file mode 100644 index 0000000000..20ca06d200 --- /dev/null +++ b/api/migrations/versions/2025_12_16_1424-2536f83803a8_add_type_column_not_null_default_tool.py @@ -0,0 +1,33 @@ +"""add type column not null default tool + +Revision ID: 2536f83803a8 +Revises: 6bb0832495f0 +Create Date: 2025-12-16 14:24:40.740253 + +""" +from alembic import op +import models as models +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '2536f83803a8' +down_revision = '6bb0832495f0' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: + batch_op.add_column(sa.Column('type', sa.String(length=50), nullable=False, server_default='tool')) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table('pipeline_recommended_plugins', schema=None) as batch_op: + batch_op.drop_column('type') + + # ### end Alembic commands ### diff --git a/api/migrations/versions/2025_12_30_1617-85c8b4a64f53_add_llm_generation_detail_table.py b/api/migrations/versions/2025_12_30_1617-85c8b4a64f53_add_llm_generation_detail_table.py new file mode 100644 index 0000000000..2b920a5ac6 --- /dev/null +++ b/api/migrations/versions/2025_12_30_1617-85c8b4a64f53_add_llm_generation_detail_table.py @@ -0,0 +1,46 @@ +"""add llm generation detail table. + +Revision ID: 85c8b4a64f53 +Revises: 7df29de0f6be +Create Date: 2025-12-10 16:17:46.597669 + +""" +from alembic import op +import models as models +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = '85c8b4a64f53' +down_revision = '7df29de0f6be' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table('llm_generation_details', + sa.Column('id', models.types.StringUUID(), nullable=False), + sa.Column('tenant_id', models.types.StringUUID(), nullable=False), + sa.Column('app_id', models.types.StringUUID(), nullable=False), + sa.Column('message_id', models.types.StringUUID(), nullable=True), + sa.Column('workflow_run_id', models.types.StringUUID(), nullable=True), + sa.Column('node_id', sa.String(length=255), nullable=True), + sa.Column('reasoning_content', models.types.LongText(), nullable=True), + sa.Column('tool_calls', models.types.LongText(), nullable=True), + sa.Column('sequence', models.types.LongText(), nullable=True), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.CheckConstraint('(message_id IS NOT NULL AND workflow_run_id IS NULL AND node_id IS NULL) OR (message_id IS NULL AND workflow_run_id IS NOT NULL AND node_id IS NOT NULL)',
name=op.f('llm_generation_details_ck_llm_generation_detail_assoc_mode_check')), + sa.PrimaryKeyConstraint('id', name='llm_generation_detail_pkey'), + sa.UniqueConstraint('message_id', name=op.f('llm_generation_details_message_id_key')) + ) + with op.batch_alter_table('llm_generation_details', schema=None) as batch_op: + batch_op.create_index('idx_llm_generation_detail_message', ['message_id'], unique=False) + batch_op.create_index('idx_llm_generation_detail_workflow', ['workflow_run_id', 'node_id'], unique=False) + + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table('llm_generation_details') + # ### end Alembic commands ### diff --git a/api/migrations/versions/2026_01_09_1110-f9f6d18a37f9_add_table_explore_banner_and_trial.py b/api/migrations/versions/2026_01_09_1110-f9f6d18a37f9_add_table_explore_banner_and_trial.py new file mode 100644 index 0000000000..c34fa5f819 --- /dev/null +++ b/api/migrations/versions/2026_01_09_1110-f9f6d18a37f9_add_table_explore_banner_and_trial.py @@ -0,0 +1,73 @@ +"""add table explore banner and trial + +Revision ID: f9f6d18a37f9 +Revises: 85c8b4a64f53 +Create Date: 2026-01-09 11:10:18.079355 + +""" +from alembic import op +import models as models +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = 'f9f6d18a37f9' +down_revision = '85c8b4a64f53' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust!
### + op.create_table('account_trial_app_records', + sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('account_id', models.types.StringUUID(), nullable=False), + sa.Column('app_id', models.types.StringUUID(), nullable=False), + sa.Column('count', sa.Integer(), nullable=False), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.PrimaryKeyConstraint('id', name='user_trial_app_pkey'), + sa.UniqueConstraint('account_id', 'app_id', name='unique_account_trial_app_record') + ) + with op.batch_alter_table('account_trial_app_records', schema=None) as batch_op: + batch_op.create_index('account_trial_app_record_account_id_idx', ['account_id'], unique=False) + batch_op.create_index('account_trial_app_record_app_id_idx', ['app_id'], unique=False) + + op.create_table('exporle_banners', + sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('content', sa.JSON(), nullable=False), + sa.Column('link', sa.String(length=255), nullable=False), + sa.Column('sort', sa.Integer(), nullable=False), + sa.Column('status', sa.String(length=255), server_default=sa.text("'enabled'::character varying"), nullable=False), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.Column('language', sa.String(length=255), server_default=sa.text("'en-US'::character varying"), nullable=False), + sa.PrimaryKeyConstraint('id', name='exporler_banner_pkey') + ) + op.create_table('trial_apps', + sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('app_id', models.types.StringUUID(), nullable=False), + sa.Column('tenant_id', models.types.StringUUID(), nullable=False), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.Column('trial_limit', 
sa.Integer(), nullable=False), + sa.PrimaryKeyConstraint('id', name='trial_app_pkey'), + sa.UniqueConstraint('app_id', name='unique_trail_app_id') + ) + with op.batch_alter_table('trial_apps', schema=None) as batch_op: + batch_op.create_index('trial_app_app_id_idx', ['app_id'], unique=False) + batch_op.create_index('trial_app_tenant_id_idx', ['tenant_id'], unique=False) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('trial_apps', schema=None) as batch_op: + batch_op.drop_index('trial_app_tenant_id_idx') + batch_op.drop_index('trial_app_app_id_idx') + + op.drop_table('trial_apps') + op.drop_table('exporle_banners') + with op.batch_alter_table('account_trial_app_records', schema=None) as batch_op: + batch_op.drop_index('account_trial_app_record_app_id_idx') + batch_op.drop_index('account_trial_app_record_account_id_idx') + + op.drop_table('account_trial_app_records') + # ### end Alembic commands ### diff --git a/api/migrations/versions/2026_01_12_1358-562dcce7d77c_add_summaryindex_feature.py b/api/migrations/versions/2026_01_12_1358-562dcce7d77c_add_summaryindex_feature.py new file mode 100644 index 0000000000..40fe419ef6 --- /dev/null +++ b/api/migrations/versions/2026_01_12_1358-562dcce7d77c_add_summaryindex_feature.py @@ -0,0 +1,69 @@ +"""add SummaryIndex feature + +Revision ID: 562dcce7d77c +Revises: 03ea244985ce +Create Date: 2026-01-12 13:58:40.584802 + +""" +from alembic import op +import models as models +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '562dcce7d77c' +down_revision = '03ea244985ce' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table('document_segment_summary', + sa.Column('id', models.types.StringUUID(), nullable=False), + sa.Column('dataset_id', models.types.StringUUID(), nullable=False), + sa.Column('document_id', models.types.StringUUID(), nullable=False), + sa.Column('chunk_id', models.types.StringUUID(), nullable=False), + sa.Column('summary_content', models.types.LongText(), nullable=True), + sa.Column('summary_index_node_id', sa.String(length=255), nullable=True), + sa.Column('summary_index_node_hash', sa.String(length=255), nullable=True), + sa.Column('status', sa.String(length=32), server_default=sa.text("'generating'"), nullable=False), + sa.Column('error', models.types.LongText(), nullable=True), + sa.Column('enabled', sa.Boolean(), server_default=sa.text('true'), nullable=False), + sa.Column('disabled_at', sa.DateTime(), nullable=True), + sa.Column('disabled_by', models.types.StringUUID(), nullable=True), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.PrimaryKeyConstraint('id', name='document_segment_summary_pkey') + ) + with op.batch_alter_table('document_segment_summary', schema=None) as batch_op: + batch_op.create_index('document_segment_summary_chunk_id_idx', ['chunk_id'], unique=False) + batch_op.create_index('document_segment_summary_dataset_id_idx', ['dataset_id'], unique=False) + batch_op.create_index('document_segment_summary_document_id_idx', ['document_id'], unique=False) + batch_op.create_index('document_segment_summary_status_idx', ['status'], unique=False) + + with op.batch_alter_table('datasets', schema=None) as batch_op: + batch_op.add_column(sa.Column('summary_index_setting', models.types.AdjustedJSON(), nullable=True)) + + with op.batch_alter_table('documents', schema=None) as batch_op: + batch_op.add_column(sa.Column('need_summary', sa.Boolean(), server_default=sa.text('false'), 
nullable=True)) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('documents', schema=None) as batch_op: + batch_op.drop_column('need_summary') + + with op.batch_alter_table('datasets', schema=None) as batch_op: + batch_op.drop_column('summary_index_setting') + + with op.batch_alter_table('document_segment_summary', schema=None) as batch_op: + batch_op.drop_index('document_segment_summary_status_idx') + batch_op.drop_index('document_segment_summary_document_id_idx') + batch_op.drop_index('document_segment_summary_dataset_id_idx') + batch_op.drop_index('document_segment_summary_chunk_id_idx') + + op.drop_table('document_segment_summary') + # ### end Alembic commands ### diff --git a/api/models/__init__.py b/api/models/__init__.py index e23de832dc..4c2ef0fb71 100644 --- a/api/models/__init__.py +++ b/api/models/__init__.py @@ -35,6 +35,7 @@ from .enums import ( WorkflowTriggerStatus, ) from .model import ( + AccountTrialAppRecord, ApiRequest, ApiToken, App, @@ -47,8 +48,10 @@ from .model import ( DatasetRetrieverResource, DifySetup, EndUser, + ExporleBanner, IconType, InstalledApp, + LLMGenerationDetail, Message, MessageAgentThought, MessageAnnotation, @@ -62,6 +65,7 @@ from .model import ( TagBinding, TenantCreditPool, TraceAppConfig, + TrialApp, UploadFile, ) from .oauth import DatasourceOauthParamConfig, DatasourceProvider @@ -114,6 +118,7 @@ __all__ = [ "Account", "AccountIntegrate", "AccountStatus", + "AccountTrialAppRecord", "ApiRequest", "ApiToken", "ApiToolProvider", @@ -150,11 +155,13 @@ __all__ = [ "DocumentSegment", "Embedding", "EndUser", + "ExporleBanner", "ExternalKnowledgeApis", "ExternalKnowledgeBindings", "IconType", "InstalledApp", "InvitationCode", + "LLMGenerationDetail", "LoadBalancingModelConfig", "Message", "MessageAgentThought", @@ -188,6 +195,7 @@ __all__ = [ "ToolLabelBinding", "ToolModelInvoke", "TraceAppConfig", + "TrialApp", 
"TriggerOAuthSystemClient", "TriggerOAuthTenantClient", "TriggerSubscription", diff --git a/api/models/dataset.py b/api/models/dataset.py index 445ac6086f..6497c0efc0 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -72,6 +72,7 @@ class Dataset(Base): keyword_number = mapped_column(sa.Integer, nullable=True, server_default=sa.text("10")) collection_binding_id = mapped_column(StringUUID, nullable=True) retrieval_model = mapped_column(AdjustedJSON, nullable=True) + summary_index_setting = mapped_column(AdjustedJSON, nullable=True) built_in_field_enabled = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) icon_info = mapped_column(AdjustedJSON, nullable=True) runtime_mode = mapped_column(sa.String(255), nullable=True, server_default=sa.text("'general'")) @@ -419,6 +420,7 @@ class Document(Base): doc_metadata = mapped_column(AdjustedJSON, nullable=True) doc_form = mapped_column(String(255), nullable=False, server_default=sa.text("'text_model'")) doc_language = mapped_column(String(255), nullable=True) + need_summary: Mapped[bool | None] = mapped_column(sa.Boolean, nullable=True, server_default=sa.text("false")) DATA_SOURCES = ["upload_file", "notion_import", "website_crawl"] @@ -1567,3 +1569,34 @@ class SegmentAttachmentBinding(Base): segment_id: Mapped[str] = mapped_column(StringUUID, nullable=False) attachment_id: Mapped[str] = mapped_column(StringUUID, nullable=False) created_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + + +class DocumentSegmentSummary(Base): + __tablename__ = "document_segment_summary" + __table_args__ = ( + sa.PrimaryKeyConstraint("id", name="document_segment_summary_pkey"), + sa.Index("document_segment_summary_dataset_id_idx", "dataset_id"), + sa.Index("document_segment_summary_document_id_idx", "document_id"), + sa.Index("document_segment_summary_chunk_id_idx", "chunk_id"), + sa.Index("document_segment_summary_status_idx", "status"), + ) + + 
id: Mapped[str] = mapped_column(StringUUID, nullable=False, default=lambda: str(uuid4())) + dataset_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + document_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + # corresponds to DocumentSegment.id or parent chunk id + chunk_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + summary_content: Mapped[str] = mapped_column(LongText, nullable=True) + summary_index_node_id: Mapped[str] = mapped_column(String(255), nullable=True) + summary_index_node_hash: Mapped[str] = mapped_column(String(255), nullable=True) + status: Mapped[str] = mapped_column(String(32), nullable=False, server_default=sa.text("'generating'")) + error: Mapped[str] = mapped_column(LongText, nullable=True) + enabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("true")) + disabled_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True) + disabled_by = mapped_column(StringUUID, nullable=True) + created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp()) + + def __repr__(self): + return f"<DocumentSegmentSummary id={self.id} chunk_id={self.chunk_id} status={self.status}>" + diff --git a/api/models/model.py b/api/models/model.py index a48f4d34d4..d3db9dd97d 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -33,6 +33,8 @@ from .provider_ids import GenericProviderID from .types import LongText, StringUUID if TYPE_CHECKING: + from core.app.entities.llm_generation_entities import LLMGenerationDetailData + from .workflow import Workflow @@ -603,6 +605,64 @@ class InstalledApp(TypeBase): return tenant +class TrialApp(Base): + __tablename__ = "trial_apps" + __table_args__ = ( + sa.PrimaryKeyConstraint("id", name="trial_app_pkey"), + sa.Index("trial_app_app_id_idx", "app_id"), + sa.Index("trial_app_tenant_id_idx", "tenant_id"), +
sa.UniqueConstraint("app_id", name="unique_trail_app_id"), + ) + + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + app_id = mapped_column(StringUUID, nullable=False) + tenant_id = mapped_column(StringUUID, nullable=False) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + trial_limit = mapped_column(sa.Integer, nullable=False, default=3) + + @property + def app(self) -> App | None: + app = db.session.query(App).where(App.id == self.app_id).first() + return app + + +class AccountTrialAppRecord(Base): + __tablename__ = "account_trial_app_records" + __table_args__ = ( + sa.PrimaryKeyConstraint("id", name="user_trial_app_pkey"), + sa.Index("account_trial_app_record_account_id_idx", "account_id"), + sa.Index("account_trial_app_record_app_id_idx", "app_id"), + sa.UniqueConstraint("account_id", "app_id", name="unique_account_trial_app_record"), + ) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + account_id = mapped_column(StringUUID, nullable=False) + app_id = mapped_column(StringUUID, nullable=False) + count = mapped_column(sa.Integer, nullable=False, default=0) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + + @property + def app(self) -> App | None: + app = db.session.query(App).where(App.id == self.app_id).first() + return app + + @property + def user(self) -> Account | None: + user = db.session.query(Account).where(Account.id == self.account_id).first() + return user + + +class ExporleBanner(Base): + __tablename__ = "exporle_banners" + __table_args__ = (sa.PrimaryKeyConstraint("id", name="exporler_banner_pkey"),) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + content = mapped_column(sa.JSON, nullable=False) + link = mapped_column(String(255), nullable=False) + sort = mapped_column(sa.Integer, nullable=False) + status = mapped_column(sa.String(255), nullable=False, 
server_default=sa.text("'enabled'::character varying")) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + language = mapped_column(String(255), nullable=False, server_default=sa.text("'en-US'::character varying")) + + class OAuthProviderApp(TypeBase): """ Globally shared OAuth provider app information. @@ -1204,6 +1264,18 @@ class Message(Base): .all() ) + # FIXME (Novice) -- It's easy to cause N+1 query problem here. + @property + def generation_detail(self) -> dict[str, Any] | None: + """ + Get LLM generation detail for this message. + Returns the detail as a dictionary or None if not found. + """ + detail = db.session.query(LLMGenerationDetail).filter_by(message_id=self.id).first() + if detail: + return detail.to_dict() + return None + @property def retriever_resources(self) -> Any: return self.message_metadata_dict.get("retriever_resources") if self.message_metadata else [] @@ -2107,3 +2179,87 @@ class TenantCreditPool(Base): def has_sufficient_credits(self, required_credits: int) -> bool: return self.remaining_credits >= required_credits + + +class LLMGenerationDetail(Base): + """ + Store LLM generation details including reasoning process and tool calls. 
+ + Association (choose one): + - For apps with Message: use message_id (one-to-one) + - For Workflow: use workflow_run_id + node_id (one run may have multiple LLM nodes) + """ + + __tablename__ = "llm_generation_details" + __table_args__ = ( + sa.PrimaryKeyConstraint("id", name="llm_generation_detail_pkey"), + sa.Index("idx_llm_generation_detail_message", "message_id"), + sa.Index("idx_llm_generation_detail_workflow", "workflow_run_id", "node_id"), + sa.CheckConstraint( + "(message_id IS NOT NULL AND workflow_run_id IS NULL AND node_id IS NULL)" + " OR " + "(message_id IS NULL AND workflow_run_id IS NOT NULL AND node_id IS NOT NULL)", + name="ck_llm_generation_detail_assoc_mode", + ), + ) + + id: Mapped[str] = mapped_column(StringUUID, default=lambda: str(uuid4())) + tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + app_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + + # Association fields (choose one) + message_id: Mapped[str | None] = mapped_column(StringUUID, nullable=True, unique=True) + workflow_run_id: Mapped[str | None] = mapped_column(StringUUID, nullable=True) + node_id: Mapped[str | None] = mapped_column(String(255), nullable=True) + + # Core data as JSON strings + reasoning_content: Mapped[str | None] = mapped_column(LongText) + tool_calls: Mapped[str | None] = mapped_column(LongText) + sequence: Mapped[str | None] = mapped_column(LongText) + + created_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + + def to_domain_model(self) -> LLMGenerationDetailData: + """Convert to Pydantic domain model with proper validation.""" + from core.app.entities.llm_generation_entities import LLMGenerationDetailData + + return LLMGenerationDetailData( + reasoning_content=json.loads(self.reasoning_content) if self.reasoning_content else [], + tool_calls=json.loads(self.tool_calls) if self.tool_calls else [], + sequence=json.loads(self.sequence) if self.sequence else [], + ) + + def 
to_dict(self) -> dict[str, Any]: + """Convert to dictionary for API response.""" + return self.to_domain_model().to_response_dict() + + @classmethod + def from_domain_model( + cls, + data: LLMGenerationDetailData, + *, + tenant_id: str, + app_id: str, + message_id: str | None = None, + workflow_run_id: str | None = None, + node_id: str | None = None, + ) -> LLMGenerationDetail: + """Create from Pydantic domain model.""" + # Enforce association mode at object creation time as well. + message_mode = message_id is not None + workflow_mode = workflow_run_id is not None or node_id is not None + if message_mode and workflow_mode: + raise ValueError("LLMGenerationDetail cannot set both message_id and workflow_run_id/node_id.") + if not message_mode and not (workflow_run_id and node_id): + raise ValueError("LLMGenerationDetail requires either message_id or workflow_run_id+node_id.") + + return cls( + tenant_id=tenant_id, + app_id=app_id, + message_id=message_id, + workflow_run_id=workflow_run_id, + node_id=node_id, + reasoning_content=json.dumps(data.reasoning_content) if data.reasoning_content else None, + tool_calls=json.dumps([tc.model_dump() for tc in data.tool_calls]) if data.tool_calls else None, + sequence=json.dumps([seg.model_dump() for seg in data.sequence]) if data.sequence else None, + ) diff --git a/api/models/workflow.py b/api/models/workflow.py index 072c6100b5..068318278f 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -59,6 +59,37 @@ from .types import EnumText, LongText, StringUUID logger = logging.getLogger(__name__) +def is_generation_outputs(outputs: Mapping[str, Any]) -> bool: + if not outputs: + return False + + allowed_sequence_types = {"reasoning", "content", "tool_call"} + + def valid_sequence_item(item: Mapping[str, Any]) -> bool: + return isinstance(item, Mapping) and item.get("type") in allowed_sequence_types + + def valid_value(value: Any) -> bool: + if not isinstance(value, Mapping): + return False + + content = 
value.get("content") + reasoning_content = value.get("reasoning_content") + tool_calls = value.get("tool_calls") + sequence = value.get("sequence") + + return ( + isinstance(content, str) + and isinstance(reasoning_content, list) + and all(isinstance(item, str) for item in reasoning_content) + and isinstance(tool_calls, list) + and all(isinstance(item, Mapping) for item in tool_calls) + and isinstance(sequence, list) + and all(valid_sequence_item(item) for item in sequence) + ) + + return all(valid_value(value) for value in outputs.values()) + + class WorkflowType(StrEnum): """ Workflow Type Enum @@ -667,6 +698,10 @@ class WorkflowRun(Base): def workflow(self): return db.session.query(Workflow).where(Workflow.id == self.workflow_id).first() + @property + def outputs_as_generation(self): + return is_generation_outputs(self.outputs_dict) + def to_dict(self): return { "id": self.id, @@ -680,6 +715,7 @@ class WorkflowRun(Base): "inputs": self.inputs_dict, "status": self.status, "outputs": self.outputs_dict, + "outputs_as_generation": self.outputs_as_generation, "error": self.error, "elapsed_time": self.elapsed_time, "total_tokens": self.total_tokens, diff --git a/api/services/annotation_service.py b/api/services/annotation_service.py index b73302508a..ca41bcef9d 100644 --- a/api/services/annotation_service.py +++ b/api/services/annotation_service.py @@ -355,7 +355,6 @@ class AppAnnotationService: def batch_import_app_annotations(cls, app_id, file: FileStorage): """ Batch import annotations from CSV file with enhanced security checks. - Security features: - File size validation - Row count limits (min/max) @@ -364,7 +363,6 @@ class AppAnnotationService: - Concurrency tracking """ from configs import dify_config - # get app info current_user, current_tenant_id = current_account_with_tenant() app = ( @@ -448,27 +446,31 @@ class AppAnnotationService: f"The CSV file must contain at least {min_records} valid annotation record(s). " f"Found {len(result)} valid record(s)." 
) - # Check annotation quota limit features = FeatureService.get_features(current_tenant_id) if features.billing.enabled: annotation_quota_limit = features.annotation_quota_limit if annotation_quota_limit.limit < len(result) + annotation_quota_limit.size: - raise ValueError("The number of annotations exceeds the limit of your subscription.") - # async job + raise ValueError( + f"The number of annotations ({len(result)}) would exceed your subscription limit. " + f"Current usage: {annotation_quota_limit.size}/{annotation_quota_limit.limit}. " + f"Available: {annotation_quota_limit.limit - annotation_quota_limit.size}." + ) + + # Create async job job_id = str(uuid.uuid4()) indexing_cache_key = f"app_annotation_batch_import_{str(job_id)}" - # Register job in active tasks list for concurrency tracking current_time = int(naive_utc_now().timestamp() * 1000) active_jobs_key = f"annotation_import_active:{current_tenant_id}" redis_client.zadd(active_jobs_key, {job_id: current_time}) redis_client.expire(active_jobs_key, 7200) # 2 hours TTL - # Set job status redis_client.setnx(indexing_cache_key, "waiting") - batch_import_annotations_task.delay(str(job_id), result, app_id, current_tenant_id, current_user.id) + redis_client.expire(indexing_cache_key, 3600) # 1 hour TTL + # Send batch import task + batch_import_annotations_task.delay(str(job_id), result, app_id, current_tenant_id, current_user.id) except ValueError as e: return {"error_msg": str(e)} except Exception as e: diff --git a/api/services/billing_service.py b/api/services/billing_service.py index 26ce8cad33..e5dc3e3983 100644 --- a/api/services/billing_service.py +++ b/api/services/billing_service.py @@ -44,6 +44,33 @@ class BillingService: billing_info = cls._send_request("GET", "/subscription/info", params=params) return billing_info + @classmethod + def get_info_bulk(cls, tenant_ids: Sequence[str]) -> dict[str, str]: + """ + Bulk billing info fetch via billing API. 
+ + Payload: {"tenant_ids": ["t1", "t2", ...]} (max 200 per request) + + Returns: + Mapping of tenant_id -> plan + """ + results: dict[str, str] = {} + + chunk_size = 200 + for i in range(0, len(tenant_ids), chunk_size): + chunk = tenant_ids[i : i + chunk_size] + try: + resp = cls._send_request("POST", "/subscription/plan/batch", json={"tenant_ids": chunk}) + data = resp.get("data", {}) + for tenant_id, plan in data.items(): + if isinstance(plan, str): + results[tenant_id] = plan + except Exception: + logger.exception("Failed to fetch billing info batch for tenants: %s", chunk) + continue + + return results + @classmethod def get_tenant_feature_plan_usage_info(cls, tenant_id: str): params = {"tenant_id": tenant_id} diff --git a/api/services/clear_free_plan_expired_workflow_run_logs.py b/api/services/clear_free_plan_expired_workflow_run_logs.py new file mode 100644 index 0000000000..1fe2bad2d0 --- /dev/null +++ b/api/services/clear_free_plan_expired_workflow_run_logs.py @@ -0,0 +1,171 @@ +import datetime +import logging +from collections.abc import Iterable, Sequence + +import click +from sqlalchemy.orm import Session, sessionmaker + +from configs import dify_config +from enums.cloud_plan import CloudPlan +from extensions.ext_database import db +from models.workflow import WorkflowRun +from repositories.api_workflow_run_repository import APIWorkflowRunRepository +from repositories.sqlalchemy_api_workflow_node_execution_repository import ( + DifyAPISQLAlchemyWorkflowNodeExecutionRepository, +) +from repositories.sqlalchemy_workflow_trigger_log_repository import SQLAlchemyWorkflowTriggerLogRepository +from services.billing_service import BillingService + +logger = logging.getLogger(__name__) + + +class WorkflowRunCleanup: + def __init__( + self, + days: int, + batch_size: int, + start_after: datetime.datetime | None = None, + end_before: datetime.datetime | None = None, + workflow_run_repo: APIWorkflowRunRepository | None = None, + ): + if (start_after is None) ^ 
(end_before is None): + raise ValueError("start_after and end_before must be both set or both omitted.") + + computed_cutoff = datetime.datetime.now() - datetime.timedelta(days=days) + self.window_start = start_after + self.window_end = end_before or computed_cutoff + + if self.window_start and self.window_end <= self.window_start: + raise ValueError("end_before must be greater than start_after.") + + self.batch_size = batch_size + self.billing_cache: dict[str, CloudPlan | None] = {} + self.workflow_run_repo: APIWorkflowRunRepository + if workflow_run_repo: + self.workflow_run_repo = workflow_run_repo + else: + # Lazy import to avoid circular dependencies during module import + from repositories.factory import DifyAPIRepositoryFactory + + session_maker = sessionmaker(bind=db.engine, expire_on_commit=False) + self.workflow_run_repo = DifyAPIRepositoryFactory.create_api_workflow_run_repository(session_maker) + + def run(self) -> None: + click.echo( + click.style( + f"Cleaning workflow runs " + f"{'between ' + self.window_start.isoformat() + ' and ' if self.window_start else 'before '}" + f"{self.window_end.isoformat()} (batch={self.batch_size})", + fg="white", + ) + ) + + total_runs_deleted = 0 + batch_index = 0 + last_seen: tuple[datetime.datetime, str] | None = None + + while True: + run_rows = self.workflow_run_repo.get_runs_batch_by_time_range( + start_after=self.window_start, + end_before=self.window_end, + last_seen=last_seen, + batch_size=self.batch_size, + ) + if not run_rows: + break + + batch_index += 1 + last_seen = (run_rows[-1].created_at, run_rows[-1].id) + tenant_ids = {row.tenant_id for row in run_rows} + free_tenants = self._filter_free_tenants(tenant_ids) + free_runs = [row for row in run_rows if row.tenant_id in free_tenants] + paid_or_skipped = len(run_rows) - len(free_runs) + + if not free_runs: + click.echo( + click.style( + f"[batch #{batch_index}] skipped (no sandbox runs in batch, {paid_or_skipped} paid/unknown)", + fg="yellow", + ) + ) + 
continue + + try: + counts = self.workflow_run_repo.delete_runs_with_related( + free_runs, + delete_node_executions=self._delete_node_executions, + delete_trigger_logs=self._delete_trigger_logs, + ) + except Exception: + logger.exception("Failed to delete workflow runs batch ending at %s", last_seen[0]) + raise + + total_runs_deleted += counts["runs"] + click.echo( + click.style( + f"[batch #{batch_index}] deleted runs: {counts['runs']} " + f"(nodes {counts['node_executions']}, offloads {counts['offloads']}, " + f"app_logs {counts['app_logs']}, trigger_logs {counts['trigger_logs']}, " + f"pauses {counts['pauses']}, pause_reasons {counts['pause_reasons']}); " + f"skipped {paid_or_skipped} paid/unknown", + fg="green", + ) + ) + + if self.window_start: + summary_message = ( + f"Cleanup complete. Deleted {total_runs_deleted} workflow runs " + f"between {self.window_start.isoformat()} and {self.window_end.isoformat()}" + ) + else: + summary_message = ( + f"Cleanup complete. Deleted {total_runs_deleted} workflow runs before {self.window_end.isoformat()}" + ) + + click.echo(click.style(summary_message, fg="white")) + + def _filter_free_tenants(self, tenant_ids: Iterable[str]) -> set[str]: + if not dify_config.BILLING_ENABLED: + return set(tenant_ids) + + tenant_id_list = list(tenant_ids) + uncached_tenants = [tenant_id for tenant_id in tenant_id_list if tenant_id not in self.billing_cache] + + if uncached_tenants: + try: + bulk_info = BillingService.get_info_bulk(uncached_tenants) + except Exception: + bulk_info = {} + logger.exception("Failed to fetch billing plans in bulk for tenants: %s", uncached_tenants) + + for tenant_id in uncached_tenants: + plan: CloudPlan | None = None + info = bulk_info.get(tenant_id) + if info: + try: + plan = CloudPlan(info) + except Exception: + logger.exception("Failed to parse billing plan for tenant %s", tenant_id) + else: + logger.warning("Missing billing info for tenant %s in bulk resp; treating as non-free", tenant_id) + + 
self.billing_cache[tenant_id] = plan + + return {tenant_id for tenant_id in tenant_id_list if self.billing_cache.get(tenant_id) == CloudPlan.SANDBOX} + + def _delete_trigger_logs(self, session: Session, run_ids: Sequence[str]) -> int: + trigger_repo = SQLAlchemyWorkflowTriggerLogRepository(session) + return trigger_repo.delete_by_run_ids(run_ids) + + def _delete_node_executions(self, session: Session, runs: Sequence[WorkflowRun]) -> tuple[int, int]: + run_contexts: list[DifyAPISQLAlchemyWorkflowNodeExecutionRepository.RunContext] = [ + { + "run_id": run.id, + "tenant_id": run.tenant_id, + "app_id": run.app_id, + "workflow_id": run.workflow_id, + "triggered_from": run.triggered_from, + } + for run in runs + ] + return DifyAPISQLAlchemyWorkflowNodeExecutionRepository.delete_by_runs(session, run_contexts) diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 18e5613438..82e9770286 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -89,6 +89,7 @@ from tasks.enable_segments_to_index_task import enable_segments_to_index_task from tasks.recover_document_indexing_task import recover_document_indexing_task from tasks.remove_document_from_index_task import remove_document_from_index_task from tasks.retry_document_indexing_task import retry_document_indexing_task +from tasks.regenerate_summary_index_task import regenerate_summary_index_task from tasks.sync_website_document_indexing_task import sync_website_document_indexing_task logger = logging.getLogger(__name__) @@ -474,6 +475,11 @@ class DatasetService: if external_retrieval_model: dataset.retrieval_model = external_retrieval_model + # Update summary index setting if provided + summary_index_setting = data.get("summary_index_setting", None) + if summary_index_setting is not None: + dataset.summary_index_setting = summary_index_setting + # Update basic dataset properties dataset.name = data.get("name", dataset.name) dataset.description = 
data.get("description", dataset.description) @@ -556,12 +562,20 @@ class DatasetService: # Handle indexing technique changes and embedding model updates action = DatasetService._handle_indexing_technique_change(dataset, data, filtered_data) + # Check if summary_index_setting model changed (before updating database) + summary_model_changed = DatasetService._check_summary_index_setting_model_changed( + dataset, data + ) + # Add metadata fields filtered_data["updated_by"] = user.id filtered_data["updated_at"] = naive_utc_now() # update Retrieval model if data.get("retrieval_model"): filtered_data["retrieval_model"] = data["retrieval_model"] + # update summary index setting + if data.get("summary_index_setting"): + filtered_data["summary_index_setting"] = data.get("summary_index_setting") # update icon info if data.get("icon_info"): filtered_data["icon_info"] = data.get("icon_info") @@ -570,12 +584,30 @@ class DatasetService: db.session.query(Dataset).filter_by(id=dataset.id).update(filtered_data) db.session.commit() + # Reload dataset to get updated values + db.session.refresh(dataset) + # update pipeline knowledge base node data DatasetService._update_pipeline_knowledge_base_node_data(dataset, user.id) # Trigger vector index task if indexing technique changed if action: deal_dataset_vector_index_task.delay(dataset.id, action) + # If embedding_model changed, also regenerate summary vectors + if action == "update": + regenerate_summary_index_task.delay( + dataset.id, + regenerate_reason="embedding_model_changed", + regenerate_vectors_only=True, + ) + + # Trigger summary index regeneration if summary model changed + if summary_model_changed: + regenerate_summary_index_task.delay( + dataset.id, + regenerate_reason="summary_model_changed", + regenerate_vectors_only=False, + ) return dataset @@ -614,6 +646,7 @@ class DatasetService: knowledge_index_node_data["chunk_structure"] = dataset.chunk_structure knowledge_index_node_data["indexing_technique"] = 
dataset.indexing_technique # pyright: ignore[reportAttributeAccessIssue] knowledge_index_node_data["keyword_number"] = dataset.keyword_number + knowledge_index_node_data["summary_index_setting"] = dataset.summary_index_setting node["data"] = knowledge_index_node_data updated = True except Exception: @@ -852,6 +885,49 @@ class DatasetService: ) filtered_data["collection_binding_id"] = dataset_collection_binding.id + @staticmethod + def _check_summary_index_setting_model_changed(dataset: Dataset, data: dict[str, Any]) -> bool: + """ + Check if summary_index_setting model (model_name or model_provider_name) has changed. + + Args: + dataset: Current dataset object + data: Update data dictionary + + Returns: + bool: True if summary model changed, False otherwise + """ + # Check if summary_index_setting is being updated + if "summary_index_setting" not in data or data.get("summary_index_setting") is None: + return False + + new_summary_setting = data.get("summary_index_setting") + old_summary_setting = dataset.summary_index_setting + + # If old setting doesn't exist or is disabled, no need to regenerate + if not old_summary_setting or not old_summary_setting.get("enable"): + return False + + # If new setting is disabled, no need to regenerate + if not new_summary_setting or not new_summary_setting.get("enable"): + return False + + # Compare model_name and model_provider_name + old_model_name = old_summary_setting.get("model_name") + old_model_provider = old_summary_setting.get("model_provider_name") + new_model_name = new_summary_setting.get("model_name") + new_model_provider = new_summary_setting.get("model_provider_name") + + # Check if model changed + if old_model_name != new_model_name or old_model_provider != new_model_provider: + logger.info( + f"Summary index setting model changed for dataset {dataset.id}: " + f"old={old_model_provider}/{old_model_name}, new={new_model_provider}/{new_model_name}" + ) + return True + + return False + @staticmethod def 
update_rag_pipeline_dataset_settings( session: Session, dataset: Dataset, knowledge_configuration: KnowledgeConfiguration, has_published: bool = False @@ -1824,6 +1900,8 @@ class DocumentService: DuplicateDocumentIndexingTaskProxy( dataset.tenant_id, dataset.id, duplicate_document_ids ).delay() + # Note: Summary index generation is triggered in document_indexing_task after indexing completes + # to ensure segments are available. See tasks/document_indexing_task.py except LockNotOwnedError: pass @@ -2128,6 +2206,14 @@ class DocumentService: name: str, batch: str, ): + # Set need_summary based on dataset's summary_index_setting + need_summary = False + if ( + dataset.summary_index_setting + and dataset.summary_index_setting.get("enable") is True + ): + need_summary = True + document = Document( tenant_id=dataset.tenant_id, dataset_id=dataset.id, @@ -2141,6 +2227,7 @@ class DocumentService: created_by=account.id, doc_form=document_form, doc_language=document_language, + need_summary=need_summary, ) doc_metadata = {} if dataset.built_in_field_enabled: @@ -2365,6 +2452,7 @@ class DocumentService: embedding_model_provider=knowledge_config.embedding_model_provider, collection_binding_id=dataset_collection_binding_id, retrieval_model=retrieval_model.model_dump() if retrieval_model else None, + summary_index_setting=knowledge_config.summary_index_setting, is_multimodal=knowledge_config.is_multimodal, ) @@ -2546,6 +2634,14 @@ class DocumentService: if not isinstance(args["process_rule"]["rules"]["segmentation"]["max_tokens"], int): raise ValueError("Process rule segmentation max_tokens is invalid") + # valid summary index setting + if args["process_rule"]["summary_index_setting"] and args["process_rule"]["summary_index_setting"]["enable"]: + summary_index_setting = args["process_rule"]["summary_index_setting"] + if "model_name" not in summary_index_setting or not summary_index_setting["model_name"]: + raise ValueError("Summary index model name is required") + if 
"model_provider_name" not in summary_index_setting or not summary_index_setting["model_provider_name"]: + raise ValueError("Summary index model provider name is required") + @staticmethod def batch_update_document_status( dataset: Dataset, document_ids: list[str], action: Literal["enable", "disable", "archive", "un_archive"], user @@ -3014,6 +3110,37 @@ class SegmentService: if args.enabled or keyword_changed: # update segment vector index VectorService.update_segment_vector(args.keywords, segment, dataset) + # update summary index if summary is provided and has changed + if args.summary is not None: + # Check if summary index is enabled + has_summary_index = ( + dataset.indexing_technique == "high_quality" + and dataset.summary_index_setting + and dataset.summary_index_setting.get("enable") is True + ) + + if has_summary_index: + # Query existing summary from database + from models.dataset import DocumentSegmentSummary + existing_summary = ( + db.session.query(DocumentSegmentSummary) + .where( + DocumentSegmentSummary.chunk_id == segment.id, + DocumentSegmentSummary.dataset_id == dataset.id, + ) + .first() + ) + + # Check if summary has changed + existing_summary_content = existing_summary.summary_content if existing_summary else None + if existing_summary_content != args.summary: + # Summary has changed, update it + from services.summary_index_service import SummaryIndexService + try: + SummaryIndexService.update_summary_for_segment(segment, dataset, args.summary) + except Exception as e: + logger.exception(f"Failed to update summary for segment {segment.id}: {str(e)}") + # Don't fail the entire update if summary update fails else: segment_hash = helper.generate_text_hash(content) tokens = 0 @@ -3088,6 +3215,15 @@ class SegmentService: elif document.doc_form in (IndexStructureType.PARAGRAPH_INDEX, IndexStructureType.QA_INDEX): # update segment vector index VectorService.update_segment_vector(args.keywords, segment, dataset) + # update summary index if summary is 
provided + if args.summary is not None: + from services.summary_index_service import SummaryIndexService + + try: + SummaryIndexService.update_summary_for_segment(segment, dataset, args.summary) + except Exception as e: + logger.exception(f"Failed to update summary for segment {segment.id}: {str(e)}") + # Don't fail the entire update if summary update fails # update multimodel vector index VectorService.update_multimodel_vector(segment, args.attachment_ids or [], dataset) except Exception as e: diff --git a/api/services/entities/knowledge_entities/knowledge_entities.py b/api/services/entities/knowledge_entities/knowledge_entities.py index 7959734e89..8dc5b93501 100644 --- a/api/services/entities/knowledge_entities/knowledge_entities.py +++ b/api/services/entities/knowledge_entities/knowledge_entities.py @@ -119,6 +119,7 @@ class KnowledgeConfig(BaseModel): data_source: DataSource | None = None process_rule: ProcessRule | None = None retrieval_model: RetrievalModel | None = None + summary_index_setting: dict | None = None doc_form: str = "text_model" doc_language: str = "English" embedding_model: str | None = None @@ -141,6 +142,7 @@ class SegmentUpdateArgs(BaseModel): regenerate_child_chunks: bool = False enabled: bool | None = None attachment_ids: list[str] | None = None + summary: str | None = None # Summary content for summary index class ChildChunkUpdateArgs(BaseModel): diff --git a/api/services/feature_service.py b/api/services/feature_service.py index 9b853b8337..fc91f450b7 100644 --- a/api/services/feature_service.py +++ b/api/services/feature_service.py @@ -170,6 +170,8 @@ class SystemFeatureModel(BaseModel): plugin_installation_permission: PluginInstallationPermissionModel = PluginInstallationPermissionModel() enable_change_email: bool = True plugin_manager: PluginManagerModel = PluginManagerModel() + enable_trial_app: bool = False + enable_explore_banner: bool = False class FeatureService: @@ -225,6 +227,8 @@ class FeatureService: 
system_features.is_allow_register = dify_config.ALLOW_REGISTER system_features.is_allow_create_workspace = dify_config.ALLOW_CREATE_WORKSPACE system_features.is_email_setup = dify_config.MAIL_TYPE is not None and dify_config.MAIL_TYPE != "" + system_features.enable_trial_app = dify_config.ENABLE_TRIAL_APP + system_features.enable_explore_banner = dify_config.ENABLE_EXPLORE_BANNER @classmethod def _fulfill_params_from_env(cls, features: FeatureModel): diff --git a/api/services/llm_generation_service.py b/api/services/llm_generation_service.py new file mode 100644 index 0000000000..eb8327537e --- /dev/null +++ b/api/services/llm_generation_service.py @@ -0,0 +1,37 @@ +""" +LLM Generation Detail Service. + +Provides methods to query and attach generation details to workflow node executions +and messages, avoiding N+1 query problems. +""" + +from sqlalchemy import select +from sqlalchemy.orm import Session + +from core.app.entities.llm_generation_entities import LLMGenerationDetailData +from models import LLMGenerationDetail + + +class LLMGenerationService: + """Service for handling LLM generation details.""" + + def __init__(self, session: Session): + self._session = session + + def get_generation_detail_for_message(self, message_id: str) -> LLMGenerationDetailData | None: + """Query generation detail for a specific message.""" + stmt = select(LLMGenerationDetail).where(LLMGenerationDetail.message_id == message_id) + detail = self._session.scalars(stmt).first() + return detail.to_domain_model() if detail else None + + def get_generation_details_for_messages( + self, + message_ids: list[str], + ) -> dict[str, LLMGenerationDetailData]: + """Batch query generation details for multiple messages.""" + if not message_ids: + return {} + + stmt = select(LLMGenerationDetail).where(LLMGenerationDetail.message_id.in_(message_ids)) + details = self._session.scalars(stmt).all() + return {detail.message_id: detail.to_domain_model() for detail in details if detail.message_id} diff 
--git a/api/services/recommended_app_service.py b/api/services/recommended_app_service.py index 544383a106..6b211a5632 100644 --- a/api/services/recommended_app_service.py +++ b/api/services/recommended_app_service.py @@ -1,4 +1,7 @@ from configs import dify_config +from extensions.ext_database import db +from models.model import AccountTrialAppRecord, TrialApp +from services.feature_service import FeatureService from services.recommend_app.recommend_app_factory import RecommendAppRetrievalFactory @@ -20,6 +23,15 @@ class RecommendedAppService: ) ) + if FeatureService.get_system_features().enable_trial_app: + apps = result["recommended_apps"] + for app in apps: + app_id = app["app_id"] + trial_app_model = db.session.query(TrialApp).where(TrialApp.app_id == app_id).first() + if trial_app_model: + app["can_trial"] = True + else: + app["can_trial"] = False return result @classmethod @@ -32,4 +44,30 @@ class RecommendedAppService: mode = dify_config.HOSTED_FETCH_APP_TEMPLATES_MODE retrieval_instance = RecommendAppRetrievalFactory.get_recommend_app_factory(mode)() result: dict = retrieval_instance.get_recommend_app_detail(app_id) + if FeatureService.get_system_features().enable_trial_app: + app_id = result["id"] + trial_app_model = db.session.query(TrialApp).where(TrialApp.app_id == app_id).first() + if trial_app_model: + result["can_trial"] = True + else: + result["can_trial"] = False return result + + @classmethod + def add_trial_app_record(cls, app_id: str, account_id: str): + """ + Add trial app record. 
+ :param app_id: app id + :return: + """ + account_trial_app_record = ( + db.session.query(AccountTrialAppRecord) + .where(AccountTrialAppRecord.app_id == app_id, AccountTrialAppRecord.account_id == account_id) + .first() + ) + if account_trial_app_record: + account_trial_app_record.count += 1 + db.session.commit() + else: + db.session.add(AccountTrialAppRecord(app_id=app_id, count=1, account_id=account_id)) + db.session.commit() diff --git a/api/services/summary_index_service.py b/api/services/summary_index_service.py new file mode 100644 index 0000000000..1d5c51aad8 --- /dev/null +++ b/api/services/summary_index_service.py @@ -0,0 +1,612 @@ +"""Summary index service for generating and managing document segment summaries.""" + +import logging +import time +import uuid +from typing import Any + +from core.rag.datasource.vdb.vector_factory import Vector +from core.rag.index_processor.constant.doc_type import DocType +from core.rag.models.document import Document +from extensions.ext_database import db +from libs import helper +from models.dataset import Dataset, DocumentSegment, DocumentSegmentSummary +from models.dataset import Document as DatasetDocument + +logger = logging.getLogger(__name__) + + +class SummaryIndexService: + """Service for generating and managing summary indexes.""" + + @staticmethod + def generate_summary_for_segment( + segment: DocumentSegment, + dataset: Dataset, + summary_index_setting: dict, + ) -> str: + """ + Generate summary for a single segment. 
+ + Args: + segment: DocumentSegment to generate summary for + dataset: Dataset containing the segment + summary_index_setting: Summary index configuration + + Returns: + Generated summary text + + Raises: + ValueError: If summary_index_setting is invalid or generation fails + """ + # Reuse the existing generate_summary method from ParagraphIndexProcessor + # Use lazy import to avoid circular import + from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor + + summary_content = ParagraphIndexProcessor.generate_summary( + tenant_id=dataset.tenant_id, + text=segment.content, + summary_index_setting=summary_index_setting, + ) + + if not summary_content: + raise ValueError("Generated summary is empty") + + return summary_content + + @staticmethod + def create_summary_record( + segment: DocumentSegment, + dataset: Dataset, + summary_content: str, + status: str = "generating", + ) -> DocumentSegmentSummary: + """ + Create or update a DocumentSegmentSummary record. + If a summary record already exists for this segment, it will be updated instead of creating a new one. 
+ + Args: + segment: DocumentSegment to create summary for + dataset: Dataset containing the segment + summary_content: Generated summary content + status: Summary status (default: "generating") + + Returns: + Created or updated DocumentSegmentSummary instance + """ + # Check if summary record already exists + existing_summary = ( + db.session.query(DocumentSegmentSummary) + .filter_by(chunk_id=segment.id, dataset_id=dataset.id) + .first() + ) + + if existing_summary: + # Update existing record + existing_summary.summary_content = summary_content + existing_summary.status = status + existing_summary.error = None # Clear any previous errors + # Re-enable if it was disabled + if not existing_summary.enabled: + existing_summary.enabled = True + existing_summary.disabled_at = None + existing_summary.disabled_by = None + db.session.add(existing_summary) + db.session.flush() + return existing_summary + else: + # Create new record (enabled by default) + summary_record = DocumentSegmentSummary( + dataset_id=dataset.id, + document_id=segment.document_id, + chunk_id=segment.id, + summary_content=summary_content, + status=status, + enabled=True, # Explicitly set enabled to True + ) + db.session.add(summary_record) + db.session.flush() + return summary_record + + @staticmethod + def vectorize_summary( + summary_record: DocumentSegmentSummary, + segment: DocumentSegment, + dataset: Dataset, + ) -> None: + """ + Vectorize summary and store in vector database. 
+ + Args: + summary_record: DocumentSegmentSummary record + segment: Original DocumentSegment + dataset: Dataset containing the segment + """ + if dataset.indexing_technique != "high_quality": + logger.warning( + f"Summary vectorization skipped for dataset {dataset.id}: " + "indexing_technique is not high_quality" + ) + return + + # Reuse existing index_node_id if available (like segment does), otherwise generate new one + old_summary_node_id = summary_record.summary_index_node_id + if old_summary_node_id: + # Reuse existing index_node_id (like segment behavior) + summary_index_node_id = old_summary_node_id + else: + # Generate new index node ID only for new summaries + summary_index_node_id = str(uuid.uuid4()) + + # Always regenerate hash (in case summary content changed) + summary_hash = helper.generate_text_hash(summary_record.summary_content) + + # Delete old vector only if we're reusing the same index_node_id (to overwrite) + # If index_node_id changed, the old vector should have been deleted elsewhere + if old_summary_node_id and old_summary_node_id == summary_index_node_id: + try: + vector = Vector(dataset) + vector.delete_by_ids([old_summary_node_id]) + except Exception as e: + logger.warning( + f"Failed to delete old summary vector for segment {segment.id}: {str(e)}. " + "Continuing with new vectorization." 
+ ) + + # Create document with summary content and metadata + summary_document = Document( + page_content=summary_record.summary_content, + metadata={ + "doc_id": summary_index_node_id, + "doc_hash": summary_hash, + "dataset_id": dataset.id, + "document_id": segment.document_id, + "original_chunk_id": segment.id, # Key: link to original chunk + "doc_type": DocType.TEXT, + "is_summary": True, # Identifier for summary documents + }, + ) + + # Vectorize and store with retry mechanism for connection errors + max_retries = 3 + retry_delay = 2.0 + + for attempt in range(max_retries): + try: + vector = Vector(dataset) + vector.add_texts([summary_document], duplicate_check=True) + + # Success - update summary record with index node info + summary_record.summary_index_node_id = summary_index_node_id + summary_record.summary_index_node_hash = summary_hash + summary_record.status = "completed" + db.session.add(summary_record) + db.session.flush() + return # Success, exit function + + except (ConnectionError, Exception) as e: + error_str = str(e).lower() + # Check if it's a connection-related error that might be transient + is_connection_error = any(keyword in error_str for keyword in [ + "connection", "disconnected", "timeout", "network", + "could not connect", "server disconnected", "weaviate" + ]) + + if is_connection_error and attempt < max_retries - 1: + # Retry for connection errors + wait_time = retry_delay * (2 ** attempt) # Exponential backoff + logger.warning( + f"Vectorization attempt {attempt + 1}/{max_retries} failed for segment {segment.id}: {str(e)}. " + f"Retrying in {wait_time:.1f} seconds..." 
+ ) + time.sleep(wait_time) + continue + else: + # Final attempt failed or non-connection error - log and update status + logger.error( + f"Failed to vectorize summary for segment {segment.id} after {attempt + 1} attempts: {str(e)}", + exc_info=True + ) + summary_record.status = "error" + summary_record.error = f"Vectorization failed: {str(e)}" + db.session.add(summary_record) + db.session.flush() + raise + + @staticmethod + def generate_and_vectorize_summary( + segment: DocumentSegment, + dataset: Dataset, + summary_index_setting: dict, + ) -> DocumentSegmentSummary: + """ + Generate summary for a segment and vectorize it. + + Args: + segment: DocumentSegment to generate summary for + dataset: Dataset containing the segment + summary_index_setting: Summary index configuration + + Returns: + Created DocumentSegmentSummary instance + + Raises: + ValueError: If summary generation fails + """ + try: + # Generate summary + summary_content = SummaryIndexService.generate_summary_for_segment( + segment, dataset, summary_index_setting + ) + + # Create or update summary record (will handle overwrite internally) + summary_record = SummaryIndexService.create_summary_record( + segment, dataset, summary_content, status="generating" + ) + + # Vectorize summary (will delete old vector if exists before creating new one) + SummaryIndexService.vectorize_summary(summary_record, segment, dataset) + + db.session.commit() + logger.info(f"Successfully generated and vectorized summary for segment {segment.id}") + return summary_record + + except Exception as e: + logger.exception(f"Failed to generate summary for segment {segment.id}: {str(e)}") + # Update summary record with error status if it exists + summary_record = ( + db.session.query(DocumentSegmentSummary) + .filter_by(chunk_id=segment.id, dataset_id=dataset.id) + .first() + ) + if summary_record: + summary_record.status = "error" + summary_record.error = str(e) + db.session.add(summary_record) + db.session.commit() + raise + + 
@staticmethod + def generate_summaries_for_document( + dataset: Dataset, + document: DatasetDocument, + summary_index_setting: dict, + segment_ids: list[str] | None = None, + only_parent_chunks: bool = False, + ) -> list[DocumentSegmentSummary]: + """ + Generate summaries for all segments in a document including vectorization. + + Args: + dataset: Dataset containing the document + document: DatasetDocument to generate summaries for + summary_index_setting: Summary index configuration + segment_ids: Optional list of specific segment IDs to process + only_parent_chunks: If True, only process parent chunks (for parent-child mode) + + Returns: + List of created DocumentSegmentSummary instances + """ + # Only generate summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + logger.info( + f"Skipping summary generation for dataset {dataset.id}: " + f"indexing_technique is {dataset.indexing_technique}, not 'high_quality'" + ) + return [] + + if not summary_index_setting or not summary_index_setting.get("enable"): + logger.info(f"Summary index is disabled for dataset {dataset.id}") + return [] + + # Skip qa_model documents + if document.doc_form == "qa_model": + logger.info(f"Skipping summary generation for qa_model document {document.id}") + return [] + + logger.info( + f"Starting summary generation for document {document.id} in dataset {dataset.id}, " + f"segment_ids: {len(segment_ids) if segment_ids else 'all'}, " + f"only_parent_chunks: {only_parent_chunks}" + ) + + # Query segments (only enabled segments) + query = db.session.query(DocumentSegment).filter_by( + dataset_id=dataset.id, + document_id=document.id, + status="completed", + enabled=True, # Only generate summaries for enabled segments + ) + + if segment_ids: + query = query.filter(DocumentSegment.id.in_(segment_ids)) + + segments = query.all() + + if not segments: + logger.info(f"No segments found for document {document.id}") + return [] + + summary_records = [] + 
+ for segment in segments: + # For parent-child mode, only process parent chunks + # In parent-child mode, all DocumentSegments are parent chunks, + # so we process all of them. Child chunks are stored in ChildChunk table + # and are not DocumentSegments, so they won't be in the segments list. + # This check is mainly for clarity and future-proofing. + if only_parent_chunks: + # In parent-child mode, all segments in the query are parent chunks + # Child chunks are not DocumentSegments, so they won't appear here + # We can process all segments + pass + + try: + summary_record = SummaryIndexService.generate_and_vectorize_summary( + segment, dataset, summary_index_setting + ) + summary_records.append(summary_record) + except Exception as e: + logger.error(f"Failed to generate summary for segment {segment.id}: {str(e)}") + # Continue with other segments + continue + + logger.info( + f"Completed summary generation for document {document.id}: " + f"{len(summary_records)} summaries generated and vectorized" + ) + return summary_records + + @staticmethod + def disable_summaries_for_segments( + dataset: Dataset, + segment_ids: list[str] | None = None, + disabled_by: str | None = None, + ) -> None: + """ + Disable summary records and remove vectors from vector database for segments. + Unlike delete, this preserves the summary records but marks them as disabled. + + Args: + dataset: Dataset containing the segments + segment_ids: List of segment IDs to disable summaries for. If None, disable all. 
+ disabled_by: User ID who disabled the summaries + """ + from libs.datetime_utils import naive_utc_now + + query = db.session.query(DocumentSegmentSummary).filter_by( + dataset_id=dataset.id, + enabled=True, # Only disable enabled summaries + ) + + if segment_ids: + query = query.filter(DocumentSegmentSummary.chunk_id.in_(segment_ids)) + + summaries = query.all() + + if not summaries: + return + + logger.info( + f"Disabling {len(summaries)} summary records for dataset {dataset.id}, " + f"segment_ids: {len(segment_ids) if segment_ids else 'all'}" + ) + + # Remove from vector database (but keep records) + if dataset.indexing_technique == "high_quality": + summary_node_ids = [ + s.summary_index_node_id for s in summaries if s.summary_index_node_id + ] + if summary_node_ids: + try: + vector = Vector(dataset) + vector.delete_by_ids(summary_node_ids) + except Exception as e: + logger.warning(f"Failed to remove summary vectors: {str(e)}") + + # Disable summary records (don't delete) + now = naive_utc_now() + for summary in summaries: + summary.enabled = False + summary.disabled_at = now + summary.disabled_by = disabled_by + db.session.add(summary) + + db.session.commit() + logger.info(f"Disabled {len(summaries)} summary records for dataset {dataset.id}") + + @staticmethod + def enable_summaries_for_segments( + dataset: Dataset, + segment_ids: list[str] | None = None, + ) -> None: + """ + Enable summary records and re-add vectors to vector database for segments. + + Args: + dataset: Dataset containing the segments + segment_ids: List of segment IDs to enable summaries for. If None, enable all. 
+ """ + # Only enable summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + return + + # Check if summary index is enabled + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + return + + query = db.session.query(DocumentSegmentSummary).filter_by( + dataset_id=dataset.id, + enabled=False, # Only enable disabled summaries + ) + + if segment_ids: + query = query.filter(DocumentSegmentSummary.chunk_id.in_(segment_ids)) + + summaries = query.all() + + if not summaries: + return + + logger.info( + f"Enabling {len(summaries)} summary records for dataset {dataset.id}, " + f"segment_ids: {len(segment_ids) if segment_ids else 'all'}" + ) + + # Re-vectorize and re-add to vector database + enabled_count = 0 + for summary in summaries: + # Get the original segment + segment = db.session.query(DocumentSegment).filter_by( + id=summary.chunk_id, + dataset_id=dataset.id, + ).first() + + if not segment or not segment.enabled or segment.status != "completed": + continue + + if not summary.summary_content: + continue + + try: + # Re-vectorize summary + SummaryIndexService.vectorize_summary(summary, segment, dataset) + + # Enable summary record + summary.enabled = True + summary.disabled_at = None + summary.disabled_by = None + db.session.add(summary) + enabled_count += 1 + except Exception as e: + logger.error(f"Failed to re-vectorize summary {summary.id}: {str(e)}") + # Keep it disabled if vectorization fails + continue + + db.session.commit() + logger.info(f"Enabled {enabled_count} summary records for dataset {dataset.id}") + + @staticmethod + def delete_summaries_for_segments( + dataset: Dataset, + segment_ids: list[str] | None = None, + ) -> None: + """ + Delete summary records and vectors for segments (used only for actual deletion scenarios). + For disable/enable operations, use disable_summaries_for_segments/enable_summaries_for_segments. 
+ + Args: + dataset: Dataset containing the segments + segment_ids: List of segment IDs to delete summaries for. If None, delete all. + """ + query = db.session.query(DocumentSegmentSummary).filter_by(dataset_id=dataset.id) + + if segment_ids: + query = query.filter(DocumentSegmentSummary.chunk_id.in_(segment_ids)) + + summaries = query.all() + + if not summaries: + return + + # Delete from vector database + if dataset.indexing_technique == "high_quality": + summary_node_ids = [ + s.summary_index_node_id for s in summaries if s.summary_index_node_id + ] + if summary_node_ids: + vector = Vector(dataset) + vector.delete_by_ids(summary_node_ids) + + # Delete summary records + for summary in summaries: + db.session.delete(summary) + + db.session.commit() + logger.info(f"Deleted {len(summaries)} summary records for dataset {dataset.id}") + + @staticmethod + def update_summary_for_segment( + segment: DocumentSegment, + dataset: Dataset, + summary_content: str, + ) -> DocumentSegmentSummary | None: + """ + Update summary for a segment and re-vectorize it. 
+ + Args: + segment: DocumentSegment to update summary for + dataset: Dataset containing the segment + summary_content: New summary content + + Returns: + Updated DocumentSegmentSummary instance, or None if summary index is not enabled + """ + # Only update summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + return None + + # Check if summary index is enabled + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + return None + + # Skip qa_model documents + if segment.document and segment.document.doc_form == "qa_model": + return None + + try: + # Find existing summary record + summary_record = ( + db.session.query(DocumentSegmentSummary) + .filter_by(chunk_id=segment.id, dataset_id=dataset.id) + .first() + ) + + if summary_record: + # Update existing summary + old_summary_node_id = summary_record.summary_index_node_id + + # Update summary content + summary_record.summary_content = summary_content + summary_record.status = "generating" + db.session.add(summary_record) + db.session.flush() + + # Delete old vector if exists + if old_summary_node_id: + vector = Vector(dataset) + vector.delete_by_ids([old_summary_node_id]) + + # Re-vectorize summary + SummaryIndexService.vectorize_summary(summary_record, segment, dataset) + + db.session.commit() + logger.info(f"Successfully updated and re-vectorized summary for segment {segment.id}") + return summary_record + else: + # Create new summary record if doesn't exist + summary_record = SummaryIndexService.create_summary_record( + segment, dataset, summary_content, status="generating" + ) + SummaryIndexService.vectorize_summary(summary_record, segment, dataset) + db.session.commit() + logger.info(f"Successfully created and vectorized summary for segment {segment.id}") + return summary_record + + except Exception as e: + logger.exception(f"Failed to update summary for segment {segment.id}: {str(e)}") 
+ # Update summary record with error status if it exists + summary_record = ( + db.session.query(DocumentSegmentSummary) + .filter_by(chunk_id=segment.id, dataset_id=dataset.id) + .first() + ) + if summary_record: + summary_record.status = "error" + summary_record.error = str(e) + db.session.add(summary_record) + db.session.commit() + raise + diff --git a/api/tasks/add_document_to_index_task.py b/api/tasks/add_document_to_index_task.py index e7dead8a56..da6f468edd 100644 --- a/api/tasks/add_document_to_index_task.py +++ b/api/tasks/add_document_to_index_task.py @@ -117,6 +117,18 @@ def add_document_to_index_task(dataset_document_id: str): ) db.session.commit() + # Enable summary indexes for all segments in this document + from services.summary_index_service import SummaryIndexService + segment_ids_list = [segment.id for segment in segments] + if segment_ids_list: + try: + SummaryIndexService.enable_summaries_for_segments( + dataset=dataset, + segment_ids=segment_ids_list, + ) + except Exception as e: + logger.warning(f"Failed to enable summaries for document {dataset_document.id}: {str(e)}") + end_at = time.perf_counter() logger.info( click.style(f"Document added to index: {dataset_document.id} latency: {end_at - start_at}", fg="green") diff --git a/api/tasks/annotation/batch_import_annotations_task.py b/api/tasks/annotation/batch_import_annotations_task.py index 775814318b..97c0b31490 100644 --- a/api/tasks/annotation/batch_import_annotations_task.py +++ b/api/tasks/annotation/batch_import_annotations_task.py @@ -31,7 +31,6 @@ def batch_import_annotations_task(job_id: str, content_list: list[dict], app_id: start_at = time.perf_counter() indexing_cache_key = f"app_annotation_batch_import_{str(job_id)}" active_jobs_key = f"annotation_import_active:{tenant_id}" - # get app info app = db.session.query(App).where(App.id == app_id, App.tenant_id == tenant_id, App.status == "normal").first() @@ -100,6 +99,5 @@ def batch_import_annotations_task(job_id: str, content_list: 
list[dict], app_id: except Exception as cleanup_error: # Log but don't fail if cleanup fails - the job will be auto-expired logger.warning("Failed to clean up active job tracking for %s: %s", job_id, cleanup_error) - # Close database session db.session.close() diff --git a/api/tasks/delete_segment_from_index_task.py b/api/tasks/delete_segment_from_index_task.py index bea5c952cf..14146018f1 100644 --- a/api/tasks/delete_segment_from_index_task.py +++ b/api/tasks/delete_segment_from_index_task.py @@ -42,6 +42,7 @@ def delete_segment_from_index_task( doc_form = dataset_document.doc_form # Proceed with index cleanup using the index_node_ids directly + # For actual deletion, we should delete summaries (not just disable them) index_processor = IndexProcessorFactory(doc_form).init_index_processor() index_processor.clean( dataset, @@ -49,6 +50,7 @@ def delete_segment_from_index_task( with_keywords=True, delete_child_chunks=True, precomputed_child_node_ids=child_node_ids, + delete_summaries=True, # Actually delete summaries when segment is deleted ) if dataset.is_multimodal: # delete segment attachment binding diff --git a/api/tasks/disable_segment_from_index_task.py b/api/tasks/disable_segment_from_index_task.py index 6b5f01b416..67c2867edd 100644 --- a/api/tasks/disable_segment_from_index_task.py +++ b/api/tasks/disable_segment_from_index_task.py @@ -53,6 +53,17 @@ def disable_segment_from_index_task(segment_id: str): logger.info(click.style(f"Segment {segment.id} document status is invalid, pass.", fg="cyan")) return + # Disable summary index for this segment + from services.summary_index_service import SummaryIndexService + try: + SummaryIndexService.disable_summaries_for_segments( + dataset=dataset, + segment_ids=[segment.id], + disabled_by=segment.disabled_by, + ) + except Exception as e: + logger.warning(f"Failed to disable summary for segment {segment.id}: {str(e)}") + index_type = dataset_document.doc_form index_processor = 
IndexProcessorFactory(index_type).init_index_processor() index_processor.clean(dataset, [segment.index_node_id]) diff --git a/api/tasks/disable_segments_from_index_task.py b/api/tasks/disable_segments_from_index_task.py index c2a3de29f4..b6a534bacf 100644 --- a/api/tasks/disable_segments_from_index_task.py +++ b/api/tasks/disable_segments_from_index_task.py @@ -58,12 +58,25 @@ def disable_segments_from_index_task(segment_ids: list, dataset_id: str, documen return try: + # Disable summary indexes for these segments + from services.summary_index_service import SummaryIndexService + segment_ids_list = [segment.id for segment in segments] + try: + # Get disabled_by from first segment (they should all have the same disabled_by) + disabled_by = segments[0].disabled_by if segments else None + SummaryIndexService.disable_summaries_for_segments( + dataset=dataset, + segment_ids=segment_ids_list, + disabled_by=disabled_by, + ) + except Exception as e: + logger.warning(f"Failed to disable summaries for segments: {str(e)}") + index_node_ids = [segment.index_node_id for segment in segments] if dataset.is_multimodal: - segment_ids = [segment.id for segment in segments] segment_attachment_bindings = ( db.session.query(SegmentAttachmentBinding) - .where(SegmentAttachmentBinding.segment_id.in_(segment_ids)) + .where(SegmentAttachmentBinding.segment_id.in_(segment_ids_list)) .all() ) if segment_attachment_bindings: diff --git a/api/tasks/document_indexing_task.py b/api/tasks/document_indexing_task.py index acbdab631b..319837ceaf 100644 --- a/api/tasks/document_indexing_task.py +++ b/api/tasks/document_indexing_task.py @@ -8,6 +8,7 @@ from celery import shared_task from configs import dify_config from core.entities.document_task import DocumentTask from core.indexing_runner import DocumentIsPausedError, IndexingRunner +from tasks.generate_summary_index_task import generate_summary_index_task from core.rag.pipeline.queue import TenantIsolatedTaskQueue from enums.cloud_plan import 
CloudPlan from extensions.ext_database import db @@ -100,6 +101,60 @@ def _document_indexing(dataset_id: str, document_ids: Sequence[str]): indexing_runner.run(documents) end_at = time.perf_counter() logger.info(click.style(f"Processed dataset: {dataset_id} latency: {end_at - start_at}", fg="green")) + + # Trigger summary index generation for completed documents if enabled + # Only generate for high_quality indexing technique and when summary_index_setting is enabled + # Re-query dataset to get latest summary_index_setting (in case it was updated) + dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first() + if not dataset: + logger.warning(f"Dataset {dataset_id} not found after indexing") + return + + if dataset.indexing_technique == "high_quality": + summary_index_setting = dataset.summary_index_setting + if summary_index_setting and summary_index_setting.get("enable"): + # Check each document's indexing status and trigger summary generation if completed + for document_id in document_ids: + # Re-query document to get latest status (IndexingRunner may have updated it) + document = ( + db.session.query(Document) + .where(Document.id == document_id, Document.dataset_id == dataset_id) + .first() + ) + if document: + logger.info( + f"Checking document {document_id} for summary generation: " + f"status={document.indexing_status}, doc_form={document.doc_form}" + ) + if document.indexing_status == "completed" and document.doc_form != "qa_model": + try: + generate_summary_index_task.delay(dataset.id, document_id, None) + logger.info( + f"Queued summary index generation task for document {document_id} " + f"in dataset {dataset.id} after indexing completed" + ) + except Exception as e: + logger.exception( + f"Failed to queue summary index generation task for document {document_id}: {str(e)}" + ) + # Don't fail the entire indexing process if summary task queuing fails + else: + logger.info( + f"Skipping summary generation for document {document_id}: " + 
f"status={document.indexing_status}, doc_form={document.doc_form}" + ) + else: + logger.warning(f"Document {document_id} not found after indexing") + else: + logger.info( + f"Summary index generation skipped for dataset {dataset.id}: " + f"summary_index_setting.enable={summary_index_setting.get('enable') if summary_index_setting else None}" + ) + else: + logger.info( + f"Summary index generation skipped for dataset {dataset.id}: " + f"indexing_technique={dataset.indexing_technique} (not 'high_quality')" + ) except DocumentIsPausedError as ex: logger.info(click.style(str(ex), fg="yellow")) except Exception: diff --git a/api/tasks/enable_segment_to_index_task.py b/api/tasks/enable_segment_to_index_task.py index 7615469ed0..113e19871e 100644 --- a/api/tasks/enable_segment_to_index_task.py +++ b/api/tasks/enable_segment_to_index_task.py @@ -103,6 +103,16 @@ def enable_segment_to_index_task(segment_id: str): # save vector index index_processor.load(dataset, [document], multimodal_documents=multimodel_documents) + # Enable summary index for this segment + from services.summary_index_service import SummaryIndexService + try: + SummaryIndexService.enable_summaries_for_segments( + dataset=dataset, + segment_ids=[segment.id], + ) + except Exception as e: + logger.warning(f"Failed to enable summary for segment {segment.id}: {str(e)}") + end_at = time.perf_counter() logger.info(click.style(f"Segment enabled to index: {segment.id} latency: {end_at - start_at}", fg="green")) except Exception as e: diff --git a/api/tasks/enable_segments_to_index_task.py b/api/tasks/enable_segments_to_index_task.py index 9f17d09e18..0c419ca2f0 100644 --- a/api/tasks/enable_segments_to_index_task.py +++ b/api/tasks/enable_segments_to_index_task.py @@ -108,6 +108,17 @@ def enable_segments_to_index_task(segment_ids: list, dataset_id: str, document_i # save vector index index_processor.load(dataset, documents, multimodal_documents=multimodal_documents) + # Enable summary indexes for these segments + 
from services.summary_index_service import SummaryIndexService + segment_ids_list = [segment.id for segment in segments] + try: + SummaryIndexService.enable_summaries_for_segments( + dataset=dataset, + segment_ids=segment_ids_list, + ) + except Exception as e: + logger.warning(f"Failed to enable summaries for segments: {str(e)}") + end_at = time.perf_counter() logger.info(click.style(f"Segments enabled to index latency: {end_at - start_at}", fg="green")) except Exception as e: diff --git a/api/tasks/generate_summary_index_task.py b/api/tasks/generate_summary_index_task.py new file mode 100644 index 0000000000..2850658ce4 --- /dev/null +++ b/api/tasks/generate_summary_index_task.py @@ -0,0 +1,113 @@ +"""Async task for generating summary indexes.""" + +import logging +import time + +import click +from celery import shared_task + +from extensions.ext_database import db +from models.dataset import Dataset, DocumentSegment +from models.dataset import Document as DatasetDocument +from services.summary_index_service import SummaryIndexService + +logger = logging.getLogger(__name__) + + +@shared_task(queue="dataset") +def generate_summary_index_task(dataset_id: str, document_id: str, segment_ids: list[str] | None = None): + """ + Async generate summary index for document segments. + + Args: + dataset_id: Dataset ID + document_id: Document ID + segment_ids: Optional list of specific segment IDs to process. If None, process all segments. 
+ + Usage: + generate_summary_index_task.delay(dataset_id, document_id) + generate_summary_index_task.delay(dataset_id, document_id, segment_ids) + """ + logger.info( + click.style( + f"Start generating summary index for document {document_id} in dataset {dataset_id}", + fg="green", + ) + ) + start_at = time.perf_counter() + + try: + dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first() + if not dataset: + logger.error(click.style(f"Dataset not found: {dataset_id}", fg="red")) + db.session.close() + return + + document = db.session.query(DatasetDocument).where(DatasetDocument.id == document_id).first() + if not document: + logger.error(click.style(f"Document not found: {document_id}", fg="red")) + db.session.close() + return + + # Only generate summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + logger.info( + click.style( + f"Skipping summary generation for dataset {dataset_id}: " + f"indexing_technique is {dataset.indexing_technique}, not 'high_quality'", + fg="cyan", + ) + ) + db.session.close() + return + + # Check if summary index is enabled + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + logger.info( + click.style( + f"Summary index is disabled for dataset {dataset_id}", + fg="cyan", + ) + ) + db.session.close() + return + + # Determine if only parent chunks should be processed + only_parent_chunks = dataset.chunk_structure == "parent_child_index" + + # Generate summaries + summary_records = SummaryIndexService.generate_summaries_for_document( + dataset=dataset, + document=document, + summary_index_setting=summary_index_setting, + segment_ids=segment_ids, + only_parent_chunks=only_parent_chunks, + ) + + end_at = time.perf_counter() + logger.info( + click.style( + f"Summary index generation completed for document {document_id}: " + f"{len(summary_records)} summaries generated, latency: {end_at - 
start_at}", + fg="green", + ) + ) + + except Exception as e: + logger.exception(f"Failed to generate summary index for document {document_id}: {str(e)}") + # Update document segments with error status if needed + if segment_ids: + db.session.query(DocumentSegment).filter( + DocumentSegment.id.in_(segment_ids), + DocumentSegment.dataset_id == dataset_id, + ).update( + { + DocumentSegment.error: f"Summary generation failed: {str(e)}", + }, + synchronize_session=False, + ) + db.session.commit() + finally: + db.session.close() + diff --git a/api/tasks/regenerate_summary_index_task.py b/api/tasks/regenerate_summary_index_task.py new file mode 100644 index 0000000000..ddc48f9d99 --- /dev/null +++ b/api/tasks/regenerate_summary_index_task.py @@ -0,0 +1,219 @@ +"""Task for regenerating summary indexes when dataset settings change.""" + +import logging +import time +from typing import Any + +import click +from celery import shared_task +from sqlalchemy import select + +from extensions.ext_database import db +from models.dataset import Dataset, DocumentSegment, DocumentSegmentSummary +from models.dataset import Document as DatasetDocument +from services.summary_index_service import SummaryIndexService + +logger = logging.getLogger(__name__) + + +@shared_task(queue="dataset") +def regenerate_summary_index_task( + dataset_id: str, + regenerate_reason: str = "summary_model_changed", + regenerate_vectors_only: bool = False, +): + """ + Regenerate summary indexes for all documents in a dataset. + + This task is triggered when: + 1. summary_index_setting model changes (regenerate_reason="summary_model_changed") + - Regenerates summary content and vectors for all existing summaries + 2. 
embedding_model changes (regenerate_reason="embedding_model_changed") + - Only regenerates vectors for existing summaries (keeps summary content) + + Args: + dataset_id: Dataset ID + regenerate_reason: Reason for regeneration ("summary_model_changed" or "embedding_model_changed") + regenerate_vectors_only: If True, only regenerate vectors without regenerating summary content + """ + logger.info( + click.style( + f"Start regenerate summary index for dataset {dataset_id}, reason: {regenerate_reason}", + fg="green", + ) + ) + start_at = time.perf_counter() + + try: + dataset = db.session.query(Dataset).filter_by(id=dataset_id).first() + if not dataset: + logger.error(click.style(f"Dataset not found: {dataset_id}", fg="red")) + db.session.close() + return + + # Only regenerate summary index for high_quality indexing technique + if dataset.indexing_technique != "high_quality": + logger.info( + click.style( + f"Skipping summary regeneration for dataset {dataset_id}: " + f"indexing_technique is {dataset.indexing_technique}, not 'high_quality'", + fg="cyan", + ) + ) + db.session.close() + return + + # Check if summary index is enabled + summary_index_setting = dataset.summary_index_setting + if not summary_index_setting or not summary_index_setting.get("enable"): + logger.info( + click.style( + f"Summary index is disabled for dataset {dataset_id}", + fg="cyan", + ) + ) + db.session.close() + return + + # Get all documents with completed indexing status + dataset_documents = db.session.scalars( + select(DatasetDocument).where( + DatasetDocument.dataset_id == dataset_id, + DatasetDocument.indexing_status == "completed", + DatasetDocument.enabled == True, + DatasetDocument.archived == False, + ) + ).all() + + if not dataset_documents: + logger.info( + click.style( + f"No documents found for summary regeneration in dataset {dataset_id}", + fg="cyan", + ) + ) + db.session.close() + return + + logger.info( + f"Found {len(dataset_documents)} documents for summary regeneration in 
dataset {dataset_id}" + ) + + total_segments_processed = 0 + total_segments_failed = 0 + + for dataset_document in dataset_documents: + # Skip qa_model documents + if dataset_document.doc_form == "qa_model": + continue + + try: + # Get all segments with existing summaries + segments = ( + db.session.query(DocumentSegment) + .join( + DocumentSegmentSummary, + DocumentSegment.id == DocumentSegmentSummary.chunk_id, + ) + .where( + DocumentSegment.document_id == dataset_document.id, + DocumentSegment.dataset_id == dataset_id, + DocumentSegment.status == "completed", + DocumentSegment.enabled == True, + DocumentSegmentSummary.dataset_id == dataset_id, + ) + .order_by(DocumentSegment.position.asc()) + .all() + ) + + if not segments: + continue + + logger.info( + f"Regenerating summaries for {len(segments)} segments in document {dataset_document.id}" + ) + + for segment in segments: + try: + # Get existing summary record + summary_record = ( + db.session.query(DocumentSegmentSummary) + .filter_by( + chunk_id=segment.id, + dataset_id=dataset_id, + ) + .first() + ) + + if not summary_record: + logger.warning( + f"Summary record not found for segment {segment.id}, skipping" + ) + continue + + if regenerate_vectors_only: + # Only regenerate vectors (for embedding_model change) + # Delete old vector + if summary_record.summary_index_node_id: + try: + from core.rag.datasource.vdb.vector_factory import Vector + + vector = Vector(dataset) + vector.delete_by_ids([summary_record.summary_index_node_id]) + except Exception as e: + logger.warning( + f"Failed to delete old summary vector for segment {segment.id}: {str(e)}" + ) + + # Re-vectorize with new embedding model + SummaryIndexService.vectorize_summary( + summary_record, segment, dataset + ) + db.session.commit() + else: + # Regenerate both summary content and vectors (for summary_model change) + SummaryIndexService.generate_and_vectorize_summary( + segment, dataset, summary_index_setting + ) + db.session.commit() + + 
total_segments_processed += 1 + + except Exception as e: + logger.error( + f"Failed to regenerate summary for segment {segment.id}: {str(e)}", + exc_info=True, + ) + total_segments_failed += 1 + # Update summary record with error status + if summary_record: + summary_record.status = "error" + summary_record.error = f"Regeneration failed: {str(e)}" + db.session.add(summary_record) + db.session.commit() + continue + + except Exception as e: + logger.error( + f"Failed to process document {dataset_document.id} for summary regeneration: {str(e)}", + exc_info=True, + ) + continue + + end_at = time.perf_counter() + logger.info( + click.style( + f"Summary index regeneration completed for dataset {dataset_id}: " + f"{total_segments_processed} segments processed successfully, " + f"{total_segments_failed} segments failed, " + f"total documents: {len(dataset_documents)}, " + f"latency: {end_at - start_at:.2f}s", + fg="green", + ) + ) + + except Exception: + logger.exception(f"Regenerate summary index failed for dataset {dataset_id}") + finally: + db.session.close() + diff --git a/api/tasks/remove_document_from_index_task.py b/api/tasks/remove_document_from_index_task.py index c0ab2d0b41..7d191f00c0 100644 --- a/api/tasks/remove_document_from_index_task.py +++ b/api/tasks/remove_document_from_index_task.py @@ -47,6 +47,20 @@ def remove_document_from_index_task(document_id: str): index_processor = IndexProcessorFactory(document.doc_form).init_index_processor() segments = db.session.scalars(select(DocumentSegment).where(DocumentSegment.document_id == document.id)).all() + + # Disable summary indexes for all segments in this document + from services.summary_index_service import SummaryIndexService + segment_ids_list = [segment.id for segment in segments] + if segment_ids_list: + try: + SummaryIndexService.disable_summaries_for_segments( + dataset=dataset, + segment_ids=segment_ids_list, + disabled_by=document.disabled_by, + ) + except Exception as e: + logger.warning(f"Failed to 
disable summaries for document {document.id}: {str(e)}") + index_node_ids = [segment.index_node_id for segment in segments] if index_node_ids: try: diff --git a/api/tests/unit_tests/core/agent/__init__.py b/api/tests/unit_tests/core/agent/__init__.py new file mode 100644 index 0000000000..a9ccd45f4b --- /dev/null +++ b/api/tests/unit_tests/core/agent/__init__.py @@ -0,0 +1,3 @@ +""" +Mark agent test modules as a package to avoid import name collisions. +""" diff --git a/api/tests/unit_tests/core/agent/patterns/__init__.py b/api/tests/unit_tests/core/agent/patterns/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/unit_tests/core/agent/patterns/test_base.py b/api/tests/unit_tests/core/agent/patterns/test_base.py new file mode 100644 index 0000000000..b0e0d44940 --- /dev/null +++ b/api/tests/unit_tests/core/agent/patterns/test_base.py @@ -0,0 +1,324 @@ +"""Tests for AgentPattern base class.""" + +from decimal import Decimal +from unittest.mock import MagicMock + +import pytest + +from core.agent.entities import AgentLog, ExecutionContext +from core.agent.patterns.base import AgentPattern +from core.model_runtime.entities.llm_entities import LLMUsage + + +class ConcreteAgentPattern(AgentPattern): + """Concrete implementation of AgentPattern for testing.""" + + def run(self, prompt_messages, model_parameters, stop=[], stream=True): + """Minimal implementation for testing.""" + yield from [] + + +@pytest.fixture +def mock_model_instance(): + """Create a mock model instance.""" + model_instance = MagicMock() + model_instance.model = "test-model" + model_instance.provider = "test-provider" + return model_instance + + +@pytest.fixture +def mock_context(): + """Create a mock execution context.""" + return ExecutionContext( + user_id="test-user", + app_id="test-app", + conversation_id="test-conversation", + message_id="test-message", + tenant_id="test-tenant", + ) + + +@pytest.fixture +def agent_pattern(mock_model_instance, mock_context): + 
"""Create a concrete agent pattern for testing.""" + return ConcreteAgentPattern( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + max_iterations=10, + ) + + +class TestAccumulateUsage: + """Tests for _accumulate_usage method.""" + + def test_accumulate_usage_to_empty_dict(self, agent_pattern): + """Test accumulating usage to an empty dict creates a copy.""" + total_usage: dict = {"usage": None} + delta_usage = LLMUsage( + prompt_tokens=100, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.1"), + completion_tokens=50, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.1"), + total_tokens=150, + total_price=Decimal("0.2"), + currency="USD", + latency=0.5, + ) + + agent_pattern._accumulate_usage(total_usage, delta_usage) + + assert total_usage["usage"] is not None + assert total_usage["usage"].total_tokens == 150 + assert total_usage["usage"].prompt_tokens == 100 + assert total_usage["usage"].completion_tokens == 50 + # Verify it's a copy, not a reference + assert total_usage["usage"] is not delta_usage + + def test_accumulate_usage_adds_to_existing(self, agent_pattern): + """Test accumulating usage adds to existing values.""" + initial_usage = LLMUsage( + prompt_tokens=100, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.1"), + completion_tokens=50, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.1"), + total_tokens=150, + total_price=Decimal("0.2"), + currency="USD", + latency=0.5, + ) + total_usage: dict = {"usage": initial_usage} + + delta_usage = LLMUsage( + prompt_tokens=200, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.2"), + completion_tokens=100, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + 
completion_price=Decimal("0.2"), + total_tokens=300, + total_price=Decimal("0.4"), + currency="USD", + latency=0.5, + ) + + agent_pattern._accumulate_usage(total_usage, delta_usage) + + assert total_usage["usage"].total_tokens == 450 # 150 + 300 + assert total_usage["usage"].prompt_tokens == 300 # 100 + 200 + assert total_usage["usage"].completion_tokens == 150 # 50 + 100 + + def test_accumulate_usage_multiple_rounds(self, agent_pattern): + """Test accumulating usage across multiple rounds.""" + total_usage: dict = {"usage": None} + + # Round 1: 100 tokens + round1_usage = LLMUsage( + prompt_tokens=70, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.07"), + completion_tokens=30, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.06"), + total_tokens=100, + total_price=Decimal("0.13"), + currency="USD", + latency=0.3, + ) + agent_pattern._accumulate_usage(total_usage, round1_usage) + assert total_usage["usage"].total_tokens == 100 + + # Round 2: 150 tokens + round2_usage = LLMUsage( + prompt_tokens=100, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.1"), + completion_tokens=50, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.1"), + total_tokens=150, + total_price=Decimal("0.2"), + currency="USD", + latency=0.4, + ) + agent_pattern._accumulate_usage(total_usage, round2_usage) + assert total_usage["usage"].total_tokens == 250 # 100 + 150 + + # Round 3: 200 tokens + round3_usage = LLMUsage( + prompt_tokens=130, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.13"), + completion_tokens=70, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.14"), + total_tokens=200, + total_price=Decimal("0.27"), + currency="USD", + 
latency=0.5, + ) + agent_pattern._accumulate_usage(total_usage, round3_usage) + assert total_usage["usage"].total_tokens == 450 # 100 + 150 + 200 + + +class TestCreateLog: + """Tests for _create_log method.""" + + def test_create_log_with_label_and_status(self, agent_pattern): + """Test creating a log with label and status.""" + log = agent_pattern._create_log( + label="ROUND 1", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={"key": "value"}, + ) + + assert log.label == "ROUND 1" + assert log.log_type == AgentLog.LogType.ROUND + assert log.status == AgentLog.LogStatus.START + assert log.data == {"key": "value"} + assert log.parent_id is None + + def test_create_log_with_parent_id(self, agent_pattern): + """Test creating a log with parent_id.""" + parent_log = agent_pattern._create_log( + label="ROUND 1", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={}, + ) + + child_log = agent_pattern._create_log( + label="CALL tool", + log_type=AgentLog.LogType.TOOL_CALL, + status=AgentLog.LogStatus.START, + data={}, + parent_id=parent_log.id, + ) + + assert child_log.parent_id == parent_log.id + assert child_log.log_type == AgentLog.LogType.TOOL_CALL + + +class TestFinishLog: + """Tests for _finish_log method.""" + + def test_finish_log_updates_status(self, agent_pattern): + """Test that finish_log updates status to SUCCESS.""" + log = agent_pattern._create_log( + label="ROUND 1", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={}, + ) + + finished_log = agent_pattern._finish_log(log, data={"result": "done"}) + + assert finished_log.status == AgentLog.LogStatus.SUCCESS + assert finished_log.data == {"result": "done"} + + def test_finish_log_adds_usage_metadata(self, agent_pattern): + """Test that finish_log adds usage to metadata.""" + log = agent_pattern._create_log( + label="ROUND 1", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={}, + ) + + usage = 
LLMUsage( + prompt_tokens=100, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.1"), + completion_tokens=50, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.1"), + total_tokens=150, + total_price=Decimal("0.2"), + currency="USD", + latency=0.5, + ) + + finished_log = agent_pattern._finish_log(log, usage=usage) + + assert finished_log.metadata[AgentLog.LogMetadata.TOTAL_TOKENS] == 150 + assert finished_log.metadata[AgentLog.LogMetadata.TOTAL_PRICE] == Decimal("0.2") + assert finished_log.metadata[AgentLog.LogMetadata.CURRENCY] == "USD" + assert finished_log.metadata[AgentLog.LogMetadata.LLM_USAGE] == usage + + +class TestFindToolByName: + """Tests for _find_tool_by_name method.""" + + def test_find_existing_tool(self, mock_model_instance, mock_context): + """Test finding an existing tool by name.""" + mock_tool = MagicMock() + mock_tool.entity.identity.name = "test_tool" + + pattern = ConcreteAgentPattern( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + ) + + found_tool = pattern._find_tool_by_name("test_tool") + assert found_tool == mock_tool + + def test_find_nonexistent_tool_returns_none(self, mock_model_instance, mock_context): + """Test that finding a nonexistent tool returns None.""" + mock_tool = MagicMock() + mock_tool.entity.identity.name = "test_tool" + + pattern = ConcreteAgentPattern( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + ) + + found_tool = pattern._find_tool_by_name("nonexistent_tool") + assert found_tool is None + + +class TestMaxIterationsCapping: + """Tests for max_iterations capping.""" + + def test_max_iterations_capped_at_99(self, mock_model_instance, mock_context): + """Test that max_iterations is capped at 99.""" + pattern = ConcreteAgentPattern( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + max_iterations=150, + ) + 
+ assert pattern.max_iterations == 99 + + def test_max_iterations_not_capped_when_under_99(self, mock_model_instance, mock_context): + """Test that max_iterations is not capped when under 99.""" + pattern = ConcreteAgentPattern( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + max_iterations=50, + ) + + assert pattern.max_iterations == 50 diff --git a/api/tests/unit_tests/core/agent/patterns/test_function_call.py b/api/tests/unit_tests/core/agent/patterns/test_function_call.py new file mode 100644 index 0000000000..6b3600dbbf --- /dev/null +++ b/api/tests/unit_tests/core/agent/patterns/test_function_call.py @@ -0,0 +1,332 @@ +"""Tests for FunctionCallStrategy.""" + +from decimal import Decimal +from unittest.mock import MagicMock + +import pytest + +from core.agent.entities import AgentLog, ExecutionContext +from core.model_runtime.entities.llm_entities import LLMUsage +from core.model_runtime.entities.message_entities import ( + PromptMessageTool, + SystemPromptMessage, + UserPromptMessage, +) + + +@pytest.fixture +def mock_model_instance(): + """Create a mock model instance.""" + model_instance = MagicMock() + model_instance.model = "test-model" + model_instance.provider = "test-provider" + return model_instance + + +@pytest.fixture +def mock_context(): + """Create a mock execution context.""" + return ExecutionContext( + user_id="test-user", + app_id="test-app", + conversation_id="test-conversation", + message_id="test-message", + tenant_id="test-tenant", + ) + + +@pytest.fixture +def mock_tool(): + """Create a mock tool.""" + tool = MagicMock() + tool.entity.identity.name = "test_tool" + tool.to_prompt_message_tool.return_value = PromptMessageTool( + name="test_tool", + description="A test tool", + parameters={ + "type": "object", + "properties": {"param1": {"type": "string", "description": "A parameter"}}, + "required": ["param1"], + }, + ) + return tool + + +class TestFunctionCallStrategyInit: + """Tests for FunctionCallStrategy 
initialization.""" + + def test_initialization(self, mock_model_instance, mock_context, mock_tool): + """Test basic initialization.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + max_iterations=10, + ) + + assert strategy.model_instance == mock_model_instance + assert strategy.context == mock_context + assert strategy.max_iterations == 10 + assert len(strategy.tools) == 1 + + def test_initialization_with_tool_invoke_hook(self, mock_model_instance, mock_context, mock_tool): + """Test initialization with tool_invoke_hook.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + mock_hook = MagicMock() + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + tool_invoke_hook=mock_hook, + ) + + assert strategy.tool_invoke_hook == mock_hook + + +class TestConvertToolsToPromptFormat: + """Tests for _convert_tools_to_prompt_format method.""" + + def test_convert_tools_returns_prompt_message_tools(self, mock_model_instance, mock_context, mock_tool): + """Test that _convert_tools_to_prompt_format returns PromptMessageTool list.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + ) + + tools = strategy._convert_tools_to_prompt_format() + + assert len(tools) == 1 + assert isinstance(tools[0], PromptMessageTool) + assert tools[0].name == "test_tool" + + def test_convert_tools_empty_when_no_tools(self, mock_model_instance, mock_context): + """Test that _convert_tools_to_prompt_format returns empty list when no tools.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + ) + + tools = 
strategy._convert_tools_to_prompt_format() + + assert tools == [] + + +class TestAgentLogGeneration: + """Tests for AgentLog generation during run.""" + + def test_round_log_structure(self, mock_model_instance, mock_context, mock_tool): + """Test that round logs have correct structure.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + max_iterations=1, + ) + + # Create a round log + round_log = strategy._create_log( + label="ROUND 1", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={"inputs": {"query": "test"}}, + ) + + assert round_log.label == "ROUND 1" + assert round_log.log_type == AgentLog.LogType.ROUND + assert round_log.status == AgentLog.LogStatus.START + assert "inputs" in round_log.data + + def test_tool_call_log_structure(self, mock_model_instance, mock_context, mock_tool): + """Test that tool call logs have correct structure.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + ) + + # Create a parent round log + round_log = strategy._create_log( + label="ROUND 1", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={}, + ) + + # Create a tool call log + tool_log = strategy._create_log( + label="CALL test_tool", + log_type=AgentLog.LogType.TOOL_CALL, + status=AgentLog.LogStatus.START, + data={"tool_name": "test_tool", "tool_args": {"param1": "value1"}}, + parent_id=round_log.id, + ) + + assert tool_log.label == "CALL test_tool" + assert tool_log.log_type == AgentLog.LogType.TOOL_CALL + assert tool_log.parent_id == round_log.id + assert tool_log.data["tool_name"] == "test_tool" + + +class TestToolInvocation: + """Tests for tool invocation.""" + + def test_invoke_tool_with_hook(self, mock_model_instance, mock_context, 
mock_tool): + """Test that tool invocation uses hook when provided.""" + from core.agent.patterns.function_call import FunctionCallStrategy + from core.tools.entities.tool_entities import ToolInvokeMeta + + mock_hook = MagicMock() + mock_meta = ToolInvokeMeta( + time_cost=0.5, + error=None, + tool_config={"tool_provider_type": "test", "tool_provider": "test_id"}, + ) + mock_hook.return_value = ("Tool result", ["file-1"], mock_meta) + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + tool_invoke_hook=mock_hook, + ) + + result, files, meta = strategy._invoke_tool(mock_tool, {"param1": "value"}, "test_tool") + + mock_hook.assert_called_once() + assert result == "Tool result" + assert files == [] # Hook returns file IDs, but _invoke_tool returns empty File list + assert meta == mock_meta + + def test_invoke_tool_without_hook_attribute_set(self, mock_model_instance, mock_context, mock_tool): + """Test that tool_invoke_hook is None when not provided.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + tool_invoke_hook=None, + ) + + # Verify that tool_invoke_hook is None + assert strategy.tool_invoke_hook is None + + +class TestUsageTracking: + """Tests for usage tracking across rounds.""" + + def test_round_usage_is_separate_from_total(self, mock_model_instance, mock_context): + """Test that round usage is tracked separately from total.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + ) + + # Simulate two rounds of usage + total_usage: dict = {"usage": None} + round1_usage: dict = {"usage": None} + round2_usage: dict = {"usage": None} + + # Round 1 + usage1 = LLMUsage( + prompt_tokens=100, + prompt_unit_price=Decimal("0.001"), + 
prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.1"), + completion_tokens=50, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.1"), + total_tokens=150, + total_price=Decimal("0.2"), + currency="USD", + latency=0.5, + ) + strategy._accumulate_usage(round1_usage, usage1) + strategy._accumulate_usage(total_usage, usage1) + + # Round 2 + usage2 = LLMUsage( + prompt_tokens=200, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.2"), + completion_tokens=100, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.2"), + total_tokens=300, + total_price=Decimal("0.4"), + currency="USD", + latency=0.5, + ) + strategy._accumulate_usage(round2_usage, usage2) + strategy._accumulate_usage(total_usage, usage2) + + # Verify round usage is separate + assert round1_usage["usage"].total_tokens == 150 + assert round2_usage["usage"].total_tokens == 300 + # Verify total is accumulated + assert total_usage["usage"].total_tokens == 450 + + +class TestPromptMessageHandling: + """Tests for prompt message handling.""" + + def test_messages_include_system_and_user(self, mock_model_instance, mock_context, mock_tool): + """Test that messages include system and user prompts.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + ) + + messages = [ + SystemPromptMessage(content="You are a helpful assistant."), + UserPromptMessage(content="Hello"), + ] + + # Just verify the messages can be processed + assert len(messages) == 2 + assert isinstance(messages[0], SystemPromptMessage) + assert isinstance(messages[1], UserPromptMessage) + + def test_assistant_message_with_tool_calls(self, mock_model_instance, mock_context, mock_tool): + """Test that assistant messages can contain 
tool calls.""" + from core.model_runtime.entities.message_entities import AssistantPromptMessage + + tool_call = AssistantPromptMessage.ToolCall( + id="call_123", + type="function", + function=AssistantPromptMessage.ToolCall.ToolCallFunction( + name="test_tool", + arguments='{"param1": "value1"}', + ), + ) + + assistant_message = AssistantPromptMessage( + content="I'll help you with that.", + tool_calls=[tool_call], + ) + + assert len(assistant_message.tool_calls) == 1 + assert assistant_message.tool_calls[0].function.name == "test_tool" diff --git a/api/tests/unit_tests/core/agent/patterns/test_react.py b/api/tests/unit_tests/core/agent/patterns/test_react.py new file mode 100644 index 0000000000..a942ba6100 --- /dev/null +++ b/api/tests/unit_tests/core/agent/patterns/test_react.py @@ -0,0 +1,224 @@ +"""Tests for ReActStrategy.""" + +from unittest.mock import MagicMock + +import pytest + +from core.agent.entities import ExecutionContext +from core.agent.patterns.react import ReActStrategy +from core.model_runtime.entities import SystemPromptMessage, UserPromptMessage + + +@pytest.fixture +def mock_model_instance(): + """Create a mock model instance.""" + model_instance = MagicMock() + model_instance.model = "test-model" + model_instance.provider = "test-provider" + return model_instance + + +@pytest.fixture +def mock_context(): + """Create a mock execution context.""" + return ExecutionContext( + user_id="test-user", + app_id="test-app", + conversation_id="test-conversation", + message_id="test-message", + tenant_id="test-tenant", + ) + + +@pytest.fixture +def mock_tool(): + """Create a mock tool.""" + from core.model_runtime.entities.message_entities import PromptMessageTool + + tool = MagicMock() + tool.entity.identity.name = "test_tool" + tool.entity.identity.provider = "test_provider" + + # Use real PromptMessageTool for proper serialization + prompt_tool = PromptMessageTool( + name="test_tool", + description="A test tool", + parameters={"type": "object", 
"properties": {}}, + ) + tool.to_prompt_message_tool.return_value = prompt_tool + + return tool + + +class TestReActStrategyInit: + """Tests for ReActStrategy initialization.""" + + def test_init_with_instruction(self, mock_model_instance, mock_context): + """Test that instruction is stored correctly.""" + instruction = "You are a helpful assistant." + + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + instruction=instruction, + ) + + assert strategy.instruction == instruction + + def test_init_with_empty_instruction(self, mock_model_instance, mock_context): + """Test that empty instruction is handled correctly.""" + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + ) + + assert strategy.instruction == "" + + +class TestBuildPromptWithReactFormat: + """Tests for _build_prompt_with_react_format method.""" + + def test_replace_tools_placeholder(self, mock_model_instance, mock_context, mock_tool): + """Test that {{tools}} placeholder is replaced.""" + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + ) + + system_content = "You have access to: {{tools}}" + messages = [ + SystemPromptMessage(content=system_content), + UserPromptMessage(content="Hello"), + ] + + result = strategy._build_prompt_with_react_format(messages, [], True) + + # The tools placeholder should be replaced with JSON + assert "{{tools}}" not in result[0].content + assert "test_tool" in result[0].content + + def test_replace_tool_names_placeholder(self, mock_model_instance, mock_context, mock_tool): + """Test that {{tool_names}} placeholder is replaced.""" + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + ) + + system_content = "Valid actions: {{tool_names}}" + messages = [ + SystemPromptMessage(content=system_content), + ] + + result = 
strategy._build_prompt_with_react_format(messages, [], True) + + assert "{{tool_names}}" not in result[0].content + assert '"test_tool"' in result[0].content + + def test_replace_instruction_placeholder(self, mock_model_instance, mock_context): + """Test that {{instruction}} placeholder is replaced.""" + instruction = "You are a helpful coding assistant." + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + instruction=instruction, + ) + + system_content = "{{instruction}}\n\nYou have access to: {{tools}}" + messages = [ + SystemPromptMessage(content=system_content), + ] + + result = strategy._build_prompt_with_react_format(messages, [], True, instruction) + + assert "{{instruction}}" not in result[0].content + assert instruction in result[0].content + + def test_no_tools_available_message(self, mock_model_instance, mock_context): + """Test that 'No tools available' is shown when include_tools is False.""" + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + ) + + system_content = "You have access to: {{tools}}" + messages = [ + SystemPromptMessage(content=system_content), + ] + + result = strategy._build_prompt_with_react_format(messages, [], False) + + assert "No tools available" in result[0].content + + def test_scratchpad_appended_as_assistant_message(self, mock_model_instance, mock_context): + """Test that agent scratchpad is appended as AssistantPromptMessage.""" + from core.agent.entities import AgentScratchpadUnit + from core.model_runtime.entities import AssistantPromptMessage + + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + ) + + messages = [ + SystemPromptMessage(content="System prompt"), + UserPromptMessage(content="User query"), + ] + + scratchpad = [ + AgentScratchpadUnit( + thought="I need to search for information", + action_str='{"action": "search", "action_input": "query"}', + 
observation="Search results here", + ) + ] + + result = strategy._build_prompt_with_react_format(messages, scratchpad, True) + + # The last message should be an AssistantPromptMessage with scratchpad content + assert len(result) == 3 + assert isinstance(result[-1], AssistantPromptMessage) + assert "I need to search for information" in result[-1].content + assert "Search results here" in result[-1].content + + def test_empty_scratchpad_no_extra_message(self, mock_model_instance, mock_context): + """Test that empty scratchpad doesn't add extra message.""" + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + ) + + messages = [ + SystemPromptMessage(content="System prompt"), + UserPromptMessage(content="User query"), + ] + + result = strategy._build_prompt_with_react_format(messages, [], True) + + # Should only have the original 2 messages + assert len(result) == 2 + + def test_original_messages_not_modified(self, mock_model_instance, mock_context): + """Test that original messages list is not modified.""" + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + ) + + original_content = "Original system prompt {{tools}}" + messages = [ + SystemPromptMessage(content=original_content), + ] + + strategy._build_prompt_with_react_format(messages, [], True) + + # Original message should not be modified + assert messages[0].content == original_content diff --git a/api/tests/unit_tests/core/agent/patterns/test_strategy_factory.py b/api/tests/unit_tests/core/agent/patterns/test_strategy_factory.py new file mode 100644 index 0000000000..07b9df2acf --- /dev/null +++ b/api/tests/unit_tests/core/agent/patterns/test_strategy_factory.py @@ -0,0 +1,203 @@ +"""Tests for StrategyFactory.""" + +from unittest.mock import MagicMock + +import pytest + +from core.agent.entities import AgentEntity, ExecutionContext +from core.agent.patterns.function_call import FunctionCallStrategy +from 
core.agent.patterns.react import ReActStrategy +from core.agent.patterns.strategy_factory import StrategyFactory +from core.model_runtime.entities.model_entities import ModelFeature + + +@pytest.fixture +def mock_model_instance(): + """Create a mock model instance.""" + model_instance = MagicMock() + model_instance.model = "test-model" + model_instance.provider = "test-provider" + return model_instance + + +@pytest.fixture +def mock_context(): + """Create a mock execution context.""" + return ExecutionContext( + user_id="test-user", + app_id="test-app", + conversation_id="test-conversation", + message_id="test-message", + tenant_id="test-tenant", + ) + + +class TestStrategyFactory: + """Tests for StrategyFactory.create_strategy method.""" + + def test_create_function_call_strategy_with_tool_call_feature(self, mock_model_instance, mock_context): + """Test that FunctionCallStrategy is created when model supports TOOL_CALL.""" + model_features = [ModelFeature.TOOL_CALL] + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + ) + + assert isinstance(strategy, FunctionCallStrategy) + + def test_create_function_call_strategy_with_multi_tool_call_feature(self, mock_model_instance, mock_context): + """Test that FunctionCallStrategy is created when model supports MULTI_TOOL_CALL.""" + model_features = [ModelFeature.MULTI_TOOL_CALL] + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + ) + + assert isinstance(strategy, FunctionCallStrategy) + + def test_create_function_call_strategy_with_stream_tool_call_feature(self, mock_model_instance, mock_context): + """Test that FunctionCallStrategy is created when model supports STREAM_TOOL_CALL.""" + model_features = [ModelFeature.STREAM_TOOL_CALL] + + strategy = StrategyFactory.create_strategy( + 
model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + ) + + assert isinstance(strategy, FunctionCallStrategy) + + def test_create_react_strategy_without_tool_call_features(self, mock_model_instance, mock_context): + """Test that ReActStrategy is created when model doesn't support tool calling.""" + model_features = [ModelFeature.VISION] # Only vision, no tool calling + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + ) + + assert isinstance(strategy, ReActStrategy) + + def test_create_react_strategy_with_empty_features(self, mock_model_instance, mock_context): + """Test that ReActStrategy is created when model has no features.""" + model_features: list[ModelFeature] = [] + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + ) + + assert isinstance(strategy, ReActStrategy) + + def test_explicit_function_calling_strategy_with_support(self, mock_model_instance, mock_context): + """Test explicit FUNCTION_CALLING strategy selection with model support.""" + model_features = [ModelFeature.TOOL_CALL] + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + agent_strategy=AgentEntity.Strategy.FUNCTION_CALLING, + ) + + assert isinstance(strategy, FunctionCallStrategy) + + def test_explicit_function_calling_strategy_without_support_falls_back_to_react( + self, mock_model_instance, mock_context + ): + """Test that explicit FUNCTION_CALLING falls back to ReAct when not supported.""" + model_features: list[ModelFeature] = [] # No tool calling support + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + 
context=mock_context, + tools=[], + files=[], + agent_strategy=AgentEntity.Strategy.FUNCTION_CALLING, + ) + + # Should fall back to ReAct since FC is not supported + assert isinstance(strategy, ReActStrategy) + + def test_explicit_chain_of_thought_strategy(self, mock_model_instance, mock_context): + """Test explicit CHAIN_OF_THOUGHT strategy selection.""" + model_features = [ModelFeature.TOOL_CALL] # Even with tool call support + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + agent_strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT, + ) + + assert isinstance(strategy, ReActStrategy) + + def test_react_strategy_with_instruction(self, mock_model_instance, mock_context): + """Test that ReActStrategy receives instruction parameter.""" + model_features: list[ModelFeature] = [] + instruction = "You are a helpful assistant." + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + instruction=instruction, + ) + + assert isinstance(strategy, ReActStrategy) + assert strategy.instruction == instruction + + def test_max_iterations_passed_to_strategy(self, mock_model_instance, mock_context): + """Test that max_iterations is passed to the strategy.""" + model_features = [ModelFeature.TOOL_CALL] + max_iterations = 5 + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + max_iterations=max_iterations, + ) + + assert strategy.max_iterations == max_iterations + + def test_tool_invoke_hook_passed_to_strategy(self, mock_model_instance, mock_context): + """Test that tool_invoke_hook is passed to the strategy.""" + model_features = [ModelFeature.TOOL_CALL] + mock_hook = MagicMock() + + strategy = StrategyFactory.create_strategy( + 
model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + tool_invoke_hook=mock_hook, + ) + + assert strategy.tool_invoke_hook == mock_hook diff --git a/api/tests/unit_tests/core/agent/test_agent_app_runner.py b/api/tests/unit_tests/core/agent/test_agent_app_runner.py new file mode 100644 index 0000000000..d9301ccfe0 --- /dev/null +++ b/api/tests/unit_tests/core/agent/test_agent_app_runner.py @@ -0,0 +1,388 @@ +"""Tests for AgentAppRunner.""" + +from decimal import Decimal +from unittest.mock import MagicMock, patch + +import pytest + +from core.agent.entities import AgentEntity, AgentLog, AgentPromptEntity, AgentResult +from core.model_runtime.entities import SystemPromptMessage, UserPromptMessage +from core.model_runtime.entities.llm_entities import LLMUsage + + +class TestOrganizePromptMessages: + """Tests for _organize_prompt_messages method.""" + + @pytest.fixture + def mock_runner(self): + """Create a mock AgentAppRunner for testing.""" + # We'll patch the class to avoid complex initialization + with patch("core.agent.agent_app_runner.BaseAgentRunner.__init__", return_value=None): + from core.agent.agent_app_runner import AgentAppRunner + + runner = AgentAppRunner.__new__(AgentAppRunner) + + # Set up required attributes + runner.config = MagicMock(spec=AgentEntity) + runner.config.strategy = AgentEntity.Strategy.FUNCTION_CALLING + runner.config.prompt = None + + runner.app_config = MagicMock() + runner.app_config.prompt_template = MagicMock() + runner.app_config.prompt_template.simple_prompt_template = "You are a helpful assistant." 
+ + runner.history_prompt_messages = [] + runner.query = "Hello" + runner._current_thoughts = [] + runner.files = [] + runner.model_config = MagicMock() + runner.memory = None + runner.application_generate_entity = MagicMock() + runner.application_generate_entity.file_upload_config = None + + return runner + + def test_function_calling_uses_simple_prompt(self, mock_runner): + """Test that function calling strategy uses simple_prompt_template.""" + mock_runner.config.strategy = AgentEntity.Strategy.FUNCTION_CALLING + + with patch.object(mock_runner, "_init_system_message") as mock_init: + mock_init.return_value = [SystemPromptMessage(content="You are a helpful assistant.")] + with patch.object(mock_runner, "_organize_user_query") as mock_query: + mock_query.return_value = [UserPromptMessage(content="Hello")] + with patch("core.agent.agent_app_runner.AgentHistoryPromptTransform") as mock_transform: + mock_transform.return_value.get_prompt.return_value = [ + SystemPromptMessage(content="You are a helpful assistant.") + ] + + result = mock_runner._organize_prompt_messages() + + # Verify _init_system_message was called with simple_prompt_template + mock_init.assert_called_once() + call_args = mock_init.call_args[0] + assert call_args[0] == "You are a helpful assistant." 
+ + def test_chain_of_thought_uses_agent_prompt(self, mock_runner): + """Test that chain of thought strategy uses agent prompt template.""" + mock_runner.config.strategy = AgentEntity.Strategy.CHAIN_OF_THOUGHT + mock_runner.config.prompt = AgentPromptEntity( + first_prompt="ReAct prompt template with {{tools}}", + next_iteration="Continue...", + ) + + with patch.object(mock_runner, "_init_system_message") as mock_init: + mock_init.return_value = [SystemPromptMessage(content="ReAct prompt template with {{tools}}")] + with patch.object(mock_runner, "_organize_user_query") as mock_query: + mock_query.return_value = [UserPromptMessage(content="Hello")] + with patch("core.agent.agent_app_runner.AgentHistoryPromptTransform") as mock_transform: + mock_transform.return_value.get_prompt.return_value = [ + SystemPromptMessage(content="ReAct prompt template with {{tools}}") + ] + + result = mock_runner._organize_prompt_messages() + + # Verify _init_system_message was called with agent prompt + mock_init.assert_called_once() + call_args = mock_init.call_args[0] + assert call_args[0] == "ReAct prompt template with {{tools}}" + + def test_chain_of_thought_without_prompt_falls_back(self, mock_runner): + """Test that chain of thought without prompt falls back to simple_prompt_template.""" + mock_runner.config.strategy = AgentEntity.Strategy.CHAIN_OF_THOUGHT + mock_runner.config.prompt = None + + with patch.object(mock_runner, "_init_system_message") as mock_init: + mock_init.return_value = [SystemPromptMessage(content="You are a helpful assistant.")] + with patch.object(mock_runner, "_organize_user_query") as mock_query: + mock_query.return_value = [UserPromptMessage(content="Hello")] + with patch("core.agent.agent_app_runner.AgentHistoryPromptTransform") as mock_transform: + mock_transform.return_value.get_prompt.return_value = [ + SystemPromptMessage(content="You are a helpful assistant.") + ] + + result = mock_runner._organize_prompt_messages() + + # Verify _init_system_message 
was called with simple_prompt_template + mock_init.assert_called_once() + call_args = mock_init.call_args[0] + assert call_args[0] == "You are a helpful assistant." + + +class TestInitSystemMessage: + """Tests for _init_system_message method.""" + + @pytest.fixture + def mock_runner(self): + """Create a mock AgentAppRunner for testing.""" + with patch("core.agent.agent_app_runner.BaseAgentRunner.__init__", return_value=None): + from core.agent.agent_app_runner import AgentAppRunner + + runner = AgentAppRunner.__new__(AgentAppRunner) + return runner + + def test_empty_messages_with_template(self, mock_runner): + """Test that system message is created when messages are empty.""" + result = mock_runner._init_system_message("System template", []) + + assert len(result) == 1 + assert isinstance(result[0], SystemPromptMessage) + assert result[0].content == "System template" + + def test_empty_messages_without_template(self, mock_runner): + """Test that empty list is returned when no template and no messages.""" + result = mock_runner._init_system_message("", []) + + assert result == [] + + def test_existing_system_message_not_duplicated(self, mock_runner): + """Test that system message is not duplicated if already present.""" + existing_messages = [ + SystemPromptMessage(content="Existing system"), + UserPromptMessage(content="User message"), + ] + + result = mock_runner._init_system_message("New template", existing_messages) + + # Should not insert new system message + assert len(result) == 2 + assert result[0].content == "Existing system" + + def test_system_message_inserted_when_missing(self, mock_runner): + """Test that system message is inserted when first message is not system.""" + existing_messages = [ + UserPromptMessage(content="User message"), + ] + + result = mock_runner._init_system_message("System template", existing_messages) + + assert len(result) == 2 + assert isinstance(result[0], SystemPromptMessage) + assert result[0].content == "System template" + + 
+class TestClearUserPromptImageMessages: + """Tests for _clear_user_prompt_image_messages method.""" + + @pytest.fixture + def mock_runner(self): + """Create a mock AgentAppRunner for testing.""" + with patch("core.agent.agent_app_runner.BaseAgentRunner.__init__", return_value=None): + from core.agent.agent_app_runner import AgentAppRunner + + runner = AgentAppRunner.__new__(AgentAppRunner) + return runner + + def test_text_content_unchanged(self, mock_runner): + """Test that text content is unchanged.""" + messages = [ + UserPromptMessage(content="Plain text message"), + ] + + result = mock_runner._clear_user_prompt_image_messages(messages) + + assert len(result) == 1 + assert result[0].content == "Plain text message" + + def test_original_messages_not_modified(self, mock_runner): + """Test that original messages are not modified (deep copy).""" + from core.model_runtime.entities.message_entities import ( + ImagePromptMessageContent, + TextPromptMessageContent, + ) + + messages = [ + UserPromptMessage( + content=[ + TextPromptMessageContent(data="Text part"), + ImagePromptMessageContent( + data="http://example.com/image.jpg", + format="url", + mime_type="image/jpeg", + ), + ] + ), + ] + + result = mock_runner._clear_user_prompt_image_messages(messages) + + # Original should still have list content + assert isinstance(messages[0].content, list) + # Result should have string content + assert isinstance(result[0].content, str) + + +class TestToolInvokeHook: + """Tests for _create_tool_invoke_hook method.""" + + @pytest.fixture + def mock_runner(self): + """Create a mock AgentAppRunner for testing.""" + with patch("core.agent.agent_app_runner.BaseAgentRunner.__init__", return_value=None): + from core.agent.agent_app_runner import AgentAppRunner + + runner = AgentAppRunner.__new__(AgentAppRunner) + + runner.user_id = "test-user" + runner.tenant_id = "test-tenant" + runner.application_generate_entity = MagicMock() + runner.application_generate_entity.trace_manager = 
None + runner.application_generate_entity.invoke_from = "api" + runner.application_generate_entity.app_config = MagicMock() + runner.application_generate_entity.app_config.app_id = "test-app" + runner.agent_callback = MagicMock() + runner.conversation = MagicMock() + runner.conversation.id = "test-conversation" + runner.queue_manager = MagicMock() + runner._current_message_file_ids = [] + + return runner + + def test_hook_calls_agent_invoke(self, mock_runner): + """Test that the hook calls ToolEngine.agent_invoke.""" + from core.tools.entities.tool_entities import ToolInvokeMeta + + mock_message = MagicMock() + mock_message.id = "test-message" + + mock_tool = MagicMock() + mock_tool_meta = ToolInvokeMeta( + time_cost=0.5, + error=None, + tool_config={ + "tool_provider_type": "test_provider", + "tool_provider": "test_id", + }, + ) + + with patch("core.agent.agent_app_runner.ToolEngine") as mock_engine: + mock_engine.agent_invoke.return_value = ("Tool result", ["file-1", "file-2"], mock_tool_meta) + + hook = mock_runner._create_tool_invoke_hook(mock_message) + result_content, result_files, result_meta = hook(mock_tool, {"arg": "value"}, "test_tool") + + # Verify ToolEngine.agent_invoke was called + mock_engine.agent_invoke.assert_called_once() + + # Verify return values + assert result_content == "Tool result" + assert result_files == ["file-1", "file-2"] + assert result_meta == mock_tool_meta + + def test_hook_publishes_file_events(self, mock_runner): + """Test that the hook publishes QueueMessageFileEvent for files.""" + from core.tools.entities.tool_entities import ToolInvokeMeta + + mock_message = MagicMock() + mock_message.id = "test-message" + + mock_tool = MagicMock() + mock_tool_meta = ToolInvokeMeta( + time_cost=0.5, + error=None, + tool_config={}, + ) + + with patch("core.agent.agent_app_runner.ToolEngine") as mock_engine: + mock_engine.agent_invoke.return_value = ("Tool result", ["file-1", "file-2"], mock_tool_meta) + + hook = 
mock_runner._create_tool_invoke_hook(mock_message) + hook(mock_tool, {}, "test_tool") + + # Verify file events were published + assert mock_runner.queue_manager.publish.call_count == 2 + assert mock_runner._current_message_file_ids == ["file-1", "file-2"] + + +class TestAgentLogProcessing: + """Tests for AgentLog processing in run method.""" + + def test_agent_log_status_enum(self): + """Test AgentLog status enum values.""" + assert AgentLog.LogStatus.START == "start" + assert AgentLog.LogStatus.SUCCESS == "success" + assert AgentLog.LogStatus.ERROR == "error" + + def test_agent_log_metadata_enum(self): + """Test AgentLog metadata enum values.""" + assert AgentLog.LogMetadata.STARTED_AT == "started_at" + assert AgentLog.LogMetadata.FINISHED_AT == "finished_at" + assert AgentLog.LogMetadata.ELAPSED_TIME == "elapsed_time" + assert AgentLog.LogMetadata.TOTAL_PRICE == "total_price" + assert AgentLog.LogMetadata.TOTAL_TOKENS == "total_tokens" + assert AgentLog.LogMetadata.LLM_USAGE == "llm_usage" + + def test_agent_result_structure(self): + """Test AgentResult structure.""" + usage = LLMUsage( + prompt_tokens=100, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.1"), + completion_tokens=50, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.1"), + total_tokens=150, + total_price=Decimal("0.2"), + currency="USD", + latency=0.5, + ) + + result = AgentResult( + text="Final answer", + files=[], + usage=usage, + finish_reason="stop", + ) + + assert result.text == "Final answer" + assert result.files == [] + assert result.usage == usage + assert result.finish_reason == "stop" + + +class TestOrganizeUserQuery: + """Tests for _organize_user_query method.""" + + @pytest.fixture + def mock_runner(self): + """Create a mock AgentAppRunner for testing.""" + with patch("core.agent.agent_app_runner.BaseAgentRunner.__init__", return_value=None): + from 
core.agent.agent_app_runner import AgentAppRunner + + runner = AgentAppRunner.__new__(AgentAppRunner) + runner.files = [] + runner.application_generate_entity = MagicMock() + runner.application_generate_entity.file_upload_config = None + return runner + + def test_simple_query_without_files(self, mock_runner): + """Test organizing a simple query without files.""" + result = mock_runner._organize_user_query("Hello world", []) + + assert len(result) == 1 + assert isinstance(result[0], UserPromptMessage) + assert result[0].content == "Hello world" + + def test_query_with_files(self, mock_runner): + """Test organizing a query with files.""" + from core.file.models import File + + mock_file = MagicMock(spec=File) + mock_runner.files = [mock_file] + + with patch("core.agent.agent_app_runner.file_manager") as mock_fm: + from core.model_runtime.entities.message_entities import ImagePromptMessageContent + + mock_fm.to_prompt_message_content.return_value = ImagePromptMessageContent( + data="http://example.com/image.jpg", + format="url", + mime_type="image/jpeg", + ) + + result = mock_runner._organize_user_query("Describe this image", []) + + assert len(result) == 1 + assert isinstance(result[0], UserPromptMessage) + assert isinstance(result[0].content, list) + assert len(result[0].content) == 2 # Image + Text diff --git a/api/tests/unit_tests/core/agent/test_entities.py b/api/tests/unit_tests/core/agent/test_entities.py new file mode 100644 index 0000000000..5136f48aab --- /dev/null +++ b/api/tests/unit_tests/core/agent/test_entities.py @@ -0,0 +1,191 @@ +"""Tests for agent entities.""" + +from core.agent.entities import AgentEntity, AgentLog, AgentPromptEntity, AgentScratchpadUnit, ExecutionContext + + +class TestExecutionContext: + """Tests for ExecutionContext entity.""" + + def test_create_with_all_fields(self): + """Test creating ExecutionContext with all fields.""" + context = ExecutionContext( + user_id="user-123", + app_id="app-456", + conversation_id="conv-789", + 
message_id="msg-012", + tenant_id="tenant-345", + ) + + assert context.user_id == "user-123" + assert context.app_id == "app-456" + assert context.conversation_id == "conv-789" + assert context.message_id == "msg-012" + assert context.tenant_id == "tenant-345" + + def test_create_minimal(self): + """Test creating minimal ExecutionContext.""" + context = ExecutionContext.create_minimal(user_id="user-123") + + assert context.user_id == "user-123" + assert context.app_id is None + assert context.conversation_id is None + assert context.message_id is None + assert context.tenant_id is None + + def test_to_dict(self): + """Test converting ExecutionContext to dictionary.""" + context = ExecutionContext( + user_id="user-123", + app_id="app-456", + conversation_id="conv-789", + message_id="msg-012", + tenant_id="tenant-345", + ) + + result = context.to_dict() + + assert result == { + "user_id": "user-123", + "app_id": "app-456", + "conversation_id": "conv-789", + "message_id": "msg-012", + "tenant_id": "tenant-345", + } + + def test_with_updates(self): + """Test creating new context with updates.""" + original = ExecutionContext( + user_id="user-123", + app_id="app-456", + ) + + updated = original.with_updates(message_id="msg-789") + + # Original should be unchanged + assert original.message_id is None + # Updated should have new value + assert updated.message_id == "msg-789" + assert updated.user_id == "user-123" + assert updated.app_id == "app-456" + + +class TestAgentLog: + """Tests for AgentLog entity.""" + + def test_create_log_with_required_fields(self): + """Test creating AgentLog with required fields.""" + log = AgentLog( + label="ROUND 1", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={"key": "value"}, + ) + + assert log.label == "ROUND 1" + assert log.log_type == AgentLog.LogType.ROUND + assert log.status == AgentLog.LogStatus.START + assert log.data == {"key": "value"} + assert log.id is not None # Auto-generated + assert 
log.parent_id is None + assert log.error is None + + def test_log_type_enum(self): + """Test LogType enum values.""" + assert AgentLog.LogType.ROUND == "round" + assert AgentLog.LogType.THOUGHT == "thought" + assert AgentLog.LogType.TOOL_CALL == "tool_call" + + def test_log_status_enum(self): + """Test LogStatus enum values.""" + assert AgentLog.LogStatus.START == "start" + assert AgentLog.LogStatus.SUCCESS == "success" + assert AgentLog.LogStatus.ERROR == "error" + + def test_log_metadata_enum(self): + """Test LogMetadata enum values.""" + assert AgentLog.LogMetadata.STARTED_AT == "started_at" + assert AgentLog.LogMetadata.FINISHED_AT == "finished_at" + assert AgentLog.LogMetadata.ELAPSED_TIME == "elapsed_time" + assert AgentLog.LogMetadata.TOTAL_PRICE == "total_price" + assert AgentLog.LogMetadata.TOTAL_TOKENS == "total_tokens" + assert AgentLog.LogMetadata.LLM_USAGE == "llm_usage" + + +class TestAgentScratchpadUnit: + """Tests for AgentScratchpadUnit entity.""" + + def test_is_final_with_final_answer_action(self): + """Test is_final returns True for Final Answer action.""" + unit = AgentScratchpadUnit( + thought="I know the answer", + action=AgentScratchpadUnit.Action( + action_name="Final Answer", + action_input="The answer is 42", + ), + ) + + assert unit.is_final() is True + + def test_is_final_with_tool_action(self): + """Test is_final returns False for tool action.""" + unit = AgentScratchpadUnit( + thought="I need to search", + action=AgentScratchpadUnit.Action( + action_name="search", + action_input={"query": "test"}, + ), + ) + + assert unit.is_final() is False + + def test_is_final_with_no_action(self): + """Test is_final returns True when no action.""" + unit = AgentScratchpadUnit( + thought="Just thinking", + ) + + assert unit.is_final() is True + + def test_action_to_dict(self): + """Test Action.to_dict method.""" + action = AgentScratchpadUnit.Action( + action_name="search", + action_input={"query": "test"}, + ) + + result = action.to_dict() + + 
assert result == { + "action": "search", + "action_input": {"query": "test"}, + } + + +class TestAgentEntity: + """Tests for AgentEntity.""" + + def test_strategy_enum(self): + """Test Strategy enum values.""" + assert AgentEntity.Strategy.CHAIN_OF_THOUGHT == "chain-of-thought" + assert AgentEntity.Strategy.FUNCTION_CALLING == "function-calling" + + def test_create_with_prompt(self): + """Test creating AgentEntity with prompt.""" + prompt = AgentPromptEntity( + first_prompt="You are a helpful assistant.", + next_iteration="Continue thinking...", + ) + + entity = AgentEntity( + provider="openai", + model="gpt-4", + strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT, + prompt=prompt, + max_iteration=5, + ) + + assert entity.provider == "openai" + assert entity.model == "gpt-4" + assert entity.strategy == AgentEntity.Strategy.CHAIN_OF_THOUGHT + assert entity.prompt == prompt + assert entity.max_iteration == 5 diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_stream_chunk.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_stream_chunk.py new file mode 100644 index 0000000000..8779e8c586 --- /dev/null +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_stream_chunk.py @@ -0,0 +1,47 @@ +from unittest.mock import MagicMock + +from core.app.apps.base_app_queue_manager import PublishFrom +from core.app.apps.workflow_app_runner import WorkflowBasedAppRunner +from core.workflow.graph_events import NodeRunStreamChunkEvent +from core.workflow.nodes import NodeType + + +class DummyQueueManager: + def __init__(self) -> None: + self.published = [] + + def publish(self, event, publish_from: PublishFrom) -> None: + self.published.append((event, publish_from)) + + +def test_skip_empty_final_chunk() -> None: + queue_manager = DummyQueueManager() + runner = WorkflowBasedAppRunner(queue_manager=queue_manager, app_id="app") + + empty_final_event = NodeRunStreamChunkEvent( + id="exec", + node_id="node", + node_type=NodeType.LLM, + 
selector=["node", "text"], + chunk="", + is_final=True, + ) + + runner._handle_event(workflow_entry=MagicMock(), event=empty_final_event) + assert queue_manager.published == [] + + normal_event = NodeRunStreamChunkEvent( + id="exec", + node_id="node", + node_type=NodeType.LLM, + selector=["node", "text"], + chunk="hi", + is_final=False, + ) + + runner._handle_event(workflow_entry=MagicMock(), event=normal_event) + + assert len(queue_manager.published) == 1 + published_event, publish_from = queue_manager.published[0] + assert publish_from == PublishFrom.APPLICATION_MANAGER + assert published_event.text == "hi" diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py b/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py new file mode 100644 index 0000000000..822b6a808f --- /dev/null +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py @@ -0,0 +1,231 @@ +"""Tests for ResponseStreamCoordinator object field streaming.""" + +from unittest.mock import MagicMock + +from core.workflow.entities.tool_entities import ToolResultStatus +from core.workflow.enums import NodeType +from core.workflow.graph.graph import Graph +from core.workflow.graph_engine.response_coordinator.coordinator import ResponseStreamCoordinator +from core.workflow.graph_engine.response_coordinator.session import ResponseSession +from core.workflow.graph_events import ( + ChunkType, + NodeRunStreamChunkEvent, + ToolCall, + ToolResult, +) +from core.workflow.nodes.base.entities import BaseNodeData +from core.workflow.nodes.base.template import Template, VariableSegment +from core.workflow.runtime import VariablePool + + +class TestResponseCoordinatorObjectStreaming: + """Test streaming of object-type variables with child fields.""" + + def test_object_field_streaming(self): + """Test that when selecting an object variable, all child field streams are forwarded.""" + # Create mock graph and variable pool + graph = 
MagicMock(spec=Graph) + variable_pool = MagicMock(spec=VariablePool) + + # Mock nodes + llm_node = MagicMock() + llm_node.id = "llm_node" + llm_node.node_type = NodeType.LLM + llm_node.execution_type = MagicMock() + llm_node.blocks_variable_output = MagicMock(return_value=False) + + response_node = MagicMock() + response_node.id = "response_node" + response_node.node_type = NodeType.ANSWER + response_node.execution_type = MagicMock() + response_node.blocks_variable_output = MagicMock(return_value=False) + + # Mock template for response node + response_node.node_data = MagicMock(spec=BaseNodeData) + response_node.node_data.answer = "{{#llm_node.generation#}}" + + graph.nodes = { + "llm_node": llm_node, + "response_node": response_node, + } + graph.root_node = llm_node + graph.get_outgoing_edges = MagicMock(return_value=[]) + + # Create coordinator + coordinator = ResponseStreamCoordinator(variable_pool, graph) + + # Track execution + coordinator.track_node_execution("llm_node", "exec_123") + coordinator.track_node_execution("response_node", "exec_456") + + # Simulate streaming events for child fields of generation object + # 1. Content stream + content_event_1 = NodeRunStreamChunkEvent( + id="exec_123", + node_id="llm_node", + node_type=NodeType.LLM, + selector=["llm_node", "generation", "content"], + chunk="Hello", + is_final=False, + chunk_type=ChunkType.TEXT, + ) + content_event_2 = NodeRunStreamChunkEvent( + id="exec_123", + node_id="llm_node", + node_type=NodeType.LLM, + selector=["llm_node", "generation", "content"], + chunk=" world", + is_final=True, + chunk_type=ChunkType.TEXT, + ) + + # 2. Tool call stream + tool_call_event = NodeRunStreamChunkEvent( + id="exec_123", + node_id="llm_node", + node_type=NodeType.LLM, + selector=["llm_node", "generation", "tool_calls"], + chunk='{"query": "test"}', + is_final=True, + chunk_type=ChunkType.TOOL_CALL, + tool_call=ToolCall( + id="call_123", + name="search", + arguments='{"query": "test"}', + ), + ) + + # 3. 
Tool result stream + tool_result_event = NodeRunStreamChunkEvent( + id="exec_123", + node_id="llm_node", + node_type=NodeType.LLM, + selector=["llm_node", "generation", "tool_results"], + chunk="Found 10 results", + is_final=True, + chunk_type=ChunkType.TOOL_RESULT, + tool_result=ToolResult( + id="call_123", + name="search", + output="Found 10 results", + files=[], + status=ToolResultStatus.SUCCESS, + ), + ) + + # Intercept these events + coordinator.intercept_event(content_event_1) + coordinator.intercept_event(tool_call_event) + coordinator.intercept_event(tool_result_event) + coordinator.intercept_event(content_event_2) + + # Verify that all child streams are buffered + assert ("llm_node", "generation", "content") in coordinator._stream_buffers + assert ("llm_node", "generation", "tool_calls") in coordinator._stream_buffers + assert ("llm_node", "generation", "tool_results") in coordinator._stream_buffers + + # Verify payloads are preserved in buffered events + buffered_call = coordinator._stream_buffers[("llm_node", "generation", "tool_calls")][0] + assert buffered_call.tool_call is not None + assert buffered_call.tool_call.id == "call_123" + buffered_result = coordinator._stream_buffers[("llm_node", "generation", "tool_results")][0] + assert buffered_result.tool_result is not None + assert buffered_result.tool_result.status == "success" + + # Verify we can find child streams + child_streams = coordinator._find_child_streams(["llm_node", "generation"]) + assert len(child_streams) == 3 + assert ("llm_node", "generation", "content") in child_streams + assert ("llm_node", "generation", "tool_calls") in child_streams + assert ("llm_node", "generation", "tool_results") in child_streams + + def test_find_child_streams(self): + """Test the _find_child_streams method.""" + graph = MagicMock(spec=Graph) + variable_pool = MagicMock(spec=VariablePool) + + coordinator = ResponseStreamCoordinator(variable_pool, graph) + + # Add some mock streams + 
coordinator._stream_buffers = { + ("node1", "generation", "content"): [], + ("node1", "generation", "tool_calls"): [], + ("node1", "generation", "thought"): [], + ("node1", "text"): [], # Not a child of generation + ("node2", "generation", "content"): [], # Different node + } + + # Find children of node1.generation + children = coordinator._find_child_streams(["node1", "generation"]) + + assert len(children) == 3 + assert ("node1", "generation", "content") in children + assert ("node1", "generation", "tool_calls") in children + assert ("node1", "generation", "thought") in children + assert ("node1", "text") not in children + assert ("node2", "generation", "content") not in children + + def test_find_child_streams_with_closed_streams(self): + """Test that _find_child_streams also considers closed streams.""" + graph = MagicMock(spec=Graph) + variable_pool = MagicMock(spec=VariablePool) + + coordinator = ResponseStreamCoordinator(variable_pool, graph) + + # Add some streams - some buffered, some closed + coordinator._stream_buffers = { + ("node1", "generation", "content"): [], + } + coordinator._closed_streams = { + ("node1", "generation", "tool_calls"), + ("node1", "generation", "thought"), + } + + # Should find all children regardless of whether they're in buffers or closed + children = coordinator._find_child_streams(["node1", "generation"]) + + assert len(children) == 3 + assert ("node1", "generation", "content") in children + assert ("node1", "generation", "tool_calls") in children + assert ("node1", "generation", "thought") in children + + def test_special_selector_rewrites_to_active_response_node(self): + """Ensure special selectors attribute streams to the active response node.""" + graph = MagicMock(spec=Graph) + variable_pool = MagicMock(spec=VariablePool) + + response_node = MagicMock() + response_node.id = "response_node" + response_node.node_type = NodeType.ANSWER + graph.nodes = {"response_node": response_node} + graph.root_node = response_node + + 
coordinator = ResponseStreamCoordinator(variable_pool, graph) + coordinator.track_node_execution("response_node", "exec_resp") + + coordinator._active_session = ResponseSession( + node_id="response_node", + template=Template(segments=[VariableSegment(selector=["sys", "foo"])]), + ) + + event = NodeRunStreamChunkEvent( + id="stream_1", + node_id="llm_node", + node_type=NodeType.LLM, + selector=["sys", "foo"], + chunk="hi", + is_final=True, + chunk_type=ChunkType.TEXT, + ) + + coordinator._stream_buffers[("sys", "foo")] = [event] + coordinator._stream_positions[("sys", "foo")] = 0 + coordinator._closed_streams.add(("sys", "foo")) + + events, is_complete = coordinator._process_variable_segment(VariableSegment(selector=["sys", "foo"])) + + assert is_complete + assert len(events) == 1 + rewritten = events[0] + assert rewritten.node_id == "response_node" + assert rewritten.id == "exec_resp" diff --git a/api/tests/unit_tests/core/workflow/node_events/test_stream_chunk_events.py b/api/tests/unit_tests/core/workflow/node_events/test_stream_chunk_events.py new file mode 100644 index 0000000000..951149e933 --- /dev/null +++ b/api/tests/unit_tests/core/workflow/node_events/test_stream_chunk_events.py @@ -0,0 +1,328 @@ +"""Tests for StreamChunkEvent and its subclasses.""" + +from core.workflow.entities import ToolCall, ToolResult, ToolResultStatus +from core.workflow.node_events import ( + ChunkType, + StreamChunkEvent, + ThoughtChunkEvent, + ToolCallChunkEvent, + ToolResultChunkEvent, +) + + +class TestChunkType: + """Tests for ChunkType enum.""" + + def test_chunk_type_values(self): + """Test that ChunkType has expected values.""" + assert ChunkType.TEXT == "text" + assert ChunkType.TOOL_CALL == "tool_call" + assert ChunkType.TOOL_RESULT == "tool_result" + assert ChunkType.THOUGHT == "thought" + + def test_chunk_type_is_str_enum(self): + """Test that ChunkType values are strings.""" + for chunk_type in ChunkType: + assert isinstance(chunk_type.value, str) + + +class 
TestStreamChunkEvent: + """Tests for base StreamChunkEvent.""" + + def test_create_with_required_fields(self): + """Test creating StreamChunkEvent with required fields.""" + event = StreamChunkEvent( + selector=["node1", "text"], + chunk="Hello", + ) + + assert event.selector == ["node1", "text"] + assert event.chunk == "Hello" + assert event.is_final is False + assert event.chunk_type == ChunkType.TEXT + + def test_create_with_all_fields(self): + """Test creating StreamChunkEvent with all fields.""" + event = StreamChunkEvent( + selector=["node1", "output"], + chunk="World", + is_final=True, + chunk_type=ChunkType.TEXT, + ) + + assert event.selector == ["node1", "output"] + assert event.chunk == "World" + assert event.is_final is True + assert event.chunk_type == ChunkType.TEXT + + def test_default_chunk_type_is_text(self): + """Test that default chunk_type is TEXT.""" + event = StreamChunkEvent( + selector=["node1", "text"], + chunk="test", + ) + + assert event.chunk_type == ChunkType.TEXT + + def test_serialization(self): + """Test that event can be serialized to dict.""" + event = StreamChunkEvent( + selector=["node1", "text"], + chunk="Hello", + is_final=True, + ) + + data = event.model_dump() + + assert data["selector"] == ["node1", "text"] + assert data["chunk"] == "Hello" + assert data["is_final"] is True + assert data["chunk_type"] == "text" + + +class TestToolCallChunkEvent: + """Tests for ToolCallChunkEvent.""" + + def test_create_with_required_fields(self): + """Test creating ToolCallChunkEvent with required fields.""" + event = ToolCallChunkEvent( + selector=["node1", "tool_calls"], + chunk='{"city": "Beijing"}', + tool_call=ToolCall(id="call_123", name="weather", arguments=None), + ) + + assert event.selector == ["node1", "tool_calls"] + assert event.chunk == '{"city": "Beijing"}' + assert event.tool_call.id == "call_123" + assert event.tool_call.name == "weather" + assert event.chunk_type == ChunkType.TOOL_CALL + + def 
test_chunk_type_is_tool_call(self): + """Test that chunk_type is always TOOL_CALL.""" + event = ToolCallChunkEvent( + selector=["node1", "tool_calls"], + chunk="", + tool_call=ToolCall(id="call_123", name="test_tool", arguments=None), + ) + + assert event.chunk_type == ChunkType.TOOL_CALL + + def test_tool_arguments_field(self): + """Test tool_arguments field.""" + event = ToolCallChunkEvent( + selector=["node1", "tool_calls"], + chunk='{"param": "value"}', + tool_call=ToolCall( + id="call_123", + name="test_tool", + arguments='{"param": "value"}', + ), + ) + + assert event.tool_call.arguments == '{"param": "value"}' + + def test_serialization(self): + """Test that event can be serialized to dict.""" + event = ToolCallChunkEvent( + selector=["node1", "tool_calls"], + chunk='{"city": "Beijing"}', + tool_call=ToolCall( + id="call_123", + name="weather", + arguments='{"city": "Beijing"}', + ), + is_final=True, + ) + + data = event.model_dump() + + assert data["chunk_type"] == "tool_call" + assert data["tool_call"]["id"] == "call_123" + assert data["tool_call"]["name"] == "weather" + assert data["tool_call"]["arguments"] == '{"city": "Beijing"}' + assert data["is_final"] is True + + +class TestToolResultChunkEvent: + """Tests for ToolResultChunkEvent.""" + + def test_create_with_required_fields(self): + """Test creating ToolResultChunkEvent with required fields.""" + event = ToolResultChunkEvent( + selector=["node1", "tool_results"], + chunk="Weather: Sunny, 25°C", + tool_result=ToolResult(id="call_123", name="weather", output="Weather: Sunny, 25°C"), + ) + + assert event.selector == ["node1", "tool_results"] + assert event.chunk == "Weather: Sunny, 25°C" + assert event.tool_result.id == "call_123" + assert event.tool_result.name == "weather" + assert event.chunk_type == ChunkType.TOOL_RESULT + + def test_chunk_type_is_tool_result(self): + """Test that chunk_type is always TOOL_RESULT.""" + event = ToolResultChunkEvent( + selector=["node1", "tool_results"], + 
chunk="result", + tool_result=ToolResult(id="call_123", name="test_tool"), + ) + + assert event.chunk_type == ChunkType.TOOL_RESULT + + def test_tool_files_default_empty(self): + """Test that tool_files defaults to empty list.""" + event = ToolResultChunkEvent( + selector=["node1", "tool_results"], + chunk="result", + tool_result=ToolResult(id="call_123", name="test_tool"), + ) + + assert event.tool_result.files == [] + + def test_tool_files_with_values(self): + """Test tool_files with file IDs.""" + event = ToolResultChunkEvent( + selector=["node1", "tool_results"], + chunk="result", + tool_result=ToolResult( + id="call_123", + name="test_tool", + files=["file_1", "file_2"], + ), + ) + + assert event.tool_result.files == ["file_1", "file_2"] + + def test_tool_error_output(self): + """Test error output captured in tool_result.""" + event = ToolResultChunkEvent( + selector=["node1", "tool_results"], + chunk="", + tool_result=ToolResult( + id="call_123", + name="test_tool", + output="Tool execution failed", + status=ToolResultStatus.ERROR, + ), + ) + + assert event.tool_result.output == "Tool execution failed" + assert event.tool_result.status == ToolResultStatus.ERROR + + def test_serialization(self): + """Test that event can be serialized to dict.""" + event = ToolResultChunkEvent( + selector=["node1", "tool_results"], + chunk="Weather: Sunny", + tool_result=ToolResult( + id="call_123", + name="weather", + output="Weather: Sunny", + files=["file_1"], + status=ToolResultStatus.SUCCESS, + ), + is_final=True, + ) + + data = event.model_dump() + + assert data["chunk_type"] == "tool_result" + assert data["tool_result"]["id"] == "call_123" + assert data["tool_result"]["name"] == "weather" + assert data["tool_result"]["files"] == ["file_1"] + assert data["is_final"] is True + + +class TestThoughtChunkEvent: + """Tests for ThoughtChunkEvent.""" + + def test_create_with_required_fields(self): + """Test creating ThoughtChunkEvent with required fields.""" + event = 
ThoughtChunkEvent( + selector=["node1", "thought"], + chunk="I need to query the weather...", + ) + + assert event.selector == ["node1", "thought"] + assert event.chunk == "I need to query the weather..." + assert event.chunk_type == ChunkType.THOUGHT + + def test_chunk_type_is_thought(self): + """Test that chunk_type is always THOUGHT.""" + event = ThoughtChunkEvent( + selector=["node1", "thought"], + chunk="thinking...", + ) + + assert event.chunk_type == ChunkType.THOUGHT + + def test_serialization(self): + """Test that event can be serialized to dict.""" + event = ThoughtChunkEvent( + selector=["node1", "thought"], + chunk="I need to analyze this...", + is_final=False, + ) + + data = event.model_dump() + + assert data["chunk_type"] == "thought" + assert data["chunk"] == "I need to analyze this..." + assert data["is_final"] is False + + +class TestEventInheritance: + """Tests for event inheritance relationships.""" + + def test_tool_call_is_stream_chunk(self): + """Test that ToolCallChunkEvent is a StreamChunkEvent.""" + event = ToolCallChunkEvent( + selector=["node1", "tool_calls"], + chunk="", + tool_call=ToolCall(id="call_123", name="test", arguments=None), + ) + + assert isinstance(event, StreamChunkEvent) + + def test_tool_result_is_stream_chunk(self): + """Test that ToolResultChunkEvent is a StreamChunkEvent.""" + event = ToolResultChunkEvent( + selector=["node1", "tool_results"], + chunk="result", + tool_result=ToolResult(id="call_123", name="test"), + ) + + assert isinstance(event, StreamChunkEvent) + + def test_thought_is_stream_chunk(self): + """Test that ThoughtChunkEvent is a StreamChunkEvent.""" + event = ThoughtChunkEvent( + selector=["node1", "thought"], + chunk="thinking...", + ) + + assert isinstance(event, StreamChunkEvent) + + def test_all_events_have_common_fields(self): + """Test that all events have common StreamChunkEvent fields.""" + events = [ + StreamChunkEvent(selector=["n", "t"], chunk="a"), + ToolCallChunkEvent( + selector=["n", 
"t"], + chunk="b", + tool_call=ToolCall(id="1", name="t", arguments=None), + ), + ToolResultChunkEvent( + selector=["n", "t"], + chunk="c", + tool_result=ToolResult(id="1", name="t"), + ), + ThoughtChunkEvent(selector=["n", "t"], chunk="d"), + ] + + for event in events: + assert hasattr(event, "selector") + assert hasattr(event, "chunk") + assert hasattr(event, "is_final") + assert hasattr(event, "chunk_type") diff --git a/api/tests/unit_tests/core/workflow/nodes/test_llm_node_streaming.py b/api/tests/unit_tests/core/workflow/nodes/test_llm_node_streaming.py new file mode 100644 index 0000000000..9d793f804f --- /dev/null +++ b/api/tests/unit_tests/core/workflow/nodes/test_llm_node_streaming.py @@ -0,0 +1,148 @@ +import types +from collections.abc import Generator +from typing import Any + +import pytest + +from core.model_runtime.entities.llm_entities import LLMUsage +from core.workflow.entities import ToolCallResult +from core.workflow.entities.tool_entities import ToolResultStatus +from core.workflow.node_events import ModelInvokeCompletedEvent, NodeEventBase +from core.workflow.nodes.llm.node import LLMNode + + +class _StubModelInstance: + """Minimal stub to satisfy _stream_llm_events signature.""" + + provider_model_bundle = None + + +def _drain(generator: Generator[NodeEventBase, None, Any]): + events: list = [] + try: + while True: + events.append(next(generator)) + except StopIteration as exc: + return events, exc.value + + +@pytest.fixture(autouse=True) +def patch_deduct_llm_quota(monkeypatch): + # Avoid touching real quota logic during unit tests + monkeypatch.setattr("core.workflow.nodes.llm.node.llm_utils.deduct_llm_quota", lambda **_: None) + + +def _make_llm_node(reasoning_format: str) -> LLMNode: + node = LLMNode.__new__(LLMNode) + object.__setattr__(node, "_node_data", types.SimpleNamespace(reasoning_format=reasoning_format, tools=[])) + object.__setattr__(node, "tenant_id", "tenant") + return node + + +def 
test_stream_llm_events_extracts_reasoning_for_tagged(): + node = _make_llm_node(reasoning_format="tagged") + tagged_text = "ThoughtAnswer" + usage = LLMUsage.empty_usage() + + def generator(): + yield ModelInvokeCompletedEvent( + text=tagged_text, + usage=usage, + finish_reason="stop", + reasoning_content="", + structured_output=None, + ) + + events, returned = _drain( + node._stream_llm_events(generator(), model_instance=types.SimpleNamespace(provider_model_bundle=None)) + ) + + assert events == [] + clean_text, reasoning_content, gen_reasoning, gen_clean, ret_usage, finish_reason, structured, gen_data = returned + assert clean_text == tagged_text # original preserved for output + assert reasoning_content == "" # tagged mode keeps reasoning separate + assert gen_clean == "Answer" # stripped content for generation + assert gen_reasoning == "Thought" # reasoning extracted from tag + assert ret_usage == usage + assert finish_reason == "stop" + assert structured is None + assert gen_data is None + + # generation building should include reasoning and sequence + generation_content = gen_clean or clean_text + sequence = [ + {"type": "reasoning", "index": 0}, + {"type": "content", "start": 0, "end": len(generation_content)}, + ] + assert sequence == [ + {"type": "reasoning", "index": 0}, + {"type": "content", "start": 0, "end": len("Answer")}, + ] + + +def test_stream_llm_events_no_reasoning_results_in_empty_sequence(): + node = _make_llm_node(reasoning_format="tagged") + plain_text = "Hello world" + usage = LLMUsage.empty_usage() + + def generator(): + yield ModelInvokeCompletedEvent( + text=plain_text, + usage=usage, + finish_reason=None, + reasoning_content="", + structured_output=None, + ) + + events, returned = _drain( + node._stream_llm_events(generator(), model_instance=types.SimpleNamespace(provider_model_bundle=None)) + ) + + assert events == [] + _, _, gen_reasoning, gen_clean, *_ = returned + generation_content = gen_clean or plain_text + assert gen_reasoning 
== "" + assert generation_content == plain_text + # Empty reasoning should imply empty sequence in generation construction + sequence = [] + assert sequence == [] + + +def test_serialize_tool_call_strips_files_to_ids(): + file_cls = pytest.importorskip("core.file").File + file_type = pytest.importorskip("core.file.enums").FileType + transfer_method = pytest.importorskip("core.file.enums").FileTransferMethod + + file_with_id = file_cls( + id="f1", + tenant_id="t", + type=file_type.IMAGE, + transfer_method=transfer_method.REMOTE_URL, + remote_url="http://example.com/f1", + storage_key="k1", + ) + file_with_related = file_cls( + id=None, + tenant_id="t", + type=file_type.IMAGE, + transfer_method=transfer_method.REMOTE_URL, + related_id="rel2", + remote_url="http://example.com/f2", + storage_key="k2", + ) + tool_call = ToolCallResult( + id="tc", + name="do", + arguments='{"a":1}', + output="ok", + files=[file_with_id, file_with_related], + status=ToolResultStatus.SUCCESS, + ) + + serialized = LLMNode._serialize_tool_call(tool_call) + + assert serialized["files"] == ["f1", "rel2"] + assert serialized["id"] == "tc" + assert serialized["name"] == "do" + assert serialized["arguments"] == '{"a":1}' + assert serialized["output"] == "ok" diff --git a/web/app/components/app/configuration/config-var/index.tsx b/web/app/components/app/configuration/config-var/index.tsx index 4a38fc92a6..6142008ae2 100644 --- a/web/app/components/app/configuration/config-var/index.tsx +++ b/web/app/components/app/configuration/config-var/index.tsx @@ -274,9 +274,9 @@ const ConfigVar: FC = ({ promptVariables, readonly, onPromptVar )} {hasVar && ( -
+
{ onPromptVariablesChange?.(list.map(item => item.variable)) }} handle=".handle" diff --git a/web/app/components/app/configuration/config-var/var-item.tsx b/web/app/components/app/configuration/config-var/var-item.tsx index 1fc21e3d33..b26249dac8 100644 --- a/web/app/components/app/configuration/config-var/var-item.tsx +++ b/web/app/components/app/configuration/config-var/var-item.tsx @@ -39,7 +39,7 @@ const VarItem: FC = ({ const [isDeleting, setIsDeleting] = useState(false) return ( -
+
{canDrag && ( diff --git a/web/app/components/app/configuration/config-vision/index.tsx b/web/app/components/app/configuration/config-vision/index.tsx index bc313b9ac1..481e6b5ab6 100644 --- a/web/app/components/app/configuration/config-vision/index.tsx +++ b/web/app/components/app/configuration/config-vision/index.tsx @@ -1,5 +1,6 @@ 'use client' import type { FC } from 'react' +import { noop } from 'es-toolkit/function' import { produce } from 'immer' import * as React from 'react' import { useCallback } from 'react' @@ -10,14 +11,17 @@ import { useFeatures, useFeaturesStore } from '@/app/components/base/features/ho import { Vision } from '@/app/components/base/icons/src/vender/features' import Switch from '@/app/components/base/switch' import Tooltip from '@/app/components/base/tooltip' +import OptionCard from '@/app/components/workflow/nodes/_base/components/option-card' import { SupportUploadFileTypes } from '@/app/components/workflow/types' // import OptionCard from '@/app/components/workflow/nodes/_base/components/option-card' import ConfigContext from '@/context/debug-configuration' +import { Resolution } from '@/types/app' +import { cn } from '@/utils/classnames' import ParamConfig from './param-config' const ConfigVision: FC = () => { const { t } = useTranslation() - const { isShowVisionConfig, isAllowVideoUpload } = useContext(ConfigContext) + const { isShowVisionConfig, isAllowVideoUpload, readonly } = useContext(ConfigContext) const file = useFeatures(s => s.features.file) const featuresStore = useFeaturesStore() @@ -54,7 +58,7 @@ const ConfigVision: FC = () => { setFeatures(newFeatures) }, [featuresStore, isAllowVideoUpload]) - if (!isShowVisionConfig) + if (!isShowVisionConfig || (readonly && !isImageEnabled)) return null return ( @@ -75,37 +79,55 @@ const ConfigVision: FC = () => { />
- {/*
-
{t('appDebug.vision.visionSettings.resolution')}
- - {t('appDebug.vision.visionSettings.resolutionTooltip').split('\n').map(item => ( -
{item}
- ))} -
- } - /> -
*/} - {/*
- handleChange(Resolution.high)} - /> - handleChange(Resolution.low)} - /> -
*/} - -
- + {readonly + ? ( + <> +
+
{t('vision.visionSettings.resolution', { ns: 'appDebug' })}
+ + {t('vision.visionSettings.resolutionTooltip', { ns: 'appDebug' }).split('\n').map(item => ( +
{item}
+ ))} +
+ )} + /> +
+
+ + +
+ + ) + : ( + <> + +
+ + + )} +
) diff --git a/web/app/components/app/configuration/config/agent/agent-tools/index.tsx b/web/app/components/app/configuration/config/agent/agent-tools/index.tsx index 02179822c9..3378afb944 100644 --- a/web/app/components/app/configuration/config/agent/agent-tools/index.tsx +++ b/web/app/components/app/configuration/config/agent/agent-tools/index.tsx @@ -40,7 +40,7 @@ type AgentToolWithMoreInfo = AgentTool & { icon: any, collection?: Collection } const AgentTools: FC = () => { const { t } = useTranslation() const [isShowChooseTool, setIsShowChooseTool] = useState(false) - const { modelConfig, setModelConfig } = useContext(ConfigContext) + const { readonly, modelConfig, setModelConfig } = useContext(ConfigContext) const { data: buildInTools } = useAllBuiltInTools() const { data: customTools } = useAllCustomTools() const { data: workflowTools } = useAllWorkflowTools() @@ -168,10 +168,10 @@ const AgentTools: FC = () => { {tools.filter(item => !!item.enabled).length} / {tools.length} -  +   {t('agent.tools.enabled', { ns: 'appDebug' })} - {tools.length < MAX_TOOLS_NUM && ( + {tools.length < MAX_TOOLS_NUM && !readonly && ( <>
{ )} > -
+
{tools.map((item: AgentTool & { icon: any, collection?: Collection }, index) => (
{ > {getProviderShowName(item)} {item.tool_label} - {!item.isDeleted && ( + {!item.isDeleted && !readonly && ( @@ -260,7 +260,7 @@ const AgentTools: FC = () => {
)} - {!item.isDeleted && ( + {!item.isDeleted && !readonly && (
{!item.notAuthor && ( { {!item.notAuthor && ( { const newModelConfig = produce(modelConfig, (draft) => { @@ -313,6 +313,7 @@ const AgentTools: FC = () => { {item.notAuthor && (
-
-
- -
+ {!readonly && ( +
+
+ +
+ )}
) } diff --git a/web/app/components/app/configuration/config/config-document.tsx b/web/app/components/app/configuration/config/config-document.tsx index 3f192fd401..7d48c1582a 100644 --- a/web/app/components/app/configuration/config/config-document.tsx +++ b/web/app/components/app/configuration/config/config-document.tsx @@ -17,7 +17,7 @@ const ConfigDocument: FC = () => { const { t } = useTranslation() const file = useFeatures(s => s.features.file) const featuresStore = useFeaturesStore() - const { isShowDocumentConfig } = useContext(ConfigContext) + const { isShowDocumentConfig, readonly } = useContext(ConfigContext) const isDocumentEnabled = file?.allowed_file_types?.includes(SupportUploadFileTypes.document) ?? false @@ -45,7 +45,7 @@ const ConfigDocument: FC = () => { setFeatures(newFeatures) }, [featuresStore]) - if (!isShowDocumentConfig) + if (!isShowDocumentConfig || (readonly && !isDocumentEnabled)) return null return ( @@ -65,14 +65,16 @@ const ConfigDocument: FC = () => { )} /> -
-
- -
+ {!readonly && ( +
+
+ +
+ )} ) } diff --git a/web/app/components/app/configuration/config/index.tsx b/web/app/components/app/configuration/config/index.tsx index f208b99e59..3e2b201172 100644 --- a/web/app/components/app/configuration/config/index.tsx +++ b/web/app/components/app/configuration/config/index.tsx @@ -18,6 +18,7 @@ import ConfigDocument from './config-document' const Config: FC = () => { const { + readonly, mode, isAdvancedMode, modelModeType, @@ -27,6 +28,7 @@ const Config: FC = () => { modelConfig, setModelConfig, setPrevPromptConfig, + dataSets, } = useContext(ConfigContext) const isChatApp = [AppModeEnum.ADVANCED_CHAT, AppModeEnum.AGENT_CHAT, AppModeEnum.CHAT].includes(mode) const formattingChangedDispatcher = useFormattingChangedDispatcher() @@ -65,19 +67,27 @@ const Config: FC = () => { promptTemplate={promptTemplate} promptVariables={promptVariables} onChange={handlePromptChange} + readonly={readonly} /> {/* Variables */} - + {!(readonly && promptVariables.length === 0) && ( + + )} {/* Dataset */} - - + {!(readonly && dataSets.length === 0) && ( + + )} {/* Tools */} - {isAgent && ( + {isAgent && !(readonly && modelConfig.agentConfig.tools.length === 0) && ( )} @@ -88,7 +98,7 @@ const Config: FC = () => { {/* Chat History */} - {isAdvancedMode && isChatApp && modelModeType === ModelModeType.completion && ( + {!readonly && isAdvancedMode && isChatApp && modelModeType === ModelModeType.completion && ( = ({ config, onSave, onRemove, + readonly = false, editable = true, }) => { const media = useBreakpoints() @@ -56,6 +57,7 @@ const Item: FC = ({
@@ -70,7 +72,7 @@ const Item: FC = ({
{ - editable && ( + editable && !readonly && ( { e.stopPropagation() @@ -81,14 +83,18 @@ const Item: FC = ({ ) } - onRemove(config.id)} - state={isDeleting ? ActionButtonState.Destructive : ActionButtonState.Default} - onMouseEnter={() => setIsDeleting(true)} - onMouseLeave={() => setIsDeleting(false)} - > - - + { + !readonly && ( + onRemove(config.id)} + state={isDeleting ? ActionButtonState.Destructive : ActionButtonState.Default} + onMouseEnter={() => setIsDeleting(true)} + onMouseLeave={() => setIsDeleting(false)} + > + + + ) + }
{ config.indexing_technique && ( @@ -107,11 +113,13 @@ const Item: FC = ({ ) } setShowSettingsModal(false)} footer={null} mask={isMobile} panelClassName="mt-16 mx-2 sm:mr-2 mb-3 !p-0 !max-w-[640px] rounded-xl"> - setShowSettingsModal(false)} - onSave={handleSave} - /> + {showSettingsModal && ( + setShowSettingsModal(false)} + onSave={handleSave} + /> + )}
) diff --git a/web/app/components/app/configuration/dataset-config/index.tsx b/web/app/components/app/configuration/dataset-config/index.tsx index 309c6e7ddb..6de77cad9e 100644 --- a/web/app/components/app/configuration/dataset-config/index.tsx +++ b/web/app/components/app/configuration/dataset-config/index.tsx @@ -30,6 +30,7 @@ import { import { useSelector as useAppContextSelector } from '@/context/app-context' import ConfigContext from '@/context/debug-configuration' import { AppModeEnum } from '@/types/app' +import { cn } from '@/utils/classnames' import { hasEditPermissionForDataset } from '@/utils/permission' import FeaturePanel from '../base/feature-panel' import OperationBtn from '../base/operation-btn' @@ -38,7 +39,11 @@ import CardItem from './card-item' import ContextVar from './context-var' import ParamsConfig from './params-config' -const DatasetConfig: FC = () => { +type Props = { + readonly?: boolean + hideMetadataFilter?: boolean +} +const DatasetConfig: FC = ({ readonly, hideMetadataFilter }) => { const { t } = useTranslation() const userProfile = useAppContextSelector(s => s.userProfile) const { @@ -259,17 +264,19 @@ const DatasetConfig: FC = () => { className="mt-2" title={t('feature.dataSet.title', { ns: 'appDebug' })} headerRight={( -
- {!isAgent && } - -
+ !readonly && ( +
+ {!isAgent && } + +
+ ) )} hasHeaderBottomBorder={!hasData} noBodySpacing > {hasData ? ( -
+
{formattedDataset.map(item => ( { onRemove={onRemove} onSave={handleSave} editable={item.editable} + readonly={readonly} /> ))}
@@ -287,27 +295,29 @@ const DatasetConfig: FC = () => {
)} -
- item.type === MetadataFilteringVariableType.string || item.type === MetadataFilteringVariableType.select)} - availableCommonNumberVars={promptVariablesToSelect.filter(item => item.type === MetadataFilteringVariableType.number)} - /> -
+ {!hideMetadataFilter && ( +
+ item.type === MetadataFilteringVariableType.string || item.type === MetadataFilteringVariableType.select)} + availableCommonNumberVars={promptVariablesToSelect.filter(item => item.type === MetadataFilteringVariableType.number)} + /> +
+ )} - {mode === AppModeEnum.COMPLETION && dataSet.length > 0 && ( + {!readonly && mode === AppModeEnum.COMPLETION && dataSet.length > 0 && ( { const { t } = useTranslation() - const { modelConfig, setInputs } = useContext(ConfigContext) + const { modelConfig, setInputs, readonly } = useContext(ConfigContext) const promptVariables = modelConfig.configs.prompt_variables.filter(({ key, name }) => { return key && key?.trim() && name && name?.trim() @@ -89,6 +89,7 @@ const ChatUserInput = ({ placeholder={name} autoFocus={index === 0} maxLength={max_length || DEFAULT_VALUE_MAX_LEN} + readOnly={readonly} /> )} {type === 'paragraph' && ( @@ -97,6 +98,7 @@ const ChatUserInput = ({ placeholder={name} value={inputs[key] ? `${inputs[key]}` : ''} onChange={(e) => { handleInputValueChange(key, e.target.value) }} + readOnly={readonly} /> )} {type === 'select' && ( @@ -106,6 +108,7 @@ const ChatUserInput = ({ onSelect={(i) => { handleInputValueChange(key, i.value as string) }} items={(options || []).map(i => ({ name: i, value: i }))} allowSearch={false} + disabled={readonly} /> )} {type === 'number' && ( @@ -116,6 +119,7 @@ const ChatUserInput = ({ placeholder={name} autoFocus={index === 0} maxLength={max_length || DEFAULT_VALUE_MAX_LEN} + readOnly={readonly} /> )} {type === 'checkbox' && ( @@ -124,6 +128,7 @@ const ChatUserInput = ({ value={!!inputs[key]} required={required} onChange={(value) => { handleInputValueChange(key, value) }} + readonly={readonly} /> )} diff --git a/web/app/components/app/configuration/debug/debug-with-multiple-model/text-generation-item.tsx b/web/app/components/app/configuration/debug/debug-with-multiple-model/text-generation-item.tsx index d7918e7ad6..eb18ca45b1 100644 --- a/web/app/components/app/configuration/debug/debug-with-multiple-model/text-generation-item.tsx +++ b/web/app/components/app/configuration/debug/debug-with-multiple-model/text-generation-item.tsx @@ -15,6 +15,7 @@ import { DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG 
} from '@/ import { useDebugConfigurationContext } from '@/context/debug-configuration' import { useEventEmitterContextContext } from '@/context/event-emitter' import { useProviderContext } from '@/context/provider-context' +import { AppSourceType } from '@/service/share' import { promptVariablesToUserInputsForm } from '@/utils/model-config' import { APP_CHAT_WITH_MULTIPLE_MODEL } from '../types' @@ -130,11 +131,11 @@ const TextGenerationItem: FC = ({ return ( { const { userProfile } = useAppContext() const { + readonly, modelConfig, appId, inputs, @@ -150,6 +151,7 @@ const DebugWithSingleModel = ( return ( = ({ }) => { const { t } = useTranslation() const { + readonly, appId, mode, modelModeType, @@ -416,25 +418,33 @@ const Debug: FC = ({ } {mode !== AppModeEnum.COMPLETION && ( <> - - - - - - {varList.length > 0 && ( -
+ { + !readonly && ( - setExpanded(!expanded)}> - + + + - {expanded &&
} -
- )} + ) + } + + { + varList.length > 0 && ( +
+ + !readonly && setExpanded(!expanded)}> + + + + {expanded &&
} +
+ ) + } )}
@@ -444,19 +454,21 @@ const Debug: FC = ({
)} - {mode === AppModeEnum.COMPLETION && ( - - )} + { + mode === AppModeEnum.COMPLETION && ( + + ) + } { debugWithMultipleModel && ( @@ -510,12 +522,12 @@ const Debug: FC = ({
= ({
) } - {isShowFormattingChangeConfirm && ( - - )} - {!isAPIKeySet && ()} + { + isShowFormattingChangeConfirm && ( + + ) + } + {!isAPIKeySet && !readonly && ()} ) } diff --git a/web/app/components/app/configuration/prompt-value-panel/index.tsx b/web/app/components/app/configuration/prompt-value-panel/index.tsx index 9b61b3c7aa..8aade70458 100644 --- a/web/app/components/app/configuration/prompt-value-panel/index.tsx +++ b/web/app/components/app/configuration/prompt-value-panel/index.tsx @@ -41,7 +41,7 @@ const PromptValuePanel: FC = ({ onVisionFilesChange, }) => { const { t } = useTranslation() - const { modelModeType, modelConfig, setInputs, mode, isAdvancedMode, completionPromptConfig, chatPromptConfig } = useContext(ConfigContext) + const { readonly, modelModeType, modelConfig, setInputs, mode, isAdvancedMode, completionPromptConfig, chatPromptConfig } = useContext(ConfigContext) const [userInputFieldCollapse, setUserInputFieldCollapse] = useState(false) const promptVariables = modelConfig.configs.prompt_variables.filter(({ key, name }) => { return key && key?.trim() && name && name?.trim() @@ -79,12 +79,12 @@ const PromptValuePanel: FC = ({ if (isAdvancedMode) { if (modelModeType === ModelModeType.chat) - return chatPromptConfig.prompt.every(({ text }) => !text) + return chatPromptConfig?.prompt.every(({ text }) => !text) return !completionPromptConfig.prompt?.text } else { return !modelConfig.configs.prompt_template } - }, [chatPromptConfig.prompt, completionPromptConfig.prompt?.text, isAdvancedMode, mode, modelConfig.configs.prompt_template, modelModeType]) + }, [chatPromptConfig?.prompt, completionPromptConfig.prompt?.text, isAdvancedMode, mode, modelConfig.configs.prompt_template, modelModeType]) const handleInputValueChange = (key: string, value: string | boolean) => { if (!(key in promptVariableObj)) @@ -143,6 +143,7 @@ const PromptValuePanel: FC = ({ placeholder={name} autoFocus={index === 0} maxLength={max_length || DEFAULT_VALUE_MAX_LEN} + 
readOnly={readonly} /> )} {type === 'paragraph' && ( @@ -151,6 +152,7 @@ const PromptValuePanel: FC = ({ placeholder={name} value={inputs[key] ? `${inputs[key]}` : ''} onChange={(e) => { handleInputValueChange(key, e.target.value) }} + readOnly={readonly} /> )} {type === 'select' && ( @@ -161,6 +163,7 @@ const PromptValuePanel: FC = ({ items={(options || []).map(i => ({ name: i, value: i }))} allowSearch={false} bgClassName="bg-gray-50" + disabled={readonly} /> )} {type === 'number' && ( @@ -171,6 +174,7 @@ const PromptValuePanel: FC = ({ placeholder={name} autoFocus={index === 0} maxLength={max_length || DEFAULT_VALUE_MAX_LEN} + readOnly={readonly} /> )} {type === 'checkbox' && ( @@ -179,6 +183,7 @@ const PromptValuePanel: FC = ({ value={!!inputs[key]} required={required} onChange={(value) => { handleInputValueChange(key, value) }} + readonly={readonly} /> )} @@ -197,6 +202,7 @@ const PromptValuePanel: FC = ({ url: fileItem.url, upload_file_id: fileItem.fileId, })))} + disabled={readonly} /> @@ -205,12 +211,12 @@ const PromptValuePanel: FC = ({ )} {!userInputFieldCollapse && (
- + {canNotRun && (
diff --git a/web/app/components/app/create-app-dialog/app-card/index.spec.tsx b/web/app/components/app/create-app-dialog/app-card/index.spec.tsx index e1f9773ac3..82e4fb8f94 100644 --- a/web/app/components/app/create-app-dialog/app-card/index.spec.tsx +++ b/web/app/components/app/create-app-dialog/app-card/index.spec.tsx @@ -10,6 +10,7 @@ vi.mock('@heroicons/react/20/solid', () => ({ })) const mockApp: App = { + can_trial: true, app: { id: 'test-app-id', mode: AppModeEnum.CHAT, diff --git a/web/app/components/app/create-app-dialog/app-card/index.tsx b/web/app/components/app/create-app-dialog/app-card/index.tsx index 695faed5e0..15cfbd5411 100644 --- a/web/app/components/app/create-app-dialog/app-card/index.tsx +++ b/web/app/components/app/create-app-dialog/app-card/index.tsx @@ -1,9 +1,14 @@ 'use client' import type { App } from '@/models/explore' import { PlusIcon } from '@heroicons/react/20/solid' +import { RiInformation2Line } from '@remixicon/react' +import { useCallback } from 'react' import { useTranslation } from 'react-i18next' +import { useContextSelector } from 'use-context-selector' import AppIcon from '@/app/components/base/app-icon' import Button from '@/app/components/base/button' +import AppListContext from '@/context/app-list-context' +import { useGlobalPublicStore } from '@/context/global-public-context' import { cn } from '@/utils/classnames' import { AppTypeIcon, AppTypeLabel } from '../../type-selector' @@ -20,6 +25,14 @@ const AppCard = ({ }: AppCardProps) => { const { t } = useTranslation() const { app: appBasicInfo } = app + const { systemFeatures } = useGlobalPublicStore() + const isTrialApp = app.can_trial && systemFeatures.enable_trial_app + const setShowTryAppPanel = useContextSelector(AppListContext, ctx => ctx.setShowTryAppPanel) + const showTryAPPPanel = useCallback((appId: string) => { + return () => { + setShowTryAppPanel?.(true, { appId, app }) + } + }, [setShowTryAppPanel, app.category]) return (
@@ -51,11 +64,17 @@ const AppCard = ({
{canCreate && ( )} diff --git a/web/app/components/app/log/list.tsx b/web/app/components/app/log/list.tsx index 410953ccf7..5197a02bb3 100644 --- a/web/app/components/app/log/list.tsx +++ b/web/app/components/app/log/list.tsx @@ -39,6 +39,7 @@ import { useAppContext } from '@/context/app-context' import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints' import useTimestamp from '@/hooks/use-timestamp' import { fetchChatMessages, updateLogMessageAnnotations, updateLogMessageFeedbacks } from '@/service/log' +import { AppSourceType } from '@/service/share' import { useChatConversationDetail, useCompletionConversationDetail } from '@/service/use-log' import { AppModeEnum } from '@/types/app' import { cn } from '@/utils/classnames' @@ -638,12 +639,12 @@ function DetailPanel({ detail, onFeedback }: IDetailPanel) {
item.from_source === 'admin')} onFeedback={feedback => onFeedback(detail.message.id, feedback)} diff --git a/web/app/components/app/text-generate/item/index.tsx b/web/app/components/app/text-generate/item/index.tsx index 78f4f426f5..c39282a022 100644 --- a/web/app/components/app/text-generate/item/index.tsx +++ b/web/app/components/app/text-generate/item/index.tsx @@ -29,7 +29,7 @@ import { Markdown } from '@/app/components/base/markdown' import NewAudioButton from '@/app/components/base/new-audio-button' import Toast from '@/app/components/base/toast' import { fetchTextGenerationMessage } from '@/service/debug' -import { fetchMoreLikeThis, updateFeedback } from '@/service/share' +import { AppSourceType, fetchMoreLikeThis, updateFeedback } from '@/service/share' import { cn } from '@/utils/classnames' import ResultTab from './result-tab' @@ -53,7 +53,7 @@ export type IGenerationItemProps = { onFeedback?: (feedback: FeedbackType) => void onSave?: (messageId: string) => void isMobile?: boolean - isInstalledApp: boolean + appSourceType: AppSourceType installedAppId?: string taskId?: string controlClearMoreLikeThis?: number @@ -87,7 +87,7 @@ const GenerationItem: FC = ({ onSave, depth = 1, isMobile, - isInstalledApp, + appSourceType, installedAppId, taskId, controlClearMoreLikeThis, @@ -100,6 +100,7 @@ const GenerationItem: FC = ({ const { t } = useTranslation() const params = useParams() const isTop = depth === 1 + const isTryApp = appSourceType === AppSourceType.tryApp const [completionRes, setCompletionRes] = useState('') const [childMessageId, setChildMessageId] = useState(null) const [childFeedback, setChildFeedback] = useState({ @@ -113,7 +114,7 @@ const GenerationItem: FC = ({ const setShowPromptLogModal = useAppStore(s => s.setShowPromptLogModal) const handleFeedback = async (childFeedback: FeedbackType) => { - await updateFeedback({ url: `/messages/${childMessageId}/feedbacks`, body: { rating: childFeedback.rating } }, isInstalledApp, installedAppId) + await 
updateFeedback({ url: `/messages/${childMessageId}/feedbacks`, body: { rating: childFeedback.rating } }, appSourceType, installedAppId) setChildFeedback(childFeedback) } @@ -131,7 +132,7 @@ const GenerationItem: FC = ({ onSave, isShowTextToSpeech, isMobile, - isInstalledApp, + appSourceType, installedAppId, controlClearMoreLikeThis, isWorkflow, @@ -145,7 +146,7 @@ const GenerationItem: FC = ({ return } startQuerying() - const res: any = await fetchMoreLikeThis(messageId as string, isInstalledApp, installedAppId) + const res: any = await fetchMoreLikeThis(messageId as string, appSourceType, installedAppId) setCompletionRes(res.answer) setChildFeedback({ rating: null, @@ -310,7 +311,7 @@ const GenerationItem: FC = ({ )} {/* action buttons */}
- {!isInWebApp && !isInstalledApp && !isResponding && ( + {!isInWebApp && (appSourceType !== AppSourceType.installedApp) && !isResponding && (
@@ -319,12 +320,12 @@ const GenerationItem: FC = ({
)}
- {moreLikeThis && ( + {moreLikeThis && !isTryApp && ( )} - {isShowTextToSpeech && ( + {isShowTextToSpeech && !isTryApp && ( = ({ )} - {isInWebApp && !isWorkflow && ( + {isInWebApp && !isWorkflow && !isTryApp && ( { onSave?.(messageId as string) }}> )}
- {(supportFeedback || isInWebApp) && !isWorkflow && !isError && messageId && ( + {(supportFeedback || isInWebApp) && !isWorkflow && !isTryApp && !isError && messageId && (
{!feedback?.rating && ( <> diff --git a/web/app/components/apps/index.tsx b/web/app/components/apps/index.tsx index b151df1e1f..255bfbf9c5 100644 --- a/web/app/components/apps/index.tsx +++ b/web/app/components/apps/index.tsx @@ -1,7 +1,17 @@ 'use client' +import type { CreateAppModalProps } from '../explore/create-app-modal' +import type { CurrentTryAppParams } from '@/context/explore-context' +import { useCallback, useState } from 'react' import { useTranslation } from 'react-i18next' import { useEducationInit } from '@/app/education-apply/hooks' +import AppListContext from '@/context/app-list-context' import useDocumentTitle from '@/hooks/use-document-title' +import { useImportDSL } from '@/hooks/use-import-dsl' +import { DSLImportMode } from '@/models/app' +import { fetchAppDetail } from '@/service/explore' +import DSLConfirmModal from '../app/create-from-dsl-modal/dsl-confirm-modal' +import CreateAppModal from '../explore/create-app-modal' +import TryApp from '../explore/try-app' import List from './list' const Apps = () => { @@ -10,10 +20,124 @@ const Apps = () => { useDocumentTitle(t('menus.apps', { ns: 'common' })) useEducationInit() + const [currentTryAppParams, setCurrentTryAppParams] = useState(undefined) + const currApp = currentTryAppParams?.app + const [isShowTryAppPanel, setIsShowTryAppPanel] = useState(false) + const hideTryAppPanel = useCallback(() => { + setIsShowTryAppPanel(false) + }, []) + const setShowTryAppPanel = (showTryAppPanel: boolean, params?: CurrentTryAppParams) => { + if (showTryAppPanel) + setCurrentTryAppParams(params) + else + setCurrentTryAppParams(undefined) + setIsShowTryAppPanel(showTryAppPanel) + } + const [isShowCreateModal, setIsShowCreateModal] = useState(false) + + const handleShowFromTryApp = useCallback(() => { + setIsShowCreateModal(true) + }, []) + + const [controlRefreshList, setControlRefreshList] = useState(0) + const [controlHideCreateFromTemplatePanel, setControlHideCreateFromTemplatePanel] = useState(0) + const 
onSuccess = useCallback(() => { + setControlRefreshList(prev => prev + 1) + setControlHideCreateFromTemplatePanel(prev => prev + 1) + }, []) + + const [showDSLConfirmModal, setShowDSLConfirmModal] = useState(false) + + const { + handleImportDSL, + handleImportDSLConfirm, + versions, + isFetching, + } = useImportDSL() + + const onConfirmDSL = useCallback(async () => { + await handleImportDSLConfirm({ + onSuccess, + }) + }, [handleImportDSLConfirm, onSuccess]) + + const onCreate: CreateAppModalProps['onConfirm'] = async ({ + name, + icon_type, + icon, + icon_background, + description, + }) => { + hideTryAppPanel() + + const { export_data } = await fetchAppDetail( + currApp?.app.id as string, + ) + const payload = { + mode: DSLImportMode.YAML_CONTENT, + yaml_content: export_data, + name, + icon_type, + icon, + icon_background, + description, + } + await handleImportDSL(payload, { + onSuccess: () => { + setIsShowCreateModal(false) + }, + onPending: () => { + setShowDSLConfirmModal(true) + }, + }) + } + return ( -
- -
+ +
+ + {isShowTryAppPanel && ( + + )} + + { + showDSLConfirmModal && ( + setShowDSLConfirmModal(false)} + onConfirm={onConfirmDSL} + confirmDisabled={isFetching} + /> + ) + } + + {isShowCreateModal && ( + setIsShowCreateModal(false)} + /> + )} +
+
) } diff --git a/web/app/components/apps/list.tsx b/web/app/components/apps/list.tsx index 095ed3f696..ef814c9e36 100644 --- a/web/app/components/apps/list.tsx +++ b/web/app/components/apps/list.tsx @@ -1,5 +1,6 @@ 'use client' +import type { FC } from 'react' import { RiApps2Line, RiDragDropLine, @@ -55,7 +56,12 @@ const CreateFromDSLModal = dynamic(() => import('@/app/components/app/create-fro ssr: false, }) -const List = () => { +type Props = { + controlRefreshList?: number +} +const List: FC = ({ + controlRefreshList = 0, +}) => { const { t } = useTranslation() const { systemFeatures } = useGlobalPublicStore() const router = useRouter() @@ -140,6 +146,13 @@ const List = () => { refetch, } = useInfiniteAppList(appListQueryParams, { enabled: !isCurrentWorkspaceDatasetOperator }) + useEffect(() => { + if (controlRefreshList > 0) { + refetch() + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [controlRefreshList]) + const anchorRef = useRef(null) const options = [ { value: 'all', text: t('types.all', { ns: 'app' }), icon: }, diff --git a/web/app/components/apps/new-app-card.tsx b/web/app/components/apps/new-app-card.tsx index bfa7af3892..a83abd3d37 100644 --- a/web/app/components/apps/new-app-card.tsx +++ b/web/app/components/apps/new-app-card.tsx @@ -6,10 +6,12 @@ import { useSearchParams, } from 'next/navigation' import * as React from 'react' -import { useMemo, useState } from 'react' +import { useEffect, useMemo, useState } from 'react' import { useTranslation } from 'react-i18next' +import { useContextSelector } from 'use-context-selector' import { CreateFromDSLModalTab } from '@/app/components/app/create-from-dsl-modal' import { FileArrow01, FilePlus01, FilePlus02 } from '@/app/components/base/icons/src/vender/line/files' +import AppListContext from '@/context/app-list-context' import { useProviderContext } from '@/context/provider-context' import { cn } from '@/utils/classnames' @@ -55,6 +57,12 @@ const CreateAppCard = ({ return undefined }, 
[dslUrl]) + const controlHideCreateFromTemplatePanel = useContextSelector(AppListContext, ctx => ctx.controlHideCreateFromTemplatePanel) + useEffect(() => { + if (controlHideCreateFromTemplatePanel > 0) + setShowNewAppTemplateDialog(false) + }, [controlHideCreateFromTemplatePanel]) + return (
{ +const ActionButton = ({ className, size, state = ActionButtonState.Default, styleCss, children, ref, disabled, ...props }: ActionButtonProps) => { return ( + ) + }, +) +CarouselPrevious.displayName = 'CarouselPrevious' + +const CarouselNext = React.forwardRef( + ({ children, ...props }, ref) => { + const { scrollNext, canScrollNext } = useCarousel() + + return ( + + ) + }, +) +CarouselNext.displayName = 'CarouselNext' + +const CarouselDot = React.forwardRef( + ({ children, ...props }, ref) => { + const { api, selectedIndex } = useCarousel() + + return api?.slideNodes().map((_, index) => { + return ( + + ) + }) + }, +) +CarouselDot.displayName = 'CarouselDot' + +const CarouselPlugins = { + Autoplay, +} + +Carousel.Content = CarouselContent +Carousel.Item = CarouselItem +Carousel.Previous = CarouselPrevious +Carousel.Next = CarouselNext +Carousel.Dot = CarouselDot +Carousel.Plugin = CarouselPlugins + +export { Carousel, useCarousel } diff --git a/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx b/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx index 25ff39370f..38a3f6c6b2 100644 --- a/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx +++ b/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx @@ -12,6 +12,7 @@ import SuggestedQuestions from '@/app/components/base/chat/chat/answer/suggested import { Markdown } from '@/app/components/base/markdown' import { InputVarType } from '@/app/components/workflow/types' import { + AppSourceType, fetchSuggestedQuestions, getUrl, stopChatMessageResponding, @@ -52,6 +53,11 @@ const ChatWrapper = () => { initUserVariables, } = useChatWithHistoryContext() + const appSourceType = isInstalledApp ? 
AppSourceType.installedApp : AppSourceType.webApp + + // Semantic variable for better code readability + const isHistoryConversation = !!currentConversationId + const appConfig = useMemo(() => { const config = appParams || {} @@ -79,7 +85,7 @@ const ChatWrapper = () => { inputsForm: inputsForms, }, appPrevChatTree, - taskId => stopChatMessageResponding('', taskId, isInstalledApp, appId), + taskId => stopChatMessageResponding('', taskId, appSourceType, appId), clearChatList, setClearChatList, ) @@ -138,11 +144,11 @@ const ChatWrapper = () => { } handleSend( - getUrl('chat-messages', isInstalledApp, appId || ''), + getUrl('chat-messages', appSourceType, appId || ''), data, { - onGetSuggestedQuestions: responseItemId => fetchSuggestedQuestions(responseItemId, isInstalledApp, appId), - onConversationComplete: currentConversationId ? undefined : handleNewConversationCompleted, + onGetSuggestedQuestions: responseItemId => fetchSuggestedQuestions(responseItemId, appSourceType, appId), + onConversationComplete: isHistoryConversation ? undefined : handleNewConversationCompleted, isPublicAPI: !isInstalledApp, }, ) diff --git a/web/app/components/base/chat/chat-with-history/hooks.tsx b/web/app/components/base/chat/chat-with-history/hooks.tsx index ed1981b530..ad1de38d07 100644 --- a/web/app/components/base/chat/chat-with-history/hooks.tsx +++ b/web/app/components/base/chat/chat-with-history/hooks.tsx @@ -27,6 +27,7 @@ import { useWebAppStore } from '@/context/web-app-context' import { useAppFavicon } from '@/hooks/use-app-favicon' import { changeLanguage } from '@/i18n-config/client' import { + AppSourceType, delConversation, pinConversation, renameConversation, @@ -72,6 +73,7 @@ function getFormattedChatList(messages: any[]) { export const useChatWithHistory = (installedAppInfo?: InstalledApp) => { const isInstalledApp = useMemo(() => !!installedAppInfo, [installedAppInfo]) + const appSourceType = isInstalledApp ? 
AppSourceType.installedApp : AppSourceType.webApp const appInfo = useWebAppStore(s => s.appInfo) const appParams = useWebAppStore(s => s.appParams) const appMeta = useWebAppStore(s => s.appMeta) @@ -177,7 +179,7 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => { }, [currentConversationId, newConversationId]) const { data: appPinnedConversationData } = useShareConversations({ - isInstalledApp, + appSourceType, appId, pinned: true, limit: 100, @@ -190,7 +192,7 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => { data: appConversationData, isLoading: appConversationDataLoading, } = useShareConversations({ - isInstalledApp, + appSourceType, appId, pinned: false, limit: 100, @@ -204,7 +206,7 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => { isLoading: appChatListDataLoading, } = useShareChatList({ conversationId: chatShouldReloadKey, - isInstalledApp, + appSourceType, appId, }, { enabled: !!chatShouldReloadKey, @@ -334,10 +336,11 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => { const { data: newConversation } = useShareConversationName({ conversationId: newConversationId, - isInstalledApp, + appSourceType, appId, }, { refetchOnWindowFocus: false, + enabled: !!newConversationId, }) const [originConversationList, setOriginConversationList] = useState([]) useEffect(() => { @@ -462,16 +465,16 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => { }, [invalidateShareConversations]) const handlePinConversation = useCallback(async (conversationId: string) => { - await pinConversation(isInstalledApp, appId, conversationId) + await pinConversation(appSourceType, appId, conversationId) notify({ type: 'success', message: t('api.success', { ns: 'common' }) }) handleUpdateConversationList() - }, [isInstalledApp, appId, notify, t, handleUpdateConversationList]) + }, [appSourceType, appId, notify, t, handleUpdateConversationList]) const handleUnpinConversation = 
useCallback(async (conversationId: string) => { - await unpinConversation(isInstalledApp, appId, conversationId) + await unpinConversation(appSourceType, appId, conversationId) notify({ type: 'success', message: t('api.success', { ns: 'common' }) }) handleUpdateConversationList() - }, [isInstalledApp, appId, notify, t, handleUpdateConversationList]) + }, [appSourceType, appId, notify, t, handleUpdateConversationList]) const [conversationDeleting, setConversationDeleting] = useState(false) const handleDeleteConversation = useCallback(async ( @@ -485,7 +488,7 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => { try { setConversationDeleting(true) - await delConversation(isInstalledApp, appId, conversationId) + await delConversation(appSourceType, appId, conversationId) notify({ type: 'success', message: t('api.success', { ns: 'common' }) }) onSuccess() } @@ -520,7 +523,7 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => { setConversationRenaming(true) try { - await renameConversation(isInstalledApp, appId, conversationId, newName) + await renameConversation(appSourceType, appId, conversationId, newName) notify({ type: 'success', @@ -550,9 +553,9 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => { }, [handleConversationIdInfoChange, invalidateShareConversations]) const handleFeedback = useCallback(async (messageId: string, feedback: Feedback) => { - await updateFeedback({ url: `/messages/${messageId}/feedbacks`, body: { rating: feedback.rating, content: feedback.content } }, isInstalledApp, appId) + await updateFeedback({ url: `/messages/${messageId}/feedbacks`, body: { rating: feedback.rating, content: feedback.content } }, appSourceType, appId) notify({ type: 'success', message: t('api.success', { ns: 'common' }) }) - }, [isInstalledApp, appId, t, notify]) + }, [appSourceType, appId, t, notify]) return { isInstalledApp, diff --git a/web/app/components/base/chat/chat/answer/index.tsx 
b/web/app/components/base/chat/chat/answer/index.tsx index 7420b84ede..0eea0b5a18 100644 --- a/web/app/components/base/chat/chat/answer/index.tsx +++ b/web/app/components/base/chat/chat/answer/index.tsx @@ -21,6 +21,7 @@ import BasicContent from './basic-content' import More from './more' import Operation from './operation' import SuggestedQuestions from './suggested-questions' +import ToolCalls from './tool-calls' import WorkflowProcessItem from './workflow-process' type AnswerProps = { @@ -61,6 +62,7 @@ const Answer: FC = ({ workflowProcess, allFiles, message_files, + toolCalls, } = item const hasAgentThoughts = !!agent_thoughts?.length @@ -150,10 +152,15 @@ const Answer: FC = ({ data={workflowProcess} item={item} hideProcessDetail={hideProcessDetail} - readonly={hideProcessDetail && appData ? !appData.site.show_workflow_steps : undefined} + readonly={hideProcessDetail && appData ? !appData.site?.show_workflow_steps : undefined} /> ) } + { + !!toolCalls?.length && ( + + ) + } { responding && contentIsEmpty && !hasAgentThoughts && (
diff --git a/web/app/components/base/chat/chat/answer/suggested-questions.tsx b/web/app/components/base/chat/chat/answer/suggested-questions.tsx index 019ed78348..ce997a49b6 100644 --- a/web/app/components/base/chat/chat/answer/suggested-questions.tsx +++ b/web/app/components/base/chat/chat/answer/suggested-questions.tsx @@ -1,6 +1,7 @@ import type { FC } from 'react' import type { ChatItem } from '../../types' import { memo } from 'react' +import { cn } from '@/utils/classnames' import { useChatContext } from '../context' type SuggestedQuestionsProps = { @@ -9,7 +10,7 @@ type SuggestedQuestionsProps = { const SuggestedQuestions: FC = ({ item, }) => { - const { onSend } = useChatContext() + const { onSend, readonly } = useChatContext() const { isOpeningStatement, @@ -24,8 +25,11 @@ const SuggestedQuestions: FC = ({ {suggestedQuestions.filter(q => !!q && q.trim()).map((question, index) => (
onSend?.(question)} + className={cn( + 'system-sm-medium mr-1 mt-1 inline-flex max-w-full shrink-0 cursor-pointer flex-wrap rounded-lg border-[0.5px] border-components-button-secondary-border bg-components-button-secondary-bg px-3.5 py-2 text-components-button-secondary-accent-text shadow-xs last:mr-0 hover:border-components-button-secondary-border-hover hover:bg-components-button-secondary-bg-hover', + readonly && 'pointer-events-none opacity-50', + )} + onClick={() => !readonly && onSend?.(question)} > {question}
diff --git a/web/app/components/base/chat/chat/answer/tool-calls/index.tsx b/web/app/components/base/chat/chat/answer/tool-calls/index.tsx new file mode 100644 index 0000000000..66118006fe --- /dev/null +++ b/web/app/components/base/chat/chat/answer/tool-calls/index.tsx @@ -0,0 +1,23 @@ +import type { ToolCallItem } from '@/types/workflow' +import ToolCallItemComponent from '@/app/components/workflow/run/llm-log/tool-call-item' + +type ToolCallsProps = { + toolCalls: ToolCallItem[] +} +const ToolCalls = ({ + toolCalls, +}: ToolCallsProps) => { + return ( +
+ {toolCalls.map((toolCall: ToolCallItem, index: number) => ( + + ))} +
+ ) +} + +export default ToolCalls diff --git a/web/app/components/base/chat/chat/answer/workflow-process.tsx b/web/app/components/base/chat/chat/answer/workflow-process.tsx index fd8c1daf3e..12f0001bd4 100644 --- a/web/app/components/base/chat/chat/answer/workflow-process.tsx +++ b/web/app/components/base/chat/chat/answer/workflow-process.tsx @@ -45,7 +45,7 @@ const WorkflowProcessItem = ({ return (
{ @@ -239,7 +244,14 @@ const ChatInputArea = ({ ) }
- {showFeatureBar && } + {showFeatureBar && ( + + )} ) } diff --git a/web/app/components/base/chat/chat/chat-input-area/operation.tsx b/web/app/components/base/chat/chat/chat-input-area/operation.tsx index 27e5bf6cad..5bce827754 100644 --- a/web/app/components/base/chat/chat/chat-input-area/operation.tsx +++ b/web/app/components/base/chat/chat/chat-input-area/operation.tsx @@ -8,6 +8,7 @@ import { RiMicLine, RiSendPlane2Fill, } from '@remixicon/react' +import { noop } from 'es-toolkit/function' import { memo } from 'react' import ActionButton from '@/app/components/base/action-button' import Button from '@/app/components/base/button' @@ -15,6 +16,7 @@ import { FileUploaderInChatInput } from '@/app/components/base/file-uploader' import { cn } from '@/utils/classnames' type OperationProps = { + readonly?: boolean fileConfig?: FileUpload speechToTextConfig?: EnableType onShowVoiceInput?: () => void @@ -23,6 +25,7 @@ type OperationProps = { ref?: Ref } const Operation: FC = ({ + readonly, ref, fileConfig, speechToTextConfig, @@ -41,11 +44,12 @@ const Operation: FC = ({ ref={ref} >
- {fileConfig?.enabled && } + {fileConfig?.enabled && } { speechToTextConfig?.enabled && ( @@ -56,7 +60,7 @@ const Operation: FC = ({ + { + !hideEditEntrance && ( + + ) + }
)}
diff --git a/web/app/components/base/file-uploader/file-uploader-in-chat-input/index.tsx b/web/app/components/base/file-uploader/file-uploader-in-chat-input/index.tsx index 1ae328d67a..08bb8b45d1 100644 --- a/web/app/components/base/file-uploader/file-uploader-in-chat-input/index.tsx +++ b/web/app/components/base/file-uploader/file-uploader-in-chat-input/index.tsx @@ -13,21 +13,27 @@ import FileFromLinkOrLocal from '../file-from-link-or-local' type FileUploaderInChatInputProps = { fileConfig: FileUpload + readonly?: boolean } const FileUploaderInChatInput = ({ fileConfig, + readonly, }: FileUploaderInChatInputProps) => { const renderTrigger = useCallback((open: boolean) => { return ( ) }, []) + if (readonly) + return renderTrigger(false) + return ( = ({ if (IS_CE_EDITION) return null - const cspHeader = IS_PROD + const cspHeader = process.env.NODE_ENV === 'production' ? (headers() as unknown as UnsafeUnwrappedHeaders).get('content-security-policy') : null const nonce = extractNonceFromCSP(cspHeader) diff --git a/web/app/components/base/icons/assets/vender/workflow/thinking.svg b/web/app/components/base/icons/assets/vender/workflow/thinking.svg new file mode 100644 index 0000000000..2ec614932f --- /dev/null +++ b/web/app/components/base/icons/assets/vender/workflow/thinking.svg @@ -0,0 +1,4 @@ + + + + diff --git a/web/app/components/base/icons/src/vender/workflow/Thinking.json b/web/app/components/base/icons/src/vender/workflow/Thinking.json new file mode 100644 index 0000000000..6fe807775d --- /dev/null +++ b/web/app/components/base/icons/src/vender/workflow/Thinking.json @@ -0,0 +1,35 @@ +{ + "icon": { + "type": "element", + "isRootNode": true, + "name": "svg", + "attributes": { + "width": "12", + "height": "14", + "viewBox": "0 0 12 14", + "fill": "none", + "xmlns": "http://www.w3.org/2000/svg" + }, + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": "M2 9.49479C0.782372 8.51826 0 7.01768 0 5.33333C0 2.38782 2.38782 0 5.33333 
0C8.20841 0 10.5503 2.27504 10.6608 5.12305L11.888 6.96354C12.0843 7.25794 12.0161 7.65424 11.7331 7.86654L10.6667 8.66602V10C10.6667 10.7364 10.0697 11.3333 9.33333 11.3333H8V13.3333H6.66667V10.6667C6.66667 10.2985 6.96514 10 7.33333 10H9.33333V8.33333C9.33333 8.12349 9.43239 7.92603 9.60026 7.80013L10.4284 7.17838L9.44531 5.70312C9.3723 5.59361 9.33333 5.46495 9.33333 5.33333C9.33333 3.1242 7.54248 1.33333 5.33333 1.33333C3.1242 1.33333 1.33333 3.1242 1.33333 5.33333C1.33333 6.69202 2.0103 7.89261 3.04818 8.61654C3.2269 8.74119 3.33329 8.94552 3.33333 9.16341V13.3333H2V9.49479Z", + "fill": "currentColor" + }, + "children": [] + }, + { + "type": "element", + "name": "path", + "attributes": { + "d": "M6.04367 4.24012L5.6504 3.21778C5.59993 3.08657 5.47393 3 5.33333 3C5.19273 3 5.06673 3.08657 5.01627 3.21778L4.62303 4.24012C4.55531 4.41618 4.41618 4.55531 4.24012 4.62303L3.21778 5.01624C3.08657 5.0667 3 5.19276 3 5.33333C3 5.47393 3.08657 5.59993 3.21778 5.6504L4.24012 6.04367C4.41618 6.11133 4.55531 6.25047 4.62303 6.42653L5.01627 7.44887C5.06673 7.58007 5.19273 7.66667 5.33333 7.66667C5.47393 7.66667 5.59993 7.58007 5.6504 7.44887L6.04367 6.42653C6.11133 6.25047 6.25047 6.11133 6.42653 6.04367L7.44887 5.6504C7.58007 5.59993 7.66667 5.47393 7.66667 5.33333C7.66667 5.19276 7.58007 5.0667 7.44887 5.01624L6.42653 4.62303C6.25047 4.55531 6.11133 4.41618 6.04367 4.24012Z", + "fill": "currentColor" + }, + "children": [] + } + ] + }, + "name": "Thinking" +} diff --git a/web/app/components/base/icons/src/vender/workflow/Thinking.tsx b/web/app/components/base/icons/src/vender/workflow/Thinking.tsx new file mode 100644 index 0000000000..dbe3716a24 --- /dev/null +++ b/web/app/components/base/icons/src/vender/workflow/Thinking.tsx @@ -0,0 +1,20 @@ +// GENERATE BY script +// DON NOT EDIT IT MANUALLY + +import type { IconData } from '@/app/components/base/icons/IconBase' +import * as React from 'react' +import IconBase from '@/app/components/base/icons/IconBase' +import data 
from './Thinking.json' + +const Icon = ( + { + ref, + ...props + }: React.SVGProps & { + ref?: React.RefObject> + }, +) => + +Icon.displayName = 'Thinking' + +export default Icon diff --git a/web/app/components/base/icons/src/vender/workflow/index.ts b/web/app/components/base/icons/src/vender/workflow/index.ts index ec8dce100d..b559d9c6aa 100644 --- a/web/app/components/base/icons/src/vender/workflow/index.ts +++ b/web/app/components/base/icons/src/vender/workflow/index.ts @@ -24,6 +24,7 @@ export { default as ParameterExtractor } from './ParameterExtractor' export { default as QuestionClassifier } from './QuestionClassifier' export { default as Schedule } from './Schedule' export { default as TemplatingTransform } from './TemplatingTransform' +export { default as Thinking } from './Thinking' export { default as TriggerAll } from './TriggerAll' export { default as VariableX } from './VariableX' export { default as WebhookLine } from './WebhookLine' diff --git a/web/app/components/base/image-uploader/text-generation-image-uploader.tsx b/web/app/components/base/image-uploader/text-generation-image-uploader.tsx index d5ad09ff43..569ff559a2 100644 --- a/web/app/components/base/image-uploader/text-generation-image-uploader.tsx +++ b/web/app/components/base/image-uploader/text-generation-image-uploader.tsx @@ -70,10 +70,12 @@ const PasteImageLinkButton: FC = ({ type TextGenerationImageUploaderProps = { settings: VisionSettings onFilesChange: (files: ImageFile[]) => void + disabled?: boolean } const TextGenerationImageUploader: FC = ({ settings, onFilesChange, + disabled, }) => { const { t } = useTranslation() @@ -93,7 +95,7 @@ const TextGenerationImageUploader: FC = ({ const localUpload = ( = settings.number_limits} + disabled={files.length >= settings.number_limits || disabled} limit={+settings.image_file_size_limit!} > { @@ -115,7 +117,7 @@ const TextGenerationImageUploader: FC = ({ const urlUpload = ( = settings.number_limits} + disabled={files.length >= 
settings.number_limits || disabled} /> ) diff --git a/web/app/components/base/tab-header/index.tsx b/web/app/components/base/tab-header/index.tsx index e762e23232..6ba6a354a3 100644 --- a/web/app/components/base/tab-header/index.tsx +++ b/web/app/components/base/tab-header/index.tsx @@ -16,6 +16,8 @@ export type ITabHeaderProps = { items: Item[] value: string itemClassName?: string + itemWrapClassName?: string + activeItemClassName?: string onChange: (value: string) => void } @@ -23,6 +25,8 @@ const TabHeader: FC = ({ items, value, itemClassName, + itemWrapClassName, + activeItemClassName, onChange, }) => { const renderItem = ({ id, name, icon, extra, disabled }: Item) => ( @@ -30,8 +34,9 @@ const TabHeader: FC = ({ key={id} className={cn( 'system-md-semibold relative flex cursor-pointer items-center border-b-2 border-transparent pb-2 pt-2.5', - id === value ? 'border-components-tab-active text-text-primary' : 'text-text-tertiary', + id === value ? cn('border-components-tab-active text-text-primary', activeItemClassName) : 'text-text-tertiary', disabled && 'cursor-not-allowed opacity-30', + itemWrapClassName, )} onClick={() => !disabled && onChange(id)} > diff --git a/web/app/components/base/voice-input/index.tsx b/web/app/components/base/voice-input/index.tsx index 4fa2c774f4..52e3c754f8 100644 --- a/web/app/components/base/voice-input/index.tsx +++ b/web/app/components/base/voice-input/index.tsx @@ -8,7 +8,7 @@ import { useParams, usePathname } from 'next/navigation' import { useCallback, useEffect, useRef, useState } from 'react' import { useTranslation } from 'react-i18next' import { StopCircle } from '@/app/components/base/icons/src/vender/solid/mediaAndDevices' -import { audioToText } from '@/service/share' +import { AppSourceType, audioToText } from '@/service/share' import { cn } from '@/utils/classnames' import s from './index.module.css' import { convertToMp3 } from './utils' @@ -108,7 +108,7 @@ const VoiceInput = ({ } try { - const audioResponse = await 
audioToText(url, isPublic, formData) + const audioResponse = await audioToText(url, isPublic ? AppSourceType.webApp : AppSourceType.installedApp, formData) onConverted(audioResponse.text) onCancel() } diff --git a/web/app/components/browser-initializer.tsx b/web/app/components/browser-initializer.tsx index c2194ca8d4..3e7b3e6df1 100644 --- a/web/app/components/browser-initializer.tsx +++ b/web/app/components/browser-initializer.tsx @@ -5,11 +5,7 @@ if (!Array.prototype.toSpliced) { // eslint-disable-next-line no-extend-native Array.prototype.toSpliced = function (this: T[], start: number, deleteCount?: number, ...items: T[]): T[] { const copy = this.slice() - // When deleteCount is undefined (omitted), delete to end; otherwise let splice handle coercion - if (deleteCount === undefined) - copy.splice(start, copy.length - start, ...items) - else - copy.splice(start, deleteCount, ...items) + copy.splice(start, deleteCount ?? copy.length - start, ...items) return copy } } diff --git a/web/app/components/explore/app-card/index.spec.tsx b/web/app/components/explore/app-card/index.spec.tsx index 769b317929..152eab92a9 100644 --- a/web/app/components/explore/app-card/index.spec.tsx +++ b/web/app/components/explore/app-card/index.spec.tsx @@ -10,6 +10,7 @@ vi.mock('../../app/type-selector', () => ({ })) const createApp = (overrides?: Partial): App => ({ + can_trial: true, app_id: 'app-id', description: 'App description', copyright: '2024', diff --git a/web/app/components/explore/app-card/index.tsx b/web/app/components/explore/app-card/index.tsx index 0b6cd9920d..5d82ab65cc 100644 --- a/web/app/components/explore/app-card/index.tsx +++ b/web/app/components/explore/app-card/index.tsx @@ -1,8 +1,13 @@ 'use client' import type { App } from '@/models/explore' import { PlusIcon } from '@heroicons/react/20/solid' +import { RiInformation2Line } from '@remixicon/react' +import { useCallback } from 'react' import { useTranslation } from 'react-i18next' +import { useContextSelector } 
from 'use-context-selector' import AppIcon from '@/app/components/base/app-icon' +import ExploreContext from '@/context/explore-context' +import { useGlobalPublicStore } from '@/context/global-public-context' import { AppModeEnum } from '@/types/app' import { cn } from '@/utils/classnames' import { AppTypeIcon } from '../../app/type-selector' @@ -23,8 +28,17 @@ const AppCard = ({ }: AppCardProps) => { const { t } = useTranslation() const { app: appBasicInfo } = app + const { systemFeatures } = useGlobalPublicStore() + const isTrialApp = app.can_trial && systemFeatures.enable_trial_app + const setShowTryAppPanel = useContextSelector(ExploreContext, ctx => ctx.setShowTryAppPanel) + const showTryAPPPanel = useCallback((appId: string) => { + return () => { + setShowTryAppPanel?.(true, { appId, app }) + } + }, [setShowTryAppPanel, app]) + return ( -
+
- {isExplore && canCreate && ( + {isExplore && (canCreate || isTrialApp) && ( )} diff --git a/web/app/components/explore/app-list/index.spec.tsx b/web/app/components/explore/app-list/index.spec.tsx index a9e4feeba8..e15f37245f 100644 --- a/web/app/components/explore/app-list/index.spec.tsx +++ b/web/app/components/explore/app-list/index.spec.tsx @@ -102,6 +102,7 @@ const createApp = (overrides: Partial = {}): App => ({ description: overrides.app?.description ?? 'Alpha description', use_icon_as_answer_icon: overrides.app?.use_icon_as_answer_icon ?? false, }, + can_trial: true, app_id: overrides.app_id ?? 'app-1', description: overrides.description ?? 'Alpha description', copyright: overrides.copyright ?? '', @@ -127,6 +128,8 @@ const renderWithContext = (hasEditPermission = false, onSuccess?: () => void) => setInstalledApps: vi.fn(), isFetchingInstalledApps: false, setIsFetchingInstalledApps: vi.fn(), + isShowTryAppPanel: false, + setShowTryAppPanel: vi.fn(), }} > diff --git a/web/app/components/explore/app-list/index.tsx b/web/app/components/explore/app-list/index.tsx index 5b318b780b..1749bde76a 100644 --- a/web/app/components/explore/app-list/index.tsx +++ b/web/app/components/explore/app-list/index.tsx @@ -7,14 +7,17 @@ import { useQueryState } from 'nuqs' import * as React from 'react' import { useCallback, useMemo, useState } from 'react' import { useTranslation } from 'react-i18next' -import { useContext } from 'use-context-selector' +import { useContext, useContextSelector } from 'use-context-selector' import DSLConfirmModal from '@/app/components/app/create-from-dsl-modal/dsl-confirm-modal' +import Button from '@/app/components/base/button' import Input from '@/app/components/base/input' import Loading from '@/app/components/base/loading' import AppCard from '@/app/components/explore/app-card' +import Banner from '@/app/components/explore/banner/banner' import Category from '@/app/components/explore/category' import CreateAppModal from 
'@/app/components/explore/create-app-modal' import ExploreContext from '@/context/explore-context' +import { useGlobalPublicStore } from '@/context/global-public-context' import { useImportDSL } from '@/hooks/use-import-dsl' import { DSLImportMode, @@ -22,6 +25,7 @@ import { import { fetchAppDetail } from '@/service/explore' import { useExploreAppList } from '@/service/use-explore' import { cn } from '@/utils/classnames' +import TryApp from '../try-app' import s from './style.module.css' type AppsProps = { @@ -32,12 +36,19 @@ const Apps = ({ onSuccess, }: AppsProps) => { const { t } = useTranslation() + const { systemFeatures } = useGlobalPublicStore() const { hasEditPermission } = useContext(ExploreContext) const allCategoriesEn = t('apps.allCategories', { ns: 'explore', lng: 'en' }) const [keywords, setKeywords] = useState('') const [searchKeywords, setSearchKeywords] = useState('') + const hasFilterCondition = !!keywords + const handleResetFilter = useCallback(() => { + setKeywords('') + setSearchKeywords('') + }, []) + const { run: handleSearch } = useDebounceFn(() => { setSearchKeywords(keywords) }, { wait: 500 }) @@ -84,6 +95,18 @@ const Apps = ({ isFetching, } = useImportDSL() const [showDSLConfirmModal, setShowDSLConfirmModal] = useState(false) + + const isShowTryAppPanel = useContextSelector(ExploreContext, ctx => ctx.isShowTryAppPanel) + const setShowTryAppPanel = useContextSelector(ExploreContext, ctx => ctx.setShowTryAppPanel) + const hideTryAppPanel = useCallback(() => { + setShowTryAppPanel(false) + }, [setShowTryAppPanel]) + const appParams = useContextSelector(ExploreContext, ctx => ctx.currentApp) + const handleShowFromTryApp = useCallback(() => { + setCurrApp(appParams?.app || null) + setIsShowCreateModal(true) + }, [appParams?.app]) + const onCreate: CreateAppModalProps['onConfirm'] = async ({ name, icon_type, @@ -91,6 +114,8 @@ const Apps = ({ icon_background, description, }) => { + hideTryAppPanel() + const { export_data } = await 
fetchAppDetail( currApp?.app.id as string, ) @@ -137,22 +162,24 @@ const Apps = ({ 'flex h-full flex-col border-l-[0.5px] border-divider-regular', )} > - -
-
{t('apps.title', { ns: 'explore' })}
-
{t('apps.description', { ns: 'explore' })}
-
- + {systemFeatures.enable_explore_banner && ( +
+ +
+ )}
- +
+
{!hasFilterCondition ? t('apps.title', { ns: 'explore' }) : t('apps.resultNum', { num: searchFilteredList.length, ns: 'explore' })}
+ {hasFilterCondition && ( + <> +
+ + + )} +
+
+ +
+
) } + + {isShowTryAppPanel && ( + + )}
) } diff --git a/web/app/components/explore/banner/banner-item.tsx b/web/app/components/explore/banner/banner-item.tsx new file mode 100644 index 0000000000..5c6df39ebc --- /dev/null +++ b/web/app/components/explore/banner/banner-item.tsx @@ -0,0 +1,198 @@ +import type { FC } from 'react' +import { RiArrowRightLine } from '@remixicon/react' +import { useCallback, useEffect, useMemo, useRef, useState } from 'react' +import { useTranslation } from 'react-i18next' +import { useCarousel } from '@/app/components/base/carousel' +import { cn } from '@/utils/classnames' +import { IndicatorButton } from './indicator-button' + +export type BannerData = { + id: string + content: { + 'category': string + 'title': string + 'description': string + 'img-src': string + } + status: 'enabled' | 'disabled' + link: string + created_at: number +} + +type BannerItemProps = { + banner: BannerData + autoplayDelay: number + isPaused?: boolean +} + +const RESPONSIVE_BREAKPOINT = 1200 +const MAX_RESPONSIVE_WIDTH = 600 +const INDICATOR_WIDTH = 20 +const INDICATOR_GAP = 8 +const MIN_VIEW_MORE_WIDTH = 480 + +export const BannerItem: FC = ({ banner, autoplayDelay, isPaused = false }) => { + const { t } = useTranslation() + const { api, selectedIndex } = useCarousel() + const { category, title, description, 'img-src': imgSrc } = banner.content + + const [resetKey, setResetKey] = useState(0) + const textAreaRef = useRef(null) + const [maxWidth, setMaxWidth] = useState(undefined) + + const slideInfo = useMemo(() => { + const slides = api?.slideNodes() ?? [] + const totalSlides = slides.length + const nextIndex = totalSlides > 0 ? 
(selectedIndex + 1) % totalSlides : 0 + return { slides, totalSlides, nextIndex } + }, [api, selectedIndex]) + + const indicatorsWidth = useMemo(() => { + const count = slideInfo.totalSlides + if (count === 0) + return 0 + // Calculate: indicator buttons + gaps + extra spacing (3 * 20px for divider and padding) + return (count + 2) * INDICATOR_WIDTH + (count - 1) * INDICATOR_GAP + }, [slideInfo.totalSlides]) + + const viewMoreStyle = useMemo(() => { + if (!maxWidth) + return undefined + return { + maxWidth: `${maxWidth}px`, + minWidth: indicatorsWidth ? `${Math.min(maxWidth - indicatorsWidth, MIN_VIEW_MORE_WIDTH)}px` : undefined, + } + }, [maxWidth, indicatorsWidth]) + + const responsiveStyle = useMemo( + () => (maxWidth !== undefined ? { maxWidth: `${maxWidth}px` } : undefined), + [maxWidth], + ) + + const incrementResetKey = useCallback(() => setResetKey(prev => prev + 1), []) + + useEffect(() => { + const updateMaxWidth = () => { + if (window.innerWidth < RESPONSIVE_BREAKPOINT && textAreaRef.current) { + const textAreaWidth = textAreaRef.current.offsetWidth + setMaxWidth(Math.min(textAreaWidth, MAX_RESPONSIVE_WIDTH)) + } + else { + setMaxWidth(undefined) + } + } + + updateMaxWidth() + + const resizeObserver = new ResizeObserver(updateMaxWidth) + if (textAreaRef.current) + resizeObserver.observe(textAreaRef.current) + + window.addEventListener('resize', updateMaxWidth) + + return () => { + resizeObserver.disconnect() + window.removeEventListener('resize', updateMaxWidth) + } + }, []) + + useEffect(() => { + incrementResetKey() + }, [selectedIndex, incrementResetKey]) + + const handleBannerClick = useCallback(() => { + incrementResetKey() + if (banner.link) + window.open(banner.link, '_blank', 'noopener,noreferrer') + }, [banner.link, incrementResetKey]) + + const handleIndicatorClick = useCallback((index: number) => { + incrementResetKey() + api?.scrollTo(index) + }, [api, incrementResetKey]) + + return ( +
+ {/* Left content area */} +
+
+ {/* Text section */} +
+ {/* Title area */} +
+

+ {category} +

+

+ {title} +

+
+ {/* Description area */} +
+

+ {description} +

+
+
+ + {/* Actions section */} +
+ {/* View more button */} +
+
+ +
+ + {t('banner.viewMore', { ns: 'explore' })} + +
+ +
+ {/* Slide navigation indicators */} +
+ {slideInfo.slides.map((_: unknown, index: number) => ( + handleIndicatorClick(index)} + /> + ))} +
+
+
+
+
+
+ + {/* Right image area */} +
+ {title} +
+
+ ) +} diff --git a/web/app/components/explore/banner/banner.tsx b/web/app/components/explore/banner/banner.tsx new file mode 100644 index 0000000000..c94f4bc316 --- /dev/null +++ b/web/app/components/explore/banner/banner.tsx @@ -0,0 +1,95 @@ +import type { FC } from 'react' +import type { BannerData } from './banner-item' +import * as React from 'react' +import { useEffect, useMemo, useRef, useState } from 'react' +import { Carousel } from '@/app/components/base/carousel' +import { useLocale } from '@/context/i18n' +import { useGetBanners } from '@/service/use-explore' +import Loading from '../../base/loading' +import { BannerItem } from './banner-item' + +const AUTOPLAY_DELAY = 5000 +const MIN_LOADING_HEIGHT = 168 +const RESIZE_DEBOUNCE_DELAY = 50 + +const LoadingState: FC = () => ( +
+ +
+) + +const Banner: FC = () => { + const locale = useLocale() + const { data: banners, isLoading, isError } = useGetBanners(locale) + const [isHovered, setIsHovered] = useState(false) + const [isResizing, setIsResizing] = useState(false) + const resizeTimerRef = useRef(null) + + const enabledBanners = useMemo( + () => banners?.filter((banner: BannerData) => banner.status === 'enabled') ?? [], + [banners], + ) + + const isPaused = isHovered || isResizing + + // Handle window resize to pause animation + useEffect(() => { + const handleResize = () => { + setIsResizing(true) + + if (resizeTimerRef.current) + clearTimeout(resizeTimerRef.current) + + resizeTimerRef.current = setTimeout(() => { + setIsResizing(false) + }, RESIZE_DEBOUNCE_DELAY) + } + + window.addEventListener('resize', handleResize) + + return () => { + window.removeEventListener('resize', handleResize) + if (resizeTimerRef.current) + clearTimeout(resizeTimerRef.current) + } + }, []) + + if (isLoading) + return + + if (isError || enabledBanners.length === 0) + return null + + return ( + setIsHovered(true)} + onMouseLeave={() => setIsHovered(false)} + > + + {enabledBanners.map((banner: BannerData) => ( + + + + ))} + + + ) +} + +export default React.memo(Banner) diff --git a/web/app/components/explore/banner/indicator-button.tsx b/web/app/components/explore/banner/indicator-button.tsx new file mode 100644 index 0000000000..a674c74e6d --- /dev/null +++ b/web/app/components/explore/banner/indicator-button.tsx @@ -0,0 +1,111 @@ +import type { FC } from 'react' +import { useCallback, useEffect, useRef, useState } from 'react' +import { cn } from '@/utils/classnames' + +type IndicatorButtonProps = { + index: number + selectedIndex: number + isNextSlide: boolean + autoplayDelay: number + resetKey: number + isPaused?: boolean + onClick: () => void +} + +const PROGRESS_MAX = 100 +const DEGREES_PER_PERCENT = 3.6 + +export const IndicatorButton: FC = ({ + index, + selectedIndex, + isNextSlide, + autoplayDelay, + 
resetKey, + isPaused = false, + onClick, +}) => { + const [progress, setProgress] = useState(0) + const frameIdRef = useRef(undefined) + const startTimeRef = useRef(0) + + const isActive = index === selectedIndex + const shouldAnimate = !document.hidden && !isPaused + + useEffect(() => { + if (!isNextSlide) { + setProgress(0) + if (frameIdRef.current) + cancelAnimationFrame(frameIdRef.current) + return + } + + setProgress(0) + startTimeRef.current = Date.now() + + const animate = () => { + if (!document.hidden && !isPaused) { + const elapsed = Date.now() - startTimeRef.current + const newProgress = Math.min((elapsed / autoplayDelay) * PROGRESS_MAX, PROGRESS_MAX) + setProgress(newProgress) + + if (newProgress < PROGRESS_MAX) + frameIdRef.current = requestAnimationFrame(animate) + } + else { + frameIdRef.current = requestAnimationFrame(animate) + } + } + + if (shouldAnimate) + frameIdRef.current = requestAnimationFrame(animate) + + return () => { + if (frameIdRef.current) + cancelAnimationFrame(frameIdRef.current) + } + }, [isNextSlide, autoplayDelay, resetKey, isPaused]) + + const handleClick = useCallback((e: React.MouseEvent) => { + e.stopPropagation() + onClick() + }, [onClick]) + + const progressDegrees = progress * DEGREES_PER_PERCENT + + return ( + + ) +} diff --git a/web/app/components/explore/category.tsx b/web/app/components/explore/category.tsx index 97a9ca92b3..47c2a4e3a7 100644 --- a/web/app/components/explore/category.tsx +++ b/web/app/components/explore/category.tsx @@ -29,7 +29,7 @@ const Category: FC = ({ const isAllCategories = !list.includes(value as AppCategory) || value === allCategoriesEn const itemClassName = (isSelected: boolean) => cn( - 'flex h-[32px] cursor-pointer items-center rounded-lg border-[0.5px] border-transparent px-3 py-[7px] font-medium leading-[18px] text-text-tertiary hover:bg-components-main-nav-nav-button-bg-active', + 'system-sm-medium flex h-7 cursor-pointer items-center rounded-lg border border-transparent px-3 
text-text-tertiary hover:bg-components-main-nav-nav-button-bg-active', isSelected && 'border-components-main-nav-nav-button-border bg-components-main-nav-nav-button-bg-active text-components-main-nav-nav-button-text-active shadow-xs', ) diff --git a/web/app/components/explore/index.tsx b/web/app/components/explore/index.tsx index 30132eea66..0b5e18a1de 100644 --- a/web/app/components/explore/index.tsx +++ b/web/app/components/explore/index.tsx @@ -1,5 +1,6 @@ 'use client' import type { FC } from 'react' +import type { CurrentTryAppParams } from '@/context/explore-context' import type { InstalledApp } from '@/models/explore' import { useRouter } from 'next/navigation' import * as React from 'react' @@ -41,6 +42,16 @@ const Explore: FC = ({ return router.replace('/datasets') }, [isCurrentWorkspaceDatasetOperator]) + const [currentTryAppParams, setCurrentTryAppParams] = useState(undefined) + const [isShowTryAppPanel, setIsShowTryAppPanel] = useState(false) + const setShowTryAppPanel = (showTryAppPanel: boolean, params?: CurrentTryAppParams) => { + if (showTryAppPanel) + setCurrentTryAppParams(params) + else + setCurrentTryAppParams(undefined) + setIsShowTryAppPanel(showTryAppPanel) + } + return (
= ({ setInstalledApps, isFetchingInstalledApps, setIsFetchingInstalledApps, + currentApp: currentTryAppParams, + isShowTryAppPanel, + setShowTryAppPanel, } } > diff --git a/web/app/components/explore/installed-app/index.tsx b/web/app/components/explore/installed-app/index.tsx index def66c0260..7366057445 100644 --- a/web/app/components/explore/installed-app/index.tsx +++ b/web/app/components/explore/installed-app/index.tsx @@ -1,5 +1,6 @@ 'use client' import type { FC } from 'react' +import type { AccessMode } from '@/models/access-control' import type { AppData } from '@/models/share' import * as React from 'react' import { useEffect } from 'react' @@ -62,8 +63,8 @@ const InstalledApp: FC = ({ if (appMeta) updateWebAppMeta(appMeta) if (webAppAccessMode) - updateWebAppAccessMode(webAppAccessMode.accessMode) - updateUserCanAccessApp(Boolean(userCanAccessApp && userCanAccessApp?.result)) + updateWebAppAccessMode((webAppAccessMode as { accessMode: AccessMode }).accessMode) + updateUserCanAccessApp(Boolean(userCanAccessApp && (userCanAccessApp as { result: boolean })?.result)) }, [installedApp, appMeta, appParams, updateAppInfo, updateAppParams, updateUserCanAccessApp, updateWebAppMeta, userCanAccessApp, webAppAccessMode, updateWebAppAccessMode]) if (appParamsError) { diff --git a/web/app/components/explore/sidebar/app-nav-item/index.tsx b/web/app/components/explore/sidebar/app-nav-item/index.tsx index 3347efeb3f..08558578f6 100644 --- a/web/app/components/explore/sidebar/app-nav-item/index.tsx +++ b/web/app/components/explore/sidebar/app-nav-item/index.tsx @@ -56,7 +56,7 @@ export default function AppNavItem({ <>
-
{name}
+
{name}
e.stopPropagation()}> { setInstalledApps: vi.fn(), isFetchingInstalledApps: false, setIsFetchingInstalledApps: vi.fn(), - }} + } as any} > , diff --git a/web/app/components/explore/sidebar/index.tsx b/web/app/components/explore/sidebar/index.tsx index 1257886165..225b58199b 100644 --- a/web/app/components/explore/sidebar/index.tsx +++ b/web/app/components/explore/sidebar/index.tsx @@ -1,5 +1,7 @@ 'use client' import type { FC } from 'react' +import { RiAppsFill, RiExpandRightLine, RiLayoutLeft2Line } from '@remixicon/react' +import { useBoolean } from 'ahooks' import Link from 'next/link' import { useSelectedLayoutSegments } from 'next/navigation' import * as React from 'react' @@ -14,6 +16,7 @@ import { useGetInstalledApps, useUninstallApp, useUpdateAppPinStatus } from '@/s import { cn } from '@/utils/classnames' import Toast from '../../base/toast' import Item from './app-nav-item' +import NoApps from './no-apps' const SelectedDiscoveryIcon = () => ( @@ -45,6 +48,9 @@ const SideBar: FC = ({ const media = useBreakpoints() const isMobile = media === MediaType.mobile + const [isFold, { + toggle: toggleIsFold, + }] = useBoolean(false) const [showConfirm, setShowConfirm] = useState(false) const [currId, setCurrId] = useState('') @@ -84,22 +90,31 @@ const SideBar: FC = ({ const pinnedAppsCount = installedApps.filter(({ is_pinned }) => is_pinned).length return ( -
+
- {isDiscoverySelected ? : } - {!isMobile &&
{t('sidebar.discovery', { ns: 'explore' })}
} +
+ +
+ {!isMobile && !isFold &&
{t('sidebar.title', { ns: 'explore' })}
}
+ + {installedApps.length === 0 && !isMobile && !isFold + && ( +
+ +
+ )} + {installedApps.length > 0 && ( -
-

{t('sidebar.workspace', { ns: 'explore' })}

+
+ {!isMobile && !isFold &&

{t('sidebar.webApps', { ns: 'explore' })}

}
= ({ {installedApps.map(({ id, is_pinned, uninstallable, app: { name, icon_type, icon, icon_url, icon_background } }, index) => ( = ({
)} + + {!isMobile && ( +
+ {isFold + ? + : ( + + )} +
+ )} + {showConfirm && ( { + const { t } = useTranslation() + const { theme } = useTheme() + return ( +
+
+
{t(`${i18nPrefix}.title`, { ns: 'explore' })}
+
{t(`${i18nPrefix}.description`, { ns: 'explore' })}
+ {t(`${i18nPrefix}.learnMore`, { ns: 'explore' })} +
+ ) +} +export default React.memo(NoApps) diff --git a/web/app/components/explore/sidebar/no-apps/no-web-apps-dark.png b/web/app/components/explore/sidebar/no-apps/no-web-apps-dark.png new file mode 100644 index 0000000000..e153686fcd Binary files /dev/null and b/web/app/components/explore/sidebar/no-apps/no-web-apps-dark.png differ diff --git a/web/app/components/explore/sidebar/no-apps/no-web-apps-light.png b/web/app/components/explore/sidebar/no-apps/no-web-apps-light.png new file mode 100644 index 0000000000..2416b957d2 Binary files /dev/null and b/web/app/components/explore/sidebar/no-apps/no-web-apps-light.png differ diff --git a/web/app/components/explore/sidebar/no-apps/style.module.css b/web/app/components/explore/sidebar/no-apps/style.module.css new file mode 100644 index 0000000000..ad3787ce2b --- /dev/null +++ b/web/app/components/explore/sidebar/no-apps/style.module.css @@ -0,0 +1,7 @@ +.light { + background-image: url('./no-web-apps-light.png'); +} + +.dark { + background-image: url('./no-web-apps-dark.png'); +} diff --git a/web/app/components/explore/try-app/app-info/index.tsx b/web/app/components/explore/try-app/app-info/index.tsx new file mode 100644 index 0000000000..eab265bd04 --- /dev/null +++ b/web/app/components/explore/try-app/app-info/index.tsx @@ -0,0 +1,95 @@ +'use client' +import type { FC } from 'react' +import type { TryAppInfo } from '@/service/try-app' +import { RiAddLine } from '@remixicon/react' +import * as React from 'react' +import { useTranslation } from 'react-i18next' +import { AppTypeIcon } from '@/app/components/app/type-selector' +import AppIcon from '@/app/components/base/app-icon' +import Button from '@/app/components/base/button' +import { cn } from '@/utils/classnames' +import useGetRequirements from './use-get-requirements' + +type Props = { + appId: string + appDetail: TryAppInfo + category?: string + className?: string + onCreate: () => void +} + +const headerClassName = 'system-sm-semibold-uppercase 
text-text-secondary mb-3' + +const AppInfo: FC = ({ + appId, + className, + category, + appDetail, + onCreate, +}) => { + const { t } = useTranslation() + const mode = appDetail?.mode + const { requirements } = useGetRequirements({ appDetail, appId }) + return ( +
+ {/* name and icon */} +
+
+ + +
+
+
+
{appDetail.name}
+
+
+ {mode === 'advanced-chat' &&
{t('types.advanced', { ns: 'app' }).toUpperCase()}
} + {mode === 'chat' &&
{t('types.chatbot', { ns: 'app' }).toUpperCase()}
} + {mode === 'agent-chat' &&
{t('types.agent', { ns: 'app' }).toUpperCase()}
} + {mode === 'workflow' &&
{t('types.workflow', { ns: 'app' }).toUpperCase()}
} + {mode === 'completion' &&
{t('types.completion', { ns: 'app' }).toUpperCase()}
} +
+
+
+ {appDetail.description && ( +
{appDetail.description}
+ )} + + + {category && ( +
+
{t('tryApp.category', { ns: 'explore' })}
+
{category}
+
+ )} + {requirements.length > 0 && ( +
+
{t('tryApp.requirements', { ns: 'explore' })}
+
+ {requirements.map(item => ( +
+
+
{item.name}
+
+ ))} +
+
+ )} + +
+ ) +} +export default React.memo(AppInfo) diff --git a/web/app/components/explore/try-app/app-info/use-get-requirements.ts b/web/app/components/explore/try-app/app-info/use-get-requirements.ts new file mode 100644 index 0000000000..976989be73 --- /dev/null +++ b/web/app/components/explore/try-app/app-info/use-get-requirements.ts @@ -0,0 +1,78 @@ +import type { LLMNodeType } from '@/app/components/workflow/nodes/llm/types' +import type { ToolNodeType } from '@/app/components/workflow/nodes/tool/types' +import type { TryAppInfo } from '@/service/try-app' +import type { AgentTool } from '@/types/app' +import { uniqBy } from 'es-toolkit/compat' +import { BlockEnum } from '@/app/components/workflow/types' +import { MARKETPLACE_API_PREFIX } from '@/config' +import { useGetTryAppFlowPreview } from '@/service/use-try-app' + +type Params = { + appDetail: TryAppInfo + appId: string +} + +type RequirementItem = { + name: string + iconUrl: string +} +const getIconUrl = (provider: string, tool: string) => { + return `${MARKETPLACE_API_PREFIX}/plugins/${provider}/${tool}/icon` +} + +const useGetRequirements = ({ appDetail, appId }: Params) => { + const isBasic = ['chat', 'completion', 'agent-chat'].includes(appDetail.mode) + const isAgent = appDetail.mode === 'agent-chat' + const isAdvanced = !isBasic + const { data: flowData } = useGetTryAppFlowPreview(appId, isBasic) + + const requirements: RequirementItem[] = [] + if (isBasic) { + const modelProviderAndName = appDetail.model_config.model.provider.split('/') + const name = appDetail.model_config.model.provider.split('/').pop() || '' + requirements.push({ + name, + iconUrl: getIconUrl(modelProviderAndName[0], modelProviderAndName[1]), + }) + } + if (isAgent) { + requirements.push(...appDetail.model_config.agent_mode.tools.filter(data => (data as AgentTool).enabled).map((data) => { + const tool = data as AgentTool + const modelProviderAndName = tool.provider_id.split('/') + return { + name: tool.tool_label, + iconUrl: 
getIconUrl(modelProviderAndName[0], modelProviderAndName[1]), + } + })) + } + if (isAdvanced && flowData && flowData?.graph?.nodes?.length > 0) { + const nodes = flowData.graph.nodes + const llmNodes = nodes.filter(node => node.data.type === BlockEnum.LLM) + requirements.push(...llmNodes.map((node) => { + const data = node.data as LLMNodeType + const modelProviderAndName = data.model.provider.split('/') + return { + name: data.model.name, + iconUrl: getIconUrl(modelProviderAndName[0], modelProviderAndName[1]), + } + })) + + const toolNodes = nodes.filter(node => node.data.type === BlockEnum.Tool) + requirements.push(...toolNodes.map((node) => { + const data = node.data as ToolNodeType + const toolProviderAndName = data.provider_id.split('/') + return { + name: data.tool_label, + iconUrl: getIconUrl(toolProviderAndName[0], toolProviderAndName[1]), + } + })) + } + + const uniqueRequirements = uniqBy(requirements, 'name') + + return { + requirements: uniqueRequirements, + } +} + +export default useGetRequirements diff --git a/web/app/components/explore/try-app/app/chat.tsx b/web/app/components/explore/try-app/app/chat.tsx new file mode 100644 index 0000000000..25a9093f75 --- /dev/null +++ b/web/app/components/explore/try-app/app/chat.tsx @@ -0,0 +1,101 @@ +'use client' +import type { FC } from 'react' +import type { TryAppInfo } from '@/service/try-app' +import { RiResetLeftLine } from '@remixicon/react' +import { useBoolean } from 'ahooks' +import * as React from 'react' +import { useEffect } from 'react' +import { useTranslation } from 'react-i18next' +import ActionButton from '@/app/components/base/action-button' +import Alert from '@/app/components/base/alert' +import AppIcon from '@/app/components/base/app-icon' +import ChatWrapper from '@/app/components/base/chat/embedded-chatbot/chat-wrapper' +import { + EmbeddedChatbotContext, +} from '@/app/components/base/chat/embedded-chatbot/context' +import { + useEmbeddedChatbot, +} from 
'@/app/components/base/chat/embedded-chatbot/hooks' +import ViewFormDropdown from '@/app/components/base/chat/embedded-chatbot/inputs-form/view-form-dropdown' +import Tooltip from '@/app/components/base/tooltip' +import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints' +import { AppSourceType } from '@/service/share' +import { cn } from '@/utils/classnames' +import { useThemeContext } from '../../../base/chat/embedded-chatbot/theme/theme-context' + +type Props = { + appId: string + appDetail: TryAppInfo + className: string +} + +const TryApp: FC = ({ + appId, + appDetail, + className, +}) => { + const { t } = useTranslation() + const media = useBreakpoints() + const isMobile = media === MediaType.mobile + const themeBuilder = useThemeContext() + const { removeConversationIdInfo, ...chatData } = useEmbeddedChatbot(AppSourceType.tryApp, appId) + const currentConversationId = chatData.currentConversationId + const inputsForms = chatData.inputsForms + useEffect(() => { + if (appId) + removeConversationIdInfo(appId) + }, [appId]) + const [isHideTryNotice, { + setTrue: hideTryNotice, + }] = useBoolean(false) + + const handleNewConversation = () => { + removeConversationIdInfo(appId) + chatData.handleNewConversation() + } + return ( + +
+
+
+ +
{appDetail.name}
+
+
+ {currentConversationId && ( + + + + + + )} + {currentConversationId && inputsForms.length > 0 && ( + + )} +
+
+
+ {!isHideTryNotice && ( + + )} + +
+
+
+ ) +} +export default React.memo(TryApp) diff --git a/web/app/components/explore/try-app/app/index.tsx b/web/app/components/explore/try-app/app/index.tsx new file mode 100644 index 0000000000..f5dc14510d --- /dev/null +++ b/web/app/components/explore/try-app/app/index.tsx @@ -0,0 +1,44 @@ +'use client' +import type { FC } from 'react' +import type { AppData } from '@/models/share' +import type { TryAppInfo } from '@/service/try-app' +import * as React from 'react' +import useDocumentTitle from '@/hooks/use-document-title' +import Chat from './chat' +import TextGeneration from './text-generation' + +type Props = { + appId: string + appDetail: TryAppInfo +} + +const TryApp: FC = ({ + appId, + appDetail, +}) => { + const mode = appDetail?.mode + const isChat = ['chat', 'advanced-chat', 'agent-chat'].includes(mode!) + const isCompletion = !isChat + + useDocumentTitle(appDetail?.site?.title || '') + return ( +
+ {isChat && ( + + )} + {isCompletion && ( + + )} +
+ ) +} +export default React.memo(TryApp) diff --git a/web/app/components/explore/try-app/app/text-generation.tsx b/web/app/components/explore/try-app/app/text-generation.tsx new file mode 100644 index 0000000000..350166329d --- /dev/null +++ b/web/app/components/explore/try-app/app/text-generation.tsx @@ -0,0 +1,261 @@ +'use client' +import type { FC } from 'react' +import type { Task } from '../../../share/text-generation/types' +import type { MoreLikeThisConfig, PromptConfig, TextToSpeechConfig } from '@/models/debug' +import type { AppData, SiteInfo } from '@/models/share' +import type { VisionFile, VisionSettings } from '@/types/app' +import { useBoolean } from 'ahooks' +import { noop } from 'es-toolkit/function' +import * as React from 'react' +import { useCallback, useEffect, useRef, useState } from 'react' +import { useTranslation } from 'react-i18next' +import Alert from '@/app/components/base/alert' +import AppIcon from '@/app/components/base/app-icon' +import Loading from '@/app/components/base/loading' +import Res from '@/app/components/share/text-generation/result' +import { TaskStatus } from '@/app/components/share/text-generation/types' +import { appDefaultIconBackground } from '@/config' +import { useWebAppStore } from '@/context/web-app-context' +import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints' +import { AppSourceType } from '@/service/share' +import { useGetTryAppParams } from '@/service/use-try-app' +import { Resolution, TransferMethod } from '@/types/app' +import { cn } from '@/utils/classnames' +import { userInputsFormToPromptVariables } from '@/utils/model-config' +import RunOnce from '../../../share/text-generation/run-once' + +type Props = { + appId: string + className?: string + isWorkflow?: boolean + appData: AppData | null +} + +const TextGeneration: FC = ({ + appId, + className, + isWorkflow, + appData, +}) => { + const { t } = useTranslation() + const media = useBreakpoints() + const isPC = media === MediaType.pc + + 
const [inputs, doSetInputs] = useState>({}) + const inputsRef = useRef>(inputs) + const setInputs = useCallback((newInputs: Record) => { + doSetInputs(newInputs) + inputsRef.current = newInputs + }, []) + + const updateAppInfo = useWebAppStore(s => s.updateAppInfo) + const { data: tryAppParams } = useGetTryAppParams(appId) + + const updateAppParams = useWebAppStore(s => s.updateAppParams) + const appParams = useWebAppStore(s => s.appParams) + const [siteInfo, setSiteInfo] = useState(null) + const [promptConfig, setPromptConfig] = useState(null) + const [customConfig, setCustomConfig] = useState | null>(null) + const [moreLikeThisConfig, setMoreLikeThisConfig] = useState(null) + const [textToSpeechConfig, setTextToSpeechConfig] = useState(null) + const [controlSend, setControlSend] = useState(0) + const [visionConfig, setVisionConfig] = useState({ + enabled: false, + number_limits: 2, + detail: Resolution.low, + transfer_methods: [TransferMethod.local_file], + }) + const [completionFiles, setCompletionFiles] = useState([]) + const [isShowResultPanel, { setTrue: doShowResultPanel, setFalse: hideResultPanel }] = useBoolean(false) + const showResultPanel = () => { + // fix: useClickAway hideResSidebar will close sidebar + setTimeout(() => { + doShowResultPanel() + }, 0) + } + + const handleSend = () => { + setControlSend(Date.now()) + showResultPanel() + } + + const [resultExisted, setResultExisted] = useState(false) + + useEffect(() => { + if (!appData) + return + updateAppInfo(appData) + }, [appData, updateAppInfo]) + + useEffect(() => { + if (!tryAppParams) + return + updateAppParams(tryAppParams) + }, [tryAppParams, updateAppParams]) + + useEffect(() => { + (async () => { + if (!appData || !appParams) + return + const { site: siteInfo, custom_config } = appData + setSiteInfo(siteInfo as SiteInfo) + setCustomConfig(custom_config) + + const { user_input_form, more_like_this, file_upload, text_to_speech }: any = appParams + setVisionConfig({ + // legacy of image 
upload compatible + ...file_upload, + transfer_methods: file_upload?.allowed_file_upload_methods || file_upload?.allowed_upload_methods, + // legacy of image upload compatible + image_file_size_limit: appParams?.system_parameters.image_file_size_limit, + fileUploadConfig: appParams?.system_parameters, + } as any) + const prompt_variables = userInputsFormToPromptVariables(user_input_form) + setPromptConfig({ + prompt_template: '', // placeholder for future + prompt_variables, + } as PromptConfig) + setMoreLikeThisConfig(more_like_this) + setTextToSpeechConfig(text_to_speech) + })() + }, [appData, appParams]) + + const [isCompleted, setIsCompleted] = useState(false) + const handleCompleted = useCallback(() => { + setIsCompleted(true) + }, []) + const [isHideTryNotice, { + setTrue: hideTryNotice, + }] = useBoolean(false) + + const renderRes = (task?: Task) => ( + setResultExisted(true)} + /> + ) + + const renderResWrap = ( +
+
+ {isCompleted && !isHideTryNotice && ( + + )} + {renderRes()} +
+
+ ) + + if (!siteInfo || !promptConfig) { + return ( +
+ +
+ ) + } + + return ( +
+ {/* Left */} +
+ {/* Header */} +
+
+ +
{siteInfo.title}
+
+ {siteInfo.description && ( +
{siteInfo.description}
+ )} +
+ {/* form */} +
+ +
+
+ + {/* Result */} +
+ {!isPC && ( +
{ + if (isShowResultPanel) + hideResultPanel() + else + showResultPanel() + }} + > +
+
+ )} + {renderResWrap} +
+
+ ) +} + +export default React.memo(TextGeneration) diff --git a/web/app/components/explore/try-app/index.tsx b/web/app/components/explore/try-app/index.tsx new file mode 100644 index 0000000000..edf3ae04fa --- /dev/null +++ b/web/app/components/explore/try-app/index.tsx @@ -0,0 +1,73 @@ +'use client' +import type { FC } from 'react' +import { RiCloseLine } from '@remixicon/react' +import * as React from 'react' +import { useState } from 'react' +import Loading from '@/app/components/base/loading' +import Modal from '@/app/components/base/modal/index' +import { useGetTryAppInfo } from '@/service/use-try-app' +import Button from '../../base/button' +import App from './app' +import AppInfo from './app-info' +import Preview from './preview' +import Tab, { TypeEnum } from './tab' + +type Props = { + appId: string + category?: string + onClose: () => void + onCreate: () => void +} + +const TryApp: FC = ({ + appId, + category, + onClose, + onCreate, +}) => { + const [type, setType] = useState(TypeEnum.TRY) + const { data: appDetail, isLoading } = useGetTryAppInfo(appId) + + return ( + + {isLoading ? ( +
+ +
+ ) : ( +
+
+ + +
+ {/* Main content */} +
+ {type === TypeEnum.TRY ? : } + +
+
+ )} +
+ ) +} +export default React.memo(TryApp) diff --git a/web/app/components/explore/try-app/preview/basic-app-preview.tsx b/web/app/components/explore/try-app/preview/basic-app-preview.tsx new file mode 100644 index 0000000000..e1e7465e46 --- /dev/null +++ b/web/app/components/explore/try-app/preview/basic-app-preview.tsx @@ -0,0 +1,364 @@ +'use client' +import type { FC } from 'react' +import type { Features as FeaturesData, FileUpload } from '@/app/components/base/features/types' +import type { FormValue } from '@/app/components/header/account-setting/model-provider-page/declarations' +import type { ModelConfig } from '@/models/debug' +import type { ModelConfig as BackendModelConfig, PromptVariable } from '@/types/app' +import { noop } from 'es-toolkit/function' +import { clone } from 'es-toolkit/object' +import * as React from 'react' +import { useMemo, useState } from 'react' +import Config from '@/app/components/app/configuration/config' +import Debug from '@/app/components/app/configuration/debug' +import { FeaturesProvider } from '@/app/components/base/features' +import Loading from '@/app/components/base/loading' +import { FILE_EXTS } from '@/app/components/base/prompt-editor/constants' +import { ModelFeatureEnum } from '@/app/components/header/account-setting/model-provider-page/declarations' +import { SupportUploadFileTypes } from '@/app/components/workflow/types' +import { ANNOTATION_DEFAULT, DEFAULT_AGENT_SETTING, DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG } from '@/config' +import ConfigContext from '@/context/debug-configuration' +import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints' +import { PromptMode } from '@/models/debug' +import { useAllToolProviders } from '@/service/use-tools' +import { useGetTryAppDataSets, useGetTryAppInfo } from '@/service/use-try-app' +import { ModelModeType, Resolution, TransferMethod } from '@/types/app' +import { correctModelProvider, correctToolProvider } from '@/utils' +import { 
userInputsFormToPromptVariables } from '@/utils/model-config' +import { basePath } from '@/utils/var' +import { useTextGenerationCurrentProviderAndModelAndModelList } from '../../../header/account-setting/model-provider-page/hooks' + +type Props = { + appId: string +} + +const defaultModelConfig = { + provider: 'langgenius/openai/openai', + model_id: 'gpt-3.5-turbo', + mode: ModelModeType.unset, + configs: { + prompt_template: '', + prompt_variables: [] as PromptVariable[], + }, + more_like_this: null, + opening_statement: '', + suggested_questions: [], + sensitive_word_avoidance: null, + speech_to_text: null, + text_to_speech: null, + file_upload: null, + suggested_questions_after_answer: null, + retriever_resource: null, + annotation_reply: null, + dataSets: [], + agentConfig: DEFAULT_AGENT_SETTING, +} +const BasicAppPreview: FC = ({ + appId, +}) => { + const media = useBreakpoints() + const isMobile = media === MediaType.mobile + + const { data: appDetail, isLoading: isLoadingAppDetail } = useGetTryAppInfo(appId) + const { data: collectionListFromServer, isLoading: isLoadingToolProviders } = useAllToolProviders() + const collectionList = collectionListFromServer?.map((item) => { + return { + ...item, + icon: basePath && typeof item.icon == 'string' && !item.icon.includes(basePath) ? 
`${basePath}${item.icon}` : item.icon, + } + }) + const datasetIds = (() => { + if (isLoadingAppDetail) + return [] + const modelConfig = appDetail?.model_config + if (!modelConfig) + return [] + let datasets: any = null + + if (modelConfig.agent_mode?.tools?.find(({ dataset }: any) => dataset?.enabled)) + datasets = modelConfig.agent_mode?.tools.filter(({ dataset }: any) => dataset?.enabled) + // new dataset struct + else if (modelConfig.dataset_configs.datasets?.datasets?.length > 0) + datasets = modelConfig.dataset_configs?.datasets?.datasets + + if (datasets?.length && datasets?.length > 0) + return datasets.map(({ dataset }: any) => dataset.id) + + return [] + })() + const { data: dataSetData, isLoading: isLoadingDatasets } = useGetTryAppDataSets(appId, datasetIds) + const dataSets = dataSetData?.data || [] + const isLoading = isLoadingAppDetail || isLoadingDatasets || isLoadingToolProviders + + const modelConfig: ModelConfig = ((modelConfig?: BackendModelConfig) => { + if (isLoading || !modelConfig) + return defaultModelConfig + + const model = modelConfig.model + + const newModelConfig = { + provider: correctModelProvider(model.provider), + model_id: model.name, + mode: model.mode, + configs: { + prompt_template: modelConfig.pre_prompt || '', + prompt_variables: userInputsFormToPromptVariables( + [ + ...(modelConfig.user_input_form as any), + ...( + modelConfig.external_data_tools?.length + ? 
modelConfig.external_data_tools.map((item: any) => { + return { + external_data_tool: { + variable: item.variable as string, + label: item.label as string, + enabled: item.enabled, + type: item.type as string, + config: item.config, + required: true, + icon: item.icon, + icon_background: item.icon_background, + }, + } + }) + : [] + ), + ], + modelConfig.dataset_query_variable, + ), + }, + more_like_this: modelConfig.more_like_this, + opening_statement: modelConfig.opening_statement, + suggested_questions: modelConfig.suggested_questions, + sensitive_word_avoidance: modelConfig.sensitive_word_avoidance, + speech_to_text: modelConfig.speech_to_text, + text_to_speech: modelConfig.text_to_speech, + file_upload: modelConfig.file_upload, + suggested_questions_after_answer: modelConfig.suggested_questions_after_answer, + retriever_resource: modelConfig.retriever_resource, + annotation_reply: modelConfig.annotation_reply, + external_data_tools: modelConfig.external_data_tools, + dataSets, + agentConfig: appDetail?.mode === 'agent-chat' ? { + max_iteration: DEFAULT_AGENT_SETTING.max_iteration, + ...modelConfig.agent_mode, + // remove dataset + enabled: true, // modelConfig.agent_mode?.enabled is not correct. old app: the value of app with dataset's is always true + tools: modelConfig.agent_mode?.tools.filter((tool: any) => { + return !tool.dataset + }).map((tool: any) => { + const toolInCollectionList = collectionList?.find(c => tool.provider_id === c.id) + return { + ...tool, + isDeleted: appDetail?.deleted_tools?.some((deletedTool: any) => deletedTool.id === tool.id && deletedTool.tool_name === tool.tool_name), + notAuthor: toolInCollectionList?.is_team_authorization === false, + ...(tool.provider_type === 'builtin' + ? 
{ + provider_id: correctToolProvider(tool.provider_name, !!toolInCollectionList), + provider_name: correctToolProvider(tool.provider_name, !!toolInCollectionList), + } + : {}), + } + }), + } : DEFAULT_AGENT_SETTING, + } + return (newModelConfig as any) + })(appDetail?.model_config) + const mode = appDetail?.mode + // const isChatApp = ['chat', 'advanced-chat', 'agent-chat'].includes(mode!) + + // chat configuration + const promptMode = modelConfig?.prompt_type === PromptMode.advanced ? PromptMode.advanced : PromptMode.simple + const isAdvancedMode = promptMode === PromptMode.advanced + const isAgent = mode === 'agent-chat' + const chatPromptConfig = isAdvancedMode ? (modelConfig?.chat_prompt_config || clone(DEFAULT_CHAT_PROMPT_CONFIG)) : undefined + const suggestedQuestions = modelConfig?.suggested_questions || [] + const moreLikeThisConfig = modelConfig?.more_like_this || { enabled: false } + const suggestedQuestionsAfterAnswerConfig = modelConfig?.suggested_questions_after_answer || { enabled: false } + const speechToTextConfig = modelConfig?.speech_to_text || { enabled: false } + const textToSpeechConfig = modelConfig?.text_to_speech || { enabled: false, voice: '', language: '' } + const citationConfig = modelConfig?.retriever_resource || { enabled: false } + const annotationConfig = modelConfig?.annotation_reply || { + id: '', + enabled: false, + score_threshold: ANNOTATION_DEFAULT.score_threshold, + embedding_model: { + embedding_provider_name: '', + embedding_model_name: '', + }, + } + const moderationConfig = modelConfig?.sensitive_word_avoidance || { enabled: false } + // completion configuration + const completionPromptConfig = modelConfig?.completion_prompt_config || clone(DEFAULT_COMPLETION_PROMPT_CONFIG) as any + + // prompt & model config + const inputs = {} + const query = '' + const completionParams = useState({}) + + const { + currentModel: currModel, + } = useTextGenerationCurrentProviderAndModelAndModelList( + { + provider: modelConfig.provider, + 
model: modelConfig.model_id, + }, + ) + + const isShowVisionConfig = !!currModel?.features?.includes(ModelFeatureEnum.vision) + const isShowDocumentConfig = !!currModel?.features?.includes(ModelFeatureEnum.document) + const isShowAudioConfig = !!currModel?.features?.includes(ModelFeatureEnum.audio) + const isAllowVideoUpload = !!currModel?.features?.includes(ModelFeatureEnum.video) + const visionConfig = { + enabled: false, + number_limits: 2, + detail: Resolution.low, + transfer_methods: [TransferMethod.local_file], + } + + const featuresData: FeaturesData = useMemo(() => { + return { + moreLikeThis: modelConfig.more_like_this || { enabled: false }, + opening: { + enabled: !!modelConfig.opening_statement, + opening_statement: modelConfig.opening_statement || '', + suggested_questions: modelConfig.suggested_questions || [], + }, + moderation: modelConfig.sensitive_word_avoidance || { enabled: false }, + speech2text: modelConfig.speech_to_text || { enabled: false }, + text2speech: modelConfig.text_to_speech || { enabled: false }, + file: { + image: { + detail: modelConfig.file_upload?.image?.detail || Resolution.high, + enabled: !!modelConfig.file_upload?.image?.enabled, + number_limits: modelConfig.file_upload?.image?.number_limits || 3, + transfer_methods: modelConfig.file_upload?.image?.transfer_methods || ['local_file', 'remote_url'], + }, + enabled: !!(modelConfig.file_upload?.enabled || modelConfig.file_upload?.image?.enabled), + allowed_file_types: modelConfig.file_upload?.allowed_file_types || [], + allowed_file_extensions: modelConfig.file_upload?.allowed_file_extensions || [...FILE_EXTS[SupportUploadFileTypes.image], ...FILE_EXTS[SupportUploadFileTypes.video]].map(ext => `.${ext}`), + allowed_file_upload_methods: modelConfig.file_upload?.allowed_file_upload_methods || modelConfig.file_upload?.image?.transfer_methods || ['local_file', 'remote_url'], + number_limits: modelConfig.file_upload?.number_limits || modelConfig.file_upload?.image?.number_limits || 
3, + fileUploadConfig: {}, + } as FileUpload, + suggested: modelConfig.suggested_questions_after_answer || { enabled: false }, + citation: modelConfig.retriever_resource || { enabled: false }, + annotationReply: modelConfig.annotation_reply || { enabled: false }, + } + }, [modelConfig]) + + if (isLoading) { + return ( +
+ +
+ ) + } + const value = { + readonly: true, + appId, + isAPIKeySet: true, + isTrailFinished: false, + mode, + modelModeType: '', + promptMode, + isAdvancedMode, + isAgent, + isOpenAI: false, + isFunctionCall: false, + collectionList: [], + setPromptMode: noop, + canReturnToSimpleMode: false, + setCanReturnToSimpleMode: noop, + chatPromptConfig, + completionPromptConfig, + currentAdvancedPrompt: '', + setCurrentAdvancedPrompt: noop, + conversationHistoriesRole: completionPromptConfig.conversation_histories_role, + showHistoryModal: false, + setConversationHistoriesRole: noop, + hasSetBlockStatus: true, + conversationId: '', + introduction: '', + setIntroduction: noop, + suggestedQuestions, + setSuggestedQuestions: noop, + setConversationId: noop, + controlClearChatMessage: false, + setControlClearChatMessage: noop, + prevPromptConfig: {}, + setPrevPromptConfig: noop, + moreLikeThisConfig, + setMoreLikeThisConfig: noop, + suggestedQuestionsAfterAnswerConfig, + setSuggestedQuestionsAfterAnswerConfig: noop, + speechToTextConfig, + setSpeechToTextConfig: noop, + textToSpeechConfig, + setTextToSpeechConfig: noop, + citationConfig, + setCitationConfig: noop, + annotationConfig, + setAnnotationConfig: noop, + moderationConfig, + setModerationConfig: noop, + externalDataToolsConfig: {}, + setExternalDataToolsConfig: noop, + formattingChanged: false, + setFormattingChanged: noop, + inputs, + setInputs: noop, + query, + setQuery: noop, + completionParams, + setCompletionParams: noop, + modelConfig, + setModelConfig: noop, + showSelectDataSet: noop, + dataSets, + setDataSets: noop, + datasetConfigs: [], + datasetConfigsRef: {}, + setDatasetConfigs: noop, + hasSetContextVar: true, + isShowVisionConfig, + visionConfig, + setVisionConfig: noop, + isAllowVideoUpload, + isShowDocumentConfig, + isShowAudioConfig, + rerankSettingModalOpen: false, + setRerankSettingModalOpen: noop, + } + return ( + + +
+
+
+ +
+ {!isMobile && ( +
+
+ +
+
+ )} +
+
+
+
+ ) +} +export default React.memo(BasicAppPreview) diff --git a/web/app/components/explore/try-app/preview/flow-app-preview.tsx b/web/app/components/explore/try-app/preview/flow-app-preview.tsx new file mode 100644 index 0000000000..ba64aecfba --- /dev/null +++ b/web/app/components/explore/try-app/preview/flow-app-preview.tsx @@ -0,0 +1,39 @@ +'use client' +import type { FC } from 'react' +import * as React from 'react' +import Loading from '@/app/components/base/loading' +import WorkflowPreview from '@/app/components/workflow/workflow-preview' +import { useGetTryAppFlowPreview } from '@/service/use-try-app' +import { cn } from '@/utils/classnames' + +type Props = { + appId: string + className?: string +} + +const FlowAppPreview: FC = ({ + appId, + className, +}) => { + const { data, isLoading } = useGetTryAppFlowPreview(appId) + + if (isLoading) { + return ( +
+ +
+ ) + } + if (!data) + return null + return ( +
+ +
+ ) +} +export default React.memo(FlowAppPreview) diff --git a/web/app/components/explore/try-app/preview/index.tsx b/web/app/components/explore/try-app/preview/index.tsx new file mode 100644 index 0000000000..a0c5fdc594 --- /dev/null +++ b/web/app/components/explore/try-app/preview/index.tsx @@ -0,0 +1,25 @@ +'use client' +import type { FC } from 'react' +import type { TryAppInfo } from '@/service/try-app' +import * as React from 'react' +import BasicAppPreview from './basic-app-preview' +import FlowAppPreview from './flow-app-preview' + +type Props = { + appId: string + appDetail: TryAppInfo +} + +const Preview: FC = ({ + appId, + appDetail, +}) => { + const isBasicApp = ['agent-chat', 'chat', 'completion'].includes(appDetail.mode) + + return ( +
+ {isBasicApp ? : } +
+ ) +} +export default React.memo(Preview) diff --git a/web/app/components/explore/try-app/tab.tsx b/web/app/components/explore/try-app/tab.tsx new file mode 100644 index 0000000000..75ba402204 --- /dev/null +++ b/web/app/components/explore/try-app/tab.tsx @@ -0,0 +1,37 @@ +'use client' +import type { FC } from 'react' +import * as React from 'react' +import { useTranslation } from 'react-i18next' +import TabHeader from '../../base/tab-header' + +export enum TypeEnum { + TRY = 'try', + DETAIL = 'detail', +} + +type Props = { + value: TypeEnum + onChange: (value: TypeEnum) => void +} + +const Tab: FC = ({ + value, + onChange, +}) => { + const { t } = useTranslation() + const tabs = [ + { id: TypeEnum.TRY, name: t('tryApp.tabHeader.try', { ns: 'explore' }) }, + { id: TypeEnum.DETAIL, name: t('tryApp.tabHeader.detail', { ns: 'explore' }) }, + ] + return ( + void} + itemClassName="ml-0 system-md-semibold-uppercase" + itemWrapClassName="pt-2" + activeItemClassName="border-util-colors-blue-brand-blue-brand-500" + /> + ) +} +export default React.memo(Tab) diff --git a/web/app/components/header/account-setting/members-page/index.tsx b/web/app/components/header/account-setting/members-page/index.tsx index d405e8e4c4..5a8f3aebdb 100644 --- a/web/app/components/header/account-setting/members-page/index.tsx +++ b/web/app/components/header/account-setting/members-page/index.tsx @@ -1,10 +1,9 @@ 'use client' import type { InvitationResult } from '@/models/common' -import { RiPencilLine, RiUserAddLine } from '@remixicon/react' +import { RiPencilLine } from '@remixicon/react' import { useState } from 'react' import { useTranslation } from 'react-i18next' import Avatar from '@/app/components/base/avatar' -import Button from '@/app/components/base/button' import Tooltip from '@/app/components/base/tooltip' import { NUM_INFINITE } from '@/app/components/billing/config' import { Plan } from '@/app/components/billing/type' @@ -16,8 +15,8 @@ import { useProviderContext } from 
'@/context/provider-context' import { useFormatTimeFromNow } from '@/hooks/use-format-time-from-now' import { LanguagesSupported } from '@/i18n-config/language' import { useMembers } from '@/service/use-common' -import { cn } from '@/utils/classnames' import EditWorkspaceModal from './edit-workspace-modal' +import InviteButton from './invite-button' import InviteModal from './invite-modal' import InvitedModal from './invited-modal' import Operation from './operation' @@ -37,7 +36,7 @@ const MembersPage = () => { const { userProfile, currentWorkspace, isCurrentWorkspaceOwner, isCurrentWorkspaceManager } = useAppContext() const { data, refetch } = useMembers() - const { systemFeatures } = useGlobalPublicStore() + const systemFeatures = useGlobalPublicStore(s => s.systemFeatures) const { formatTimeFromNow } = useFormatTimeFromNow() const [inviteModalVisible, setInviteModalVisible] = useState(false) const [invitationResults, setInvitationResults] = useState([]) @@ -104,10 +103,9 @@ const MembersPage = () => { {isMemberFull && ( )} - +
+ setInviteModalVisible(true)} /> +
diff --git a/web/app/components/header/account-setting/members-page/invite-button.tsx b/web/app/components/header/account-setting/members-page/invite-button.tsx new file mode 100644 index 0000000000..09002f2945 --- /dev/null +++ b/web/app/components/header/account-setting/members-page/invite-button.tsx @@ -0,0 +1,32 @@ +import { RiUserAddLine } from '@remixicon/react' +import { useTranslation } from 'react-i18next' +import Button from '@/app/components/base/button' +import Loading from '@/app/components/base/loading' +import { useGlobalPublicStore } from '@/context/global-public-context' +import { useWorkspacePermissions } from '@/service/use-workspace' + +type InviteButtonProps = { + disabled?: boolean + onClick?: () => void +} + +const InviteButton = (props: InviteButtonProps) => { + const { t } = useTranslation() + const systemFeatures = useGlobalPublicStore(s => s.systemFeatures) + const { data: workspacePermissions, isFetching: isFetchingWorkspacePermissions } = useWorkspacePermissions(systemFeatures.branding.enabled) + if (systemFeatures.branding.enabled) { + if (isFetchingWorkspacePermissions) { + return + } + if (!workspacePermissions || workspacePermissions.allow_member_invite !== true) { + return null + } + } + return ( + + ) +} +export default InviteButton diff --git a/web/app/components/header/account-setting/members-page/operation/transfer-ownership.tsx b/web/app/components/header/account-setting/members-page/operation/transfer-ownership.tsx index 815c86abc7..6903a85f2e 100644 --- a/web/app/components/header/account-setting/members-page/operation/transfer-ownership.tsx +++ b/web/app/components/header/account-setting/members-page/operation/transfer-ownership.tsx @@ -5,6 +5,9 @@ import { } from '@remixicon/react' import { Fragment } from 'react' import { useTranslation } from 'react-i18next' +import Loading from '@/app/components/base/loading' +import { useGlobalPublicStore } from '@/context/global-public-context' +import { useWorkspacePermissions } from 
'@/service/use-workspace' import { cn } from '@/utils/classnames' type Props = { @@ -13,6 +16,16 @@ type Props = { const TransferOwnership = ({ onOperate }: Props) => { const { t } = useTranslation() + const systemFeatures = useGlobalPublicStore(s => s.systemFeatures) + const { data: workspacePermissions, isFetching: isFetchingWorkspacePermissions } = useWorkspacePermissions(systemFeatures.branding.enabled) + if (systemFeatures.branding.enabled) { + if (isFetchingWorkspacePermissions) { + return + } + if (!workspacePermissions || workspacePermissions.allow_owner_transfer !== true) { + return {t('members.owner', { ns: 'common' })} + } + } return ( diff --git a/web/app/components/header/account-setting/model-provider-page/model-selector/feature-icon.tsx b/web/app/components/header/account-setting/model-provider-page/model-selector/feature-icon.tsx index 8e6e4567ff..5a3ada80ba 100644 --- a/web/app/components/header/account-setting/model-provider-page/model-selector/feature-icon.tsx +++ b/web/app/components/header/account-setting/model-provider-page/model-selector/feature-icon.tsx @@ -2,6 +2,7 @@ import type { FC } from 'react' import { RiFileTextLine, RiFilmAiLine, + RiHammerLine, RiImageCircleAiLine, RiVoiceAiFill, } from '@remixicon/react' @@ -38,17 +39,33 @@ const FeatureIcon: FC = ({ // ) // } - // if (feature === ModelFeatureEnum.toolCall) { - // return ( - // - // - // - // - // - // ) - // } + if (feature === ModelFeatureEnum.toolCall) { + if (showFeaturesLabel) { + return ( + + + {ModelFeatureTextEnum.toolCall} + + ) + } + + return ( + +
+ + + +
+
+ ) + } // if (feature === ModelFeatureEnum.multiToolCall) { // return ( diff --git a/web/app/components/header/account-setting/model-provider-page/model-selector/popup-item.tsx b/web/app/components/header/account-setting/model-provider-page/model-selector/popup-item.tsx index bfa6f8d867..afda9846de 100644 --- a/web/app/components/header/account-setting/model-provider-page/model-selector/popup-item.tsx +++ b/web/app/components/header/account-setting/model-provider-page/model-selector/popup-item.tsx @@ -96,6 +96,14 @@ const PopupItem: FC = ({
{currentProvider?.description?.[language] || currentProvider?.description?.en_US}
)} */}
+ { + modelItem.features?.includes(ModelFeatureEnum.toolCall) && ( + + ) + } {modelItem.model_type && ( {modelTypeFormat(modelItem.model_type)} @@ -118,7 +126,7 @@ const PopupItem: FC = ({
{t('model.capabilities', { ns: 'common' })}
- {modelItem.features?.map(feature => ( + {modelItem.features?.filter(feature => feature !== ModelFeatureEnum.toolCall).map(feature => ( { const res = await changeModelProviderPriority({ url: `/workspaces/current/model-providers/${provider.provider}/preferred-provider-type`, @@ -82,7 +80,7 @@ const CredentialPanel = ({ return t('modelProvider.auth.authRemoved', { ns: 'common' }) return '' - }, [authorized, authRemoved, current_credential_name, hasCredential]) + }, [authorized, authRemoved, current_credential_name, hasCredential, t]) const color = useMemo(() => { if (authRemoved || !hasCredential) @@ -118,7 +116,7 @@ const CredentialPanel = ({ provider={provider} /> { - showPrioritySelector && ( + systemConfig.enabled && isCustomConfigured && IS_CLOUD_EDITION && ( = ({ const systemConfig = provider.system_configuration const hasModelList = fetched && !!modelList.length const { isCurrentWorkspaceManager } = useAppContext() - const showModelProvider = systemConfig.enabled && MODEL_PROVIDER_QUOTA_GET_PAID.includes(provider.provider as ModelProviderQuotaGetPaid) && !IS_CE_EDITION + const showModelProvider = systemConfig.enabled && [...MODEL_PROVIDER_QUOTA_GET_PAID].includes(provider.provider as ModelProviderQuotaGetPaid) && !IS_CE_EDITION const showCredential = configurationMethods.includes(ConfigurationMethodEnum.predefinedModel) && isCurrentWorkspaceManager const getModelList = async (providerName: string) => { diff --git a/web/app/components/header/account-setting/model-provider-page/provider-added-card/quota-panel.tsx b/web/app/components/header/account-setting/model-provider-page/provider-added-card/quota-panel.tsx index e296bc4555..c637d07692 100644 --- a/web/app/components/header/account-setting/model-provider-page/provider-added-card/quota-panel.tsx +++ b/web/app/components/header/account-setting/model-provider-page/provider-added-card/quota-panel.tsx @@ -132,7 +132,7 @@ const QuotaPanel: FC = ({ return (
= ({ isWorkflow = false, }) => { const { notify } = Toast + const appSourceType = isInstalledApp ? AppSourceType.installedApp : AppSourceType.webApp const { t } = useTranslation() const media = useBreakpoints() @@ -102,16 +103,18 @@ const TextGeneration: FC = ({ // save message const [savedMessages, setSavedMessages] = useState([]) const fetchSavedMessage = useCallback(async () => { - const res: any = await doFetchSavedMessage(isInstalledApp, appId) + if (!appId) + return + const res: any = await doFetchSavedMessage(appSourceType, appId) setSavedMessages(res.data) - }, [isInstalledApp, appId]) + }, [appSourceType, appId]) const handleSaveMessage = async (messageId: string) => { - await saveMessage(messageId, isInstalledApp, appId) + await saveMessage(messageId, appSourceType, appId) notify({ type: 'success', message: t('api.saved', { ns: 'common' }) }) fetchSavedMessage() } const handleRemoveSavedMessage = async (messageId: string) => { - await removeMessage(messageId, isInstalledApp, appId) + await removeMessage(messageId, appSourceType, appId) notify({ type: 'success', message: t('api.remove', { ns: 'common' }) }) fetchSavedMessage() } @@ -424,9 +427,8 @@ const TextGeneration: FC = ({ isCallBatchAPI={isCallBatchAPI} isPC={isPC} isMobile={!isPC} - isInstalledApp={isInstalledApp} + appSourceType={isInstalledApp ? 
AppSourceType.installedApp : AppSourceType.webApp} appId={appId} - installedAppInfo={installedAppInfo} isError={task?.status === TaskStatus.failed} promptConfig={promptConfig} moreLikeThisEnabled={!!moreLikeThisConfig?.enabled} diff --git a/web/app/components/share/text-generation/result/index.tsx b/web/app/components/share/text-generation/result/index.tsx index a0ffb31b06..fe518c6d25 100644 --- a/web/app/components/share/text-generation/result/index.tsx +++ b/web/app/components/share/text-generation/result/index.tsx @@ -4,8 +4,8 @@ import type { FeedbackType } from '@/app/components/base/chat/chat/type' import type { WorkflowProcess } from '@/app/components/base/chat/types' import type { FileEntity } from '@/app/components/base/file-uploader/types' import type { PromptConfig } from '@/models/debug' -import type { InstalledApp } from '@/models/explore' import type { SiteInfo } from '@/models/share' +import type { AppSourceType } from '@/service/share' import type { VisionFile, VisionSettings } from '@/types/app' import { RiLoader2Line } from '@remixicon/react' import { useBoolean } from 'ahooks' @@ -35,9 +35,8 @@ export type IResultProps = { isCallBatchAPI: boolean isPC: boolean isMobile: boolean - isInstalledApp: boolean - appId: string - installedAppInfo?: InstalledApp + appSourceType: AppSourceType + appId?: string isError: boolean isShowTextToSpeech: boolean promptConfig: PromptConfig | null @@ -63,9 +62,8 @@ const Result: FC = ({ isCallBatchAPI, isPC, isMobile, - isInstalledApp, + appSourceType, appId, - installedAppInfo, isError, isShowTextToSpeech, promptConfig, @@ -133,7 +131,7 @@ const Result: FC = ({ }) const handleFeedback = async (feedback: FeedbackType) => { - await updateFeedback({ url: `/messages/${messageId}/feedbacks`, body: { rating: feedback.rating, content: feedback.content } }, isInstalledApp, installedAppInfo?.id) + await updateFeedback({ url: `/messages/${messageId}/feedbacks`, body: { rating: feedback.rating, content: feedback.content } }, 
appSourceType, appId) setFeedback(feedback) } @@ -147,9 +145,9 @@ const Result: FC = ({ setIsStopping(true) try { if (isWorkflow) - await stopWorkflowMessage(appId, currentTaskId, isInstalledApp, installedAppInfo?.id || '') + await stopWorkflowMessage(appId!, currentTaskId, appSourceType, appId || '') else - await stopChatMessageResponding(appId, currentTaskId, isInstalledApp, installedAppInfo?.id || '') + await stopChatMessageResponding(appId!, currentTaskId, appSourceType, appId || '') abortControllerRef.current?.abort() } catch (error) { @@ -159,7 +157,7 @@ const Result: FC = ({ finally { setIsStopping(false) } - }, [appId, currentTaskId, installedAppInfo?.id, isInstalledApp, isStopping, isWorkflow, notify]) + }, [appId, currentTaskId, appSourceType, appId, isStopping, isWorkflow, notify]) useEffect(() => { if (!onRunControlChange) @@ -468,8 +466,8 @@ const Result: FC = ({ })) }, }, - isInstalledApp, - installedAppInfo?.id, + appSourceType, + appId, ).catch((error) => { setRespondingFalse() resetRunState() @@ -514,7 +512,7 @@ const Result: FC = ({ getAbortController: (abortController) => { abortControllerRef.current = abortController }, - }, isInstalledApp, installedAppInfo?.id) + }, appSourceType, appId) } } @@ -562,8 +560,8 @@ const Result: FC = ({ feedback={feedback} onSave={handleSaveMessage} isMobile={isMobile} - isInstalledApp={isInstalledApp} - installedAppId={installedAppInfo?.id} + appSourceType={appSourceType} + installedAppId={appId} isLoading={isCallBatchAPI ? (!completionRes && isResponding) : false} taskId={isCallBatchAPI ? ((taskId as number) < 10 ? 
`0${taskId}` : `${taskId}`) : undefined} controlClearMoreLikeThis={controlClearMoreLikeThis} diff --git a/web/app/components/share/text-generation/types.ts b/web/app/components/share/text-generation/types.ts new file mode 100644 index 0000000000..dba8eb2ca9 --- /dev/null +++ b/web/app/components/share/text-generation/types.ts @@ -0,0 +1,16 @@ +type TaskParam = { + inputs: Record +} + +export type Task = { + id: number + status: TaskStatus + params: TaskParam +} + +export enum TaskStatus { + pending = 'pending', + running = 'running', + completed = 'completed', + failed = 'failed', +} diff --git a/web/app/components/workflow/constants.ts b/web/app/components/workflow/constants.ts index 4d95db7fcf..b06e194485 100644 --- a/web/app/components/workflow/constants.ts +++ b/web/app/components/workflow/constants.ts @@ -150,6 +150,10 @@ export const LLM_OUTPUT_STRUCT: Var[] = [ variable: 'usage', type: VarType.object, }, + { + variable: 'generation', + type: VarType.object, + }, ] export const KNOWLEDGE_RETRIEVAL_OUTPUT_STRUCT: Var[] = [ diff --git a/web/app/components/workflow/nodes/_base/components/before-run-form/bool-input.tsx b/web/app/components/workflow/nodes/_base/components/before-run-form/bool-input.tsx index 6a384d2cbc..bee42bb488 100644 --- a/web/app/components/workflow/nodes/_base/components/before-run-form/bool-input.tsx +++ b/web/app/components/workflow/nodes/_base/components/before-run-form/bool-input.tsx @@ -10,6 +10,7 @@ type Props = { value: boolean required?: boolean onChange: (value: boolean) => void + readonly?: boolean } const BoolInput: FC = ({ @@ -17,6 +18,7 @@ const BoolInput: FC = ({ onChange, name, required, + readonly, }) => { const { t } = useTranslation() const handleChange = useCallback(() => { @@ -28,6 +30,7 @@ const BoolInput: FC = ({ className="!h-4 !w-4" checked={!!value} onCheck={handleChange} + disabled={readonly} />
{name} diff --git a/web/app/components/workflow/nodes/_base/components/layout/box.tsx b/web/app/components/workflow/nodes/_base/components/layout/box.tsx index 62e709efc6..fbff9366fe 100644 --- a/web/app/components/workflow/nodes/_base/components/layout/box.tsx +++ b/web/app/components/workflow/nodes/_base/components/layout/box.tsx @@ -6,17 +6,20 @@ export type BoxProps = { className?: string children?: ReactNode withBorderBottom?: boolean + withBorderTop?: boolean } export const Box = memo(({ className, children, withBorderBottom, + withBorderTop, }: BoxProps) => { return (
diff --git a/web/app/components/workflow/nodes/_base/components/layout/field-title.tsx b/web/app/components/workflow/nodes/_base/components/layout/field-title.tsx index e5e8fe950d..2e581a0b9b 100644 --- a/web/app/components/workflow/nodes/_base/components/layout/field-title.tsx +++ b/web/app/components/workflow/nodes/_base/components/layout/field-title.tsx @@ -9,6 +9,7 @@ import { cn } from '@/utils/classnames' export type FieldTitleProps = { title?: string + className?: string operation?: ReactNode subTitle?: string | ReactNode tooltip?: string @@ -19,6 +20,7 @@ export type FieldTitleProps = { } export const FieldTitle = memo(({ title, + className, operation, subTitle, tooltip, @@ -31,7 +33,7 @@ export const FieldTitle = memo(({ const collapsedMerged = collapsed !== undefined ? collapsed : collapsedLocal return ( -
+
{ diff --git a/web/app/components/workflow/nodes/_base/components/layout/group.tsx b/web/app/components/workflow/nodes/_base/components/layout/group.tsx index 6e35cb7b69..7cca898b44 100644 --- a/web/app/components/workflow/nodes/_base/components/layout/group.tsx +++ b/web/app/components/workflow/nodes/_base/components/layout/group.tsx @@ -6,17 +6,20 @@ export type GroupProps = { className?: string children?: ReactNode withBorderBottom?: boolean + withBorderTop?: boolean } export const Group = memo(({ className, children, withBorderBottom, + withBorderTop, }: GroupProps) => { return (
diff --git a/web/app/components/workflow/nodes/_base/hooks/use-node-crud.ts b/web/app/components/workflow/nodes/_base/hooks/use-node-crud.ts index d1741f0bbb..cb3a898387 100644 --- a/web/app/components/workflow/nodes/_base/hooks/use-node-crud.ts +++ b/web/app/components/workflow/nodes/_base/hooks/use-node-crud.ts @@ -1,4 +1,6 @@ import type { CommonNodeType } from '@/app/components/workflow/types' +import { useCallback } from 'react' +import { useStoreApi } from 'reactflow' import { useNodeDataUpdate } from '@/app/components/workflow/hooks' const useNodeCrud = (id: string, data: CommonNodeType) => { @@ -18,3 +20,27 @@ const useNodeCrud = (id: string, data: CommonNodeType) => { } export default useNodeCrud + +export const useNodeCurdKit = (id: string) => { + const store = useStoreApi() + const { handleNodeDataUpdateWithSyncDraft } = useNodeDataUpdate() + + const getNodeData = useCallback(() => { + const { getNodes } = store.getState() + const nodes = getNodes() + + return nodes.find(node => node.id === id) + }, [store, id]) + + const handleNodeDataUpdate = useCallback((data: Partial>) => { + handleNodeDataUpdateWithSyncDraft({ + id, + data, + }) + }, [id, handleNodeDataUpdateWithSyncDraft]) + + return { + getNodeData, + handleNodeDataUpdate, + } +} diff --git a/web/app/components/workflow/nodes/llm/components/tools/index.tsx b/web/app/components/workflow/nodes/llm/components/tools/index.tsx new file mode 100644 index 0000000000..2eb4fd5869 --- /dev/null +++ b/web/app/components/workflow/nodes/llm/components/tools/index.tsx @@ -0,0 +1,53 @@ +import type { ToolValue } from '@/app/components/workflow/block-selector/types' +import { memo } from 'react' +import { useTranslation } from 'react-i18next' +import MultipleToolSelector from '@/app/components/plugins/plugin-detail-panel/multiple-tool-selector' +import { BoxGroup } from '@/app/components/workflow/nodes/_base/components/layout' +import MaxIterations from './max-iterations' +import { useNodeTools } from 
'./use-node-tools' + +type ToolsProps = { + nodeId: string + tools?: ToolValue[] + maxIterations?: number +} +const Tools = ({ + nodeId, + tools = [], + maxIterations = 10, +}: ToolsProps) => { + const { t } = useTranslation() + const { + handleToolsChange, + handleMaxIterationsChange, + } = useNodeTools(nodeId) + + return ( + + + + + ) +} + +export default memo(Tools) diff --git a/web/app/components/workflow/nodes/llm/components/tools/max-iterations.tsx b/web/app/components/workflow/nodes/llm/components/tools/max-iterations.tsx new file mode 100644 index 0000000000..3f1a4b7130 --- /dev/null +++ b/web/app/components/workflow/nodes/llm/components/tools/max-iterations.tsx @@ -0,0 +1,40 @@ +import { memo } from 'react' +import { InputNumber } from '@/app/components/base/input-number' +import Slider from '@/app/components/base/slider' +import Tooltip from '@/app/components/base/tooltip' + +type MaxIterationsProps = { + value?: number + onChange?: (value: number) => void +} +const MaxIterations = ({ value = 10, onChange }: MaxIterationsProps) => { + return ( +
+
Max Iterations
+ +
+ {})} + min={1} + max={99} + step={1} + /> +
+ {})} + min={1} + max={99} + step={1} + /> +
+ ) +} + +export default memo(MaxIterations) diff --git a/web/app/components/workflow/nodes/llm/components/tools/use-node-tools.ts b/web/app/components/workflow/nodes/llm/components/tools/use-node-tools.ts new file mode 100644 index 0000000000..37b2e8f252 --- /dev/null +++ b/web/app/components/workflow/nodes/llm/components/tools/use-node-tools.ts @@ -0,0 +1,23 @@ +import type { LLMNodeType } from '../../types' +import type { ToolValue } from '@/app/components/workflow/block-selector/types' +import { useNodeCurdKit } from '@/app/components/workflow/nodes/_base/hooks/use-node-crud' + +export const useNodeTools = (nodeId: string) => { + const { handleNodeDataUpdate } = useNodeCurdKit(nodeId) + + const handleToolsChange = (tools: ToolValue[]) => { + handleNodeDataUpdate({ + tools, + }) + } + const handleMaxIterationsChange = (maxIterations: number) => { + handleNodeDataUpdate({ + max_iterations: maxIterations, + }) + } + + return { + handleToolsChange, + handleMaxIterationsChange, + } +} diff --git a/web/app/components/workflow/nodes/llm/panel.tsx b/web/app/components/workflow/nodes/llm/panel.tsx index 670d3149be..9ddd3ecd88 100644 --- a/web/app/components/workflow/nodes/llm/panel.tsx +++ b/web/app/components/workflow/nodes/llm/panel.tsx @@ -22,6 +22,7 @@ import VarReferencePicker from '../_base/components/variable/var-reference-picke import ConfigPrompt from './components/config-prompt' import ReasoningFormatConfig from './components/reasoning-format-config' import StructureOutput from './components/structure-output' +import Tools from './components/tools' import useConfig from './use-config' const i18nPrefix = 'nodes.llm' @@ -233,6 +234,12 @@ const Panel: FC> = ({ )} + + {/* Vision: GPT4-vision and so on */} > = ({ type="object" description={t(`${i18nPrefix}.outputVars.usage`, { ns: 'workflow' })} /> + {inputs.structured_output_enabled && ( <> diff --git a/web/app/components/workflow/nodes/llm/types.ts b/web/app/components/workflow/nodes/llm/types.ts index 
70dc4d9cc7..7e95c05a29 100644 --- a/web/app/components/workflow/nodes/llm/types.ts +++ b/web/app/components/workflow/nodes/llm/types.ts @@ -1,5 +1,18 @@ +import type { ToolValue } from '@/app/components/workflow/block-selector/types' import type { CommonNodeType, Memory, ModelConfig, PromptItem, ValueSelector, Variable, VisionSetting } from '@/app/components/workflow/types' +export type Tool = { + enabled: boolean + type: string + provider_name: 'plugin' | 'builtin' | 'api' | 'workflow' | 'app' | 'dataset-retrieval' + tool_name: string + plugin_unique_identifier?: string + credential_id?: string + parameters?: Record + settings?: Record + extra?: Record +} + export type LLMNodeType = CommonNodeType & { model: ModelConfig prompt_template: PromptItem[] | PromptItem @@ -18,6 +31,8 @@ export type LLMNodeType = CommonNodeType & { structured_output_enabled?: boolean structured_output?: StructuredOutput reasoning_format?: 'tagged' | 'separated' + tools?: ToolValue[] + max_iterations?: number } export enum Type { diff --git a/web/app/components/workflow/panel/debug-and-preview/hooks.ts b/web/app/components/workflow/panel/debug-and-preview/hooks.ts index 2c46833df8..7b82f10630 100644 --- a/web/app/components/workflow/panel/debug-and-preview/hooks.ts +++ b/web/app/components/workflow/panel/debug-and-preview/hooks.ts @@ -15,6 +15,7 @@ import { useState, } from 'react' import { useTranslation } from 'react-i18next' +import { v4 as uuidV4 } from 'uuid' import { getProcessedInputs, processOpeningStatement, @@ -266,13 +267,78 @@ export const useChat = ( } let hasSetResponseId = false + let toolCallId = '' + let thoughtId = '' handleRun( bodyParams, { - onData: (message: string, isFirstMessage: boolean, { conversationId: newConversationId, messageId, taskId }: any) => { + onData: (message: string, isFirstMessage: boolean, { + conversationId: newConversationId, + messageId, + taskId, + chunk_type, + tool_icon, + tool_icon_dark, + tool_name, + tool_arguments, + tool_files, + 
tool_error, + tool_elapsed_time, + }: any) => { responseItem.content = responseItem.content + message + if (chunk_type === 'tool_call') { + if (!responseItem.toolCalls) + responseItem.toolCalls = [] + toolCallId = uuidV4() + responseItem.toolCalls?.push({ + id: toolCallId, + type: 'tool', + toolName: tool_name, + toolArguments: tool_arguments, + toolIcon: tool_icon, + toolIconDark: tool_icon_dark, + }) + } + + if (chunk_type === 'tool_result') { + const currentToolCallIndex = responseItem.toolCalls?.findIndex(item => item.id === toolCallId) ?? -1 + + if (currentToolCallIndex > -1) { + responseItem.toolCalls![currentToolCallIndex].toolError = tool_error + responseItem.toolCalls![currentToolCallIndex].toolDuration = tool_elapsed_time + responseItem.toolCalls![currentToolCallIndex].toolFiles = tool_files + responseItem.toolCalls![currentToolCallIndex].toolOutput = message + } + } + + if (chunk_type === 'thought_start') { + if (!responseItem.toolCalls) + responseItem.toolCalls = [] + thoughtId = uuidV4() + responseItem.toolCalls.push({ + id: thoughtId, + type: 'thought', + thoughtOutput: '', + }) + } + + if (chunk_type === 'thought') { + const currentThoughtIndex = responseItem.toolCalls?.findIndex(item => item.id === thoughtId) ?? -1 + if (currentThoughtIndex > -1) { + responseItem.toolCalls![currentThoughtIndex].thoughtOutput += message + } + } + + if (chunk_type === 'thought_end') { + const currentThoughtIndex = responseItem.toolCalls?.findIndex(item => item.id === thoughtId) ?? 
-1 + if (currentThoughtIndex > -1) { + responseItem.toolCalls![currentThoughtIndex].thoughtOutput += message + responseItem.toolCalls![currentThoughtIndex].thoughtCompleted = true + } + } + if (messageId && !hasSetResponseId) { questionItem.id = `question-${messageId}` responseItem.id = messageId diff --git a/web/app/components/workflow/run/hooks.ts b/web/app/components/workflow/run/hooks.ts index 593836f5b3..9aa6d253a1 100644 --- a/web/app/components/workflow/run/hooks.ts +++ b/web/app/components/workflow/run/hooks.ts @@ -1,6 +1,7 @@ import type { AgentLogItemWithChildren, IterationDurationMap, + LLMTraceItem, LoopDurationMap, LoopVariableMap, NodeTracing, @@ -79,8 +80,18 @@ export const useLogs = () => { } }, [setAgentOrToolLogItemStack, setAgentOrToolLogListMap]) + const [showLLMDetail, { + setTrue: setShowLLMDetailTrue, + setFalse: setShowLLMDetailFalse, + }] = useBoolean(false) + const [llmResultList, setLLMResultList] = useState([]) + const handleShowLLMDetail = useCallback((detail: LLMTraceItem[]) => { + setShowLLMDetailTrue() + setLLMResultList(detail) + }, [setShowLLMDetailTrue, setLLMResultList]) + return { - showSpecialResultPanel: showRetryDetail || showIteratingDetail || showLoopingDetail || !!agentOrToolLogItemStack.length, + showSpecialResultPanel: showRetryDetail || showIteratingDetail || showLoopingDetail || !!agentOrToolLogItemStack.length || showLLMDetail, showRetryDetail, setShowRetryDetailTrue, setShowRetryDetailFalse, @@ -111,5 +122,12 @@ export const useLogs = () => { agentOrToolLogItemStack, agentOrToolLogListMap, handleShowAgentOrToolLog, + + showLLMDetail, + setShowLLMDetailTrue, + setShowLLMDetailFalse, + llmResultList, + setLLMResultList, + handleShowLLMDetail, } } diff --git a/web/app/components/workflow/run/index.tsx b/web/app/components/workflow/run/index.tsx index 51f641f265..9590dc24d7 100644 --- a/web/app/components/workflow/run/index.tsx +++ b/web/app/components/workflow/run/index.tsx @@ -153,7 +153,7 @@ const RunPanel: FC = ({
{/* panel detail */} -
+
{loading && (
@@ -192,7 +192,7 @@ const RunPanel: FC = ({ )} {!loading && currentTab === 'TRACING' && ( )} diff --git a/web/app/components/workflow/run/llm-log/index.tsx b/web/app/components/workflow/run/llm-log/index.tsx new file mode 100644 index 0000000000..26851aa68a --- /dev/null +++ b/web/app/components/workflow/run/llm-log/index.tsx @@ -0,0 +1,2 @@ +export { default as LLMLogTrigger } from './llm-log-trigger' +export { default as LLMResultPanel } from './llm-result-panel' diff --git a/web/app/components/workflow/run/llm-log/llm-log-trigger.tsx b/web/app/components/workflow/run/llm-log/llm-log-trigger.tsx new file mode 100644 index 0000000000..1d65f754b4 --- /dev/null +++ b/web/app/components/workflow/run/llm-log/llm-log-trigger.tsx @@ -0,0 +1,41 @@ +import type { LLMTraceItem, NodeTracing } from '@/types/workflow' +import { + RiArrowRightSLine, +} from '@remixicon/react' +import { useTranslation } from 'react-i18next' +import Button from '@/app/components/base/button' +import { Thinking } from '@/app/components/base/icons/src/vender/workflow' + +type LLMLogTriggerProps = { + nodeInfo: NodeTracing + onShowLLMDetail: (detail: LLMTraceItem[]) => void +} +const LLMLogTrigger = ({ + nodeInfo, + onShowLLMDetail, +}: LLMLogTriggerProps) => { + const { t } = useTranslation() + const llmTrace = nodeInfo?.execution_metadata?.llm_trace || [] + + const handleShowLLMDetail = (e: React.MouseEvent) => { + e.stopPropagation() + e.nativeEvent.stopImmediatePropagation() + onShowLLMDetail(llmTrace || []) + } + + return ( + + ) +} + +export default LLMLogTrigger diff --git a/web/app/components/workflow/run/llm-log/llm-result-panel.tsx b/web/app/components/workflow/run/llm-log/llm-result-panel.tsx new file mode 100644 index 0000000000..56687050a9 --- /dev/null +++ b/web/app/components/workflow/run/llm-log/llm-result-panel.tsx @@ -0,0 +1,73 @@ +'use client' + +import type { FC } from 'react' +import type { + LLMTraceItem, + ToolCallItem, +} from '@/types/workflow' +import { + RiArrowLeftLine, 
+} from '@remixicon/react' +import { memo } from 'react' +import { useTranslation } from 'react-i18next' +import ToolCallItemComponent from '@/app/components/workflow/run/llm-log/tool-call-item' + +type Props = { + list: LLMTraceItem[] + onBack: () => void +} + +const LLMResultPanel: FC = ({ + list, + onBack, +}) => { + const { t } = useTranslation() + const formattedList = list.map((item) => { + if (item.type === 'tool') { + return { + type: 'tool', + toolName: item.name, + toolProvider: item.provider, + toolIcon: item.icon, + toolIconDark: item.icon_dark, + toolArguments: item.output.arguments, + toolOutput: item.output.output, + toolDuration: item.duration, + } + } + + return { + type: 'model', + modelName: item.name, + modelProvider: item.provider, + modelIcon: item.icon, + modelIconDark: item.icon_dark, + modelOutput: item.output, + modelDuration: item.duration, + } + }) + + return ( +
+
{ + e.stopPropagation() + e.nativeEvent.stopImmediatePropagation() + onBack() + }} + > + + {t('singleRun.back', { ns: 'workflow' })} +
+
+ { + formattedList.map((item, index) => ( + + )) + } +
+
+ ) +} +export default memo(LLMResultPanel) diff --git a/web/app/components/workflow/run/llm-log/tool-call-item.tsx b/web/app/components/workflow/run/llm-log/tool-call-item.tsx new file mode 100644 index 0000000000..e3e5802655 --- /dev/null +++ b/web/app/components/workflow/run/llm-log/tool-call-item.tsx @@ -0,0 +1,152 @@ +import type { ToolCallItem } from '@/types/workflow' +import { + RiArrowDownSLine, +} from '@remixicon/react' +import { useState } from 'react' +import { useTranslation } from 'react-i18next' +import AppIcon from '@/app/components/base/app-icon' +import { Thinking } from '@/app/components/base/icons/src/vender/workflow' +import BlockIcon from '@/app/components/workflow/block-icon' +import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/code-editor' +import { CodeLanguage } from '@/app/components/workflow/nodes/code/types' +import { BlockEnum } from '@/app/components/workflow/types' +import { cn } from '@/utils/classnames' + +type ToolCallItemComponentProps = { + className?: string + payload: ToolCallItem +} +const ToolCallItemComponent = ({ + className, + payload, +}: ToolCallItemComponentProps) => { + const { t } = useTranslation() + const [expand, setExpand] = useState(false) + return ( +
+
{ + setExpand(!expand) + }} + > + { + payload.type === 'thought' && ( + + ) + } + { + payload.type === 'tool' && ( + + ) + } + { + payload.type === 'model' && ( + + ) + } + { + payload.type === 'thought' && ( +
+ { + payload.thoughtCompleted && !expand && (payload.thoughtOutput || '') as string + } + { + payload.thoughtCompleted && expand && 'THOUGHT' + } + { + !payload.thoughtCompleted && 'THINKING...' + } +
+ ) + } + { + payload.type === 'tool' && ( +
{payload.toolName}
+ ) + } + { + payload.type === 'model' && ( +
{payload.modelName}
+ ) + } + { + !!payload.toolDuration && ( +
+ {payload.toolDuration?.toFixed(1)} + s +
+ ) + } + { + !!payload.modelDuration && ( +
+ {payload.modelDuration?.toFixed(1)} + s +
+ ) + } + +
+ { + expand && ( +
+
+ { + payload.type === 'thought' && typeof payload.thoughtOutput === 'string' && ( +
{payload.thoughtOutput}
+ ) + } + { + payload.type === 'model' && ( + {t('common.data', { ns: 'workflow' })}
} + language={CodeLanguage.json} + value={payload.modelOutput} + isJSONStringifyBeauty + /> + ) + } + { + payload.type === 'tool' && ( + {t('common.input', { ns: 'workflow' })}
} + language={CodeLanguage.json} + value={payload.toolArguments} + isJSONStringifyBeauty + /> + ) + } + { + payload.type === 'tool' && ( + {t('common.output', { ns: 'workflow' })}
} + language={CodeLanguage.json} + value={payload.toolOutput} + isJSONStringifyBeauty + /> + ) + } +
+ ) + } +
+ ) +} + +export default ToolCallItemComponent diff --git a/web/app/components/workflow/run/node.tsx b/web/app/components/workflow/run/node.tsx index 8611a98d3b..ffec4392b6 100644 --- a/web/app/components/workflow/run/node.tsx +++ b/web/app/components/workflow/run/node.tsx @@ -3,6 +3,7 @@ import type { FC } from 'react' import type { AgentLogItemWithChildren, IterationDurationMap, + LLMTraceItem, LoopDurationMap, LoopVariableMap, NodeTracing, @@ -29,6 +30,7 @@ import { BlockEnum } from '../types' import LargeDataAlert from '../variable-inspect/large-data-alert' import { AgentLogTrigger } from './agent-log' import { IterationLogTrigger } from './iteration-log' +import { LLMLogTrigger } from './llm-log' import { LoopLogTrigger } from './loop-log' import { RetryLogTrigger } from './retry-log' @@ -43,6 +45,7 @@ type Props = { onShowLoopDetail?: (detail: NodeTracing[][], loopDurationMap: LoopDurationMap, loopVariableMap: LoopVariableMap) => void onShowRetryDetail?: (detail: NodeTracing[]) => void onShowAgentOrToolLog?: (detail?: AgentLogItemWithChildren) => void + onShowLLMDetail?: (detail: LLMTraceItem[]) => void notShowIterationNav?: boolean notShowLoopNav?: boolean } @@ -58,6 +61,7 @@ const NodePanel: FC = ({ onShowLoopDetail, onShowRetryDetail, onShowAgentOrToolLog, + onShowLLMDetail, notShowIterationNav, notShowLoopNav, }) => { @@ -96,6 +100,7 @@ const NodePanel: FC = ({ const isRetryNode = hasRetryNode(nodeInfo.node_type) && !!nodeInfo.retryDetail?.length const isAgentNode = nodeInfo.node_type === BlockEnum.Agent && !!nodeInfo.agentLog?.length const isToolNode = nodeInfo.node_type === BlockEnum.Tool && !!nodeInfo.agentLog?.length + const isLLMNode = nodeInfo.node_type === BlockEnum.LLM && !!nodeInfo.execution_metadata?.llm_trace?.length const inputsTitle = useMemo(() => { let text = t('common.input', { ns: 'workflow' }) @@ -193,6 +198,12 @@ const NodePanel: FC = ({ onShowRetryResultList={onShowRetryDetail} /> )} + {isLLMNode && onShowLLMDetail && ( + + )} { 
(isAgentNode || isToolNode) && onShowAgentOrToolLog && ( void onShowRetryDetail?: (detail: NodeTracing[]) => void handleShowAgentOrToolLog?: (detail?: AgentLogItemWithChildren) => void + onShowLLMDetail?: (detail: LLMTraceItem[]) => void } const ResultPanel: FC = ({ @@ -71,6 +74,7 @@ const ResultPanel: FC = ({ handleShowLoopResultList, onShowRetryDetail, handleShowAgentOrToolLog, + onShowLLMDetail, }) => { const { t } = useTranslation() const isIterationNode = nodeInfo?.node_type === BlockEnum.Iteration && !!nodeInfo?.details?.length @@ -78,6 +82,7 @@ const ResultPanel: FC = ({ const isRetryNode = hasRetryNode(nodeInfo?.node_type) && !!nodeInfo?.retryDetail?.length const isAgentNode = nodeInfo?.node_type === BlockEnum.Agent && !!nodeInfo?.agentLog?.length const isToolNode = nodeInfo?.node_type === BlockEnum.Tool && !!nodeInfo?.agentLog?.length + const isLLMNode = nodeInfo?.node_type === BlockEnum.LLM && !!nodeInfo?.execution_metadata?.llm_trace?.length return (
@@ -116,6 +121,14 @@ const ResultPanel: FC = ({ /> ) } + { + isLLMNode && onShowLLMDetail && ( + + ) + } { (isAgentNode || isToolNode) && handleShowAgentOrToolLog && ( handleShowAgentOrToolLog?: (detail?: AgentLogItemWithChildren) => void + + showLLMDetail?: boolean + setShowLLMDetailFalse?: () => void + llmResultList?: LLMTraceItem[] } const SpecialResultPanel = ({ showRetryDetail, @@ -49,6 +55,10 @@ const SpecialResultPanel = ({ agentOrToolLogItemStack, agentOrToolLogListMap, handleShowAgentOrToolLog, + + showLLMDetail, + setShowLLMDetailFalse, + llmResultList, }: SpecialResultPanelProps) => { return (
{ @@ -64,6 +74,14 @@ const SpecialResultPanel = ({ /> ) } + { + !!showLLMDetail && !!llmResultList?.length && setShowLLMDetailFalse && ( + + ) + } { showIteratingDetail && !!iterationResultList?.length && setShowIteratingDetailFalse && ( = ({ agentOrToolLogItemStack, agentOrToolLogListMap, handleShowAgentOrToolLog, + + showLLMDetail, + setShowLLMDetailFalse, + llmResultList, + handleShowLLMDetail, } = useLogs() const renderNode = (node: NodeTracing) => { @@ -153,6 +158,7 @@ const TracingPanel: FC = ({ onShowLoopDetail={handleShowLoopResultList} onShowRetryDetail={handleShowRetryResultList} onShowAgentOrToolLog={handleShowAgentOrToolLog} + onShowLLMDetail={handleShowLLMDetail} hideInfo={hideNodeInfo} hideProcessDetail={hideNodeProcessDetail} /> @@ -182,6 +188,10 @@ const TracingPanel: FC = ({ agentOrToolLogItemStack={agentOrToolLogItemStack} agentOrToolLogListMap={agentOrToolLogListMap} handleShowAgentOrToolLog={handleShowAgentOrToolLog} + + showLLMDetail={showLLMDetail} + setShowLLMDetailFalse={setShowLLMDetailFalse} + llmResultList={llmResultList} /> ) } diff --git a/web/app/components/workflow/workflow-preview/index.tsx b/web/app/components/workflow/workflow-preview/index.tsx index 8f61c2cfb6..bb85e00b6b 100644 --- a/web/app/components/workflow/workflow-preview/index.tsx +++ b/web/app/components/workflow/workflow-preview/index.tsx @@ -61,12 +61,14 @@ type WorkflowPreviewProps = { edges: Edge[] viewport: Viewport className?: string + miniMapToRight?: boolean } const WorkflowPreview = ({ nodes, edges, viewport, className, + miniMapToRight, }: WorkflowPreviewProps) => { const [nodesData, setNodesData] = useState(() => initialNodes(nodes, edges)) const [edgesData, setEdgesData] = useState(() => initialEdges(edges, nodes)) @@ -97,8 +99,7 @@ const WorkflowPreview = ({ height: 72, }} maskColor="var(--color-workflow-minimap-bg)" - className="!absolute !bottom-14 !left-4 z-[9] !m-0 !h-[72px] !w-[102px] !rounded-lg !border-[0.5px] - !border-divider-subtle 
!bg-background-default-subtle !shadow-md !shadow-shadow-shadow-5" + className={cn('!absolute !bottom-14 z-[9] !m-0 !h-[72px] !w-[102px] !rounded-lg !border-[0.5px] !border-divider-subtle !bg-background-default-subtle !shadow-md !shadow-shadow-shadow-5', miniMapToRight ? '!right-4' : '!left-4')} />
diff --git a/web/app/signup/set-password/page.tsx b/web/app/signup/set-password/page.tsx index 69af045f1a..47ea2faf87 100644 --- a/web/app/signup/set-password/page.tsx +++ b/web/app/signup/set-password/page.tsx @@ -13,19 +13,6 @@ import { useMailRegister } from '@/service/use-common' import { cn } from '@/utils/classnames' import { sendGAEvent } from '@/utils/gtag' -const parseUtmInfo = () => { - const utmInfoStr = Cookies.get('utm_info') - if (!utmInfoStr) - return null - try { - return JSON.parse(utmInfoStr) - } - catch (e) { - console.error('Failed to parse utm_info cookie:', e) - return null - } -} - const ChangePasswordForm = () => { const { t } = useTranslation() const router = useRouter() @@ -70,7 +57,9 @@ const ChangePasswordForm = () => { }) const { result } = res as MailRegisterResponse if (result === 'success') { - const utmInfo = parseUtmInfo() + const utmInfoStr = Cookies.get('utm_info') + const utmInfo = utmInfoStr ? JSON.parse(utmInfoStr) : null + trackEvent(utmInfo ? 'user_registration_success_with_utm' : 'user_registration_success', { method: 'email', ...utmInfo, diff --git a/web/context/app-list-context.ts b/web/context/app-list-context.ts new file mode 100644 index 0000000000..130f85966a --- /dev/null +++ b/web/context/app-list-context.ts @@ -0,0 +1,19 @@ +import type { CurrentTryAppParams } from './explore-context' +import { noop } from 'es-toolkit/function' +import { createContext } from 'use-context-selector' + +type Props = { + currentApp?: CurrentTryAppParams + isShowTryAppPanel: boolean + setShowTryAppPanel: (showTryAppPanel: boolean, params?: CurrentTryAppParams) => void + controlHideCreateFromTemplatePanel: number +} + +const AppListContext = createContext({ + isShowTryAppPanel: false, + setShowTryAppPanel: noop, + currentApp: undefined, + controlHideCreateFromTemplatePanel: 0, +}) + +export default AppListContext diff --git a/web/context/debug-configuration.ts b/web/context/debug-configuration.ts index ba157e1bf7..48e34fbe5e 100644 --- 
a/web/context/debug-configuration.ts +++ b/web/context/debug-configuration.ts @@ -29,6 +29,7 @@ import { PromptMode } from '@/models/debug' import { AppModeEnum, ModelModeType, Resolution, RETRIEVE_TYPE, TransferMethod } from '@/types/app' type IDebugConfiguration = { + readonly?: boolean appId: string isAPIKeySet: boolean isTrailFinished: boolean @@ -108,6 +109,7 @@ type IDebugConfiguration = { } const DebugConfigurationContext = createContext({ + readonly: false, appId: '', isAPIKeySet: false, isTrailFinished: false, diff --git a/web/context/explore-context.ts b/web/context/explore-context.ts index fc446c0453..2e85b8356b 100644 --- a/web/context/explore-context.ts +++ b/web/context/explore-context.ts @@ -1,7 +1,12 @@ -import type { InstalledApp } from '@/models/explore' +import type { App, InstalledApp } from '@/models/explore' import { noop } from 'es-toolkit/function' import { createContext } from 'use-context-selector' +export type CurrentTryAppParams = { + appId: string + app: App +} + type IExplore = { controlUpdateInstalledApps: number setControlUpdateInstalledApps: (controlUpdateInstalledApps: number) => void @@ -10,6 +15,9 @@ type IExplore = { setInstalledApps: (installedApps: InstalledApp[]) => void isFetchingInstalledApps: boolean setIsFetchingInstalledApps: (isFetchingInstalledApps: boolean) => void + currentApp?: CurrentTryAppParams + isShowTryAppPanel: boolean + setShowTryAppPanel: (showTryAppPanel: boolean, params?: CurrentTryAppParams) => void } const ExploreContext = createContext({ @@ -20,6 +28,9 @@ const ExploreContext = createContext({ setInstalledApps: noop, isFetchingInstalledApps: false, setIsFetchingInstalledApps: noop, + isShowTryAppPanel: false, + setShowTryAppPanel: noop, + currentApp: undefined, }) export default ExploreContext diff --git a/web/eslint.config.mjs b/web/eslint.config.mjs index b8191a5eea..2b3240eb3e 100644 --- a/web/eslint.config.mjs +++ b/web/eslint.config.mjs @@ -177,7 +177,7 @@ export default antfu( }, rules: { // 
'dify-i18n/no-as-any-in-t': ['error', { mode: 'all' }], - 'dify-i18n/no-as-any-in-t': 'error', + // 'dify-i18n/no-as-any-in-t': 'error', // 'dify-i18n/no-legacy-namespace-prefix': 'error', // 'dify-i18n/require-ns-option': 'error', }, diff --git a/web/i18n/ar-TN/explore.json b/web/i18n/ar-TN/explore.json index 80c036e50c..290fa68873 100644 --- a/web/i18n/ar-TN/explore.json +++ b/web/i18n/ar-TN/explore.json @@ -1,11 +1,9 @@ { "appCard.addToWorkspace": "إضافة إلى مساحة العمل", - "appCard.customize": "تخصيص", "appCustomize.nameRequired": "اسم التطبيق مطلوب", "appCustomize.subTitle": "أيقونة التطبيق واسمه", "appCustomize.title": "إنشاء تطبيق من {{name}}", "apps.allCategories": "موصى به", - "apps.description": "استخدم تطبيقات القوالب هذه فورًا أو خصص تطبيقاتك الخاصة بناءً على القوالب.", "apps.title": "استكشاف التطبيقات", "category.Agent": "وكيل", "category.Assistant": "مساعد", @@ -23,7 +21,5 @@ "sidebar.chat": "دردشة", "sidebar.delete.content": "هل أنت متأكد أنك تريد حذف هذا التطبيق؟", "sidebar.delete.title": "حذف التطبيق", - "sidebar.discovery": "اكتشاف", - "sidebar.workspace": "مساحة العمل", "title": "استكشاف" } diff --git a/web/i18n/de-DE/explore.json b/web/i18n/de-DE/explore.json index 6461fbc76d..a06dea7d3a 100644 --- a/web/i18n/de-DE/explore.json +++ b/web/i18n/de-DE/explore.json @@ -1,12 +1,7 @@ { - "appCard.addToWorkspace": "Zum Arbeitsbereich hinzufügen", - "appCard.customize": "Anpassen", "appCustomize.nameRequired": "App-Name ist erforderlich", "appCustomize.subTitle": "App-Symbol & Name", "appCustomize.title": "App aus {{name}} erstellen", - "apps.allCategories": "Alle Kategorien", - "apps.description": "Nutzen Sie diese Vorlagen-Apps sofort oder passen Sie Ihre eigenen Apps basierend auf den Vorlagen an.", - "apps.title": "Apps von Dify erkunden", "category.Agent": "Agent", "category.Assistant": "Assistent", "category.Entertainment": "Unterhaltung", @@ -23,7 +18,5 @@ "sidebar.chat": "Chat", "sidebar.delete.content": "Sind Sie sicher, dass Sie diese App 
löschen möchten?", "sidebar.delete.title": "App löschen", - "sidebar.discovery": "Entdeckung", - "sidebar.workspace": "Arbeitsbereich", "title": "Entdecken" } diff --git a/web/i18n/de-DE/explore.ts b/web/i18n/de-DE/explore.ts new file mode 100644 index 0000000000..abada1081f --- /dev/null +++ b/web/i18n/de-DE/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'Entdecken', + sidebar: { + chat: 'Chat', + action: { + pin: 'Anheften', + unpin: 'Lösen', + rename: 'Umbenennen', + delete: 'Löschen', + }, + delete: { + title: 'App löschen', + content: 'Sind Sie sicher, dass Sie diese App löschen möchten?', + }, + }, + apps: { + }, + appCard: { + customize: 'Anpassen', + }, + appCustomize: { + title: 'App aus {{name}} erstellen', + subTitle: 'App-Symbol & Name', + nameRequired: 'App-Name ist erforderlich', + }, + category: { + Assistant: 'Assistent', + Writing: 'Schreiben', + Translate: 'Übersetzen', + Programming: 'Programmieren', + HR: 'Personalwesen', + Agent: 'Agent', + Workflow: 'Arbeitsablauf', + Entertainment: 'Unterhaltung', + }, +} + +export default translation diff --git a/web/i18n/en-US/common.json b/web/i18n/en-US/common.json index 64ac47d804..0c94163260 100644 --- a/web/i18n/en-US/common.json +++ b/web/i18n/en-US/common.json @@ -108,6 +108,7 @@ "chat.conversationName": "Conversation name", "chat.conversationNameCanNotEmpty": "Conversation name required", "chat.conversationNamePlaceholder": "Please input conversation name", + "chat.inputDisabledPlaceholder": "Preview Only", "chat.inputPlaceholder": "Talk to {{botName}}", "chat.renameConversation": "Rename Conversation", "chat.resend": "Resend", diff --git a/web/i18n/en-US/common.ts b/web/i18n/en-US/common.ts new file mode 100644 index 0000000000..e0af44cc89 --- /dev/null +++ b/web/i18n/en-US/common.ts @@ -0,0 +1,789 @@ +const translation = { + theme: { + theme: 'Theme', + light: 'light', + dark: 'dark', + auto: 'system', + }, + api: { + success: 'Success', + actionSuccess: 'Action succeeded', + saved: 
'Saved', + create: 'Created', + remove: 'Removed', + }, + operation: { + create: 'Create', + confirm: 'Confirm', + cancel: 'Cancel', + clear: 'Clear', + save: 'Save', + yes: 'Yes', + no: 'No', + deleteConfirmTitle: 'Delete?', + confirmAction: 'Please confirm your action.', + saveAndEnable: 'Save & Enable', + edit: 'Edit', + add: 'Add', + added: 'Added', + refresh: 'Restart', + reset: 'Reset', + search: 'Search', + noSearchResults: 'No {{content}} were found', + resetKeywords: 'Reset keywords', + selectCount: '{{count}} Selected', + searchCount: 'Find {{count}} {{content}}', + noSearchCount: '0 {{content}}', + change: 'Change', + remove: 'Remove', + send: 'Send', + copy: 'Copy', + copied: 'Copied', + lineBreak: 'Line break', + sure: 'I\'m sure', + download: 'Download', + downloadSuccess: 'Download Completed.', + downloadFailed: 'Download failed. Please try again later.', + viewDetails: 'View Details', + delete: 'Delete', + now: 'Now', + deleteApp: 'Delete App', + settings: 'Settings', + setup: 'Setup', + config: 'Config', + getForFree: 'Get for free', + reload: 'Reload', + ok: 'OK', + log: 'Log', + learnMore: 'Learn More', + params: 'Params', + duplicate: 'Duplicate', + rename: 'Rename', + audioSourceUnavailable: 'AudioSource is unavailable', + close: 'Close', + copyImage: 'Copy Image', + imageCopied: 'Image copied', + zoomOut: 'Zoom Out', + zoomIn: 'Zoom In', + openInNewTab: 'Open in new tab', + in: 'in', + saveAndRegenerate: 'Save & Regenerate Child Chunks', + view: 'View', + viewMore: 'VIEW MORE', + regenerate: 'Regenerate', + submit: 'Submit', + skip: 'Skip', + format: 'Format', + more: 'More', + selectAll: 'Select All', + deSelectAll: 'Deselect All', + }, + errorMsg: { + fieldRequired: '{{field}} is required', + urlError: 'url should start with http:// or https://', + }, + placeholder: { + input: 'Please enter', + select: 'Please select', + search: 'Search...', + }, + noData: 'No data', + label: { + optional: '(optional)', + }, + voice: { + language: { + 
zhHans: 'Chinese', + zhHant: 'Traditional Chinese', + enUS: 'English', + deDE: 'German', + frFR: 'French', + esES: 'Spanish', + itIT: 'Italian', + thTH: 'Thai', + idID: 'Indonesian', + jaJP: 'Japanese', + koKR: 'Korean', + ptBR: 'Portuguese', + ruRU: 'Russian', + ukUA: 'Ukrainian', + viVN: 'Vietnamese', + plPL: 'Polish', + roRO: 'Romanian', + hiIN: 'Hindi', + trTR: 'Türkçe', + faIR: 'Farsi', + }, + }, + unit: { + char: 'chars', + }, + actionMsg: { + noModification: 'No modifications at the moment.', + modifiedSuccessfully: 'Modified successfully', + modifiedUnsuccessfully: 'Modified unsuccessfully', + copySuccessfully: 'Copied successfully', + paySucceeded: 'Payment succeeded', + payCancelled: 'Payment cancelled', + generatedSuccessfully: 'Generated successfully', + generatedUnsuccessfully: 'Generated unsuccessfully', + }, + model: { + params: { + temperature: 'Temperature', + temperatureTip: + 'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.', + top_p: 'Top P', + top_pTip: + 'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered.', + presence_penalty: 'Presence penalty', + presence_penaltyTip: + 'How much to penalize new tokens based on whether they appear in the text so far.\nIncreases the model\'s likelihood to talk about new topics.', + frequency_penalty: 'Frequency penalty', + frequency_penaltyTip: + 'How much to penalize new tokens based on their existing frequency in the text so far.\nDecreases the model\'s likelihood to repeat the same line verbatim.', + max_tokens: 'Max token', + max_tokensTip: + 'Used to limit the maximum length of the reply, in tokens. \nLarger values may limit the space left for prompt words, chat logs, and Knowledge. 
\nIt is recommended to set it below two-thirds\ngpt-4-1106-preview, gpt-4-vision-preview max token (input 128k output 4k)', + maxTokenSettingTip: 'Your max token setting is high, potentially limiting space for prompts, queries, and data. Consider setting it below 2/3.', + setToCurrentModelMaxTokenTip: 'Max token is updated to the 80% maximum token of the current model {{maxToken}}.', + stop_sequences: 'Stop sequences', + stop_sequencesTip: 'Up to four sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.', + stop_sequencesPlaceholder: 'Enter sequence and press Tab', + }, + tone: { + Creative: 'Creative', + Balanced: 'Balanced', + Precise: 'Precise', + Custom: 'Custom', + }, + addMoreModel: 'Go to settings to add more models', + settingsLink: 'Model Provider Settings', + capabilities: 'MultiModal Capabilities', + }, + menus: { + status: 'beta', + explore: 'Explore', + apps: 'Studio', + appDetail: 'App Detail', + account: 'Account', + plugins: 'Plugins', + exploreMarketplace: 'Explore Marketplace', + pluginsTips: 'Integrate third-party plugins or create ChatGPT-compatible AI-Plugins.', + datasets: 'Knowledge', + datasetsTips: 'COMING SOON: Import your own text data or write data in real-time via Webhook for LLM context enhancement.', + newApp: 'New App', + newDataset: 'Create Knowledge', + tools: 'Tools', + }, + userProfile: { + settings: 'Settings', + contactUs: 'Contact Us', + emailSupport: 'Email Support', + workspace: 'Workspace', + createWorkspace: 'Create Workspace', + helpCenter: 'View Docs', + support: 'Support', + compliance: 'Compliance', + forum: 'Forum', + roadmap: 'Roadmap', + github: 'GitHub', + community: 'Community', + about: 'About', + logout: 'Log out', + }, + compliance: { + soc2Type1: 'SOC 2 Type I Report', + soc2Type2: 'SOC 2 Type II Report', + iso27001: 'ISO 27001:2022 Certification', + gdpr: 'GDPR DPA', + sandboxUpgradeTooltip: 'Only available with a Professional or Team plan.', + 
professionalUpgradeTooltip: 'Only available with a Team plan or above.', + }, + settings: { + accountGroup: 'GENERAL', + workplaceGroup: 'WORKSPACE', + generalGroup: 'GENERAL', + account: 'My account', + members: 'Members', + billing: 'Billing', + integrations: 'Integrations', + language: 'Language', + provider: 'Model Provider', + dataSource: 'Data Source', + plugin: 'Plugins', + apiBasedExtension: 'API Extension', + }, + account: { + account: 'Account', + myAccount: 'My Account', + studio: 'Studio', + avatar: 'Avatar', + name: 'Name', + email: 'Email', + password: 'Password', + passwordTip: 'You can set a permanent password if you don’t want to use temporary login codes', + setPassword: 'Set a password', + resetPassword: 'Reset password', + currentPassword: 'Current password', + newPassword: 'New password', + confirmPassword: 'Confirm password', + notEqual: 'Two passwords are different.', + langGeniusAccount: 'Account\'s data', + langGeniusAccountTip: 'The user data of your account.', + editName: 'Edit Name', + showAppLength: 'Show {{length}} apps', + delete: 'Delete Account', + deleteTip: 'Please note, once confirmed, as the Owner of any Workspaces, your workspaces will be scheduled in a queue for permanent deletion, and all your user data will be queued for permanent deletion.', + deletePrivacyLinkTip: 'For more information about how we handle your data, please see our ', + deletePrivacyLink: 'Privacy Policy.', + deleteSuccessTip: 'Your account needs time to finish deleting. 
We\'ll email you when it\'s all done.', + deleteLabel: 'To confirm, please type in your email below', + deletePlaceholder: 'Please enter your email', + sendVerificationButton: 'Send Verification Code', + verificationLabel: 'Verification Code', + verificationPlaceholder: 'Paste the 6-digit code', + permanentlyDeleteButton: 'Permanently Delete Account', + feedbackTitle: 'Feedback', + feedbackLabel: 'Tell us why you deleted your account?', + feedbackPlaceholder: 'Optional', + editWorkspaceInfo: 'Edit Workspace Info', + workspaceName: 'Workspace Name', + workspaceIcon: 'Workspace Icon', + changeEmail: { + title: 'Change Email', + verifyEmail: 'Verify your current email', + newEmail: 'Set up a new email address', + verifyNew: 'Verify your new email', + authTip: 'Once your email is changed, Google or GitHub accounts linked to your old email will no longer be able to log in to this account.', + content1: 'If you continue, we\'ll send a verification code to {{email}} for re-authentication.', + content2: 'Your current email is {{email}}. 
Verification code has been sent to this email address.', + content3: 'Enter a new email and we will send you a verification code.', + content4: 'We just sent you a temporary verification code to {{email}}.', + codeLabel: 'Verification code', + codePlaceholder: 'Paste the 6-digit code', + emailLabel: 'New email', + emailPlaceholder: 'Enter a new email', + existingEmail: 'A user with this email already exists.', + unAvailableEmail: 'This email is temporarily unavailable.', + sendVerifyCode: 'Send verification code', + continue: 'Continue', + changeTo: 'Change to {{email}}', + resendTip: 'Didn\'t receive a code?', + resendCount: 'Resend in {{count}}s', + resend: 'Resend', + }, + }, + members: { + team: 'Team', + invite: 'Add', + name: 'NAME', + lastActive: 'LAST ACTIVE', + role: 'ROLES', + pending: 'Pending...', + owner: 'Owner', + admin: 'Admin', + adminTip: 'Can build apps & manage team settings', + normal: 'Normal', + normalTip: 'Only can use apps, can not build apps', + builder: 'Builder', + builderTip: 'Can build & edit own apps', + editor: 'Editor', + editorTip: 'Can build & edit apps', + datasetOperator: 'Knowledge Admin', + datasetOperatorTip: 'Only can manage the knowledge base', + inviteTeamMember: 'Add team member', + inviteTeamMemberTip: 'They can access your team data directly after signing in.', + emailNotSetup: 'Email server is not set up, so invitation emails cannot be sent. 
Please notify users of the invitation link that will be issued after invitation instead.', + email: 'Email', + emailInvalid: 'Invalid Email Format', + emailPlaceholder: 'Please input emails', + sendInvite: 'Send Invite', + invitedAsRole: 'Invited as {{role}} user', + invitationSent: 'Invitation sent', + invitationSentTip: 'Invitation sent, and they can sign in to Dify to access your team data.', + invitationLink: 'Invitation Link', + failedInvitationEmails: 'Below users were not invited successfully', + ok: 'OK', + removeFromTeam: 'Remove from team', + removeFromTeamTip: 'Will remove team access', + setAdmin: 'Set as administrator', + setMember: 'Set to ordinary member', + setBuilder: 'Set as builder', + setEditor: 'Set as editor', + disInvite: 'Cancel the invitation', + deleteMember: 'Delete Member', + you: '(You)', + transferOwnership: 'Transfer Ownership', + transferModal: { + title: 'Transfer workspace ownership', + warning: 'You\'re about to transfer ownership of “{{workspace}}”. This takes effect immediately and can\'t be undone.', + warningTip: 'You\'ll become an admin member, and the new owner will have full control.', + sendTip: 'If you continue, we\'ll send a verification code to {{email}} for re-authentication.', + verifyEmail: 'Verify your current email', + verifyContent: 'Your current email is {{email}}.', + verifyContent2: 'We\'ll send a temporary verification code to this email for re-authentication.', + codeLabel: 'Verification code', + codePlaceholder: 'Paste the 6-digit code', + resendTip: 'Didn\'t receive a code?', + resendCount: 'Resend in {{count}}s', + resend: 'Resend', + transferLabel: 'Transfer workspace ownership to', + transferPlaceholder: 'Select a workspace member…', + sendVerifyCode: 'Send verification code', + continue: 'Continue', + transfer: 'Transfer workspace ownership', + }, + }, + feedback: { + title: 'Provide Feedback', + subtitle: 'Please tell us what went wrong with this response', + content: 'Feedback Content', + placeholder: 
'Please describe what went wrong or how we can improve...', + }, + integrations: { + connected: 'Connected', + google: 'Google', + googleAccount: 'Login with Google account', + github: 'GitHub', + githubAccount: 'Login with GitHub account', + connect: 'Connect', + }, + language: { + displayLanguage: 'Display Language', + timezone: 'Time Zone', + }, + provider: { + apiKey: 'API Key', + enterYourKey: 'Enter your API key here', + invalidKey: 'Invalid OpenAI API key', + validatedError: 'Validation failed: ', + validating: 'Validating key...', + saveFailed: 'Save api key failed', + apiKeyExceedBill: 'This API KEY has no quota available, please read', + addKey: 'Add Key', + comingSoon: 'Coming Soon', + editKey: 'Edit', + invalidApiKey: 'Invalid API key', + azure: { + apiBase: 'API Base', + apiBasePlaceholder: 'The API Base URL of your Azure OpenAI Endpoint.', + apiKey: 'API Key', + apiKeyPlaceholder: 'Enter your API key here', + helpTip: 'Learn Azure OpenAI Service', + }, + openaiHosted: { + openaiHosted: 'Hosted OpenAI', + onTrial: 'ON TRIAL', + exhausted: 'QUOTA EXHAUSTED', + desc: 'The OpenAI hosting service provided by Dify allows you to use models such as GPT-3.5. Before your trial quota is used up, you need to set up other model providers.', + callTimes: 'Call times', + usedUp: 'Trial quota used up. Add own Model Provider.', + useYourModel: 'Currently using own Model Provider.', + close: 'Close', + }, + anthropicHosted: { + anthropicHosted: 'Anthropic Claude', + onTrial: 'ON TRIAL', + exhausted: 'QUOTA EXHAUSTED', + desc: 'Powerful model, which excels at a wide range of tasks from sophisticated dialogue and creative content generation to detailed instruction.', + callTimes: 'Call times', + usedUp: 'Trial quota used up. Add own Model Provider.', + useYourModel: 'Currently using own Model Provider.', + close: 'Close', + trialQuotaTip: 'Your Anthropic trial quota will expire on 2025/03/17 and will no longer be available thereafter. 
Please make use of it in time.', + }, + anthropic: { + using: 'The embedding capability is using', + enableTip: 'To enable the Anthropic model, you need to bind to OpenAI or Azure OpenAI Service first.', + notEnabled: 'Not enabled', + keyFrom: 'Get your API key from Anthropic', + }, + encrypted: { + front: 'Your API KEY will be encrypted and stored using', + back: ' technology.', + }, + }, + modelProvider: { + notConfigured: 'The system model has not yet been fully configured', + systemModelSettings: 'System Model Settings', + systemModelSettingsLink: 'Why is it necessary to set up a system model?', + selectModel: 'Select your model', + setupModelFirst: 'Please set up your model first', + systemReasoningModel: { + key: 'System Reasoning Model', + tip: 'Set the default inference model to be used for creating applications, as well as features such as dialogue name generation and next question suggestion will also use the default inference model.', + }, + embeddingModel: { + key: 'Embedding Model', + tip: 'Set the default model for document embedding processing of the Knowledge, both retrieval and import of the Knowledge use this Embedding model for vectorization processing. Switching will cause the vector dimension between the imported Knowledge and the question to be inconsistent, resulting in retrieval failure. 
To avoid retrieval failure, please do not switch this model at will.', + required: 'Embedding Model is required', + }, + speechToTextModel: { + key: 'Speech-to-Text Model', + tip: 'Set the default model for speech-to-text input in conversation.', + }, + ttsModel: { + key: 'Text-to-Speech Model', + tip: 'Set the default model for text-to-speech input in conversation.', + }, + rerankModel: { + key: 'Rerank Model', + tip: 'Rerank model will reorder the candidate document list based on the semantic match with user query, improving the results of semantic ranking', + }, + apiKey: 'API-KEY', + quota: 'Quota', + searchModel: 'Search model', + noModelFound: 'No model found for {{model}}', + models: 'Models', + showMoreModelProvider: 'Show more model provider', + selector: { + tip: 'This model has been removed. Please add a model or select another model.', + emptyTip: 'No available models', + emptySetting: 'Please go to settings to configure', + rerankTip: 'Please set up the Rerank model', + }, + card: { + quota: 'QUOTA', + onTrial: 'On Trial', + paid: 'Paid', + quotaExhausted: 'Quota exhausted', + callTimes: 'Call times', + tokens: 'Tokens', + buyQuota: 'Buy Quota', + priorityUse: 'Priority use', + removeKey: 'Remove API Key', + tip: 'Priority will be given to the paid quota. The Trial quota will be used after the paid quota is exhausted.', + }, + item: { + deleteDesc: '{{modelName}} are being used as system reasoning models. Some functions will not be available after removal. 
Please confirm.', + freeQuota: 'FREE QUOTA', + }, + addApiKey: 'Add your API key', + invalidApiKey: 'Invalid API key', + encrypted: { + front: 'Your API KEY will be encrypted and stored using', + back: ' technology.', + }, + freeQuota: { + howToEarn: 'How to earn', + }, + addMoreModelProvider: 'ADD MORE MODEL PROVIDER', + addModel: 'Add Model', + modelsNum: '{{num}} Models', + showModels: 'Show Models', + showModelsNum: 'Show {{num}} Models', + collapse: 'Collapse', + config: 'Config', + modelAndParameters: 'Model and Parameters', + model: 'Model', + featureSupported: '{{feature}} supported', + callTimes: 'Call times', + credits: 'Message Credits', + buyQuota: 'Buy Quota', + getFreeTokens: 'Get free Tokens', + priorityUsing: 'Prioritize using', + deprecated: 'Deprecated', + confirmDelete: 'Confirm deletion?', + quotaTip: 'Remaining available free tokens', + loadPresets: 'Load Presets', + parameters: 'PARAMETERS', + loadBalancing: 'Load balancing', + loadBalancingDescription: 'Configure multiple credentials for the model and invoke them automatically. ', + loadBalancingHeadline: 'Load Balancing', + configLoadBalancing: 'Config Load Balancing', + modelHasBeenDeprecated: 'This model has been deprecated', + providerManaged: 'Provider managed', + providerManagedDescription: 'Use the single set of credentials provided by the model provider.', + defaultConfig: 'Default Config', + apiKeyStatusNormal: 'APIKey status is normal', + apiKeyRateLimit: 'Rate limit was reached, available after {{seconds}}s', + addConfig: 'Add Config', + editConfig: 'Edit Config', + loadBalancingLeastKeyWarning: 'To enable load balancing at least 2 keys must be enabled.', + loadBalancingInfo: 'By default, load balancing uses the Round-robin strategy. 
If rate limiting is triggered, a 1-minute cooldown period will be applied.', + upgradeForLoadBalancing: 'Upgrade your plan to enable Load Balancing.', + toBeConfigured: 'To be configured', + configureTip: 'Set up api-key or add model to use', + installProvider: 'Install model providers', + installDataSourceProvider: 'Install data source providers', + discoverMore: 'Discover more in ', + emptyProviderTitle: 'Model provider not set up', + emptyProviderTip: 'Please install a model provider first.', + auth: { + unAuthorized: 'Unauthorized', + authRemoved: 'Auth removed', + apiKeys: 'API Keys', + addApiKey: 'Add API Key', + addModel: 'Add model', + addNewModel: 'Add new model', + addCredential: 'Add credential', + addModelCredential: 'Add model credential', + editModelCredential: 'Edit model credential', + modelCredentials: 'Model credentials', + modelCredential: 'Model credential', + configModel: 'Config model', + configLoadBalancing: 'Config Load Balancing', + authorizationError: 'Authorization error', + specifyModelCredential: 'Specify model credential', + specifyModelCredentialTip: 'Use a configured model credential.', + providerManaged: 'Provider managed', + providerManagedTip: 'The current configuration is hosted by the provider.', + apiKeyModal: { + title: 'API Key Authorization Configuration', + desc: 'After configuring credentials, all members within the workspace can use this model when orchestrating applications.', + addModel: 'Add model', + }, + manageCredentials: 'Manage Credentials', + customModelCredentials: 'Custom Model Credentials', + addNewModelCredential: 'Add new model credential', + removeModel: 'Remove Model', + selectModelCredential: 'Select a model credential', + customModelCredentialsDeleteTip: 'Credential is in use and cannot be deleted', + }, + parametersInvalidRemoved: 'Some parameters are invalid and have been removed', + }, + dataSource: { + add: 'Add a data source', + connect: 'Connect', + configure: 'Configure', + notion: { + title: 
'Notion', + description: 'Using Notion as a data source for the Knowledge.', + connectedWorkspace: 'Connected workspace', + addWorkspace: 'Add workspace', + connected: 'Connected', + disconnected: 'Disconnected', + changeAuthorizedPages: 'Change authorized pages', + integratedAlert: 'Notion is integrated via internal credential, no need to re-authorize.', + pagesAuthorized: 'Pages authorized', + sync: 'Sync', + remove: 'Remove', + selector: { + pageSelected: 'Pages Selected', + searchPages: 'Search pages...', + noSearchResult: 'No search results', + addPages: 'Add pages', + preview: 'PREVIEW', + }, + }, + website: { + title: 'Website', + description: 'Import content from websites using web crawler.', + with: 'With', + configuredCrawlers: 'Configured crawlers', + active: 'Active', + inactive: 'Inactive', + }, + }, + plugin: { + serpapi: { + apiKey: 'API Key', + apiKeyPlaceholder: 'Enter your API key', + keyFrom: 'Get your SerpAPI key from SerpAPI Account Page', + }, + }, + apiBasedExtension: { + title: 'API extensions provide centralized API management, simplifying configuration for easy use across Dify\'s applications.', + link: 'Learn how to develop your own API Extension.', + add: 'Add API Extension', + selector: { + title: 'API Extension', + placeholder: 'Please select API extension', + manage: 'Manage API Extension', + }, + modal: { + title: 'Add API Extension', + editTitle: 'Edit API Extension', + name: { + title: 'Name', + placeholder: 'Please enter the name', + }, + apiEndpoint: { + title: 'API Endpoint', + placeholder: 'Please enter the API endpoint', + }, + apiKey: { + title: 'API-key', + placeholder: 'Please enter the API-key', + lengthError: 'API-key length cannot be less than 5 characters', + }, + }, + type: 'Type', + }, + about: { + changeLog: 'Changelog', + updateNow: 'Update now', + nowAvailable: 'Dify {{version}} is now available.', + latestAvailable: 'Dify {{version}} is the latest version available.', + }, + appMenus: { + overview: 'Monitoring', + 
promptEng: 'Orchestrate', + apiAccess: 'API Access', + logAndAnn: 'Logs & Annotations', + logs: 'Logs', + }, + environment: { + testing: 'TESTING', + development: 'DEVELOPMENT', + }, + appModes: { + completionApp: 'Text Generator', + chatApp: 'Chat App', + }, + datasetMenus: { + documents: 'Documents', + hitTesting: 'Retrieval Testing', + settings: 'Settings', + emptyTip: 'This Knowledge has not been integrated within any application. Please refer to the document for guidance.', + viewDoc: 'View documentation', + relatedApp: 'linked apps', + noRelatedApp: 'No linked apps', + pipeline: 'Pipeline', + }, + voiceInput: { + speaking: 'Speak now...', + converting: 'Converting to text...', + notAllow: 'microphone not authorized', + }, + modelName: { + 'gpt-3.5-turbo': 'GPT-3.5-Turbo', + 'gpt-3.5-turbo-16k': 'GPT-3.5-Turbo-16K', + 'gpt-4': 'GPT-4', + 'gpt-4-32k': 'GPT-4-32K', + 'text-davinci-003': 'Text-Davinci-003', + 'text-embedding-ada-002': 'Text-Embedding-Ada-002', + 'whisper-1': 'Whisper-1', + 'claude-instant-1': 'Claude-Instant', + 'claude-2': 'Claude-2', + }, + chat: { + renameConversation: 'Rename Conversation', + conversationName: 'Conversation name', + conversationNamePlaceholder: 'Please input conversation name', + conversationNameCanNotEmpty: 'Conversation name required', + citation: { + title: 'CITATIONS', + linkToDataset: 'Link to Knowledge', + characters: 'Characters:', + hitCount: 'Retrieval count:', + vectorHash: 'Vector hash:', + hitScore: 'Retrieval Score:', + }, + inputPlaceholder: 'Talk to {{botName}}', + inputDisabledPlaceholder: 'Preview Only', + thinking: 'Thinking...', + thought: 'Thought', + resend: 'Resend', + }, + promptEditor: { + placeholder: 'Write your prompt word here, enter \'{\' to insert a variable, enter \'/\' to insert a prompt content block', + context: { + item: { + title: 'Context', + desc: 'Insert context template', + }, + modal: { + title: '{{num}} Knowledge in Context', + add: 'Add Context ', + footer: 'You can manage contexts 
in the Context section below.', + }, + }, + history: { + item: { + title: 'Conversation History', + desc: 'Insert historical message template', + }, + modal: { + title: 'EXAMPLE', + user: 'Hello', + assistant: 'Hello! How can I assist you today?', + edit: 'Edit Conversation Role Names', + }, + }, + variable: { + item: { + title: 'Variables & External Tools', + desc: 'Insert Variables & External Tools', + }, + outputToolDisabledItem: { + title: 'Variables', + desc: 'Insert Variables', + }, + modal: { + add: 'New variable', + addTool: 'New tool', + }, + }, + query: { + item: { + title: 'Query', + desc: 'Insert user query template', + }, + }, + existed: 'Already exists in the prompt', + }, + imageUploader: { + uploadFromComputer: 'Upload from Computer', + uploadFromComputerReadError: 'Image reading failed, please try again.', + uploadFromComputerUploadError: 'Image upload failed, please upload again.', + uploadFromComputerLimit: 'Upload images cannot exceed {{size}} MB', + pasteImageLink: 'Paste image link', + pasteImageLinkInputPlaceholder: 'Paste image link here', + pasteImageLinkInvalid: 'Invalid image link', + imageUpload: 'Image Upload', + }, + fileUploader: { + uploadFromComputer: 'Local upload', + pasteFileLink: 'Paste file link', + pasteFileLinkInputPlaceholder: 'Enter URL...', + uploadFromComputerReadError: 'File reading failed, please try again.', + uploadFromComputerUploadError: 'File upload failed, please upload again.', + uploadFromComputerLimit: 'Upload {{type}} cannot exceed {{size}}', + pasteFileLinkInvalid: 'Invalid file link', + fileExtensionNotSupport: 'File extension not supported', + fileExtensionBlocked: 'This file type is blocked for security reasons', + }, + tag: { + placeholder: 'All Tags', + addNew: 'Add new tag', + noTag: 'No tags', + noTagYet: 'No tags yet', + addTag: 'Add tags', + editTag: 'Edit tags', + manageTags: 'Manage Tags', + selectorPlaceholder: 'Type to search or create', + create: 'Create', + delete: 'Delete tag', + deleteTip: 
'The tag is being used, delete it?', + created: 'Tag created successfully', + failed: 'Tag creation failed', + }, + license: { + expiring: 'Expiring in one day', + expiring_plural: 'Expiring in {{count}} days', + unlimited: 'Unlimited', + }, + pagination: { + perPage: 'Items per page', + }, + avatar: { + deleteTitle: 'Remove Avatar', + deleteDescription: 'Are you sure you want to remove your profile picture? Your account will use the default initial avatar.', + }, + imageInput: { + dropImageHere: 'Drop your image here, or', + browse: 'browse', + supportedFormats: 'Supports PNG, JPG, JPEG, WEBP and GIF', + }, + you: 'You', + dynamicSelect: { + error: 'Loading options failed', + noData: 'No options available', + loading: 'Loading options...', + selected: '{{count}} selected', + }, +} + +export default translation diff --git a/web/i18n/en-US/explore.json b/web/i18n/en-US/explore.json index ba8fd9d448..89bbea81e5 100644 --- a/web/i18n/en-US/explore.json +++ b/web/i18n/en-US/explore.json @@ -1,12 +1,14 @@ { - "appCard.addToWorkspace": "Add to Workspace", - "appCard.customize": "Customize", + "appCard.addToWorkspace": "Use template", + "appCard.try": "Details", "appCustomize.nameRequired": "App name is required", "appCustomize.subTitle": "App icon & name", "appCustomize.title": "Create app from {{name}}", - "apps.allCategories": "Recommended", - "apps.description": "Use these template apps instantly or customize your own apps based on the templates.", - "apps.title": "Explore Apps", + "apps.allCategories": "All", + "apps.resetFilter": "Clear filter", + "apps.resultNum": "{{num}} results", + "apps.title": "Try Dify's curated apps to find AI solutions for your business", + "banner.viewMore": "VIEW MORE", "category.Agent": "Agent", "category.Assistant": "Assistant", "category.Entertainment": "Entertainment", @@ -23,7 +25,16 @@ "sidebar.chat": "Chat", "sidebar.delete.content": "Are you sure you want to delete this app?", "sidebar.delete.title": "Delete app", - 
"sidebar.discovery": "Discovery", - "sidebar.workspace": "Workspace", - "title": "Explore" + "sidebar.noApps.description": "Published web apps will appear here", + "sidebar.noApps.learnMore": "Learn more", + "sidebar.noApps.title": "No web apps", + "sidebar.title": "App gallery", + "sidebar.webApps": "Web apps", + "title": "Explore", + "tryApp.category": "Category", + "tryApp.createFromSampleApp": "Create from this sample app", + "tryApp.requirements": "Requirements", + "tryApp.tabHeader.detail": "Orchestration Details", + "tryApp.tabHeader.try": "Try it", + "tryApp.tryInfo": "This is a sample app. You can try up to 5 messages. To keep using it, click \"Create form this sample app\" and set it up!" } diff --git a/web/i18n/en-US/explore.ts b/web/i18n/en-US/explore.ts new file mode 100644 index 0000000000..b1891f6d3e --- /dev/null +++ b/web/i18n/en-US/explore.ts @@ -0,0 +1,63 @@ +const translation = { + title: 'Explore', + sidebar: { + title: 'App gallery', + chat: 'Chat', + webApps: 'Web apps', + action: { + pin: 'Pin', + unpin: 'Unpin', + rename: 'Rename', + delete: 'Delete', + }, + delete: { + title: 'Delete app', + content: 'Are you sure you want to delete this app?', + }, + noApps: { + title: 'No web apps', + description: 'Published web apps will appear here', + learnMore: 'Learn more', + }, + }, + apps: { + title: 'Try Dify\'s curated apps to find AI solutions for your business', + allCategories: 'All', + resultNum: '{{num}} results', + resetFilter: 'Clear filter', + }, + appCard: { + addToWorkspace: 'Use template', + try: 'Details', + }, + tryApp: { + tabHeader: { + try: 'Try it', + detail: 'Orchestration Details', + }, + createFromSampleApp: 'Create from this sample app', + category: 'Category', + requirements: 'Requirements', + tryInfo: 'This is a sample app. You can try up to 5 messages. 
To keep using it, click "Create form this sample app" and set it up!', + }, + appCustomize: { + title: 'Create app from {{name}}', + subTitle: 'App icon & name', + nameRequired: 'App name is required', + }, + category: { + Agent: 'Agent', + Assistant: 'Assistant', + Writing: 'Writing', + Translate: 'Translate', + Programming: 'Programming', + HR: 'HR', + Workflow: 'Workflow', + Entertainment: 'Entertainment', + }, + banner: { + viewMore: 'VIEW MORE', + }, +} + +export default translation diff --git a/web/i18n/en-US/workflow.json b/web/i18n/en-US/workflow.json index 107dad5b28..198a1d0061 100644 --- a/web/i18n/en-US/workflow.json +++ b/web/i18n/en-US/workflow.json @@ -126,6 +126,7 @@ "common.currentDraftUnpublished": "Current Draft Unpublished", "common.currentView": "Current View", "common.currentWorkflow": "Current Workflow", + "common.data": "Data", "common.debugAndPreview": "Preview", "common.disconnect": "Disconnect", "common.duplicate": "Duplicate", @@ -650,6 +651,7 @@ "nodes.llm.jsonSchema.warningTips.saveSchema": "Please finish editing the current field before saving the schema", "nodes.llm.model": "model", "nodes.llm.notSetContextInPromptTip": "To enable the context feature, please fill in the context variable in PROMPT.", + "nodes.llm.outputVars.generation": "Generation Information", "nodes.llm.outputVars.output": "Generate content", "nodes.llm.outputVars.reasoning_content": "Reasoning Content", "nodes.llm.outputVars.usage": "Model Usage Information", @@ -666,6 +668,7 @@ "nodes.llm.roleDescription.user": "Provide instructions, queries, or any text-based input to the model", "nodes.llm.singleRun.variable": "Variable", "nodes.llm.sysQueryInUser": "sys.query in user message is required", + "nodes.llm.tools.title": "Tools", "nodes.llm.variables": "variables", "nodes.llm.vision": "vision", "nodes.loop.ErrorMethod.continueOnError": "Continue on Error", diff --git a/web/i18n/es-ES/explore.json b/web/i18n/es-ES/explore.json index 51308de42d..e5d249741d 100644 --- 
a/web/i18n/es-ES/explore.json +++ b/web/i18n/es-ES/explore.json @@ -1,12 +1,7 @@ { - "appCard.addToWorkspace": "Agregar al espacio de trabajo", - "appCard.customize": "Personalizar", "appCustomize.nameRequired": "El nombre de la aplicación es obligatorio", "appCustomize.subTitle": "Icono y nombre de la aplicación", "appCustomize.title": "Crear aplicación a partir de {{name}}", - "apps.allCategories": "Recomendado", - "apps.description": "Utiliza estas aplicaciones de plantilla al instante o personaliza tus propias aplicaciones basadas en las plantillas.", - "apps.title": "Explorar aplicaciones de Dify", "category.Agent": "Agente", "category.Assistant": "Asistente", "category.Entertainment": "Entretenimiento", @@ -23,7 +18,5 @@ "sidebar.chat": "Chat", "sidebar.delete.content": "¿Estás seguro de que quieres eliminar esta aplicación?", "sidebar.delete.title": "Eliminar aplicación", - "sidebar.discovery": "Descubrimiento", - "sidebar.workspace": "Espacio de trabajo", "title": "Explorar" } diff --git a/web/i18n/es-ES/explore.ts b/web/i18n/es-ES/explore.ts new file mode 100644 index 0000000000..dcd7e7ab91 --- /dev/null +++ b/web/i18n/es-ES/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'Explorar', + sidebar: { + chat: 'Chat', + action: { + pin: 'Anclar', + unpin: 'Desanclar', + rename: 'Renombrar', + delete: 'Eliminar', + }, + delete: { + title: 'Eliminar aplicación', + content: '¿Estás seguro de que quieres eliminar esta aplicación?', + }, + }, + apps: { + }, + appCard: { + customize: 'Personalizar', + }, + appCustomize: { + title: 'Crear aplicación a partir de {{name}}', + subTitle: 'Icono y nombre de la aplicación', + nameRequired: 'El nombre de la aplicación es obligatorio', + }, + category: { + Assistant: 'Asistente', + Writing: 'Escritura', + Translate: 'Traducción', + Programming: 'Programación', + HR: 'Recursos Humanos', + Agent: 'Agente', + Workflow: 'Flujo de trabajo', + Entertainment: 'Entretenimiento', + }, +} + +export default translation diff 
--git a/web/i18n/fa-IR/explore.json b/web/i18n/fa-IR/explore.json index 206a24ea5b..11a7c76465 100644 --- a/web/i18n/fa-IR/explore.json +++ b/web/i18n/fa-IR/explore.json @@ -1,12 +1,7 @@ { - "appCard.addToWorkspace": "افزودن به فضای کاری", - "appCard.customize": "سفارشی کردن", "appCustomize.nameRequired": "نام برنامه الزامی است", "appCustomize.subTitle": "آیکون و نام برنامه", "appCustomize.title": "ایجاد برنامه از {{name}}", - "apps.allCategories": "پیشنهاد شده", - "apps.description": "از این برنامه‌های قالبی بلافاصله استفاده کنید یا برنامه‌های خود را بر اساس این قالب‌ها سفارشی کنید.", - "apps.title": "کاوش برنامه‌ها توسط دیفی", "category.Agent": "عامل", "category.Assistant": "دستیار", "category.Entertainment": "سرگرمی", @@ -23,7 +18,5 @@ "sidebar.chat": "چت", "sidebar.delete.content": "آیا مطمئن هستید که می‌خواهید این برنامه را حذف کنید؟", "sidebar.delete.title": "حذف برنامه", - "sidebar.discovery": "کشف", - "sidebar.workspace": "فضای کاری", "title": "کاوش" } diff --git a/web/i18n/fa-IR/explore.ts b/web/i18n/fa-IR/explore.ts new file mode 100644 index 0000000000..0c28102380 --- /dev/null +++ b/web/i18n/fa-IR/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'کاوش', + sidebar: { + chat: 'چت', + action: { + pin: 'سنجاق کردن', + unpin: 'برداشتن سنجاق', + rename: 'تغییر نام', + delete: 'حذف', + }, + delete: { + title: 'حذف برنامه', + content: 'آیا مطمئن هستید که می‌خواهید این برنامه را حذف کنید؟', + }, + }, + apps: { + }, + appCard: { + customize: 'سفارشی کردن', + }, + appCustomize: { + title: 'ایجاد برنامه از {{name}}', + subTitle: 'آیکون و نام برنامه', + nameRequired: 'نام برنامه الزامی است', + }, + category: { + Assistant: 'دستیار', + Writing: 'نوشتن', + Translate: 'ترجمه', + Programming: 'برنامه‌نویسی', + HR: 'منابع انسانی', + Agent: 'عامل', + Workflow: 'گردش', + Entertainment: 'سرگرمی', + }, +} + +export default translation diff --git a/web/i18n/fr-FR/explore.json b/web/i18n/fr-FR/explore.json index 34b8fbfc58..5b18a9d4d9 100644 --- 
a/web/i18n/fr-FR/explore.json +++ b/web/i18n/fr-FR/explore.json @@ -1,12 +1,7 @@ { - "appCard.addToWorkspace": "Ajouter à l'espace de travail", - "appCard.customize": "Personnaliser", "appCustomize.nameRequired": "Le nom de l'application est requis", "appCustomize.subTitle": "Icône de l'application & nom", "appCustomize.title": "Créer une application à partir de {{name}}", - "apps.allCategories": "Recommandé", - "apps.description": "Utilisez ces applications modèles instantanément ou personnalisez vos propres applications basées sur les modèles.", - "apps.title": "Explorez les applications par Dify", "category.Agent": "Agent", "category.Assistant": "Assistant", "category.Entertainment": "Divertissement", @@ -23,7 +18,5 @@ "sidebar.chat": "Discussion", "sidebar.delete.content": "Êtes-vous sûr de vouloir supprimer cette application ?", "sidebar.delete.title": "Supprimer l'application", - "sidebar.discovery": "Découverte", - "sidebar.workspace": "Espace de travail", "title": "Explorer" } diff --git a/web/i18n/fr-FR/explore.ts b/web/i18n/fr-FR/explore.ts new file mode 100644 index 0000000000..1ab22c160b --- /dev/null +++ b/web/i18n/fr-FR/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'Explorer', + sidebar: { + chat: 'Discussion', + action: { + pin: 'Épingle', + unpin: 'Détacher', + rename: 'Renommer', + delete: 'Supprimer', + }, + delete: { + title: 'Supprimer l\'application', + content: 'Êtes-vous sûr de vouloir supprimer cette application ?', + }, + }, + apps: { + }, + appCard: { + customize: 'Personnaliser', + }, + appCustomize: { + title: 'Créer une application à partir de {{name}}', + subTitle: 'Icône de l\'application & nom', + nameRequired: 'Le nom de l\'application est requis', + }, + category: { + Assistant: 'Assistant', + Writing: 'Écriture', + Translate: 'Traduire', + Programming: 'Programmation', + HR: 'RH', + Agent: 'Agent', + Workflow: 'Flux de travail', + Entertainment: 'Divertissement', + }, +} + +export default translation diff --git 
a/web/i18n/hi-IN/explore.json b/web/i18n/hi-IN/explore.json index 737868a4e5..955ca7a56e 100644 --- a/web/i18n/hi-IN/explore.json +++ b/web/i18n/hi-IN/explore.json @@ -1,12 +1,7 @@ { - "appCard.addToWorkspace": "कार्यक्षेत्र में जोड़ें", - "appCard.customize": "अनुकूलित करें", "appCustomize.nameRequired": "ऐप का नाम आवश्यक है", "appCustomize.subTitle": "ऐप आइकन और नाम", "appCustomize.title": "{{name}} से ऐप बनाएँ", - "apps.allCategories": "अनुशंसित", - "apps.description": "इन टेम्प्लेट ऐप्स का तुरंत उपयोग करें या टेम्प्लेट्स के आधार पर अपने स्वयं के ऐप्स को कस्टमाइज़ करें।", - "apps.title": "डिफ़ी द्वारा ऐप्स का अन्वेषण करें", "category.Agent": "आढ़तिया", "category.Assistant": "सहायक", "category.Entertainment": "मनोरंजन", @@ -23,7 +18,5 @@ "sidebar.chat": "चैट", "sidebar.delete.content": "क्या आप वाकई इस ऐप को हटाना चाहते हैं?", "sidebar.delete.title": "ऐप हटाएं", - "sidebar.discovery": "खोज", - "sidebar.workspace": "कार्यक्षेत्र", "title": "अन्वेषण करें" } diff --git a/web/i18n/hi-IN/explore.ts b/web/i18n/hi-IN/explore.ts new file mode 100644 index 0000000000..879b5c4a5d --- /dev/null +++ b/web/i18n/hi-IN/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'अन्वेषण करें', + sidebar: { + chat: 'चैट', + action: { + pin: 'पिन करें', + unpin: 'पिन हटाएँ', + rename: 'नाम बदलें', + delete: 'हटाएं', + }, + delete: { + title: 'ऐप हटाएं', + content: 'क्या आप वाकई इस ऐप को हटाना चाहते हैं?', + }, + }, + apps: { + }, + appCard: { + customize: 'अनुकूलित करें', + }, + appCustomize: { + title: '{{name}} से ऐप बनाएँ', + subTitle: 'ऐप आइकन और नाम', + nameRequired: 'ऐप का नाम आवश्यक है', + }, + category: { + Assistant: 'सहायक', + Writing: 'लेखन', + Translate: 'अनुवाद', + Programming: 'प्रोग्रामिंग', + HR: 'मानव संसाधन', + Workflow: 'कार्यप्रवाह', + Agent: 'आढ़तिया', + Entertainment: 'मनोरंजन', + }, +} + +export default translation diff --git a/web/i18n/id-ID/explore.json b/web/i18n/id-ID/explore.json index 3ba35de9eb..c74576c2d7 100644 --- a/web/i18n/id-ID/explore.json +++ 
b/web/i18n/id-ID/explore.json @@ -1,12 +1,7 @@ { - "appCard.addToWorkspace": "Tambahkan ke Ruang Kerja", - "appCard.customize": "Menyesuaikan", "appCustomize.nameRequired": "Nama aplikasi diperlukan", "appCustomize.subTitle": "Ikon & nama aplikasi", "appCustomize.title": "Buat aplikasi dari {{name}}", - "apps.allCategories": "Direkomendasikan", - "apps.description": "Gunakan aplikasi templat ini secara instan atau sesuaikan aplikasi Anda sendiri berdasarkan templat.", - "apps.title": "Jelajahi Aplikasi", "category.Agent": "Agen", "category.Assistant": "Asisten", "category.Entertainment": "Hiburan", @@ -23,7 +18,5 @@ "sidebar.chat": "Mengobrol", "sidebar.delete.content": "Apakah Anda yakin ingin menghapus aplikasi ini?", "sidebar.delete.title": "Hapus aplikasi", - "sidebar.discovery": "Penemuan", - "sidebar.workspace": "Workspace", "title": "Menjelajahi" } diff --git a/web/i18n/id-ID/explore.ts b/web/i18n/id-ID/explore.ts new file mode 100644 index 0000000000..aa6fa46d16 --- /dev/null +++ b/web/i18n/id-ID/explore.ts @@ -0,0 +1,37 @@ +const translation = { + sidebar: { + action: { + unpin: 'Lepaskan sematan', + pin: 'Sematkan', + delete: 'Hapus', + rename: 'Ganti nama', + }, + delete: { + content: 'Apakah Anda yakin ingin menghapus aplikasi ini?', + title: 'Hapus aplikasi', + }, + chat: 'Mengobrol', + }, + apps: { + }, + appCard: { + customize: 'Menyesuaikan', + }, + appCustomize: { + subTitle: 'Ikon & nama aplikasi', + nameRequired: 'Nama aplikasi diperlukan', + }, + category: { + Entertainment: 'Hiburan', + Agent: 'Agen', + Writing: 'Tulisan', + Assistant: 'Asisten', + Programming: 'Pemrograman', + Translate: 'Terjemah', + Workflow: 'Alur Kerja', + HR: 'HR', + }, + title: 'Menjelajahi', +} + +export default translation diff --git a/web/i18n/it-IT/explore.json b/web/i18n/it-IT/explore.json index 80dc79df02..6e3e400a2b 100644 --- a/web/i18n/it-IT/explore.json +++ b/web/i18n/it-IT/explore.json @@ -1,12 +1,7 @@ { - "appCard.addToWorkspace": "Aggiungi a Workspace", - 
"appCard.customize": "Personalizza", "appCustomize.nameRequired": "Il nome dell'app è obbligatorio", "appCustomize.subTitle": "Icona & nome dell'app", "appCustomize.title": "Crea app da {{name}}", - "apps.allCategories": "Consigliato", - "apps.description": "Usa queste app modello istantaneamente o personalizza le tue app basate sui modelli.", - "apps.title": "Esplora App di Dify", "category.Agent": "Agente", "category.Assistant": "Assistente", "category.Entertainment": "Intrattenimento", @@ -23,7 +18,5 @@ "sidebar.chat": "Chat", "sidebar.delete.content": "Sei sicuro di voler eliminare questa app?", "sidebar.delete.title": "Elimina app", - "sidebar.discovery": "Scoperta", - "sidebar.workspace": "Workspace", "title": "Esplora" } diff --git a/web/i18n/it-IT/explore.ts b/web/i18n/it-IT/explore.ts new file mode 100644 index 0000000000..1150f609af --- /dev/null +++ b/web/i18n/it-IT/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'Esplora', + sidebar: { + chat: 'Chat', + action: { + pin: 'Fissa', + unpin: 'Sblocca', + rename: 'Rinomina', + delete: 'Elimina', + }, + delete: { + title: 'Elimina app', + content: 'Sei sicuro di voler eliminare questa app?', + }, + }, + apps: { + }, + appCard: { + customize: 'Personalizza', + }, + appCustomize: { + title: 'Crea app da {{name}}', + subTitle: 'Icona & nome dell\'app', + nameRequired: 'Il nome dell\'app è obbligatorio', + }, + category: { + Assistant: 'Assistente', + Writing: 'Scrittura', + Translate: 'Traduzione', + Programming: 'Programmazione', + HR: 'Risorse Umane', + Workflow: 'Flusso di lavoro', + Agent: 'Agente', + Entertainment: 'Intrattenimento', + }, +} + +export default translation diff --git a/web/i18n/ja-JP/common.json b/web/i18n/ja-JP/common.json index 11f543e7e5..ff6f15cad0 100644 --- a/web/i18n/ja-JP/common.json +++ b/web/i18n/ja-JP/common.json @@ -108,6 +108,7 @@ "chat.conversationName": "会話名", "chat.conversationNameCanNotEmpty": "会話名は必須です", "chat.conversationNamePlaceholder": "会話名を入力してください", + 
"chat.inputDisabledPlaceholder": "プレビューのみ", "chat.inputPlaceholder": "{{botName}} と話す", "chat.renameConversation": "会話名を変更", "chat.resend": "再送信してください", diff --git a/web/i18n/ja-JP/common.ts b/web/i18n/ja-JP/common.ts new file mode 100644 index 0000000000..a4f858290a --- /dev/null +++ b/web/i18n/ja-JP/common.ts @@ -0,0 +1,776 @@ +const translation = { + theme: { + theme: 'テーマ', + light: '明るい', + dark: '暗い', + auto: 'システム', + }, + api: { + success: '成功', + actionSuccess: 'アクションが成功しました', + saved: '保存済み', + create: '作成済み', + remove: '削除済み', + }, + operation: { + create: '作成', + confirm: '確認', + cancel: 'キャンセル', + clear: 'クリア', + save: '保存', + saveAndEnable: '保存 & 有効に', + edit: '編集', + add: '追加', + added: '追加済み', + refresh: 'リフレッシュ', + reset: 'リセット', + search: '検索', + change: '変更', + remove: '削除', + send: '送信', + copy: 'コピー', + lineBreak: '改行', + sure: '確認済み', + download: 'ダウンロード', + downloadSuccess: 'ダウンロード完了', + downloadFailed: 'ダウンロードに失敗しました、後で再試行してください。', + delete: '削除', + settings: '設定', + setup: 'セットアップ', + getForFree: '無料で入手', + reload: '再読み込み', + ok: 'OK', + log: 'ログ', + learnMore: '詳細はこちら', + params: 'パラメータ', + duplicate: '複製', + rename: '名前の変更', + audioSourceUnavailable: 'AudioSource が利用できません', + zoomIn: 'ズームインする', + openInNewTab: '新しいタブで開く', + zoomOut: 'ズームアウト', + copyImage: '画像をコピー', + saveAndRegenerate: '保存して子チャンクを再生成', + close: '閉じる', + view: '表示', + viewMore: 'さらに表示', + regenerate: '再生成', + submit: '送信', + skip: 'スキップ', + imageCopied: 'コピーした画像', + deleteApp: 'アプリを削除', + viewDetails: '詳細を見る', + copied: 'コピーしました', + in: '中', + format: 'フォーマット', + more: 'もっと', + selectAll: 'すべて選択', + deSelectAll: 'すべて選択解除', + now: '今', + config: 'コンフィグ', + yes: 'はい', + no: 'いいえ', + deleteConfirmTitle: '削除しますか?', + confirmAction: '操作を確認してください。', + }, + errorMsg: { + fieldRequired: '{{field}}は必要です', + urlError: 'URL は http:// または https:// で始まる必要があります', + }, + placeholder: { + input: '入力してください', + select: '選択してください', + }, + voice: { + language: { + zhHans: '中国語', + zhHant: 
'繁体字中国語', + enUS: '英語', + deDE: 'ドイツ語', + frFR: 'フランス語', + esES: 'スペイン語', + itIT: 'イタリア語', + thTH: 'タイ語', + idID: 'インドネシア語', + jaJP: '日本語', + koKR: '韓国語', + ptBR: 'ポルトガル語', + ruRU: 'ロシア語', + ukUA: 'ウクライナ語', + viVN: 'ベトナム語', + plPL: 'ポーランド語', + roRO: 'ルーマニア語', + hiIN: 'ヒンディー語', + trTR: 'トルコ語', + faIR: 'ペルシア語', + }, + }, + unit: { + char: '文字', + }, + actionMsg: { + noModification: '現在は変更されていません。', + modifiedSuccessfully: '変更が正常に行われました', + modifiedUnsuccessfully: '変更が失敗しました', + copySuccessfully: 'コピーが正常に行われました', + paySucceeded: '支払いが成功しました', + payCancelled: '支払いがキャンセルされました', + generatedSuccessfully: '生成が成功しました', + generatedUnsuccessfully: '生成が失敗しました', + }, + model: { + params: { + temperature: '温度', + temperatureTip: + 'ランダム性を制御します:温度を下げると、よりランダムな完成品が得られます。温度がゼロに近づくにつれて、モデルは決定的で反復的になります。', + top_p: '上位 P', + top_pTip: + 'ニュークリアスサンプリングによる多様性の制御:0.5 は、すべての尤度加重オプションの半分が考慮されることを意味します。', + presence_penalty: '存在ペナルティ', + presence_penaltyTip: + 'これまでのテキストにトークンが表示されるかどうかに基づいて、新しいトークンにいくらペナルティを科すかを制御します。\nモデルが新しいトピックについて話す可能性が高まります。', + frequency_penalty: '頻度ペナルティ', + frequency_penaltyTip: + 'これまでのテキスト内のトークンの既存の頻度に基づいて、新しいトークンにどれだけペナルティを科すかを制御します。\nモデルが同じ行を文字通りに繰り返す可能性が低くなります。', + max_tokens: '最大トークン', + max_tokensTip: + '返信の最大長をトークン単位で制限するために使用されます。\n大きな値はプロンプトの単語、チャットログ、およびナレッジのために残されたスペースを制限する可能性があります。\nそれを 2/3 以下に設定することをお勧めします。\ngpt-4-1106-preview、gpt-4-vision-preview の最大トークン(入力 128k 出力 4k)以下に設定することをお勧めします。', + maxTokenSettingTip: '最大トークン設定が高いため、プロンプト、クエリ、およびデータのスペースが制限される可能性があります。現在のモデルの最大トークンの 80% 以下に設定してください。', + setToCurrentModelMaxTokenTip: '最大トークンが現在のモデルの最大トークンの 80% に更新されました {{maxToken}}.', + stop_sequences: '停止シーケンス', + stop_sequencesTip: 'API が進行中のトークンの生成を停止する最大 4 つのシーケンス。返されたテキストには停止シーケンスは含まれません。', + stop_sequencesPlaceholder: 'シーケンスを入力してタブキーを押してください', + }, + tone: { + Creative: 'クリエイティブ', + Balanced: 'バランス', + Precise: '正確', + Custom: 'カスタム', + }, + addMoreModel: '設定画面から他のモデルを追加してください', + capabilities: 'マルチモーダル機能', + settingsLink: 'モデルプロバイダー設定', + }, + menus: { 
+ status: 'ベータ版', + explore: '探索', + apps: 'スタジオ', + appDetail: 'アプリの詳細', + account: 'アカウント', + plugins: 'プラグイン', + pluginsTips: 'サードパーティのプラグインを統合するか、ChatGPT 互換の AI プラグインを作成します。', + datasets: 'ナレッジ', + datasetsTips: '近日公開:独自のテキストデータをインポートするか、Webhook を介してリアルタイムにデータを記述して LLM コンテキストを強化します。', + newApp: '新しいアプリ', + newDataset: 'ナレッジの作成', + tools: 'ツール', + exploreMarketplace: 'マーケットプレイスを探索する', + }, + userProfile: { + settings: '設定', + emailSupport: 'サポート', + workspace: 'ワークスペース', + createWorkspace: 'ワークスペースを作成', + helpCenter: 'ドキュメントを見る', + support: 'サポート', + compliance: 'コンプライアンス', + roadmap: 'ロードマップ', + community: 'コミュニティ', + about: 'Dify について', + logout: 'ログアウト', + github: 'GitHub', + contactUs: 'お問い合わせ', + forum: 'フォーラム', + }, + compliance: { + soc2Type1: 'SOC 2 Type I 報告書', + soc2Type2: 'SOC 2 Type II 報告書', + iso27001: 'ISO 27001:2022 認証', + gdpr: 'GDPR データ処理契約(DPA)', + sandboxUpgradeTooltip: 'プロフェッショナルプランまたはチームプランでのみ利用可能', + professionalUpgradeTooltip: 'チームプラン以上の契約が必要です', + }, + settings: { + accountGroup: 'アカウント', + workplaceGroup: 'ワークスペース', + account: 'マイアカウント', + members: 'メンバー', + billing: '請求', + integrations: '統合', + language: '言語', + provider: 'モデルプロバイダー', + dataSource: 'データソース', + plugin: 'プラグイン', + apiBasedExtension: 'API 拡張', + generalGroup: '一般', + }, + account: { + avatar: 'アバター', + name: '名前', + email: 'メール', + password: 'パスワード', + passwordTip: '一時的なログインコードを使用したくない場合は、永続的なパスワードを設定できます。', + setPassword: 'パスワードを設定', + resetPassword: 'パスワードをリセット', + currentPassword: '現在のパスワード', + newPassword: '新しいパスワード', + confirmPassword: 'パスワードを確認', + notEqual: '2 つのパスワードが異なります。', + langGeniusAccount: 'アカウント関連データ', + langGeniusAccountTip: 'アカウントに関連するユーザーデータ。', + editName: '名前を編集', + showAppLength: '{{length}}アプリを表示', + delete: 'アカウントを削除', + deleteTip: 'アカウントを削除すると、すべてのデータが完全に消去され、復元できなくなります。', + account: 'アカウント', + myAccount: 'マイアカウント', + studio: 'スタジオ', + deletePrivacyLinkTip: 'お客様のデータの取り扱い方法の詳細については、当社の', + deletePrivacyLink: 'プライバシーポリシー。', + deleteSuccessTip: 
'アカウントの削除が完了するまでに時間が必要です。すべて完了しましたら、メールでお知らせします。', + deleteLabel: '確認するには、以下にメールアドレスを入力してください', + deletePlaceholder: 'メールアドレスを入力してください', + verificationLabel: '認証コード', + verificationPlaceholder: '6 桁のコードを貼り付けます', + permanentlyDeleteButton: 'アカウントを完全に削除', + feedbackTitle: 'フィードバック', + feedbackLabel: 'アカウントを削除した理由を教えてください。', + feedbackPlaceholder: '任意', + sendVerificationButton: '確認コードの送信', + editWorkspaceInfo: 'ワークスペース情報を編集', + workspaceName: 'ワークスペース名', + workspaceIcon: 'ワークスペースアイコン', + changeEmail: { + title: 'メールアドレスを変更', + verifyEmail: '現在のメールアドレスを確認してください', + newEmail: '新しいメールアドレスを設定する', + verifyNew: '新しいメールアドレスを確認してください', + authTip: 'メールアドレスが変更されると、旧メールアドレスにリンクされている Google または GitHub アカウントは、このアカウントにログインできなくなります。', + content1: '変更を続ける場合、{{email}} に認証用の確認コードをお送りします。', + content2: '現在のメールアドレスは {{email}} です。認証コードはこのメールアドレスに送信されました。', + content3: '新しいメールアドレスを入力すると、確認コードが送信されます。', + content4: '一時確認コードを {{email}} に送信しました。', + codeLabel: 'コード', + codePlaceholder: 'コードを入力してください', + emailLabel: '新しいメール', + emailPlaceholder: '新しいメールを入力してください', + existingEmail: 'このメールアドレスのユーザーは既に存在します', + unAvailableEmail: 'このメールアドレスは現在使用できません。', + sendVerifyCode: '確認コードを送信', + continue: '続行', + changeTo: '{{email}} に変更', + resendTip: 'コードが届きませんか?', + resendCount: '{{count}} 秒後に再送信', + resend: '再送信', + }, + }, + members: { + team: 'チーム', + invite: '招待', + name: '名前', + lastActive: '最終アクティブ', + role: 'ロール', + pending: '保留中...', + owner: 'オーナー', + admin: '管理者', + adminTip: 'アプリの構築およびチーム設定の管理ができます', + normal: '通常', + normalTip: 'アプリの使用のみが可能で、アプリの構築はできません', + builder: 'ビルダー', + builderTip: '独自のアプリを作成・編集できる', + editor: 'エディター', + editorTip: 'アプリの構築ができますが、チーム設定の管理はできません', + datasetOperator: 'ナレッジ管理員', + datasetOperatorTip: 'ナレッジベースのみを管理できる', + inviteTeamMember: 'チームメンバーを招待する', + inviteTeamMemberTip: '彼らはサインイン後、直接あなたのチームデータにアクセスできます。', + emailNotSetup: 'メールサーバーがセットアップされていないので、招待メールを送信することはできません。代わりに招待後に発行される招待リンクをユーザーに通知してください。', + email: 'メール', + emailInvalid: '無効なメール形式', + emailPlaceholder: 
'メールを入力してください', + sendInvite: '招待を送る', + invitedAsRole: '{{role}}ユーザーとして招待されました', + invitationSent: '招待が送信されました', + invitationSentTip: '招待が送信され、彼らは Dify にサインインしてあなたのチームデータにアクセスできます。', + invitationLink: '招待リンク', + failedInvitationEmails: '以下のユーザーは正常に招待されませんでした', + ok: 'OK', + removeFromTeam: 'チームから削除', + removeFromTeamTip: 'チームへのアクセスが削除されます', + setAdmin: '管理者に設定', + setMember: '通常のメンバーに設定', + setBuilder: 'ビルダーに設定', + setEditor: 'エディターに設定', + disInvite: '招待をキャンセル', + deleteMember: 'メンバーを削除', + you: '(あなた)', + transferOwnership: '所有権の移転', + transferModal: { + title: 'ワークスペースの所有権を移する', + warning: '「{{workspace}}」の所有権を移しようとしています。この操作は即時に有効となり、元に戻すことはできません。', + warningTip: 'あなたは管理者メンバーになり、新しいオーナーがすべての権限を持つことになります。', + sendTip: '続行する場合は、本人確認のため {{email}} に認証コードを送信します。', + verifyEmail: '現在のメールアドレスを確認', + verifyContent: '現在のメールアドレスは {{email}}。', + verifyContent2: 'このメールアドレスに一時的な認証コードを送信し、再認証を行います。', + codeLabel: '認証コード', + codePlaceholder: '6 桁のコードを入力してください', + resendTip: '認証コードを受け取れない場合は、', + resendCount: '{{count}} 秒後に再送信', + resend: '認証コードを再送信', + transferLabel: 'ワークスペースの所有権を転移する相手は', + transferPlaceholder: 'メールアドレスを入力してください', + sendVerifyCode: '認証コードを送信', + continue: '続行する', + transfer: 'ワークスペースの所有権を移する', + }, + }, + integrations: { + connected: '接続済み', + google: 'Google', + googleAccount: 'Google アカウントでログイン', + github: 'GitHub', + githubAccount: 'GitHub アカウントでログイン', + connect: '接続', + }, + language: { + displayLanguage: '表示言語', + timezone: 'タイムゾーン', + }, + provider: { + apiKey: 'API キー', + enterYourKey: 'ここに API キーを入力してください', + invalidKey: '無効な OpenAI API キー', + validatedError: '検証に失敗しました:', + validating: 'キーの検証中...', + saveFailed: 'API キーの保存に失敗しました', + apiKeyExceedBill: 'この API KEY には使用可能なクォータがありません。詳細は', + addKey: 'キーを追加', + comingSoon: '近日公開', + editKey: '編集', + invalidApiKey: '無効な API キー', + azure: { + apiBase: 'API ベース', + apiBasePlaceholder: 'Azure OpenAI エンドポイントの API ベース URL。', + apiKey: 'API キー', + apiKeyPlaceholder: 'ここに API キーを入力してください', + helpTip: 'Azure 
OpenAI サービスを学ぶ', + }, + openaiHosted: { + openaiHosted: 'ホステッド OpenAI', + onTrial: 'トライアル中', + exhausted: 'クォータが使い果たされました', + desc: 'Dify が提供する OpenAI ホスティングサービスを使用すると、GPT-3.5 などのモデルを使用できます。トライアルクォータが使い果たされる前に、他のモデルプロバイダを設定する必要があります。', + callTimes: '通話回数', + usedUp: 'トライアルクォータが使い果たされました。独自のモデルプロバイダを追加してください。', + useYourModel: '現在、独自のモデルプロバイダを使用しています。', + close: '閉じる', + }, + anthropicHosted: { + anthropicHosted: 'アンソピッククロード', + onTrial: 'トライアル中', + exhausted: 'クォータが使い果たされました', + desc: '高度なダイアログやクリエイティブなコンテンツ生成から詳細な指示まで、幅広いタスクに優れたパワフルなモデルです。', + callTimes: '通話回数', + usedUp: 'トライアルクォータが使い果たされました。独自のモデルプロバイダを追加してください。', + useYourModel: '現在、独自のモデルプロバイダを使用しています。', + close: '閉じる', + trialQuotaTip: 'お客様の Anthropic 試用枠は 2025/03/17 に失効し、その後は利用できなくなります。お早めにご利用ください。', + }, + anthropic: { + using: '埋め込み機能は使用中です', + enableTip: 'Anthropic モデルを有効にするには、まず OpenAI または Azure OpenAI サービスにバインドする必要があります。', + notEnabled: '有効にされていません', + keyFrom: 'Anthropic から API キーを取得してください', + }, + encrypted: { + front: 'API KEY は', + back: '技術を使用して暗号化および保存されます。', + }, + }, + modelProvider: { + notConfigured: 'システムモデルがまだ完全に設定されておらず、一部の機能が利用できない場合があります。', + systemModelSettings: 'システムモデル設定', + systemModelSettingsLink: 'システムモデルの設定が必要な理由は何ですか?', + selectModel: 'モデルを選択', + setupModelFirst: 'まずモデルをセットアップしてください', + systemReasoningModel: { + key: 'システム推論モデル', + tip: 'アプリの作成に使用されるデフォルトの推論モデルを設定します。また、対話名の生成や次の質問の提案などの機能もデフォルトの推論モデルを使用します。', + }, + embeddingModel: { + key: '埋め込みモデル', + tip: 'ナレッジのドキュメント埋め込み処理のデフォルトモデルを設定します。ナレッジの取得とインポートの両方に、この埋め込みモデルをベクトル化処理に使用します。切り替えると、インポートされたナレッジと質問の間のベクトル次元が一致せず、取得に失敗します。取得の失敗を避けるためには、このモデルを任意に切り替えないでください。', + required: '埋め込みモデルが必要です', + }, + speechToTextModel: { + key: '音声-to-テキストモデル', + tip: '会話での音声-to-テキスト入力に使用するデフォルトモデルを設定します。', + }, + ttsModel: { + key: 'テキスト-to-音声モデル', + tip: '会話でのテキスト-to-音声入力に使用するデフォルトモデルを設定します。', + }, + rerankModel: { + key: 'Rerank モデル', + tip: 'Rerank モデルは、ユーザークエリとの意味的一致に基づいて候補文書リストを再配置し、意味的ランキングの結果を向上させます。', + }, + apiKey: 'API-キー', + quota: 
'クォータ', + searchModel: '検索モデル', + noModelFound: '{{model}}に対するモデルが見つかりません', + models: 'モデル', + showMoreModelProvider: 'より多くのモデルプロバイダを表示', + selector: { + tip: 'このモデルは削除されました。別のモデルを追加するか、別のモデルを選択してください。', + emptyTip: '利用可能なモデルはありません', + emptySetting: '設定に移動して構成してください', + rerankTip: 'Rerank モデルを設定してください', + }, + card: { + quota: 'クォータ', + onTrial: 'トライアル中', + paid: '有料', + quotaExhausted: 'クォータが使い果たされました', + callTimes: '通話回数', + tokens: 'トークン', + buyQuota: 'クォータを購入', + priorityUse: '優先利用', + removeKey: 'API キーを削除', + tip: '有料クォータは優先して使用されます。有料クォータを使用し終えた後、トライアルクォータが利用されます。', + }, + item: { + deleteDesc: '{{modelName}}はシステムが推測するモデルとして利用されています。削除すると、一部の機能が使用不可能になる可能性があります。ご確認ください。', + freeQuota: '無料のクォータ', + }, + addApiKey: 'API キーを追加', + invalidApiKey: '無効な API キー', + encrypted: { + front: 'API キーは', + back: ' の技術で暗号化されて保存されます。', + }, + freeQuota: { + howToEarn: '獲得方法', + }, + addMoreModelProvider: 'モデルプロバイダを追加', + addModel: 'モデルを追加', + modelsNum: '{{num}}のモデル', + showModels: 'モデルの表示', + showModelsNum: '{{num}}のモデルを表示', + collapse: '折り畳み', + config: '設定', + modelAndParameters: 'モデルとパラメータ', + model: 'モデル', + featureSupported: '{{feature}}に対応', + callTimes: '呼び出し回数', + credits: 'クレジット', + buyQuota: 'クォータ購入', + getFreeTokens: '無料トークンを獲得', + priorityUsing: '優先利用', + deprecated: '廃止予定', + confirmDelete: '削除を確認', + quotaTip: '残りの無料トークン', + loadPresets: 'プリセットの読み込み', + parameters: 'パラメータ', + loadBalancing: '負荷分散', + loadBalancingDescription: '複数の認証情報を使って負荷を分散させます。', + loadBalancingHeadline: '負荷分散', + configLoadBalancing: '負荷分散の設定', + modelHasBeenDeprecated: 'このモデルは廃止予定です', + providerManaged: 'プロバイダ管理', + providerManagedDescription: 'モデルプロバイダによって提供される認証情報を使用します。', + defaultConfig: 'デフォルトの設定', + apiKeyStatusNormal: 'API キーの状態は正常', + apiKeyRateLimit: 'レート制限に到達しました。{{seconds}}秒後に再度利用可能です', + addConfig: '設定を追加', + editConfig: '設定を編集', + loadBalancingLeastKeyWarning: '負荷分散を利用するには、最低 2 つのキーを有効化する必要があります。', + loadBalancingInfo: 'デフォルトでは、負荷分散はラウンドロビン方式を採用しています。レート制限が発生した場合、1 
分間のクールダウン期間が適用されます。', + upgradeForLoadBalancing: '負荷分散を利用するには、プランのアップグレードが必要です。', + emptyProviderTitle: 'モデルプロバイダーが設定されていません', + discoverMore: 'もっと発見する', + installProvider: 'モデルプロバイダーをインストールする', + installDataSourceProvider: 'データソースプロバイダーをインストールする', + configureTip: 'API キーを設定するか、使用するモデルを追加してください', + toBeConfigured: '設定中', + emptyProviderTip: '最初にモデルプロバイダーをインストールしてください。', + auth: { + apiKeyModal: { + title: 'APIキー認証設定', + addModel: 'モデルを追加する', + desc: '認証情報を設定した後、ワークスペース内のすべてのメンバーは、アプリケーションを調整する際にこのモデルを使用できます。', + }, + authorizationError: '認証エラー', + apiKeys: 'APIキー', + unAuthorized: '無許可', + configModel: 'モデルを構成する', + addApiKey: 'APIキーを追加してください', + addCredential: '認証情報を追加する', + authRemoved: '認証が削除されました', + modelCredentials: 'モデルの資格情報', + providerManaged: 'プロバイダーが管理しました', + addNewModel: '新しいモデルを追加する', + configLoadBalancing: '構成ロードバランシング', + addModelCredential: 'モデルの資格情報を追加', + providerManagedTip: '現在の設定はプロバイダーによってホストされています。', + specifyModelCredential: 'モデルの資格情報を指定してください', + specifyModelCredentialTip: '構成されたモデルの認証情報を使用してください。', + addModel: 'モデルを追加する', + addNewModelCredential: '新しいモデルの認証情報を追加する', + editModelCredential: 'モデルの資格情報を編集する', + removeModel: 'モデルを削除する', + customModelCredentialsDeleteTip: '認証情報は使用中で、削除できません。', + modelCredential: 'モデルの資格情報', + manageCredentials: '認証情報を管理する', + customModelCredentials: 'カスタムモデルの認証情報', + selectModelCredential: 'モデルの資格情報を選択する', + }, + parametersInvalidRemoved: 'いくつかのパラメータが無効であり、削除されました。', + }, + dataSource: { + add: 'データソースの追加', + connect: '接続', + configure: '設定', + notion: { + title: 'Notion', + description: 'ナレッジデータソースとして Notion を使用します。', + connectedWorkspace: '接続済みワークスペース', + addWorkspace: 'ワークスペースの追加', + connected: '接続済み', + disconnected: '接続解除', + changeAuthorizedPages: '認証済みページの変更', + pagesAuthorized: '認証済みページ', + sync: '同期', + remove: '削除', + selector: { + pageSelected: '選択済みページ', + searchPages: 'ページ検索...', + noSearchResult: '検索結果なし', + addPages: 'ページの追加', + preview: 'プレビュー', + }, + integratedAlert: 
'Notionは内部資格情報を通じて統合されており、再認証する必要はありません。', + }, + website: { + title: 'ウェブサイト', + description: 'ウェブクローラーを使ってウェブサイトからコンテンツを取り込みます。', + with: 'による', + configuredCrawlers: '設定済みクローラー', + active: 'アクティブ', + inactive: '非アクティブ', + }, + }, + plugin: { + serpapi: { + apiKey: 'API キー', + apiKeyPlaceholder: 'API キーを入力してください', + keyFrom: 'SerpAPI アカウントページから SerpAPI キーを取得してください', + }, + }, + apiBasedExtension: { + title: 'API 拡張機能は、Dify のアプリケーション全体での簡単な使用のための設定を簡素化し、集中的な API 管理を提供します。', + link: '独自の API 拡張機能を開発する方法について学ぶ。', + add: 'API 拡張機能を追加', + selector: { + title: 'API 拡張機能', + placeholder: 'API 拡張機能を選択してください', + manage: 'API 拡張機能を管理', + }, + modal: { + title: 'API 拡張機能を追加', + editTitle: 'API 拡張機能を編集', + name: { + title: '名前', + placeholder: '名前を入力してください', + }, + apiEndpoint: { + title: 'API エンドポイント', + placeholder: 'API エンドポイントを入力してください', + }, + apiKey: { + title: 'API キー', + placeholder: 'API キーを入力してください', + lengthError: 'API キーの長さは 5 文字未満にできません', + }, + }, + type: 'タイプ', + }, + about: { + changeLog: '変更ログ', + updateNow: '今すぐ更新', + nowAvailable: 'Dify {{version}} が利用可能です。', + latestAvailable: 'Dify {{version}} が最新バージョンです。', + }, + appMenus: { + overview: '監視', + promptEng: 'オーケストレート', + apiAccess: 'API アクセス', + logAndAnn: 'ログ&注釈', + logs: 'ログ', + }, + environment: { + testing: 'テスト', + development: '開発', + }, + appModes: { + completionApp: 'テキスト生成', + chatApp: 'チャットアプリ', + }, + datasetMenus: { + documents: 'ドキュメント', + hitTesting: '検索テスト', + settings: '設定', + emptyTip: 'このナレッジはどのアプリケーションにも統合されていません。ドキュメントを参照してガイダンスを確認してください。', + viewDoc: 'ドキュメントを表示', + relatedApp: '関連アプリ', + noRelatedApp: '関連付けられたアプリはありません', + pipeline: 'パイプライン', + }, + voiceInput: { + speaking: '今話しています...', + converting: 'テキストに変換中...', + notAllow: 'マイクが許可されていません', + }, + modelName: { + 'gpt-3.5-turbo': 'GPT-3.5-Turbo', + 'gpt-3.5-turbo-16k': 'GPT-3.5-Turbo-16K', + 'gpt-4': 'GPT-4', + 'gpt-4-32k': 'GPT-4-32K', + 'text-davinci-003': 'Text-Davinci-003', + 'text-embedding-ada-002': 'Text-Embedding-Ada-002', 
+ 'whisper-1': 'Whisper-1', + 'claude-instant-1': 'Claude-Instant', + 'claude-2': 'Claude-2', + }, + chat: { + renameConversation: '会話名を変更', + conversationName: '会話名', + conversationNamePlaceholder: '会話名を入力してください', + conversationNameCanNotEmpty: '会話名は必須です', + citation: { + title: '引用', + linkToDataset: 'ナレッジへのリンク', + characters: '文字数:', + hitCount: '検索回数:', + vectorHash: 'ベクトルハッシュ:', + hitScore: '検索スコア:', + }, + inputPlaceholder: '{{botName}} と話す', + inputDisabledPlaceholder: 'プレビューのみ', + thought: '思考', + thinking: '考え中...', + resend: '再送信してください', + }, + promptEditor: { + placeholder: 'ここにプロンプトワードを入力してください。変数を挿入するには「{」を、プロンプトコンテンツブロックを挿入するには「/」を入力します。', + context: { + item: { + title: 'コンテキスト', + desc: 'コンテキストテンプレートを挿入', + }, + modal: { + title: '{{num}} 番目のコンテキスト', + add: 'コンテキストを追加', + footer: '以下のコンテキストセクションでコンテキストを管理できます。', + }, + }, + history: { + item: { + title: '会話履歴', + desc: '過去のメッセージテンプレートを挿入', + }, + modal: { + title: '例', + user: 'こんにちは', + assistant: 'こんにちは!今日はどのようにお手伝いできますか?', + edit: '会話の役割名を編集', + }, + }, + variable: { + item: { + title: '変数&外部ツール', + desc: '変数&外部ツールを挿入', + }, + outputToolDisabledItem: { + title: '変数', + desc: '変数を挿入', + }, + modal: { + add: '新しい変数', + addTool: '新しいツール', + }, + }, + query: { + item: { + title: 'クエリ', + desc: 'ユーザークエリテンプレートを挿入', + }, + }, + existed: 'プロンプトにすでに存在します', + }, + imageUploader: { + uploadFromComputer: 'コンピューターからアップロード', + uploadFromComputerReadError: '画像の読み込みに失敗しました。もう一度お試しください。', + uploadFromComputerUploadError: '画像のアップロードに失敗しました。もう一度アップロードしてください。', + uploadFromComputerLimit: 'アップロード画像のサイズは {{size}} MB を超えることはできません', + pasteImageLink: '画像リンクを貼り付ける', + pasteImageLinkInputPlaceholder: 'ここに画像リンクを貼り付けてください', + pasteImageLinkInvalid: '無効な画像リンク', + imageUpload: '画像アップロード', + }, + tag: { + placeholder: 'すべてのタグ', + addNew: '新しいタグを追加', + noTag: 'タグなし', + noTagYet: 'まだタグがありません', + addTag: 'タグを追加', + editTag: 'タグを編集', + manageTags: 'タグの管理', + selectorPlaceholder: '検索または作成する文字を入力', + create: '作成', + delete: 'タグを削除', 
+ deleteTip: 'タグは使用されています、削除しますか', + created: 'タグは正常に作成されました', + failed: 'タグの作成に失敗しました', + }, + fileUploader: { + uploadFromComputer: 'ローカルアップロード', + pasteFileLink: 'ファイルリンクの貼り付け', + pasteFileLinkInputPlaceholder: 'URL を入力...', + uploadFromComputerLimit: 'アップロードファイルは{{size}}を超えてはなりません', + uploadFromComputerUploadError: 'ファイルのアップロードに失敗しました。再度アップロードしてください。', + uploadFromComputerReadError: 'ファイルの読み取りに失敗しました。もう一度やり直してください。', + fileExtensionNotSupport: 'ファイル拡張子はサポートされていません', + pasteFileLinkInvalid: '無効なファイルリンク', + fileExtensionBlocked: 'このファイルタイプは、セキュリティ上の理由でブロックされています', + }, + license: { + expiring_plural: '有効期限 {{count}} 日', + expiring: '1 日で有効期限が切れます', + unlimited: '無制限', + }, + pagination: { + perPage: 'ページあたりのアイテム数', + }, + you: 'あなた', + imageInput: { + browse: 'ブラウズする', + supportedFormats: 'PNG、JPG、JPEG、WEBP、および GIF をサポートしています。', + dropImageHere: 'ここに画像をドロップするか、', + }, + avatar: { + deleteTitle: 'アバターを削除する', + deleteDescription: '本当にプロフィール写真を削除してもよろしいですか?あなたのアカウントはデフォルトの初期アバターを使用します。', + }, + feedback: { + content: 'フィードバックコンテンツ', + title: 'フィードバックを提供する', + subtitle: 'この回答で何が間違っていたのか教えてください。', + placeholder: '何が間違っていたか、またはどのように改善できるかを教えてください...', + }, + label: { + optional: '(オプション)', + }, +} + +export default translation diff --git a/web/i18n/ja-JP/explore.json b/web/i18n/ja-JP/explore.json index 51afbe6133..661f1e87d0 100644 --- a/web/i18n/ja-JP/explore.json +++ b/web/i18n/ja-JP/explore.json @@ -1,12 +1,14 @@ { - "appCard.addToWorkspace": "ワークスペースに追加", - "appCard.customize": "カスタマイズ", + "appCard.addToWorkspace": "テンプレートを使用", + "appCard.try": "詳細", "appCustomize.nameRequired": "アプリ名は必須です", "appCustomize.subTitle": "アプリアイコンと名前", "appCustomize.title": "{{name}}からアプリを作成", - "apps.allCategories": "推奨", - "apps.description": "これらのテンプレートアプリを即座に使用するか、テンプレートに基づいて独自のアプリをカスタマイズしてください。", - "apps.title": "アプリを探索", + "apps.allCategories": "全て", + "apps.resetFilter": "クリア", + "apps.resultNum": "{{num}}件の結果", + "apps.title": "Difyの厳選アプリを試して、ビジネス向けのAIソリューションを見つけましょう", + 
"banner.viewMore": "もっと見る", "category.Agent": "エージェント", "category.Assistant": "アシスタント", "category.Entertainment": "エンターテイメント", @@ -23,7 +25,16 @@ "sidebar.chat": "チャット", "sidebar.delete.content": "このアプリを削除してもよろしいですか?", "sidebar.delete.title": "アプリを削除", - "sidebar.discovery": "探索", - "sidebar.workspace": "ワークスペース", - "title": "探索" + "sidebar.noApps.description": "公開されたWebアプリがここに表示されます", + "sidebar.noApps.learnMore": "詳細", + "sidebar.noApps.title": "Webアプリなし", + "sidebar.title": "アプリギャラリー", + "sidebar.webApps": "Webアプリ", + "title": "探索", + "tryApp.category": "カテゴリー", + "tryApp.createFromSampleApp": "テンプレートから作成", + "tryApp.requirements": "必要項目", + "tryApp.tabHeader.detail": "オーケストレーション詳細", + "tryApp.tabHeader.try": "お試し", + "tryApp.tryInfo": "これはサンプルアプリです。最大5件のメッセージまでお試しいただけます。引き続き利用するには、「テンプレートから作成」 をクリックして設定を行ってください。" } diff --git a/web/i18n/ja-JP/explore.ts b/web/i18n/ja-JP/explore.ts new file mode 100644 index 0000000000..2639bfc1dd --- /dev/null +++ b/web/i18n/ja-JP/explore.ts @@ -0,0 +1,64 @@ +const translation = { + title: '探索', + sidebar: { + title: 'アプリギャラリー', + chat: 'チャット', + webApps: 'Webアプリ', + action: { + pin: 'ピン留め', + unpin: 'ピン留め解除', + rename: '名前変更', + delete: '削除', + }, + delete: { + title: 'アプリを削除', + content: 'このアプリを削除してもよろしいですか?', + }, + noApps: { + title: 'Webアプリなし', + description: '公開されたWebアプリがここに表示されます', + learnMore: '詳細', + }, + }, + apps: { + title: 'Difyの厳選アプリを試して、ビジネス向けのAIソリューションを見つけましょう', + allCategories: '全て', + resultNum: '{{num}}件の結果', + resetFilter: 'クリア', + }, + appCard: { + addToWorkspace: 'テンプレートを使用', + try: '詳細', + customize: 'カスタマイズ', + }, + tryApp: { + tabHeader: { + try: 'お試し', + detail: 'オーケストレーション詳細', + }, + createFromSampleApp: 'テンプレートから作成', + category: 'カテゴリー', + requirements: '必要項目', + tryInfo: 'これはサンプルアプリです。最大5件のメッセージまでお試しいただけます。引き続き利用するには、「テンプレートから作成」 をクリックして設定を行ってください。', + }, + appCustomize: { + title: '{{name}}からアプリを作成', + subTitle: 'アプリアイコンと名前', + nameRequired: 'アプリ名は必須です', + }, + category: { + Assistant: 'アシスタント', + 
Writing: '執筆', + Translate: '翻訳', + Programming: 'プログラミング', + HR: '人事', + Workflow: 'ワークフロー', + Agent: 'エージェント', + Entertainment: 'エンターテイメント', + }, + banner: { + viewMore: 'もっと見る', + }, +} + +export default translation diff --git a/web/i18n/ko-KR/explore.json b/web/i18n/ko-KR/explore.json index f7bbc63757..a44e90e3c1 100644 --- a/web/i18n/ko-KR/explore.json +++ b/web/i18n/ko-KR/explore.json @@ -1,12 +1,7 @@ { - "appCard.addToWorkspace": "작업 공간에 추가", - "appCard.customize": "사용자 정의", "appCustomize.nameRequired": "앱 이름은 필수입니다", "appCustomize.subTitle": "앱 아이콘 및 이름", "appCustomize.title": "{{name}}으로 앱 만들기", - "apps.allCategories": "모든 카테고리", - "apps.description": "이 템플릿 앱을 즉시 사용하거나 템플릿을 기반으로 고유한 앱을 사용자 정의하세요.", - "apps.title": "Dify 로 앱 탐색", "category.Agent": "에이전트", "category.Assistant": "어시스턴트", "category.Entertainment": "오락", @@ -23,7 +18,5 @@ "sidebar.chat": "채팅", "sidebar.delete.content": "이 앱을 삭제해도 괜찮습니까?", "sidebar.delete.title": "앱 삭제", - "sidebar.discovery": "탐색", - "sidebar.workspace": "작업 공간", "title": "탐색" } diff --git a/web/i18n/ko-KR/explore.ts b/web/i18n/ko-KR/explore.ts new file mode 100644 index 0000000000..756849b374 --- /dev/null +++ b/web/i18n/ko-KR/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: '탐색', + sidebar: { + chat: '채팅', + action: { + pin: '고정', + unpin: '고정 해제', + rename: '이름 변경', + delete: '삭제', + }, + delete: { + title: '앱 삭제', + content: '이 앱을 삭제해도 괜찮습니까?', + }, + }, + apps: { + }, + appCard: { + customize: '사용자 정의', + }, + appCustomize: { + title: '{{name}}으로 앱 만들기', + subTitle: '앱 아이콘 및 이름', + nameRequired: '앱 이름은 필수입니다', + }, + category: { + Assistant: '어시스턴트', + Writing: '작성', + Translate: '번역', + Programming: '프로그래밍', + Agent: '에이전트', + Workflow: '워크플로우', + HR: '인사', + Entertainment: '오락', + }, +} + +export default translation diff --git a/web/i18n/pl-PL/explore.json b/web/i18n/pl-PL/explore.json index 409f0f4236..4d23037f99 100644 --- a/web/i18n/pl-PL/explore.json +++ b/web/i18n/pl-PL/explore.json @@ -1,12 +1,7 @@ { - 
"appCard.addToWorkspace": "Dodaj do przestrzeni roboczej", - "appCard.customize": "Dostosuj", "appCustomize.nameRequired": "Nazwa aplikacji jest wymagana", "appCustomize.subTitle": "Ikona i nazwa aplikacji", "appCustomize.title": "Utwórz aplikację z {{name}}", - "apps.allCategories": "Polecane", - "apps.description": "Wykorzystaj te aplikacje szablonowe natychmiast lub dostosuj własne aplikacje na podstawie szablonów.", - "apps.title": "Odkrywaj aplikacje stworzone przez Dify", "category.Agent": "Agent", "category.Assistant": "Asystent", "category.Entertainment": "Rozrywka", @@ -23,7 +18,5 @@ "sidebar.chat": "Czat", "sidebar.delete.content": "Czy na pewno chcesz usunąć tę aplikację?", "sidebar.delete.title": "Usuń aplikację", - "sidebar.discovery": "Odkrywanie", - "sidebar.workspace": "Przestrzeń robocza", "title": "Odkryj" } diff --git a/web/i18n/pl-PL/explore.ts b/web/i18n/pl-PL/explore.ts new file mode 100644 index 0000000000..864dee6f49 --- /dev/null +++ b/web/i18n/pl-PL/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'Odkryj', + sidebar: { + chat: 'Czat', + action: { + pin: 'Przypnij', + unpin: 'Odepnij', + rename: 'Zmień nazwę', + delete: 'Usuń', + }, + delete: { + title: 'Usuń aplikację', + content: 'Czy na pewno chcesz usunąć tę aplikację?', + }, + }, + apps: { + }, + appCard: { + customize: 'Dostosuj', + }, + appCustomize: { + title: 'Utwórz aplikację z {{name}}', + subTitle: 'Ikona i nazwa aplikacji', + nameRequired: 'Nazwa aplikacji jest wymagana', + }, + category: { + Assistant: 'Asystent', + Writing: 'Pisanie', + Translate: 'Tłumaczenie', + Programming: 'Programowanie', + HR: 'HR', + Agent: 'Agent', + Workflow: 'Przepływ pracy', + Entertainment: 'Rozrywka', + }, +} + +export default translation diff --git a/web/i18n/pt-BR/explore.json b/web/i18n/pt-BR/explore.json index 03692aac06..69df273cf7 100644 --- a/web/i18n/pt-BR/explore.json +++ b/web/i18n/pt-BR/explore.json @@ -1,12 +1,7 @@ { - "appCard.addToWorkspace": "Adicionar ao Espaço de 
Trabalho", - "appCard.customize": "Personalizar", "appCustomize.nameRequired": "O nome do aplicativo é obrigatório", "appCustomize.subTitle": "Ícone e nome do aplicativo", "appCustomize.title": "Criar aplicativo a partir de {{name}}", - "apps.allCategories": "Recomendado", - "apps.description": "Use esses aplicativos modelo instantaneamente ou personalize seus próprios aplicativos com base nos modelos.", - "apps.title": "Explorar Aplicações por Dify", "category.Agent": "Agente", "category.Assistant": "Assistente", "category.Entertainment": "Entretenimento", @@ -23,7 +18,5 @@ "sidebar.chat": "Chat", "sidebar.delete.content": "Tem certeza de que deseja excluir este aplicativo?", "sidebar.delete.title": "Excluir aplicativo", - "sidebar.discovery": "Descoberta", - "sidebar.workspace": "Espaço de Trabalho", "title": "Badać" } diff --git a/web/i18n/pt-BR/explore.ts b/web/i18n/pt-BR/explore.ts new file mode 100644 index 0000000000..5bd24bb581 --- /dev/null +++ b/web/i18n/pt-BR/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'Explorar', + sidebar: { + chat: 'Chat', + action: { + pin: 'Fixar', + unpin: 'Desafixar', + rename: 'Renomear', + delete: 'Excluir', + }, + delete: { + title: 'Excluir aplicativo', + content: 'Tem certeza de que deseja excluir este aplicativo?', + }, + }, + apps: { + }, + appCard: { + customize: 'Personalizar', + }, + appCustomize: { + title: 'Criar aplicativo a partir de {{name}}', + subTitle: 'Ícone e nome do aplicativo', + nameRequired: 'O nome do aplicativo é obrigatório', + }, + category: { + Assistant: 'Assistente', + Writing: 'Escrita', + Translate: 'Traduzir', + Programming: 'Programação', + HR: 'RH', + Workflow: 'Fluxo de trabalho', + Agent: 'Agente', + Entertainment: 'Entretenimento', + }, +} + +export default translation diff --git a/web/i18n/ro-RO/explore.json b/web/i18n/ro-RO/explore.json index 28509ab4fc..d2dec572b4 100644 --- a/web/i18n/ro-RO/explore.json +++ b/web/i18n/ro-RO/explore.json @@ -1,12 +1,7 @@ { - 
"appCard.addToWorkspace": "Adăugați la spațiul de lucru", - "appCard.customize": "Personalizați", "appCustomize.nameRequired": "Numele aplicației este obligatoriu", "appCustomize.subTitle": "Pictogramă și nume aplicație", "appCustomize.title": "Creați o aplicație din {{name}}", - "apps.allCategories": "Recomandate", - "apps.description": "Utilizați aceste aplicații model imediat sau personalizați-vă propria aplicație pe baza modelelor.", - "apps.title": "Explorați aplicațiile Dify", "category.Agent": "Agent", "category.Assistant": "Asistent", "category.Entertainment": "Divertisment", @@ -23,7 +18,5 @@ "sidebar.chat": "Chat", "sidebar.delete.content": "Sunteți sigur că doriți să ștergeți această aplicație?", "sidebar.delete.title": "Ștergeți aplicația", - "sidebar.discovery": "Descoperire", - "sidebar.workspace": "Spațiu de lucru", "title": "Explorați" } diff --git a/web/i18n/ro-RO/explore.ts b/web/i18n/ro-RO/explore.ts new file mode 100644 index 0000000000..918713bc90 --- /dev/null +++ b/web/i18n/ro-RO/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'Explorați', + sidebar: { + chat: 'Chat', + action: { + pin: 'Fixați', + unpin: 'Deblocați', + rename: 'Redenumire', + delete: 'Ștergeți', + }, + delete: { + title: 'Ștergeți aplicația', + content: 'Sunteți sigur că doriți să ștergeți această aplicație?', + }, + }, + apps: { + }, + appCard: { + customize: 'Personalizați', + }, + appCustomize: { + title: 'Creați o aplicație din {{name}}', + subTitle: 'Pictogramă și nume aplicație', + nameRequired: 'Numele aplicației este obligatoriu', + }, + category: { + Assistant: 'Asistent', + Writing: 'Scriere', + Translate: 'Traducere', + Programming: 'Programare', + HR: 'Resurse Umane', + Agent: 'Agent', + Workflow: 'Flux de lucru', + Entertainment: 'Divertisment', + }, +} + +export default translation diff --git a/web/i18n/ru-RU/explore.json b/web/i18n/ru-RU/explore.json index a061c35c0a..e92573f04d 100644 --- a/web/i18n/ru-RU/explore.json +++ 
b/web/i18n/ru-RU/explore.json @@ -1,12 +1,7 @@ { - "appCard.addToWorkspace": "Добавить в рабочее пространство", - "appCard.customize": "Настроить", "appCustomize.nameRequired": "Название приложения обязательно", "appCustomize.subTitle": "Значок и название приложения", "appCustomize.title": "Создать приложение из {{name}}", - "apps.allCategories": "Рекомендуемые", - "apps.description": "Используйте эти шаблонные приложения мгновенно или настройте свои собственные приложения на основе шаблонов.", - "apps.title": "Обзор приложений от Dify", "category.Agent": "Агент", "category.Assistant": "Ассистент", "category.Entertainment": "Развлечение", @@ -23,7 +18,5 @@ "sidebar.chat": "Чат", "sidebar.delete.content": "Вы уверены, что хотите удалить это приложение?", "sidebar.delete.title": "Удалить приложение", - "sidebar.discovery": "Открытия", - "sidebar.workspace": "Рабочее пространство", "title": "Обзор" } diff --git a/web/i18n/ru-RU/explore.ts b/web/i18n/ru-RU/explore.ts new file mode 100644 index 0000000000..fd23926d7b --- /dev/null +++ b/web/i18n/ru-RU/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'Обзор', + sidebar: { + chat: 'Чат', + action: { + pin: 'Закрепить', + unpin: 'Открепить', + rename: 'Переименовать', + delete: 'Удалить', + }, + delete: { + title: 'Удалить приложение', + content: 'Вы уверены, что хотите удалить это приложение?', + }, + }, + apps: { + }, + appCard: { + customize: 'Настроить', + }, + appCustomize: { + title: 'Создать приложение из {{name}}', + subTitle: 'Значок и название приложения', + nameRequired: 'Название приложения обязательно', + }, + category: { + Assistant: 'Ассистент', + Writing: 'Написание', + Translate: 'Перевод', + Programming: 'Программирование', + HR: 'HR', + Agent: 'Агент', + Workflow: 'Рабочий процесс', + Entertainment: 'Развлечение', + }, +} + +export default translation diff --git a/web/i18n/sl-SI/explore.json b/web/i18n/sl-SI/explore.json index ad8de813f9..f37c9bdea3 100644 --- a/web/i18n/sl-SI/explore.json +++ 
b/web/i18n/sl-SI/explore.json @@ -1,12 +1,7 @@ { - "appCard.addToWorkspace": "Dodaj v delovni prostor", - "appCard.customize": "Prilagodi", "appCustomize.nameRequired": "Ime aplikacije je obvezno", "appCustomize.subTitle": "Ikona aplikacije & ime", "appCustomize.title": "Ustvari aplikacijo iz {{name}}", - "apps.allCategories": "Priporočeno", - "apps.description": "Uporabite te predloge aplikacij takoj ali prilagodite svoje aplikacije na podlagi predlog.", - "apps.title": "Razišči aplikacije Dify", "category.Agent": "Agent", "category.Assistant": "Pomočnik", "category.Entertainment": "Zabava", @@ -23,7 +18,5 @@ "sidebar.chat": "Klepet", "sidebar.delete.content": "Ali ste prepričani, da želite izbrisati to aplikacijo?", "sidebar.delete.title": "Izbriši aplikacijo", - "sidebar.discovery": "Odkritja", - "sidebar.workspace": "Delovni prostor", "title": "Razišči" } diff --git a/web/i18n/sl-SI/explore.ts b/web/i18n/sl-SI/explore.ts new file mode 100644 index 0000000000..ae25382b46 --- /dev/null +++ b/web/i18n/sl-SI/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'Razišči', + sidebar: { + chat: 'Klepet', + action: { + pin: 'Pripni', + unpin: 'Odpni', + rename: 'Preimenuj', + delete: 'Izbriši', + }, + delete: { + title: 'Izbriši aplikacijo', + content: 'Ali ste prepričani, da želite izbrisati to aplikacijo?', + }, + }, + apps: { + }, + appCard: { + customize: 'Prilagodi', + }, + appCustomize: { + title: 'Ustvari aplikacijo iz {{name}}', + subTitle: 'Ikona aplikacije & ime', + nameRequired: 'Ime aplikacije je obvezno', + }, + category: { + Assistant: 'Pomočnik', + Writing: 'Pisanje', + Translate: 'Prevajanje', + Programming: 'Programiranje', + HR: 'Kadri', + Workflow: 'Potek dela', + Agent: 'Agent', + Entertainment: 'Zabava', + }, +} + +export default translation diff --git a/web/i18n/th-TH/explore.json b/web/i18n/th-TH/explore.json index 17d998d177..4bdeb041c0 100644 --- a/web/i18n/th-TH/explore.json +++ b/web/i18n/th-TH/explore.json @@ -1,12 +1,7 @@ { - 
"appCard.addToWorkspace": "เพิ่มไปยังพื้นที่ทํางาน", - "appCard.customize": "ปรับแต่ง", "appCustomize.nameRequired": "ต้องใช้ชื่อแอป", "appCustomize.subTitle": "ไอคอนและชื่อแอป", "appCustomize.title": "สร้างแอปจาก {{name}}", - "apps.allCategories": "แนะ นำ", - "apps.description": "ใช้แอปเทมเพลตเหล่านี้ทันทีหรือปรับแต่งแอปของคุณเองตามเทมเพลต", - "apps.title": "สํารวจแอพโดย Dify", "category.Agent": "ตัวแทน", "category.Assistant": "ผู้ช่วย", "category.Entertainment": "ความบันเทิง", @@ -23,7 +18,5 @@ "sidebar.chat": "สนทนา", "sidebar.delete.content": "คุณแน่ใจหรือไม่ว่าต้องการลบแอปนี้?", "sidebar.delete.title": "ลบแอป", - "sidebar.discovery": "การค้นพบ", - "sidebar.workspace": "พื้นที่", "title": "สํารวจ" } diff --git a/web/i18n/th-TH/explore.ts b/web/i18n/th-TH/explore.ts new file mode 100644 index 0000000000..239d1e7182 --- /dev/null +++ b/web/i18n/th-TH/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'สํารวจ', + sidebar: { + chat: 'สนทนา', + action: { + pin: 'ปักหมุด', + unpin: 'ปลดหมุด', + rename: 'ตั้งชื่อใหม่', + delete: 'ลบ', + }, + delete: { + title: 'ลบแอป', + content: 'คุณแน่ใจหรือไม่ว่าต้องการลบแอปนี้?', + }, + }, + apps: { + }, + appCard: { + customize: 'ปรับแต่ง', + }, + appCustomize: { + title: 'สร้างแอปจาก {{name}}', + subTitle: 'ไอคอนและชื่อแอป', + nameRequired: 'ต้องใช้ชื่อแอป', + }, + category: { + Assistant: 'ผู้ช่วย', + Writing: 'การเขียน', + Translate: 'แปล', + Programming: 'โปรแกรม', + HR: 'ฝ่ายบุคคล', + Workflow: 'เวิร์กโฟลว์', + Agent: 'ตัวแทน', + Entertainment: 'ความบันเทิง', + }, +} + +export default translation diff --git a/web/i18n/tr-TR/explore.json b/web/i18n/tr-TR/explore.json index c4badf8b6f..76f3a7e321 100644 --- a/web/i18n/tr-TR/explore.json +++ b/web/i18n/tr-TR/explore.json @@ -1,12 +1,7 @@ { - "appCard.addToWorkspace": "Çalışma Alanına Ekle", - "appCard.customize": "Özelleştir", "appCustomize.nameRequired": "Uygulama ismi gereklidir", "appCustomize.subTitle": "Uygulama simgesi ve ismi", "appCustomize.title": "{{name}} 
uygulamasından uygulama oluştur", - "apps.allCategories": "Önerilen", - "apps.description": "Bu şablon uygulamalarını anında kullanın veya şablonlara dayalı kendi uygulamalarınızı özelleştirin.", - "apps.title": "Dify Tarafından Keşfet Uygulamaları", "category.Agent": "Aracı", "category.Assistant": "Asistan", "category.Entertainment": "Eğlence", @@ -23,7 +18,5 @@ "sidebar.chat": "Sohbet", "sidebar.delete.content": "Bu uygulamayı silmek istediğinizden emin misiniz?", "sidebar.delete.title": "Uygulamayı sil", - "sidebar.discovery": "Keşif", - "sidebar.workspace": "Çalışma Alanı", "title": "Keşfet" } diff --git a/web/i18n/tr-TR/explore.ts b/web/i18n/tr-TR/explore.ts new file mode 100644 index 0000000000..b2e3a48e7b --- /dev/null +++ b/web/i18n/tr-TR/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'Keşfet', + sidebar: { + chat: 'Sohbet', + action: { + pin: 'Sabitle', + unpin: 'Sabitlemeyi Kaldır', + rename: 'Yeniden Adlandır', + delete: 'Sil', + }, + delete: { + title: 'Uygulamayı sil', + content: 'Bu uygulamayı silmek istediğinizden emin misiniz?', + }, + }, + apps: { + }, + appCard: { + customize: 'Özelleştir', + }, + appCustomize: { + title: '{{name}} uygulamasından uygulama oluştur', + subTitle: 'Uygulama simgesi ve ismi', + nameRequired: 'Uygulama ismi gereklidir', + }, + category: { + Assistant: 'Asistan', + Writing: 'Yazma', + Translate: 'Çeviri', + Programming: 'Programlama', + HR: 'İK', + Agent: 'Aracı', + Workflow: 'İş Akışı', + Entertainment: 'Eğlence', + }, +} + +export default translation diff --git a/web/i18n/uk-UA/explore.json b/web/i18n/uk-UA/explore.json index 28672b723a..29e4995535 100644 --- a/web/i18n/uk-UA/explore.json +++ b/web/i18n/uk-UA/explore.json @@ -1,12 +1,7 @@ { - "appCard.addToWorkspace": "Додати до робочого простору", - "appCard.customize": "Налаштувати", "appCustomize.nameRequired": "Назва програми обов’язкова", "appCustomize.subTitle": "Значок програми та назва", "appCustomize.title": "Створити додаток з {{name}}", - 
"apps.allCategories": "Рекомендовані", - "apps.description": "Використовуйте ці шаблони миттєво або налаштуйте власні програми на основі шаблонів.", - "apps.title": "Вивчайте програми від Dify", "category.Agent": "Агент", "category.Assistant": "Помічник", "category.Entertainment": "Розваги", @@ -23,7 +18,5 @@ "sidebar.chat": "Чат", "sidebar.delete.content": "Ви впевнені, що хочете видалити цю програму?", "sidebar.delete.title": "Видалити додаток", - "sidebar.discovery": "Відкриття", - "sidebar.workspace": "Робочий простір", "title": "Досліджувати" } diff --git a/web/i18n/uk-UA/explore.ts b/web/i18n/uk-UA/explore.ts new file mode 100644 index 0000000000..eb6adae23a --- /dev/null +++ b/web/i18n/uk-UA/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'Досліджувати', + sidebar: { + chat: 'Чат', + action: { + pin: 'Закріпити', + unpin: 'Відкріпити', + rename: 'Перейменувати', + delete: 'Видалити', + }, + delete: { + title: 'Видалити додаток', + content: 'Ви впевнені, що хочете видалити цю програму?', + }, + }, + apps: { + }, + appCard: { + customize: 'Налаштувати', + }, + appCustomize: { + title: 'Створити додаток з {{name}}', + subTitle: 'Значок програми та назва', + nameRequired: 'Назва програми обов’язкова', + }, + category: { + Assistant: 'Помічник', + Writing: 'Написання', + Translate: 'Переклад', + Programming: 'Програмування', + HR: 'HR', + Workflow: 'Робочий процес', + Agent: 'Агент', + Entertainment: 'Розваги', + }, +} + +export default translation diff --git a/web/i18n/vi-VN/explore.json b/web/i18n/vi-VN/explore.json index a7bcf64ffa..605c0661c9 100644 --- a/web/i18n/vi-VN/explore.json +++ b/web/i18n/vi-VN/explore.json @@ -1,12 +1,7 @@ { - "appCard.addToWorkspace": "Thêm vào không gian làm việc", - "appCard.customize": "Tùy chỉnh", "appCustomize.nameRequired": "Vui lòng nhập tên ứng dụng", "appCustomize.subTitle": "Biểu tượng và tên ứng dụng", "appCustomize.title": "Tạo ứng dụng từ {{name}}", - "apps.allCategories": "Tất cả danh mục", - 
"apps.description": "Sử dụng ngay các ứng dụng mẫu này hoặc tùy chỉnh ứng dụng của bạn dựa trên các mẫu có sẵn.", - "apps.title": "Khám phá ứng dụng bởi Dify", "category.Agent": "Người đại lý", "category.Assistant": "Trợ lý", "category.Entertainment": "Giải trí", @@ -23,7 +18,5 @@ "sidebar.chat": "Trò chuyện", "sidebar.delete.content": "Bạn có chắc chắn muốn xóa ứng dụng này không?", "sidebar.delete.title": "Xóa ứng dụng", - "sidebar.discovery": "Khám phá", - "sidebar.workspace": "Không gian làm việc", "title": "Khám phá" } diff --git a/web/i18n/vi-VN/explore.ts b/web/i18n/vi-VN/explore.ts new file mode 100644 index 0000000000..ed27f42b32 --- /dev/null +++ b/web/i18n/vi-VN/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'Khám phá', + sidebar: { + chat: 'Trò chuyện', + action: { + pin: 'Ghim', + unpin: 'Bỏ ghim', + rename: 'Đổi tên', + delete: 'Xóa', + }, + delete: { + title: 'Xóa ứng dụng', + content: 'Bạn có chắc chắn muốn xóa ứng dụng này không?', + }, + }, + apps: { + }, + appCard: { + customize: 'Tùy chỉnh', + }, + appCustomize: { + title: 'Tạo ứng dụng từ {{name}}', + subTitle: 'Biểu tượng và tên ứng dụng', + nameRequired: 'Vui lòng nhập tên ứng dụng', + }, + category: { + Assistant: 'Trợ lý', + Writing: 'Viết lách', + Translate: 'Dịch thuật', + Programming: 'Lập trình', + HR: 'Nhân sự', + Agent: 'Người đại lý', + Workflow: 'Quy trình làm việc', + Entertainment: 'Giải trí', + }, +} + +export default translation diff --git a/web/i18n/zh-Hans/common.json b/web/i18n/zh-Hans/common.json index be7d4690af..d6b9c75511 100644 --- a/web/i18n/zh-Hans/common.json +++ b/web/i18n/zh-Hans/common.json @@ -108,6 +108,7 @@ "chat.conversationName": "会话名称", "chat.conversationNameCanNotEmpty": "会话名称必填", "chat.conversationNamePlaceholder": "请输入会话名称", + "chat.inputDisabledPlaceholder": "仅供试用", "chat.inputPlaceholder": "和 {{botName}} 聊天", "chat.renameConversation": "重命名会话", "chat.resend": "重新发送", diff --git a/web/i18n/zh-Hans/common.ts b/web/i18n/zh-Hans/common.ts new 
file mode 100644 index 0000000000..8ad5f67d1b --- /dev/null +++ b/web/i18n/zh-Hans/common.ts @@ -0,0 +1,789 @@ +const translation = { + theme: { + theme: '主题', + light: '浅色', + dark: '深色', + auto: '自动', + }, + api: { + success: '成功', + actionSuccess: '操作成功', + saved: '已保存', + create: '已创建', + remove: '已移除', + }, + operation: { + create: '创建', + confirm: '确认', + cancel: '取消', + clear: '清空', + save: '保存', + yes: '是', + no: '否', + deleteConfirmTitle: '删除?', + confirmAction: '请确认您的操作。', + saveAndEnable: '保存并启用', + edit: '编辑', + add: '添加', + added: '已添加', + refresh: '重新开始', + reset: '重置', + search: '搜索', + noSearchResults: '没有找到{{content}}', + resetKeywords: '重置关键词', + selectCount: '已选择 {{count}} 项', + searchCount: '找到 {{count}} 个 {{content}}', + noSearchCount: '0 个 {{content}}', + change: '更改', + remove: '移除', + send: '发送', + copy: '复制', + copied: ' 已复制', + lineBreak: '换行', + sure: '我确定', + download: '下载', + downloadSuccess: '下载完毕', + downloadFailed: '下载失败,请稍后重试。', + viewDetails: '查看详情', + delete: '删除', + deleteApp: '删除应用', + settings: '设置', + setup: '设置', + config: '配置', + getForFree: '免费获取', + reload: '刷新', + ok: '好的', + log: '日志', + learnMore: '了解更多', + params: '参数设置', + duplicate: '复制', + rename: '重命名', + audioSourceUnavailable: '音源不可用', + copyImage: '复制图片', + imageCopied: '图片已复制', + zoomOut: '缩小', + zoomIn: '放大', + openInNewTab: '在新标签页打开', + in: '在', + saveAndRegenerate: '保存并重新生成子分段', + close: '关闭', + view: '查看', + viewMore: '查看更多', + regenerate: '重新生成', + submit: '提交', + skip: '跳过', + format: '格式化', + more: '更多', + selectAll: '全选', + deSelectAll: '取消全选', + now: '现在', + }, + errorMsg: { + fieldRequired: '{{field}} 为必填项', + urlError: 'url 应该以 http:// 或 https:// 开头', + }, + placeholder: { + input: '请输入', + select: '请选择', + search: '搜索...', + }, + noData: '暂无数据', + label: { + optional: '(可选)', + }, + voice: { + language: { + zhHans: '中文', + zhHant: '繁体中文', + enUS: '英语', + deDE: '德语', + frFR: '法语', + esES: '西班牙语', + itIT: '意大利语', + thTH: '泰语', + idID: '印尼语', + jaJP: 
'日语', + koKR: '韩语', + ptBR: '葡萄牙语', + ruRU: '俄语', + ukUA: '乌克兰语', + viVN: '越南语', + plPL: '波兰语', + roRO: '罗马尼亚语', + hiIN: '印地语', + trTR: '土耳其语', + faIR: '波斯语', + }, + }, + unit: { + char: '个字符', + }, + actionMsg: { + noModification: '暂无修改', + modifiedSuccessfully: '修改成功', + modifiedUnsuccessfully: '修改失败', + copySuccessfully: '复制成功', + generatedSuccessfully: '已重新生成', + generatedUnsuccessfully: '生成失败', + paySucceeded: '已支付成功', + payCancelled: '已取消支付', + }, + model: { + params: { + temperature: '随机性 temperature', + temperatureTip: + '控制回复的随机性。\n值越大,回复越随机。\n值越小,回复越确定或一致。', + top_p: '核采样 top_p', + top_pTip: + '控制生成多样性。\n值越大,输出会包括更多的单词选项。\n值越小,模型会更集中在高概率的单词上,输出更确定但可能缺乏多样性。\n核采样和随机性不建议同时修改。', + presence_penalty: '话题新鲜度 presence_penalty', + presence_penaltyTip: + '控制生成时对上文已存在的话题的偏好程度。\n值越大,越可能使用到新的话题。', + frequency_penalty: '频率惩罚度 frequency_penalty', + frequency_penaltyTip: + '影响常见与罕见词汇使用。\n值较大时,倾向于生成不常见的词汇和表达方式。\n值越小,更倾向于使用常见和普遍接受的词汇或短语。', + max_tokens: '单次回复限制 max_tokens', + max_tokensTip: + '用于限制回复的最大长度,以 token 为单位。\n较大的值可能会限制给提示词、聊天记录和知识库留出的空间。\n建议将其设置在三分之二以下。\ngpt-4-1106-preview、gpt-4-vision-preview 最大长度 (输入 128k,输出 4k)', + maxTokenSettingTip: '您设置的最大 tokens 数较大,可能会导致 prompt、用户问题、知识库内容没有 token 空间进行处理,建议设置到 2/3 以下。', + setToCurrentModelMaxTokenTip: '最大令牌数更新为当前模型最大的令牌数 {{maxToken}} 的 80%。', + stop_sequences: '停止序列 stop_sequences', + stop_sequencesTip: '最多四个序列,API 将停止生成更多的 token。返回的文本将不包含停止序列。', + stop_sequencesPlaceholder: '输入序列并按 Tab 键', + }, + tone: { + Creative: '创意', + Balanced: '平衡', + Precise: '精确', + Custom: '自定义', + }, + addMoreModel: '添加更多模型', + settingsLink: '模型设置', + capabilities: '多模态能力', + }, + menus: { + status: 'beta', + explore: '探索', + apps: '工作室', + appDetail: '应用详情', + account: '账户', + plugins: '插件', + exploreMarketplace: '探索 Marketplace', + pluginsTips: '集成第三方插件或创建与 ChatGPT 兼容的 AI 插件。', + datasets: '知识库', + datasetsTips: '即将到来:上传自己的长文本数据,或通过 Webhook 集成自己的数据源', + newApp: '创建应用', + newDataset: '创建知识库', + tools: '工具', + }, + userProfile: { + settings: 
'设置', + emailSupport: '邮件支持', + workspace: '工作空间', + createWorkspace: '创建工作空间', + helpCenter: '查看帮助文档', + support: '支持', + compliance: '合规', + forum: '论坛', + roadmap: '路线图', + github: 'GitHub', + community: '社区', + about: '关于', + logout: '登出', + contactUs: '联系我们', + }, + compliance: { + soc2Type1: 'SOC 2 Type I Report', + soc2Type2: 'SOC 2 Type II Report', + iso27001: 'ISO 27001:2022 Certification', + gdpr: 'GDPR DPA', + sandboxUpgradeTooltip: '仅适用于 Professional 或 Team 版计划。', + professionalUpgradeTooltip: '仅适用于 Team 版计划或以上。', + }, + settings: { + accountGroup: '通用', + workplaceGroup: '工作空间', + generalGroup: '通用', + account: '我的账户', + members: '成员', + billing: '账单', + integrations: '集成', + language: '语言', + provider: '模型供应商', + dataSource: '数据来源', + plugin: '插件', + apiBasedExtension: 'API 扩展', + }, + account: { + account: '账户', + myAccount: '我的账户', + studio: '工作室', + avatar: '头像', + name: '用户名', + email: '邮箱', + password: '密码', + passwordTip: '如果您不想使用验证码登录,可以设置永久密码', + setPassword: '设置密码', + resetPassword: '重置密码', + currentPassword: '原密码', + newPassword: '新密码', + notEqual: '两个密码不相同', + confirmPassword: '确认密码', + langGeniusAccount: '账号关联数据', + langGeniusAccountTip: '您的账号相关的用户数据。', + editName: '编辑名字', + showAppLength: '显示 {{length}} 个应用', + delete: '删除账户', + deleteTip: '请注意,一旦确认,作为任何空间的所有者,您的空间将被安排进入永久删除队列,您的所有用户数据也将被排入永久删除队列。', + deletePrivacyLinkTip: '有关我们如何处理您的数据的更多信息,请参阅我们的', + deletePrivacyLink: '隐私政策', + deleteSuccessTip: '删除账户需要一些时间。完成后,我们会通过邮件通知您。', + deleteLabel: '请输入您的邮箱以确认', + deletePlaceholder: '输入您的邮箱...', + sendVerificationButton: '发送验证码', + verificationLabel: '验证码', + verificationPlaceholder: '输入 6 位数字验证码', + permanentlyDeleteButton: '永久删除', + feedbackTitle: '反馈', + feedbackLabel: '请告诉我们您为什么删除账户?', + feedbackPlaceholder: '选填', + editWorkspaceInfo: '编辑工作空间信息', + workspaceName: '工作空间名称', + workspaceIcon: '工作空间图标', + changeEmail: { + title: '更改邮箱', + verifyEmail: '验证当前邮箱', + newEmail: '设置新邮箱', + verifyNew: '验证新邮箱', + authTip: '一旦您的电子邮件地址更改,链接到您旧电子邮件地址的 
Google 或 GitHub 帐户将无法再登录该帐户。', + content1: '如果您继续,我们将向 {{email}} 发送验证码以进行重新验证。', + content2: '你的当前邮箱是 {{email}} 。验证码已发送至该邮箱。', + content3: '输入新的邮箱,我们将向您发送验证码。', + content4: '我们已将验证码发送至 {{email}}。', + codeLabel: '验证码', + codePlaceholder: '输入 6 位数字验证码', + emailLabel: '新邮箱', + emailPlaceholder: '输入新邮箱', + existingEmail: '该邮箱已存在', + unAvailableEmail: '该邮箱暂时无法使用。', + sendVerifyCode: '发送验证码', + continue: '继续', + changeTo: '更改为 {{email}}', + resendTip: '没有收到验证码?', + resendCount: '请在 {{count}} 秒后重新发送', + resend: '重新发送', + }, + }, + members: { + team: '团队', + invite: '添加', + name: '姓名', + lastActive: '上次活动时间', + role: '角色', + pending: '待定...', + owner: '所有者', + admin: '管理员', + adminTip: '能够建立应用程序和管理团队设置', + normal: '成员', + normalTip: '只能使用应用程序,不能建立应用程序', + editor: '编辑', + editorTip: '能够建立并编辑应用程序,不能管理团队设置', + datasetOperator: '知识库管理员', + datasetOperatorTip: '只能管理知识库', + inviteTeamMember: '添加团队成员', + inviteTeamMemberTip: '对方在登录后可以访问你的团队数据。', + emailNotSetup: '由于邮件服务器未设置,无法发送邀请邮件。请将邀请后生成的邀请链接通知用户。', + email: '邮箱', + emailInvalid: '邮箱格式无效', + emailPlaceholder: '输入邮箱', + sendInvite: '发送邀请', + invitedAsRole: '邀请为{{role}}用户', + invitationSent: '邀请已发送', + invitationSentTip: '邀请已发送,对方登录 Dify 后即可访问你的团队数据。', + invitationLink: '邀请链接', + failedInvitationEmails: '邀请以下邮箱失败', + ok: '好的', + removeFromTeam: '移出团队', + removeFromTeamTip: '将取消团队访问', + setAdmin: '设为管理员', + setMember: '设为普通成员', + setEditor: '设为编辑', + disInvite: '取消邀请', + deleteMember: '删除成员', + you: '(你)', + builderTip: '可以构建和编辑自己的应用程序', + setBuilder: 'Set as builder(设置为构建器)', + builder: '构建器', + transferOwnership: '转移所有权', + transferModal: { + title: '转移工作空间所有权', + warning: '您即将转移 “{{workspace}}”的所有权。该操作将立即生效,且无法撤销。', + warningTip: '您将成为管理员成员,新所有者将拥有完全控制权。', + sendTip: '如果您继续,我们将向 {{email}} 发送验证码以进行身份认证。', + verifyEmail: '验证您当前的邮箱', + verifyContent: '您当前的邮箱是 {{email}}。', + verifyContent2: '我们将向该邮箱发送临时验证码以完成身份验证。', + codeLabel: '验证码', + codePlaceholder: '输入 6 位数字验证码', + resendTip: '没有收到验证码?', + resendCount: '请在 {{count}} 秒后重新发送', 
+ resend: '重新发送', + transferLabel: '新所有者', + transferPlaceholder: '选择一个成员', + sendVerifyCode: '发送验证码', + continue: '继续', + transfer: '转移工作空间所有权', + }, + }, + integrations: { + connected: '登录方式', + google: 'Google', + googleAccount: 'Google 账号登录', + github: 'GitHub', + githubAccount: 'GitHub 账号登录', + connect: '绑定', + }, + language: { + displayLanguage: '界面语言', + timezone: '时区', + }, + provider: { + apiKey: 'API 密钥', + enterYourKey: '输入你的 API 密钥', + invalidKey: '无效的 OpenAI API 密钥', + validatedError: '校验失败:', + validating: '验证密钥中...', + saveFailed: 'API 密钥保存失败', + apiKeyExceedBill: '此 API KEY 已没有可用配额,请阅读', + addKey: '添加 密钥', + comingSoon: '即将推出', + editKey: '编辑', + invalidApiKey: '无效的 API 密钥', + azure: { + apiBase: 'API Base', + apiBasePlaceholder: '输入您的 Azure OpenAI API Base 地址', + apiKey: 'API Key', + apiKeyPlaceholder: '输入你的 API 密钥', + helpTip: '了解 Azure OpenAI Service', + }, + openaiHosted: { + openaiHosted: '托管 OpenAI', + onTrial: '体验', + exhausted: '超出限额', + desc: '托管 OpenAI 由 Dify 提供的托管 OpenAI 服务,你可以使用 GPT-3.5 等模型,在体验额度消耗完毕前你需要设置其它模型供应商。', + callTimes: '调用次数', + usedUp: '试用额度已用完,请在下方添加自己的模型供应商', + useYourModel: '当前正在使用你自己的模型供应商。', + close: '关闭', + }, + anthropicHosted: { + anthropicHosted: 'Anthropic Claude', + onTrial: '体验', + exhausted: '超出限额', + desc: '功能强大的模型,擅长执行从复杂对话和创意内容生成到详细指导的各种任务。', + callTimes: '调用次数', + usedUp: '试用额度已用完,请在下方添加自己的模型供应商', + useYourModel: '当前正在使用你自己的模型供应商。', + close: '关闭', + trialQuotaTip: '您的 Anthropic 体验额度将于 2025/03/17 过期,过期后将无法使用,请尽快体验。', + }, + anthropic: { + using: '嵌入能力正在使用', + enableTip: '要启用 Anthropic 模型,您需要先绑定 OpenAI 或 Azure OpenAI 服务。', + notEnabled: '未启用', + keyFrom: '从 Anthropic 获取您的 API 密钥', + }, + encrypted: { + front: '密钥将使用 ', + back: ' 技术进行加密和存储。', + }, + }, + modelProvider: { + notConfigured: '系统模型尚未完全配置', + systemModelSettings: '系统模型设置', + systemModelSettingsLink: '为什么需要设置系统模型?', + selectModel: '选择您的模型', + setupModelFirst: '请先设置您的模型', + systemReasoningModel: { + key: '系统推理模型', + tip: 
'设置创建应用使用的默认推理模型,以及对话名称生成、下一步问题建议等功能也会使用该默认推理模型。', + }, + embeddingModel: { + key: 'Embedding 模型', + tip: '设置知识库文档嵌入处理的默认模型,检索和导入知识库均使用该 Embedding 模型进行向量化处理,切换后将导致已导入的知识库与问题之间的向量维度不一致,从而导致检索失败。为避免检索失败,请勿随意切换该模型。', + required: '请选择 Embedding 模型', + }, + speechToTextModel: { + key: '语音转文本模型', + tip: '设置对话中语音转文字输入的默认使用模型。', + }, + ttsModel: { + key: '文本转语音模型', + tip: '设置对话中文字转语音输出的默认使用模型。', + }, + rerankModel: { + key: 'Rerank 模型', + tip: '重排序模型将根据候选文档列表与用户问题语义匹配度进行重新排序,从而改进语义排序的结果', + }, + quota: '额度', + searchModel: '搜索模型', + noModelFound: '找不到模型 {{model}}', + models: '模型列表', + showMoreModelProvider: '显示更多模型提供商', + selector: { + tip: '该模型已被删除。请添模型或选择其他模型。', + emptyTip: '无可用模型', + emptySetting: '请前往设置进行配置', + rerankTip: '请设置 Rerank 模型', + }, + card: { + quota: '额度', + onTrial: '试用中', + paid: '已购买', + quotaExhausted: '配额已用完', + callTimes: '调用次数', + tokens: 'Tokens', + buyQuota: '购买额度', + priorityUse: '优先使用', + removeKey: '删除 API 密钥', + tip: '已付费额度将优先考虑。试用额度将在付费额度用完后使用。', + }, + item: { + deleteDesc: '{{modelName}} 被用作系统推理模型。删除后部分功能将无法使用。请确认。', + freeQuota: '免费额度', + }, + addApiKey: '添加您的 API 密钥', + invalidApiKey: 'Invalid API key', + encrypted: { + front: '您的密钥将使用', + back: '技术进行加密和存储。', + }, + freeQuota: { + howToEarn: '如何获取', + }, + addMoreModelProvider: '添加更多模型提供商', + addModel: '添加模型', + modelsNum: '{{num}} 个模型', + showModels: '显示模型', + showModelsNum: '显示 {{num}} 个模型', + collapse: '收起', + config: '配置', + modelAndParameters: '模型及参数', + model: '模型', + featureSupported: '支持 {{feature}} 功能', + callTimes: '调用次数', + credits: '消息额度', + buyQuota: '购买额度', + getFreeTokens: '获得免费 Tokens', + priorityUsing: '优先使用', + deprecated: '已弃用', + confirmDelete: '确认删除?', + quotaTip: '剩余免费额度', + loadPresets: '加载预设', + parameters: '参数', + loadBalancing: '负载均衡', + loadBalancingDescription: '为模型配置多组凭据,并自动调用。', + loadBalancingHeadline: '负载均衡', + configLoadBalancing: '设置负载均衡', + modelHasBeenDeprecated: '该模型已废弃', + providerManaged: '由模型供应商管理', + providerManagedDescription: '使用模型供应商提供的单组凭据', + 
defaultConfig: '默认配置', + apiKeyStatusNormal: 'API Key 正常', + apiKeyRateLimit: '已达频率上限,{{seconds}}秒后恢复', + addConfig: '增加配置', + editConfig: '修改配置', + loadBalancingLeastKeyWarning: '至少启用 2 个 Key 以使用负载均衡', + loadBalancingInfo: '默认情况下,负载均衡使用 Round-robin 策略。如果触发速率限制,将应用 1 分钟的冷却时间', + upgradeForLoadBalancing: '升级以解锁负载均衡功能', + apiKey: 'API 密钥', + toBeConfigured: '待配置', + configureTip: '请配置 API 密钥,添加模型。', + installProvider: '安装模型供应商', + installDataSourceProvider: '安装数据源供应商', + discoverMore: '发现更多就在', + emptyProviderTitle: '尚未安装模型供应商', + emptyProviderTip: '请安装模型供应商。', + auth: { + unAuthorized: '未授权', + authRemoved: '授权已移除', + apiKeys: 'API 密钥', + addApiKey: '添加 API 密钥', + addModel: '添加模型', + addNewModel: '添加新模型', + addCredential: '添加凭据', + addModelCredential: '添加模型凭据', + editModelCredential: '编辑模型凭据', + modelCredentials: '模型凭据', + modelCredential: '模型凭据', + configModel: '配置模型', + configLoadBalancing: '配置负载均衡', + authorizationError: '授权错误', + specifyModelCredential: '指定模型凭据', + specifyModelCredentialTip: '使用已配置的模型凭据。', + providerManaged: '由模型供应商管理', + providerManagedTip: '使用模型供应商提供的单组凭据。', + apiKeyModal: { + title: 'API 密钥授权配置', + desc: '配置凭据后,工作空间中的所有成员都可以在编排应用时使用此模型。', + addModel: '添加模型', + }, + manageCredentials: '管理凭据', + customModelCredentials: '自定义模型凭据', + addNewModelCredential: '添加模型新凭据', + removeModel: '移除模型', + selectModelCredential: '选择模型凭据', + customModelCredentialsDeleteTip: '模型凭据正在使用中,无法删除', + }, + parametersInvalidRemoved: '部分参数无效,已移除', + }, + dataSource: { + add: '添加数据源', + connect: '绑定', + configure: '配置', + notion: { + title: 'Notion', + description: '使用 Notion 作为知识库的数据源。', + connectedWorkspace: '已绑定工作空间', + addWorkspace: '添加工作空间', + connected: '已绑定', + disconnected: '未绑定', + changeAuthorizedPages: '更改授权页面', + pagesAuthorized: '已授权页面', + sync: '同步', + remove: '删除', + selector: { + pageSelected: '已选页面', + searchPages: '搜索页面...', + noSearchResult: '无搜索结果', + addPages: '添加页面', + preview: '预览', + }, + integratedAlert: 'Notion通过内部凭证集成,无需重新授权。', + }, + website: { 
+ title: '网站', + description: '使用网络爬虫从网站导入内容。', + with: '使用', + configuredCrawlers: '已配置的爬虫', + active: '可用', + inactive: '不可用', + }, + }, + plugin: { + serpapi: { + apiKey: 'API Key', + apiKeyPlaceholder: '输入你的 API 密钥', + keyFrom: '从 SerpAPI 帐户页面获取您的 SerpAPI 密钥', + }, + }, + apiBasedExtension: { + title: 'API 扩展提供了一个集中式的 API 管理,在此统一添加 API 配置后,方便在 Dify 上的各类应用中直接使用。', + link: '了解如何开发您自己的 API 扩展。', + add: '新增 API 扩展', + selector: { + title: 'API 扩展', + placeholder: '请选择 API 扩展', + manage: '管理 API 扩展', + }, + modal: { + title: '新增 API 扩展', + editTitle: '编辑 API 扩展', + name: { + title: '名称', + placeholder: '请输入名称', + }, + apiEndpoint: { + title: 'API Endpoint', + placeholder: '请输入 API endpoint', + }, + apiKey: { + title: 'API-key', + placeholder: '请输入 API-key', + lengthError: 'API-key 不能少于 5 位', + }, + }, + type: '类型', + }, + about: { + changeLog: '更新日志', + updateNow: '现在更新', + nowAvailable: 'Dify {{version}} 现已可用。', + latestAvailable: 'Dify {{version}} 已是最新版本。', + }, + appMenus: { + overview: '监测', + promptEng: '编排', + apiAccess: '访问 API', + logAndAnn: '日志与标注', + logs: '日志', + }, + environment: { + testing: '测试环境', + development: '开发环境', + }, + appModes: { + completionApp: '文本生成型应用', + chatApp: '对话型应用', + }, + datasetMenus: { + documents: '文档', + hitTesting: '召回测试', + settings: '设置', + emptyTip: '此知识尚未集成到任何应用程序中。请参阅文档以获取指导。', + viewDoc: '查看文档', + relatedApp: '个关联应用', + noRelatedApp: '无关联应用', + pipeline: '流水线', + }, + voiceInput: { + speaking: '现在讲...', + converting: '正在转换为文本...', + notAllow: '麦克风未授权', + }, + modelName: { + 'gpt-3.5-turbo': 'GPT-3.5-Turbo', + 'gpt-3.5-turbo-16k': 'GPT-3.5-Turbo-16K', + 'gpt-4': 'GPT-4', + 'gpt-4-32k': 'GPT-4-32K', + 'text-davinci-003': 'Text-Davinci-003', + 'text-embedding-ada-002': 'Text-Embedding-Ada-002', + 'whisper-1': 'Whisper-1', + 'claude-instant-1': 'Claude-Instant', + 'claude-2': 'Claude-2', + }, + chat: { + renameConversation: '重命名会话', + conversationName: '会话名称', + conversationNamePlaceholder: '请输入会话名称', + 
conversationNameCanNotEmpty: '会话名称必填', + citation: { + title: '引用', + linkToDataset: '跳转至知识库', + characters: '字符:', + hitCount: '召回次数:', + vectorHash: '向量哈希:', + hitScore: '召回得分:', + }, + inputPlaceholder: '和 {{botName}} 聊天', + inputDisabledPlaceholder: '仅供试用', + thinking: '深度思考中...', + thought: '已深度思考', + resend: '重新发送', + }, + promptEditor: { + placeholder: '在这里写你的提示词,输入\'{\' 插入变量、输入\'/\' 插入提示内容块', + context: { + item: { + title: '上下文', + desc: '插入上下文模板', + }, + modal: { + title: '有 {{num}} 个知识库在上下文中', + add: '添加上下文', + footer: '您可以在下面的“上下文”部分中管理上下文。', + }, + }, + history: { + item: { + title: '会话历史', + desc: '插入历史消息模板', + }, + modal: { + title: '示例', + user: '你好', + assistant: '你好!今天我能为您提供什么帮助?', + edit: '编辑对话角色名称', + }, + }, + variable: { + item: { + title: '变量 & 外部工具', + desc: '插入变量和外部工具', + }, + outputToolDisabledItem: { + title: '变量', + desc: '插入变量', + }, + modal: { + add: '添加新变量', + addTool: '添加工具', + }, + }, + query: { + item: { + title: '查询内容', + desc: '插入用户查询模板', + }, + }, + existed: 'Prompt 中已存在', + }, + imageUploader: { + uploadFromComputer: '从本地上传', + uploadFromComputerReadError: '图片读取失败,请重新选择。', + uploadFromComputerUploadError: '图片上传失败,请重新上传。', + uploadFromComputerLimit: '上传图片不能超过 {{size}} MB', + pasteImageLink: '粘贴图片链接', + pasteImageLinkInputPlaceholder: '将图像链接粘贴到此处', + pasteImageLinkInvalid: '图片链接无效', + imageUpload: '图片上传', + }, + fileUploader: { + uploadFromComputer: '从本地上传', + pasteFileLink: '粘贴文件链接', + pasteFileLinkInputPlaceholder: '输入文件链接', + uploadFromComputerReadError: '文件读取失败,请重新选择。', + uploadFromComputerUploadError: '文件上传失败,请重新上传。', + uploadFromComputerLimit: '上传 {{type}} 不能超过 {{size}}', + pasteFileLinkInvalid: '文件链接无效', + fileExtensionNotSupport: '文件类型不支持', + fileExtensionBlocked: '出于安全考虑,该文件类型已被禁止上传', + }, + tag: { + placeholder: '全部标签', + addNew: '创建新标签', + noTag: '没有标签', + noTagYet: '还没有标签', + addTag: '添加标签', + editTag: '修改标签', + manageTags: '管理标签', + selectorPlaceholder: '搜索或者创建', + create: '创建', + delete: '删除标签', + deleteTip: 
'标签正在使用中,是否删除?', + created: '标签创建成功', + failed: '标签创建失败', + }, + license: { + expiring: '许可证还有 1 天到期', + expiring_plural: '许可证还有 {{count}} 天到期', + unlimited: '无限制', + }, + pagination: { + perPage: '每页显示', + }, + avatar: { + deleteTitle: '删除头像', + deleteDescription: '确定要删除你的个人头像吗?你的账号将使用默认的首字母头像。', + }, + imageInput: { + dropImageHere: '将图片拖放到此处,或', + browse: '浏览', + supportedFormats: '支持 PNG、JPG、JPEG、WEBP 和 GIF 格式', + }, + you: '你', + feedback: { + content: '反馈内容', + subtitle: '请告诉我们这次回应出错的原因。', + title: '提供反馈', + placeholder: '请描述发生了什么问题或我们可以如何改进...', + }, + dynamicSelect: { + error: '加载选项失败', + noData: '没有可用的选项', + loading: '加载选项...', + selected: '已选择 {{count}} 项', + }, +} + +export default translation diff --git a/web/i18n/zh-Hans/explore.json b/web/i18n/zh-Hans/explore.json index fb4c4ace80..b1005aec4d 100644 --- a/web/i18n/zh-Hans/explore.json +++ b/web/i18n/zh-Hans/explore.json @@ -1,12 +1,14 @@ { - "appCard.addToWorkspace": "添加到工作区", - "appCard.customize": "自定义", + "appCard.addToWorkspace": "使用模板", + "appCard.try": "详情", "appCustomize.nameRequired": "应用程序名称不能为空", "appCustomize.subTitle": "应用程序图标和名称", "appCustomize.title": "从 {{name}} 创建应用程序", - "apps.allCategories": "推荐", - "apps.description": "使用这些模板应用程序,或根据模板自定义您自己的应用程序。", - "apps.title": "探索应用", + "apps.allCategories": "所有", + "apps.resetFilter": "清除筛选", + "apps.resultNum": "{{num}} 个结果", + "apps.title": "试用 Dify 精选示例应用,为您的业务寻找 AI 解决方案", + "banner.viewMore": "查看更多", "category.Agent": "Agent", "category.Assistant": "助手", "category.Entertainment": "娱乐", @@ -23,7 +25,16 @@ "sidebar.chat": "智聊", "sidebar.delete.content": "您确定要删除此程序吗?", "sidebar.delete.title": "删除程序", - "sidebar.discovery": "发现", - "sidebar.workspace": "工作区", - "title": "探索" + "sidebar.noApps.description": "已发布的 web apps 将出现在此处", + "sidebar.noApps.learnMore": "了解更多", + "sidebar.noApps.title": "没有 web apps", + "sidebar.title": "应用库", + "sidebar.webApps": "WEB APPS", + "title": "探索", + "tryApp.category": "分类", + "tryApp.createFromSampleApp": 
"从此模板创建应用", + "tryApp.requirements": "必须配置项", + "tryApp.tabHeader.detail": "编排详情", + "tryApp.tabHeader.try": "试用", + "tryApp.tryInfo": "这是一个示例应用,您可以试用最多 5 条消息。如需继续使用,请点击 “从此模板创建应用” 并完成配置!" } diff --git a/web/i18n/zh-Hans/explore.ts b/web/i18n/zh-Hans/explore.ts new file mode 100644 index 0000000000..2080033904 --- /dev/null +++ b/web/i18n/zh-Hans/explore.ts @@ -0,0 +1,64 @@ +const translation = { + title: '探索', + sidebar: { + title: '应用库', + chat: '智聊', + webApps: 'WEB APPS', + action: { + pin: '置顶', + unpin: '取消置顶', + rename: '重命名', + delete: '删除', + }, + delete: { + title: '删除程序', + content: '您确定要删除此程序吗?', + }, + noApps: { + title: '没有 web apps', + description: '已发布的 web apps 将出现在此处', + learnMore: '了解更多', + }, + }, + apps: { + title: '试用 Dify 精选示例应用,为您的业务寻找 AI 解决方案', + allCategories: '所有', + resultNum: '{{num}} 个结果', + resetFilter: '清除筛选', + }, + appCard: { + addToWorkspace: '使用模板', + try: '详情', + customize: '自定义', + }, + tryApp: { + tabHeader: { + try: '试用', + detail: '编排详情', + }, + createFromSampleApp: '从此模板创建应用', + category: '分类', + requirements: '必须配置项', + tryInfo: '这是一个示例应用,您可以试用最多 5 条消息。如需继续使用,请点击 “从此模板创建应用” 并完成配置!', + }, + appCustomize: { + title: '从 {{name}} 创建应用程序', + subTitle: '应用程序图标和名称', + nameRequired: '应用程序名称不能为空', + }, + category: { + Agent: 'Agent', + Assistant: '助手', + Writing: '写作', + Translate: '翻译', + Programming: '编程', + HR: '人力资源', + Workflow: '工作流', + Entertainment: '娱乐', + }, + banner: { + viewMore: '查看更多', + }, +} + +export default translation diff --git a/web/i18n/zh-Hans/workflow.json b/web/i18n/zh-Hans/workflow.json index 7787c9db4b..a133c3234d 100644 --- a/web/i18n/zh-Hans/workflow.json +++ b/web/i18n/zh-Hans/workflow.json @@ -126,6 +126,7 @@ "common.currentDraftUnpublished": "当前草稿未发布", "common.currentView": "当前视图", "common.currentWorkflow": "整个工作流", + "common.data": "数据", "common.debugAndPreview": "预览", "common.disconnect": "断开连接", "common.duplicate": "复制", @@ -650,6 +651,7 @@ "nodes.llm.jsonSchema.warningTips.saveSchema": 
"请先完成当前字段的编辑", "nodes.llm.model": "模型", "nodes.llm.notSetContextInPromptTip": "要启用上下文功能,请在提示中填写上下文变量。", + "nodes.llm.outputVars.generation": "生成信息", "nodes.llm.outputVars.output": "生成内容", "nodes.llm.outputVars.reasoning_content": "推理内容", "nodes.llm.outputVars.usage": "模型用量信息", diff --git a/web/i18n/zh-Hant/explore.json b/web/i18n/zh-Hant/explore.json index 5a19e649ff..edfab80fcc 100644 --- a/web/i18n/zh-Hant/explore.json +++ b/web/i18n/zh-Hant/explore.json @@ -1,12 +1,7 @@ { - "appCard.addToWorkspace": "新增到工作區", - "appCard.customize": "自定義", "appCustomize.nameRequired": "應用程式名稱不能為空", "appCustomize.subTitle": "應用程式圖示和名稱", "appCustomize.title": "從 {{name}} 建立應用程式", - "apps.allCategories": "推薦", - "apps.description": "使用這些模板應用程式,或根據模板自定義您自己的應用程式。", - "apps.title": "探索應用", "category.Agent": "代理", "category.Assistant": "助手", "category.Entertainment": "娛樂", @@ -23,7 +18,5 @@ "sidebar.chat": "智聊", "sidebar.delete.content": "您確定要刪除此程式嗎?", "sidebar.delete.title": "刪除程式", - "sidebar.discovery": "發現", - "sidebar.workspace": "工作區", "title": "探索" } diff --git a/web/i18n/zh-Hant/explore.ts b/web/i18n/zh-Hant/explore.ts new file mode 100644 index 0000000000..cbb20f0e77 --- /dev/null +++ b/web/i18n/zh-Hant/explore.ts @@ -0,0 +1,38 @@ +const translation = { + title: '探索', + sidebar: { + chat: '智聊', + action: { + pin: '置頂', + unpin: '取消置頂', + rename: '重新命名', + delete: '刪除', + }, + delete: { + title: '刪除程式', + content: '您確定要刪除此程式嗎?', + }, + }, + apps: { + }, + appCard: { + customize: '自定義', + }, + appCustomize: { + title: '從 {{name}} 建立應用程式', + subTitle: '應用程式圖示和名稱', + nameRequired: '應用程式名稱不能為空', + }, + category: { + Assistant: '助手', + Writing: '寫作', + Translate: '翻譯', + Programming: '程式設計', + HR: '人力資源', + Agent: '代理', + Workflow: '工作流', + Entertainment: '娛樂', + }, +} + +export default translation diff --git a/web/middleware.ts b/web/middleware.ts index 9fa7d85b2f..67903f1c53 100644 --- a/web/middleware.ts +++ b/web/middleware.ts @@ -33,7 +33,7 @@ export function middleware(request: 
NextRequest) { const cspHeader = ` default-src 'self' ${scheme_source} ${csp} ${whiteList}; connect-src 'self' ${scheme_source} ${csp} ${whiteList}; - script-src 'self' ${scheme_source} ${csp} ${whiteList}; + script-src 'self' 'wasm-unsafe-eval' ${scheme_source} ${csp} ${whiteList}; style-src 'self' 'unsafe-inline' ${scheme_source} ${whiteList}; worker-src 'self' ${scheme_source} ${csp} ${whiteList}; media-src 'self' ${scheme_source} ${csp} ${whiteList}; diff --git a/web/models/debug.ts b/web/models/debug.ts index 73d0910e82..fb75ae7946 100644 --- a/web/models/debug.ts +++ b/web/models/debug.ts @@ -134,6 +134,7 @@ export type ModelConfig = { provider: string // LLM Provider: for example "OPENAI" model_id: string mode: ModelModeType + prompt_type?: PromptMode configs: PromptConfig chat_prompt_config?: ChatPromptConfig | null completion_prompt_config?: CompletionPromptConfig | null diff --git a/web/models/explore.ts b/web/models/explore.ts index 1d513e9b70..bca92abee5 100644 --- a/web/models/explore.ts +++ b/web/models/explore.ts @@ -28,6 +28,7 @@ export type App = { installed: boolean editable: boolean is_agent: boolean + can_trial: boolean } export type InstalledApp = { diff --git a/web/package.json b/web/package.json index 44cc9196f4..f84455ca3b 100644 --- a/web/package.json +++ b/web/package.json @@ -88,6 +88,8 @@ "echarts": "^5.6.0", "echarts-for-react": "^3.0.5", "elkjs": "^0.9.3", + "embla-carousel-autoplay": "^8.6.0", + "embla-carousel-react": "^8.6.0", "emoji-mart": "^5.6.0", "es-toolkit": "^1.43.0", "fast-deep-equal": "^3.1.3", diff --git a/web/pnpm-lock.yaml b/web/pnpm-lock.yaml index 853c366025..44dfa0b44f 100644 --- a/web/pnpm-lock.yaml +++ b/web/pnpm-lock.yaml @@ -165,6 +165,12 @@ importers: elkjs: specifier: ^0.9.3 version: 0.9.3 + embla-carousel-autoplay: + specifier: ^8.6.0 + version: 8.6.0(embla-carousel@8.6.0) + embla-carousel-react: + specifier: ^8.6.0 + version: 8.6.0(react@19.2.3) emoji-mart: specifier: ^5.6.0 version: 5.6.0 @@ -182,7 +188,7 @@ 
importers: version: 1.11.13 i18next: specifier: ^25.7.3 - version: 25.7.3(typescript@5.9.3) + version: 25.7.4(typescript@5.9.3) i18next-resources-to-backend: specifier: ^1.2.1 version: 1.2.1 @@ -266,7 +272,7 @@ importers: version: 4.6.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) react-i18next: specifier: ^16.5.0 - version: 16.5.0(i18next@25.7.3(typescript@5.9.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(typescript@5.9.3) + version: 16.5.1(i18next@25.7.4(typescript@5.9.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(typescript@5.9.3) react-markdown: specifier: ^9.1.0 version: 9.1.0(@types/react@19.2.7)(react@19.2.3) @@ -402,10 +408,10 @@ importers: version: 5.91.2(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) '@tanstack/react-devtools': specifier: ^0.9.0 - version: 0.9.0(@types/react-dom@19.2.3(@types/react@19.2.7))(@types/react@19.2.7)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(solid-js@1.9.10) + version: 0.9.1(@types/react-dom@19.2.3(@types/react@19.2.7))(@types/react@19.2.7)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(solid-js@1.9.10) '@tanstack/react-form-devtools': specifier: ^0.2.9 - version: 0.2.9(@types/react@19.2.7)(csstype@3.2.3)(react@19.2.3)(solid-js@1.9.10) + version: 0.2.11(@types/react@19.2.7)(csstype@3.2.3)(preact@10.28.0)(react@19.2.3)(solid-js@1.9.10) '@tanstack/react-query-devtools': specifier: ^5.90.2 version: 5.91.1(@tanstack/react-query@5.90.12(react@19.2.3))(react@19.2.3) @@ -513,7 +519,7 @@ importers: version: 1.16.0 knip: specifier: ^5.78.0 - version: 5.78.0(@types/node@18.15.0)(typescript@5.9.3) + version: 5.80.0(@types/node@18.15.0)(typescript@5.9.3) lint-staged: specifier: ^15.5.2 version: 15.5.2 @@ -3279,17 +3285,20 @@ packages: peerDependencies: solid-js: '>=1.9.7' - '@tanstack/devtools-utils@0.0.9': - resolution: {integrity: sha512-tCObM6wbEjuHeGNs3JDhrqBhoMxpJpVuVIg5Kc33EmUI1ZO7KLpC1277Qf6AmSWy3aVOreGwn3y5bJzxmAJNXg==} + '@tanstack/devtools-utils@0.2.3': + resolution: {integrity: 
sha512-Ob7wAGTNs7SfOJWZlV+3fi7JGT8ApgeNKoaKV4trRk3TDjThm0w4EwbNDsfZu5NCvefSOY0sequp2qG8g7zG/g==} engines: {node: '>=18'} peerDependencies: '@types/react': ~19.2.7 + preact: '>=10.0.0' react: '>=17.0.0' solid-js: '>=1.9.7' vue: '>=3.2.0' peerDependenciesMeta: '@types/react': optional: true + preact: + optional: true react: optional: true solid-js: @@ -3297,8 +3306,8 @@ packages: vue: optional: true - '@tanstack/devtools@0.10.1': - resolution: {integrity: sha512-1gtPmCDXV4Pl1nVtoqwjV0tc4E9GMuFtlkBX1Lz1KfqI3W9JojT5YsVifOQ/g8BTQ5w5+tyIANwHU7WYgLq/MQ==} + '@tanstack/devtools@0.10.2': + resolution: {integrity: sha512-6TPNl3jTrCFpyV3m9lBeHxum6btmiihbv+A3xkDpt3JScRcWP1a8G5rZzKhlOtikzG1QSiceRrbckKnIAvZ7FQ==} engines: {node: '>=18'} peerDependencies: solid-js: '>=1.9.7' @@ -3311,11 +3320,11 @@ packages: '@tanstack/form-core@1.27.1': resolution: {integrity: sha512-hPM+0tUnZ2C2zb2TE1lar1JJ0S0cbnQHlUwFcCnVBpMV3rjtUzkoM766gUpWrlmTGCzNad0GbJ0aTxVsjT6J8g==} - '@tanstack/form-core@1.27.6': - resolution: {integrity: sha512-1C4PUpOcCpivddKxtAeqdeqncxnPKiPpTVDRknDExCba+6zCsAjxgL+p3qYA3hu+EFyUAdW71rU+uqYbEa7qqA==} + '@tanstack/form-core@1.27.7': + resolution: {integrity: sha512-nvogpyE98fhb0NDw1Bf2YaCH+L7ZIUgEpqO9TkHucDn6zg3ni521boUpv0i8HKIrmmFwDYjWZoCnrgY4HYWTkw==} - '@tanstack/form-devtools@0.2.9': - resolution: {integrity: sha512-KOJiwvlFPsHeuWXvHUXRVdciXG1OPhg1c476MsLre0YLdaw1jeMlDYSlqq7sdEULX+2Sg/lhNpX86QbQuxzd2A==} + '@tanstack/form-devtools@0.2.11': + resolution: {integrity: sha512-wCQ5uicGfxs34ytZmVhppKELijrx4OU5zRj4PYs0RbBJH3bJYOj5MATsj+rkdDLi1FeVrwLgK2qX4a6c/hWF4g==} peerDependencies: solid-js: '>=1.9.9' @@ -3333,8 +3342,8 @@ packages: '@tanstack/query-devtools@5.91.1': resolution: {integrity: sha512-l8bxjk6BMsCaVQH6NzQEE/bEgFy1hAs5qbgXl0xhzezlaQbPk6Mgz9BqEg2vTLPOHD8N4k+w/gdgCbEzecGyNg==} - '@tanstack/react-devtools@0.9.0': - resolution: {integrity: sha512-Lq0svXOTG5N61SHgx8F0on6zz2GB0kmFjN/yyfNLrJyRgJ+U3jYFRd9ti3uBPABsXzHQMHYYujnTXrOYp/OaUg==} + 
'@tanstack/react-devtools@0.9.1': + resolution: {integrity: sha512-ONDMs117FrzWxFD1JQ9Z94QnTx+63RuQ+9Z3ieSS9bHAWmty4PWiLddAZPvyHCbV9iljlpUEkCoKCO1HMywR2Q==} engines: {node: '>=18'} peerDependencies: '@types/react': ~19.2.7 @@ -3342,8 +3351,8 @@ packages: react: '>=16.8' react-dom: '>=16.8' - '@tanstack/react-form-devtools@0.2.9': - resolution: {integrity: sha512-wg0xrcVY8evIFGVHrnl9s+/9ENzuVbqv5Ru4HyAJjjL4uECtl6KdDJsi0lZdOyoM1UYEQoVdcN8jfBbxkA3q1g==} + '@tanstack/react-form-devtools@0.2.11': + resolution: {integrity: sha512-Hv6aZqH2dfamFPWwonA20xKj61xU3omilOYEGdihldOddl+cFLY6P7q66Zqs0p9a9mOQvBS+/i0zBcZjRzvupg==} peerDependencies: react: ^17.0.0 || ^18.0.0 || ^19.0.0 @@ -3597,9 +3606,6 @@ packages: '@types/node@20.19.26': resolution: {integrity: sha512-0l6cjgF0XnihUpndDhk+nyD3exio3iKaYROSgvh/qSevPXax3L8p5DBRFjbvalnwatGgHEQn2R88y2fA3g4irg==} - '@types/node@20.19.28': - resolution: {integrity: sha512-VyKBr25BuFDzBFCK5sUM6ZXiWfqgCTwTAOK8qzGV/m9FCirXYDlmczJ+d5dXBAQALGCdRRdbteKYfJ84NGEusw==} - '@types/papaparse@5.5.1': resolution: {integrity: sha512-esEO+VISsLIyE+JZBmb89NzsYYbpwV8lmv2rPo6oX5y9KhBaIP7hhHgjuTut54qjdKVMufTEcrh5fUl9+58huw==} @@ -3685,12 +3691,6 @@ packages: peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/project-service@8.50.1': - resolution: {integrity: sha512-E1ur1MCVf+YiP89+o4Les/oBAVzmSbeRB0MQLfSlYtbWU17HPxZ6Bhs5iYmKZRALvEuBoXIZMOIRRc/P++Ortg==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - peerDependencies: - typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/scope-manager@8.49.0': resolution: {integrity: sha512-npgS3zi+/30KSOkXNs0LQXtsg9ekZ8OISAOLGWA/ZOEn0ZH74Ginfl7foziV8DT+D98WfQ5Kopwqb/PZOaIJGg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3699,10 +3699,6 @@ packages: resolution: {integrity: sha512-xCwfuCZjhIqy7+HKxBLrDVT5q/iq7XBVBXLn57RTIIpelLtEIZHXAF/Upa3+gaCpeV1NNS5Z9A+ID6jn50VD4A==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript-eslint/scope-manager@8.50.1': - resolution: {integrity: 
sha512-mfRx06Myt3T4vuoHaKi8ZWNTPdzKPNBhiblze5N50//TSHOAQQevl/aolqA/BcqqbJ88GUnLqjjcBc8EWdBcVw==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript-eslint/tsconfig-utils@8.49.0': resolution: {integrity: sha512-8prixNi1/6nawsRYxet4YOhnbW+W9FK/bQPxsGB1D3ZrDzbJ5FXw5XmzxZv82X3B+ZccuSxo/X8q9nQ+mFecWA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3715,12 +3711,6 @@ packages: peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/tsconfig-utils@8.50.1': - resolution: {integrity: sha512-ooHmotT/lCWLXi55G4mvaUF60aJa012QzvLK0Y+Mp4WdSt17QhMhWOaBWeGTFVkb2gDgBe19Cxy1elPXylslDw==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - peerDependencies: - typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/type-utils@8.49.0': resolution: {integrity: sha512-KTExJfQ+svY8I10P4HdxKzWsvtVnsuCifU5MvXrRwoP2KOlNZ9ADNEWWsQTJgMxLzS5VLQKDjkCT/YzgsnqmZg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3743,10 +3733,6 @@ packages: resolution: {integrity: sha512-iX1mgmGrXdANhhITbpp2QQM2fGehBse9LbTf0sidWK6yg/NE+uhV5dfU1g6EYPlcReYmkE9QLPq/2irKAmtS9w==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript-eslint/types@8.50.1': - resolution: {integrity: sha512-v5lFIS2feTkNyMhd7AucE/9j/4V9v5iIbpVRncjk/K0sQ6Sb+Np9fgYS/63n6nwqahHQvbmujeBL7mp07Q9mlA==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript-eslint/typescript-estree@8.49.0': resolution: {integrity: sha512-jrLdRuAbPfPIdYNppHJ/D0wN+wwNfJ32YTAm10eJVsFmrVpXQnDWBn8niCSMlWjvml8jsce5E/O+86IQtTbJWA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3759,12 +3745,6 @@ packages: peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/typescript-estree@8.50.1': - resolution: {integrity: sha512-woHPdW+0gj53aM+cxchymJCrh0cyS7BTIdcDxWUNsclr9VDkOSbqC13juHzxOmQ22dDkMZEpZB+3X1WpUvzgVQ==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - peerDependencies: - typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/utils@8.49.0': resolution: {integrity: 
sha512-N3W7rJw7Rw+z1tRsHZbK395TWSYvufBXumYtEGzypgMUthlg0/hmCImeA8hgO2d2G4pd7ftpxxul2J8OdtdaFA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3779,13 +3759,6 @@ packages: eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/utils@8.50.1': - resolution: {integrity: sha512-lCLp8H1T9T7gPbEuJSnHwnSuO9mDf8mfK/Nion5mZmiEaQD9sWf9W4dfeFqRyqRjF06/kBuTmAqcs9sewM2NbQ==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - peerDependencies: - eslint: ^8.57.0 || ^9.0.0 - typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/visitor-keys@8.49.0': resolution: {integrity: sha512-LlKaciDe3GmZFphXIc79THF/YYBugZ7FS1pO581E/edlVVNbZKDy93evqmrfQ9/Y4uN0vVhX4iuchq26mK/iiA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3794,10 +3767,6 @@ packages: resolution: {integrity: sha512-Xzmnb58+Db78gT/CCj/PVCvK+zxbnsw6F+O1oheYszJbBSdEjVhQi3C/Xttzxgi/GLmpvOggRs1RFpiJ8+c34Q==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript-eslint/visitor-keys@8.50.1': - resolution: {integrity: sha512-IrDKrw7pCRUR94zeuCSUWQ+w8JEf5ZX5jl/e6AHGSLi1/zIr0lgutfn/7JpfCey+urpgQEdrZVYzCaVVKiTwhQ==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20251209.1': resolution: {integrity: sha512-F1cnYi+ZeinYQnaTQKKIsbuoq8vip5iepBkSZXlB8PjbG62LW1edUdktd/nVEc+Q+SEysSQ3jRdk9eU766s5iw==} cpu: [arm64] @@ -4947,6 +4916,24 @@ packages: elliptic@6.6.1: resolution: {integrity: sha512-RaddvvMatK2LJHqFJ+YA4WysVN5Ita9E35botqIYspQ4TkRAlCicdzKOjlyv/1Za5RyTNn7di//eEV0uTAfe3g==} + embla-carousel-autoplay@8.6.0: + resolution: {integrity: sha512-OBu5G3nwaSXkZCo1A6LTaFMZ8EpkYbwIaH+bPqdBnDGQ2fh4+NbzjXjs2SktoPNKCtflfVMc75njaDHOYXcrsA==} + peerDependencies: + embla-carousel: 8.6.0 + + embla-carousel-react@8.6.0: + resolution: {integrity: sha512-0/PjqU7geVmo6F734pmPqpyHqiM99olvyecY7zdweCw+6tKEXnrE90pBiBbMMU8s5tICemzpQ3hi5EpxzGW+JA==} + peerDependencies: + react: ^16.8.0 || ^17.0.1 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc + + 
embla-carousel-reactive-utils@8.6.0: + resolution: {integrity: sha512-fMVUDUEx0/uIEDM0Mz3dHznDhfX+znCCDCeIophYb1QGVM7YThSWX+wz11zlYwWFOr74b4QLGg0hrGPJeG2s4A==} + peerDependencies: + embla-carousel: 8.6.0 + + embla-carousel@8.6.0: + resolution: {integrity: sha512-SjWyZBHJPbqxHOzckOfo8lHisEaJWmwd23XppYFYVh10bU66/Pn5tkVkbkCMZVdbUE5eTCI2nD8OyIP4Z+uwkA==} + emoji-mart@5.6.0: resolution: {integrity: sha512-eJp3QRe79pjwa+duv+n7+5YsNhRcMl812EcFVwrnRvYKoNPoQb5qxU8DG6Bgwji0akHdp6D4Ln6tYLG58MFSow==} @@ -5750,8 +5737,8 @@ packages: i18next-resources-to-backend@1.2.1: resolution: {integrity: sha512-okHbVA+HZ7n1/76MsfhPqDou0fptl2dAlhRDu2ideXloRRduzHsqDOznJBef+R3DFZnbvWoBW+KxJ7fnFjd6Yw==} - i18next@25.7.3: - resolution: {integrity: sha512-2XaT+HpYGuc2uTExq9TVRhLsso+Dxym6PWaKpn36wfBmTI779OQ7iP/XaZHzrnGyzU4SHpFrTYLKfVyBfAhVNA==} + i18next@25.7.4: + resolution: {integrity: sha512-hRkpEblXXcXSNbw8mBNq9042OEetgyB/ahc/X17uV/khPwzV+uB8RHceHh3qavyrkPJvmXFKXME2Sy1E0KjAfw==} peerDependencies: typescript: ^5 peerDependenciesMeta: @@ -6090,8 +6077,8 @@ packages: resolution: {integrity: sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==} engines: {node: '>=6'} - knip@5.78.0: - resolution: {integrity: sha512-nB7i/fgiJl7WVxdv5lX4ZPfDt9/zrw/lOgZtyioy988xtFhKuFJCRdHWT1Zg9Avc0yaojvnmEuAXU8SeMblKww==} + knip@5.80.0: + resolution: {integrity: sha512-K/Ga2f/SHEUXXriVdaw2GfeIUJ5muwdqusHGkCtaG/1qeMmQJiuwZj9KnPxaDbnYPAu8RWjYYh8Nyb+qlJ3d8A==} engines: {node: '>=18.18.0'} hasBin: true peerDependencies: @@ -6128,8 +6115,8 @@ packages: lexical@0.38.2: resolution: {integrity: sha512-JJmfsG3c4gwBHzUGffbV7ifMNkKAWMCnYE3xJl87gty7hjyV5f3xq7eqTjP5HFYvO4XpjJvvWO2/djHp5S10tw==} - lib0@0.2.117: - resolution: {integrity: sha512-DeXj9X5xDCjgKLU/7RR+/HQEVzuuEUiwldwOGsHK/sfAfELGWEyTcf0x+uOvCvK3O2zPmZePXWL85vtia6GyZw==} + lib0@0.2.115: + resolution: {integrity: sha512-noaW4yNp6hCjOgDnWWxW0vGXE3kZQI5Kqiwz+jIWXavI9J9WyfJ9zjsbQlQlgjIbHBrvlA/x3TSIXBUJj+0L6g==} engines: {node: 
'>=16'} hasBin: true @@ -7146,8 +7133,8 @@ packages: react: '>=16.8.1' react-dom: '>=16.8.1' - react-i18next@16.5.0: - resolution: {integrity: sha512-IMpPTyCTKxEj8klCrLKUTIUa8uYTd851+jcu2fJuUB9Agkk9Qq8asw4omyeHVnOXHrLgQJGTm5zTvn8HpaPiqw==} + react-i18next@16.5.1: + resolution: {integrity: sha512-Hks6UIRZWW4c+qDAnx1csVsCGYeIR4MoBGQgJ+NUoNnO6qLxXuf8zu0xdcinyXUORgGzCdRsexxO1Xzv3sTdnw==} peerDependencies: i18next: '>= 25.6.2' react: '>= 16.8.0' @@ -8422,7 +8409,6 @@ packages: whatwg-encoding@3.1.1: resolution: {integrity: sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==} engines: {node: '>=18'} - deprecated: Use @exodus/bytes instead for a more spec-conformant and faster implementation whatwg-mimetype@3.0.0: resolution: {integrity: sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==} @@ -8746,7 +8732,7 @@ snapshots: '@amplitude/targeting@0.2.0': dependencies: '@amplitude/analytics-client-common': 2.4.16 - '@amplitude/analytics-core': 2.35.0 + '@amplitude/analytics-core': 2.33.0 '@amplitude/analytics-types': 2.11.0 '@amplitude/experiment-core': 0.7.2 idb: 8.0.0 @@ -9729,7 +9715,7 @@ snapshots: '@es-joy/jsdoccomment@0.76.0': dependencies: '@types/estree': 1.0.8 - '@typescript-eslint/types': 8.50.1 + '@typescript-eslint/types': 8.50.0 comment-parser: 1.4.1 esquery: 1.6.0 jsdoc-type-pratt-parser: 6.10.0 @@ -9737,7 +9723,7 @@ snapshots: '@es-joy/jsdoccomment@0.78.0': dependencies: '@types/estree': 1.0.8 - '@typescript-eslint/types': 8.50.1 + '@typescript-eslint/types': 8.50.0 comment-parser: 1.4.1 esquery: 1.6.0 jsdoc-type-pratt-parser: 7.0.0 @@ -9842,7 +9828,7 @@ snapshots: '@eslint-react/eff': 2.3.13 '@typescript-eslint/types': 8.50.0 '@typescript-eslint/typescript-estree': 8.50.0(typescript@5.9.3) - '@typescript-eslint/utils': 8.50.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) + '@typescript-eslint/utils': 8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) eslint: 
9.39.2(jiti@1.21.7) string-ts: 2.3.1 typescript: 5.9.3 @@ -9857,7 +9843,7 @@ snapshots: '@eslint-react/var': 2.3.13(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.50.0 '@typescript-eslint/types': 8.50.0 - '@typescript-eslint/utils': 8.50.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) + '@typescript-eslint/utils': 8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) birecord: 0.1.1 eslint: 9.39.2(jiti@1.21.7) ts-pattern: 5.9.0 @@ -9874,7 +9860,7 @@ snapshots: '@typescript-eslint/scope-manager': 8.49.0 '@typescript-eslint/type-utils': 8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) '@typescript-eslint/types': 8.49.0 - '@typescript-eslint/utils': 8.50.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) + '@typescript-eslint/utils': 8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) eslint: 9.39.2(jiti@1.21.7) eslint-plugin-react-dom: 2.3.13(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) eslint-plugin-react-hooks-extra: 2.3.13(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) @@ -9889,7 +9875,7 @@ snapshots: '@eslint-react/shared@2.3.13(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3)': dependencies: '@eslint-react/eff': 2.3.13 - '@typescript-eslint/utils': 8.50.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) + '@typescript-eslint/utils': 8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) eslint: 9.39.2(jiti@1.21.7) ts-pattern: 5.9.0 typescript: 5.9.3 @@ -9903,7 +9889,7 @@ snapshots: '@eslint-react/eff': 2.3.13 '@typescript-eslint/scope-manager': 8.50.0 '@typescript-eslint/types': 8.50.0 - '@typescript-eslint/utils': 8.50.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) + '@typescript-eslint/utils': 8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) eslint: 9.39.2(jiti@1.21.7) ts-pattern: 5.9.0 typescript: 5.9.3 @@ -11494,7 +11480,7 @@ snapshots: '@stylistic/eslint-plugin@5.6.1(eslint@9.39.2(jiti@1.21.7))': dependencies: '@eslint-community/eslint-utils': 4.9.0(eslint@9.39.2(jiti@1.21.7)) - '@typescript-eslint/types': 8.50.1 + 
'@typescript-eslint/types': 8.49.0 eslint: 9.39.2(jiti@1.21.7) eslint-visitor-keys: 4.2.1 espree: 10.4.0 @@ -11592,17 +11578,18 @@ snapshots: transitivePeerDependencies: - csstype - '@tanstack/devtools-utils@0.0.9(@types/react@19.2.7)(csstype@3.2.3)(react@19.2.3)(solid-js@1.9.10)': + '@tanstack/devtools-utils@0.2.3(@types/react@19.2.7)(csstype@3.2.3)(preact@10.28.0)(react@19.2.3)(solid-js@1.9.10)': dependencies: '@tanstack/devtools-ui': 0.4.4(csstype@3.2.3)(solid-js@1.9.10) optionalDependencies: '@types/react': 19.2.7 + preact: 10.28.0 react: 19.2.3 solid-js: 1.9.10 transitivePeerDependencies: - csstype - '@tanstack/devtools@0.10.1(csstype@3.2.3)(solid-js@1.9.10)': + '@tanstack/devtools@0.10.2(csstype@3.2.3)(solid-js@1.9.10)': dependencies: '@solid-primitives/event-listener': 2.4.3(solid-js@1.9.10) '@solid-primitives/keyboard': 1.3.3(solid-js@1.9.10) @@ -11620,7 +11607,7 @@ snapshots: '@tanstack/eslint-plugin-query@5.91.2(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3)': dependencies: - '@typescript-eslint/utils': 8.50.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) + '@typescript-eslint/utils': 8.50.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) eslint: 9.39.2(jiti@1.21.7) transitivePeerDependencies: - supports-color @@ -11632,17 +11619,17 @@ snapshots: '@tanstack/pacer': 0.15.4 '@tanstack/store': 0.7.7 - '@tanstack/form-core@1.27.6': + '@tanstack/form-core@1.27.7': dependencies: '@tanstack/devtools-event-client': 0.4.0 '@tanstack/pacer-lite': 0.1.1 '@tanstack/store': 0.7.7 - '@tanstack/form-devtools@0.2.9(@types/react@19.2.7)(csstype@3.2.3)(react@19.2.3)(solid-js@1.9.10)': + '@tanstack/form-devtools@0.2.11(@types/react@19.2.7)(csstype@3.2.3)(preact@10.28.0)(react@19.2.3)(solid-js@1.9.10)': dependencies: '@tanstack/devtools-ui': 0.4.4(csstype@3.2.3)(solid-js@1.9.10) - '@tanstack/devtools-utils': 0.0.9(@types/react@19.2.7)(csstype@3.2.3)(react@19.2.3)(solid-js@1.9.10) - '@tanstack/form-core': 1.27.6 + '@tanstack/devtools-utils': 
0.2.3(@types/react@19.2.7)(csstype@3.2.3)(preact@10.28.0)(react@19.2.3)(solid-js@1.9.10) + '@tanstack/form-core': 1.27.7 clsx: 2.1.1 dayjs: 1.11.19 goober: 2.1.18(csstype@3.2.3) @@ -11650,6 +11637,7 @@ snapshots: transitivePeerDependencies: - '@types/react' - csstype + - preact - react - vue @@ -11664,9 +11652,9 @@ snapshots: '@tanstack/query-devtools@5.91.1': {} - '@tanstack/react-devtools@0.9.0(@types/react-dom@19.2.3(@types/react@19.2.7))(@types/react@19.2.7)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(solid-js@1.9.10)': + '@tanstack/react-devtools@0.9.1(@types/react-dom@19.2.3(@types/react@19.2.7))(@types/react@19.2.7)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(solid-js@1.9.10)': dependencies: - '@tanstack/devtools': 0.10.1(csstype@3.2.3)(solid-js@1.9.10) + '@tanstack/devtools': 0.10.2(csstype@3.2.3)(solid-js@1.9.10) '@types/react': 19.2.7 '@types/react-dom': 19.2.3(@types/react@19.2.7) react: 19.2.3 @@ -11677,14 +11665,15 @@ snapshots: - solid-js - utf-8-validate - '@tanstack/react-form-devtools@0.2.9(@types/react@19.2.7)(csstype@3.2.3)(react@19.2.3)(solid-js@1.9.10)': + '@tanstack/react-form-devtools@0.2.11(@types/react@19.2.7)(csstype@3.2.3)(preact@10.28.0)(react@19.2.3)(solid-js@1.9.10)': dependencies: - '@tanstack/devtools-utils': 0.0.9(@types/react@19.2.7)(csstype@3.2.3)(react@19.2.3)(solid-js@1.9.10) - '@tanstack/form-devtools': 0.2.9(@types/react@19.2.7)(csstype@3.2.3)(react@19.2.3)(solid-js@1.9.10) + '@tanstack/devtools-utils': 0.2.3(@types/react@19.2.7)(csstype@3.2.3)(preact@10.28.0)(react@19.2.3)(solid-js@1.9.10) + '@tanstack/form-devtools': 0.2.11(@types/react@19.2.7)(csstype@3.2.3)(preact@10.28.0)(react@19.2.3)(solid-js@1.9.10) react: 19.2.3 transitivePeerDependencies: - '@types/react' - csstype + - preact - solid-js - vue @@ -11972,11 +11961,6 @@ snapshots: dependencies: undici-types: 6.21.0 - '@types/node@20.19.28': - dependencies: - undici-types: 6.21.0 - optional: true - '@types/papaparse@5.5.1': dependencies: 
'@types/node': 18.15.0 @@ -12067,17 +12051,8 @@ snapshots: '@typescript-eslint/project-service@8.50.0(typescript@5.9.3)': dependencies: - '@typescript-eslint/tsconfig-utils': 8.50.1(typescript@5.9.3) - '@typescript-eslint/types': 8.50.1 - debug: 4.4.3 - typescript: 5.9.3 - transitivePeerDependencies: - - supports-color - - '@typescript-eslint/project-service@8.50.1(typescript@5.9.3)': - dependencies: - '@typescript-eslint/tsconfig-utils': 8.50.1(typescript@5.9.3) - '@typescript-eslint/types': 8.50.1 + '@typescript-eslint/tsconfig-utils': 8.50.0(typescript@5.9.3) + '@typescript-eslint/types': 8.50.0 debug: 4.4.3 typescript: 5.9.3 transitivePeerDependencies: @@ -12093,11 +12068,6 @@ snapshots: '@typescript-eslint/types': 8.50.0 '@typescript-eslint/visitor-keys': 8.50.0 - '@typescript-eslint/scope-manager@8.50.1': - dependencies: - '@typescript-eslint/types': 8.50.1 - '@typescript-eslint/visitor-keys': 8.50.1 - '@typescript-eslint/tsconfig-utils@8.49.0(typescript@5.9.3)': dependencies: typescript: 5.9.3 @@ -12106,10 +12076,6 @@ snapshots: dependencies: typescript: 5.9.3 - '@typescript-eslint/tsconfig-utils@8.50.1(typescript@5.9.3)': - dependencies: - typescript: 5.9.3 - '@typescript-eslint/type-utils@8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3)': dependencies: '@typescript-eslint/types': 8.49.0 @@ -12138,8 +12104,6 @@ snapshots: '@typescript-eslint/types@8.50.0': {} - '@typescript-eslint/types@8.50.1': {} - '@typescript-eslint/typescript-estree@8.49.0(typescript@5.9.3)': dependencies: '@typescript-eslint/project-service': 8.49.0(typescript@5.9.3) @@ -12170,21 +12134,6 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/typescript-estree@8.50.1(typescript@5.9.3)': - dependencies: - '@typescript-eslint/project-service': 8.50.1(typescript@5.9.3) - '@typescript-eslint/tsconfig-utils': 8.50.1(typescript@5.9.3) - '@typescript-eslint/types': 8.50.1 - '@typescript-eslint/visitor-keys': 8.50.1 - debug: 4.4.3 - minimatch: 9.0.5 - semver: 
7.7.3 - tinyglobby: 0.2.15 - ts-api-utils: 2.1.0(typescript@5.9.3) - typescript: 5.9.3 - transitivePeerDependencies: - - supports-color - '@typescript-eslint/utils@8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3)': dependencies: '@eslint-community/eslint-utils': 4.9.0(eslint@9.39.2(jiti@1.21.7)) @@ -12207,17 +12156,6 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/utils@8.50.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3)': - dependencies: - '@eslint-community/eslint-utils': 4.9.0(eslint@9.39.2(jiti@1.21.7)) - '@typescript-eslint/scope-manager': 8.50.1 - '@typescript-eslint/types': 8.50.1 - '@typescript-eslint/typescript-estree': 8.50.1(typescript@5.9.3) - eslint: 9.39.2(jiti@1.21.7) - typescript: 5.9.3 - transitivePeerDependencies: - - supports-color - '@typescript-eslint/visitor-keys@8.49.0': dependencies: '@typescript-eslint/types': 8.49.0 @@ -12228,11 +12166,6 @@ snapshots: '@typescript-eslint/types': 8.50.0 eslint-visitor-keys: 4.2.1 - '@typescript-eslint/visitor-keys@8.50.1': - dependencies: - '@typescript-eslint/types': 8.50.1 - eslint-visitor-keys: 4.2.1 - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20251209.1': optional: true @@ -12297,8 +12230,8 @@ snapshots: '@vitest/eslint-plugin@1.6.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3)(vitest@4.0.16(@types/node@18.15.0)(happy-dom@20.0.11)(jiti@1.21.7)(jsdom@27.3.0(canvas@3.2.0))(sass@1.95.0)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: - '@typescript-eslint/scope-manager': 8.50.1 - '@typescript-eslint/utils': 8.50.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) + '@typescript-eslint/scope-manager': 8.49.0 + '@typescript-eslint/utils': 8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) eslint: 9.39.2(jiti@1.21.7) optionalDependencies: typescript: 5.9.3 @@ -13488,6 +13421,22 @@ snapshots: minimalistic-assert: 1.0.1 minimalistic-crypto-utils: 1.0.1 + embla-carousel-autoplay@8.6.0(embla-carousel@8.6.0): + dependencies: + embla-carousel: 8.6.0 + + 
embla-carousel-react@8.6.0(react@19.2.3): + dependencies: + embla-carousel: 8.6.0 + embla-carousel-reactive-utils: 8.6.0(embla-carousel@8.6.0) + react: 19.2.3 + + embla-carousel-reactive-utils@8.6.0(embla-carousel@8.6.0): + dependencies: + embla-carousel: 8.6.0 + + embla-carousel@8.6.0: {} + emoji-mart@5.6.0: {} emoji-regex@8.0.0: {} @@ -13699,8 +13648,8 @@ snapshots: eslint-plugin-perfectionist@4.15.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3): dependencies: - '@typescript-eslint/types': 8.50.1 - '@typescript-eslint/utils': 8.50.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) + '@typescript-eslint/types': 8.49.0 + '@typescript-eslint/utils': 8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) eslint: 9.39.2(jiti@1.21.7) natural-orderby: 5.0.0 transitivePeerDependencies: @@ -13727,7 +13676,7 @@ snapshots: '@eslint-react/var': 2.3.13(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.49.0 '@typescript-eslint/types': 8.49.0 - '@typescript-eslint/utils': 8.50.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) + '@typescript-eslint/utils': 8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) compare-versions: 6.1.1 eslint: 9.39.2(jiti@1.21.7) string-ts: 2.3.1 @@ -13746,7 +13695,7 @@ snapshots: '@typescript-eslint/scope-manager': 8.49.0 '@typescript-eslint/type-utils': 8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) '@typescript-eslint/types': 8.49.0 - '@typescript-eslint/utils': 8.50.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) + '@typescript-eslint/utils': 8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) eslint: 9.39.2(jiti@1.21.7) string-ts: 2.3.1 ts-pattern: 5.9.0 @@ -13775,7 +13724,7 @@ snapshots: '@typescript-eslint/scope-manager': 8.49.0 '@typescript-eslint/type-utils': 8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) '@typescript-eslint/types': 8.49.0 - '@typescript-eslint/utils': 8.50.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) + '@typescript-eslint/utils': 
8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) eslint: 9.39.2(jiti@1.21.7) string-ts: 2.3.1 ts-pattern: 5.9.0 @@ -13796,7 +13745,7 @@ snapshots: '@eslint-react/var': 2.3.13(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.49.0 '@typescript-eslint/types': 8.49.0 - '@typescript-eslint/utils': 8.50.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) + '@typescript-eslint/utils': 8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) eslint: 9.39.2(jiti@1.21.7) string-ts: 2.3.1 ts-pattern: 5.9.0 @@ -13814,7 +13763,7 @@ snapshots: '@typescript-eslint/scope-manager': 8.49.0 '@typescript-eslint/type-utils': 8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) '@typescript-eslint/types': 8.49.0 - '@typescript-eslint/utils': 8.50.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) + '@typescript-eslint/utils': 8.49.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) compare-versions: 6.1.1 eslint: 9.39.2(jiti@1.21.7) is-immutable-type: 5.0.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) @@ -13852,7 +13801,7 @@ snapshots: eslint-plugin-storybook@10.1.10(eslint@9.39.2(jiti@1.21.7))(storybook@9.1.17(@testing-library/dom@10.4.1)(vite@7.3.0(@types/node@18.15.0)(jiti@1.21.7)(sass@1.95.0)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(typescript@5.9.3): dependencies: - '@typescript-eslint/utils': 8.50.1(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) + '@typescript-eslint/utils': 8.50.0(eslint@9.39.2(jiti@1.21.7))(typescript@5.9.3) eslint: 9.39.2(jiti@1.21.7) storybook: 9.1.17(@testing-library/dom@10.4.1)(vite@7.3.0(@types/node@18.15.0)(jiti@1.21.7)(sass@1.95.0)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) transitivePeerDependencies: @@ -14313,7 +14262,7 @@ snapshots: happy-dom@20.0.11: dependencies: - '@types/node': 20.19.28 + '@types/node': 20.19.26 '@types/whatwg-mimetype': 3.0.2 whatwg-mimetype: 3.0.0 optional: true @@ -14569,7 +14518,7 @@ snapshots: dependencies: '@babel/runtime': 7.28.4 - i18next@25.7.3(typescript@5.9.3): + i18next@25.7.4(typescript@5.9.3): 
dependencies: '@babel/runtime': 7.28.4 optionalDependencies: @@ -14853,7 +14802,7 @@ snapshots: kleur@4.1.5: {} - knip@5.78.0(@types/node@18.15.0)(typescript@5.9.3): + knip@5.80.0(@types/node@18.15.0)(typescript@5.9.3): dependencies: '@nodelib/fs.walk': 1.2.8 '@types/node': 18.15.0 @@ -14902,7 +14851,7 @@ snapshots: lexical@0.38.2: {} - lib0@0.2.117: + lib0@0.2.115: dependencies: isomorphic.js: 0.2.5 @@ -16260,11 +16209,11 @@ snapshots: react: 19.2.3 react-dom: 19.2.3(react@19.2.3) - react-i18next@16.5.0(i18next@25.7.3(typescript@5.9.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(typescript@5.9.3): + react-i18next@16.5.1(i18next@25.7.4(typescript@5.9.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(typescript@5.9.3): dependencies: '@babel/runtime': 7.28.4 html-parse-stringify: 3.0.1 - i18next: 25.7.3(typescript@5.9.3) + i18next: 25.7.4(typescript@5.9.3) react: 19.2.3 use-sync-external-store: 1.6.0(react@19.2.3) optionalDependencies: @@ -17762,7 +17711,7 @@ snapshots: yjs@13.6.27: dependencies: - lib0: 0.2.117 + lib0: 0.2.115 yocto-queue@0.1.0: {} diff --git a/web/service/base.ts b/web/service/base.ts index 2ab115f96c..bbdcf80450 100644 --- a/web/service/base.ts +++ b/web/service/base.ts @@ -34,12 +34,27 @@ import { getWebAppPassport } from './webapp-auth' const TIME_OUT = 100000 +export type IconObject = { + background: string + content: string +} + export type IOnDataMoreInfo = { conversationId?: string taskId?: string messageId: string errorMessage?: string errorCode?: string + chunk_type?: 'text' | 'tool_call' | 'tool_result' | 'thought' | 'thought_start' | 'thought_end' + tool_call_id?: string + tool_name?: string + tool_arguments?: string + tool_icon?: string | IconObject + tool_icon_dark?: string | IconObject + + tool_files?: string[] + tool_error?: string + tool_elapsed_time?: number } export type IOnData = (message: string, isFirstMessage: boolean, moreInfo: IOnDataMoreInfo) => void @@ -245,6 +260,15 @@ export const handleStream = ( conversationId: 
bufferObj.conversation_id, taskId: bufferObj.task_id, messageId: bufferObj.id, + chunk_type: bufferObj.chunk_type, + tool_call_id: bufferObj.tool_call_id, + tool_name: bufferObj.tool_name, + tool_arguments: bufferObj.tool_arguments, + tool_icon: bufferObj.tool_icon, + tool_icon_dark: bufferObj.tool_icon_dark, + tool_files: bufferObj.tool_files, + tool_error: bufferObj.tool_error, + tool_elapsed_time: bufferObj.tool_elapsed_time, }) isFirstMessage = false } diff --git a/web/service/debug.ts b/web/service/debug.ts index 850f3dfc24..9f11643e7f 100644 --- a/web/service/debug.ts +++ b/web/service/debug.ts @@ -1,4 +1,4 @@ -import type { IOnCompleted, IOnData, IOnError, IOnFile, IOnMessageEnd, IOnMessageReplace, IOnThought } from './base' +import type { IOnCompleted, IOnData, IOnError, IOnMessageReplace } from './base' import type { ModelParameterRule } from '@/app/components/header/account-setting/model-provider-page/declarations' import type { ChatPromptConfig, CompletionPromptConfig } from '@/models/debug' import type { AppModeEnum, ModelModeType } from '@/types/app' @@ -25,24 +25,6 @@ export type CodeGenRes = { error?: string } -export const sendChatMessage = async (appId: string, body: Record, { onData, onCompleted, onThought, onFile, onError, getAbortController, onMessageEnd, onMessageReplace }: { - onData: IOnData - onCompleted: IOnCompleted - onFile: IOnFile - onThought: IOnThought - onMessageEnd: IOnMessageEnd - onMessageReplace: IOnMessageReplace - onError: IOnError - getAbortController?: (abortController: AbortController) => void -}) => { - return ssePost(`apps/${appId}/chat-messages`, { - body: { - ...body, - response_mode: 'streaming', - }, - }, { onData, onCompleted, onThought, onFile, onError, getAbortController, onMessageEnd, onMessageReplace }) -} - export const stopChatMessageResponding = async (appId: string, taskId: string) => { return post(`apps/${appId}/chat-messages/${taskId}/stop`) } diff --git a/web/service/explore.ts b/web/service/explore.ts 
index b4056da4ab..5837544e52 100644 --- a/web/service/explore.ts +++ b/web/service/explore.ts @@ -32,3 +32,8 @@ export const updatePinStatus = (id: string, isPinned: boolean) => { export const getAppAccessModeByAppId = (appId: string) => { return get<{ accessMode: AccessMode }>(`/enterprise/webapp/app/access-mode?appId=${appId}`) } + +export const fetchBanners = (language?: string): Promise => { + const url = language ? `/explore/banners?language=${language}` : '/explore/banners' + return get(url) +} diff --git a/web/service/share.ts b/web/service/share.ts index 203dc896db..add1256f30 100644 --- a/web/service/share.ts +++ b/web/service/share.ts @@ -2,22 +2,17 @@ import type { IOnCompleted, IOnData, IOnError, - IOnFile, IOnIterationFinished, IOnIterationNext, IOnIterationStarted, IOnLoopFinished, IOnLoopNext, IOnLoopStarted, - IOnMessageEnd, IOnMessageReplace, IOnNodeFinished, IOnNodeStarted, IOnTextChunk, IOnTextReplace, - IOnThought, - IOnTTSChunk, - IOnTTSEnd, IOnWorkflowFinished, IOnWorkflowStarted, } from './base' @@ -44,45 +39,43 @@ import { } from './base' import { getWebAppAccessToken } from './webapp-auth' -function getAction(action: 'get' | 'post' | 'del' | 'patch', isInstalledApp: boolean) { +export enum AppSourceType { + webApp = 'webApp', + installedApp = 'installedApp', + tryApp = 'tryApp', +} + +const apiPrefix = { + [AppSourceType.webApp]: '', + [AppSourceType.installedApp]: 'installed-apps', + [AppSourceType.tryApp]: 'trial-apps', +} + +function getIsPublicAPI(appSourceType: AppSourceType) { + return appSourceType === AppSourceType.webApp +} + +function getAction(action: 'get' | 'post' | 'del' | 'patch', appSourceType: AppSourceType) { + const isNeedLogin = !getIsPublicAPI(appSourceType) switch (action) { case 'get': - return isInstalledApp ? consoleGet : get + return isNeedLogin ? consoleGet : get case 'post': - return isInstalledApp ? consolePost : post + return isNeedLogin ? consolePost : post case 'patch': - return isInstalledApp ? 
consolePatch : patch + return isNeedLogin ? consolePatch : patch case 'del': - return isInstalledApp ? consoleDel : del + return isNeedLogin ? consoleDel : del } } -export function getUrl(url: string, isInstalledApp: boolean, installedAppId: string) { - return isInstalledApp ? `installed-apps/${installedAppId}/${url.startsWith('/') ? url.slice(1) : url}` : url +export function getUrl(url: string, appSourceType: AppSourceType, appId: string) { + const hasPrefix = appSourceType !== AppSourceType.webApp + return hasPrefix ? `${apiPrefix[appSourceType]}/${appId}/${url.startsWith('/') ? url.slice(1) : url}` : url } -export const sendChatMessage = async (body: Record, { onData, onCompleted, onThought, onFile, onError, getAbortController, onMessageEnd, onMessageReplace, onTTSChunk, onTTSEnd }: { - onData: IOnData - onCompleted: IOnCompleted - onFile: IOnFile - onThought: IOnThought - onError: IOnError - onMessageEnd?: IOnMessageEnd - onMessageReplace?: IOnMessageReplace - getAbortController?: (abortController: AbortController) => void - onTTSChunk?: IOnTTSChunk - onTTSEnd?: IOnTTSEnd -}, isInstalledApp: boolean, installedAppId = '') => { - return ssePost(getUrl('chat-messages', isInstalledApp, installedAppId), { - body: { - ...body, - response_mode: 'streaming', - }, - }, { onData, onCompleted, onThought, onFile, isPublicAPI: !isInstalledApp, onError, getAbortController, onMessageEnd, onMessageReplace, onTTSChunk, onTTSEnd }) -} - -export const stopChatMessageResponding = async (appId: string, taskId: string, isInstalledApp: boolean, installedAppId = '') => { - return getAction('post', isInstalledApp)(getUrl(`chat-messages/${taskId}/stop`, isInstalledApp, installedAppId)) +export const stopChatMessageResponding = async (appId: string, taskId: string, appSourceType: AppSourceType, installedAppId = '') => { + return getAction('post', appSourceType)(getUrl(`chat-messages/${taskId}/stop`, appSourceType, installedAppId)) } export const sendCompletionMessage = async (body: 
Record, { onData, onCompleted, onError, onMessageReplace, getAbortController }: { @@ -91,13 +84,13 @@ export const sendCompletionMessage = async (body: Record, { onData, onError: IOnError onMessageReplace: IOnMessageReplace getAbortController?: (abortController: AbortController) => void -}, isInstalledApp: boolean, installedAppId = '') => { - return ssePost(getUrl('completion-messages', isInstalledApp, installedAppId), { +}, appSourceType: AppSourceType, installedAppId = '') => { + return ssePost(getUrl('completion-messages', appSourceType, installedAppId), { body: { ...body, response_mode: 'streaming', }, - }, { onData, onCompleted, isPublicAPI: !isInstalledApp, onError, onMessageReplace, getAbortController }) + }, { onData, onCompleted, isPublicAPI: getIsPublicAPI(appSourceType), onError, onMessageReplace, getAbortController }) } export const sendWorkflowMessage = async ( @@ -129,10 +122,10 @@ export const sendWorkflowMessage = async ( onTextChunk: IOnTextChunk onTextReplace: IOnTextReplace }, - isInstalledApp: boolean, - installedAppId = '', + appSourceType: AppSourceType, + appId = '', ) => { - return ssePost(getUrl('workflows/run', isInstalledApp, installedAppId), { + return ssePost(getUrl('workflows/run', appSourceType, appId), { body: { ...body, response_mode: 'streaming', @@ -141,7 +134,7 @@ export const sendWorkflowMessage = async ( onNodeStarted, onWorkflowStarted, onWorkflowFinished, - isPublicAPI: !isInstalledApp, + isPublicAPI: getIsPublicAPI(appSourceType), onNodeFinished, onIterationStart, onIterationNext, @@ -154,42 +147,42 @@ export const sendWorkflowMessage = async ( }) } -export const stopWorkflowMessage = async (_appId: string, taskId: string, isInstalledApp: boolean, installedAppId = '') => { +export const stopWorkflowMessage = async (_appId: string, taskId: string, appSourceType: AppSourceType, installedAppId = '') => { if (!taskId) return - return getAction('post', isInstalledApp)(getUrl(`workflows/tasks/${taskId}/stop`, isInstalledApp, 
installedAppId)) + return getAction('post', appSourceType)(getUrl(`workflows/tasks/${taskId}/stop`, appSourceType, installedAppId)) } export const fetchAppInfo = async () => { return get('/site') as Promise } -export const fetchConversations = async (isInstalledApp: boolean, installedAppId = '', last_id?: string, pinned?: boolean, limit?: number) => { - return getAction('get', isInstalledApp)(getUrl('conversations', isInstalledApp, installedAppId), { params: { limit: limit || 20, ...(last_id ? { last_id } : {}), ...(pinned !== undefined ? { pinned } : {}) } }) as Promise +export const fetchConversations = async (appSourceType: AppSourceType, installedAppId = '', last_id?: string, pinned?: boolean, limit?: number) => { + return getAction('get', appSourceType)(getUrl('conversations', appSourceType, installedAppId), { params: { limit: limit || 20, ...(last_id ? { last_id } : {}), ...(pinned !== undefined ? { pinned } : {}) } }) as Promise } -export const pinConversation = async (isInstalledApp: boolean, installedAppId = '', id: string) => { - return getAction('patch', isInstalledApp)(getUrl(`conversations/${id}/pin`, isInstalledApp, installedAppId)) +export const pinConversation = async (appSourceType: AppSourceType, installedAppId = '', id: string) => { + return getAction('patch', appSourceType)(getUrl(`conversations/${id}/pin`, appSourceType, installedAppId)) } -export const unpinConversation = async (isInstalledApp: boolean, installedAppId = '', id: string) => { - return getAction('patch', isInstalledApp)(getUrl(`conversations/${id}/unpin`, isInstalledApp, installedAppId)) +export const unpinConversation = async (appSourceType: AppSourceType, installedAppId = '', id: string) => { + return getAction('patch', appSourceType)(getUrl(`conversations/${id}/unpin`, appSourceType, installedAppId)) } -export const delConversation = async (isInstalledApp: boolean, installedAppId = '', id: string) => { - return getAction('del', isInstalledApp)(getUrl(`conversations/${id}`, 
isInstalledApp, installedAppId)) +export const delConversation = async (appSourceType: AppSourceType, installedAppId = '', id: string) => { + return getAction('del', appSourceType)(getUrl(`conversations/${id}`, appSourceType, installedAppId)) } -export const renameConversation = async (isInstalledApp: boolean, installedAppId = '', id: string, name: string) => { - return getAction('post', isInstalledApp)(getUrl(`conversations/${id}/name`, isInstalledApp, installedAppId), { body: { name } }) +export const renameConversation = async (appSourceType: AppSourceType, installedAppId = '', id: string, name: string) => { + return getAction('post', appSourceType)(getUrl(`conversations/${id}/name`, appSourceType, installedAppId), { body: { name } }) } -export const generationConversationName = async (isInstalledApp: boolean, installedAppId = '', id: string) => { - return getAction('post', isInstalledApp)(getUrl(`conversations/${id}/name`, isInstalledApp, installedAppId), { body: { auto_generate: true } }) as Promise +export const generationConversationName = async (appSourceType: AppSourceType, installedAppId = '', id: string) => { + return getAction('post', appSourceType)(getUrl(`conversations/${id}/name`, appSourceType, installedAppId), { body: { auto_generate: true } }) as Promise } -export const fetchChatList = async (conversationId: string, isInstalledApp: boolean, installedAppId = '') => { - return getAction('get', isInstalledApp)(getUrl('messages', isInstalledApp, installedAppId), { params: { conversation_id: conversationId, limit: 20, last_id: '' } }) as any +export const fetchChatList = async (conversationId: string, appSourceType: AppSourceType, installedAppId = '') => { + return getAction('get', appSourceType)(getUrl('messages', appSourceType, installedAppId), { params: { conversation_id: conversationId, limit: 20, last_id: '' } }) as any } // Abandoned API interface @@ -198,12 +191,12 @@ export const fetchChatList = async (conversationId: string, isInstalledApp: 
bool // } // init value. wait for server update -export const fetchAppParams = async (isInstalledApp: boolean, installedAppId = '') => { - return (getAction('get', isInstalledApp))(getUrl('parameters', isInstalledApp, installedAppId)) as Promise +export const fetchAppParams = async (appSourceType: AppSourceType, appId = '') => { + return (getAction('get', appSourceType))(getUrl('parameters', appSourceType, appId)) as Promise } export const fetchWebSAMLSSOUrl = async (appCode: string, redirectUrl: string) => { - return (getAction('get', false))(getUrl('/enterprise/sso/saml/login', false, ''), { + return (getAction('get', AppSourceType.webApp))(getUrl('/enterprise/sso/saml/login', AppSourceType.webApp, ''), { params: { app_code: appCode, redirect_url: redirectUrl, @@ -212,7 +205,7 @@ export const fetchWebSAMLSSOUrl = async (appCode: string, redirectUrl: string) = } export const fetchWebOIDCSSOUrl = async (appCode: string, redirectUrl: string) => { - return (getAction('get', false))(getUrl('/enterprise/sso/oidc/login', false, ''), { + return (getAction('get', AppSourceType.webApp))(getUrl('/enterprise/sso/oidc/login', AppSourceType.webApp, ''), { params: { app_code: appCode, redirect_url: redirectUrl, @@ -222,7 +215,7 @@ export const fetchWebOIDCSSOUrl = async (appCode: string, redirectUrl: string) = } export const fetchWebOAuth2SSOUrl = async (appCode: string, redirectUrl: string) => { - return (getAction('get', false))(getUrl('/enterprise/sso/oauth2/login', false, ''), { + return (getAction('get', AppSourceType.webApp))(getUrl('/enterprise/sso/oauth2/login', AppSourceType.webApp, ''), { params: { app_code: appCode, redirect_url: redirectUrl, @@ -231,7 +224,7 @@ export const fetchWebOAuth2SSOUrl = async (appCode: string, redirectUrl: string) } export const fetchMembersSAMLSSOUrl = async (appCode: string, redirectUrl: string) => { - return (getAction('get', false))(getUrl('/enterprise/sso/members/saml/login', false, ''), { + return (getAction('get', 
AppSourceType.webApp))(getUrl('/enterprise/sso/members/saml/login', AppSourceType.webApp, ''), { params: { app_code: appCode, redirect_url: redirectUrl, @@ -240,7 +233,7 @@ export const fetchMembersSAMLSSOUrl = async (appCode: string, redirectUrl: strin } export const fetchMembersOIDCSSOUrl = async (appCode: string, redirectUrl: string) => { - return (getAction('get', false))(getUrl('/enterprise/sso/members/oidc/login', false, ''), { + return (getAction('get', AppSourceType.webApp))(getUrl('/enterprise/sso/members/oidc/login', AppSourceType.webApp, ''), { params: { app_code: appCode, redirect_url: redirectUrl, @@ -250,7 +243,7 @@ export const fetchMembersOIDCSSOUrl = async (appCode: string, redirectUrl: strin } export const fetchMembersOAuth2SSOUrl = async (appCode: string, redirectUrl: string) => { - return (getAction('get', false))(getUrl('/enterprise/sso/members/oauth2/login', false, ''), { + return (getAction('get', AppSourceType.webApp))(getUrl('/enterprise/sso/members/oauth2/login', AppSourceType.webApp, ''), { params: { app_code: appCode, redirect_url: redirectUrl, @@ -258,48 +251,50 @@ export const fetchMembersOAuth2SSOUrl = async (appCode: string, redirectUrl: str }) as Promise<{ url: string }> } -export const fetchAppMeta = async (isInstalledApp: boolean, installedAppId = '') => { - return (getAction('get', isInstalledApp))(getUrl('meta', isInstalledApp, installedAppId)) as Promise +export const fetchAppMeta = async (appSourceType: AppSourceType, installedAppId = '') => { + return (getAction('get', appSourceType))(getUrl('meta', appSourceType, installedAppId)) as Promise } -export const updateFeedback = async ({ url, body }: { url: string, body: FeedbackType }, isInstalledApp: boolean, installedAppId = '') => { - return (getAction('post', isInstalledApp))(getUrl(url, isInstalledApp, installedAppId), { body }) +export const updateFeedback = async ({ url, body }: { url: string, body: FeedbackType }, appSourceType: AppSourceType, installedAppId = '') => { + 
return (getAction('post', appSourceType))(getUrl(url, appSourceType, installedAppId), { body }) } -export const fetchMoreLikeThis = async (messageId: string, isInstalledApp: boolean, installedAppId = '') => { - return (getAction('get', isInstalledApp))(getUrl(`/messages/${messageId}/more-like-this`, isInstalledApp, installedAppId), { +export const fetchMoreLikeThis = async (messageId: string, appSourceType: AppSourceType, installedAppId = '') => { + return (getAction('get', appSourceType))(getUrl(`/messages/${messageId}/more-like-this`, appSourceType, installedAppId), { params: { response_mode: 'blocking', }, }) } -export const saveMessage = (messageId: string, isInstalledApp: boolean, installedAppId = '') => { - return (getAction('post', isInstalledApp))(getUrl('/saved-messages', isInstalledApp, installedAppId), { body: { message_id: messageId } }) +export const saveMessage = (messageId: string, appSourceType: AppSourceType, installedAppId = '') => { + return (getAction('post', appSourceType))(getUrl('/saved-messages', appSourceType, installedAppId), { body: { message_id: messageId } }) } -export const fetchSavedMessage = async (isInstalledApp: boolean, installedAppId = '') => { - return (getAction('get', isInstalledApp))(getUrl('/saved-messages', isInstalledApp, installedAppId)) +export const fetchSavedMessage = async (appSourceType: AppSourceType, installedAppId = '') => { + return (getAction('get', appSourceType))(getUrl('/saved-messages', appSourceType, installedAppId), {}, { + silent: true, + }) } -export const removeMessage = (messageId: string, isInstalledApp: boolean, installedAppId = '') => { - return (getAction('del', isInstalledApp))(getUrl(`/saved-messages/${messageId}`, isInstalledApp, installedAppId)) +export const removeMessage = (messageId: string, appSourceType: AppSourceType, installedAppId = '') => { + return (getAction('del', appSourceType))(getUrl(`/saved-messages/${messageId}`, appSourceType, installedAppId)) } -export const 
fetchSuggestedQuestions = (messageId: string, isInstalledApp: boolean, installedAppId = '') => { - return (getAction('get', isInstalledApp))(getUrl(`/messages/${messageId}/suggested-questions`, isInstalledApp, installedAppId)) +export const fetchSuggestedQuestions = (messageId: string, appSourceType: AppSourceType, installedAppId = '') => { + return (getAction('get', appSourceType))(getUrl(`/messages/${messageId}/suggested-questions`, appSourceType, installedAppId)) } -export const audioToText = (url: string, isPublicAPI: boolean, body: FormData) => { - return (getAction('post', !isPublicAPI))(url, { body }, { bodyStringify: false, deleteContentType: true }) as Promise<{ text: string }> +export const audioToText = (url: string, appSourceType: AppSourceType, body: FormData) => { + return (getAction('post', appSourceType))(url, { body }, { bodyStringify: false, deleteContentType: true }) as Promise<{ text: string }> } -export const textToAudio = (url: string, isPublicAPI: boolean, body: FormData) => { - return (getAction('post', !isPublicAPI))(url, { body }, { bodyStringify: false, deleteContentType: true }) as Promise<{ data: string }> +export const textToAudioStream = (url: string, appSourceType: AppSourceType, header: { content_type: string }, body: { streaming: boolean, voice?: string, message_id?: string, text?: string | null | undefined }) => { + return (getAction('post', appSourceType))(url, { body, header }, { needAllResponseContent: true }) } -export const textToAudioStream = (url: string, isPublicAPI: boolean, header: { content_type: string }, body: { streaming: boolean, voice?: string, message_id?: string, text?: string | null | undefined }) => { - return (getAction('post', !isPublicAPI))(url, { body, header }, { needAllResponseContent: true }) +export const textToAudio = (url: string, appSourceType: AppSourceType, body: FormData) => { + return (getAction('post', appSourceType))(url, { body }, { bodyStringify: false, deleteContentType: true }) as Promise<{ 
data: string }> } export const fetchAccessToken = async ({ userId, appCode }: { userId?: string, appCode: string }) => { diff --git a/web/service/try-app.ts b/web/service/try-app.ts new file mode 100644 index 0000000000..c1bf79a74a --- /dev/null +++ b/web/service/try-app.ts @@ -0,0 +1,41 @@ +import type { Viewport } from 'reactflow' +import type { Edge, Node } from '@/app/components/workflow/types' +import type { DataSetListResponse } from '@/models/datasets' +import type { + SiteInfo, +} from '@/models/share' +import type { AppModeEnum, ModelConfig } from '@/types/app' +import qs from 'qs' +import { + get, +} from './base' + +export type TryAppInfo = { + name: string + description: string + mode: AppModeEnum + site: SiteInfo + model_config: ModelConfig + deleted_tools: any[] +} + +export const fetchTryAppInfo = async (appId: string) => { + return get(`/trial-apps/${appId}`) as Promise +} + +export const fetchTryAppDatasets = (appId: string, ids: string[]) => { + const urlParams = qs.stringify({ ids }, { indices: false }) + return get(`/trial-apps/${appId}/datasets?${urlParams}`) +} + +type TryAppFlowPreview = { + graph: { + nodes: Node[] + edges: Edge[] + viewport: Viewport + } +} + +export const fetchTryAppFlowPreview = (appId: string) => { + return get(`/trial-apps/${appId}/workflows`) +} diff --git a/web/service/use-explore.ts b/web/service/use-explore.ts index a15b926306..3e3b9ff255 100644 --- a/web/service/use-explore.ts +++ b/web/service/use-explore.ts @@ -2,8 +2,8 @@ import type { App, AppCategory } from '@/models/explore' import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query' import { useGlobalPublicStore } from '@/context/global-public-context' import { AccessMode } from '@/models/access-control' -import { fetchAppList, fetchInstalledAppList, getAppAccessModeByAppId, uninstallApp, updatePinStatus } from './explore' -import { fetchAppMeta, fetchAppParams } from './share' +import { fetchAppList, fetchBanners, fetchInstalledAppList, 
getAppAccessModeByAppId, uninstallApp, updatePinStatus } from './explore' +import { AppSourceType, fetchAppMeta, fetchAppParams } from './share' const NAME_SPACE = 'explore' @@ -81,7 +81,7 @@ export const useGetInstalledAppParams = (appId: string | null) => { queryFn: () => { if (!appId || appId.length === 0) return Promise.reject(new Error('App ID is required to get app params')) - return fetchAppParams(true, appId) + return fetchAppParams(AppSourceType.installedApp, appId) }, enabled: !!appId, }) @@ -93,8 +93,17 @@ export const useGetInstalledAppMeta = (appId: string | null) => { queryFn: () => { if (!appId || appId.length === 0) return Promise.reject(new Error('App ID is required to get app meta')) - return fetchAppMeta(true, appId) + return fetchAppMeta(AppSourceType.installedApp, appId) }, enabled: !!appId, }) } + +export const useGetBanners = (locale?: string) => { + return useQuery({ + queryKey: [NAME_SPACE, 'banners', locale], + queryFn: () => { + return fetchBanners(locale) + }, + }) +} diff --git a/web/service/use-share.spec.tsx b/web/service/use-share.spec.tsx index db20329767..5b1501bead 100644 --- a/web/service/use-share.spec.tsx +++ b/web/service/use-share.spec.tsx @@ -3,6 +3,7 @@ import type { AppConversationData, ConversationItem } from '@/models/share' import { QueryClient, QueryClientProvider } from '@tanstack/react-query' import { act, renderHook, waitFor } from '@testing-library/react' import { + AppSourceType, fetchChatList, fetchConversations, generationConversationName, @@ -80,6 +81,7 @@ describe('useShareConversations', () => { appId: undefined, pinned: true, limit: 50, + appSourceType: AppSourceType.webApp, } const response = createConversationData() mockFetchConversations.mockResolvedValueOnce(response) @@ -102,6 +104,7 @@ describe('useShareConversations', () => { const params = { isInstalledApp: true, appId: undefined, + appSourceType: AppSourceType.installedApp, } // Act @@ -127,6 +130,7 @@ describe('useShareChatList', () => { 
conversationId: 'conversation-1', isInstalledApp: true, appId: 'app-1', + appSourceType: AppSourceType.installedApp, } const response = { data: [] } mockFetchChatList.mockResolvedValueOnce(response) @@ -149,6 +153,7 @@ describe('useShareChatList', () => { conversationId: '', isInstalledApp: false, appId: undefined, + appSourceType: AppSourceType.webApp, } // Act @@ -171,6 +176,7 @@ describe('useShareChatList', () => { conversationId: 'conversation-1', isInstalledApp: false, appId: undefined, + appSourceType: AppSourceType.webApp, } const initialResponse = { data: [{ id: '1', content: 'initial' }] } const updatedResponse = { data: [{ id: '1', content: 'initial' }, { id: '2', content: 'new message' }] } @@ -219,6 +225,7 @@ describe('useShareConversationName', () => { conversationId: 'conversation-2', isInstalledApp: false, appId: undefined, + appSourceType: AppSourceType.webApp, } const response = createConversationItem({ id: 'conversation-2', name: 'Generated' }) mockGenerationConversationName.mockResolvedValueOnce(response) @@ -241,6 +248,7 @@ describe('useShareConversationName', () => { conversationId: 'conversation-3', isInstalledApp: false, appId: undefined, + appSourceType: AppSourceType.webApp, } // Act diff --git a/web/service/use-share.ts b/web/service/use-share.ts index eef61ccc29..cb99525a78 100644 --- a/web/service/use-share.ts +++ b/web/service/use-share.ts @@ -1,6 +1,7 @@ import type { AppConversationData, ConversationItem } from '@/models/share' import { useQuery } from '@tanstack/react-query' import { + AppSourceType, fetchAppInfo, fetchAppMeta, fetchAppParams, @@ -14,7 +15,7 @@ import { useInvalid } from './use-base' const NAME_SPACE = 'webapp' type ShareConversationsParams = { - isInstalledApp: boolean + appSourceType: AppSourceType appId?: string lastId?: string pinned?: boolean @@ -23,13 +24,13 @@ type ShareConversationsParams = { type ShareChatListParams = { conversationId: string - isInstalledApp: boolean + appSourceType: AppSourceType appId?: 
string } type ShareConversationNameParams = { conversationId: string - isInstalledApp: boolean + appSourceType: AppSourceType appId?: string } @@ -73,7 +74,7 @@ export const useGetWebAppParams = () => { return useQuery({ queryKey: shareQueryKeys.appParams, queryFn: () => { - return fetchAppParams(false) + return fetchAppParams(AppSourceType.webApp) }, }) } @@ -82,7 +83,7 @@ export const useGetWebAppMeta = () => { return useQuery({ queryKey: shareQueryKeys.appMeta, queryFn: () => { - return fetchAppMeta(false) + return fetchAppMeta(AppSourceType.webApp) }, }) } @@ -93,11 +94,11 @@ export const useShareConversations = (params: ShareConversationsParams, options: refetchOnReconnect, refetchOnWindowFocus, } = options - const isEnabled = enabled && (!params.isInstalledApp || !!params.appId) + const isEnabled = enabled && params.appSourceType !== AppSourceType.tryApp && (params.appSourceType !== AppSourceType.installedApp || !!params.appId) return useQuery({ queryKey: shareQueryKeys.conversationList(params), queryFn: () => fetchConversations( - params.isInstalledApp, + params.appSourceType, params.appId, params.lastId, params.pinned, @@ -115,10 +116,10 @@ export const useShareChatList = (params: ShareChatListParams, options: ShareQuer refetchOnReconnect, refetchOnWindowFocus, } = options - const isEnabled = enabled && (!params.isInstalledApp || !!params.appId) && !!params.conversationId + const isEnabled = enabled && params.appSourceType !== AppSourceType.tryApp && (params.appSourceType !== AppSourceType.installedApp || !!params.appId) && !!params.conversationId return useQuery({ queryKey: shareQueryKeys.chatList(params), - queryFn: () => fetchChatList(params.conversationId, params.isInstalledApp, params.appId), + queryFn: () => fetchChatList(params.conversationId, params.appSourceType, params.appId), enabled: isEnabled, refetchOnReconnect, refetchOnWindowFocus, @@ -135,10 +136,10 @@ export const useShareConversationName = (params: ShareConversationNameParams, op 
refetchOnReconnect, refetchOnWindowFocus, } = options - const isEnabled = enabled && (!params.isInstalledApp || !!params.appId) && !!params.conversationId + const isEnabled = enabled && (params.appSourceType !== AppSourceType.installedApp || !!params.appId) && !!params.conversationId return useQuery({ queryKey: shareQueryKeys.conversationName(params), - queryFn: () => generationConversationName(params.isInstalledApp, params.appId, params.conversationId), + queryFn: () => generationConversationName(params.appSourceType, params.appId, params.conversationId), enabled: isEnabled, refetchOnReconnect, refetchOnWindowFocus, diff --git a/web/service/use-try-app.ts b/web/service/use-try-app.ts new file mode 100644 index 0000000000..a2f170bff7 --- /dev/null +++ b/web/service/use-try-app.ts @@ -0,0 +1,46 @@ +import type { DataSetListResponse } from '@/models/datasets' +import { useQuery } from '@tanstack/react-query' +import { AppSourceType, fetchAppParams } from './share' +import { fetchTryAppDatasets, fetchTryAppFlowPreview, fetchTryAppInfo } from './try-app' + +const NAME_SPACE = 'try-app' + +export const useGetTryAppInfo = (appId: string) => { + return useQuery({ + queryKey: [NAME_SPACE, 'appInfo', appId], + queryFn: () => { + return fetchTryAppInfo(appId) + }, + enabled: !!appId, + }) +} + +export const useGetTryAppParams = (appId: string) => { + return useQuery({ + queryKey: [NAME_SPACE, 'appParams', appId], + queryFn: () => { + return fetchAppParams(AppSourceType.tryApp, appId) + }, + enabled: !!appId, + }) +} + +export const useGetTryAppDataSets = (appId: string, ids: string[]) => { + return useQuery({ + queryKey: [NAME_SPACE, 'dataSets', appId, ids], + queryFn: () => { + return fetchTryAppDatasets(appId, ids) + }, + enabled: ids.length > 0, + }) +} + +export const useGetTryAppFlowPreview = (appId: string, disabled?: boolean) => { + return useQuery({ + queryKey: [NAME_SPACE, 'preview', appId], + enabled: !disabled, + queryFn: () => { + return 
fetchTryAppFlowPreview(appId) + }, + }) +} diff --git a/web/service/use-workspace.ts b/web/service/use-workspace.ts new file mode 100644 index 0000000000..1b675c66ff --- /dev/null +++ b/web/service/use-workspace.ts @@ -0,0 +1,17 @@ +import type { ICurrentWorkspace } from '@/models/common' +import { useQuery } from '@tanstack/react-query' +import { get } from './base' + +type WorkspacePermissions = { + workspace_id: ICurrentWorkspace['id'] + allow_member_invite: boolean + allow_owner_transfer: boolean +} + +export function useWorkspacePermissions(enabled: boolean) { + return useQuery({ + queryKey: ['workspace-permissions'], + queryFn: () => get('/workspaces/current/permission'), + enabled, + }) +} diff --git a/web/types/feature.ts b/web/types/feature.ts index bd331d4508..9dd2c694d2 100644 --- a/web/types/feature.ts +++ b/web/types/feature.ts @@ -59,6 +59,8 @@ export type SystemFeatures = { allow_email_code_login: boolean allow_email_password_login: boolean } + enable_trial_app: boolean + enable_explore_banner: boolean } export const defaultSystemFeatures: SystemFeatures = { @@ -98,6 +100,8 @@ export const defaultSystemFeatures: SystemFeatures = { allow_email_code_login: false, allow_email_password_login: false, }, + enable_trial_app: false, + enable_explore_banner: false, } export enum DatasetAttr { diff --git a/web/types/workflow.ts b/web/types/workflow.ts index 5f74ef2c12..8de0df840e 100644 --- a/web/types/workflow.ts +++ b/web/types/workflow.ts @@ -28,6 +28,68 @@ export type AgentLogItemWithChildren = AgentLogItem & { children: AgentLogItemWithChildren[] } +export type IconObject = { + background: string + content: string +} + +export type ToolCallItem = { + id: string + type: 'model' | 'tool' | 'thought' + thoughtCompleted?: boolean + thoughtOutput?: string + + toolName?: string + toolProvider?: string + toolIcon?: string | IconObject + toolIconDark?: string | IconObject + toolArguments?: string + toolOutput?: Record | string + toolFiles?: string[] + toolError?: 
string + toolDuration?: number + + modelName?: string + modelProvider?: string + modelOutput?: Record | string + modelDuration?: number + modelIcon?: string | IconObject + modelIconDark?: string | IconObject +} + +export type ToolCallDetail = { + id: string + name: string + arguments: string + output: string + files: string[] + error: string + elapsed_time?: number + status: string +} +export type SequenceSegment + = | { type: 'context', start: number, end: number } + | { type: 'reasoning', index: number } + | { type: 'tool_call', index: number } + +export type LLMLogItem = { + reasoning_content: string[] + tool_calls: ToolCallDetail[] + sequence: SequenceSegment[] +} + +export type LLMTraceItem = { + type: 'model' | 'tool' + duration: number + output: Record + provider?: string + name: string + icon?: string | IconObject + icon_dark?: string | IconObject + error?: string + status?: 'success' | 'error' +} + export type NodeTracing = { id: string index: number @@ -72,6 +134,7 @@ export type NodeTracing = { icon?: string } loop_variable_map?: Record + llm_trace?: LLMTraceItem[] } metadata: { iterator_length: number @@ -104,6 +167,7 @@ export type NodeTracing = { parent_parallel_id?: string parent_parallel_start_node_id?: string agentLog?: AgentLogItemWithChildren[] // agent log + generation_detail?: LLMLogItem } export type FetchWorkflowDraftResponse = {