diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index b4a6eb9adb..f4a5f754e0 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,25 +1,23 @@ -# Summary +> [!IMPORTANT] +> +> 1. Make sure you have read our [contribution guidelines](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) +> 2. Ensure there is an associated issue and you have been assigned to it +> 3. Use the correct syntax to link this PR: `Fixes #`. -Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change. +## Summary -> [!Tip] -> Close issue syntax: `Fixes #` or `Resolves #`, see [documentation](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) for more details. + - -# Screenshots +## Screenshots | Before | After | |--------|-------| | ... | ... | -# Checklist - -> [!IMPORTANT] -> Please review the checklist below before submitting your pull request. +## Checklist - [ ] This change requires a documentation update, included: [Dify Document](https://github.com/langgenius/dify-docs) - [x] I understand that this PR may be closed in case there was no previous discussion or issues. (This doesn't apply to typos!) - [x] I've added a test for each change that was introduced, and I tried as much as possible to make a single atomic change. - [x] I've updated the documentation accordingly. - [x] I ran `dev/reformat`(backend) and `cd web && npx lint-staged`(frontend) to appease the lint gods - diff --git a/api/controllers/console/app/workflow_app_log.py b/api/controllers/console/app/workflow_app_log.py index c475aea9fc..b9579e2120 100644 --- a/api/controllers/console/app/workflow_app_log.py +++ b/api/controllers/console/app/workflow_app_log.py @@ -6,12 +6,12 @@ from sqlalchemy.orm import Session from controllers.console import api from controllers.console.app.wraps import get_app_model from controllers.console.wraps import account_initialization_required, setup_required +from core.workflow.entities.workflow_execution import WorkflowExecutionStatus from extensions.ext_database import db from fields.workflow_app_log_fields import workflow_app_log_pagination_fields from libs.login import login_required from models import App from models.model import AppMode -from models.workflow import WorkflowRunStatus from services.workflow_app_service import WorkflowAppService @@ -38,7 +38,7 @@ class WorkflowAppLogApi(Resource): parser.add_argument("limit", type=int_range(1, 100), default=20, location="args") args = parser.parse_args() - args.status = WorkflowRunStatus(args.status) if args.status else None + args.status = WorkflowExecutionStatus(args.status) if args.status else None if args.created_at__before: args.created_at__before = isoparse(args.created_at__before) diff --git a/api/controllers/service_api/app/annotation.py b/api/controllers/service_api/app/annotation.py index 1a7f0c935b..595ae118ef 100644 --- a/api/controllers/service_api/app/annotation.py +++ b/api/controllers/service_api/app/annotation.py @@ -9,13 +9,13 @@ from fields.annotation_fields import ( annotation_fields, ) from libs.login import current_user -from models.model import App, EndUser +from models.model import App from services.annotation_service import AppAnnotationService class AnnotationReplyActionApi(Resource): @validate_app_token - def post(self, app_model: App, end_user: 
EndUser, action): + def post(self, app_model: App, action): parser = reqparse.RequestParser() parser.add_argument("score_threshold", required=True, type=float, location="json") parser.add_argument("embedding_provider_name", required=True, type=str, location="json") @@ -32,7 +32,7 @@ class AnnotationReplyActionApi(Resource): class AnnotationReplyActionStatusApi(Resource): @validate_app_token - def get(self, app_model: App, end_user: EndUser, job_id, action): + def get(self, app_model: App, job_id, action): job_id = str(job_id) app_annotation_job_key = "{}_app_annotation_job_{}".format(action, str(job_id)) cache_result = redis_client.get(app_annotation_job_key) @@ -50,7 +50,7 @@ class AnnotationReplyActionStatusApi(Resource): class AnnotationListApi(Resource): @validate_app_token - def get(self, app_model: App, end_user: EndUser): + def get(self, app_model: App): page = request.args.get("page", default=1, type=int) limit = request.args.get("limit", default=20, type=int) keyword = request.args.get("keyword", default="", type=str) @@ -67,7 +67,7 @@ class AnnotationListApi(Resource): @validate_app_token @marshal_with(annotation_fields) - def post(self, app_model: App, end_user: EndUser): + def post(self, app_model: App): parser = reqparse.RequestParser() parser.add_argument("question", required=True, type=str, location="json") parser.add_argument("answer", required=True, type=str, location="json") @@ -79,7 +79,7 @@ class AnnotationListApi(Resource): class AnnotationUpdateDeleteApi(Resource): @validate_app_token @marshal_with(annotation_fields) - def put(self, app_model: App, end_user: EndUser, annotation_id): + def put(self, app_model: App, annotation_id): if not current_user.is_editor: raise Forbidden() @@ -92,7 +92,7 @@ class AnnotationUpdateDeleteApi(Resource): return annotation @validate_app_token - def delete(self, app_model: App, end_user: EndUser, annotation_id): + def delete(self, app_model: App, annotation_id): if not current_user.is_editor: raise Forbidden() diff --git a/api/controllers/service_api/app/workflow.py b/api/controllers/service_api/app/workflow.py index e9bb2b046a..df52b49424 100644 --- a/api/controllers/service_api/app/workflow.py +++ b/api/controllers/service_api/app/workflow.py @@ -24,12 +24,13 @@ from core.errors.error import ( QuotaExceededError, ) from core.model_runtime.errors.invoke import InvokeError +from core.workflow.entities.workflow_execution import WorkflowExecutionStatus from extensions.ext_database import db from fields.workflow_app_log_fields import workflow_app_log_pagination_fields from libs import helper from libs.helper import TimestampField from models.model import App, AppMode, EndUser -from models.workflow import WorkflowRun, WorkflowRunStatus +from models.workflow import WorkflowRun from services.app_generate_service import AppGenerateService from services.errors.llm import InvokeRateLimitError from services.workflow_app_service import WorkflowAppService @@ -138,7 +139,7 @@ class WorkflowAppLogApi(Resource): parser.add_argument("limit", type=int_range(1, 100), default=20, location="args") args = parser.parse_args() - args.status = WorkflowRunStatus(args.status) if args.status else None + args.status = WorkflowExecutionStatus(args.status) if args.status else None if args.created_at__before: args.created_at__before = isoparse(args.created_at__before) diff --git a/api/controllers/service_api/dataset/dataset.py b/api/controllers/service_api/dataset/dataset.py index 394f36c3ff..c100f53078 100644 --- a/api/controllers/service_api/dataset/dataset.py +++ 
b/api/controllers/service_api/dataset/dataset.py @@ -1,19 +1,21 @@ from flask import request -from flask_restful import marshal, reqparse +from flask_restful import marshal, marshal_with, reqparse from werkzeug.exceptions import Forbidden, NotFound import services.dataset_service from controllers.service_api import api from controllers.service_api.dataset.error import DatasetInUseError, DatasetNameDuplicateError -from controllers.service_api.wraps import DatasetApiResource +from controllers.service_api.wraps import DatasetApiResource, validate_dataset_token from core.model_runtime.entities.model_entities import ModelType from core.plugin.entities.plugin import ModelProviderID from core.provider_manager import ProviderManager from fields.dataset_fields import dataset_detail_fields +from fields.tag_fields import tag_fields from libs.login import current_user from models.dataset import Dataset, DatasetPermissionEnum from services.dataset_service import DatasetPermissionService, DatasetService from services.entities.knowledge_entities.knowledge_entities import RetrievalModel +from services.tag_service import TagService def _validate_name(name): @@ -320,5 +322,134 @@ class DatasetApi(DatasetApiResource): raise DatasetInUseError() +class DatasetTagsApi(DatasetApiResource): + @validate_dataset_token + @marshal_with(tag_fields) + def get(self, _, dataset_id): + """Get all knowledge type tags.""" + tags = TagService.get_tags("knowledge", current_user.current_tenant_id) + + return tags, 200 + + @validate_dataset_token + def post(self, _, dataset_id): + """Add a knowledge type tag.""" + if not (current_user.is_editor or current_user.is_dataset_editor): + raise Forbidden() + + parser = reqparse.RequestParser() + parser.add_argument( + "name", + nullable=False, + required=True, + help="Name must be between 1 to 50 characters.", + type=DatasetTagsApi._validate_tag_name, + ) + + args = parser.parse_args() + args["type"] = "knowledge" + tag = TagService.save_tags(args) + + response = {"id": tag.id, "name": tag.name, "type": tag.type, "binding_count": 0} + + return response, 200 + + @validate_dataset_token + def patch(self, _, dataset_id): + if not (current_user.is_editor or current_user.is_dataset_editor): + raise Forbidden() + + parser = reqparse.RequestParser() + parser.add_argument( + "name", + nullable=False, + required=True, + help="Name must be between 1 to 50 characters.", + type=DatasetTagsApi._validate_tag_name, + ) + parser.add_argument("tag_id", nullable=False, required=True, help="Id of a tag.", type=str) + args = parser.parse_args() + tag = TagService.update_tags(args, args.get("tag_id")) + + binding_count = TagService.get_tag_binding_count(args.get("tag_id")) + + response = {"id": tag.id, "name": tag.name, "type": tag.type, "binding_count": binding_count} + + return response, 200 + + @validate_dataset_token + def delete(self, _, dataset_id): + """Delete a knowledge type tag.""" + if not current_user.is_editor: + raise Forbidden() + parser = reqparse.RequestParser() + parser.add_argument("tag_id", nullable=False, required=True, help="Id of a tag.", type=str) + args = parser.parse_args() + TagService.delete_tag(args.get("tag_id")) + + return 204 + + @staticmethod + def _validate_tag_name(name): + if not name or len(name) < 1 or len(name) > 50: + raise ValueError("Name must be between 1 to 50 characters.") + return name + + +class DatasetTagBindingApi(DatasetApiResource): + @validate_dataset_token + def post(self, _, dataset_id): + # The role of the current user in the ta table must be admin, 
owner, editor, or dataset_operator + if not (current_user.is_editor or current_user.is_dataset_editor): + raise Forbidden() + + parser = reqparse.RequestParser() + parser.add_argument( + "tag_ids", type=list, nullable=False, required=True, location="json", help="Tag IDs is required." + ) + parser.add_argument( + "target_id", type=str, nullable=False, required=True, location="json", help="Target Dataset ID is required." + ) + + args = parser.parse_args() + args["type"] = "knowledge" + TagService.save_tag_binding(args) + + return 204 + + +class DatasetTagUnbindingApi(DatasetApiResource): + @validate_dataset_token + def post(self, _, dataset_id): + # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator + if not (current_user.is_editor or current_user.is_dataset_editor): + raise Forbidden() + + parser = reqparse.RequestParser() + parser.add_argument("tag_id", type=str, nullable=False, required=True, help="Tag ID is required.") + parser.add_argument("target_id", type=str, nullable=False, required=True, help="Target ID is required.") + + args = parser.parse_args() + args["type"] = "knowledge" + TagService.delete_tag_binding(args) + + return 204 + + +class DatasetTagsBindingStatusApi(DatasetApiResource): + @validate_dataset_token + def get(self, _, *args, **kwargs): + """Get all knowledge type tags.""" + dataset_id = kwargs.get("dataset_id") + tags = TagService.get_tags_by_target_id("knowledge", current_user.current_tenant_id, str(dataset_id)) + tags_list = [{"id": tag.id, "name": tag.name} for tag in tags] + response = {"data": tags_list, "total": len(tags)} + return response, 200 + + api.add_resource(DatasetListApi, "/datasets") api.add_resource(DatasetApi, "/datasets/") +api.add_resource(DatasetTagsApi, "/datasets/tags") +api.add_resource(DatasetTagBindingApi, "/datasets/tags/binding") +api.add_resource(DatasetTagUnbindingApi, "/datasets/tags/unbinding") +api.add_resource(DatasetTagsBindingStatusApi, "/datasets//tags") diff --git a/api/controllers/service_api/dataset/segment.py b/api/controllers/service_api/dataset/segment.py index ea4be4e511..337752275a 100644 --- a/api/controllers/service_api/dataset/segment.py +++ b/api/controllers/service_api/dataset/segment.py @@ -208,6 +208,28 @@ class DatasetSegmentApi(DatasetApiResource): ) return {"data": marshal(updated_segment, segment_fields), "doc_form": document.doc_form}, 200 + def get(self, tenant_id, dataset_id, document_id, segment_id): + # check dataset + dataset_id = str(dataset_id) + tenant_id = str(tenant_id) + dataset = db.session.query(Dataset).filter(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first() + if not dataset: + raise NotFound("Dataset not found.") + # check user's model setting + DatasetService.check_dataset_model_setting(dataset) + # check document + document_id = str(document_id) + document = DocumentService.get_document(dataset_id, document_id) + if not document: + raise NotFound("Document not found.") + # check segment + segment_id = str(segment_id) + segment = SegmentService.get_segment_by_id(segment_id=segment_id, tenant_id=current_user.current_tenant_id) + if not segment: + raise NotFound("Segment not found.") + + return {"data": marshal(segment, segment_fields), "doc_form": document.doc_form}, 200 + class ChildChunkApi(DatasetApiResource): """Resource for child chunks.""" diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py index 5212d797d8..4979f63432 100644 --- a/api/core/agent/cot_agent_runner.py +++ 
b/api/core/agent/cot_agent_runner.py @@ -63,7 +63,7 @@ class CotAgentRunner(BaseAgentRunner, ABC): self._instruction = self._fill_in_inputs_from_external_data_tools(instruction, inputs) iteration_step = 1 - max_iteration_steps = min(app_config.agent.max_iteration if app_config.agent else 5, 5) + 1 + max_iteration_steps = min(app_config.agent.max_iteration, 99) + 1 # convert tools into ModelRuntime Tool format tool_instances, prompt_messages_tools = self._init_prompt_tools() diff --git a/api/core/agent/entities.py b/api/core/agent/entities.py index e68b4f2356..143a3a51aa 100644 --- a/api/core/agent/entities.py +++ b/api/core/agent/entities.py @@ -82,7 +82,7 @@ class AgentEntity(BaseModel): strategy: Strategy prompt: Optional[AgentPromptEntity] = None tools: Optional[list[AgentToolEntity]] = None - max_iteration: int = 5 + max_iteration: int = 10 class AgentInvokeMessage(ToolInvokeMessage): diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py index 611a55b30a..5491689ece 100644 --- a/api/core/agent/fc_agent_runner.py +++ b/api/core/agent/fc_agent_runner.py @@ -48,7 +48,7 @@ class FunctionCallAgentRunner(BaseAgentRunner): assert app_config.agent iteration_step = 1 - max_iteration_steps = min(app_config.agent.max_iteration, 5) + 1 + max_iteration_steps = min(app_config.agent.max_iteration, 99) + 1 # continue to run until there is not any tool call function_call_state = True diff --git a/api/core/app/app_config/easy_ui_based_app/agent/manager.py b/api/core/app/app_config/easy_ui_based_app/agent/manager.py index f503543d7b..590b944c0d 100644 --- a/api/core/app/app_config/easy_ui_based_app/agent/manager.py +++ b/api/core/app/app_config/easy_ui_based_app/agent/manager.py @@ -75,7 +75,7 @@ class AgentConfigManager: strategy=strategy, prompt=agent_prompt_entity, tools=agent_tools, - max_iteration=agent_dict.get("max_iteration", 5), + max_iteration=agent_dict.get("max_iteration", 10), ) return None diff --git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py index fdd1a776f8..8c85f91d7e 100644 --- a/api/core/app/apps/advanced_chat/app_generator.py +++ b/api/core/app/apps/advanced_chat/app_generator.py @@ -27,8 +27,8 @@ from core.ops.ops_trace_manager import TraceQueueManager from core.prompt.utils.get_thread_messages_length import get_thread_messages_length from core.repositories import SQLAlchemyWorkflowNodeExecutionRepository from core.repositories.sqlalchemy_workflow_execution_repository import SQLAlchemyWorkflowExecutionRepository -from core.workflow.repository.workflow_execution_repository import WorkflowExecutionRepository -from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository +from core.workflow.repositories.workflow_execution_repository import WorkflowExecutionRepository +from core.workflow.repositories.workflow_node_execution_repository import WorkflowNodeExecutionRepository from extensions.ext_database import db from factories import file_factory from models import Account, App, Conversation, EndUser, Message, Workflow, WorkflowNodeExecutionTriggeredFrom diff --git a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py index c83e06bf15..d9b3833862 100644 --- a/api/core/app/apps/advanced_chat/app_runner.py +++ b/api/core/app/apps/advanced_chat/app_runner.py @@ -140,7 +140,7 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): SystemVariableKey.DIALOGUE_COUNT: self._dialogue_count, SystemVariableKey.APP_ID: 
app_config.app_id, SystemVariableKey.WORKFLOW_ID: app_config.workflow_id, - SystemVariableKey.WORKFLOW_RUN_ID: self.application_generate_entity.workflow_run_id, + SystemVariableKey.WORKFLOW_EXECUTION_ID: self.application_generate_entity.workflow_run_id, } # init variable pool diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index 0a2401f953..8c5645bbb7 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -1,4 +1,3 @@ -import json import logging import time from collections.abc import Generator, Mapping @@ -57,26 +56,23 @@ from core.app.entities.task_entities import ( WorkflowTaskState, ) from core.app.task_pipeline.based_generate_task_pipeline import BasedGenerateTaskPipeline -from core.app.task_pipeline.message_cycle_manage import MessageCycleManage +from core.app.task_pipeline.message_cycle_manager import MessageCycleManager from core.base.tts import AppGeneratorTTSPublisher, AudioTrunk from core.model_runtime.entities.llm_entities import LLMUsage -from core.model_runtime.utils.encoders import jsonable_encoder from core.ops.ops_trace_manager import TraceQueueManager +from core.workflow.entities.workflow_execution import WorkflowExecutionStatus, WorkflowType from core.workflow.enums import SystemVariableKey from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState from core.workflow.nodes import NodeType -from core.workflow.repository.workflow_execution_repository import WorkflowExecutionRepository -from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository -from core.workflow.workflow_cycle_manager import WorkflowCycleManager +from core.workflow.repositories.workflow_execution_repository import WorkflowExecutionRepository +from core.workflow.repositories.workflow_node_execution_repository import WorkflowNodeExecutionRepository +from core.workflow.workflow_cycle_manager import CycleManagerWorkflowInfo, WorkflowCycleManager from events.message_event import message_was_created from extensions.ext_database import db from models import Conversation, EndUser, Message, MessageFile from models.account import Account from models.enums import CreatorUserRole -from models.workflow import ( - Workflow, - WorkflowRunStatus, -) +from models.workflow import Workflow logger = logging.getLogger(__name__) @@ -126,8 +122,14 @@ class AdvancedChatAppGenerateTaskPipeline: SystemVariableKey.DIALOGUE_COUNT: dialogue_count, SystemVariableKey.APP_ID: application_generate_entity.app_config.app_id, SystemVariableKey.WORKFLOW_ID: workflow.id, - SystemVariableKey.WORKFLOW_RUN_ID: application_generate_entity.workflow_run_id, + SystemVariableKey.WORKFLOW_EXECUTION_ID: application_generate_entity.workflow_run_id, }, + workflow_info=CycleManagerWorkflowInfo( + workflow_id=workflow.id, + workflow_type=WorkflowType(workflow.type), + version=workflow.version, + graph_data=workflow.graph_dict, + ), workflow_execution_repository=workflow_execution_repository, workflow_node_execution_repository=workflow_node_execution_repository, ) @@ -137,7 +139,7 @@ class AdvancedChatAppGenerateTaskPipeline: ) self._task_state = WorkflowTaskState() - self._message_cycle_manager = MessageCycleManage( + self._message_cycle_manager = MessageCycleManager( application_generate_entity=application_generate_entity, task_state=self._task_state ) @@ -158,7 +160,7 @@ class AdvancedChatAppGenerateTaskPipeline: 
:return: """ # start generate conversation name thread - self._conversation_name_generate_thread = self._message_cycle_manager._generate_conversation_name( + self._conversation_name_generate_thread = self._message_cycle_manager.generate_conversation_name( conversation_id=self._conversation_id, query=self._application_generate_entity.query ) @@ -302,15 +304,12 @@ class AdvancedChatAppGenerateTaskPipeline: with Session(db.engine, expire_on_commit=False) as session: # init workflow run - workflow_execution = self._workflow_cycle_manager.handle_workflow_run_start( - session=session, - workflow_id=self._workflow_id, - ) - self._workflow_run_id = workflow_execution.id + workflow_execution = self._workflow_cycle_manager.handle_workflow_run_start() + self._workflow_run_id = workflow_execution.id_ message = self._get_message(session=session) if not message: raise ValueError(f"Message not found: {self._message_id}") - message.workflow_run_id = workflow_execution.id + message.workflow_run_id = workflow_execution.id_ workflow_start_resp = self._workflow_response_converter.workflow_start_to_stream_response( task_id=self._application_generate_entity.task_id, workflow_execution=workflow_execution, @@ -550,7 +549,7 @@ class AdvancedChatAppGenerateTaskPipeline: workflow_run_id=self._workflow_run_id, total_tokens=graph_runtime_state.total_tokens, total_steps=graph_runtime_state.node_run_steps, - status=WorkflowRunStatus.FAILED, + status=WorkflowExecutionStatus.FAILED, error_message=event.error, conversation_id=self._conversation_id, trace_manager=trace_manager, @@ -576,7 +575,7 @@ class AdvancedChatAppGenerateTaskPipeline: workflow_run_id=self._workflow_run_id, total_tokens=graph_runtime_state.total_tokens, total_steps=graph_runtime_state.node_run_steps, - status=WorkflowRunStatus.STOPPED, + status=WorkflowExecutionStatus.STOPPED, error_message=event.get_stop_reason(), conversation_id=self._conversation_id, trace_manager=trace_manager, @@ -604,22 +603,18 @@ class AdvancedChatAppGenerateTaskPipeline: yield self._message_end_to_stream_response() break elif isinstance(event, QueueRetrieverResourcesEvent): - self._message_cycle_manager._handle_retriever_resources(event) + self._message_cycle_manager.handle_retriever_resources(event) with Session(db.engine, expire_on_commit=False) as session: message = self._get_message(session=session) - message.message_metadata = ( - json.dumps(jsonable_encoder(self._task_state.metadata)) if self._task_state.metadata else None - ) + message.message_metadata = self._task_state.metadata.model_dump_json() session.commit() elif isinstance(event, QueueAnnotationReplyEvent): - self._message_cycle_manager._handle_annotation_reply(event) + self._message_cycle_manager.handle_annotation_reply(event) with Session(db.engine, expire_on_commit=False) as session: message = self._get_message(session=session) - message.message_metadata = ( - json.dumps(jsonable_encoder(self._task_state.metadata)) if self._task_state.metadata else None - ) + message.message_metadata = self._task_state.metadata.model_dump_json() session.commit() elif isinstance(event, QueueTextChunkEvent): delta_text = event.text @@ -636,12 +631,12 @@ class AdvancedChatAppGenerateTaskPipeline: tts_publisher.publish(queue_message) self._task_state.answer += delta_text - yield self._message_cycle_manager._message_to_stream_response( + yield self._message_cycle_manager.message_to_stream_response( answer=delta_text, message_id=self._message_id, from_variable_selector=event.from_variable_selector ) elif isinstance(event, 
QueueMessageReplaceEvent): # published by moderation - yield self._message_cycle_manager._message_replace_to_stream_response( + yield self._message_cycle_manager.message_replace_to_stream_response( answer=event.text, reason=event.reason ) elif isinstance(event, QueueAdvancedChatMessageEndEvent): @@ -653,7 +648,7 @@ class AdvancedChatAppGenerateTaskPipeline: ) if output_moderation_answer: self._task_state.answer = output_moderation_answer - yield self._message_cycle_manager._message_replace_to_stream_response( + yield self._message_cycle_manager.message_replace_to_stream_response( answer=output_moderation_answer, reason=QueueMessageReplaceEvent.MessageReplaceReason.OUTPUT_MODERATION, ) @@ -682,9 +677,7 @@ class AdvancedChatAppGenerateTaskPipeline: message = self._get_message(session=session) message.answer = self._task_state.answer message.provider_response_latency = time.perf_counter() - self._base_task_pipeline._start_at - message.message_metadata = ( - json.dumps(jsonable_encoder(self._task_state.metadata)) if self._task_state.metadata else None - ) + message.message_metadata = self._task_state.metadata.model_dump_json() message_files = [ MessageFile( message_id=message.id, @@ -712,9 +705,9 @@ class AdvancedChatAppGenerateTaskPipeline: message.answer_price_unit = usage.completion_price_unit message.total_price = usage.total_price message.currency = usage.currency - self._task_state.metadata["usage"] = jsonable_encoder(usage) + self._task_state.metadata.usage = usage else: - self._task_state.metadata["usage"] = jsonable_encoder(LLMUsage.empty_usage()) + self._task_state.metadata.usage = LLMUsage.empty_usage() message_was_created.send( message, application_generate_entity=self._application_generate_entity, @@ -725,18 +718,16 @@ class AdvancedChatAppGenerateTaskPipeline: Message end to stream response. 
:return: """ - extras = {} - if self._task_state.metadata: - extras["metadata"] = self._task_state.metadata.copy() + extras = self._task_state.metadata.model_dump() - if "annotation_reply" in extras["metadata"]: - del extras["metadata"]["annotation_reply"] + if self._task_state.metadata.annotation_reply: + del extras["annotation_reply"] return MessageEndStreamResponse( task_id=self._application_generate_entity.task_id, id=self._message_id, files=self._recorded_files, - metadata=extras.get("metadata", {}), + metadata=extras, ) def _handle_output_moderation_chunk(self, text: str) -> bool: diff --git a/api/core/app/apps/common/workflow_response_converter.py b/api/core/app/apps/common/workflow_response_converter.py index 7669bf74bb..6f524a5872 100644 --- a/api/core/app/apps/common/workflow_response_converter.py +++ b/api/core/app/apps/common/workflow_response_converter.py @@ -44,15 +44,14 @@ from core.app.entities.task_entities import ( ) from core.file import FILE_MODEL_IDENTITY, File from core.tools.tool_manager import ToolManager -from core.workflow.entities.node_execution_entities import NodeExecution -from core.workflow.entities.workflow_execution_entities import WorkflowExecution +from core.workflow.entities.workflow_execution import WorkflowExecution +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecution, WorkflowNodeExecutionStatus from core.workflow.nodes import NodeType from core.workflow.nodes.tool.entities import ToolNodeData from models import ( Account, CreatorUserRole, EndUser, - WorkflowNodeExecutionStatus, WorkflowRun, ) @@ -73,11 +72,10 @@ class WorkflowResponseConverter: ) -> WorkflowStartStreamResponse: return WorkflowStartStreamResponse( task_id=task_id, - workflow_run_id=workflow_execution.id, + workflow_run_id=workflow_execution.id_, data=WorkflowStartStreamResponse.Data( - id=workflow_execution.id, + id=workflow_execution.id_, workflow_id=workflow_execution.workflow_id, - sequence_number=workflow_execution.sequence_number, inputs=workflow_execution.inputs, created_at=int(workflow_execution.started_at.timestamp()), ), @@ -91,7 +89,7 @@ class WorkflowResponseConverter: workflow_execution: WorkflowExecution, ) -> WorkflowFinishStreamResponse: created_by = None - workflow_run = session.scalar(select(WorkflowRun).where(WorkflowRun.id == workflow_execution.id)) + workflow_run = session.scalar(select(WorkflowRun).where(WorkflowRun.id == workflow_execution.id_)) assert workflow_run is not None if workflow_run.created_by_role == CreatorUserRole.ACCOUNT: stmt = select(Account).where(Account.id == workflow_run.created_by) @@ -122,11 +120,10 @@ class WorkflowResponseConverter: return WorkflowFinishStreamResponse( task_id=task_id, - workflow_run_id=workflow_execution.id, + workflow_run_id=workflow_execution.id_, data=WorkflowFinishStreamResponse.Data( - id=workflow_execution.id, + id=workflow_execution.id_, workflow_id=workflow_execution.workflow_id, - sequence_number=workflow_execution.sequence_number, status=workflow_execution.status, outputs=workflow_execution.outputs, error=workflow_execution.error_message, @@ -146,16 +143,16 @@ class WorkflowResponseConverter: *, event: QueueNodeStartedEvent, task_id: str, - workflow_node_execution: NodeExecution, + workflow_node_execution: WorkflowNodeExecution, ) -> Optional[NodeStartStreamResponse]: if workflow_node_execution.node_type in {NodeType.ITERATION, NodeType.LOOP}: return None - if not workflow_node_execution.workflow_run_id: + if not workflow_node_execution.workflow_execution_id: return None response = 
NodeStartStreamResponse( task_id=task_id, - workflow_run_id=workflow_node_execution.workflow_run_id, + workflow_run_id=workflow_node_execution.workflow_execution_id, data=NodeStartStreamResponse.Data( id=workflow_node_execution.id, node_id=workflow_node_execution.node_id, @@ -196,18 +193,18 @@ class WorkflowResponseConverter: | QueueNodeInLoopFailedEvent | QueueNodeExceptionEvent, task_id: str, - workflow_node_execution: NodeExecution, + workflow_node_execution: WorkflowNodeExecution, ) -> Optional[NodeFinishStreamResponse]: if workflow_node_execution.node_type in {NodeType.ITERATION, NodeType.LOOP}: return None - if not workflow_node_execution.workflow_run_id: + if not workflow_node_execution.workflow_execution_id: return None if not workflow_node_execution.finished_at: return None return NodeFinishStreamResponse( task_id=task_id, - workflow_run_id=workflow_node_execution.workflow_run_id, + workflow_run_id=workflow_node_execution.workflow_execution_id, data=NodeFinishStreamResponse.Data( id=workflow_node_execution.id, node_id=workflow_node_execution.node_id, @@ -239,18 +236,18 @@ class WorkflowResponseConverter: *, event: QueueNodeRetryEvent, task_id: str, - workflow_node_execution: NodeExecution, + workflow_node_execution: WorkflowNodeExecution, ) -> Optional[Union[NodeRetryStreamResponse, NodeFinishStreamResponse]]: if workflow_node_execution.node_type in {NodeType.ITERATION, NodeType.LOOP}: return None - if not workflow_node_execution.workflow_run_id: + if not workflow_node_execution.workflow_execution_id: return None if not workflow_node_execution.finished_at: return None return NodeRetryStreamResponse( task_id=task_id, - workflow_run_id=workflow_node_execution.workflow_run_id, + workflow_run_id=workflow_node_execution.workflow_execution_id, data=NodeRetryStreamResponse.Data( id=workflow_node_execution.id, node_id=workflow_node_execution.node_id, diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py index 6ea90e5a3d..f4aec3479b 100644 --- a/api/core/app/apps/workflow/app_generator.py +++ b/api/core/app/apps/workflow/app_generator.py @@ -25,8 +25,8 @@ from core.model_runtime.errors.invoke import InvokeAuthorizationError from core.ops.ops_trace_manager import TraceQueueManager from core.repositories import SQLAlchemyWorkflowNodeExecutionRepository from core.repositories.sqlalchemy_workflow_execution_repository import SQLAlchemyWorkflowExecutionRepository -from core.workflow.repository.workflow_execution_repository import WorkflowExecutionRepository -from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository +from core.workflow.repositories.workflow_execution_repository import WorkflowExecutionRepository +from core.workflow.repositories.workflow_node_execution_repository import WorkflowNodeExecutionRepository from extensions.ext_database import db from factories import file_factory from models import Account, App, EndUser, Workflow, WorkflowNodeExecutionTriggeredFrom @@ -132,7 +132,7 @@ class WorkflowAppGenerator(BaseAppGenerator): invoke_from=invoke_from, call_depth=call_depth, trace_manager=trace_manager, - workflow_run_id=workflow_run_id, + workflow_execution_id=workflow_run_id, ) contexts.plugin_tool_providers.set({}) @@ -279,7 +279,7 @@ class WorkflowAppGenerator(BaseAppGenerator): single_iteration_run=WorkflowAppGenerateEntity.SingleIterationRunEntity( node_id=node_id, inputs=args["inputs"] ), - workflow_run_id=str(uuid.uuid4()), + workflow_execution_id=str(uuid.uuid4()), ) 
contexts.plugin_tool_providers.set({}) contexts.plugin_tool_providers_lock.set(threading.Lock()) @@ -355,7 +355,7 @@ class WorkflowAppGenerator(BaseAppGenerator): invoke_from=InvokeFrom.DEBUGGER, extras={"auto_generate_conversation_name": False}, single_loop_run=WorkflowAppGenerateEntity.SingleLoopRunEntity(node_id=node_id, inputs=args["inputs"]), - workflow_run_id=str(uuid.uuid4()), + workflow_execution_id=str(uuid.uuid4()), ) contexts.plugin_tool_providers.set({}) contexts.plugin_tool_providers_lock.set(threading.Lock()) diff --git a/api/core/app/apps/workflow/app_runner.py b/api/core/app/apps/workflow/app_runner.py index b38ee18ac4..b59e34e222 100644 --- a/api/core/app/apps/workflow/app_runner.py +++ b/api/core/app/apps/workflow/app_runner.py @@ -95,7 +95,7 @@ class WorkflowAppRunner(WorkflowBasedAppRunner): SystemVariableKey.USER_ID: user_id, SystemVariableKey.APP_ID: app_config.app_id, SystemVariableKey.WORKFLOW_ID: app_config.workflow_id, - SystemVariableKey.WORKFLOW_RUN_ID: self.application_generate_entity.workflow_run_id, + SystemVariableKey.WORKFLOW_EXECUTION_ID: self.application_generate_entity.workflow_execution_id, } variable_pool = VariablePool( diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py index 0291f49cac..1734dbb598 100644 --- a/api/core/app/apps/workflow/generate_task_pipeline.py +++ b/api/core/app/apps/workflow/generate_task_pipeline.py @@ -50,16 +50,15 @@ from core.app.entities.task_entities import ( WorkflowAppStreamResponse, WorkflowFinishStreamResponse, WorkflowStartStreamResponse, - WorkflowTaskState, ) from core.app.task_pipeline.based_generate_task_pipeline import BasedGenerateTaskPipeline from core.base.tts import AppGeneratorTTSPublisher, AudioTrunk from core.ops.ops_trace_manager import TraceQueueManager -from core.workflow.entities.workflow_execution_entities import WorkflowExecution +from core.workflow.entities.workflow_execution import WorkflowExecution, WorkflowExecutionStatus, WorkflowType from core.workflow.enums import SystemVariableKey -from core.workflow.repository.workflow_execution_repository import WorkflowExecutionRepository -from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository -from core.workflow.workflow_cycle_manager import WorkflowCycleManager +from core.workflow.repositories.workflow_execution_repository import WorkflowExecutionRepository +from core.workflow.repositories.workflow_node_execution_repository import WorkflowNodeExecutionRepository +from core.workflow.workflow_cycle_manager import CycleManagerWorkflowInfo, WorkflowCycleManager from extensions.ext_database import db from models.account import Account from models.enums import CreatorUserRole @@ -69,7 +68,6 @@ from models.workflow import ( WorkflowAppLog, WorkflowAppLogCreatedFrom, WorkflowRun, - WorkflowRunStatus, ) logger = logging.getLogger(__name__) @@ -114,8 +112,14 @@ class WorkflowAppGenerateTaskPipeline: SystemVariableKey.USER_ID: user_session_id, SystemVariableKey.APP_ID: application_generate_entity.app_config.app_id, SystemVariableKey.WORKFLOW_ID: workflow.id, - SystemVariableKey.WORKFLOW_RUN_ID: application_generate_entity.workflow_run_id, + SystemVariableKey.WORKFLOW_EXECUTION_ID: application_generate_entity.workflow_execution_id, }, + workflow_info=CycleManagerWorkflowInfo( + workflow_id=workflow.id, + workflow_type=WorkflowType(workflow.type), + version=workflow.version, + graph_data=workflow.graph_dict, + ), 
workflow_execution_repository=workflow_execution_repository, workflow_node_execution_repository=workflow_node_execution_repository, ) @@ -125,9 +129,7 @@ class WorkflowAppGenerateTaskPipeline: ) self._application_generate_entity = application_generate_entity - self._workflow_id = workflow.id self._workflow_features_dict = workflow.features_dict - self._task_state = WorkflowTaskState() self._workflow_run_id = "" def process(self) -> Union[WorkflowAppBlockingResponse, Generator[WorkflowAppStreamResponse, None, None]]: @@ -266,17 +268,13 @@ class WorkflowAppGenerateTaskPipeline: # override graph runtime state graph_runtime_state = event.graph_runtime_state - with Session(db.engine, expire_on_commit=False) as session: - # init workflow run - workflow_execution = self._workflow_cycle_manager.handle_workflow_run_start( - session=session, - workflow_id=self._workflow_id, - ) - self._workflow_run_id = workflow_execution.id - start_resp = self._workflow_response_converter.workflow_start_to_stream_response( - task_id=self._application_generate_entity.task_id, - workflow_execution=workflow_execution, - ) + # init workflow run + workflow_execution = self._workflow_cycle_manager.handle_workflow_run_start() + self._workflow_run_id = workflow_execution.id_ + start_resp = self._workflow_response_converter.workflow_start_to_stream_response( + task_id=self._application_generate_entity.task_id, + workflow_execution=workflow_execution, + ) yield start_resp elif isinstance( @@ -511,9 +509,9 @@ class WorkflowAppGenerateTaskPipeline: workflow_run_id=self._workflow_run_id, total_tokens=graph_runtime_state.total_tokens, total_steps=graph_runtime_state.node_run_steps, - status=WorkflowRunStatus.FAILED + status=WorkflowExecutionStatus.FAILED if isinstance(event, QueueWorkflowFailedEvent) - else WorkflowRunStatus.STOPPED, + else WorkflowExecutionStatus.STOPPED, error_message=event.error if isinstance(event, QueueWorkflowFailedEvent) else event.get_stop_reason(), @@ -542,7 +540,6 @@ class WorkflowAppGenerateTaskPipeline: if tts_publisher: tts_publisher.publish(queue_message) - self._task_state.answer += delta_text yield self._text_chunk_to_stream_response( delta_text, from_variable_selector=event.from_variable_selector ) @@ -557,7 +554,7 @@ class WorkflowAppGenerateTaskPipeline: tts_publisher.publish(None) def _save_workflow_app_log(self, *, session: Session, workflow_execution: WorkflowExecution) -> None: - workflow_run = session.scalar(select(WorkflowRun).where(WorkflowRun.id == workflow_execution.id)) + workflow_run = session.scalar(select(WorkflowRun).where(WorkflowRun.id == workflow_execution.id_)) assert workflow_run is not None invoke_from = self._application_generate_entity.invoke_from if invoke_from == InvokeFrom.SERVICE_API: diff --git a/api/core/app/apps/workflow_app_runner.py b/api/core/app/apps/workflow_app_runner.py index 0884fac4a9..facc24b4ca 100644 --- a/api/core/app/apps/workflow_app_runner.py +++ b/api/core/app/apps/workflow_app_runner.py @@ -29,8 +29,8 @@ from core.app.entities.queue_entities import ( QueueWorkflowStartedEvent, QueueWorkflowSucceededEvent, ) -from core.workflow.entities.node_entities import NodeRunMetadataKey from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey from core.workflow.graph_engine.entities.event import ( AgentLogEvent, GraphEngineEvent, @@ -295,7 +295,7 @@ class WorkflowBasedAppRunner(AppRunner): inputs: Mapping[str, Any] | None = {} process_data: Mapping[str, Any] | 
None = {} outputs: Mapping[str, Any] | None = {} - execution_metadata: Mapping[NodeRunMetadataKey, Any] | None = {} + execution_metadata: Mapping[WorkflowNodeExecutionMetadataKey, Any] | None = {} if node_run_result: inputs = node_run_result.inputs process_data = node_run_result.process_data diff --git a/api/core/app/entities/app_invoke_entities.py b/api/core/app/entities/app_invoke_entities.py index 56e6b46a60..c0d99693b0 100644 --- a/api/core/app/entities/app_invoke_entities.py +++ b/api/core/app/entities/app_invoke_entities.py @@ -76,6 +76,8 @@ class AppGenerateEntity(BaseModel): App Generate Entity. """ + model_config = ConfigDict(arbitrary_types_allowed=True) + task_id: str # app config @@ -99,9 +101,6 @@ class AppGenerateEntity(BaseModel): # tracing instance trace_manager: Optional[TraceQueueManager] = None - class Config: - arbitrary_types_allowed = True - class EasyUIBasedAppGenerateEntity(AppGenerateEntity): """ @@ -205,7 +204,7 @@ class WorkflowAppGenerateEntity(AppGenerateEntity): # app config app_config: WorkflowUIBasedAppConfig - workflow_run_id: str + workflow_execution_id: str class SingleIterationRunEntity(BaseModel): """ diff --git a/api/core/app/entities/queue_entities.py b/api/core/app/entities/queue_entities.py index 7228020e9b..42e6a1519c 100644 --- a/api/core/app/entities/queue_entities.py +++ b/api/core/app/entities/queue_entities.py @@ -1,4 +1,4 @@ -from collections.abc import Mapping +from collections.abc import Mapping, Sequence from datetime import datetime from enum import Enum, StrEnum from typing import Any, Optional @@ -6,7 +6,9 @@ from typing import Any, Optional from pydantic import BaseModel from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk -from core.workflow.entities.node_entities import AgentNodeStrategyInit, NodeRunMetadataKey +from core.rag.entities.citation_metadata import RetrievalSourceMetadata +from core.workflow.entities.node_entities import AgentNodeStrategyInit +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState from core.workflow.nodes import NodeType from core.workflow.nodes.base import BaseNodeData @@ -282,7 +284,7 @@ class QueueRetrieverResourcesEvent(AppQueueEvent): """ event: QueueEvent = QueueEvent.RETRIEVER_RESOURCES - retriever_resources: list[dict] + retriever_resources: Sequence[RetrievalSourceMetadata] in_iteration_id: Optional[str] = None """iteration id if node is in iteration""" in_loop_id: Optional[str] = None @@ -412,7 +414,7 @@ class QueueNodeSucceededEvent(AppQueueEvent): inputs: Optional[Mapping[str, Any]] = None process_data: Optional[Mapping[str, Any]] = None outputs: Optional[Mapping[str, Any]] = None - execution_metadata: Optional[Mapping[NodeRunMetadataKey, Any]] = None + execution_metadata: Optional[Mapping[WorkflowNodeExecutionMetadataKey, Any]] = None error: Optional[str] = None """single iteration duration map""" @@ -446,7 +448,7 @@ class QueueNodeRetryEvent(QueueNodeStartedEvent): inputs: Optional[Mapping[str, Any]] = None process_data: Optional[Mapping[str, Any]] = None outputs: Optional[Mapping[str, Any]] = None - execution_metadata: Optional[Mapping[NodeRunMetadataKey, Any]] = None + execution_metadata: Optional[Mapping[WorkflowNodeExecutionMetadataKey, Any]] = None error: str retry_index: int # retry index @@ -480,7 +482,7 @@ class QueueNodeInIterationFailedEvent(AppQueueEvent): inputs: Optional[Mapping[str, Any]] = None process_data: Optional[Mapping[str, 
Any]] = None outputs: Optional[Mapping[str, Any]] = None - execution_metadata: Optional[Mapping[NodeRunMetadataKey, Any]] = None + execution_metadata: Optional[Mapping[WorkflowNodeExecutionMetadataKey, Any]] = None error: str @@ -513,7 +515,7 @@ class QueueNodeInLoopFailedEvent(AppQueueEvent): inputs: Optional[Mapping[str, Any]] = None process_data: Optional[Mapping[str, Any]] = None outputs: Optional[Mapping[str, Any]] = None - execution_metadata: Optional[Mapping[NodeRunMetadataKey, Any]] = None + execution_metadata: Optional[Mapping[WorkflowNodeExecutionMetadataKey, Any]] = None error: str @@ -546,7 +548,7 @@ class QueueNodeExceptionEvent(AppQueueEvent): inputs: Optional[Mapping[str, Any]] = None process_data: Optional[Mapping[str, Any]] = None outputs: Optional[Mapping[str, Any]] = None - execution_metadata: Optional[Mapping[NodeRunMetadataKey, Any]] = None + execution_metadata: Optional[Mapping[WorkflowNodeExecutionMetadataKey, Any]] = None error: str @@ -579,7 +581,7 @@ class QueueNodeFailedEvent(AppQueueEvent): inputs: Optional[Mapping[str, Any]] = None process_data: Optional[Mapping[str, Any]] = None outputs: Optional[Mapping[str, Any]] = None - execution_metadata: Optional[Mapping[NodeRunMetadataKey, Any]] = None + execution_metadata: Optional[Mapping[WorkflowNodeExecutionMetadataKey, Any]] = None error: str diff --git a/api/core/app/entities/task_entities.py b/api/core/app/entities/task_entities.py index 9b2bfcbf61..25c889e922 100644 --- a/api/core/app/entities/task_entities.py +++ b/api/core/app/entities/task_entities.py @@ -2,12 +2,29 @@ from collections.abc import Mapping, Sequence from enum import Enum from typing import Any, Optional -from pydantic import BaseModel, ConfigDict +from pydantic import BaseModel, ConfigDict, Field -from core.model_runtime.entities.llm_entities import LLMResult +from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage from core.model_runtime.utils.encoders import jsonable_encoder -from core.workflow.entities.node_entities import AgentNodeStrategyInit, NodeRunMetadataKey -from models.workflow import WorkflowNodeExecutionStatus +from core.rag.entities.citation_metadata import RetrievalSourceMetadata +from core.workflow.entities.node_entities import AgentNodeStrategyInit +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus + + +class AnnotationReplyAccount(BaseModel): + id: str + name: str + + +class AnnotationReply(BaseModel): + id: str + account: AnnotationReplyAccount + + +class TaskStateMetadata(BaseModel): + annotation_reply: AnnotationReply | None = None + retriever_resources: Sequence[RetrievalSourceMetadata] = Field(default_factory=list) + usage: LLMUsage | None = None class TaskState(BaseModel): @@ -15,7 +32,7 @@ class TaskState(BaseModel): TaskState entity """ - metadata: dict = {} + metadata: TaskStateMetadata = Field(default_factory=TaskStateMetadata) class EasyUITaskState(TaskState): @@ -189,7 +206,6 @@ class WorkflowStartStreamResponse(StreamResponse): id: str workflow_id: str - sequence_number: int inputs: Mapping[str, Any] created_at: int @@ -210,7 +226,6 @@ class WorkflowFinishStreamResponse(StreamResponse): id: str workflow_id: str - sequence_number: int status: str outputs: Optional[Mapping[str, Any]] = None error: Optional[str] = None @@ -307,7 +322,7 @@ class NodeFinishStreamResponse(StreamResponse): status: str error: Optional[str] = None elapsed_time: float - execution_metadata: Optional[Mapping[NodeRunMetadataKey, Any]] = None + 
execution_metadata: Optional[Mapping[WorkflowNodeExecutionMetadataKey, Any]] = None created_at: int finished_at: int files: Optional[Sequence[Mapping[str, Any]]] = [] @@ -376,7 +391,7 @@ class NodeRetryStreamResponse(StreamResponse): status: str error: Optional[str] = None elapsed_time: float - execution_metadata: Optional[Mapping[NodeRunMetadataKey, Any]] = None + execution_metadata: Optional[Mapping[WorkflowNodeExecutionMetadataKey, Any]] = None created_at: int finished_at: int files: Optional[Sequence[Mapping[str, Any]]] = [] diff --git a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py index 6c768fd86c..1ea50a5778 100644 --- a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py +++ b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py @@ -1,4 +1,3 @@ -import json import logging import time from collections.abc import Generator @@ -43,7 +42,7 @@ from core.app.entities.task_entities import ( StreamResponse, ) from core.app.task_pipeline.based_generate_task_pipeline import BasedGenerateTaskPipeline -from core.app.task_pipeline.message_cycle_manage import MessageCycleManage +from core.app.task_pipeline.message_cycle_manager import MessageCycleManager from core.base.tts import AppGeneratorTTSPublisher, AudioTrunk from core.model_manager import ModelInstance from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage @@ -51,7 +50,6 @@ from core.model_runtime.entities.message_entities import ( AssistantPromptMessage, ) from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.utils.encoders import jsonable_encoder from core.ops.entities.trace_entity import TraceTaskName from core.ops.ops_trace_manager import TraceQueueManager, TraceTask from core.prompt.utils.prompt_message_util import PromptMessageUtil @@ -63,7 +61,7 @@ from models.model import AppMode, Conversation, Message, MessageAgentThought logger = logging.getLogger(__name__) -class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleManage): +class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline): """ EasyUIBasedGenerateTaskPipeline is a class that generate stream output and state management for Application. 
""" @@ -104,6 +102,11 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan ) ) + self._message_cycle_manager = MessageCycleManager( + application_generate_entity=application_generate_entity, + task_state=self._task_state, + ) + self._conversation_name_generate_thread: Optional[Thread] = None def process( @@ -115,7 +118,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan ]: if self._application_generate_entity.app_config.app_mode != AppMode.COMPLETION: # start generate conversation name thread - self._conversation_name_generate_thread = self._generate_conversation_name( + self._conversation_name_generate_thread = self._message_cycle_manager.generate_conversation_name( conversation_id=self._conversation_id, query=self._application_generate_entity.query or "" ) @@ -136,9 +139,9 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan if isinstance(stream_response, ErrorStreamResponse): raise stream_response.err elif isinstance(stream_response, MessageEndStreamResponse): - extras = {"usage": jsonable_encoder(self._task_state.llm_result.usage)} + extras = {"usage": self._task_state.llm_result.usage.model_dump()} if self._task_state.metadata: - extras["metadata"] = self._task_state.metadata + extras["metadata"] = self._task_state.metadata.model_dump() response: Union[ChatbotAppBlockingResponse, CompletionAppBlockingResponse] if self._conversation_mode == AppMode.COMPLETION.value: response = CompletionAppBlockingResponse( @@ -277,7 +280,9 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan ) if output_moderation_answer: self._task_state.llm_result.message.content = output_moderation_answer - yield self._message_replace_to_stream_response(answer=output_moderation_answer) + yield self._message_cycle_manager.message_replace_to_stream_response( + answer=output_moderation_answer + ) with Session(db.engine) as session: # Save message @@ -286,9 +291,9 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan message_end_resp = self._message_end_to_stream_response() yield message_end_resp elif isinstance(event, QueueRetrieverResourcesEvent): - self._handle_retriever_resources(event) + self._message_cycle_manager.handle_retriever_resources(event) elif isinstance(event, QueueAnnotationReplyEvent): - annotation = self._handle_annotation_reply(event) + annotation = self._message_cycle_manager.handle_annotation_reply(event) if annotation: self._task_state.llm_result.message.content = annotation.content elif isinstance(event, QueueAgentThoughtEvent): @@ -296,7 +301,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan if agent_thought_response is not None: yield agent_thought_response elif isinstance(event, QueueMessageFileEvent): - response = self._message_file_to_stream_response(event) + response = self._message_cycle_manager.message_file_to_stream_response(event) if response: yield response elif isinstance(event, QueueLLMChunkEvent | QueueAgentMessageEvent): @@ -318,7 +323,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan self._task_state.llm_result.message.content = current_content if isinstance(event, QueueLLMChunkEvent): - yield self._message_to_stream_response( + yield self._message_cycle_manager.message_to_stream_response( answer=cast(str, delta_text), message_id=self._message_id, ) @@ -328,7 +333,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan 
message_id=self._message_id, ) elif isinstance(event, QueueMessageReplaceEvent): - yield self._message_replace_to_stream_response(answer=event.text) + yield self._message_cycle_manager.message_replace_to_stream_response(answer=event.text) elif isinstance(event, QueuePingEvent): yield self._ping_stream_response() else: @@ -372,9 +377,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan message.provider_response_latency = time.perf_counter() - self._start_at message.total_price = usage.total_price message.currency = usage.currency - message.message_metadata = ( - json.dumps(jsonable_encoder(self._task_state.metadata)) if self._task_state.metadata else None - ) + message.message_metadata = self._task_state.metadata.model_dump_json() if trace_manager: trace_manager.add_trace_task( @@ -423,16 +426,12 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan Message end to stream response. :return: """ - self._task_state.metadata["usage"] = jsonable_encoder(self._task_state.llm_result.usage) - - extras = {} - if self._task_state.metadata: - extras["metadata"] = self._task_state.metadata - + self._task_state.metadata.usage = self._task_state.llm_result.usage + metadata_dict = self._task_state.metadata.model_dump() return MessageEndStreamResponse( task_id=self._application_generate_entity.task_id, id=self._message_id, - metadata=extras.get("metadata", {}), + metadata=metadata_dict, ) def _agent_message_to_stream_response(self, answer: str, message_id: str) -> AgentMessageStreamResponse: diff --git a/api/core/app/task_pipeline/message_cycle_manage.py b/api/core/app/task_pipeline/message_cycle_manager.py similarity index 85% rename from api/core/app/task_pipeline/message_cycle_manage.py rename to api/core/app/task_pipeline/message_cycle_manager.py index a6d826f08b..2343081eaf 100644 --- a/api/core/app/task_pipeline/message_cycle_manage.py +++ b/api/core/app/task_pipeline/message_cycle_manager.py @@ -17,6 +17,8 @@ from core.app.entities.queue_entities import ( QueueRetrieverResourcesEvent, ) from core.app.entities.task_entities import ( + AnnotationReply, + AnnotationReplyAccount, EasyUITaskState, MessageFileStreamResponse, MessageReplaceStreamResponse, @@ -30,7 +32,7 @@ from models.model import AppMode, Conversation, MessageAnnotation, MessageFile from services.annotation_service import AppAnnotationService -class MessageCycleManage: +class MessageCycleManager: def __init__( self, *, @@ -45,7 +47,7 @@ class MessageCycleManage: self._application_generate_entity = application_generate_entity self._task_state = task_state - def _generate_conversation_name(self, *, conversation_id: str, query: str) -> Optional[Thread]: + def generate_conversation_name(self, *, conversation_id: str, query: str) -> Optional[Thread]: """ Generate conversation name. :param conversation_id: conversation id @@ -102,7 +104,7 @@ class MessageCycleManage: db.session.commit() db.session.close() - def _handle_annotation_reply(self, event: QueueAnnotationReplyEvent) -> Optional[MessageAnnotation]: + def handle_annotation_reply(self, event: QueueAnnotationReplyEvent) -> Optional[MessageAnnotation]: """ Handle annotation reply. 
:param event: event @@ -111,25 +113,28 @@ class MessageCycleManage: annotation = AppAnnotationService.get_annotation_by_id(event.message_annotation_id) if annotation: account = annotation.account - self._task_state.metadata["annotation_reply"] = { - "id": annotation.id, - "account": {"id": annotation.account_id, "name": account.name if account else "Dify user"}, - } + self._task_state.metadata.annotation_reply = AnnotationReply( + id=annotation.id, + account=AnnotationReplyAccount( + id=annotation.account_id, + name=account.name if account else "Dify user", + ), + ) return annotation return None - def _handle_retriever_resources(self, event: QueueRetrieverResourcesEvent) -> None: + def handle_retriever_resources(self, event: QueueRetrieverResourcesEvent) -> None: """ Handle retriever resources. :param event: event :return: """ if self._application_generate_entity.app_config.additional_features.show_retrieve_source: - self._task_state.metadata["retriever_resources"] = event.retriever_resources + self._task_state.metadata.retriever_resources = event.retriever_resources - def _message_file_to_stream_response(self, event: QueueMessageFileEvent) -> Optional[MessageFileStreamResponse]: + def message_file_to_stream_response(self, event: QueueMessageFileEvent) -> Optional[MessageFileStreamResponse]: """ Message file to stream response. :param event: event @@ -166,7 +171,7 @@ class MessageCycleManage: return None - def _message_to_stream_response( + def message_to_stream_response( self, answer: str, message_id: str, from_variable_selector: Optional[list[str]] = None ) -> MessageStreamResponse: """ @@ -182,7 +187,7 @@ class MessageCycleManage: from_variable_selector=from_variable_selector, ) - def _message_replace_to_stream_response(self, answer: str, reason: str = "") -> MessageReplaceStreamResponse: + def message_replace_to_stream_response(self, answer: str, reason: str = "") -> MessageReplaceStreamResponse: """ Message replace to stream response. 
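# A minimal sketch (not the actual task_entities definitions) of the typed
# task-state metadata pattern introduced in the MessageCycleManager hunks above:
# annotation replies and retriever resources become pydantic models instead of
# loose dict keys, so call sites get attribute access and `.model_dump()`
# replaces hand-built dicts. Field names mirror the diff; the exact shape of
# Dify's real models is assumed, not shown here.
from typing import Optional

from pydantic import BaseModel, Field


class AnnotationReplyAccount(BaseModel):
    id: str
    name: str


class AnnotationReply(BaseModel):
    id: str
    account: AnnotationReplyAccount


class TaskMetadataSketch(BaseModel):
    # The real task-state metadata model may carry more fields than these two.
    annotation_reply: Optional[AnnotationReply] = None
    retriever_resources: list[dict] = Field(default_factory=list)


metadata = TaskMetadataSketch()
metadata.annotation_reply = AnnotationReply(
    id="annotation-1",
    account=AnnotationReplyAccount(id="account-1", name="Dify user"),
)
# model_dump() yields the same plain-dict shape the old dict-based code built by hand.
print(metadata.model_dump())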
:param answer: answer diff --git a/api/core/callback_handler/index_tool_callback_handler.py b/api/core/callback_handler/index_tool_callback_handler.py index 13c22213c4..a3a7b4b812 100644 --- a/api/core/callback_handler/index_tool_callback_handler.py +++ b/api/core/callback_handler/index_tool_callback_handler.py @@ -1,8 +1,10 @@ import logging +from collections.abc import Sequence from core.app.apps.base_app_queue_manager import AppQueueManager, PublishFrom from core.app.entities.app_invoke_entities import InvokeFrom from core.app.entities.queue_entities import QueueRetrieverResourcesEvent +from core.rag.entities.citation_metadata import RetrievalSourceMetadata from core.rag.index_processor.constant.index_type import IndexType from core.rag.models.document import Document from extensions.ext_database import db @@ -85,7 +87,8 @@ class DatasetIndexToolCallbackHandler: db.session.commit() - def return_retriever_resource_info(self, resource: list): + # TODO(-LAN-): Improve type check + def return_retriever_resource_info(self, resource: Sequence[RetrievalSourceMetadata]): """Handle return_retriever_resource_info.""" self._queue_manager.publish( QueueRetrieverResourcesEvent(retriever_resources=resource), PublishFrom.APPLICATION_MANAGER diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py index 34ea3aec26..ddfa1e7a66 100644 --- a/api/core/llm_generator/prompts.py +++ b/api/core/llm_generator/prompts.py @@ -1,61 +1,20 @@ -# Written by YORKI MINAKO🤡, Edited by Xiaoyi -CONVERSATION_TITLE_PROMPT = """You need to decompose the user's input into "subject" and "intention" in order to accurately figure out what the user's input language actually is. -Notice: the language type user uses could be diverse, which can be English, Chinese, Italian, Español, Arabic, Japanese, French, and etc. -ENSURE your output is in the SAME language as the user's input! -Your output is restricted only to: (Input language) Intention + Subject(short as possible) -Your output MUST be a valid JSON. +# Written by YORKI MINAKO🤡, Edited by Xiaoyi, Edited by yasu-oh +CONVERSATION_TITLE_PROMPT = """You are asked to generate a concise chat title by decomposing the user’s input into two parts: “Intention” and “Subject”. -Tip: When the user's question is directed at you (the language model), you can add an emoji to make it more fun. +1. Detect Input Language +Automatically identify the language of the user’s input (e.g. English, Chinese, Italian, Español, Arabic, Japanese, French, and etc.). +2. Generate Title +- Combine Intention + Subject into a single, as-short-as-possible phrase. +- The title must be natural, friendly, and in the same language as the input. +- If the input is a direct question to the model, you may add an emoji at the end. -example 1: -User Input: hi, yesterday i had some burgers. +3. 
Output Format +Return **only** a valid JSON object with these exact keys and no additional text: { - "Language Type": "The user's input is pure English", - "Your Reasoning": "The language of my output must be pure English.", - "Your Output": "sharing yesterday's food" -} - -example 2: -User Input: hello -{ - "Language Type": "The user's input is pure English", - "Your Reasoning": "The language of my output must be pure English.", - "Your Output": "Greeting myself☺️" -} - - -example 3: -User Input: why mmap file: oom -{ - "Language Type": "The user's input is written in pure English", - "Your Reasoning": "The language of my output must be pure English.", - "Your Output": "Asking about the reason for mmap file: oom" -} - - -example 4: -User Input: www.convinceme.yesterday-you-ate-seafood.tv讲了什么? -{ - "Language Type": "The user's input English-Chinese mixed", - "Your Reasoning": "The English-part is an URL, the main intention is still written in Chinese, so the language of my output must be using Chinese.", - "Your Output": "询问网站www.convinceme.yesterday-you-ate-seafood.tv" -} - -example 5: -User Input: why小红的年龄is老than小明? -{ - "Language Type": "The user's input is English-Chinese mixed", - "Your Reasoning": "The English parts are filler words, the main intention is written in Chinese, besides, Chinese occupies a greater \"actual meaning\" than English, so the language of my output must be using Chinese.", - "Your Output": "询问小红和小明的年龄" -} - -example 6: -User Input: yo, 你今天咋样? -{ - "Language Type": "The user's input is English-Chinese mixed", - "Your Reasoning": "The English-part is a subjective particle, the main intention is written in Chinese, so the language of my output must be using Chinese.", - "Your Output": "查询今日我的状态☺️" + "Language Type": "", + "Your Reasoning": "", + "Your Output": "" } User Input: diff --git a/api/core/model_runtime/utils/encoders.py b/api/core/model_runtime/utils/encoders.py index 03e3506271..a5c11aeeba 100644 --- a/api/core/model_runtime/utils/encoders.py +++ b/api/core/model_runtime/utils/encoders.py @@ -129,17 +129,18 @@ def jsonable_encoder( sqlalchemy_safe=sqlalchemy_safe, ) if dataclasses.is_dataclass(obj): - # FIXME: mypy error, try to fix it instead of using type: ignore - obj_dict = dataclasses.asdict(obj) # type: ignore - return jsonable_encoder( - obj_dict, - by_alias=by_alias, - exclude_unset=exclude_unset, - exclude_defaults=exclude_defaults, - exclude_none=exclude_none, - custom_encoder=custom_encoder, - sqlalchemy_safe=sqlalchemy_safe, - ) + # Ensure obj is a dataclass instance, not a dataclass type + if not isinstance(obj, type): + obj_dict = dataclasses.asdict(obj) + return jsonable_encoder( + obj_dict, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + custom_encoder=custom_encoder, + sqlalchemy_safe=sqlalchemy_safe, + ) if isinstance(obj, Enum): return obj.value if isinstance(obj, PurePath): diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py index f0e34c0cd7..151fa2aaf4 100644 --- a/api/core/ops/entities/trace_entity.py +++ b/api/core/ops/entities/trace_entity.py @@ -3,7 +3,7 @@ from datetime import datetime from enum import StrEnum from typing import Any, Optional, Union -from pydantic import BaseModel, ConfigDict, field_validator +from pydantic import BaseModel, ConfigDict, field_serializer, field_validator class BaseTraceInfo(BaseModel): @@ -24,10 +24,13 @@ class BaseTraceInfo(BaseModel): return v return "" - class Config: - json_encoders 
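# Why the `isinstance(obj, type)` guard in the jsonable_encoder hunk above matters:
# dataclasses.is_dataclass() is True for both a dataclass *class* and its
# *instances*, while dataclasses.asdict() only accepts instances. A small
# standalone demonstration, independent of Dify's encoder:
import dataclasses


@dataclasses.dataclass
class Point:
    x: int = 0
    y: int = 0


assert dataclasses.is_dataclass(Point)        # True for the class object itself
assert dataclasses.is_dataclass(Point(1, 2))  # True for an instance

assert dataclasses.asdict(Point(1, 2)) == {"x": 1, "y": 2}

try:
    dataclasses.asdict(Point)  # passing the class raises TypeError
except TypeError:
    pass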
= { - datetime: lambda v: v.isoformat(), - } + model_config = ConfigDict(protected_namespaces=()) + + @field_serializer("start_time", "end_time") + def serialize_datetime(self, dt: datetime | None) -> str | None: + if dt is None: + return None + return dt.isoformat() class WorkflowTraceInfo(BaseTraceInfo): diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py index 120c36f53d..fa1c6b4557 100644 --- a/api/core/ops/langfuse_trace/langfuse_trace.py +++ b/api/core/ops/langfuse_trace/langfuse_trace.py @@ -32,6 +32,7 @@ from core.repositories import SQLAlchemyWorkflowNodeExecutionRepository from core.workflow.nodes.enums import NodeType from extensions.ext_database import db from models import Account, App, EndUser, WorkflowNodeExecutionTriggeredFrom +from models.account import TenantAccountJoin logger = logging.getLogger(__name__) @@ -130,6 +131,12 @@ class LangFuseDataTrace(BaseTraceInstance): service_account = session.query(Account).filter(Account.id == app.created_by).first() if not service_account: raise ValueError(f"Creator account with id {app.created_by} not found for app {app_id}") + current_tenant = ( + session.query(TenantAccountJoin).filter_by(account_id=service_account.id, current=True).first() + ) + if not current_tenant: + raise ValueError(f"Current tenant not found for account {service_account.id}") + service_account.set_tenant_id(current_tenant.tenant_id) workflow_node_execution_repository = SQLAlchemyWorkflowNodeExecutionRepository( session_factory=session_factory, diff --git a/api/core/ops/langsmith_trace/langsmith_trace.py b/api/core/ops/langsmith_trace/langsmith_trace.py index 6631727c79..e4183ea1e2 100644 --- a/api/core/ops/langsmith_trace/langsmith_trace.py +++ b/api/core/ops/langsmith_trace/langsmith_trace.py @@ -28,7 +28,7 @@ from core.ops.langsmith_trace.entities.langsmith_trace_entity import ( ) from core.ops.utils import filter_none_values, generate_dotted_order from core.repositories import SQLAlchemyWorkflowNodeExecutionRepository -from core.workflow.entities.node_entities import NodeRunMetadataKey +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey from core.workflow.nodes.enums import NodeType from extensions.ext_database import db from models import Account, App, EndUser, MessageFile, WorkflowNodeExecutionTriggeredFrom @@ -185,7 +185,7 @@ class LangSmithDataTrace(BaseTraceInstance): finished_at = created_at + timedelta(seconds=elapsed_time) execution_metadata = node_execution.metadata if node_execution.metadata else {} - node_total_tokens = execution_metadata.get(NodeRunMetadataKey.TOTAL_TOKENS) or 0 + node_total_tokens = execution_metadata.get(WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS) or 0 metadata = {str(key): value for key, value in execution_metadata.items()} metadata.update( { diff --git a/api/core/ops/opik_trace/opik_trace.py b/api/core/ops/opik_trace/opik_trace.py index 6c159a4831..f7a4464267 100644 --- a/api/core/ops/opik_trace/opik_trace.py +++ b/api/core/ops/opik_trace/opik_trace.py @@ -22,7 +22,7 @@ from core.ops.entities.trace_entity import ( WorkflowTraceInfo, ) from core.repositories import SQLAlchemyWorkflowNodeExecutionRepository -from core.workflow.entities.node_entities import NodeRunMetadataKey +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey from core.workflow.nodes.enums import NodeType from extensions.ext_database import db from models import Account, App, EndUser, MessageFile, 
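# Sketch of the json_encoders -> field_serializer migration shown in the
# trace_entity.py hunk above, on a standalone model: only the datetime handling
# is illustrated, and the real BaseTraceInfo carries more fields than this.
from datetime import datetime, timezone
from typing import Optional

from pydantic import BaseModel, field_serializer


class TraceInfoSketch(BaseModel):
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None

    @field_serializer("start_time", "end_time")
    def serialize_datetime(self, dt: Optional[datetime]) -> Optional[str]:
        # Replaces the removed `json_encoders = {datetime: lambda v: v.isoformat()}`.
        return dt.isoformat() if dt is not None else None


info = TraceInfoSketch(start_time=datetime(2024, 1, 1, tzinfo=timezone.utc))
print(info.model_dump_json())  # {"start_time":"2024-01-01T00:00:00+00:00","end_time":null}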
WorkflowNodeExecutionTriggeredFrom @@ -246,7 +246,7 @@ class OpikDataTrace(BaseTraceInstance): parent_span_id = trace_info.workflow_app_log_id or trace_info.workflow_run_id if not total_tokens: - total_tokens = execution_metadata.get(NodeRunMetadataKey.TOTAL_TOKENS) or 0 + total_tokens = execution_metadata.get(WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS) or 0 span_data = { "trace_id": opik_trace_id, diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index 32301e11e7..dc4cfc48db 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -30,7 +30,7 @@ from core.ops.entities.trace_entity import ( WorkflowTraceInfo, ) from core.ops.utils import get_message_data -from core.workflow.entities.workflow_execution_entities import WorkflowExecution +from core.workflow.entities.workflow_execution import WorkflowExecution from extensions.ext_database import db from extensions.ext_storage import storage from models.model import App, AppModelConfig, Conversation, Message, MessageFile, TraceAppConfig @@ -386,7 +386,7 @@ class TraceTask: ): self.trace_type = trace_type self.message_id = message_id - self.workflow_run_id = workflow_execution.id if workflow_execution else None + self.workflow_run_id = workflow_execution.id_ if workflow_execution else None self.conversation_id = conversation_id self.user_id = user_id self.timer = timer @@ -487,6 +487,7 @@ class TraceTask: "file_list": file_list, "triggered_from": workflow_run.triggered_from, "user_id": user_id, + "app_id": workflow_run.app_id, } workflow_trace_info = WorkflowTraceInfo( diff --git a/api/core/ops/weave_trace/weave_trace.py b/api/core/ops/weave_trace/weave_trace.py index a4f38dfbba..b12380be47 100644 --- a/api/core/ops/weave_trace/weave_trace.py +++ b/api/core/ops/weave_trace/weave_trace.py @@ -23,7 +23,7 @@ from core.ops.entities.trace_entity import ( ) from core.ops.weave_trace.entities.weave_trace_entity import WeaveTraceModel from core.repositories import SQLAlchemyWorkflowNodeExecutionRepository -from core.workflow.entities.node_entities import NodeRunMetadataKey +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey from core.workflow.nodes.enums import NodeType from extensions.ext_database import db from models import Account, App, EndUser, MessageFile, WorkflowNodeExecutionTriggeredFrom @@ -179,7 +179,7 @@ class WeaveDataTrace(BaseTraceInstance): finished_at = created_at + timedelta(seconds=elapsed_time) execution_metadata = node_execution.metadata if node_execution.metadata else {} - node_total_tokens = execution_metadata.get(NodeRunMetadataKey.TOTAL_TOKENS) or 0 + node_total_tokens = execution_metadata.get(WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS) or 0 attributes = {str(k): v for k, v in execution_metadata.items()} attributes.update( { diff --git a/api/core/plugin/backwards_invocation/model.py b/api/core/plugin/backwards_invocation/model.py index 5ec9620f22..17cfaf2edf 100644 --- a/api/core/plugin/backwards_invocation/model.py +++ b/api/core/plugin/backwards_invocation/model.py @@ -58,6 +58,7 @@ class PluginModelBackwardsInvocation(BaseBackwardsInvocation): LLMNode.deduct_llm_quota( tenant_id=tenant.id, model_instance=model_instance, usage=chunk.delta.usage ) + chunk.prompt_messages = [] yield chunk return handle() @@ -68,7 +69,7 @@ class PluginModelBackwardsInvocation(BaseBackwardsInvocation): def handle_non_streaming(response: LLMResult) -> Generator[LLMResultChunk, None, None]: yield LLMResultChunk( model=response.model, - 
prompt_messages=response.prompt_messages, + prompt_messages=[], system_fingerprint=response.system_fingerprint, delta=LLMResultChunkDelta( index=0, diff --git a/api/core/plugin/impl/base.py b/api/core/plugin/impl/base.py index 591e7b0525..7b9592bff3 100644 --- a/api/core/plugin/impl/base.py +++ b/api/core/plugin/impl/base.py @@ -6,6 +6,7 @@ from typing import TypeVar import requests from pydantic import BaseModel +from requests.exceptions import HTTPError from yarl import URL from configs import dify_config @@ -136,12 +137,31 @@ class BasePluginClient: """ Make a request to the plugin daemon inner API and return the response as a model. """ - response = self._request(method, path, headers, data, params, files) - json_response = response.json() - if transformer: - json_response = transformer(json_response) + try: + response = self._request(method, path, headers, data, params, files) + response.raise_for_status() + except HTTPError as e: + msg = f"Failed to request plugin daemon, status: {e.response.status_code}, url: {path}" + logging.exception(msg) + raise e + except Exception as e: + msg = f"Failed to request plugin daemon, url: {path}" + logging.exception(msg) + raise ValueError(msg) from e + + try: + json_response = response.json() + if transformer: + json_response = transformer(json_response) + rep = PluginDaemonBasicResponse[type](**json_response) # type: ignore + except Exception: + msg = ( + f"Failed to parse response from plugin daemon to PluginDaemonBasicResponse [{str(type.__name__)}]," + f" url: {path}" + ) + logging.exception(msg) + raise ValueError(msg) - rep = PluginDaemonBasicResponse[type](**json_response) # type: ignore if rep.code != 0: try: error = PluginDaemonError(**json.loads(rep.message)) diff --git a/api/core/rag/datasource/vdb/baidu/baidu_vector.py b/api/core/rag/datasource/vdb/baidu/baidu_vector.py index 86f1f5bfe4..db7ffc9c4f 100644 --- a/api/core/rag/datasource/vdb/baidu/baidu_vector.py +++ b/api/core/rag/datasource/vdb/baidu/baidu_vector.py @@ -85,7 +85,6 @@ class BaiduVector(BaseVector): end = min(start + batch_size, total_count) rows = [] assert len(metadatas) == total_count, "metadatas length should be equal to total_count" - # FIXME do you need this assert? 
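# Standalone sketch of the error-handling shape added in the plugin impl/base.py
# hunk above: fail loudly on HTTP errors, then fail loudly again if the body
# cannot be parsed into the expected typed envelope. The URL and response model
# below are placeholders, not Dify's real plugin-daemon API.
import logging

import requests
from pydantic import BaseModel
from requests.exceptions import HTTPError


class BasicResponseSketch(BaseModel):
    code: int
    message: str = ""


def request_daemon(path: str, base_url: str = "http://localhost:5002") -> BasicResponseSketch:
    try:
        response = requests.get(f"{base_url}{path}", timeout=10)
        response.raise_for_status()
    except HTTPError:
        logging.exception("Failed to request plugin daemon, url: %s", path)
        raise
    except Exception as e:
        msg = f"Failed to request plugin daemon, url: {path}"
        logging.exception(msg)
        raise ValueError(msg) from e

    try:
        return BasicResponseSketch(**response.json())
    except Exception as e:
        msg = f"Failed to parse response from plugin daemon, url: {path}"
        logging.exception(msg)
        raise ValueError(msg) from e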
for i in range(start, end, 1): row = Row( id=metadatas[i].get("doc_id", str(uuid.uuid4())), diff --git a/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_service.py b/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_service.py index 3958280bd5..184b5f2142 100644 --- a/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_service.py +++ b/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_service.py @@ -245,4 +245,4 @@ class TidbService: return cluster_infos else: response.raise_for_status() - return [] # FIXME for mypy, This line will not be reached as raise_for_status() will raise an exception + return [] diff --git a/api/core/rag/entities/citation_metadata.py b/api/core/rag/entities/citation_metadata.py new file mode 100644 index 0000000000..00120425c9 --- /dev/null +++ b/api/core/rag/entities/citation_metadata.py @@ -0,0 +1,23 @@ +from typing import Any, Optional + +from pydantic import BaseModel + + +class RetrievalSourceMetadata(BaseModel): + position: Optional[int] = None + dataset_id: Optional[str] = None + dataset_name: Optional[str] = None + document_id: Optional[str] = None + document_name: Optional[str] = None + data_source_type: Optional[str] = None + segment_id: Optional[str] = None + retriever_from: Optional[str] = None + score: Optional[float] = None + hit_count: Optional[int] = None + word_count: Optional[int] = None + segment_position: Optional[int] = None + index_node_hash: Optional[str] = None + content: Optional[str] = None + page: Optional[int] = None + doc_metadata: Optional[dict[str, Any]] = None + title: Optional[str] = None diff --git a/api/core/rag/extractor/entity/extract_setting.py b/api/core/rag/extractor/entity/extract_setting.py index 7c00c668dd..1593ad1475 100644 --- a/api/core/rag/extractor/entity/extract_setting.py +++ b/api/core/rag/extractor/entity/extract_setting.py @@ -27,6 +27,8 @@ class WebsiteInfo(BaseModel): website import info. """ + model_config = ConfigDict(arbitrary_types_allowed=True) + provider: str job_id: str url: str @@ -34,12 +36,6 @@ class WebsiteInfo(BaseModel): tenant_id: str only_main_content: bool = False - class Config: - arbitrary_types_allowed = True - - def __init__(self, **data) -> None: - super().__init__(**data) - class ExtractSetting(BaseModel): """ diff --git a/api/core/rag/models/document.py b/api/core/rag/models/document.py index 421cdc05df..04a3428ad8 100644 --- a/api/core/rag/models/document.py +++ b/api/core/rag/models/document.py @@ -45,13 +45,12 @@ class BaseDocumentTransformer(ABC): .. 
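# How the new RetrievalSourceMetadata model above gets used further down in this
# diff (dataset_retrieval.py and the dataset retriever tools): build typed
# sources, sort by score, then number them. Field names come from the new
# citation_metadata.py; the sample values are invented for illustration.
from typing import Optional

from pydantic import BaseModel


class RetrievalSourceMetadataSketch(BaseModel):
    # Subset of the fields defined in api/core/rag/entities/citation_metadata.py.
    position: Optional[int] = None
    dataset_id: Optional[str] = None
    document_name: Optional[str] = None
    score: Optional[float] = None
    content: Optional[str] = None


sources = [
    RetrievalSourceMetadataSketch(dataset_id="ds-1", document_name="a.md", score=0.42, content="..."),
    RetrievalSourceMetadataSketch(dataset_id="ds-2", document_name="b.md", score=0.87, content="..."),
    RetrievalSourceMetadataSketch(dataset_id="ds-3", document_name="c.md", score=None, content="..."),
]

# Same ordering rule as the later hunks: missing scores sort last, best score first.
sources.sort(key=lambda s: s.score or 0.0, reverse=True)
for position, source in enumerate(sources, start=1):
    source.position = position

print([(s.position, s.document_name, s.score) for s in sources])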
code-block:: python class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + embeddings: Embeddings similarity_fn: Callable = cosine_similarity similarity_threshold: float = 0.95 - class Config: - arbitrary_types_allowed = True - def transform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index c4adf6de4d..6978860529 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -35,6 +35,7 @@ from core.prompt.simple_prompt_transform import ModelMode from core.rag.data_post_processor.data_post_processor import DataPostProcessor from core.rag.datasource.keyword.jieba.jieba_keyword_table_handler import JiebaKeywordTableHandler from core.rag.datasource.retrieval_service import RetrievalService +from core.rag.entities.citation_metadata import RetrievalSourceMetadata from core.rag.entities.context_entities import DocumentContext from core.rag.entities.metadata_entities import Condition, MetadataCondition from core.rag.index_processor.constant.index_type import IndexType @@ -198,21 +199,21 @@ class DatasetRetrieval: dify_documents = [item for item in all_documents if item.provider == "dify"] external_documents = [item for item in all_documents if item.provider == "external"] - document_context_list = [] - retrieval_resource_list = [] + document_context_list: list[DocumentContext] = [] + retrieval_resource_list: list[RetrievalSourceMetadata] = [] # deal with external documents for item in external_documents: document_context_list.append(DocumentContext(content=item.page_content, score=item.metadata.get("score"))) - source = { - "dataset_id": item.metadata.get("dataset_id"), - "dataset_name": item.metadata.get("dataset_name"), - "document_id": item.metadata.get("document_id") or item.metadata.get("title"), - "document_name": item.metadata.get("title"), - "data_source_type": "external", - "retriever_from": invoke_from.to_source(), - "score": item.metadata.get("score"), - "content": item.page_content, - } + source = RetrievalSourceMetadata( + dataset_id=item.metadata.get("dataset_id"), + dataset_name=item.metadata.get("dataset_name"), + document_id=item.metadata.get("document_id") or item.metadata.get("title"), + document_name=item.metadata.get("title"), + data_source_type="external", + retriever_from=invoke_from.to_source(), + score=item.metadata.get("score"), + content=item.page_content, + ) retrieval_resource_list.append(source) # deal with dify documents if dify_documents: @@ -248,32 +249,32 @@ class DatasetRetrieval: .first() ) if dataset and document: - source = { - "dataset_id": dataset.id, - "dataset_name": dataset.name, - "document_id": document.id, - "document_name": document.name, - "data_source_type": document.data_source_type, - "segment_id": segment.id, - "retriever_from": invoke_from.to_source(), - "score": record.score or 0.0, - "doc_metadata": document.doc_metadata, - } + source = RetrievalSourceMetadata( + dataset_id=dataset.id, + dataset_name=dataset.name, + document_id=document.id, + document_name=document.name, + data_source_type=document.data_source_type, + segment_id=segment.id, + retriever_from=invoke_from.to_source(), + score=record.score or 0.0, + doc_metadata=document.doc_metadata, + ) if invoke_from.to_source() == "dev": - source["hit_count"] = segment.hit_count - source["word_count"] = segment.word_count - 
source["segment_position"] = segment.position - source["index_node_hash"] = segment.index_node_hash + source.hit_count = segment.hit_count + source.word_count = segment.word_count + source.segment_position = segment.position + source.index_node_hash = segment.index_node_hash if segment.answer: - source["content"] = f"question:{segment.content} \nanswer:{segment.answer}" + source.content = f"question:{segment.content} \nanswer:{segment.answer}" else: - source["content"] = segment.content + source.content = segment.content retrieval_resource_list.append(source) if hit_callback and retrieval_resource_list: - retrieval_resource_list = sorted(retrieval_resource_list, key=lambda x: x.get("score") or 0.0, reverse=True) + retrieval_resource_list = sorted(retrieval_resource_list, key=lambda x: x.score or 0.0, reverse=True) for position, item in enumerate(retrieval_resource_list, start=1): - item["position"] = position + item.position = position hit_callback.return_retriever_resource_info(retrieval_resource_list) if document_context_list: document_context_list = sorted(document_context_list, key=lambda x: x.score or 0.0, reverse=True) @@ -936,6 +937,9 @@ class DatasetRetrieval: return metadata_filter_document_ids, metadata_condition def _replace_metadata_filter_value(self, text: str, inputs: dict) -> str: + if not inputs: + return text + def replacer(match): key = match.group(1) return str(inputs.get(key, f"{{{{{key}}}}}")) diff --git a/api/core/repositories/sqlalchemy_workflow_execution_repository.py b/api/core/repositories/sqlalchemy_workflow_execution_repository.py index c1a71b45d0..19086cffff 100644 --- a/api/core/repositories/sqlalchemy_workflow_execution_repository.py +++ b/api/core/repositories/sqlalchemy_workflow_execution_repository.py @@ -10,12 +10,12 @@ from sqlalchemy import select from sqlalchemy.engine import Engine from sqlalchemy.orm import sessionmaker -from core.workflow.entities.workflow_execution_entities import ( +from core.workflow.entities.workflow_execution import ( WorkflowExecution, WorkflowExecutionStatus, WorkflowType, ) -from core.workflow.repository.workflow_execution_repository import WorkflowExecutionRepository +from core.workflow.repositories.workflow_execution_repository import WorkflowExecutionRepository from models import ( Account, CreatorUserRole, @@ -104,10 +104,9 @@ class SQLAlchemyWorkflowExecutionRepository(WorkflowExecutionRepository): status = WorkflowExecutionStatus(db_model.status) return WorkflowExecution( - id=db_model.id, + id_=db_model.id, workflow_id=db_model.workflow_id, - sequence_number=db_model.sequence_number, - type=WorkflowType(db_model.type), + workflow_type=WorkflowType(db_model.type), workflow_version=db_model.version, graph=graph, inputs=inputs, @@ -140,14 +139,29 @@ class SQLAlchemyWorkflowExecutionRepository(WorkflowExecutionRepository): raise ValueError("created_by_role is required in repository constructor") db_model = WorkflowRun() - db_model.id = domain_model.id + db_model.id = domain_model.id_ db_model.tenant_id = self._tenant_id if self._app_id is not None: db_model.app_id = self._app_id db_model.workflow_id = domain_model.workflow_id db_model.triggered_from = self._triggered_from - db_model.sequence_number = domain_model.sequence_number - db_model.type = domain_model.type + + # Check if this is a new record + with self._session_factory() as session: + existing = session.scalar(select(WorkflowRun).where(WorkflowRun.id == domain_model.id_)) + if not existing: + # For new records, get the next sequence number + stmt = 
select(WorkflowRun.sequence_number).where( + WorkflowRun.app_id == self._app_id, + WorkflowRun.tenant_id == self._tenant_id, + ) + max_sequence = session.scalar(stmt.order_by(WorkflowRun.sequence_number.desc())) + db_model.sequence_number = (max_sequence or 0) + 1 + else: + # For updates, keep the existing sequence number + db_model.sequence_number = existing.sequence_number + + db_model.type = domain_model.workflow_type db_model.version = domain_model.workflow_version db_model.graph = json.dumps(domain_model.graph) if domain_model.graph else None db_model.inputs = json.dumps(domain_model.inputs) if domain_model.inputs else None diff --git a/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py b/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py index 8d916a19db..2f27442616 100644 --- a/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py +++ b/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py @@ -12,19 +12,18 @@ from sqlalchemy.engine import Engine from sqlalchemy.orm import sessionmaker from core.model_runtime.utils.encoders import jsonable_encoder -from core.workflow.entities.node_entities import NodeRunMetadataKey -from core.workflow.entities.node_execution_entities import ( - NodeExecution, - NodeExecutionStatus, +from core.workflow.entities.workflow_node_execution import ( + WorkflowNodeExecution, + WorkflowNodeExecutionMetadataKey, + WorkflowNodeExecutionStatus, ) from core.workflow.nodes.enums import NodeType -from core.workflow.repository.workflow_node_execution_repository import OrderConfig, WorkflowNodeExecutionRepository +from core.workflow.repositories.workflow_node_execution_repository import OrderConfig, WorkflowNodeExecutionRepository from models import ( Account, CreatorUserRole, EndUser, - WorkflowNodeExecution, - WorkflowNodeExecutionStatus, + WorkflowNodeExecutionModel, WorkflowNodeExecutionTriggeredFrom, ) @@ -87,9 +86,9 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) # Initialize in-memory cache for node executions # Key: node_execution_id, Value: WorkflowNodeExecution (DB model) - self._node_execution_cache: dict[str, WorkflowNodeExecution] = {} + self._node_execution_cache: dict[str, WorkflowNodeExecutionModel] = {} - def _to_domain_model(self, db_model: WorkflowNodeExecution) -> NodeExecution: + def _to_domain_model(self, db_model: WorkflowNodeExecutionModel) -> WorkflowNodeExecution: """ Convert a database model to a domain model. 
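# Standalone sketch of the sequence-number logic added in the
# sqlalchemy_workflow_execution_repository.py hunk above: new runs get
# max(sequence_number) + 1 per (tenant, app); updates keep the stored value.
# Toy table and in-memory SQLite only; the real repository works on WorkflowRun.
from sqlalchemy import Integer, String, create_engine, select
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column


class Base(DeclarativeBase):
    pass


class RunSketch(Base):
    __tablename__ = "run_sketch"
    id: Mapped[str] = mapped_column(String, primary_key=True)
    tenant_id: Mapped[str] = mapped_column(String)
    app_id: Mapped[str] = mapped_column(String)
    sequence_number: Mapped[int] = mapped_column(Integer)


def next_sequence_number(session: Session, tenant_id: str, app_id: str) -> int:
    stmt = (
        select(RunSketch.sequence_number)
        .where(RunSketch.tenant_id == tenant_id, RunSketch.app_id == app_id)
        .order_by(RunSketch.sequence_number.desc())
    )
    max_sequence = session.scalar(stmt)
    return (max_sequence or 0) + 1


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(RunSketch(id="run-1", tenant_id="t1", app_id="a1", sequence_number=1))
    session.commit()
    # Read-then-write: concurrent writers could race here, as in the pattern above.
    print(next_sequence_number(session, "t1", "a1"))  # 2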
@@ -103,16 +102,16 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) inputs = db_model.inputs_dict process_data = db_model.process_data_dict outputs = db_model.outputs_dict - metadata = {NodeRunMetadataKey(k): v for k, v in db_model.execution_metadata_dict.items()} + metadata = {WorkflowNodeExecutionMetadataKey(k): v for k, v in db_model.execution_metadata_dict.items()} # Convert status to domain enum - status = NodeExecutionStatus(db_model.status) + status = WorkflowNodeExecutionStatus(db_model.status) - return NodeExecution( + return WorkflowNodeExecution( id=db_model.id, node_execution_id=db_model.node_execution_id, workflow_id=db_model.workflow_id, - workflow_run_id=db_model.workflow_run_id, + workflow_execution_id=db_model.workflow_run_id, index=db_model.index, predecessor_node_id=db_model.predecessor_node_id, node_id=db_model.node_id, @@ -129,7 +128,7 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) finished_at=db_model.finished_at, ) - def to_db_model(self, domain_model: NodeExecution) -> WorkflowNodeExecution: + def to_db_model(self, domain_model: WorkflowNodeExecution) -> WorkflowNodeExecutionModel: """ Convert a domain model to a database model. @@ -147,14 +146,14 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) if not self._creator_user_role: raise ValueError("created_by_role is required in repository constructor") - db_model = WorkflowNodeExecution() + db_model = WorkflowNodeExecutionModel() db_model.id = domain_model.id db_model.tenant_id = self._tenant_id if self._app_id is not None: db_model.app_id = self._app_id db_model.workflow_id = domain_model.workflow_id db_model.triggered_from = self._triggered_from - db_model.workflow_run_id = domain_model.workflow_run_id + db_model.workflow_run_id = domain_model.workflow_execution_id db_model.index = domain_model.index db_model.predecessor_node_id = domain_model.predecessor_node_id db_model.node_execution_id = domain_model.node_execution_id @@ -176,7 +175,7 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) db_model.finished_at = domain_model.finished_at return db_model - def save(self, execution: NodeExecution) -> None: + def save(self, execution: WorkflowNodeExecution) -> None: """ Save or update a NodeExecution domain entity to the database. @@ -208,7 +207,7 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) logger.debug(f"Updating cache for node_execution_id: {db_model.node_execution_id}") self._node_execution_cache[db_model.node_execution_id] = db_model - def get_by_node_execution_id(self, node_execution_id: str) -> Optional[NodeExecution]: + def get_by_node_execution_id(self, node_execution_id: str) -> Optional[WorkflowNodeExecution]: """ Retrieve a NodeExecution by its node_execution_id. 
@@ -231,13 +230,13 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) # If not in cache, query the database logger.debug(f"Cache miss for node_execution_id: {node_execution_id}, querying database") with self._session_factory() as session: - stmt = select(WorkflowNodeExecution).where( - WorkflowNodeExecution.node_execution_id == node_execution_id, - WorkflowNodeExecution.tenant_id == self._tenant_id, + stmt = select(WorkflowNodeExecutionModel).where( + WorkflowNodeExecutionModel.node_execution_id == node_execution_id, + WorkflowNodeExecutionModel.tenant_id == self._tenant_id, ) if self._app_id: - stmt = stmt.where(WorkflowNodeExecution.app_id == self._app_id) + stmt = stmt.where(WorkflowNodeExecutionModel.app_id == self._app_id) db_model = session.scalar(stmt) if db_model: @@ -253,7 +252,7 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) self, workflow_run_id: str, order_config: Optional[OrderConfig] = None, - ) -> Sequence[WorkflowNodeExecution]: + ) -> Sequence[WorkflowNodeExecutionModel]: """ Retrieve all WorkflowNodeExecution database models for a specific workflow run. @@ -271,20 +270,20 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) A list of WorkflowNodeExecution database models """ with self._session_factory() as session: - stmt = select(WorkflowNodeExecution).where( - WorkflowNodeExecution.workflow_run_id == workflow_run_id, - WorkflowNodeExecution.tenant_id == self._tenant_id, - WorkflowNodeExecution.triggered_from == WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, + stmt = select(WorkflowNodeExecutionModel).where( + WorkflowNodeExecutionModel.workflow_run_id == workflow_run_id, + WorkflowNodeExecutionModel.tenant_id == self._tenant_id, + WorkflowNodeExecutionModel.triggered_from == WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, ) if self._app_id: - stmt = stmt.where(WorkflowNodeExecution.app_id == self._app_id) + stmt = stmt.where(WorkflowNodeExecutionModel.app_id == self._app_id) # Apply ordering if provided if order_config and order_config.order_by: order_columns: list[UnaryExpression] = [] for field in order_config.order_by: - column = getattr(WorkflowNodeExecution, field, None) + column = getattr(WorkflowNodeExecutionModel, field, None) if not column: continue if order_config.order_direction == "desc": @@ -308,7 +307,7 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) self, workflow_run_id: str, order_config: Optional[OrderConfig] = None, - ) -> Sequence[NodeExecution]: + ) -> Sequence[WorkflowNodeExecution]: """ Retrieve all NodeExecution instances for a specific workflow run. @@ -335,7 +334,7 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) return domain_models - def get_running_executions(self, workflow_run_id: str) -> Sequence[NodeExecution]: + def get_running_executions(self, workflow_run_id: str) -> Sequence[WorkflowNodeExecution]: """ Retrieve all running NodeExecution instances for a specific workflow run. 
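# Sketch of the cache-aside lookup used by the node-execution repository hunks
# above: check the per-repository dict first, otherwise run a tenant/app-scoped
# SELECT and populate the cache. `NodeExecutionSketch` stands in for the renamed
# WorkflowNodeExecutionModel; the table layout here is an assumption.
from typing import Optional

from sqlalchemy import String, create_engine, select
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, sessionmaker


class Base(DeclarativeBase):
    pass


class NodeExecutionSketch(Base):
    __tablename__ = "node_execution_sketch"
    id: Mapped[str] = mapped_column(String, primary_key=True)
    node_execution_id: Mapped[str] = mapped_column(String, index=True)
    tenant_id: Mapped[str] = mapped_column(String)
    app_id: Mapped[Optional[str]] = mapped_column(String, nullable=True)


class RepositorySketch:
    def __init__(self, session_factory: sessionmaker, tenant_id: str, app_id: Optional[str] = None):
        self._session_factory = session_factory
        self._tenant_id = tenant_id
        self._app_id = app_id
        self._cache: dict[str, NodeExecutionSketch] = {}

    def get_by_node_execution_id(self, node_execution_id: str) -> Optional[NodeExecutionSketch]:
        if node_execution_id in self._cache:
            return self._cache[node_execution_id]
        with self._session_factory() as session:
            stmt = select(NodeExecutionSketch).where(
                NodeExecutionSketch.node_execution_id == node_execution_id,
                NodeExecutionSketch.tenant_id == self._tenant_id,
            )
            if self._app_id:
                stmt = stmt.where(NodeExecutionSketch.app_id == self._app_id)
            db_model = session.scalar(stmt)
        if db_model is not None:
            self._cache[node_execution_id] = db_model
        return db_model


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
repo = RepositorySketch(sessionmaker(bind=engine), tenant_id="t1")
print(repo.get_by_node_execution_id("missing"))  # None, and nothing is cached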
@@ -349,15 +348,15 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) A list of running NodeExecution instances """ with self._session_factory() as session: - stmt = select(WorkflowNodeExecution).where( - WorkflowNodeExecution.workflow_run_id == workflow_run_id, - WorkflowNodeExecution.tenant_id == self._tenant_id, - WorkflowNodeExecution.status == WorkflowNodeExecutionStatus.RUNNING, - WorkflowNodeExecution.triggered_from == WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, + stmt = select(WorkflowNodeExecutionModel).where( + WorkflowNodeExecutionModel.workflow_run_id == workflow_run_id, + WorkflowNodeExecutionModel.tenant_id == self._tenant_id, + WorkflowNodeExecutionModel.status == WorkflowNodeExecutionStatus.RUNNING, + WorkflowNodeExecutionModel.triggered_from == WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, ) if self._app_id: - stmt = stmt.where(WorkflowNodeExecution.app_id == self._app_id) + stmt = stmt.where(WorkflowNodeExecutionModel.app_id == self._app_id) db_models = session.scalars(stmt).all() domain_models = [] @@ -382,10 +381,10 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) It also clears the in-memory cache. """ with self._session_factory() as session: - stmt = delete(WorkflowNodeExecution).where(WorkflowNodeExecution.tenant_id == self._tenant_id) + stmt = delete(WorkflowNodeExecutionModel).where(WorkflowNodeExecutionModel.tenant_id == self._tenant_id) if self._app_id: - stmt = stmt.where(WorkflowNodeExecution.app_id == self._app_id) + stmt = stmt.where(WorkflowNodeExecutionModel.app_id == self._app_id) result = session.execute(stmt) session.commit() diff --git a/api/core/tools/entities/tool_entities.py b/api/core/tools/entities/tool_entities.py index 37375f4a71..03047c0545 100644 --- a/api/core/tools/entities/tool_entities.py +++ b/api/core/tools/entities/tool_entities.py @@ -279,7 +279,6 @@ class ToolParameter(PluginParameter): :param options: the options of the parameter """ # convert options to ToolParameterOption - # FIXME fix the type error if options: option_objs = [ PluginParameterOption(value=option, label=I18nObject(en_US=option, zh_Hans=option)) diff --git a/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py b/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py index 04437ea6d8..93d3fcc49d 100644 --- a/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py +++ b/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py @@ -8,6 +8,7 @@ from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCa from core.model_manager import ModelManager from core.model_runtime.entities.model_entities import ModelType from core.rag.datasource.retrieval_service import RetrievalService +from core.rag.entities.citation_metadata import RetrievalSourceMetadata from core.rag.models.document import Document as RagDocument from core.rag.rerank.rerank_model import RerankModelRunner from core.rag.retrieval.retrieval_methods import RetrievalMethod @@ -107,7 +108,7 @@ class DatasetMultiRetrieverTool(DatasetRetrieverBaseTool): else: document_context_list.append(segment.get_sign_content()) if self.return_resource: - context_list = [] + context_list: list[RetrievalSourceMetadata] = [] resource_number = 1 for segment in sorted_segments: dataset = db.session.query(Dataset).filter_by(id=segment.dataset_id).first() @@ -121,28 +122,28 @@ class DatasetMultiRetrieverTool(DatasetRetrieverBaseTool): .first() ) if dataset and document: - source = { 
- "position": resource_number, - "dataset_id": dataset.id, - "dataset_name": dataset.name, - "document_id": document.id, - "document_name": document.name, - "data_source_type": document.data_source_type, - "segment_id": segment.id, - "retriever_from": self.retriever_from, - "score": document_score_list.get(segment.index_node_id, None), - "doc_metadata": document.doc_metadata, - } + source = RetrievalSourceMetadata( + position=resource_number, + dataset_id=dataset.id, + dataset_name=dataset.name, + document_id=document.id, + document_name=document.name, + data_source_type=document.data_source_type, + segment_id=segment.id, + retriever_from=self.retriever_from, + score=document_score_list.get(segment.index_node_id, None), + doc_metadata=document.doc_metadata, + ) if self.retriever_from == "dev": - source["hit_count"] = segment.hit_count - source["word_count"] = segment.word_count - source["segment_position"] = segment.position - source["index_node_hash"] = segment.index_node_hash + source.hit_count = segment.hit_count + source.word_count = segment.word_count + source.segment_position = segment.position + source.index_node_hash = segment.index_node_hash if segment.answer: - source["content"] = f"question:{segment.content} \nanswer:{segment.answer}" + source.content = f"question:{segment.content} \nanswer:{segment.answer}" else: - source["content"] = segment.content + source.content = segment.content context_list.append(source) resource_number += 1 diff --git a/api/core/tools/utils/dataset_retriever/dataset_retriever_tool.py b/api/core/tools/utils/dataset_retriever/dataset_retriever_tool.py index 7b6882ed52..ff1d9021ce 100644 --- a/api/core/tools/utils/dataset_retriever/dataset_retriever_tool.py +++ b/api/core/tools/utils/dataset_retriever/dataset_retriever_tool.py @@ -4,6 +4,7 @@ from pydantic import BaseModel, Field from core.app.app_config.entities import DatasetRetrieveConfigEntity, ModelConfig from core.rag.datasource.retrieval_service import RetrievalService +from core.rag.entities.citation_metadata import RetrievalSourceMetadata from core.rag.entities.context_entities import DocumentContext from core.rag.models.document import Document as RetrievalDocument from core.rag.retrieval.dataset_retrieval import DatasetRetrieval @@ -14,7 +15,7 @@ from models.dataset import Dataset from models.dataset import Document as DatasetDocument from services.external_knowledge_service import ExternalDatasetService -default_retrieval_model = { +default_retrieval_model: dict[str, Any] = { "search_method": RetrievalMethod.SEMANTIC_SEARCH.value, "reranking_enable": False, "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""}, @@ -79,7 +80,7 @@ class DatasetRetrieverTool(DatasetRetrieverBaseTool): else: document_ids_filter = None if dataset.provider == "external": - results = [] + results: list[RetrievalDocument] = [] external_documents = ExternalDatasetService.fetch_external_knowledge_retrieval( tenant_id=dataset.tenant_id, dataset_id=dataset.id, @@ -100,21 +101,21 @@ class DatasetRetrieverTool(DatasetRetrieverBaseTool): document.metadata["dataset_name"] = dataset.name results.append(document) # deal with external documents - context_list = [] + context_list: list[RetrievalSourceMetadata] = [] for position, item in enumerate(results, start=1): if item.metadata is not None: - source = { - "position": position, - "dataset_id": item.metadata.get("dataset_id"), - "dataset_name": item.metadata.get("dataset_name"), - "document_id": item.metadata.get("document_id") or 
item.metadata.get("title"), - "document_name": item.metadata.get("title"), - "data_source_type": "external", - "retriever_from": self.retriever_from, - "score": item.metadata.get("score"), - "title": item.metadata.get("title"), - "content": item.page_content, - } + source = RetrievalSourceMetadata( + position=position, + dataset_id=item.metadata.get("dataset_id"), + dataset_name=item.metadata.get("dataset_name"), + document_id=item.metadata.get("document_id") or item.metadata.get("title"), + document_name=item.metadata.get("title"), + data_source_type="external", + retriever_from=self.retriever_from, + score=item.metadata.get("score"), + title=item.metadata.get("title"), + content=item.page_content, + ) context_list.append(source) for hit_callback in self.hit_callbacks: hit_callback.return_retriever_resource_info(context_list) @@ -125,7 +126,7 @@ class DatasetRetrieverTool(DatasetRetrieverBaseTool): return "" # get retrieval model , if the model is not setting , using default retrieval_model: dict[str, Any] = dataset.retrieval_model or default_retrieval_model - retrieval_resource_list = [] + retrieval_resource_list: list[RetrievalSourceMetadata] = [] if dataset.indexing_technique == "economy": # use keyword table query documents = RetrievalService.retrieve( @@ -163,7 +164,7 @@ class DatasetRetrieverTool(DatasetRetrieverBaseTool): for item in documents: if item.metadata is not None and item.metadata.get("score"): document_score_list[item.metadata["doc_id"]] = item.metadata["score"] - document_context_list = [] + document_context_list: list[DocumentContext] = [] records = RetrievalService.format_retrieval_documents(documents) if records: for record in records: @@ -197,37 +198,37 @@ class DatasetRetrieverTool(DatasetRetrieverBaseTool): .first() ) if dataset and document: - source = { - "dataset_id": dataset.id, - "dataset_name": dataset.name, - "document_id": document.id, # type: ignore - "document_name": document.name, # type: ignore - "data_source_type": document.data_source_type, # type: ignore - "segment_id": segment.id, - "retriever_from": self.retriever_from, - "score": record.score or 0.0, - "doc_metadata": document.doc_metadata, # type: ignore - } + source = RetrievalSourceMetadata( + dataset_id=dataset.id, + dataset_name=dataset.name, + document_id=document.id, # type: ignore + document_name=document.name, # type: ignore + data_source_type=document.data_source_type, # type: ignore + segment_id=segment.id, + retriever_from=self.retriever_from, + score=record.score or 0.0, + doc_metadata=document.doc_metadata, # type: ignore + ) if self.retriever_from == "dev": - source["hit_count"] = segment.hit_count - source["word_count"] = segment.word_count - source["segment_position"] = segment.position - source["index_node_hash"] = segment.index_node_hash + source.hit_count = segment.hit_count + source.word_count = segment.word_count + source.segment_position = segment.position + source.index_node_hash = segment.index_node_hash if segment.answer: - source["content"] = f"question:{segment.content} \nanswer:{segment.answer}" + source.content = f"question:{segment.content} \nanswer:{segment.answer}" else: - source["content"] = segment.content + source.content = segment.content retrieval_resource_list.append(source) if self.return_resource and retrieval_resource_list: retrieval_resource_list = sorted( retrieval_resource_list, - key=lambda x: x.get("score") or 0.0, + key=lambda x: x.score or 0.0, reverse=True, ) for position, item in enumerate(retrieval_resource_list, start=1): # type: ignore - 
item["position"] = position # type: ignore + item.position = position # type: ignore for hit_callback in self.hit_callbacks: hit_callback.return_retriever_resource_info(retrieval_resource_list) if document_context_list: diff --git a/api/core/tools/utils/message_transformer.py b/api/core/tools/utils/message_transformer.py index 257d96133e..80e8b54343 100644 --- a/api/core/tools/utils/message_transformer.py +++ b/api/core/tools/utils/message_transformer.py @@ -66,7 +66,6 @@ class ToolFileMessageTransformer: if not isinstance(message.message, ToolInvokeMessage.BlobMessage): raise ValueError("unexpected message type") - # FIXME: should do a type check here. assert isinstance(message.message.blob, bytes) tool_file_manager = ToolFileManager() file = tool_file_manager.create_file_by_raw( diff --git a/api/core/workflow/entities/node_entities.py b/api/core/workflow/entities/node_entities.py index 82fd6cdc30..687ec8e47c 100644 --- a/api/core/workflow/entities/node_entities.py +++ b/api/core/workflow/entities/node_entities.py @@ -1,36 +1,10 @@ from collections.abc import Mapping -from enum import StrEnum from typing import Any, Optional from pydantic import BaseModel from core.model_runtime.entities.llm_entities import LLMUsage -from models.workflow import WorkflowNodeExecutionStatus - - -class NodeRunMetadataKey(StrEnum): - """ - Node Run Metadata Key. - """ - - TOTAL_TOKENS = "total_tokens" - TOTAL_PRICE = "total_price" - CURRENCY = "currency" - TOOL_INFO = "tool_info" - AGENT_LOG = "agent_log" - ITERATION_ID = "iteration_id" - ITERATION_INDEX = "iteration_index" - LOOP_ID = "loop_id" - LOOP_INDEX = "loop_index" - PARALLEL_ID = "parallel_id" - PARALLEL_START_NODE_ID = "parallel_start_node_id" - PARENT_PARALLEL_ID = "parent_parallel_id" - PARENT_PARALLEL_START_NODE_ID = "parent_parallel_start_node_id" - PARALLEL_MODE_RUN_ID = "parallel_mode_run_id" - ITERATION_DURATION_MAP = "iteration_duration_map" # single iteration duration if iteration node runs - LOOP_DURATION_MAP = "loop_duration_map" # single loop duration if loop node runs - ERROR_STRATEGY = "error_strategy" # node in continue on error mode return the field - LOOP_VARIABLE_MAP = "loop_variable_map" # single loop variable output +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus class NodeRunResult(BaseModel): @@ -43,7 +17,7 @@ class NodeRunResult(BaseModel): inputs: Optional[Mapping[str, Any]] = None # node inputs process_data: Optional[Mapping[str, Any]] = None # process data outputs: Optional[Mapping[str, Any]] = None # node outputs - metadata: Optional[Mapping[NodeRunMetadataKey, Any]] = None # node metadata + metadata: Optional[Mapping[WorkflowNodeExecutionMetadataKey, Any]] = None # node metadata llm_usage: Optional[LLMUsage] = None # llm usage edge_source_handle: Optional[str] = None # source handle id of node with multiple branches diff --git a/api/core/workflow/entities/workflow_execution_entities.py b/api/core/workflow/entities/workflow_execution.py similarity index 89% rename from api/core/workflow/entities/workflow_execution_entities.py rename to api/core/workflow/entities/workflow_execution.py index 200d4697b5..781be4b3c6 100644 --- a/api/core/workflow/entities/workflow_execution_entities.py +++ b/api/core/workflow/entities/workflow_execution.py @@ -36,12 +36,10 @@ class WorkflowExecution(BaseModel): user, tenant, and app attributes. """ - id: str = Field(...) + id_: str = Field(...) workflow_id: str = Field(...) workflow_version: str = Field(...) 
- sequence_number: int = Field(...) - - type: WorkflowType = Field(...) + workflow_type: WorkflowType = Field(...) graph: Mapping[str, Any] = Field(...) inputs: Mapping[str, Any] = Field(...) @@ -69,20 +67,18 @@ class WorkflowExecution(BaseModel): def new( cls, *, - id: str, + id_: str, workflow_id: str, - sequence_number: int, - type: WorkflowType, + workflow_type: WorkflowType, workflow_version: str, graph: Mapping[str, Any], inputs: Mapping[str, Any], started_at: datetime, ) -> "WorkflowExecution": return WorkflowExecution( - id=id, + id_=id_, workflow_id=workflow_id, - sequence_number=sequence_number, - type=type, + workflow_type=workflow_type, workflow_version=workflow_version, graph=graph, inputs=inputs, diff --git a/api/core/workflow/entities/node_execution_entities.py b/api/core/workflow/entities/workflow_node_execution.py similarity index 68% rename from api/core/workflow/entities/node_execution_entities.py rename to api/core/workflow/entities/workflow_node_execution.py index 5e5ead062f..773f5b777b 100644 --- a/api/core/workflow/entities/node_execution_entities.py +++ b/api/core/workflow/entities/workflow_node_execution.py @@ -13,11 +13,35 @@ from typing import Any, Optional from pydantic import BaseModel, Field -from core.workflow.entities.node_entities import NodeRunMetadataKey from core.workflow.nodes.enums import NodeType -class NodeExecutionStatus(StrEnum): +class WorkflowNodeExecutionMetadataKey(StrEnum): + """ + Node Run Metadata Key. + """ + + TOTAL_TOKENS = "total_tokens" + TOTAL_PRICE = "total_price" + CURRENCY = "currency" + TOOL_INFO = "tool_info" + AGENT_LOG = "agent_log" + ITERATION_ID = "iteration_id" + ITERATION_INDEX = "iteration_index" + LOOP_ID = "loop_id" + LOOP_INDEX = "loop_index" + PARALLEL_ID = "parallel_id" + PARALLEL_START_NODE_ID = "parallel_start_node_id" + PARENT_PARALLEL_ID = "parent_parallel_id" + PARENT_PARALLEL_START_NODE_ID = "parent_parallel_start_node_id" + PARALLEL_MODE_RUN_ID = "parallel_mode_run_id" + ITERATION_DURATION_MAP = "iteration_duration_map" # single iteration duration if iteration node runs + LOOP_DURATION_MAP = "loop_duration_map" # single loop duration if loop node runs + ERROR_STRATEGY = "error_strategy" # node in continue on error mode return the field + LOOP_VARIABLE_MAP = "loop_variable_map" # single loop variable output + + +class WorkflowNodeExecutionStatus(StrEnum): """ Node Execution Status Enum. """ @@ -29,7 +53,7 @@ class NodeExecutionStatus(StrEnum): RETRY = "retry" -class NodeExecution(BaseModel): +class WorkflowNodeExecution(BaseModel): """ Domain model for workflow node execution. 
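# Why a StrEnum works as the metadata key type in the hunks above (Python 3.11+):
# members are real strings, so the `{WorkflowNodeExecutionMetadataKey(k): v ...}`
# round-trip done earlier in this diff (_to_domain_model) is lossless. A
# standalone check with a trimmed-down stand-in enum:
from enum import StrEnum


class MetadataKeySketch(StrEnum):
    # Subset of WorkflowNodeExecutionMetadataKey from the hunk above.
    TOTAL_TOKENS = "total_tokens"
    TOTAL_PRICE = "total_price"
    CURRENCY = "currency"


raw_from_db = {"total_tokens": 123, "currency": "USD"}
typed = {MetadataKeySketch(k): v for k, v in raw_from_db.items()}

assert typed[MetadataKeySketch.TOTAL_TOKENS] == 123
assert MetadataKeySketch.TOTAL_TOKENS == "total_tokens"  # StrEnum members compare equal to str
assert {str(k): v for k, v in typed.items()} == raw_from_db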
@@ -46,7 +70,7 @@ class NodeExecution(BaseModel): id: str # Unique identifier for this execution record node_execution_id: Optional[str] = None # Optional secondary ID for cross-referencing workflow_id: str # ID of the workflow this node belongs to - workflow_run_id: Optional[str] = None # ID of the specific workflow run (null for single-step debugging) + workflow_execution_id: Optional[str] = None # ID of the specific workflow run (null for single-step debugging) # Execution positioning and flow index: int # Sequence number for ordering in trace visualization @@ -61,12 +85,12 @@ class NodeExecution(BaseModel): outputs: Optional[Mapping[str, Any]] = None # Output variables produced by this node # Execution state - status: NodeExecutionStatus = NodeExecutionStatus.RUNNING # Current execution status + status: WorkflowNodeExecutionStatus = WorkflowNodeExecutionStatus.RUNNING # Current execution status error: Optional[str] = None # Error message if execution failed elapsed_time: float = Field(default=0.0) # Time taken for execution in seconds # Additional metadata - metadata: Optional[Mapping[NodeRunMetadataKey, Any]] = None # Execution metadata (tokens, cost, etc.) + metadata: Optional[Mapping[WorkflowNodeExecutionMetadataKey, Any]] = None # Execution metadata (tokens, cost, etc.) # Timing information created_at: datetime # When execution started @@ -77,7 +101,7 @@ class NodeExecution(BaseModel): inputs: Optional[Mapping[str, Any]] = None, process_data: Optional[Mapping[str, Any]] = None, outputs: Optional[Mapping[str, Any]] = None, - metadata: Optional[Mapping[NodeRunMetadataKey, Any]] = None, + metadata: Optional[Mapping[WorkflowNodeExecutionMetadataKey, Any]] = None, ) -> None: """ Update the model from mappings. diff --git a/api/core/workflow/enums.py b/api/core/workflow/enums.py index 9642efa1a5..b52a2b0e6e 100644 --- a/api/core/workflow/enums.py +++ b/api/core/workflow/enums.py @@ -13,4 +13,4 @@ class SystemVariableKey(StrEnum): DIALOGUE_COUNT = "dialogue_count" APP_ID = "app_id" WORKFLOW_ID = "workflow_id" - WORKFLOW_RUN_ID = "workflow_run_id" + WORKFLOW_EXECUTION_ID = "workflow_run_id" diff --git a/api/core/workflow/graph_engine/entities/event.py b/api/core/workflow/graph_engine/entities/event.py index 689a07c4f6..9a4939502e 100644 --- a/api/core/workflow/graph_engine/entities/event.py +++ b/api/core/workflow/graph_engine/entities/event.py @@ -1,9 +1,10 @@ -from collections.abc import Mapping +from collections.abc import Mapping, Sequence from datetime import datetime from typing import Any, Optional from pydantic import BaseModel, Field +from core.rag.entities.citation_metadata import RetrievalSourceMetadata from core.workflow.entities.node_entities import AgentNodeStrategyInit from core.workflow.graph_engine.entities.runtime_route_state import RouteNodeState from core.workflow.nodes import NodeType @@ -82,7 +83,7 @@ class NodeRunStreamChunkEvent(BaseNodeEvent): class NodeRunRetrieverResourceEvent(BaseNodeEvent): - retriever_resources: list[dict] = Field(..., description="retriever resources") + retriever_resources: Sequence[RetrievalSourceMetadata] = Field(..., description="retriever resources") context: str = Field(..., description="context") diff --git a/api/core/workflow/graph_engine/entities/runtime_route_state.py b/api/core/workflow/graph_engine/entities/runtime_route_state.py index 7683dcc9dc..f2d9c98936 100644 --- a/api/core/workflow/graph_engine/entities/runtime_route_state.py +++ b/api/core/workflow/graph_engine/entities/runtime_route_state.py @@ -6,7 +6,7 @@ from typing 
import Optional from pydantic import BaseModel, Field from core.workflow.entities.node_entities import NodeRunResult -from models.workflow import WorkflowNodeExecutionStatus +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus class RouteNodeState(BaseModel): diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index f61965e07e..3eb99fde81 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -14,8 +14,9 @@ from flask import Flask, current_app, has_request_context from configs import dify_config from core.app.apps.base_app_queue_manager import GenerateTaskStoppedError from core.app.entities.app_invoke_entities import InvokeFrom -from core.workflow.entities.node_entities import AgentNodeStrategyInit, NodeRunMetadataKey, NodeRunResult +from core.workflow.entities.node_entities import AgentNodeStrategyInit, NodeRunResult from core.workflow.entities.variable_pool import VariablePool, VariableValue +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus from core.workflow.graph_engine.condition_handlers.condition_manager import ConditionManager from core.workflow.graph_engine.entities.event import ( BaseAgentEvent, @@ -54,7 +55,7 @@ from core.workflow.nodes.event import RunCompletedEvent, RunRetrieverResourceEve from core.workflow.nodes.node_mapping import NODE_TYPE_CLASSES_MAPPING from extensions.ext_database import db from models.enums import UserFrom -from models.workflow import WorkflowNodeExecutionStatus, WorkflowType +from models.workflow import WorkflowType logger = logging.getLogger(__name__) @@ -759,10 +760,12 @@ class GraphEngine: and node_instance.node_data.error_strategy is ErrorStrategy.FAIL_BRANCH ): run_result.edge_source_handle = FailBranchSourceHandle.SUCCESS - if run_result.metadata and run_result.metadata.get(NodeRunMetadataKey.TOTAL_TOKENS): + if run_result.metadata and run_result.metadata.get( + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS + ): # plus state total_tokens self.graph_runtime_state.total_tokens += int( - run_result.metadata.get(NodeRunMetadataKey.TOTAL_TOKENS) # type: ignore[arg-type] + run_result.metadata.get(WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS) # type: ignore[arg-type] ) if run_result.llm_usage: @@ -785,13 +788,17 @@ class GraphEngine: if parallel_id and parallel_start_node_id: metadata_dict = dict(run_result.metadata) - metadata_dict[NodeRunMetadataKey.PARALLEL_ID] = parallel_id - metadata_dict[NodeRunMetadataKey.PARALLEL_START_NODE_ID] = parallel_start_node_id + metadata_dict[WorkflowNodeExecutionMetadataKey.PARALLEL_ID] = parallel_id + metadata_dict[WorkflowNodeExecutionMetadataKey.PARALLEL_START_NODE_ID] = ( + parallel_start_node_id + ) if parent_parallel_id and parent_parallel_start_node_id: - metadata_dict[NodeRunMetadataKey.PARENT_PARALLEL_ID] = parent_parallel_id - metadata_dict[NodeRunMetadataKey.PARENT_PARALLEL_START_NODE_ID] = ( - parent_parallel_start_node_id + metadata_dict[WorkflowNodeExecutionMetadataKey.PARENT_PARALLEL_ID] = ( + parent_parallel_id ) + metadata_dict[ + WorkflowNodeExecutionMetadataKey.PARENT_PARALLEL_START_NODE_ID + ] = parent_parallel_start_node_id run_result.metadata = metadata_dict yield NodeRunSucceededEvent( @@ -923,7 +930,7 @@ class GraphEngine: "error": error_result.error, "inputs": error_result.inputs, "metadata": { - NodeRunMetadataKey.ERROR_STRATEGY: node_instance.node_data.error_strategy, + 
WorkflowNodeExecutionMetadataKey.ERROR_STRATEGY: node_instance.node_data.error_strategy, }, } diff --git a/api/core/workflow/nodes/agent/agent_node.py b/api/core/workflow/nodes/agent/agent_node.py index 9bed8862fc..30b17cbd84 100644 --- a/api/core/workflow/nodes/agent/agent_node.py +++ b/api/core/workflow/nodes/agent/agent_node.py @@ -15,6 +15,7 @@ from core.tools.tool_manager import ToolManager from core.variables.segments import StringSegment from core.workflow.entities.node_entities import NodeRunResult from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.enums import SystemVariableKey from core.workflow.nodes.agent.entities import AgentNodeData, AgentOldVersionModelFeatures, ParamsAutoGenerated from core.workflow.nodes.base.entities import BaseNodeData @@ -25,7 +26,6 @@ from core.workflow.utils.variable_template_parser import VariableTemplateParser from extensions.ext_database import db from factories.agent_factory import get_plugin_agent_strategy from models.model import Conversation -from models.workflow import WorkflowNodeExecutionStatus class AgentNode(ToolNode): diff --git a/api/core/workflow/nodes/answer/answer_node.py b/api/core/workflow/nodes/answer/answer_node.py index 520cbdbb60..aa030870e2 100644 --- a/api/core/workflow/nodes/answer/answer_node.py +++ b/api/core/workflow/nodes/answer/answer_node.py @@ -3,6 +3,7 @@ from typing import Any, cast from core.variables import ArrayFileSegment, FileSegment from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.answer.answer_stream_generate_router import AnswerStreamGeneratorRouter from core.workflow.nodes.answer.entities import ( AnswerNodeData, @@ -13,7 +14,6 @@ from core.workflow.nodes.answer.entities import ( from core.workflow.nodes.base import BaseNode from core.workflow.nodes.enums import NodeType from core.workflow.utils.variable_template_parser import VariableTemplateParser -from models.workflow import WorkflowNodeExecutionStatus class AnswerNode(BaseNode[AnswerNodeData]): diff --git a/api/core/workflow/nodes/base/node.py b/api/core/workflow/nodes/base/node.py index e566770870..7da0c19740 100644 --- a/api/core/workflow/nodes/base/node.py +++ b/api/core/workflow/nodes/base/node.py @@ -4,9 +4,9 @@ from collections.abc import Generator, Mapping, Sequence from typing import TYPE_CHECKING, Any, Generic, Optional, TypeVar, Union, cast from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.enums import CONTINUE_ON_ERROR_NODE_TYPE, RETRY_ON_ERROR_NODE_TYPE, NodeType from core.workflow.nodes.event import NodeEvent, RunCompletedEvent -from models.workflow import WorkflowNodeExecutionStatus from .entities import BaseNodeData diff --git a/api/core/workflow/nodes/code/code_node.py b/api/core/workflow/nodes/code/code_node.py index 804c05f9f4..61c08a7d71 100644 --- a/api/core/workflow/nodes/code/code_node.py +++ b/api/core/workflow/nodes/code/code_node.py @@ -8,10 +8,10 @@ from core.helper.code_executor.javascript.javascript_code_provider import Javasc from core.helper.code_executor.python3.python3_code_provider import Python3CodeProvider from core.variables.segments import ArrayFileSegment from core.workflow.entities.node_entities import NodeRunResult +from 
core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.base import BaseNode from core.workflow.nodes.code.entities import CodeNodeData from core.workflow.nodes.enums import NodeType -from models.workflow import WorkflowNodeExecutionStatus from .exc import ( CodeNodeError, @@ -167,8 +167,11 @@ class CodeNode(BaseNode[CodeNodeData]): value=value, variable=f"{prefix}.{output_name}[{i}]" if prefix else f"{output_name}[{i}]", ) - elif isinstance(first_element, dict) and all( - value is None or isinstance(value, dict) for value in output_value + elif ( + isinstance(first_element, dict) + and all(value is None or isinstance(value, dict) for value in output_value) + or isinstance(first_element, list) + and all(value is None or isinstance(value, list) for value in output_value) ): for i, value in enumerate(output_value): if value is not None: diff --git a/api/core/workflow/nodes/document_extractor/node.py b/api/core/workflow/nodes/document_extractor/node.py index 65b5623a2e..d39eb9c932 100644 --- a/api/core/workflow/nodes/document_extractor/node.py +++ b/api/core/workflow/nodes/document_extractor/node.py @@ -26,9 +26,9 @@ from core.helper import ssrf_proxy from core.variables import ArrayFileSegment from core.variables.segments import FileSegment from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.base import BaseNode from core.workflow.nodes.enums import NodeType -from models.workflow import WorkflowNodeExecutionStatus from .entities import DocumentExtractorNodeData from .exc import DocumentExtractorError, FileDownloadError, TextExtractionError, UnsupportedFileTypeError diff --git a/api/core/workflow/nodes/end/end_node.py b/api/core/workflow/nodes/end/end_node.py index 6acc915ab5..0e9756b243 100644 --- a/api/core/workflow/nodes/end/end_node.py +++ b/api/core/workflow/nodes/end/end_node.py @@ -1,8 +1,8 @@ from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.base import BaseNode from core.workflow.nodes.end.entities import EndNodeData from core.workflow.nodes.enums import NodeType -from models.workflow import WorkflowNodeExecutionStatus class EndNode(BaseNode[EndNodeData]): diff --git a/api/core/workflow/nodes/event/event.py b/api/core/workflow/nodes/event/event.py index 9fea3fbda3..b72d111f49 100644 --- a/api/core/workflow/nodes/event/event.py +++ b/api/core/workflow/nodes/event/event.py @@ -1,10 +1,12 @@ +from collections.abc import Sequence from datetime import datetime from pydantic import BaseModel, Field from core.model_runtime.entities.llm_entities import LLMUsage +from core.rag.entities.citation_metadata import RetrievalSourceMetadata from core.workflow.entities.node_entities import NodeRunResult -from models.workflow import WorkflowNodeExecutionStatus +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus class RunCompletedEvent(BaseModel): @@ -17,7 +19,7 @@ class RunStreamChunkEvent(BaseModel): class RunRetrieverResourceEvent(BaseModel): - retriever_resources: list[dict] = Field(..., description="retriever resources") + retriever_resources: Sequence[RetrievalSourceMetadata] = Field(..., description="retriever resources") context: str = Field(..., description="context") diff --git a/api/core/workflow/nodes/http_request/node.py 
b/api/core/workflow/nodes/http_request/node.py index 1c82637974..6b1ac57c06 100644 --- a/api/core/workflow/nodes/http_request/node.py +++ b/api/core/workflow/nodes/http_request/node.py @@ -8,12 +8,12 @@ from core.file import File, FileTransferMethod from core.tools.tool_file_manager import ToolFileManager from core.workflow.entities.node_entities import NodeRunResult from core.workflow.entities.variable_entities import VariableSelector +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.base import BaseNode from core.workflow.nodes.enums import NodeType from core.workflow.nodes.http_request.executor import Executor from core.workflow.utils import variable_template_parser from factories import file_factory -from models.workflow import WorkflowNodeExecutionStatus from .entities import ( HttpRequestNodeData, diff --git a/api/core/workflow/nodes/if_else/if_else_node.py b/api/core/workflow/nodes/if_else/if_else_node.py index cb51b1ddd5..976922f75d 100644 --- a/api/core/workflow/nodes/if_else/if_else_node.py +++ b/api/core/workflow/nodes/if_else/if_else_node.py @@ -4,12 +4,12 @@ from typing_extensions import deprecated from core.workflow.entities.node_entities import NodeRunResult from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.base import BaseNode from core.workflow.nodes.enums import NodeType from core.workflow.nodes.if_else.entities import IfElseNodeData from core.workflow.utils.condition.entities import Condition from core.workflow.utils.condition.processor import ConditionProcessor -from models.workflow import WorkflowNodeExecutionStatus class IfElseNode(BaseNode[IfElseNodeData]): diff --git a/api/core/workflow/nodes/iteration/iteration_node.py b/api/core/workflow/nodes/iteration/iteration_node.py index ea0b6863c9..2592823540 100644 --- a/api/core/workflow/nodes/iteration/iteration_node.py +++ b/api/core/workflow/nodes/iteration/iteration_node.py @@ -12,10 +12,10 @@ from flask import Flask, current_app, has_request_context from configs import dify_config from core.variables import ArrayVariable, IntegerVariable, NoneVariable from core.workflow.entities.node_entities import ( - NodeRunMetadataKey, NodeRunResult, ) from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus from core.workflow.graph_engine.entities.event import ( BaseGraphEvent, BaseNodeEvent, @@ -37,7 +37,6 @@ from core.workflow.nodes.base import BaseNode from core.workflow.nodes.enums import NodeType from core.workflow.nodes.event import NodeEvent, RunCompletedEvent from core.workflow.nodes.iteration.entities import ErrorHandleMode, IterationNodeData -from models.workflow import WorkflowNodeExecutionStatus from .exc import ( InvalidIteratorValueError, @@ -249,8 +248,8 @@ class IterationNode(BaseNode[IterationNodeData]): status=WorkflowNodeExecutionStatus.SUCCEEDED, outputs={"output": outputs}, metadata={ - NodeRunMetadataKey.ITERATION_DURATION_MAP: iter_run_map, - NodeRunMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens, + WorkflowNodeExecutionMetadataKey.ITERATION_DURATION_MAP: iter_run_map, + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens, }, ) ) @@ -361,16 +360,16 @@ class IterationNode(BaseNode[IterationNodeData]): event.parallel_mode_run_id = 
parallel_mode_run_id iter_metadata = { - NodeRunMetadataKey.ITERATION_ID: self.node_id, - NodeRunMetadataKey.ITERATION_INDEX: iter_run_index, + WorkflowNodeExecutionMetadataKey.ITERATION_ID: self.node_id, + WorkflowNodeExecutionMetadataKey.ITERATION_INDEX: iter_run_index, } if parallel_mode_run_id: # for parallel, the specific branch ID is more important than the sequential index - iter_metadata[NodeRunMetadataKey.PARALLEL_MODE_RUN_ID] = parallel_mode_run_id + iter_metadata[WorkflowNodeExecutionMetadataKey.PARALLEL_MODE_RUN_ID] = parallel_mode_run_id if event.route_node_state.node_run_result: current_metadata = event.route_node_state.node_run_result.metadata or {} - if NodeRunMetadataKey.ITERATION_ID not in current_metadata: + if WorkflowNodeExecutionMetadataKey.ITERATION_ID not in current_metadata: event.route_node_state.node_run_result.metadata = {**current_metadata, **iter_metadata} return event diff --git a/api/core/workflow/nodes/iteration/iteration_start_node.py b/api/core/workflow/nodes/iteration/iteration_start_node.py index fe955e47d1..bee481ebdb 100644 --- a/api/core/workflow/nodes/iteration/iteration_start_node.py +++ b/api/core/workflow/nodes/iteration/iteration_start_node.py @@ -1,8 +1,8 @@ from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.base import BaseNode from core.workflow.nodes.enums import NodeType from core.workflow.nodes.iteration.entities import IterationStartNodeData -from models.workflow import WorkflowNodeExecutionStatus class IterationStartNode(BaseNode[IterationStartNodeData]): diff --git a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py index 5955022e5f..2ddb4f8a0b 100644 --- a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py +++ b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py @@ -24,6 +24,7 @@ from core.rag.retrieval.dataset_retrieval import DatasetRetrieval from core.rag.retrieval.retrieval_methods import RetrievalMethod from core.variables import StringSegment from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.enums import NodeType from core.workflow.nodes.event.event import ModelInvokeCompletedEvent from core.workflow.nodes.knowledge_retrieval.template_prompts import ( @@ -41,7 +42,6 @@ from extensions.ext_database import db from extensions.ext_redis import redis_client from libs.json_in_md_parser import parse_and_check_json_markdown from models.dataset import Dataset, DatasetMetadata, Document, RateLimitLog -from models.workflow import WorkflowNodeExecutionStatus from services.feature_service import FeatureService from .entities import KnowledgeRetrievalNodeData, ModelConfig diff --git a/api/core/workflow/nodes/list_operator/node.py b/api/core/workflow/nodes/list_operator/node.py index 04ccfc5405..e698d3f5d8 100644 --- a/api/core/workflow/nodes/list_operator/node.py +++ b/api/core/workflow/nodes/list_operator/node.py @@ -4,9 +4,9 @@ from typing import Any, Literal, Union from core.file import File from core.variables import ArrayFileSegment, ArrayNumberSegment, ArrayStringSegment from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.base import 
BaseNode from core.workflow.nodes.enums import NodeType -from models.workflow import WorkflowNodeExecutionStatus from .entities import ListOperatorNodeData from .exc import InvalidConditionError, InvalidFilterValueError, InvalidKeyError, ListOperatorError diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index eeb44601ec..0fd7c31ffb 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -43,6 +43,7 @@ from core.model_runtime.utils.encoders import jsonable_encoder from core.plugin.entities.plugin import ModelProviderID from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig from core.prompt.utils.prompt_message_util import PromptMessageUtil +from core.rag.entities.citation_metadata import RetrievalSourceMetadata from core.variables import ( ArrayAnySegment, ArrayFileSegment, @@ -53,9 +54,10 @@ from core.variables import ( StringSegment, ) from core.workflow.constants import SYSTEM_VARIABLE_NODE_ID -from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult +from core.workflow.entities.node_entities import NodeRunResult from core.workflow.entities.variable_entities import VariableSelector from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus from core.workflow.enums import SystemVariableKey from core.workflow.graph_engine.entities.event import InNodeEvent from core.workflow.nodes.base import BaseNode @@ -77,7 +79,6 @@ from core.workflow.utils.variable_template_parser import VariableTemplateParser from extensions.ext_database import db from models.model import Conversation from models.provider import Provider, ProviderType -from models.workflow import WorkflowNodeExecutionStatus from .entities import ( LLMNodeChatModelMessage, @@ -267,9 +268,9 @@ class LLMNode(BaseNode[LLMNodeData]): process_data=process_data, outputs=outputs, metadata={ - NodeRunMetadataKey.TOTAL_TOKENS: usage.total_tokens, - NodeRunMetadataKey.TOTAL_PRICE: usage.total_price, - NodeRunMetadataKey.CURRENCY: usage.currency, + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: usage.total_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: usage.total_price, + WorkflowNodeExecutionMetadataKey.CURRENCY: usage.currency, }, llm_usage=usage, ) @@ -474,7 +475,7 @@ class LLMNode(BaseNode[LLMNodeData]): yield RunRetrieverResourceEvent(retriever_resources=[], context=context_value_variable.value) elif isinstance(context_value_variable, ArraySegment): context_str = "" - original_retriever_resource = [] + original_retriever_resource: list[RetrievalSourceMetadata] = [] for item in context_value_variable.value: if isinstance(item, str): context_str += item + "\n" @@ -492,7 +493,7 @@ class LLMNode(BaseNode[LLMNodeData]): retriever_resources=original_retriever_resource, context=context_str.strip() ) - def _convert_to_original_retriever_resource(self, context_dict: dict) -> Optional[dict]: + def _convert_to_original_retriever_resource(self, context_dict: dict): if ( "metadata" in context_dict and "_source" in context_dict["metadata"] @@ -500,24 +501,24 @@ class LLMNode(BaseNode[LLMNodeData]): ): metadata = context_dict.get("metadata", {}) - source = { - "position": metadata.get("position"), - "dataset_id": metadata.get("dataset_id"), - "dataset_name": metadata.get("dataset_name"), - "document_id": metadata.get("document_id"), - "document_name": 
metadata.get("document_name"), - "data_source_type": metadata.get("data_source_type"), - "segment_id": metadata.get("segment_id"), - "retriever_from": metadata.get("retriever_from"), - "score": metadata.get("score"), - "hit_count": metadata.get("segment_hit_count"), - "word_count": metadata.get("segment_word_count"), - "segment_position": metadata.get("segment_position"), - "index_node_hash": metadata.get("segment_index_node_hash"), - "content": context_dict.get("content"), - "page": metadata.get("page"), - "doc_metadata": metadata.get("doc_metadata"), - } + source = RetrievalSourceMetadata( + position=metadata.get("position"), + dataset_id=metadata.get("dataset_id"), + dataset_name=metadata.get("dataset_name"), + document_id=metadata.get("document_id"), + document_name=metadata.get("document_name"), + data_source_type=metadata.get("data_source_type"), + segment_id=metadata.get("segment_id"), + retriever_from=metadata.get("retriever_from"), + score=metadata.get("score"), + hit_count=metadata.get("segment_hit_count"), + word_count=metadata.get("segment_word_count"), + segment_position=metadata.get("segment_position"), + index_node_hash=metadata.get("segment_index_node_hash"), + content=context_dict.get("content"), + page=metadata.get("page"), + doc_metadata=metadata.get("doc_metadata"), + ) return source diff --git a/api/core/workflow/nodes/loop/loop_end_node.py b/api/core/workflow/nodes/loop/loop_end_node.py index 5d4ce0ccbe..327b9e234b 100644 --- a/api/core/workflow/nodes/loop/loop_end_node.py +++ b/api/core/workflow/nodes/loop/loop_end_node.py @@ -1,8 +1,8 @@ from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.base import BaseNode from core.workflow.nodes.enums import NodeType from core.workflow.nodes.loop.entities import LoopEndNodeData -from models.workflow import WorkflowNodeExecutionStatus class LoopEndNode(BaseNode[LoopEndNodeData]): diff --git a/api/core/workflow/nodes/loop/loop_node.py b/api/core/workflow/nodes/loop/loop_node.py index bad3e2b928..fafa205386 100644 --- a/api/core/workflow/nodes/loop/loop_node.py +++ b/api/core/workflow/nodes/loop/loop_node.py @@ -15,7 +15,8 @@ from core.variables import ( SegmentType, StringSegment, ) -from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult +from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus from core.workflow.graph_engine.entities.event import ( BaseGraphEvent, BaseNodeEvent, @@ -37,7 +38,6 @@ from core.workflow.nodes.enums import NodeType from core.workflow.nodes.event import NodeEvent, RunCompletedEvent from core.workflow.nodes.loop.entities import LoopNodeData from core.workflow.utils.condition.processor import ConditionProcessor -from models.workflow import WorkflowNodeExecutionStatus if TYPE_CHECKING: from core.workflow.entities.variable_pool import VariablePool @@ -187,10 +187,10 @@ class LoopNode(BaseNode[LoopNodeData]): outputs=self.node_data.outputs, steps=loop_count, metadata={ - NodeRunMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens, "completed_reason": "loop_break" if check_break_result else "loop_completed", - NodeRunMetadataKey.LOOP_DURATION_MAP: loop_duration_map, - NodeRunMetadataKey.LOOP_VARIABLE_MAP: 
single_loop_variable_map, + WorkflowNodeExecutionMetadataKey.LOOP_DURATION_MAP: loop_duration_map, + WorkflowNodeExecutionMetadataKey.LOOP_VARIABLE_MAP: single_loop_variable_map, }, ) @@ -198,9 +198,9 @@ class LoopNode(BaseNode[LoopNodeData]): run_result=NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, metadata={ - NodeRunMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens, - NodeRunMetadataKey.LOOP_DURATION_MAP: loop_duration_map, - NodeRunMetadataKey.LOOP_VARIABLE_MAP: single_loop_variable_map, + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens, + WorkflowNodeExecutionMetadataKey.LOOP_DURATION_MAP: loop_duration_map, + WorkflowNodeExecutionMetadataKey.LOOP_VARIABLE_MAP: single_loop_variable_map, }, outputs=self.node_data.outputs, inputs=inputs, @@ -221,8 +221,8 @@ class LoopNode(BaseNode[LoopNodeData]): metadata={ "total_tokens": graph_engine.graph_runtime_state.total_tokens, "completed_reason": "error", - NodeRunMetadataKey.LOOP_DURATION_MAP: loop_duration_map, - NodeRunMetadataKey.LOOP_VARIABLE_MAP: single_loop_variable_map, + WorkflowNodeExecutionMetadataKey.LOOP_DURATION_MAP: loop_duration_map, + WorkflowNodeExecutionMetadataKey.LOOP_VARIABLE_MAP: single_loop_variable_map, }, error=str(e), ) @@ -232,9 +232,9 @@ class LoopNode(BaseNode[LoopNodeData]): status=WorkflowNodeExecutionStatus.FAILED, error=str(e), metadata={ - NodeRunMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens, - NodeRunMetadataKey.LOOP_DURATION_MAP: loop_duration_map, - NodeRunMetadataKey.LOOP_VARIABLE_MAP: single_loop_variable_map, + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens, + WorkflowNodeExecutionMetadataKey.LOOP_DURATION_MAP: loop_duration_map, + WorkflowNodeExecutionMetadataKey.LOOP_VARIABLE_MAP: single_loop_variable_map, }, ) ) @@ -322,7 +322,9 @@ class LoopNode(BaseNode[LoopNodeData]): inputs=inputs, steps=current_index, metadata={ - NodeRunMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: ( + graph_engine.graph_runtime_state.total_tokens + ), "completed_reason": "error", }, error=event.error, @@ -331,7 +333,11 @@ class LoopNode(BaseNode[LoopNodeData]): run_result=NodeRunResult( status=WorkflowNodeExecutionStatus.FAILED, error=event.error, - metadata={NodeRunMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens}, + metadata={ + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: ( + graph_engine.graph_runtime_state.total_tokens + ) + }, ) ) return {"check_break_result": True} @@ -347,7 +353,7 @@ class LoopNode(BaseNode[LoopNodeData]): inputs=inputs, steps=current_index, metadata={ - NodeRunMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens, "completed_reason": "error", }, error=event.error, @@ -356,7 +362,9 @@ class LoopNode(BaseNode[LoopNodeData]): run_result=NodeRunResult( status=WorkflowNodeExecutionStatus.FAILED, error=event.error, - metadata={NodeRunMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens}, + metadata={ + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens + }, ) ) return {"check_break_result": True} @@ -411,11 +419,11 @@ class LoopNode(BaseNode[LoopNodeData]): metadata = event.route_node_state.node_run_result.metadata if not metadata: metadata = {} - if NodeRunMetadataKey.LOOP_ID not in 
metadata: + if WorkflowNodeExecutionMetadataKey.LOOP_ID not in metadata: metadata = { **metadata, - NodeRunMetadataKey.LOOP_ID: self.node_id, - NodeRunMetadataKey.LOOP_INDEX: iter_run_index, + WorkflowNodeExecutionMetadataKey.LOOP_ID: self.node_id, + WorkflowNodeExecutionMetadataKey.LOOP_INDEX: iter_run_index, } event.route_node_state.node_run_result.metadata = metadata return event diff --git a/api/core/workflow/nodes/loop/loop_start_node.py b/api/core/workflow/nodes/loop/loop_start_node.py index 7cf145e4e5..5a15f36044 100644 --- a/api/core/workflow/nodes/loop/loop_start_node.py +++ b/api/core/workflow/nodes/loop/loop_start_node.py @@ -1,8 +1,8 @@ from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.base import BaseNode from core.workflow.nodes.enums import NodeType from core.workflow.nodes.loop.entities import LoopStartNodeData -from models.workflow import WorkflowNodeExecutionStatus class LoopStartNode(BaseNode[LoopStartNodeData]): diff --git a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py index 8db1e432fc..ea4070e224 100644 --- a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py +++ b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py @@ -25,13 +25,13 @@ from core.prompt.advanced_prompt_transform import AdvancedPromptTransform from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate from core.prompt.simple_prompt_transform import ModelMode from core.prompt.utils.prompt_message_util import PromptMessageUtil -from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult +from core.workflow.entities.node_entities import NodeRunResult from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus from core.workflow.nodes.enums import NodeType from core.workflow.nodes.llm import LLMNode, ModelConfig from core.workflow.utils import variable_template_parser from extensions.ext_database import db -from models.workflow import WorkflowNodeExecutionStatus from .entities import ParameterExtractorNodeData from .exc import ( @@ -244,9 +244,9 @@ class ParameterExtractorNode(LLMNode): process_data=process_data, outputs={"__is_success": 1 if not error else 0, "__reason": error, **result}, metadata={ - NodeRunMetadataKey.TOTAL_TOKENS: usage.total_tokens, - NodeRunMetadataKey.TOTAL_PRICE: usage.total_price, - NodeRunMetadataKey.CURRENCY: usage.currency, + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: usage.total_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: usage.total_price, + WorkflowNodeExecutionMetadataKey.CURRENCY: usage.currency, }, llm_usage=usage, ) @@ -816,7 +816,6 @@ class ParameterExtractorNode(LLMNode): :param node_data: node data :return: """ - # FIXME: fix the type error later variable_mapping: dict[str, Sequence[str]] = {"query": node_data.query} if node_data.instruction: diff --git a/api/core/workflow/nodes/question_classifier/question_classifier_node.py b/api/core/workflow/nodes/question_classifier/question_classifier_node.py index b4f34a3bef..e846b76280 100644 --- a/api/core/workflow/nodes/question_classifier/question_classifier_node.py +++ b/api/core/workflow/nodes/question_classifier/question_classifier_node.py @@ -10,7 +10,8 
@@ from core.model_runtime.utils.encoders import jsonable_encoder from core.prompt.advanced_prompt_transform import AdvancedPromptTransform from core.prompt.simple_prompt_transform import ModelMode from core.prompt.utils.prompt_message_util import PromptMessageUtil -from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult +from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus from core.workflow.nodes.enums import NodeType from core.workflow.nodes.event import ModelInvokeCompletedEvent from core.workflow.nodes.llm import ( @@ -20,7 +21,6 @@ from core.workflow.nodes.llm import ( ) from core.workflow.utils.variable_template_parser import VariableTemplateParser from libs.json_in_md_parser import parse_and_check_json_markdown -from models.workflow import WorkflowNodeExecutionStatus from .entities import QuestionClassifierNodeData from .exc import InvalidModelTypeError @@ -142,9 +142,9 @@ class QuestionClassifierNode(LLMNode): outputs=outputs, edge_source_handle=category_id, metadata={ - NodeRunMetadataKey.TOTAL_TOKENS: usage.total_tokens, - NodeRunMetadataKey.TOTAL_PRICE: usage.total_price, - NodeRunMetadataKey.CURRENCY: usage.currency, + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: usage.total_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: usage.total_price, + WorkflowNodeExecutionMetadataKey.CURRENCY: usage.currency, }, llm_usage=usage, ) @@ -154,9 +154,9 @@ class QuestionClassifierNode(LLMNode): inputs=variables, error=str(e), metadata={ - NodeRunMetadataKey.TOTAL_TOKENS: usage.total_tokens, - NodeRunMetadataKey.TOTAL_PRICE: usage.total_price, - NodeRunMetadataKey.CURRENCY: usage.currency, + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: usage.total_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: usage.total_price, + WorkflowNodeExecutionMetadataKey.CURRENCY: usage.currency, }, llm_usage=usage, ) diff --git a/api/core/workflow/nodes/start/start_node.py b/api/core/workflow/nodes/start/start_node.py index 1b47b81517..8839aec9d6 100644 --- a/api/core/workflow/nodes/start/start_node.py +++ b/api/core/workflow/nodes/start/start_node.py @@ -1,9 +1,9 @@ from core.workflow.constants import SYSTEM_VARIABLE_NODE_ID from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.base import BaseNode from core.workflow.nodes.enums import NodeType from core.workflow.nodes.start.entities import StartNodeData -from models.workflow import WorkflowNodeExecutionStatus class StartNode(BaseNode[StartNodeData]): diff --git a/api/core/workflow/nodes/template_transform/template_transform_node.py b/api/core/workflow/nodes/template_transform/template_transform_node.py index 22a1b21888..476cf7eee4 100644 --- a/api/core/workflow/nodes/template_transform/template_transform_node.py +++ b/api/core/workflow/nodes/template_transform/template_transform_node.py @@ -4,10 +4,10 @@ from typing import Any, Optional from core.helper.code_executor.code_executor import CodeExecutionError, CodeExecutor, CodeLanguage from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.base import BaseNode from core.workflow.nodes.enums import NodeType from core.workflow.nodes.template_transform.entities import TemplateTransformNodeData -from 
models.workflow import WorkflowNodeExecutionStatus MAX_TEMPLATE_TRANSFORM_OUTPUT_LENGTH = int(os.environ.get("TEMPLATE_TRANSFORM_MAX_LENGTH", "80000")) diff --git a/api/core/workflow/nodes/tool/tool_node.py b/api/core/workflow/nodes/tool/tool_node.py index c72ae5b69b..aaecc7b989 100644 --- a/api/core/workflow/nodes/tool/tool_node.py +++ b/api/core/workflow/nodes/tool/tool_node.py @@ -14,8 +14,9 @@ from core.tools.tool_engine import ToolEngine from core.tools.utils.message_transformer import ToolFileMessageTransformer from core.variables.segments import ArrayAnySegment from core.variables.variables import ArrayAnyVariable -from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult +from core.workflow.entities.node_entities import NodeRunResult from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus from core.workflow.enums import SystemVariableKey from core.workflow.graph_engine.entities.event import AgentLogEvent from core.workflow.nodes.base import BaseNode @@ -25,7 +26,6 @@ from core.workflow.utils.variable_template_parser import VariableTemplateParser from extensions.ext_database import db from factories import file_factory from models import ToolFile -from models.workflow import WorkflowNodeExecutionStatus from services.tools.builtin_tools_manage_service import BuiltinToolManageService from .entities import ToolNodeData @@ -70,7 +70,7 @@ class ToolNode(BaseNode[ToolNodeData]): run_result=NodeRunResult( status=WorkflowNodeExecutionStatus.FAILED, inputs={}, - metadata={NodeRunMetadataKey.TOOL_INFO: tool_info}, + metadata={WorkflowNodeExecutionMetadataKey.TOOL_INFO: tool_info}, error=f"Failed to get tool runtime: {str(e)}", error_type=type(e).__name__, ) @@ -110,7 +110,7 @@ class ToolNode(BaseNode[ToolNodeData]): run_result=NodeRunResult( status=WorkflowNodeExecutionStatus.FAILED, inputs=parameters_for_log, - metadata={NodeRunMetadataKey.TOOL_INFO: tool_info}, + metadata={WorkflowNodeExecutionMetadataKey.TOOL_INFO: tool_info}, error=f"Failed to invoke tool: {str(e)}", error_type=type(e).__name__, ) @@ -125,7 +125,7 @@ class ToolNode(BaseNode[ToolNodeData]): run_result=NodeRunResult( status=WorkflowNodeExecutionStatus.FAILED, inputs=parameters_for_log, - metadata={NodeRunMetadataKey.TOOL_INFO: tool_info}, + metadata={WorkflowNodeExecutionMetadataKey.TOOL_INFO: tool_info}, error=f"Failed to transform tool message: {str(e)}", error_type=type(e).__name__, ) @@ -201,7 +201,7 @@ class ToolNode(BaseNode[ToolNodeData]): json: list[dict] = [] agent_logs: list[AgentLogEvent] = [] - agent_execution_metadata: Mapping[NodeRunMetadataKey, Any] = {} + agent_execution_metadata: Mapping[WorkflowNodeExecutionMetadataKey, Any] = {} variables: dict[str, Any] = {} @@ -274,7 +274,7 @@ class ToolNode(BaseNode[ToolNodeData]): agent_execution_metadata = { key: value for key, value in msg_metadata.items() - if key in NodeRunMetadataKey.__members__.values() + if key in WorkflowNodeExecutionMetadataKey.__members__.values() } json.append(message.message.json_object) elif message.type == ToolInvokeMessage.MessageType.LINK: @@ -366,8 +366,8 @@ class ToolNode(BaseNode[ToolNodeData]): outputs={"text": text, "files": files, "json": json, **variables}, metadata={ **agent_execution_metadata, - NodeRunMetadataKey.TOOL_INFO: tool_info, - NodeRunMetadataKey.AGENT_LOG: agent_logs, + WorkflowNodeExecutionMetadataKey.TOOL_INFO: tool_info, + 
WorkflowNodeExecutionMetadataKey.AGENT_LOG: agent_logs, }, inputs=parameters_for_log, ) diff --git a/api/core/workflow/nodes/variable_aggregator/variable_aggregator_node.py b/api/core/workflow/nodes/variable_aggregator/variable_aggregator_node.py index 372496a8fa..db3e25b015 100644 --- a/api/core/workflow/nodes/variable_aggregator/variable_aggregator_node.py +++ b/api/core/workflow/nodes/variable_aggregator/variable_aggregator_node.py @@ -1,8 +1,8 @@ from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.base import BaseNode from core.workflow.nodes.enums import NodeType from core.workflow.nodes.variable_aggregator.entities import VariableAssignerNodeData -from models.workflow import WorkflowNodeExecutionStatus class VariableAggregatorNode(BaseNode[VariableAssignerNodeData]): diff --git a/api/core/workflow/nodes/variable_assigner/v1/node.py b/api/core/workflow/nodes/variable_assigner/v1/node.py index 7c7f14c0b8..835e1d77b5 100644 --- a/api/core/workflow/nodes/variable_assigner/v1/node.py +++ b/api/core/workflow/nodes/variable_assigner/v1/node.py @@ -1,11 +1,11 @@ from core.variables import SegmentType, Variable from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.base import BaseNode from core.workflow.nodes.enums import NodeType from core.workflow.nodes.variable_assigner.common import helpers as common_helpers from core.workflow.nodes.variable_assigner.common.exc import VariableOperatorNodeError from factories import variable_factory -from models.workflow import WorkflowNodeExecutionStatus from .node_data import VariableAssignerData, WriteMode diff --git a/api/core/workflow/nodes/variable_assigner/v2/node.py b/api/core/workflow/nodes/variable_assigner/v2/node.py index 6a7ad86b51..8759a55b34 100644 --- a/api/core/workflow/nodes/variable_assigner/v2/node.py +++ b/api/core/workflow/nodes/variable_assigner/v2/node.py @@ -6,11 +6,11 @@ from core.app.entities.app_invoke_entities import InvokeFrom from core.variables import SegmentType, Variable from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.base import BaseNode from core.workflow.nodes.enums import NodeType from core.workflow.nodes.variable_assigner.common import helpers as common_helpers from core.workflow.nodes.variable_assigner.common.exc import VariableOperatorNodeError -from models.workflow import WorkflowNodeExecutionStatus from . import helpers from .constants import EMPTY_VALUE_MAPPING diff --git a/api/core/workflow/repository/__init__.py b/api/core/workflow/repositories/__init__.py similarity index 69% rename from api/core/workflow/repository/__init__.py rename to api/core/workflow/repositories/__init__.py index 672abb6583..a778151baa 100644 --- a/api/core/workflow/repository/__init__.py +++ b/api/core/workflow/repositories/__init__.py @@ -6,7 +6,7 @@ for accessing and manipulating data, regardless of the underlying storage mechanism. 
""" -from core.workflow.repository.workflow_node_execution_repository import OrderConfig, WorkflowNodeExecutionRepository +from core.workflow.repositories.workflow_node_execution_repository import OrderConfig, WorkflowNodeExecutionRepository __all__ = [ "OrderConfig", diff --git a/api/core/workflow/repository/workflow_execution_repository.py b/api/core/workflow/repositories/workflow_execution_repository.py similarity index 94% rename from api/core/workflow/repository/workflow_execution_repository.py rename to api/core/workflow/repositories/workflow_execution_repository.py index a39a98ee33..5917310c8b 100644 --- a/api/core/workflow/repository/workflow_execution_repository.py +++ b/api/core/workflow/repositories/workflow_execution_repository.py @@ -1,6 +1,6 @@ from typing import Optional, Protocol -from core.workflow.entities.workflow_execution_entities import WorkflowExecution +from core.workflow.entities.workflow_execution import WorkflowExecution class WorkflowExecutionRepository(Protocol): diff --git a/api/core/workflow/repository/workflow_node_execution_repository.py b/api/core/workflow/repositories/workflow_node_execution_repository.py similarity index 91% rename from api/core/workflow/repository/workflow_node_execution_repository.py rename to api/core/workflow/repositories/workflow_node_execution_repository.py index 3ca9e2ecab..1908a6b190 100644 --- a/api/core/workflow/repository/workflow_node_execution_repository.py +++ b/api/core/workflow/repositories/workflow_node_execution_repository.py @@ -2,7 +2,7 @@ from collections.abc import Sequence from dataclasses import dataclass from typing import Literal, Optional, Protocol -from core.workflow.entities.node_execution_entities import NodeExecution +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecution @dataclass @@ -26,7 +26,7 @@ class WorkflowNodeExecutionRepository(Protocol): application domains or deployment scenarios. """ - def save(self, execution: NodeExecution) -> None: + def save(self, execution: WorkflowNodeExecution) -> None: """ Save or update a NodeExecution instance. @@ -39,7 +39,7 @@ class WorkflowNodeExecutionRepository(Protocol): """ ... - def get_by_node_execution_id(self, node_execution_id: str) -> Optional[NodeExecution]: + def get_by_node_execution_id(self, node_execution_id: str) -> Optional[WorkflowNodeExecution]: """ Retrieve a NodeExecution by its node_execution_id. @@ -55,7 +55,7 @@ class WorkflowNodeExecutionRepository(Protocol): self, workflow_run_id: str, order_config: Optional[OrderConfig] = None, - ) -> Sequence[NodeExecution]: + ) -> Sequence[WorkflowNodeExecution]: """ Retrieve all NodeExecution instances for a specific workflow run. @@ -70,7 +70,7 @@ class WorkflowNodeExecutionRepository(Protocol): """ ... - def get_running_executions(self, workflow_run_id: str) -> Sequence[NodeExecution]: + def get_running_executions(self, workflow_run_id: str) -> Sequence[WorkflowNodeExecution]: """ Retrieve all running NodeExecution instances for a specific workflow run. 
diff --git a/api/core/workflow/workflow_cycle_manager.py b/api/core/workflow/workflow_cycle_manager.py index 24e23af093..b88f9edd03 100644 --- a/api/core/workflow/workflow_cycle_manager.py +++ b/api/core/workflow/workflow_cycle_manager.py @@ -1,11 +1,9 @@ from collections.abc import Mapping +from dataclasses import dataclass from datetime import UTC, datetime from typing import Any, Optional, Union from uuid import uuid4 -from sqlalchemy import func, select -from sqlalchemy.orm import Session - from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, WorkflowAppGenerateEntity from core.app.entities.queue_entities import ( QueueNodeExceptionEvent, @@ -19,21 +17,24 @@ from core.app.entities.queue_entities import ( from core.app.task_pipeline.exc import WorkflowRunNotFoundError from core.ops.entities.trace_entity import TraceTaskName from core.ops.ops_trace_manager import TraceQueueManager, TraceTask -from core.workflow.entities.node_entities import NodeRunMetadataKey -from core.workflow.entities.node_execution_entities import ( - NodeExecution, - NodeExecutionStatus, +from core.workflow.entities.workflow_execution import WorkflowExecution, WorkflowExecutionStatus, WorkflowType +from core.workflow.entities.workflow_node_execution import ( + WorkflowNodeExecution, + WorkflowNodeExecutionMetadataKey, + WorkflowNodeExecutionStatus, ) -from core.workflow.entities.workflow_execution_entities import WorkflowExecution, WorkflowExecutionStatus, WorkflowType from core.workflow.enums import SystemVariableKey -from core.workflow.repository.workflow_execution_repository import WorkflowExecutionRepository -from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository +from core.workflow.repositories.workflow_execution_repository import WorkflowExecutionRepository +from core.workflow.repositories.workflow_node_execution_repository import WorkflowNodeExecutionRepository from core.workflow.workflow_entry import WorkflowEntry -from models import ( - Workflow, - WorkflowRun, - WorkflowRunStatus, -) + + +@dataclass +class CycleManagerWorkflowInfo: + workflow_id: str + workflow_type: WorkflowType + version: str + graph_data: Mapping[str, Any] class WorkflowCycleManager: @@ -42,32 +43,17 @@ class WorkflowCycleManager: *, application_generate_entity: Union[AdvancedChatAppGenerateEntity, WorkflowAppGenerateEntity], workflow_system_variables: dict[SystemVariableKey, Any], + workflow_info: CycleManagerWorkflowInfo, workflow_execution_repository: WorkflowExecutionRepository, workflow_node_execution_repository: WorkflowNodeExecutionRepository, ) -> None: self._application_generate_entity = application_generate_entity self._workflow_system_variables = workflow_system_variables + self._workflow_info = workflow_info self._workflow_execution_repository = workflow_execution_repository self._workflow_node_execution_repository = workflow_node_execution_repository - def handle_workflow_run_start( - self, - *, - session: Session, - workflow_id: str, - ) -> WorkflowExecution: - workflow_stmt = select(Workflow).where(Workflow.id == workflow_id) - workflow = session.scalar(workflow_stmt) - if not workflow: - raise ValueError(f"Workflow not found: {workflow_id}") - - max_sequence_stmt = select(func.max(WorkflowRun.sequence_number)).where( - WorkflowRun.tenant_id == workflow.tenant_id, - WorkflowRun.app_id == workflow.app_id, - ) - max_sequence = session.scalar(max_sequence_stmt) or 0 - new_sequence_number = max_sequence + 1 - + def handle_workflow_run_start(self) -> 
WorkflowExecution: inputs = {**self._application_generate_entity.inputs} for key, value in (self._workflow_system_variables or {}).items(): if key.value == "conversation": @@ -79,14 +65,13 @@ class WorkflowCycleManager: # init workflow run # TODO: This workflow_run_id should always not be None, maybe we can use a more elegant way to handle this - execution_id = str(self._workflow_system_variables.get(SystemVariableKey.WORKFLOW_RUN_ID) or uuid4()) + execution_id = str(self._workflow_system_variables.get(SystemVariableKey.WORKFLOW_EXECUTION_ID) or uuid4()) execution = WorkflowExecution.new( - id=execution_id, - workflow_id=workflow.id, - sequence_number=new_sequence_number, - type=WorkflowType(workflow.type), - workflow_version=workflow.version, - graph=workflow.graph_dict, + id_=execution_id, + workflow_id=self._workflow_info.workflow_id, + workflow_type=self._workflow_info.workflow_type, + workflow_version=self._workflow_info.version, + graph=self._workflow_info.graph_data, inputs=inputs, started_at=datetime.now(UTC).replace(tzinfo=None), ) @@ -168,7 +153,7 @@ class WorkflowCycleManager: workflow_run_id: str, total_tokens: int, total_steps: int, - status: WorkflowRunStatus, + status: WorkflowExecutionStatus, error_message: str, conversation_id: Optional[str] = None, trace_manager: Optional[TraceQueueManager] = None, @@ -185,7 +170,7 @@ class WorkflowCycleManager: # Use the instance repository to find running executions for a workflow run running_node_executions = self._workflow_node_execution_repository.get_running_executions( - workflow_run_id=workflow_execution.id + workflow_run_id=workflow_execution.id_ ) # Update the domain models @@ -193,7 +178,7 @@ class WorkflowCycleManager: for node_execution in running_node_executions: if node_execution.node_execution_id: # Update the domain model - node_execution.status = NodeExecutionStatus.FAILED + node_execution.status = WorkflowNodeExecutionStatus.FAILED node_execution.error = error_message node_execution.finished_at = now node_execution.elapsed_time = (now - node_execution.created_at).total_seconds() @@ -219,28 +204,28 @@ class WorkflowCycleManager: *, workflow_execution_id: str, event: QueueNodeStartedEvent, - ) -> NodeExecution: + ) -> WorkflowNodeExecution: workflow_execution = self._get_workflow_execution_or_raise_error(workflow_execution_id) # Create a domain model created_at = datetime.now(UTC).replace(tzinfo=None) metadata = { - NodeRunMetadataKey.PARALLEL_MODE_RUN_ID: event.parallel_mode_run_id, - NodeRunMetadataKey.ITERATION_ID: event.in_iteration_id, - NodeRunMetadataKey.LOOP_ID: event.in_loop_id, + WorkflowNodeExecutionMetadataKey.PARALLEL_MODE_RUN_ID: event.parallel_mode_run_id, + WorkflowNodeExecutionMetadataKey.ITERATION_ID: event.in_iteration_id, + WorkflowNodeExecutionMetadataKey.LOOP_ID: event.in_loop_id, } - domain_execution = NodeExecution( + domain_execution = WorkflowNodeExecution( id=str(uuid4()), workflow_id=workflow_execution.workflow_id, - workflow_run_id=workflow_execution.id, + workflow_execution_id=workflow_execution.id_, predecessor_node_id=event.predecessor_node_id, index=event.node_run_index, node_execution_id=event.node_execution_id, node_id=event.node_id, node_type=event.node_type, title=event.node_data.title, - status=NodeExecutionStatus.RUNNING, + status=WorkflowNodeExecutionStatus.RUNNING, metadata=metadata, created_at=created_at, ) @@ -250,7 +235,7 @@ class WorkflowCycleManager: return domain_execution - def handle_workflow_node_execution_success(self, *, event: QueueNodeSucceededEvent) -> NodeExecution: 
+ def handle_workflow_node_execution_success(self, *, event: QueueNodeSucceededEvent) -> WorkflowNodeExecution: # Get the domain model from repository domain_execution = self._workflow_node_execution_repository.get_by_node_execution_id(event.node_execution_id) if not domain_execution: @@ -271,7 +256,7 @@ class WorkflowCycleManager: elapsed_time = (finished_at - event.start_at).total_seconds() # Update domain model - domain_execution.status = NodeExecutionStatus.SUCCEEDED + domain_execution.status = WorkflowNodeExecutionStatus.SUCCEEDED domain_execution.update_from_mapping( inputs=inputs, process_data=process_data, outputs=outputs, metadata=execution_metadata_dict ) @@ -290,7 +275,7 @@ class WorkflowCycleManager: | QueueNodeInIterationFailedEvent | QueueNodeInLoopFailedEvent | QueueNodeExceptionEvent, - ) -> NodeExecution: + ) -> WorkflowNodeExecution: """ Workflow node execution failed :param event: queue node failed event @@ -317,9 +302,9 @@ class WorkflowCycleManager: # Update domain model domain_execution.status = ( - NodeExecutionStatus.FAILED + WorkflowNodeExecutionStatus.FAILED if not isinstance(event, QueueNodeExceptionEvent) - else NodeExecutionStatus.EXCEPTION + else WorkflowNodeExecutionStatus.EXCEPTION ) domain_execution.error = event.error domain_execution.update_from_mapping( @@ -335,7 +320,7 @@ class WorkflowCycleManager: def handle_workflow_node_execution_retried( self, *, workflow_execution_id: str, event: QueueNodeRetryEvent - ) -> NodeExecution: + ) -> WorkflowNodeExecution: workflow_execution = self._get_workflow_execution_or_raise_error(workflow_execution_id) created_at = event.start_at finished_at = datetime.now(UTC).replace(tzinfo=None) @@ -345,13 +330,13 @@ class WorkflowCycleManager: # Convert metadata keys to strings origin_metadata = { - NodeRunMetadataKey.ITERATION_ID: event.in_iteration_id, - NodeRunMetadataKey.PARALLEL_MODE_RUN_ID: event.parallel_mode_run_id, - NodeRunMetadataKey.LOOP_ID: event.in_loop_id, + WorkflowNodeExecutionMetadataKey.ITERATION_ID: event.in_iteration_id, + WorkflowNodeExecutionMetadataKey.PARALLEL_MODE_RUN_ID: event.parallel_mode_run_id, + WorkflowNodeExecutionMetadataKey.LOOP_ID: event.in_loop_id, } # Convert execution metadata keys to strings - execution_metadata_dict: dict[NodeRunMetadataKey, str | None] = {} + execution_metadata_dict: dict[WorkflowNodeExecutionMetadataKey, str | None] = {} if event.execution_metadata: for key, value in event.execution_metadata.items(): execution_metadata_dict[key] = value @@ -359,16 +344,16 @@ class WorkflowCycleManager: merged_metadata = {**execution_metadata_dict, **origin_metadata} if execution_metadata_dict else origin_metadata # Create a domain model - domain_execution = NodeExecution( + domain_execution = WorkflowNodeExecution( id=str(uuid4()), workflow_id=workflow_execution.workflow_id, - workflow_run_id=workflow_execution.id, + workflow_execution_id=workflow_execution.id_, predecessor_node_id=event.predecessor_node_id, node_execution_id=event.node_execution_id, node_id=event.node_id, node_type=event.node_type, title=event.node_data.title, - status=NodeExecutionStatus.RETRY, + status=WorkflowNodeExecutionStatus.RETRY, created_at=created_at, finished_at=finished_at, elapsed_time=elapsed_time, diff --git a/api/factories/variable_factory.py b/api/factories/variable_factory.py index bbca8448ec..fa8a90e79f 100644 --- a/api/factories/variable_factory.py +++ b/api/factories/variable_factory.py @@ -84,8 +84,8 @@ def _build_variable_from_mapping(*, mapping: Mapping[str, Any], selector: Sequen raise 
VariableError("missing value type") if (value := mapping.get("value")) is None: raise VariableError("missing value") - # FIXME: using Any here, fix it later - result: Any + + result: Variable match value_type: case SegmentType.STRING: result = StringVariable.model_validate(mapping) diff --git a/api/models/__init__.py b/api/models/__init__.py index f652449e98..83b50eb099 100644 --- a/api/models/__init__.py +++ b/api/models/__init__.py @@ -84,11 +84,9 @@ from .workflow import ( Workflow, WorkflowAppLog, WorkflowAppLogCreatedFrom, - WorkflowNodeExecution, - WorkflowNodeExecutionStatus, + WorkflowNodeExecutionModel, WorkflowNodeExecutionTriggeredFrom, WorkflowRun, - WorkflowRunStatus, WorkflowType, ) @@ -100,14 +98,14 @@ __all__ = [ "AccountStatus", "ApiRequest", "ApiToken", - "ApiToolProvider", # Added + "ApiToolProvider", "App", "AppAnnotationHitHistory", "AppAnnotationSetting", "AppDatasetJoin", "AppMode", "AppModelConfig", - "BuiltinToolProvider", # Added + "BuiltinToolProvider", "CeleryTask", "CeleryTaskSet", "Conversation", @@ -171,11 +169,9 @@ __all__ = [ "Workflow", "WorkflowAppLog", "WorkflowAppLogCreatedFrom", - "WorkflowNodeExecution", - "WorkflowNodeExecutionStatus", + "WorkflowNodeExecutionModel", "WorkflowNodeExecutionTriggeredFrom", "WorkflowRun", - "WorkflowRunStatus", "WorkflowRunTriggeredFrom", "WorkflowToolProvider", "WorkflowType", diff --git a/api/models/model.py b/api/models/model.py index 92a5c0d121..229e77134e 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -9,6 +9,7 @@ from typing import TYPE_CHECKING, Any, Literal, Optional, cast from core.plugin.entities.plugin import GenericProviderID from core.tools.entities.tool_entities import ToolProviderType from core.tools.signature import sign_tool_file +from core.workflow.entities.workflow_execution import WorkflowExecutionStatus from services.plugin.plugin_service import PluginService if TYPE_CHECKING: @@ -31,7 +32,6 @@ from .base import Base from .engine import db from .enums import CreatorUserRole from .types import StringUUID -from .workflow import WorkflowRunStatus if TYPE_CHECKING: from .workflow import Workflow @@ -794,22 +794,22 @@ class Conversation(Base): def status_count(self): messages = db.session.query(Message).filter(Message.conversation_id == self.id).all() status_counts = { - WorkflowRunStatus.RUNNING: 0, - WorkflowRunStatus.SUCCEEDED: 0, - WorkflowRunStatus.FAILED: 0, - WorkflowRunStatus.STOPPED: 0, - WorkflowRunStatus.PARTIAL_SUCCEEDED: 0, + WorkflowExecutionStatus.RUNNING: 0, + WorkflowExecutionStatus.SUCCEEDED: 0, + WorkflowExecutionStatus.FAILED: 0, + WorkflowExecutionStatus.STOPPED: 0, + WorkflowExecutionStatus.PARTIAL_SUCCEEDED: 0, } for message in messages: if message.workflow_run: - status_counts[WorkflowRunStatus(message.workflow_run.status)] += 1 + status_counts[WorkflowExecutionStatus(message.workflow_run.status)] += 1 return ( { - "success": status_counts[WorkflowRunStatus.SUCCEEDED], - "failed": status_counts[WorkflowRunStatus.FAILED], - "partial_success": status_counts[WorkflowRunStatus.PARTIAL_SUCCEEDED], + "success": status_counts[WorkflowExecutionStatus.SUCCEEDED], + "failed": status_counts[WorkflowExecutionStatus.FAILED], + "partial_success": status_counts[WorkflowExecutionStatus.PARTIAL_SUCCEEDED], } if messages else None diff --git a/api/models/workflow.py b/api/models/workflow.py index ae341dd1b5..e868fb77a7 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -377,18 +377,6 @@ class Workflow(Base): ) -class WorkflowRunStatus(StrEnum): - """ - Workflow Run 
Status Enum - """ - - RUNNING = "running" - SUCCEEDED = "succeeded" - FAILED = "failed" - STOPPED = "stopped" - PARTIAL_SUCCEEDED = "partial-succeeded" - - class WorkflowRun(Base): """ Workflow Run @@ -449,12 +437,12 @@ class WorkflowRun(Base): error: Mapped[Optional[str]] = mapped_column(db.Text) elapsed_time: Mapped[float] = mapped_column(db.Float, nullable=False, server_default=sa.text("0")) total_tokens: Mapped[int] = mapped_column(sa.BigInteger, server_default=sa.text("0")) - total_steps: Mapped[int] = mapped_column(db.Integer, server_default=db.text("0")) + total_steps: Mapped[int] = mapped_column(db.Integer, server_default=db.text("0"), nullable=True) created_by_role: Mapped[str] = mapped_column(db.String(255)) # account, end_user created_by: Mapped[str] = mapped_column(StringUUID, nullable=False) created_at: Mapped[datetime] = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) finished_at: Mapped[Optional[datetime]] = mapped_column(db.DateTime) - exceptions_count: Mapped[int] = mapped_column(db.Integer, server_default=db.text("0")) + exceptions_count: Mapped[int] = mapped_column(db.Integer, server_default=db.text("0"), nullable=True) @property def created_by_account(self): @@ -553,19 +541,7 @@ class WorkflowNodeExecutionTriggeredFrom(StrEnum): WORKFLOW_RUN = "workflow-run" -class WorkflowNodeExecutionStatus(StrEnum): - """ - Workflow Node Execution Status Enum - """ - - RUNNING = "running" - SUCCEEDED = "succeeded" - FAILED = "failed" - EXCEPTION = "exception" - RETRY = "retry" - - -class WorkflowNodeExecution(Base): +class WorkflowNodeExecutionModel(Base): """ Workflow Node Execution diff --git a/api/pytest.ini b/api/pytest.ini index 618e921825..eb49619481 100644 --- a/api/pytest.ini +++ b/api/pytest.ini @@ -1,5 +1,4 @@ [pytest] -continue-on-collection-errors = true addopts = --cov=./api --cov-report=json --cov-report=xml env = ANTHROPIC_API_KEY = sk-ant-api11-IamNotARealKeyJustForMockTestKawaiiiiiiiiii-NotBaka-ASkksz diff --git a/api/schedule/clean_messages.py b/api/schedule/clean_messages.py index f41f5264c7..d02bc81f33 100644 --- a/api/schedule/clean_messages.py +++ b/api/schedule/clean_messages.py @@ -34,9 +34,8 @@ def clean_messages(): while True: try: # Main query with join and filter - # FIXME:for mypy no paginate method error messages = ( - db.session.query(Message) # type: ignore + db.session.query(Message) .filter(Message.created_at < plan_sandbox_clean_message_day) .order_by(Message.created_at.desc()) .limit(100) diff --git a/api/services/clear_free_plan_tenant_expired_logs.py b/api/services/clear_free_plan_tenant_expired_logs.py index 5762bf9600..1fd560d581 100644 --- a/api/services/clear_free_plan_tenant_expired_logs.py +++ b/api/services/clear_free_plan_tenant_expired_logs.py @@ -14,7 +14,7 @@ from extensions.ext_database import db from extensions.ext_storage import storage from models.account import Tenant from models.model import App, Conversation, Message -from models.workflow import WorkflowNodeExecution, WorkflowRun +from models.workflow import WorkflowNodeExecutionModel, WorkflowRun from services.billing_service import BillingService logger = logging.getLogger(__name__) @@ -108,10 +108,11 @@ class ClearFreePlanTenantExpiredLogs: while True: with Session(db.engine).no_autoflush as session: workflow_node_executions = ( - session.query(WorkflowNodeExecution) + session.query(WorkflowNodeExecutionModel) .filter( - WorkflowNodeExecution.tenant_id == tenant_id, - WorkflowNodeExecution.created_at < datetime.datetime.now() - 
datetime.timedelta(days=days), + WorkflowNodeExecutionModel.tenant_id == tenant_id, + WorkflowNodeExecutionModel.created_at + < datetime.datetime.now() - datetime.timedelta(days=days), ) .limit(batch) .all() @@ -135,8 +136,8 @@ class ClearFreePlanTenantExpiredLogs: ] # delete workflow node executions - session.query(WorkflowNodeExecution).filter( - WorkflowNodeExecution.id.in_(workflow_node_execution_ids), + session.query(WorkflowNodeExecutionModel).filter( + WorkflowNodeExecutionModel.id.in_(workflow_node_execution_ids), ).delete(synchronize_session=False) session.commit() diff --git a/api/services/hit_testing_service.py b/api/services/hit_testing_service.py index 56e06cc33e..519d5abca5 100644 --- a/api/services/hit_testing_service.py +++ b/api/services/hit_testing_service.py @@ -2,8 +2,11 @@ import logging import time from typing import Any +from core.app.app_config.entities import ModelConfig +from core.model_runtime.entities import LLMMode from core.rag.datasource.retrieval_service import RetrievalService from core.rag.models.document import Document +from core.rag.retrieval.dataset_retrieval import DatasetRetrieval from core.rag.retrieval.retrieval_methods import RetrievalMethod from extensions.ext_database import db from models.account import Account @@ -34,7 +37,29 @@ class HitTestingService: # get retrieval model , if the model is not setting , using default if not retrieval_model: retrieval_model = dataset.retrieval_model or default_retrieval_model + document_ids_filter = None + metadata_filtering_conditions = retrieval_model.get("metadata_filtering_conditions", {}) + if metadata_filtering_conditions: + dataset_retrieval = DatasetRetrieval() + from core.app.app_config.entities import MetadataFilteringCondition + + metadata_filtering_conditions = MetadataFilteringCondition(**metadata_filtering_conditions) + + metadata_filter_document_ids, metadata_condition = dataset_retrieval.get_metadata_filter_condition( + dataset_ids=[dataset.id], + query=query, + metadata_filtering_mode="manual", + metadata_filtering_conditions=metadata_filtering_conditions, + inputs={}, + tenant_id="", + user_id="", + metadata_model_config=ModelConfig(provider="", name="", mode=LLMMode.CHAT, completion_params={}), + ) + if metadata_filter_document_ids: + document_ids_filter = metadata_filter_document_ids.get(dataset.id, []) + if metadata_condition and not document_ids_filter: + return cls.compact_retrieve_response(query, []) all_documents = RetrievalService.retrieve( retrieval_method=retrieval_model.get("search_method", "semantic_search"), dataset_id=dataset.id, @@ -48,6 +73,7 @@ class HitTestingService: else None, reranking_mode=retrieval_model.get("reranking_mode") or "reranking_model", weights=retrieval_model.get("weights", None), + document_ids_filter=document_ids_filter, ) end = time.perf_counter() @@ -99,7 +125,7 @@ class HitTestingService: return dict(cls.compact_external_retrieve_response(dataset, query, all_documents)) @classmethod - def compact_retrieve_response(cls, query: str, documents: list[Document]): + def compact_retrieve_response(cls, query: str, documents: list[Document]) -> dict[Any, Any]: records = RetrievalService.format_retrieval_documents(documents) return { diff --git a/api/services/ops_service.py b/api/services/ops_service.py index a9c2b28476..792f50703e 100644 --- a/api/services/ops_service.py +++ b/api/services/ops_service.py @@ -1,5 +1,6 @@ -from typing import Optional +from typing import Any, Optional +from core.ops.entities.config_entity import BaseTracingConfig from 
core.ops.ops_trace_manager import OpsTraceManager, provider_config_map from extensions.ext_database import db from models.model import App, TraceAppConfig @@ -92,13 +93,12 @@ class OpsService: except KeyError: return {"error": f"Invalid tracing provider: {tracing_provider}"} - config_class, other_keys = ( - provider_config_map[tracing_provider]["config_class"], - provider_config_map[tracing_provider]["other_keys"], - ) - # FIXME: ignore type error - default_config_instance = config_class(**tracing_config) # type: ignore - for key in other_keys: # type: ignore + provider_config: dict[str, Any] = provider_config_map[tracing_provider] + config_class: type[BaseTracingConfig] = provider_config["config_class"] + other_keys: list[str] = provider_config["other_keys"] + + default_config_instance: BaseTracingConfig = config_class(**tracing_config) + for key in other_keys: if key in tracing_config and tracing_config[key] == "": tracing_config[key] = getattr(default_config_instance, key, None) diff --git a/api/services/tag_service.py b/api/services/tag_service.py index 21cb861f87..be748e8dd1 100644 --- a/api/services/tag_service.py +++ b/api/services/tag_service.py @@ -44,6 +44,17 @@ class TagService: results = [tag_binding.target_id for tag_binding in tag_bindings] return results + @staticmethod + def get_tag_by_tag_name(tag_type: str, current_tenant_id: str, tag_name: str) -> list: + tags = ( + db.session.query(Tag) + .filter(Tag.name == tag_name, Tag.tenant_id == current_tenant_id, Tag.type == tag_type) + .all() + ) + if not tags: + return [] + return tags + @staticmethod def get_tags_by_target_id(tag_type: str, current_tenant_id: str, target_id: str) -> list: tags = ( @@ -62,6 +73,8 @@ class TagService: @staticmethod def save_tags(args: dict) -> Tag: + if TagService.get_tag_by_tag_name(args["type"], current_user.current_tenant_id, args["name"]): + raise ValueError("Tag name already exists") tag = Tag( id=str(uuid.uuid4()), name=args["name"], @@ -75,6 +88,8 @@ class TagService: @staticmethod def update_tags(args: dict, tag_id: str) -> Tag: + if TagService.get_tag_by_tag_name(args["type"], current_user.current_tenant_id, args["name"]): + raise ValueError("Tag name already exists") tag = db.session.query(Tag).filter(Tag.id == tag_id).first() if not tag: raise NotFound("Tag not found") diff --git a/api/services/website_service.py b/api/services/website_service.py index 3913dc2efe..6720932a3a 100644 --- a/api/services/website_service.py +++ b/api/services/website_service.py @@ -173,26 +173,27 @@ class WebsiteService: return crawl_status_data @classmethod - def get_crawl_url_data(cls, job_id: str, provider: str, url: str, tenant_id: str) -> dict[Any, Any] | None: + def get_crawl_url_data(cls, job_id: str, provider: str, url: str, tenant_id: str) -> dict[str, Any] | None: credentials = ApiKeyAuthService.get_auth_credentials(tenant_id, "website", provider) # decrypt api_key api_key = encrypter.decrypt_token(tenant_id=tenant_id, token=credentials.get("config").get("api_key")) - # FIXME data is redefine too many times here, use Any to ease the type checking, fix it later - data: Any + if provider == "firecrawl": + crawl_data: list[dict[str, Any]] | None = None file_key = "website_files/" + job_id + ".txt" if storage.exists(file_key): - d = storage.load_once(file_key) - if d: - data = json.loads(d.decode("utf-8")) + stored_data = storage.load_once(file_key) + if stored_data: + crawl_data = json.loads(stored_data.decode("utf-8")) else: firecrawl_app = FirecrawlApp(api_key=api_key, 
base_url=credentials.get("config").get("base_url", None)) result = firecrawl_app.check_crawl_status(job_id) if result.get("status") != "completed": raise ValueError("Crawl job is not completed") - data = result.get("data") - if data: - for item in data: + crawl_data = result.get("data") + + if crawl_data: + for item in crawl_data: if item.get("source_url") == url: return dict(item) return None @@ -211,23 +212,24 @@ class WebsiteService: raise ValueError("Failed to crawl") return dict(response.json().get("data", {})) else: - api_key = encrypter.decrypt_token(tenant_id=tenant_id, token=credentials.get("config").get("api_key")) - response = requests.post( + # Get crawl status first + status_response = requests.post( "https://adaptivecrawlstatus-kir3wx7b3a-uc.a.run.app", headers={"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}, json={"taskId": job_id}, ) - data = response.json().get("data", {}) - if data.get("status") != "completed": + status_data = status_response.json().get("data", {}) + if status_data.get("status") != "completed": raise ValueError("Crawl job is not completed") - response = requests.post( + # Get processed data + data_response = requests.post( "https://adaptivecrawlstatus-kir3wx7b3a-uc.a.run.app", headers={"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}, - json={"taskId": job_id, "urls": list(data.get("processed", {}).keys())}, + json={"taskId": job_id, "urls": list(status_data.get("processed", {}).keys())}, ) - data = response.json().get("data", {}) - for item in data.get("processed", {}).values(): + processed_data = data_response.json().get("data", {}) + for item in processed_data.get("processed", {}).values(): if item.get("data", {}).get("url") == url: return dict(item.get("data", {})) return None diff --git a/api/services/workflow_app_service.py b/api/services/workflow_app_service.py index a899ebe278..6b30a70372 100644 --- a/api/services/workflow_app_service.py +++ b/api/services/workflow_app_service.py @@ -4,9 +4,9 @@ from datetime import datetime from sqlalchemy import and_, func, or_, select from sqlalchemy.orm import Session +from core.workflow.entities.workflow_execution import WorkflowExecutionStatus from models import App, EndUser, WorkflowAppLog, WorkflowRun from models.enums import CreatorUserRole -from models.workflow import WorkflowRunStatus class WorkflowAppService: @@ -16,7 +16,7 @@ class WorkflowAppService: session: Session, app_model: App, keyword: str | None = None, - status: WorkflowRunStatus | None = None, + status: WorkflowExecutionStatus | None = None, created_at_before: datetime | None = None, created_at_after: datetime | None = None, page: int = 1, diff --git a/api/services/workflow_run_service.py b/api/services/workflow_run_service.py index 21366a4552..483c0d3086 100644 --- a/api/services/workflow_run_service.py +++ b/api/services/workflow_run_service.py @@ -4,14 +4,14 @@ from typing import Optional import contexts from core.repositories import SQLAlchemyWorkflowNodeExecutionRepository -from core.workflow.repository.workflow_node_execution_repository import OrderConfig +from core.workflow.repositories.workflow_node_execution_repository import OrderConfig from extensions.ext_database import db from libs.infinite_scroll_pagination import InfiniteScrollPagination from models import ( Account, App, EndUser, - WorkflowNodeExecution, + WorkflowNodeExecutionModel, WorkflowRun, WorkflowRunTriggeredFrom, ) @@ -125,7 +125,7 @@ class WorkflowRunService: app_model: App, run_id: str, user: Account | EndUser, - 
) -> Sequence[WorkflowNodeExecution]: + ) -> Sequence[WorkflowNodeExecutionModel]: """ Get workflow run node execution list """ diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index 50bb8f40ae..bc213ccce6 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -13,7 +13,7 @@ from core.app.apps.workflow.app_config_manager import WorkflowAppConfigManager from core.repositories import SQLAlchemyWorkflowNodeExecutionRepository from core.variables import Variable from core.workflow.entities.node_entities import NodeRunResult -from core.workflow.entities.node_execution_entities import NodeExecution, NodeExecutionStatus +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecution, WorkflowNodeExecutionStatus from core.workflow.errors import WorkflowNodeRunFailedError from core.workflow.graph_engine.entities.event import InNodeEvent from core.workflow.nodes import NodeType @@ -30,8 +30,7 @@ from models.model import App, AppMode from models.tools import WorkflowToolProvider from models.workflow import ( Workflow, - WorkflowNodeExecution, - WorkflowNodeExecutionStatus, + WorkflowNodeExecutionModel, WorkflowNodeExecutionTriggeredFrom, WorkflowType, ) @@ -255,7 +254,7 @@ class WorkflowService: def run_draft_workflow_node( self, app_model: App, node_id: str, user_inputs: dict, account: Account - ) -> WorkflowNodeExecution: + ) -> WorkflowNodeExecutionModel: """ Run draft workflow node """ @@ -297,7 +296,7 @@ class WorkflowService: def run_free_workflow_node( self, node_data: dict, tenant_id: str, user_id: str, node_id: str, user_inputs: dict[str, Any] - ) -> NodeExecution: + ) -> WorkflowNodeExecution: """ Run draft workflow node """ @@ -323,7 +322,7 @@ class WorkflowService: invoke_node_fn: Callable[[], tuple[BaseNode, Generator[NodeEvent | InNodeEvent, None, None]]], start_at: float, node_id: str, - ) -> NodeExecution: + ) -> WorkflowNodeExecution: try: node_instance, generator = invoke_node_fn() @@ -375,7 +374,7 @@ class WorkflowService: error = e.error # Create a NodeExecution domain model - node_execution = NodeExecution( + node_execution = WorkflowNodeExecution( id=str(uuid4()), workflow_id="", # This is a single-step execution, so no workflow ID index=1, @@ -404,13 +403,13 @@ class WorkflowService: # Map status from WorkflowNodeExecutionStatus to NodeExecutionStatus if node_run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED: - node_execution.status = NodeExecutionStatus.SUCCEEDED + node_execution.status = WorkflowNodeExecutionStatus.SUCCEEDED elif node_run_result.status == WorkflowNodeExecutionStatus.EXCEPTION: - node_execution.status = NodeExecutionStatus.EXCEPTION + node_execution.status = WorkflowNodeExecutionStatus.EXCEPTION node_execution.error = node_run_result.error else: # Set failed status and error - node_execution.status = NodeExecutionStatus.FAILED + node_execution.status = WorkflowNodeExecutionStatus.FAILED node_execution.error = error return node_execution diff --git a/api/tasks/document_indexing_sync_task.py b/api/tasks/document_indexing_sync_task.py index fd1f6265b4..b4848be192 100644 --- a/api/tasks/document_indexing_sync_task.py +++ b/api/tasks/document_indexing_sync_task.py @@ -114,4 +114,4 @@ def document_indexing_sync_task(dataset_id: str, document_id: str): except DocumentIsPausedError as ex: logging.info(click.style(str(ex), fg="yellow")) except Exception: - pass + logging.exception("document_indexing_sync_task failed, document_id: {}".format(document_id)) diff --git 
a/api/tasks/document_indexing_task.py b/api/tasks/document_indexing_task.py index ee470d44e8..55cac6a9af 100644 --- a/api/tasks/document_indexing_task.py +++ b/api/tasks/document_indexing_task.py @@ -81,6 +81,6 @@ def document_indexing_task(dataset_id: str, document_ids: list): except DocumentIsPausedError as ex: logging.info(click.style(str(ex), fg="yellow")) except Exception: - pass + logging.exception("Document indexing task failed, dataset_id: {}".format(dataset_id)) finally: db.session.close() diff --git a/api/tasks/document_indexing_update_task.py b/api/tasks/document_indexing_update_task.py index b9ed11a8da..167b928f5d 100644 --- a/api/tasks/document_indexing_update_task.py +++ b/api/tasks/document_indexing_update_task.py @@ -73,6 +73,6 @@ def document_indexing_update_task(dataset_id: str, document_id: str): except DocumentIsPausedError as ex: logging.info(click.style(str(ex), fg="yellow")) except Exception: - pass + logging.exception("document_indexing_update_task failed, document_id: {}".format(document_id)) finally: db.session.close() diff --git a/api/tasks/duplicate_document_indexing_task.py b/api/tasks/duplicate_document_indexing_task.py index 100fc257ce..a6c93e110e 100644 --- a/api/tasks/duplicate_document_indexing_task.py +++ b/api/tasks/duplicate_document_indexing_task.py @@ -99,6 +99,6 @@ def duplicate_document_indexing_task(dataset_id: str, document_ids: list): except DocumentIsPausedError as ex: logging.info(click.style(str(ex), fg="yellow")) except Exception: - pass + logging.exception("duplicate_document_indexing_task failed, dataset_id: {}".format(dataset_id)) finally: db.session.close() diff --git a/api/tasks/recover_document_indexing_task.py b/api/tasks/recover_document_indexing_task.py index eada2ff9db..e7d49c78dc 100644 --- a/api/tasks/recover_document_indexing_task.py +++ b/api/tasks/recover_document_indexing_task.py @@ -43,6 +43,6 @@ def recover_document_indexing_task(dataset_id: str, document_id: str): except DocumentIsPausedError as ex: logging.info(click.style(str(ex), fg="yellow")) except Exception: - pass + logging.exception("recover_document_indexing_task failed, document_id: {}".format(document_id)) finally: db.session.close() diff --git a/api/tasks/remove_app_and_related_data_task.py b/api/tasks/remove_app_and_related_data_task.py index 4e527bbaed..d366efd6f2 100644 --- a/api/tasks/remove_app_and_related_data_task.py +++ b/api/tasks/remove_app_and_related_data_task.py @@ -30,7 +30,7 @@ from models import ( ) from models.tools import WorkflowToolProvider from models.web import PinnedConversation, SavedMessage -from models.workflow import ConversationVariable, Workflow, WorkflowAppLog, WorkflowNodeExecution, WorkflowRun +from models.workflow import ConversationVariable, Workflow, WorkflowAppLog, WorkflowNodeExecutionModel, WorkflowRun @shared_task(queue="app_deletion", bind=True, max_retries=3) @@ -188,9 +188,9 @@ def _delete_app_workflow_runs(tenant_id: str, app_id: str): def _delete_app_workflow_node_executions(tenant_id: str, app_id: str): def del_workflow_node_execution(workflow_node_execution_id: str): - db.session.query(WorkflowNodeExecution).filter(WorkflowNodeExecution.id == workflow_node_execution_id).delete( - synchronize_session=False - ) + db.session.query(WorkflowNodeExecutionModel).filter( + WorkflowNodeExecutionModel.id == workflow_node_execution_id + ).delete(synchronize_session=False) _delete_records( """select id from workflow_node_executions where tenant_id=:tenant_id and app_id=:app_id limit 1000""", diff --git 
a/api/tasks/retry_document_indexing_task.py b/api/tasks/retry_document_indexing_task.py index 7e50eb9f8d..a6e7092216 100644 --- a/api/tasks/retry_document_indexing_task.py +++ b/api/tasks/retry_document_indexing_task.py @@ -95,7 +95,7 @@ def retry_document_indexing_task(dataset_id: str, document_ids: list[str]): db.session.commit() logging.info(click.style(str(ex), fg="yellow")) redis_client.delete(retry_indexing_cache_key) - pass + logging.exception("retry_document_indexing_task failed, document_id: {}".format(document_id)) finally: db.session.close() end_at = time.perf_counter() diff --git a/api/tasks/sync_website_document_indexing_task.py b/api/tasks/sync_website_document_indexing_task.py index e75252edbe..dba0a39c2d 100644 --- a/api/tasks/sync_website_document_indexing_task.py +++ b/api/tasks/sync_website_document_indexing_task.py @@ -87,6 +87,6 @@ def sync_website_document_indexing_task(dataset_id: str, document_id: str): db.session.commit() logging.info(click.style(str(ex), fg="yellow")) redis_client.delete(sync_indexing_cache_key) - pass + logging.exception("sync_website_document_indexing_task failed, document_id: {}".format(document_id)) end_at = time.perf_counter() logging.info(click.style("Sync document: {} latency: {}".format(document_id, end_at - start_at), fg="green")) diff --git a/api/tests/integration_tests/workflow/nodes/test_code.py b/api/tests/integration_tests/workflow/nodes/test_code.py index 4de985ae7c..13d78c2d83 100644 --- a/api/tests/integration_tests/workflow/nodes/test_code.py +++ b/api/tests/integration_tests/workflow/nodes/test_code.py @@ -8,6 +8,7 @@ import pytest from core.app.entities.app_invoke_entities import InvokeFrom from core.workflow.entities.node_entities import NodeRunResult from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.enums import SystemVariableKey from core.workflow.graph_engine.entities.graph import Graph from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams @@ -15,7 +16,7 @@ from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntime from core.workflow.nodes.code.code_node import CodeNode from core.workflow.nodes.code.entities import CodeNodeData from models.enums import UserFrom -from models.workflow import WorkflowNodeExecutionStatus, WorkflowType +from models.workflow import WorkflowType from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock CODE_MAX_STRING_LENGTH = int(getenv("CODE_MAX_STRING_LENGTH", "10000")) diff --git a/api/tests/integration_tests/workflow/nodes/test_llm.py b/api/tests/integration_tests/workflow/nodes/test_llm.py index 777a04bd7f..5fbee266bd 100644 --- a/api/tests/integration_tests/workflow/nodes/test_llm.py +++ b/api/tests/integration_tests/workflow/nodes/test_llm.py @@ -9,6 +9,7 @@ import pytest from core.app.entities.app_invoke_entities import InvokeFrom from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.enums import SystemVariableKey from core.workflow.graph_engine.entities.graph import Graph from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams @@ -17,7 +18,7 @@ from core.workflow.nodes.event import RunCompletedEvent from core.workflow.nodes.llm.node import LLMNode from extensions.ext_database import db from models.enums import UserFrom -from models.workflow 
import WorkflowNodeExecutionStatus, WorkflowType +from models.workflow import WorkflowType from tests.integration_tests.workflow.nodes.__mock.model import get_mocked_fetch_model_config """FOR MOCK FIXTURES, DO NOT REMOVE""" diff --git a/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py b/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py index 5c6bb82024..e89e03ae86 100644 --- a/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py +++ b/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py @@ -7,6 +7,7 @@ from unittest.mock import MagicMock from core.app.entities.app_invoke_entities import InvokeFrom from core.model_runtime.entities import AssistantPromptMessage from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.enums import SystemVariableKey from core.workflow.graph_engine.entities.graph import Graph from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams @@ -17,7 +18,7 @@ from models.enums import UserFrom from tests.integration_tests.workflow.nodes.__mock.model import get_mocked_fetch_model_config """FOR MOCK FIXTURES, DO NOT REMOVE""" -from models.workflow import WorkflowNodeExecutionStatus, WorkflowType +from models.workflow import WorkflowType from tests.integration_tests.model_runtime.__mock.plugin_daemon import setup_model_mock diff --git a/api/tests/integration_tests/workflow/nodes/test_template_transform.py b/api/tests/integration_tests/workflow/nodes/test_template_transform.py index 51d61a95ea..a5f2677a59 100644 --- a/api/tests/integration_tests/workflow/nodes/test_template_transform.py +++ b/api/tests/integration_tests/workflow/nodes/test_template_transform.py @@ -5,13 +5,14 @@ import pytest from core.app.entities.app_invoke_entities import InvokeFrom from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.enums import SystemVariableKey from core.workflow.graph_engine.entities.graph import Graph from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState from core.workflow.nodes.template_transform.template_transform_node import TemplateTransformNode from models.enums import UserFrom -from models.workflow import WorkflowNodeExecutionStatus, WorkflowType +from models.workflow import WorkflowType from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock diff --git a/api/tests/integration_tests/workflow/nodes/test_tool.py b/api/tests/integration_tests/workflow/nodes/test_tool.py index 5a569a5983..039beedafe 100644 --- a/api/tests/integration_tests/workflow/nodes/test_tool.py +++ b/api/tests/integration_tests/workflow/nodes/test_tool.py @@ -5,6 +5,7 @@ from unittest.mock import MagicMock from core.app.entities.app_invoke_entities import InvokeFrom from core.tools.utils.configuration import ToolParameterConfigurationManager from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.enums import SystemVariableKey from core.workflow.graph_engine.entities.graph import Graph from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams @@ -12,7 +13,7 @@ from 
core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntime from core.workflow.nodes.event.event import RunCompletedEvent from core.workflow.nodes.tool.tool_node import ToolNode from models.enums import UserFrom -from models.workflow import WorkflowNodeExecutionStatus, WorkflowType +from models.workflow import WorkflowType def init_tool_node(config: dict): diff --git a/api/tests/unit_tests/core/helper/test_marketplace.py b/api/tests/unit_tests/core/helper/test_marketplace.py deleted file mode 100644 index 6ccce7ac9f..0000000000 --- a/api/tests/unit_tests/core/helper/test_marketplace.py +++ /dev/null @@ -1,7 +0,0 @@ -from core.helper.marketplace import download_plugin_pkg - - -def test_download_plugin_pkg(): - pkg = download_plugin_pkg("langgenius/bing:0.0.1@e58735424d2104f208c2bd683c5142e0332045b425927067acf432b26f3d970b") - assert pkg is not None - assert len(pkg) > 0 diff --git a/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py b/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py index ba3c1eb5e0..e3e500e310 100644 --- a/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py +++ b/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py @@ -4,7 +4,7 @@ from constants import UUID_NIL from core.prompt.utils.extract_thread_messages import extract_thread_messages -class TestMessage: +class MockMessage: def __init__(self, id, parent_message_id): self.id = id self.parent_message_id = parent_message_id @@ -14,7 +14,7 @@ class TestMessage: def test_extract_thread_messages_single_message(): - messages = [TestMessage(str(uuid4()), UUID_NIL)] + messages = [MockMessage(str(uuid4()), UUID_NIL)] result = extract_thread_messages(messages) assert len(result) == 1 assert result[0] == messages[0] @@ -23,11 +23,11 @@ def test_extract_thread_messages_single_message(): def test_extract_thread_messages_linear_thread(): id1, id2, id3, id4, id5 = str(uuid4()), str(uuid4()), str(uuid4()), str(uuid4()), str(uuid4()) messages = [ - TestMessage(id5, id4), - TestMessage(id4, id3), - TestMessage(id3, id2), - TestMessage(id2, id1), - TestMessage(id1, UUID_NIL), + MockMessage(id5, id4), + MockMessage(id4, id3), + MockMessage(id3, id2), + MockMessage(id2, id1), + MockMessage(id1, UUID_NIL), ] result = extract_thread_messages(messages) assert len(result) == 5 @@ -37,10 +37,10 @@ def test_extract_thread_messages_linear_thread(): def test_extract_thread_messages_branched_thread(): id1, id2, id3, id4 = str(uuid4()), str(uuid4()), str(uuid4()), str(uuid4()) messages = [ - TestMessage(id4, id2), - TestMessage(id3, id2), - TestMessage(id2, id1), - TestMessage(id1, UUID_NIL), + MockMessage(id4, id2), + MockMessage(id3, id2), + MockMessage(id2, id1), + MockMessage(id1, UUID_NIL), ] result = extract_thread_messages(messages) assert len(result) == 3 @@ -56,9 +56,9 @@ def test_extract_thread_messages_empty_list(): def test_extract_thread_messages_partially_loaded(): id0, id1, id2, id3 = str(uuid4()), str(uuid4()), str(uuid4()), str(uuid4()) messages = [ - TestMessage(id3, id2), - TestMessage(id2, id1), - TestMessage(id1, id0), + MockMessage(id3, id2), + MockMessage(id2, id1), + MockMessage(id1, id0), ] result = extract_thread_messages(messages) assert len(result) == 3 @@ -68,9 +68,9 @@ def test_extract_thread_messages_partially_loaded(): def test_extract_thread_messages_legacy_messages(): id1, id2, id3 = str(uuid4()), str(uuid4()), str(uuid4()) messages = [ - TestMessage(id3, UUID_NIL), - TestMessage(id2, UUID_NIL), - TestMessage(id1, UUID_NIL), + MockMessage(id3, 
UUID_NIL), + MockMessage(id2, UUID_NIL), + MockMessage(id1, UUID_NIL), ] result = extract_thread_messages(messages) assert len(result) == 3 @@ -80,11 +80,11 @@ def test_extract_thread_messages_legacy_messages(): def test_extract_thread_messages_mixed_with_legacy_messages(): id1, id2, id3, id4, id5 = str(uuid4()), str(uuid4()), str(uuid4()), str(uuid4()), str(uuid4()) messages = [ - TestMessage(id5, id4), - TestMessage(id4, id2), - TestMessage(id3, id2), - TestMessage(id2, UUID_NIL), - TestMessage(id1, UUID_NIL), + MockMessage(id5, id4), + MockMessage(id4, id2), + MockMessage(id3, id2), + MockMessage(id2, UUID_NIL), + MockMessage(id1, UUID_NIL), ] result = extract_thread_messages(messages) assert len(result) == 4 diff --git a/api/tests/unit_tests/core/rag/datasource/vdb/milvus/test_milvus.py b/api/tests/unit_tests/core/rag/datasource/vdb/milvus/test_milvus.py index bd414c88f4..48cc8a7e1c 100644 --- a/api/tests/unit_tests/core/rag/datasource/vdb/milvus/test_milvus.py +++ b/api/tests/unit_tests/core/rag/datasource/vdb/milvus/test_milvus.py @@ -1,5 +1,5 @@ import pytest -from pydantic.error_wrappers import ValidationError +from pydantic import ValidationError from core.rag.datasource.vdb.milvus.milvus_vector import MilvusConfig diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_graph_engine.py b/api/tests/unit_tests/core/workflow/graph_engine/test_graph_engine.py index f3dbd1836b..7535ec4866 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_graph_engine.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_graph_engine.py @@ -4,8 +4,9 @@ import pytest from flask import Flask from core.app.entities.app_invoke_entities import InvokeFrom -from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult +from core.workflow.entities.node_entities import NodeRunResult, WorkflowNodeExecutionMetadataKey from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.enums import SystemVariableKey from core.workflow.graph_engine.entities.event import ( BaseNodeEvent, @@ -25,7 +26,7 @@ from core.workflow.nodes.event import RunCompletedEvent, RunStreamChunkEvent from core.workflow.nodes.llm.node import LLMNode from core.workflow.nodes.question_classifier.question_classifier_node import QuestionClassifierNode from models.enums import UserFrom -from models.workflow import WorkflowNodeExecutionStatus, WorkflowType +from models.workflow import WorkflowType @pytest.fixture @@ -201,9 +202,9 @@ def test_run_parallel_in_workflow(mock_close, mock_remove): process_data={}, outputs={}, metadata={ - NodeRunMetadataKey.TOTAL_TOKENS: 1, - NodeRunMetadataKey.TOTAL_PRICE: 1, - NodeRunMetadataKey.CURRENCY: "USD", + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: 1, + WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: 1, + WorkflowNodeExecutionMetadataKey.CURRENCY: "USD", }, ) ) @@ -836,9 +837,9 @@ def test_condition_parallel_correct_output(mock_close, mock_remove, app): process_data={}, outputs={"class_name": "financial", "class_id": "1"}, metadata={ - NodeRunMetadataKey.TOTAL_TOKENS: 1, - NodeRunMetadataKey.TOTAL_PRICE: 1, - NodeRunMetadataKey.CURRENCY: "USD", + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: 1, + WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: 1, + WorkflowNodeExecutionMetadataKey.CURRENCY: "USD", }, edge_source_handle="1", ) @@ -852,9 +853,9 @@ def test_condition_parallel_correct_output(mock_close, mock_remove, app): process_data={}, 
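The `TestMessage` → `MockMessage` rename in the thread-message tests above is more than cosmetic: pytest's default collection rules treat any module-level class named `Test*` as a test container, and when such a class defines an `__init__` it is skipped with a `PytestCollectionWarning`. Prefixing pure fixture helpers with `Mock` keeps them out of collection entirely. A minimal, self-contained sketch of the convention (the helper below is illustrative, not Dify's real `Message` model, and `UUID_NIL` is a stand-in for the value imported from `constants`):

```python
from uuid import uuid4

UUID_NIL = "00000000-0000-0000-0000-000000000000"  # stand-in for constants.UUID_NIL


class MockMessage:
    """Plain test helper; the Mock* prefix keeps pytest from trying to collect it."""

    def __init__(self, id: str, parent_message_id: str):
        self.id = id
        self.parent_message_id = parent_message_id


def test_single_message_has_no_parent():
    message = MockMessage(str(uuid4()), UUID_NIL)
    assert message.parent_message_id == UUID_NIL
```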
outputs={"result": "dify 123"}, metadata={ - NodeRunMetadataKey.TOTAL_TOKENS: 1, - NodeRunMetadataKey.TOTAL_PRICE: 1, - NodeRunMetadataKey.CURRENCY: "USD", + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: 1, + WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: 1, + WorkflowNodeExecutionMetadataKey.CURRENCY: "USD", }, ) ) diff --git a/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py b/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py index 0369f3fa44..b7f78d91fa 100644 --- a/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py +++ b/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py @@ -4,6 +4,7 @@ from unittest.mock import MagicMock from core.app.entities.app_invoke_entities import InvokeFrom from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.enums import SystemVariableKey from core.workflow.graph_engine.entities.graph import Graph from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams @@ -11,7 +12,7 @@ from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntime from core.workflow.nodes.answer.answer_node import AnswerNode from extensions.ext_database import db from models.enums import UserFrom -from models.workflow import WorkflowNodeExecutionStatus, WorkflowType +from models.workflow import WorkflowType def test_execute_answer(): diff --git a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py index 2073d355f0..7fd32a4826 100644 --- a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py @@ -4,6 +4,7 @@ from core.app.entities.app_invoke_entities import InvokeFrom from core.file import File, FileTransferMethod, FileType from core.variables import ArrayFileVariable, FileVariable from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.graph_engine import Graph, GraphInitParams, GraphRuntimeState from core.workflow.nodes.answer import AnswerStreamGenerateRoute from core.workflow.nodes.end import EndStreamParam @@ -15,7 +16,7 @@ from core.workflow.nodes.http_request import ( HttpRequestNodeData, ) from models.enums import UserFrom -from models.workflow import WorkflowNodeExecutionStatus, WorkflowType +from models.workflow import WorkflowType def test_http_request_node_binary_file(monkeypatch): diff --git a/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration.py b/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration.py index 29bd4d6c6c..6d854c950d 100644 --- a/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration.py +++ b/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration.py @@ -5,6 +5,7 @@ from unittest.mock import patch from core.app.entities.app_invoke_entities import InvokeFrom from core.workflow.entities.node_entities import NodeRunResult from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.enums import SystemVariableKey from core.workflow.graph_engine.entities.graph import Graph from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams @@ -14,7 +15,7 @@ from 
core.workflow.nodes.iteration.entities import ErrorHandleMode from core.workflow.nodes.iteration.iteration_node import IterationNode from core.workflow.nodes.template_transform.template_transform_node import TemplateTransformNode from models.enums import UserFrom -from models.workflow import WorkflowNodeExecutionStatus, WorkflowType +from models.workflow import WorkflowType def test_run(): diff --git a/api/tests/unit_tests/core/workflow/nodes/test_answer.py b/api/tests/unit_tests/core/workflow/nodes/test_answer.py index 2f0aa28b48..abc822e98b 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_answer.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_answer.py @@ -4,6 +4,7 @@ from unittest.mock import MagicMock from core.app.entities.app_invoke_entities import InvokeFrom from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.enums import SystemVariableKey from core.workflow.graph_engine.entities.graph import Graph from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams @@ -11,7 +12,7 @@ from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntime from core.workflow.nodes.answer.answer_node import AnswerNode from extensions.ext_database import db from models.enums import UserFrom -from models.workflow import WorkflowNodeExecutionStatus, WorkflowType +from models.workflow import WorkflowType def test_execute_answer(): diff --git a/api/tests/unit_tests/core/workflow/nodes/test_continue_on_error.py b/api/tests/unit_tests/core/workflow/nodes/test_continue_on_error.py index 111c647d9c..ff60d5974b 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_continue_on_error.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_continue_on_error.py @@ -1,7 +1,8 @@ from unittest.mock import patch from core.app.entities.app_invoke_entities import InvokeFrom -from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult +from core.workflow.entities.node_entities import NodeRunResult, WorkflowNodeExecutionMetadataKey +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.enums import SystemVariableKey from core.workflow.graph_engine.entities.event import ( GraphRunPartialSucceededEvent, @@ -14,7 +15,7 @@ from core.workflow.graph_engine.graph_engine import GraphEngine from core.workflow.nodes.event.event import RunCompletedEvent, RunStreamChunkEvent from core.workflow.nodes.llm.node import LLMNode from models.enums import UserFrom -from models.workflow import WorkflowNodeExecutionStatus, WorkflowType +from models.workflow import WorkflowType class ContinueOnErrorTestHelper: @@ -542,9 +543,9 @@ def test_stream_output_with_fail_branch_continue_on_error(): process_data={}, outputs={}, metadata={ - NodeRunMetadataKey.TOTAL_TOKENS: 1, - NodeRunMetadataKey.TOTAL_PRICE: 1, - NodeRunMetadataKey.CURRENCY: "USD", + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: 1, + WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: 1, + WorkflowNodeExecutionMetadataKey.CURRENCY: "USD", }, ) ) diff --git a/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py b/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py index 6d46ea9b89..35d83449c3 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py @@ -7,6 +7,7 @@ from core.file import File, 
FileTransferMethod from core.variables import ArrayFileSegment from core.variables.variables import StringVariable from core.workflow.entities.node_entities import NodeRunResult +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.document_extractor import DocumentExtractorNode, DocumentExtractorNodeData from core.workflow.nodes.document_extractor.node import ( _extract_text_from_docx, @@ -15,7 +16,6 @@ from core.workflow.nodes.document_extractor.node import ( _extract_text_from_plain_text, ) from core.workflow.nodes.enums import NodeType -from models.workflow import WorkflowNodeExecutionStatus @pytest.fixture diff --git a/api/tests/unit_tests/core/workflow/nodes/test_if_else.py b/api/tests/unit_tests/core/workflow/nodes/test_if_else.py index 41e2c5d484..c4e411f9d6 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_if_else.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_if_else.py @@ -6,6 +6,7 @@ from core.app.entities.app_invoke_entities import InvokeFrom from core.file import File, FileTransferMethod, FileType from core.variables import ArrayFileSegment from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.enums import SystemVariableKey from core.workflow.graph_engine.entities.graph import Graph from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams @@ -15,7 +16,7 @@ from core.workflow.nodes.if_else.if_else_node import IfElseNode from core.workflow.utils.condition.entities import Condition, SubCondition, SubVariableCondition from extensions.ext_database import db from models.enums import UserFrom -from models.workflow import WorkflowNodeExecutionStatus, WorkflowType +from models.workflow import WorkflowType def test_execute_if_else_result_true(): diff --git a/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py b/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py index 36116d3540..77d42e2692 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py @@ -4,6 +4,7 @@ import pytest from core.file import File, FileTransferMethod, FileType from core.variables import ArrayFileSegment +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.nodes.list_operator.entities import ( ExtractConfig, FilterBy, @@ -14,7 +15,6 @@ from core.workflow.nodes.list_operator.entities import ( ) from core.workflow.nodes.list_operator.exc import InvalidKeyError from core.workflow.nodes.list_operator.node import ListOperatorNode, _get_file_extract_string_func -from models.workflow import WorkflowNodeExecutionStatus @pytest.fixture diff --git a/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py b/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py index f593510830..e121f6338c 100644 --- a/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py @@ -7,6 +7,7 @@ from core.tools.entities.tool_entities import ToolInvokeMessage, ToolProviderTyp from core.tools.errors import ToolInvokeError from core.workflow.entities.node_entities import NodeRunResult from core.workflow.entities.variable_pool import VariablePool +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus from core.workflow.graph_engine import Graph, 
GraphInitParams, GraphRuntimeState from core.workflow.nodes.answer import AnswerStreamGenerateRoute from core.workflow.nodes.end import EndStreamParam @@ -14,7 +15,7 @@ from core.workflow.nodes.enums import ErrorStrategy from core.workflow.nodes.event import RunCompletedEvent from core.workflow.nodes.tool import ToolNode from core.workflow.nodes.tool.entities import ToolNodeData -from models import UserFrom, WorkflowNodeExecutionStatus, WorkflowType +from models import UserFrom, WorkflowType def _create_tool_node(): diff --git a/api/tests/unit_tests/core/workflow/test_workflow_cycle_manager.py b/api/tests/unit_tests/core/workflow/test_workflow_cycle_manager.py index 9c955fc086..fddc182594 100644 --- a/api/tests/unit_tests/core/workflow/test_workflow_cycle_manager.py +++ b/api/tests/unit_tests/core/workflow/test_workflow_cycle_manager.py @@ -12,21 +12,20 @@ from core.app.entities.queue_entities import ( QueueNodeStartedEvent, QueueNodeSucceededEvent, ) -from core.workflow.entities.node_entities import NodeRunMetadataKey -from core.workflow.entities.node_execution_entities import NodeExecution, NodeExecutionStatus -from core.workflow.entities.workflow_execution_entities import WorkflowExecution, WorkflowExecutionStatus, WorkflowType +from core.workflow.entities.workflow_execution import WorkflowExecution, WorkflowExecutionStatus, WorkflowType +from core.workflow.entities.workflow_node_execution import ( + WorkflowNodeExecution, + WorkflowNodeExecutionMetadataKey, + WorkflowNodeExecutionStatus, +) from core.workflow.enums import SystemVariableKey from core.workflow.nodes import NodeType -from core.workflow.repository.workflow_execution_repository import WorkflowExecutionRepository -from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository -from core.workflow.workflow_cycle_manager import WorkflowCycleManager +from core.workflow.repositories.workflow_execution_repository import WorkflowExecutionRepository +from core.workflow.repositories.workflow_node_execution_repository import WorkflowNodeExecutionRepository +from core.workflow.workflow_cycle_manager import CycleManagerWorkflowInfo, WorkflowCycleManager from models.enums import CreatorUserRole from models.model import AppMode -from models.workflow import ( - Workflow, - WorkflowRun, - WorkflowRunStatus, -) +from models.workflow import Workflow, WorkflowRun @pytest.fixture @@ -74,7 +73,7 @@ def real_workflow_system_variables(): SystemVariableKey.USER_ID: "test-user-id", SystemVariableKey.APP_ID: "test-app-id", SystemVariableKey.WORKFLOW_ID: "test-workflow-id", - SystemVariableKey.WORKFLOW_RUN_ID: "test-workflow-run-id", + SystemVariableKey.WORKFLOW_EXECUTION_ID: "test-workflow-run-id", } @@ -93,16 +92,38 @@ def mock_workflow_execution_repository(): return repo +@pytest.fixture +def real_workflow_entity(): + return CycleManagerWorkflowInfo( + workflow_id="test-workflow-id", # Matches ID used in other fixtures + workflow_type=WorkflowType.CHAT, + version="1.0.0", + graph_data={ + "nodes": [ + { + "id": "node1", + "type": "chat", # NodeType is a string enum + "name": "Chat Node", + "data": {"model": "gpt-3.5-turbo", "prompt": "test prompt"}, + } + ], + "edges": [], + }, + ) + + @pytest.fixture def workflow_cycle_manager( real_app_generate_entity, real_workflow_system_variables, mock_workflow_execution_repository, mock_node_execution_repository, + real_workflow_entity, ): return WorkflowCycleManager( application_generate_entity=real_app_generate_entity, 
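For context on the fixture being assembled at this point: the reworked `WorkflowCycleManager` no longer looks the workflow up in the database inside `handle_workflow_run_start`; it is handed an explicit `CycleManagerWorkflowInfo` value plus the two repository ports at construction time. A condensed sketch of that wiring, assuming the Dify `api` package is importable; the generate-entity and system-variable arguments below are stand-ins rather than the real fixtures defined in this test module:

```python
from unittest.mock import MagicMock

from core.workflow.entities.workflow_execution import WorkflowType
from core.workflow.repositories.workflow_execution_repository import WorkflowExecutionRepository
from core.workflow.repositories.workflow_node_execution_repository import WorkflowNodeExecutionRepository
from core.workflow.workflow_cycle_manager import CycleManagerWorkflowInfo, WorkflowCycleManager

workflow_info = CycleManagerWorkflowInfo(
    workflow_id="test-workflow-id",
    workflow_type=WorkflowType.CHAT,
    version="1.0.0",
    graph_data={"nodes": [], "edges": []},
)

manager = WorkflowCycleManager(
    application_generate_entity=MagicMock(),  # stand-in for the AppGenerateEntity fixture
    workflow_system_variables={},  # stand-in for the SystemVariableKey mapping
    workflow_info=workflow_info,
    workflow_execution_repository=MagicMock(spec=WorkflowExecutionRepository),
    workflow_node_execution_repository=MagicMock(spec=WorkflowNodeExecutionRepository),
)

# In the updated test, manager.handle_workflow_run_start() takes no session or
# workflow_id, returns an execution whose workflow_id matches workflow_info,
# and persists it through workflow_execution_repository.save().
```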
workflow_system_variables=real_workflow_system_variables, + workflow_info=real_workflow_entity, workflow_execution_repository=mock_workflow_execution_repository, workflow_node_execution_repository=mock_node_execution_repository, ) @@ -148,7 +169,7 @@ def real_workflow_run(): workflow_run.version = "1.0" workflow_run.graph = json.dumps({"nodes": [], "edges": []}) workflow_run.inputs = json.dumps({"query": "test query"}) - workflow_run.status = WorkflowRunStatus.RUNNING + workflow_run.status = WorkflowExecutionStatus.RUNNING workflow_run.outputs = json.dumps({"answer": "test answer"}) workflow_run.created_by_role = CreatorUserRole.ACCOUNT workflow_run.created_by = "test-user-id" @@ -171,20 +192,13 @@ def test_init( assert workflow_cycle_manager._workflow_node_execution_repository == mock_node_execution_repository -def test_handle_workflow_run_start(workflow_cycle_manager, mock_session, real_workflow): +def test_handle_workflow_run_start(workflow_cycle_manager): """Test handle_workflow_run_start method""" - # Mock session.scalar to return the workflow and max sequence - mock_session.scalar.side_effect = [real_workflow, 5] - # Call the method - workflow_execution = workflow_cycle_manager.handle_workflow_run_start( - session=mock_session, - workflow_id="test-workflow-id", - ) + workflow_execution = workflow_cycle_manager.handle_workflow_run_start() # Verify the result - assert workflow_execution.workflow_id == real_workflow.id - assert workflow_execution.sequence_number == 6 # max_sequence + 1 + assert workflow_execution.workflow_id == "test-workflow-id" # Verify the workflow_execution_repository.save was called workflow_cycle_manager._workflow_execution_repository.save.assert_called_once_with(workflow_execution) @@ -195,11 +209,10 @@ def test_handle_workflow_run_success(workflow_cycle_manager, mock_workflow_execu # Create a real WorkflowExecution workflow_execution = WorkflowExecution( - id="test-workflow-run-id", + id_="test-workflow-run-id", workflow_id="test-workflow-id", workflow_version="1.0", - sequence_number=1, - type=WorkflowType.CHAT, + workflow_type=WorkflowType.CHAT, graph={"nodes": [], "edges": []}, inputs={"query": "test query"}, started_at=datetime.now(UTC).replace(tzinfo=None), @@ -230,11 +243,10 @@ def test_handle_workflow_run_failed(workflow_cycle_manager, mock_workflow_execut # Create a real WorkflowExecution workflow_execution = WorkflowExecution( - id="test-workflow-run-id", + id_="test-workflow-run-id", workflow_id="test-workflow-id", workflow_version="1.0", - sequence_number=1, - type=WorkflowType.CHAT, + workflow_type=WorkflowType.CHAT, graph={"nodes": [], "edges": []}, inputs={"query": "test query"}, started_at=datetime.now(UTC).replace(tzinfo=None), @@ -251,13 +263,13 @@ def test_handle_workflow_run_failed(workflow_cycle_manager, mock_workflow_execut workflow_run_id="test-workflow-run-id", total_tokens=50, total_steps=3, - status=WorkflowRunStatus.FAILED, + status=WorkflowExecutionStatus.FAILED, error_message="Test error message", ) # Verify the result assert result == workflow_execution - assert result.status == WorkflowExecutionStatus(WorkflowRunStatus.FAILED.value) + assert result.status == WorkflowExecutionStatus.FAILED assert result.error_message == "Test error message" assert result.total_tokens == 50 assert result.total_steps == 3 @@ -269,11 +281,10 @@ def test_handle_node_execution_start(workflow_cycle_manager, mock_workflow_execu # Create a real WorkflowExecution workflow_execution = WorkflowExecution( - id="test-workflow-execution-id", + 
id_="test-workflow-execution-id", workflow_id="test-workflow-id", workflow_version="1.0", - sequence_number=1, - type=WorkflowType.CHAT, + workflow_type=WorkflowType.CHAT, graph={"nodes": [], "edges": []}, inputs={"query": "test query"}, started_at=datetime.now(UTC).replace(tzinfo=None), @@ -301,18 +312,18 @@ def test_handle_node_execution_start(workflow_cycle_manager, mock_workflow_execu # Call the method result = workflow_cycle_manager.handle_node_execution_start( - workflow_execution_id=workflow_execution.id, + workflow_execution_id=workflow_execution.id_, event=event, ) # Verify the result assert result.workflow_id == workflow_execution.workflow_id - assert result.workflow_run_id == workflow_execution.id + assert result.workflow_execution_id == workflow_execution.id_ assert result.node_execution_id == event.node_execution_id assert result.node_id == event.node_id assert result.node_type == event.node_type assert result.title == event.node_data.title - assert result.status == NodeExecutionStatus.RUNNING + assert result.status == WorkflowNodeExecutionStatus.RUNNING # Verify save was called workflow_cycle_manager._workflow_node_execution_repository.save.assert_called_once_with(result) @@ -323,11 +334,10 @@ def test_get_workflow_execution_or_raise_error(workflow_cycle_manager, mock_work # Create a real WorkflowExecution workflow_execution = WorkflowExecution( - id="test-workflow-run-id", + id_="test-workflow-run-id", workflow_id="test-workflow-id", workflow_version="1.0", - sequence_number=1, - type=WorkflowType.CHAT, + workflow_type=WorkflowType.CHAT, graph={"nodes": [], "edges": []}, inputs={"query": "test query"}, started_at=datetime.now(UTC).replace(tzinfo=None), @@ -358,16 +368,16 @@ def test_handle_workflow_node_execution_success(workflow_cycle_manager): event.inputs = {"input": "test input"} event.process_data = {"process": "test process"} event.outputs = {"output": "test output"} - event.execution_metadata = {NodeRunMetadataKey.TOTAL_TOKENS: 100} + event.execution_metadata = {WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: 100} event.start_at = datetime.now(UTC).replace(tzinfo=None) # Create a real node execution - node_execution = NodeExecution( + node_execution = WorkflowNodeExecution( id="test-node-execution-record-id", node_execution_id="test-node-execution-id", workflow_id="test-workflow-id", - workflow_run_id="test-workflow-run-id", + workflow_execution_id="test-workflow-run-id", index=1, node_id="test-node-id", node_type=NodeType.LLM, @@ -385,7 +395,7 @@ def test_handle_workflow_node_execution_success(workflow_cycle_manager): # Verify the result assert result == node_execution - assert result.status == NodeExecutionStatus.SUCCEEDED + assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED # Verify save was called workflow_cycle_manager._workflow_node_execution_repository.save.assert_called_once_with(node_execution) @@ -396,11 +406,10 @@ def test_handle_workflow_run_partial_success(workflow_cycle_manager, mock_workfl # Create a real WorkflowExecution workflow_execution = WorkflowExecution( - id="test-workflow-run-id", + id_="test-workflow-run-id", workflow_id="test-workflow-id", workflow_version="1.0", - sequence_number=1, - type=WorkflowType.CHAT, + workflow_type=WorkflowType.CHAT, graph={"nodes": [], "edges": []}, inputs={"query": "test query"}, started_at=datetime.now(UTC).replace(tzinfo=None), @@ -436,17 +445,17 @@ def test_handle_workflow_node_execution_failed(workflow_cycle_manager): event.inputs = {"input": "test input"} event.process_data = {"process": "test 
process"} event.outputs = {"output": "test output"} - event.execution_metadata = {NodeRunMetadataKey.TOTAL_TOKENS: 100} + event.execution_metadata = {WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: 100} event.start_at = datetime.now(UTC).replace(tzinfo=None) event.error = "Test error message" # Create a real node execution - node_execution = NodeExecution( + node_execution = WorkflowNodeExecution( id="test-node-execution-record-id", node_execution_id="test-node-execution-id", workflow_id="test-workflow-id", - workflow_run_id="test-workflow-run-id", + workflow_execution_id="test-workflow-run-id", index=1, node_id="test-node-id", node_type=NodeType.LLM, @@ -464,7 +473,7 @@ def test_handle_workflow_node_execution_failed(workflow_cycle_manager): # Verify the result assert result == node_execution - assert result.status == NodeExecutionStatus.FAILED + assert result.status == WorkflowNodeExecutionStatus.FAILED assert result.error == "Test error message" # Verify save was called diff --git a/api/tests/unit_tests/models/test_workflow.py b/api/tests/unit_tests/models/test_workflow.py index 34802d47a7..b79e95c7ed 100644 --- a/api/tests/unit_tests/models/test_workflow.py +++ b/api/tests/unit_tests/models/test_workflow.py @@ -4,7 +4,7 @@ from uuid import uuid4 from constants import HIDDEN_VALUE from core.variables import FloatVariable, IntegerVariable, SecretVariable, StringVariable -from models.workflow import Workflow, WorkflowNodeExecution +from models.workflow import Workflow, WorkflowNodeExecutionModel def test_environment_variables(): @@ -156,7 +156,7 @@ def test_to_dict(): class TestWorkflowNodeExecution: def test_execution_metadata_dict(self): - node_exec = WorkflowNodeExecution() + node_exec = WorkflowNodeExecutionModel() node_exec.execution_metadata = None assert node_exec.execution_metadata_dict == {} diff --git a/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py b/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py index 7c5020db02..643efb0a0c 100644 --- a/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py +++ b/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py @@ -13,12 +13,15 @@ from sqlalchemy.orm import Session, sessionmaker from core.model_runtime.utils.encoders import jsonable_encoder from core.repositories import SQLAlchemyWorkflowNodeExecutionRepository -from core.workflow.entities.node_entities import NodeRunMetadataKey -from core.workflow.entities.node_execution_entities import NodeExecution, NodeExecutionStatus +from core.workflow.entities.workflow_node_execution import ( + WorkflowNodeExecution, + WorkflowNodeExecutionMetadataKey, + WorkflowNodeExecutionStatus, +) from core.workflow.nodes.enums import NodeType -from core.workflow.repository.workflow_node_execution_repository import OrderConfig +from core.workflow.repositories.workflow_node_execution_repository import OrderConfig from models.account import Account, Tenant -from models.workflow import WorkflowNodeExecution, WorkflowNodeExecutionStatus, WorkflowNodeExecutionTriggeredFrom +from models.workflow import WorkflowNodeExecutionModel, WorkflowNodeExecutionTriggeredFrom def configure_mock_execution(mock_execution): @@ -82,7 +85,7 @@ def test_save(repository, session): """Test save method.""" session_obj, _ = session # Create a mock execution - execution = MagicMock(spec=WorkflowNodeExecution) + execution = MagicMock(spec=WorkflowNodeExecutionModel) execution.tenant_id = None 
execution.app_id = None execution.inputs = None @@ -108,7 +111,7 @@ def test_save_with_existing_tenant_id(repository, session): """Test save method with existing tenant_id.""" session_obj, _ = session # Create a mock execution with existing tenant_id - execution = MagicMock(spec=WorkflowNodeExecution) + execution = MagicMock(spec=WorkflowNodeExecutionModel) execution.tenant_id = "existing-tenant" execution.app_id = None execution.inputs = None @@ -117,7 +120,7 @@ def test_save_with_existing_tenant_id(repository, session): execution.metadata = None # Create a modified execution that will be returned by _to_db_model - modified_execution = MagicMock(spec=WorkflowNodeExecution) + modified_execution = MagicMock(spec=WorkflowNodeExecutionModel) modified_execution.tenant_id = "existing-tenant" # Tenant ID should not change modified_execution.app_id = repository._app_id # App ID should be set @@ -144,7 +147,7 @@ def test_get_by_node_execution_id(repository, session, mocker: MockerFixture): mock_stmt.where.return_value = mock_stmt # Create a properly configured mock execution - mock_execution = mocker.MagicMock(spec=WorkflowNodeExecution) + mock_execution = mocker.MagicMock(spec=WorkflowNodeExecutionModel) configure_mock_execution(mock_execution) session_obj.scalar.return_value = mock_execution @@ -176,7 +179,7 @@ def test_get_by_workflow_run(repository, session, mocker: MockerFixture): mock_stmt.order_by.return_value = mock_stmt # Create a properly configured mock execution - mock_execution = mocker.MagicMock(spec=WorkflowNodeExecution) + mock_execution = mocker.MagicMock(spec=WorkflowNodeExecutionModel) configure_mock_execution(mock_execution) session_obj.scalars.return_value.all.return_value = [mock_execution] @@ -209,7 +212,7 @@ def test_get_running_executions(repository, session, mocker: MockerFixture): mock_stmt.where.return_value = mock_stmt # Create a properly configured mock execution - mock_execution = mocker.MagicMock(spec=WorkflowNodeExecution) + mock_execution = mocker.MagicMock(spec=WorkflowNodeExecutionModel) configure_mock_execution(mock_execution) session_obj.scalars.return_value.all.return_value = [mock_execution] @@ -235,7 +238,7 @@ def test_update_via_save(repository, session): """Test updating an existing record via save method.""" session_obj, _ = session # Create a mock execution - execution = MagicMock(spec=WorkflowNodeExecution) + execution = MagicMock(spec=WorkflowNodeExecutionModel) execution.tenant_id = None execution.app_id = None execution.inputs = None @@ -275,7 +278,7 @@ def test_clear(repository, session, mocker: MockerFixture): repository.clear() # Assert delete was called with correct parameters - mock_delete.assert_called_once_with(WorkflowNodeExecution) + mock_delete.assert_called_once_with(WorkflowNodeExecutionModel) mock_stmt.where.assert_called() session_obj.execute.assert_called_once_with(mock_stmt) session_obj.commit.assert_called_once() @@ -284,11 +287,11 @@ def test_clear(repository, session, mocker: MockerFixture): def test_to_db_model(repository): """Test to_db_model method.""" # Create a domain model - domain_model = NodeExecution( + domain_model = WorkflowNodeExecution( id="test-id", workflow_id="test-workflow-id", node_execution_id="test-node-execution-id", - workflow_run_id="test-workflow-run-id", + workflow_execution_id="test-workflow-run-id", index=1, predecessor_node_id="test-predecessor-id", node_id="test-node-id", @@ -297,10 +300,13 @@ def test_to_db_model(repository): inputs={"input_key": "input_value"}, process_data={"process_key": 
"process_value"}, outputs={"output_key": "output_value"}, - status=NodeExecutionStatus.RUNNING, + status=WorkflowNodeExecutionStatus.RUNNING, error=None, elapsed_time=1.5, - metadata={NodeRunMetadataKey.TOTAL_TOKENS: 100, NodeRunMetadataKey.TOTAL_PRICE: Decimal("0.0")}, + metadata={ + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: 100, + WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: Decimal("0.0"), + }, created_at=datetime.now(), finished_at=None, ) @@ -309,13 +315,13 @@ def test_to_db_model(repository): db_model = repository.to_db_model(domain_model) # Assert DB model has correct values - assert isinstance(db_model, WorkflowNodeExecution) + assert isinstance(db_model, WorkflowNodeExecutionModel) assert db_model.id == domain_model.id assert db_model.tenant_id == repository._tenant_id assert db_model.app_id == repository._app_id assert db_model.workflow_id == domain_model.workflow_id assert db_model.triggered_from == repository._triggered_from - assert db_model.workflow_run_id == domain_model.workflow_run_id + assert db_model.workflow_run_id == domain_model.workflow_execution_id assert db_model.index == domain_model.index assert db_model.predecessor_node_id == domain_model.predecessor_node_id assert db_model.node_execution_id == domain_model.node_execution_id @@ -343,10 +349,10 @@ def test_to_domain_model(repository): inputs_dict = {"input_key": "input_value"} process_data_dict = {"process_key": "process_value"} outputs_dict = {"output_key": "output_value"} - metadata_dict = {str(NodeRunMetadataKey.TOTAL_TOKENS): 100} + metadata_dict = {str(WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS): 100} # Create a DB model using our custom subclass - db_model = WorkflowNodeExecution() + db_model = WorkflowNodeExecutionModel() db_model.id = "test-id" db_model.tenant_id = "test-tenant-id" db_model.app_id = "test-app-id" @@ -375,10 +381,10 @@ def test_to_domain_model(repository): domain_model = repository._to_domain_model(db_model) # Assert domain model has correct values - assert isinstance(domain_model, NodeExecution) + assert isinstance(domain_model, WorkflowNodeExecution) assert domain_model.id == db_model.id assert domain_model.workflow_id == db_model.workflow_id - assert domain_model.workflow_run_id == db_model.workflow_run_id + assert domain_model.workflow_execution_id == db_model.workflow_run_id assert domain_model.index == db_model.index assert domain_model.predecessor_node_id == db_model.predecessor_node_id assert domain_model.node_execution_id == db_model.node_execution_id @@ -388,7 +394,7 @@ def test_to_domain_model(repository): assert domain_model.inputs == inputs_dict assert domain_model.process_data == process_data_dict assert domain_model.outputs == outputs_dict - assert domain_model.status == NodeExecutionStatus(db_model.status) + assert domain_model.status == WorkflowNodeExecutionStatus(db_model.status) assert domain_model.error == db_model.error assert domain_model.elapsed_time == db_model.elapsed_time assert domain_model.metadata == metadata_dict diff --git a/docker/.env.example b/docker/.env.example index 9d68527796..ac9536be03 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -802,7 +802,7 @@ MAX_TOOLS_NUM=10 MAX_PARALLEL_LIMIT=10 # The maximum number of iterations for agent setting -MAX_ITERATIONS_NUM=5 +MAX_ITERATIONS_NUM=99 # ------------------------------ # Environment Variables for web Service diff --git a/docker/docker-compose-template.yaml b/docker/docker-compose-template.yaml index 256a6131ae..74a7b87bf9 100644 --- a/docker/docker-compose-template.yaml +++ 
b/docker/docker-compose-template.yaml @@ -75,7 +75,7 @@ services: LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100} MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} - MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-5} + MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99} ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true} ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true} ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true} diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index a13f115cd2..41e86d015f 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -354,7 +354,7 @@ x-shared-env: &shared-api-worker-env LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100} MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} - MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-5} + MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99} TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} PGUSER: ${PGUSER:-${DB_USERNAME}} POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}} @@ -574,7 +574,7 @@ services: LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100} MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} - MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-5} + MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99} ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true} ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true} ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true} diff --git a/docker/middleware.env.example b/docker/middleware.env.example index 2437026eec..ba6859885b 100644 --- a/docker/middleware.env.example +++ b/docker/middleware.env.example @@ -109,7 +109,7 @@ EXPOSE_PLUGIN_DEBUGGING_HOST=localhost EXPOSE_PLUGIN_DEBUGGING_PORT=5003 PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 -PLUGIN_DIFY_INNER_API_URL=http://api:5001 +PLUGIN_DIFY_INNER_API_URL=http://host.docker.internal:5001 MARKETPLACE_ENABLED=true MARKETPLACE_API_URL=https://marketplace.dify.ai diff --git a/web/.env.example b/web/.env.example index 51631c2437..78b4f33e8c 100644 --- a/web/.env.example +++ b/web/.env.example @@ -50,7 +50,7 @@ NEXT_PUBLIC_MAX_TOOLS_NUM=10 NEXT_PUBLIC_MAX_PARALLEL_LIMIT=10 # The maximum number of iterations for agent setting -NEXT_PUBLIC_MAX_ITERATIONS_NUM=5 +NEXT_PUBLIC_MAX_ITERATIONS_NUM=99 NEXT_PUBLIC_ENABLE_WEBSITE_JINAREADER=true NEXT_PUBLIC_ENABLE_WEBSITE_FIRECRAWL=true diff --git a/web/app/components/app/configuration/config/agent/agent-tools/index.tsx b/web/app/components/app/configuration/config/agent/agent-tools/index.tsx index 4b773c01ba..a3149447d4 100644 --- a/web/app/components/app/configuration/config/agent/agent-tools/index.tsx +++ b/web/app/components/app/configuration/config/agent/agent-tools/index.tsx @@ -1,6 +1,6 @@ 'use client' import type { FC } from 'react' -import React, { useMemo, useState } from 'react' +import React, { useCallback, useMemo, useState } from 'react' import { useTranslation } from 'react-i18next' import { useContext } from 'use-context-selector' import copy from 'copy-to-clipboard' @@ -32,6 +32,7 @@ import cn from '@/utils/classnames' import ToolPicker from '@/app/components/workflow/block-selector/tool-picker' import type { ToolDefaultValue } from '@/app/components/workflow/block-selector/types' import { canFindTool } from '@/utils' +import { useMittContextSelector } from '@/context/mitt-context' type AgentToolWithMoreInfo = AgentTool & { icon: any; collection?: Collection } | null const 
AgentTools: FC = () => { @@ -39,7 +40,6 @@ const AgentTools: FC = () => { const [isShowChooseTool, setIsShowChooseTool] = useState(false) const { modelConfig, setModelConfig, collectionList } = useContext(ConfigContext) const formattingChangedDispatcher = useFormattingChangedDispatcher() - const [currentTool, setCurrentTool] = useState(null) const currentCollection = useMemo(() => { if (!currentTool) return null @@ -61,6 +61,17 @@ const AgentTools: FC = () => { collection, } }) + const useSubscribe = useMittContextSelector(s => s.useSubscribe) + const handleUpdateToolsWhenInstallToolSuccess = useCallback((installedPluginNames: string[]) => { + const newModelConfig = produce(modelConfig, (draft) => { + draft.agentConfig.tools.forEach((item: any) => { + if (item.isDeleted && installedPluginNames.includes(item.provider_id)) + item.isDeleted = false + }) + }) + setModelConfig(newModelConfig) + }, [modelConfig, setModelConfig]) + useSubscribe('plugin:install:success', handleUpdateToolsWhenInstallToolSuccess as any) const handleToolSettingChange = (value: Record) => { const newModelConfig = produce(modelConfig, (draft) => { @@ -132,7 +143,7 @@ const AgentTools: FC = () => { disabled={false} supportAddCustomTool onSelect={handleSelectTool} - selectedTools={tools} + selectedTools={tools as any} /> )} diff --git a/web/app/components/app/configuration/debug/debug-with-multiple-model/index.tsx b/web/app/components/app/configuration/debug/debug-with-multiple-model/index.tsx index 75ba8362ae..b876adfa3d 100644 --- a/web/app/components/app/configuration/debug/debug-with-multiple-model/index.tsx +++ b/web/app/components/app/configuration/debug/debug-with-multiple-model/index.tsx @@ -99,7 +99,15 @@ const DebugWithMultipleModel = () => { }, [twoLine, threeLine, fourLine]) const setShowAppConfigureFeaturesModal = useAppStore(s => s.setShowAppConfigureFeaturesModal) - const inputsForm = modelConfig.configs.prompt_variables.filter(item => item.type !== 'api').map(item => ({ ...item, label: item.name, variable: item.key })) as InputForm[] + const inputsForm = modelConfig.configs.prompt_variables + .filter(item => item.type !== 'api') + .map(item => ({ + ...item, + label: item.name, + variable: item.key, + hide: item.hide ?? false, + required: item.required ?? false, + })) as InputForm[] return (
@@ -133,6 +141,7 @@ const DebugWithMultipleModel = () => { {isChatMode && (
{ }} > - <> +
{/* Header */} @@ -1060,7 +1061,7 @@ const Configuration: FC = () => { /> )} - + ) diff --git a/web/app/components/base/chat/chat/chat-input-area/index.tsx b/web/app/components/base/chat/chat/chat-input-area/index.tsx index 14d8185f99..52490e4024 100644 --- a/web/app/components/base/chat/chat/chat-input-area/index.tsx +++ b/web/app/components/base/chat/chat/chat-input-area/index.tsx @@ -29,6 +29,7 @@ import type { FileUpload } from '@/app/components/base/features/types' import { TransferMethod } from '@/types/app' type ChatInputAreaProps = { + botName?: string showFeatureBar?: boolean showFileUpload?: boolean featureBarDisabled?: boolean @@ -43,6 +44,7 @@ type ChatInputAreaProps = { disabled?: boolean } const ChatInputArea = ({ + botName, showFeatureBar, showFileUpload, featureBarDisabled, @@ -192,7 +194,7 @@ const ChatInputArea = ({ className={cn( 'body-lg-regular w-full resize-none bg-transparent p-1 leading-6 text-text-tertiary outline-none', )} - placeholder={t('common.chat.inputPlaceholder') || ''} + placeholder={t('common.chat.inputPlaceholder', { botName }) || ''} autoFocus minRows={1} onResize={handleTextareaResize} diff --git a/web/app/components/base/chat/chat/index.tsx b/web/app/components/base/chat/chat/index.tsx index 7c8eb23b1b..c0842af0c4 100644 --- a/web/app/components/base/chat/chat/index.tsx +++ b/web/app/components/base/chat/chat/index.tsx @@ -303,6 +303,7 @@ const Chat: FC = ({ { !noChatInput && ( = ({
{ diff --git a/web/app/components/base/chat/embedded-chatbot/theme/theme-context.ts b/web/app/components/base/chat/embedded-chatbot/theme/theme-context.ts index d4d617d4b7..321997ab1d 100644 --- a/web/app/components/base/chat/embedded-chatbot/theme/theme-context.ts +++ b/web/app/components/base/chat/embedded-chatbot/theme/theme-context.ts @@ -12,8 +12,7 @@ export class Theme { public colorPathOnHeader = 'text-text-primary-on-surface' public backgroundButtonDefaultColorStyle = 'backgroundColor: #1C64F2' public roundedBackgroundColorStyle = 'backgroundColor: rgb(245 248 255)' - public chatBubbleColorStyle = 'backgroundColor: rgb(225 239 254)' - public chatBubbleColor = 'rgb(225 239 254)' + public chatBubbleColorStyle = '' constructor(chatColorTheme: string | null = null, chatColorThemeInverted = false) { this.chatColorTheme = chatColorTheme @@ -29,7 +28,6 @@ export class Theme { this.backgroundButtonDefaultColorStyle = `backgroundColor: ${this.primaryColor}; color: ${this.colorFontOnHeaderStyle};` this.roundedBackgroundColorStyle = `backgroundColor: ${hexToRGBA(this.primaryColor, 0.05)}` this.chatBubbleColorStyle = `backgroundColor: ${hexToRGBA(this.primaryColor, 0.15)}` - this.chatBubbleColor = `${hexToRGBA(this.primaryColor, 0.15)}` } } diff --git a/web/app/components/base/icons/src/public/llm/OpenaiTale.json b/web/app/components/base/icons/src/public/llm/OpenaiTale.json new file mode 100644 index 0000000000..45943139f1 --- /dev/null +++ b/web/app/components/base/icons/src/public/llm/OpenaiTale.json @@ -0,0 +1,37 @@ +{ + "icon": { + "type": "element", + "isRootNode": true, + "name": "svg", + "attributes": { + "width": "24", + "height": "24", + "viewBox": "0 0 24 24", + "fill": "none", + "xmlns": "http://www.w3.org/2000/svg" + }, + "children": [ + { + "type": "element", + "name": "rect", + "attributes": { + "width": "24", + "height": "24", + "rx": "6", + "fill": "#009688" + }, + "children": [] + }, + { + "type": "element", + "name": "path", + "attributes": { + "d": "M19.7758 11.5959C19.9546 11.9948 20.0681 12.4213 20.1145 12.8563C20.1592 13.2913 20.1369 13.7315 20.044 14.1596C19.9529 14.5878 19.7947 14.9987 19.5746 15.377C19.4302 15.6298 19.2599 15.867 19.0639 16.0854C18.8696 16.3021 18.653 16.4981 18.4174 16.67C18.1801 16.842 17.9274 16.9864 17.6591 17.105C17.3926 17.222 17.1141 17.3114 16.8286 17.3698C16.6945 17.7859 16.4951 18.1797 16.2371 18.5339C15.9809 18.8881 15.6697 19.1993 15.3155 19.4555C14.9613 19.7134 14.5693 19.9129 14.1532 20.047C13.7371 20.1829 13.302 20.2499 12.8636 20.2499C12.573 20.2516 12.2807 20.2207 11.9953 20.1622C11.7116 20.102 11.433 20.0109 11.1665 19.8923C10.9 19.7736 10.6472 19.6258 10.4116 19.4538C10.1778 19.2819 9.96115 19.0841 9.76857 18.8658C9.33871 18.9586 8.89853 18.981 8.46351 18.9363C8.02849 18.8898 7.60207 18.7763 7.20143 18.5975C6.80252 18.4204 6.43284 18.1797 6.10786 17.8857C5.78289 17.5916 5.50606 17.2478 5.28769 16.8695C5.14153 16.6167 5.02117 16.3502 4.93004 16.0734C4.83891 15.7965 4.77873 15.5111 4.74778 15.2205C4.71683 14.9317 4.71855 14.6393 4.7495 14.3488C4.78045 14.0599 4.84407 13.7745 4.9352 13.4976C4.64289 13.1727 4.40217 12.803 4.22335 12.4041C4.04624 12.0034 3.93104 11.5787 3.88634 11.1437C3.83991 10.7087 3.86398 10.2685 3.95511 9.84036C4.04624 9.41222 4.20443 9.00127 4.42452 8.62299C4.56896 8.37023 4.73918 8.13123 4.93348 7.91458C5.12778 7.69793 5.34615 7.50191 5.58171 7.32997C5.81728 7.15802 6.07176 7.01187 6.33827 6.89495C6.6065 6.7763 6.88506 6.68861 7.17048 6.63015C7.3046 6.21232 7.50406 5.82029 7.76026 5.46608C8.01817 5.11188 8.32939 
4.80066 8.6836 4.54274C9.03781 4.28654 9.42984 4.08708 9.84595 3.95125C10.2621 3.81713 10.6971 3.74835 11.1355 3.75007C11.4261 3.74835 11.7184 3.77758 12.0039 3.83776C12.2893 3.89794 12.5678 3.98736 12.8344 4.106C13.1009 4.22636 13.3536 4.37251 13.5892 4.54446C13.8248 4.71812 14.0414 4.91414 14.234 5.13251C14.6621 5.04138 15.1023 5.01903 15.5373 5.06373C15.9723 5.10844 16.3971 5.22364 16.7977 5.40074C17.1966 5.57957 17.5663 5.81857 17.8913 6.1126C18.2162 6.4049 18.4931 6.74707 18.7114 7.12707C18.8576 7.37811 18.9779 7.64463 19.0691 7.92318C19.1602 8.20001 19.2221 8.48544 19.2513 8.77602C19.2823 9.06661 19.2823 9.35892 19.2496 9.64951C19.2187 9.94009 19.155 10.2255 19.0639 10.5024C19.3579 10.8273 19.5969 11.1953 19.7758 11.5959ZM14.0466 18.9363C14.4214 18.7815 14.7619 18.5528 15.049 18.2657C15.3362 17.9785 15.5648 17.6381 15.7196 17.2615C15.8743 16.8867 15.9552 16.4843 15.9552 16.0785V12.2442C15.954 12.2407 15.9529 12.2367 15.9517 12.2321C15.9506 12.2287 15.9488 12.2252 15.9466 12.2218C15.9443 12.2184 15.9414 12.2155 15.938 12.2132C15.9345 12.2098 15.9311 12.2075 15.9276 12.2063L14.54 11.4051V16.0373C14.54 16.0837 14.5332 16.1318 14.5211 16.1765C14.5091 16.223 14.4919 16.2659 14.4678 16.3072C14.4438 16.3485 14.4162 16.3863 14.3819 16.419C14.3484 16.4523 14.3109 16.4812 14.2701 16.505L10.9842 18.4015C10.9567 18.4187 10.9103 18.4428 10.8862 18.4565C11.0221 18.5717 11.1699 18.6732 11.3247 18.7626C11.4811 18.852 11.6428 18.9277 11.8113 18.9896C11.9798 19.0497 12.1535 19.0962 12.3288 19.1271C12.5059 19.1581 12.6848 19.1735 12.8636 19.1735C13.2694 19.1735 13.6717 19.0927 14.0466 18.9363ZM6.22135 16.333C6.42596 16.6855 6.69592 16.9916 7.01745 17.2392C7.34071 17.4868 7.70695 17.6673 8.09899 17.7722C8.49102 17.8771 8.90025 17.9046 9.3026 17.8513C9.70495 17.798 10.0918 17.6673 10.4443 17.4644L13.7663 15.5472L13.7749 15.5386C13.7772 15.5363 13.7789 15.5329 13.78 15.5283C13.7823 15.5249 13.7841 15.5214 13.7852 15.518V13.9017L9.77545 16.2212C9.73418 16.2453 9.6912 16.2625 9.64649 16.2763C9.60007 16.2883 9.55364 16.2935 9.5055 16.2935C9.45907 16.2935 9.41265 16.2883 9.36622 16.2763C9.32152 16.2625 9.27681 16.2453 9.23554 16.2212L5.94967 14.323C5.92044 14.3058 5.87746 14.28 5.85339 14.2645C5.82244 14.4416 5.80696 14.6204 5.80696 14.7993C5.80696 14.9781 5.82415 15.1569 5.85511 15.334C5.88605 15.5094 5.9342 15.6831 5.99438 15.8516C6.05628 16.0201 6.13194 16.1817 6.22135 16.3364V16.333ZM5.35818 9.1629C5.15529 9.51539 5.02461 9.90398 4.97131 10.3063C4.918 10.7087 4.94552 11.1162 5.0504 11.51C5.15529 11.902 5.33583 12.2682 5.58343 12.5915C5.83103 12.913 6.13881 13.183 6.48958 13.3859L9.80984 15.3048C9.81328 15.3059 9.81729 15.3071 9.82188 15.3082H9.83391C9.8385 15.3082 9.84251 15.3071 9.84595 15.3048C9.84939 15.3036 9.85283 15.3019 9.85627 15.2996L11.249 14.4949L7.23926 12.1805C7.19971 12.1565 7.16189 12.1272 7.1275 12.0946C7.09418 12.0611 7.06529 12.0236 7.04153 11.9828C7.01917 11.9415 7.00026 11.8985 6.98822 11.8521C6.97619 11.8074 6.96931 11.761 6.97103 11.7128V7.80797C6.80252 7.86987 6.63917 7.94553 6.48442 8.03494C6.32967 8.12607 6.18352 8.22924 6.04596 8.34444C5.91013 8.45965 5.78289 8.58688 5.66769 8.72444C5.55248 8.86028 5.45103 9.00815 5.36162 9.1629H5.35818ZM16.7633 11.8177C16.8046 11.8418 16.8424 11.8693 16.8768 11.9037C16.9094 11.9364 16.9387 11.9742 16.9628 12.0155C16.9851 12.0567 17.004 12.1014 17.0161 12.1461C17.0264 12.1926 17.0332 12.239 17.0315 12.2871V16.192C17.5835 15.9891 18.0649 15.6332 18.4208 15.1655C18.7785 14.6978 18.9934 14.139 19.0433 13.5544C19.0931 12.9698 18.9762 12.3817 18.7046 
11.8607C18.4329 11.3397 18.0185 10.9064 17.5095 10.6141L14.1893 8.69521C14.1858 8.69406 14.1818 8.69292 14.1772 8.69177H14.1652C14.1618 8.69292 14.1578 8.69406 14.1532 8.69521C14.1497 8.69636 14.1463 8.69808 14.1429 8.70037L12.757 9.50163L16.7667 11.8177H16.7633ZM18.1475 9.7372H18.1457V9.73892L18.1475 9.7372ZM18.1457 9.73548C18.2455 9.15774 18.1784 8.56281 17.9514 8.02119C17.7262 7.47956 17.3496 7.01359 16.8682 6.67658C16.3867 6.34128 15.8193 6.1487 15.233 6.12291C14.6449 6.09884 14.0638 6.24155 13.5548 6.53386L10.2345 8.45105C10.2311 8.45334 10.2282 8.45621 10.2259 8.45965L10.2191 8.46996C10.2179 8.4734 10.2168 8.47741 10.2156 8.482C10.2145 8.48544 10.2139 8.48945 10.2139 8.49403V10.0966L14.2237 7.78046C14.2649 7.75639 14.3096 7.7392 14.3543 7.72544C14.4008 7.7134 14.4472 7.70825 14.4936 7.70825C14.5418 7.70825 14.5882 7.7134 14.6346 7.72544C14.6793 7.7392 14.7223 7.75639 14.7636 7.78046L18.0494 9.67874C18.0787 9.69593 18.1217 9.72 18.1457 9.73548ZM9.45735 7.96101C9.45735 7.91458 9.46423 7.86816 9.47627 7.82173C9.4883 7.77702 9.5055 7.73232 9.52957 7.69105C9.55364 7.6515 9.58115 7.61368 9.61554 7.57929C9.64821 7.54662 9.68604 7.51739 9.72731 7.49503L13.0132 5.59848C13.0441 5.57957 13.0871 5.55549 13.1112 5.54346C12.6607 5.1669 12.1105 4.92618 11.5276 4.85224C10.9447 4.77658 10.3532 4.86943 9.82188 5.11875C9.28885 5.36807 8.83835 5.76527 8.52369 6.26047C8.20903 6.75739 8.04224 7.33169 8.04224 7.91974V11.7541C8.04339 11.7587 8.04454 11.7627 8.04568 11.7661C8.04683 11.7696 8.04855 11.773 8.05084 11.7765C8.05313 11.7799 8.056 11.7833 8.05944 11.7868C8.06173 11.7891 8.06517 11.7914 8.06976 11.7937L9.45735 12.5949V7.96101ZM10.2105 13.0282L11.997 14.0599L13.7835 13.0282V10.9666L11.9987 9.93493L10.2122 10.9666L10.2105 13.0282Z", + "fill": "white" + }, + "children": [] + } + ] + }, + "name": "OpenaiTale" +} diff --git a/web/app/components/base/icons/src/public/llm/OpenaiTale.tsx b/web/app/components/base/icons/src/public/llm/OpenaiTale.tsx new file mode 100644 index 0000000000..e7ae45e293 --- /dev/null +++ b/web/app/components/base/icons/src/public/llm/OpenaiTale.tsx @@ -0,0 +1,20 @@ +// GENERATE BY script +// DON NOT EDIT IT MANUALLY + +import * as React from 'react' +import data from './OpenaiTale.json' +import IconBase from '@/app/components/base/icons/IconBase' +import type { IconData } from '@/app/components/base/icons/IconBase' + +const Icon = ( + { + ref, + ...props + }: React.SVGProps & { + ref?: React.RefObject>; + }, +) => + +Icon.displayName = 'OpenaiTale' + +export default Icon diff --git a/web/app/components/base/icons/src/public/llm/OpenaiYellow.json b/web/app/components/base/icons/src/public/llm/OpenaiYellow.json new file mode 100644 index 0000000000..d0a4f10744 --- /dev/null +++ b/web/app/components/base/icons/src/public/llm/OpenaiYellow.json @@ -0,0 +1,37 @@ +{ + "icon": { + "type": "element", + "isRootNode": true, + "name": "svg", + "attributes": { + "width": "24", + "height": "24", + "viewBox": "0 0 24 24", + "fill": "none", + "xmlns": "http://www.w3.org/2000/svg" + }, + "children": [ + { + "type": "element", + "name": "rect", + "attributes": { + "width": "24", + "height": "24", + "rx": "6", + "fill": "#FAB005" + }, + "children": [] + }, + { + "type": "element", + "name": "path", + "attributes": { + "d": "M19.7758 11.5959C19.9546 11.9948 20.0681 12.4213 20.1145 12.8563C20.1592 13.2913 20.1369 13.7315 20.044 14.1596C19.9529 14.5878 19.7947 14.9987 19.5746 15.377C19.4302 15.6298 19.2599 15.867 19.0639 16.0854C18.8696 16.3021 18.653 16.4981 18.4174 16.67C18.1801 16.842 17.9274 
16.9864 17.6591 17.105C17.3926 17.222 17.1141 17.3114 16.8286 17.3698C16.6945 17.7859 16.4951 18.1797 16.2371 18.5339C15.9809 18.8881 15.6697 19.1993 15.3155 19.4555C14.9613 19.7134 14.5693 19.9129 14.1532 20.047C13.7371 20.1829 13.302 20.2499 12.8636 20.2499C12.573 20.2516 12.2807 20.2207 11.9953 20.1622C11.7116 20.102 11.433 20.0109 11.1665 19.8923C10.9 19.7736 10.6472 19.6258 10.4116 19.4538C10.1778 19.2819 9.96115 19.0841 9.76857 18.8658C9.33871 18.9586 8.89853 18.981 8.46351 18.9363C8.02849 18.8898 7.60207 18.7763 7.20143 18.5975C6.80252 18.4204 6.43284 18.1797 6.10786 17.8857C5.78289 17.5916 5.50606 17.2478 5.28769 16.8695C5.14153 16.6167 5.02117 16.3502 4.93004 16.0734C4.83891 15.7965 4.77873 15.5111 4.74778 15.2205C4.71683 14.9317 4.71855 14.6393 4.7495 14.3488C4.78045 14.0599 4.84407 13.7745 4.9352 13.4976C4.64289 13.1727 4.40217 12.803 4.22335 12.4041C4.04624 12.0034 3.93104 11.5787 3.88634 11.1437C3.83991 10.7087 3.86398 10.2685 3.95511 9.84036C4.04624 9.41222 4.20443 9.00127 4.42452 8.62299C4.56896 8.37023 4.73918 8.13123 4.93348 7.91458C5.12778 7.69793 5.34615 7.50191 5.58171 7.32997C5.81728 7.15802 6.07176 7.01187 6.33827 6.89495C6.6065 6.7763 6.88506 6.68861 7.17048 6.63015C7.3046 6.21232 7.50406 5.82029 7.76026 5.46608C8.01817 5.11188 8.32939 4.80066 8.6836 4.54274C9.03781 4.28654 9.42984 4.08708 9.84595 3.95125C10.2621 3.81713 10.6971 3.74835 11.1355 3.75007C11.4261 3.74835 11.7184 3.77758 12.0039 3.83776C12.2893 3.89794 12.5678 3.98736 12.8344 4.106C13.1009 4.22636 13.3536 4.37251 13.5892 4.54446C13.8248 4.71812 14.0414 4.91414 14.234 5.13251C14.6621 5.04138 15.1023 5.01903 15.5373 5.06373C15.9723 5.10844 16.3971 5.22364 16.7977 5.40074C17.1966 5.57957 17.5663 5.81857 17.8913 6.1126C18.2162 6.4049 18.4931 6.74707 18.7114 7.12707C18.8576 7.37811 18.9779 7.64463 19.0691 7.92318C19.1602 8.20001 19.2221 8.48544 19.2513 8.77602C19.2823 9.06661 19.2823 9.35892 19.2496 9.64951C19.2187 9.94009 19.155 10.2255 19.0639 10.5024C19.3579 10.8273 19.5969 11.1953 19.7758 11.5959ZM14.0466 18.9363C14.4214 18.7815 14.7619 18.5528 15.049 18.2657C15.3362 17.9785 15.5648 17.6381 15.7196 17.2615C15.8743 16.8867 15.9552 16.4843 15.9552 16.0785V12.2442C15.954 12.2407 15.9529 12.2367 15.9517 12.2321C15.9506 12.2287 15.9488 12.2252 15.9466 12.2218C15.9443 12.2184 15.9414 12.2155 15.938 12.2132C15.9345 12.2098 15.9311 12.2075 15.9276 12.2063L14.54 11.4051V16.0373C14.54 16.0837 14.5332 16.1318 14.5211 16.1765C14.5091 16.223 14.4919 16.2659 14.4678 16.3072C14.4438 16.3485 14.4162 16.3863 14.3819 16.419C14.3484 16.4523 14.3109 16.4812 14.2701 16.505L10.9842 18.4015C10.9567 18.4187 10.9103 18.4428 10.8862 18.4565C11.0221 18.5717 11.1699 18.6732 11.3247 18.7626C11.4811 18.852 11.6428 18.9277 11.8113 18.9896C11.9798 19.0497 12.1535 19.0962 12.3288 19.1271C12.5059 19.1581 12.6848 19.1735 12.8636 19.1735C13.2694 19.1735 13.6717 19.0927 14.0466 18.9363ZM6.22135 16.333C6.42596 16.6855 6.69592 16.9916 7.01745 17.2392C7.34071 17.4868 7.70695 17.6673 8.09899 17.7722C8.49102 17.8771 8.90025 17.9046 9.3026 17.8513C9.70495 17.798 10.0918 17.6673 10.4443 17.4644L13.7663 15.5472L13.7749 15.5386C13.7772 15.5363 13.7789 15.5329 13.78 15.5283C13.7823 15.5249 13.7841 15.5214 13.7852 15.518V13.9017L9.77545 16.2212C9.73418 16.2453 9.6912 16.2625 9.64649 16.2763C9.60007 16.2883 9.55364 16.2935 9.5055 16.2935C9.45907 16.2935 9.41265 16.2883 9.36622 16.2763C9.32152 16.2625 9.27681 16.2453 9.23554 16.2212L5.94967 14.323C5.92044 14.3058 5.87746 14.28 5.85339 14.2645C5.82244 14.4416 5.80696 14.6204 5.80696 14.7993C5.80696 
14.9781 5.82415 15.1569 5.85511 15.334C5.88605 15.5094 5.9342 15.6831 5.99438 15.8516C6.05628 16.0201 6.13194 16.1817 6.22135 16.3364V16.333ZM5.35818 9.1629C5.15529 9.51539 5.02461 9.90398 4.97131 10.3063C4.918 10.7087 4.94552 11.1162 5.0504 11.51C5.15529 11.902 5.33583 12.2682 5.58343 12.5915C5.83103 12.913 6.13881 13.183 6.48958 13.3859L9.80984 15.3048C9.81328 15.3059 9.81729 15.3071 9.82188 15.3082H9.83391C9.8385 15.3082 9.84251 15.3071 9.84595 15.3048C9.84939 15.3036 9.85283 15.3019 9.85627 15.2996L11.249 14.4949L7.23926 12.1805C7.19971 12.1565 7.16189 12.1272 7.1275 12.0946C7.09418 12.0611 7.06529 12.0236 7.04153 11.9828C7.01917 11.9415 7.00026 11.8985 6.98822 11.8521C6.97619 11.8074 6.96931 11.761 6.97103 11.7128V7.80797C6.80252 7.86987 6.63917 7.94553 6.48442 8.03494C6.32967 8.12607 6.18352 8.22924 6.04596 8.34444C5.91013 8.45965 5.78289 8.58688 5.66769 8.72444C5.55248 8.86028 5.45103 9.00815 5.36162 9.1629H5.35818ZM16.7633 11.8177C16.8046 11.8418 16.8424 11.8693 16.8768 11.9037C16.9094 11.9364 16.9387 11.9742 16.9628 12.0155C16.9851 12.0567 17.004 12.1014 17.0161 12.1461C17.0264 12.1926 17.0332 12.239 17.0315 12.2871V16.192C17.5835 15.9891 18.0649 15.6332 18.4208 15.1655C18.7785 14.6978 18.9934 14.139 19.0433 13.5544C19.0931 12.9698 18.9762 12.3817 18.7046 11.8607C18.4329 11.3397 18.0185 10.9064 17.5095 10.6141L14.1893 8.69521C14.1858 8.69406 14.1818 8.69292 14.1772 8.69177H14.1652C14.1618 8.69292 14.1578 8.69406 14.1532 8.69521C14.1497 8.69636 14.1463 8.69808 14.1429 8.70037L12.757 9.50163L16.7667 11.8177H16.7633ZM18.1475 9.7372H18.1457V9.73892L18.1475 9.7372ZM18.1457 9.73548C18.2455 9.15774 18.1784 8.56281 17.9514 8.02119C17.7262 7.47956 17.3496 7.01359 16.8682 6.67658C16.3867 6.34128 15.8193 6.1487 15.233 6.12291C14.6449 6.09884 14.0638 6.24155 13.5548 6.53386L10.2345 8.45105C10.2311 8.45334 10.2282 8.45621 10.2259 8.45965L10.2191 8.46996C10.2179 8.4734 10.2168 8.47741 10.2156 8.482C10.2145 8.48544 10.2139 8.48945 10.2139 8.49403V10.0966L14.2237 7.78046C14.2649 7.75639 14.3096 7.7392 14.3543 7.72544C14.4008 7.7134 14.4472 7.70825 14.4936 7.70825C14.5418 7.70825 14.5882 7.7134 14.6346 7.72544C14.6793 7.7392 14.7223 7.75639 14.7636 7.78046L18.0494 9.67874C18.0787 9.69593 18.1217 9.72 18.1457 9.73548ZM9.45735 7.96101C9.45735 7.91458 9.46423 7.86816 9.47627 7.82173C9.4883 7.77702 9.5055 7.73232 9.52957 7.69105C9.55364 7.6515 9.58115 7.61368 9.61554 7.57929C9.64821 7.54662 9.68604 7.51739 9.72731 7.49503L13.0132 5.59848C13.0441 5.57957 13.0871 5.55549 13.1112 5.54346C12.6607 5.1669 12.1105 4.92618 11.5276 4.85224C10.9447 4.77658 10.3532 4.86943 9.82188 5.11875C9.28885 5.36807 8.83835 5.76527 8.52369 6.26047C8.20903 6.75739 8.04224 7.33169 8.04224 7.91974V11.7541C8.04339 11.7587 8.04454 11.7627 8.04568 11.7661C8.04683 11.7696 8.04855 11.773 8.05084 11.7765C8.05313 11.7799 8.056 11.7833 8.05944 11.7868C8.06173 11.7891 8.06517 11.7914 8.06976 11.7937L9.45735 12.5949V7.96101ZM10.2105 13.0282L11.997 14.0599L13.7835 13.0282V10.9666L11.9987 9.93493L10.2122 10.9666L10.2105 13.0282Z", + "fill": "white" + }, + "children": [] + } + ] + }, + "name": "OpenaiYellow" +} diff --git a/web/app/components/base/icons/src/public/llm/OpenaiYellow.tsx b/web/app/components/base/icons/src/public/llm/OpenaiYellow.tsx new file mode 100644 index 0000000000..77dac7e322 --- /dev/null +++ b/web/app/components/base/icons/src/public/llm/OpenaiYellow.tsx @@ -0,0 +1,20 @@ +// GENERATE BY script +// DON NOT EDIT IT MANUALLY + +import * as React from 'react' +import data from './OpenaiYellow.json' +import IconBase from 
'@/app/components/base/icons/IconBase' +import type { IconData } from '@/app/components/base/icons/IconBase' + +const Icon = ( + { + ref, + ...props + }: React.SVGProps & { + ref?: React.RefObject>; + }, +) => + +Icon.displayName = 'OpenaiYellow' + +export default Icon diff --git a/web/app/components/base/icons/src/public/llm/index.ts b/web/app/components/base/icons/src/public/llm/index.ts index cc9b531ebf..c20f72d8be 100644 --- a/web/app/components/base/icons/src/public/llm/index.ts +++ b/web/app/components/base/icons/src/public/llm/index.ts @@ -30,7 +30,9 @@ export { default as OpenaiBlue } from './OpenaiBlue' export { default as OpenaiGreen } from './OpenaiGreen' export { default as OpenaiText } from './OpenaiText' export { default as OpenaiTransparent } from './OpenaiTransparent' +export { default as OpenaiTale } from './OpenaiTale' export { default as OpenaiViolet } from './OpenaiViolet' +export { default as OpenaiYellow } from './OpenaiYellow' export { default as OpenllmText } from './OpenllmText' export { default as Openllm } from './Openllm' export { default as ReplicateText } from './ReplicateText' diff --git a/web/app/components/base/markdown-blocks/audio-block.tsx b/web/app/components/base/markdown-blocks/audio-block.tsx new file mode 100644 index 0000000000..09001f105b --- /dev/null +++ b/web/app/components/base/markdown-blocks/audio-block.tsx @@ -0,0 +1,21 @@ +/** + * @fileoverview AudioBlock component for rendering audio elements in Markdown. + * Extracted from the main markdown renderer for modularity. + * Uses the AudioGallery component to display audio players. + */ +import React, { memo } from 'react' +import AudioGallery from '@/app/components/base/audio-gallery' + +const AudioBlock: any = memo(({ node }: any) => { + const srcs = node.children.filter((child: any) => 'properties' in child).map((child: any) => (child as any).properties.src) + if (srcs.length === 0) { + const src = node.properties?.src + if (src) + return + return null + } + return +}) +AudioBlock.displayName = 'AudioBlock' + +export default AudioBlock diff --git a/web/app/components/base/markdown.tsx b/web/app/components/base/markdown-blocks/code-block.tsx similarity index 63% rename from web/app/components/base/markdown.tsx rename to web/app/components/base/markdown-blocks/code-block.tsx index a47d93268c..9f8a6a87bb 100644 --- a/web/app/components/base/markdown.tsx +++ b/web/app/components/base/markdown-blocks/code-block.tsx @@ -1,34 +1,19 @@ -import ReactMarkdown from 'react-markdown' +import { memo, useEffect, useMemo, useRef, useState } from 'react' import ReactEcharts from 'echarts-for-react' -import 'katex/dist/katex.min.css' -import RemarkMath from 'remark-math' -import RemarkBreaks from 'remark-breaks' -import RehypeKatex from 'rehype-katex' -import RemarkGfm from 'remark-gfm' -import RehypeRaw from 'rehype-raw' import SyntaxHighlighter from 'react-syntax-highlighter' import { atelierHeathDark, atelierHeathLight, } from 'react-syntax-highlighter/dist/esm/styles/hljs' -import { Component, memo, useEffect, useMemo, useRef, useState } from 'react' -import { flow } from 'lodash-es' import ActionButton from '@/app/components/base/action-button' import CopyIcon from '@/app/components/base/copy-icon' import SVGBtn from '@/app/components/base/svg' import Flowchart from '@/app/components/base/mermaid' -import ImageGallery from '@/app/components/base/image-gallery' -import { useChatContext } from '@/app/components/base/chat/chat/context' -import VideoGallery from '@/app/components/base/video-gallery' -import 
AudioGallery from '@/app/components/base/audio-gallery' -import MarkdownButton from '@/app/components/base/markdown-blocks/button' -import MarkdownForm from '@/app/components/base/markdown-blocks/form' -import MarkdownMusic from '@/app/components/base/markdown-blocks/music' -import ThinkBlock from '@/app/components/base/markdown-blocks/think-block' import { Theme } from '@/types/app' import useTheme from '@/hooks/use-theme' -import cn from '@/utils/classnames' -import SVGRenderer from './svg-gallery' +import SVGRenderer from '../svg-gallery' // Assumes svg-gallery.tsx is in /base directory +import MarkdownMusic from '@/app/components/base/markdown-blocks/music' +import ErrorBoundary from '@/app/components/base/markdown/error-boundary' // Available language https://github.com/react-syntax-highlighter/react-syntax-highlighter/blob/master/AVAILABLE_LANGUAGES_HLJS.MD const capitalizationLanguageNameMap: Record = { @@ -64,50 +49,6 @@ const getCorrectCapitalizationLanguageName = (language: string) => { return language.charAt(0).toUpperCase() + language.substring(1) } -const preprocessLaTeX = (content: string) => { - if (typeof content !== 'string') - return content - - const codeBlockRegex = /```[\s\S]*?```/g - const codeBlocks = content.match(codeBlockRegex) || [] - let processedContent = content.replace(codeBlockRegex, 'CODE_BLOCK_PLACEHOLDER') - - processedContent = flow([ - (str: string) => str.replace(/\\\[(.*?)\\\]/g, (_, equation) => `$$${equation}$$`), - (str: string) => str.replace(/\\\[([\s\S]*?)\\\]/g, (_, equation) => `$$${equation}$$`), - (str: string) => str.replace(/\\\((.*?)\\\)/g, (_, equation) => `$$${equation}$$`), - (str: string) => str.replace(/(^|[^\\])\$(.+?)\$/g, (_, prefix, equation) => `${prefix}$${equation}$`), - ])(processedContent) - - codeBlocks.forEach((block) => { - processedContent = processedContent.replace('CODE_BLOCK_PLACEHOLDER', block) - }) - - return processedContent -} - -const preprocessThinkTag = (content: string) => { - const thinkOpenTagRegex = /\n/g - const thinkCloseTagRegex = /\n<\/think>/g - return flow([ - (str: string) => str.replace(thinkOpenTagRegex, '
\n'), - (str: string) => str.replace(thinkCloseTagRegex, '\n[ENDTHINKFLAG]
'), - ])(content) -} - -export function PreCode(props: { children: any }) { - const ref = useRef(null) - - return ( -
-      
-      {props.children}
-    
- ) -} - // **Add code block // Avoid error #185 (Maximum update depth exceeded. // This can happen when a component repeatedly calls setState inside componentWillUpdate or componentDidUpdate. @@ -444,150 +385,4 @@ const CodeBlock: any = memo(({ inline, className, children = '', ...props }: any }) CodeBlock.displayName = 'CodeBlock' -const VideoBlock: any = memo(({ node }: any) => { - const srcs = node.children.filter((child: any) => 'properties' in child).map((child: any) => (child as any).properties.src) - if (srcs.length === 0) { - const src = node.properties?.src - if (src) - return - return null - } - return -}) -VideoBlock.displayName = 'VideoBlock' - -const AudioBlock: any = memo(({ node }: any) => { - const srcs = node.children.filter((child: any) => 'properties' in child).map((child: any) => (child as any).properties.src) - if (srcs.length === 0) { - const src = node.properties?.src - if (src) - return - return null - } - return -}) -AudioBlock.displayName = 'AudioBlock' - -const ScriptBlock = memo(({ node }: any) => { - const scriptContent = node.children[0]?.value || '' - return `` -}) -ScriptBlock.displayName = 'ScriptBlock' - -const Paragraph = (paragraph: any) => { - const { node }: any = paragraph - const children_node = node.children - if (children_node && children_node[0] && 'tagName' in children_node[0] && children_node[0].tagName === 'img') { - return ( -
- - { - Array.isArray(paragraph.children) && paragraph.children.length > 1 && ( -
{paragraph.children.slice(1)}
- ) - } -
- ) - } - return

{paragraph.children}

-} - -const Img = ({ src }: any) => { - return
-} - -const Link = ({ node, children, ...props }: any) => { - if (node.properties?.href && node.properties.href?.toString().startsWith('abbr')) { - // eslint-disable-next-line react-hooks/rules-of-hooks - const { onSend } = useChatContext() - const hidden_text = decodeURIComponent(node.properties.href.toString().split('abbr:')[1]) - - return onSend?.(hidden_text)} title={node.children[0]?.value || ''}>{node.children[0]?.value || ''} - } - else { - return {children || 'Download'} - } -} - -export function Markdown(props: { content: string; className?: string; customDisallowedElements?: string[] }) { - const latexContent = flow([ - preprocessThinkTag, - preprocessLaTeX, - ])(props.content) - - return ( -
- { - return (tree) => { - const iterate = (node: any) => { - if (node.type === 'element' && node.properties?.ref) - delete node.properties.ref - - if (node.type === 'element' && !/^[a-z][a-z0-9]*$/i.test(node.tagName)) { - node.type = 'text' - node.value = `<${node.tagName}` - } - - if (node.children) - node.children.forEach(iterate) - } - tree.children.forEach(iterate) - } - }, - ]} - disallowedElements={['iframe', 'head', 'html', 'meta', 'link', 'style', 'body', ...(props.customDisallowedElements || [])]} - components={{ - code: CodeBlock, - img: Img, - video: VideoBlock, - audio: AudioBlock, - a: Link, - p: Paragraph, - button: MarkdownButton, - form: MarkdownForm, - script: ScriptBlock as any, - details: ThinkBlock, - }} - > - {/* Markdown detect has problem. */} - {latexContent} - -
- ) -} - -// **Add an ECharts runtime error handler -// Avoid error #7832 (Crash when ECharts accesses undefined objects) -// This can happen when a component attempts to access an undefined object that references an unregistered map, causing the program to crash. - -export default class ErrorBoundary extends Component { - constructor(props: any) { - super(props) - this.state = { hasError: false } - } - - componentDidCatch(error: any, errorInfo: any) { - this.setState({ hasError: true }) - console.error(error, errorInfo) - } - - render() { - // eslint-disable-next-line ts/ban-ts-comment - // @ts-expect-error - if (this.state.hasError) - return
Oops! An error occurred. This could be due to an ECharts runtime error or invalid SVG content.
(see the browser console for more information)
- // eslint-disable-next-line ts/ban-ts-comment - // @ts-expect-error - return this.props.children - } -} +export default CodeBlock diff --git a/web/app/components/base/markdown-blocks/img.tsx b/web/app/components/base/markdown-blocks/img.tsx new file mode 100644 index 0000000000..33fce13f0b --- /dev/null +++ b/web/app/components/base/markdown-blocks/img.tsx @@ -0,0 +1,13 @@ +/** + * @fileoverview Img component for rendering tags in Markdown. + * Extracted from the main markdown renderer for modularity. + * Uses the ImageGallery component to display images. + */ +import React from 'react' +import ImageGallery from '@/app/components/base/image-gallery' + +const Img = ({ src }: any) => { + return
+} + +export default Img diff --git a/web/app/components/base/markdown-blocks/index.ts b/web/app/components/base/markdown-blocks/index.ts new file mode 100644 index 0000000000..ba68b4e8b1 --- /dev/null +++ b/web/app/components/base/markdown-blocks/index.ts @@ -0,0 +1,18 @@ +/** + * @fileoverview Barrel file for all markdown block components. + * This allows for cleaner imports in other parts of the application. + */ + +export { default as AudioBlock } from './audio-block' +export { default as CodeBlock } from './code-block' +export { default as Img } from './img' +export { default as Link } from './link' +export { default as Paragraph } from './paragraph' +export { default as PreCode } from './pre-code' +export { default as ScriptBlock } from './script-block' +export { default as VideoBlock } from './video-block' + +// Assuming these are also standalone components in this directory intended for Markdown rendering +export { default as MarkdownButton } from './button' +export { default as MarkdownForm } from './form' +export { default as ThinkBlock } from './think-block' diff --git a/web/app/components/base/markdown-blocks/link.tsx b/web/app/components/base/markdown-blocks/link.tsx new file mode 100644 index 0000000000..b243a525a0 --- /dev/null +++ b/web/app/components/base/markdown-blocks/link.tsx @@ -0,0 +1,21 @@ +/** + * @fileoverview Link component for rendering tags in Markdown. + * Extracted from the main markdown renderer for modularity. + * Handles special rendering for "abbr:" type links for interactive chat actions. + */ +import React from 'react' +import { useChatContext } from '@/app/components/base/chat/chat/context' + +const Link = ({ node, children, ...props }: any) => { + const { onSend } = useChatContext() + if (node.properties?.href && node.properties.href?.toString().startsWith('abbr')) { + const hidden_text = decodeURIComponent(node.properties.href.toString().split('abbr:')[1]) + + return onSend?.(hidden_text)} title={node.children[0]?.value || ''}>{node.children[0]?.value || ''} + } + else { + return {children || 'Download'} + } +} + +export default Link diff --git a/web/app/components/base/markdown-blocks/paragraph.tsx b/web/app/components/base/markdown-blocks/paragraph.tsx new file mode 100644 index 0000000000..fb1612477a --- /dev/null +++ b/web/app/components/base/markdown-blocks/paragraph.tsx @@ -0,0 +1,27 @@ +/** + * @fileoverview Paragraph component for rendering

tags in Markdown. + * Extracted from the main markdown renderer for modularity. + * Handles special rendering for paragraphs that directly contain an image. + */ +import React from 'react' +import ImageGallery from '@/app/components/base/image-gallery' + +const Paragraph = (paragraph: any) => { + const { node }: any = paragraph + const children_node = node.children + if (children_node && children_node[0] && 'tagName' in children_node[0] && children_node[0].tagName === 'img') { + return ( +

+ + { + Array.isArray(paragraph.children) && paragraph.children.length > 1 && ( +
{paragraph.children.slice(1)}
+ ) + } +
+ ) + } + return

{paragraph.children}

+} + +export default Paragraph diff --git a/web/app/components/base/markdown-blocks/pre-code.tsx b/web/app/components/base/markdown-blocks/pre-code.tsx new file mode 100644 index 0000000000..a9d0cfb9aa --- /dev/null +++ b/web/app/components/base/markdown-blocks/pre-code.tsx @@ -0,0 +1,21 @@ +/** + * @fileoverview PreCode component for rendering
<pre> tags in Markdown.
+ * Extracted from the main markdown renderer for modularity.
+ * This is a simple wrapper around the HTML <pre> element.
+ */
+import React, { useRef } from 'react'
+
+function PreCode(props: { children: any }) {
+  const ref = useRef(null)
+
+  return (
+    <pre ref={ref}>
+      <span className="copy-code-button"></span>
+      {props.children}
+    </pre>
+ ) +} + +export default PreCode diff --git a/web/app/components/base/markdown-blocks/script-block.tsx b/web/app/components/base/markdown-blocks/script-block.tsx new file mode 100644 index 0000000000..921e2bf049 --- /dev/null +++ b/web/app/components/base/markdown-blocks/script-block.tsx @@ -0,0 +1,15 @@ +/** + * @fileoverview ScriptBlock component for handling ` +}) +ScriptBlock.displayName = 'ScriptBlock' + +export default ScriptBlock diff --git a/web/app/components/base/markdown-blocks/video-block.tsx b/web/app/components/base/markdown-blocks/video-block.tsx new file mode 100644 index 0000000000..9f1a36f678 --- /dev/null +++ b/web/app/components/base/markdown-blocks/video-block.tsx @@ -0,0 +1,21 @@ +/** + * @fileoverview VideoBlock component for rendering video elements in Markdown. + * Extracted from the main markdown renderer for modularity. + * Uses the VideoGallery component to display videos. + */ +import React, { memo } from 'react' +import VideoGallery from '@/app/components/base/video-gallery' + +const VideoBlock: any = memo(({ node }: any) => { + const srcs = node.children.filter((child: any) => 'properties' in child).map((child: any) => (child as any).properties.src) + if (srcs.length === 0) { + const src = node.properties?.src + if (src) + return + return null + } + return +}) +VideoBlock.displayName = 'VideoBlock' + +export default VideoBlock diff --git a/web/app/components/base/markdown/error-boundary.tsx b/web/app/components/base/markdown/error-boundary.tsx new file mode 100644 index 0000000000..0e6876191a --- /dev/null +++ b/web/app/components/base/markdown/error-boundary.tsx @@ -0,0 +1,33 @@ +/** + * @fileoverview ErrorBoundary component for React. + * This component was extracted from the main markdown renderer. + * It catches JavaScript errors anywhere in its child component tree, + * logs those errors, and displays a fallback UI instead of the crashed component tree. + * Primarily used around complex rendering logic like ECharts or SVG within Markdown. + */ +import React, { Component } from 'react' +// **Add an ECharts runtime error handler +// Avoid error #7832 (Crash when ECharts accesses undefined objects) +// This can happen when a component attempts to access an undefined object that references an unregistered map, causing the program to crash. + +export default class ErrorBoundary extends Component { + constructor(props: any) { + super(props) + this.state = { hasError: false } + } + + componentDidCatch(error: any, errorInfo: any) { + this.setState({ hasError: true }) + console.error(error, errorInfo) + } + + render() { + // eslint-disable-next-line ts/ban-ts-comment + // @ts-expect-error + if (this.state.hasError) + return
Oops! An error occurred. This could be due to an ECharts runtime error or invalid SVG content.
(see the browser console for more information)
+ // eslint-disable-next-line ts/ban-ts-comment + // @ts-expect-error + return this.props.children + } +} diff --git a/web/app/components/base/markdown/index.tsx b/web/app/components/base/markdown/index.tsx new file mode 100644 index 0000000000..0e0dc41cf2 --- /dev/null +++ b/web/app/components/base/markdown/index.tsx @@ -0,0 +1,87 @@ +import ReactMarkdown from 'react-markdown' +import 'katex/dist/katex.min.css' +import RemarkMath from 'remark-math' +import RemarkBreaks from 'remark-breaks' +import RehypeKatex from 'rehype-katex' +import RemarkGfm from 'remark-gfm' +import RehypeRaw from 'rehype-raw' +import { flow } from 'lodash-es' +import cn from '@/utils/classnames' +import { preprocessLaTeX, preprocessThinkTag } from './markdown-utils' +import { + AudioBlock, + CodeBlock, + Img, + Link, + MarkdownButton, + MarkdownForm, + Paragraph, + ScriptBlock, + ThinkBlock, + VideoBlock, +} from '@/app/components/base/markdown-blocks' + +/** + * @fileoverview Main Markdown rendering component. + * This file was refactored to extract individual block renderers and utility functions + * into separate modules for better organization and maintainability as of [Date of refactor]. + * Further refactoring candidates (custom block components not fitting general categories) + * are noted in their respective files if applicable. + */ + +export function Markdown(props: { content: string; className?: string; customDisallowedElements?: string[] }) { + const latexContent = flow([ + preprocessThinkTag, + preprocessLaTeX, + ])(props.content) + + return ( +
+ { + return (tree: any) => { + const iterate = (node: any) => { + if (node.type === 'element' && node.properties?.ref) + delete node.properties.ref + + if (node.type === 'element' && !/^[a-z][a-z0-9]*$/i.test(node.tagName)) { + node.type = 'text' + node.value = `<${node.tagName}` + } + + if (node.children) + node.children.forEach(iterate) + } + tree.children.forEach(iterate) + } + }, + ]} + disallowedElements={['iframe', 'head', 'html', 'meta', 'link', 'style', 'body', ...(props.customDisallowedElements || [])]} + components={{ + code: CodeBlock, + img: Img, + video: VideoBlock, + audio: AudioBlock, + a: Link, + p: Paragraph, + button: MarkdownButton, + form: MarkdownForm, + script: ScriptBlock as any, + details: ThinkBlock, + }} + > + {/* Markdown detect has problem. */} + {latexContent} + +
+ ) +} diff --git a/web/app/components/base/markdown/markdown-utils.ts b/web/app/components/base/markdown/markdown-utils.ts new file mode 100644 index 0000000000..ff7dd5db01 --- /dev/null +++ b/web/app/components/base/markdown/markdown-utils.ts @@ -0,0 +1,37 @@ +/** + * @fileoverview Utility functions for preprocessing Markdown content. + * These functions were extracted from the main markdown renderer for better separation of concerns. + * Includes preprocessing for LaTeX and custom "think" tags. + */ +import { flow } from 'lodash-es' + +export const preprocessLaTeX = (content: string) => { + if (typeof content !== 'string') + return content + + const codeBlockRegex = /```[\s\S]*?```/g + const codeBlocks = content.match(codeBlockRegex) || [] + let processedContent = content.replace(codeBlockRegex, 'CODE_BLOCK_PLACEHOLDER') + + processedContent = flow([ + (str: string) => str.replace(/\\\[(.*?)\\\]/g, (_, equation) => `$$${equation}$$`), + (str: string) => str.replace(/\\\[([\s\S]*?)\\\]/g, (_, equation) => `$$${equation}$$`), + (str: string) => str.replace(/\\\((.*?)\\\)/g, (_, equation) => `$$${equation}$$`), + (str: string) => str.replace(/(^|[^\\])\$(.+?)\$/g, (_, prefix, equation) => `${prefix}$${equation}$`), + ])(processedContent) + + codeBlocks.forEach((block) => { + processedContent = processedContent.replace('CODE_BLOCK_PLACEHOLDER', block) + }) + + return processedContent +} + +export const preprocessThinkTag = (content: string) => { + const thinkOpenTagRegex = /\n/g + const thinkCloseTagRegex = /\n<\/think>/g + return flow([ + (str: string) => str.replace(thinkOpenTagRegex, '
\n'), + (str: string) => str.replace(thinkCloseTagRegex, '\n[ENDTHINKFLAG]
'), + ])(content) +} diff --git a/web/app/components/custom/custom-web-app-brand/index.tsx b/web/app/components/custom/custom-web-app-brand/index.tsx index f6f617be85..ea2f44caea 100644 --- a/web/app/components/custom/custom-web-app-brand/index.tsx +++ b/web/app/components/custom/custom-web-app-brand/index.tsx @@ -130,7 +130,7 @@ const CustomWebAppBrand = () => {
{t('custom.webapp.changeLogoTip')}
- {(uploadDisabled || (!webappLogo && !webappBrandRemoved)) && ( + {(!uploadDisabled && webappLogo && !webappBrandRemoved) && ( <>
)} {hasSetIndexType && indexType === IndexingType.ECONOMICAL && ( -
+
{t('datasetCreation.stepTwo.indexSettingTip')} {t('datasetCreation.stepTwo.datasetSettingLink')}
diff --git a/web/app/components/datasets/documents/detail/batch-modal/csv-downloader.tsx b/web/app/components/datasets/documents/detail/batch-modal/csv-downloader.tsx index 90bdb707df..7f3be965b3 100644 --- a/web/app/components/datasets/documents/detail/batch-modal/csv-downloader.tsx +++ b/web/app/components/datasets/documents/detail/batch-modal/csv-downloader.tsx @@ -50,20 +50,20 @@ const CSVDownload: FC<{ docForm: ChunkingMode }> = ({ docForm }) => { return (
-
{t('share.generation.csvStructureTitle')}
+
{t('share.generation.csvStructureTitle')}
{docForm === ChunkingMode.qa && ( - - +
+ - - + + - + - - + + @@ -73,15 +73,15 @@ const CSVDownload: FC<{ docForm: ChunkingMode }> = ({ docForm }) => {
{t('datasetDocuments.list.batchModal.question')}{t('datasetDocuments.list.batchModal.answer')}{t('datasetDocuments.list.batchModal.question')}{t('datasetDocuments.list.batchModal.answer')}
{t('datasetDocuments.list.batchModal.question')} 1{t('datasetDocuments.list.batchModal.answer')} 1{t('datasetDocuments.list.batchModal.question')} 1{t('datasetDocuments.list.batchModal.answer')} 1
{t('datasetDocuments.list.batchModal.question')} 2
)} {docForm === ChunkingMode.text && ( - - +
+ - + - + - + diff --git a/web/app/components/datasets/documents/detail/batch-modal/csv-uploader.tsx b/web/app/components/datasets/documents/detail/batch-modal/csv-uploader.tsx index 471bf7be2f..c2224296d6 100644 --- a/web/app/components/datasets/documents/detail/batch-modal/csv-uploader.tsx +++ b/web/app/components/datasets/documents/detail/batch-modal/csv-uploader.tsx @@ -93,29 +93,29 @@ const CSVUploader: FC = ({ />
{!file && ( -
+
-
+
{t('datasetDocuments.list.batchModal.csvUploadTitle')} - {t('datasetDocuments.list.batchModal.browse')} + {t('datasetDocuments.list.batchModal.browse')}
{dragging &&
}
)} {file && ( -
+
- {file.name.replace(/.csv$/, '')} - .csv + {file.name.replace(/.csv$/, '')} + .csv
-
+
- +
diff --git a/web/app/components/datasets/documents/detail/batch-modal/index.tsx b/web/app/components/datasets/documents/detail/batch-modal/index.tsx index 775d755106..614471c565 100644 --- a/web/app/components/datasets/documents/detail/batch-modal/index.tsx +++ b/web/app/components/datasets/documents/detail/batch-modal/index.tsx @@ -41,9 +41,9 @@ const BatchModal: FC = ({ return ( -
{t('datasetDocuments.list.batchModal.title')}
+
{t('datasetDocuments.list.batchModal.title')}
- +
+ + +
+ Get details of a specific document segment in the specified knowledge base + + ### Path + + + Knowledge Base ID + + + Document ID + + + Segment ID + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request GET '${props.apiBaseUrl}/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}' \ + --header 'Authorization: Bearer {api_key}' + ``` + + + ```json {{ title: 'Response' }} + { + "data": { + "id": "chunk_id", + "position": 2, + "document_id": "document_id", + "content": "Segment content text", + "sign_content": "Signature content text", + "answer": "Answer content (if in Q&A mode)", + "word_count": 470, + "tokens": 382, + "keywords": ["keyword1", "keyword2"], + "index_node_id": "index_node_id", + "index_node_hash": "index_node_hash", + "hit_count": 0, + "enabled": true, + "status": "completed", + "created_by": "creator_id", + "created_at": creation_timestamp, + "updated_at": update_timestamp, + "indexing_at": indexing_timestamp, + "completed_at": completion_timestamp, + "error": null, + "child_chunks": [] + }, + "doc_form": "text_model" + } + ``` + + + + +
+ - Retrieval model (optional, if not filled, it will be recalled according to the default method) - - search_method (text) Search method: One of the following four keywords is required - - keyword_search Keyword search - - semantic_search Semantic search - - full_text_search Full-text search - - hybrid_search Hybrid search - - reranking_enable (bool) Whether to enable reranking, required if the search mode is semantic_search or hybrid_search (optional) - - reranking_mode (object) Rerank model configuration, required if reranking is enabled - - reranking_provider_name (string) Rerank model provider - - reranking_model_name (string) Rerank model name - - weights (float) Semantic search weight setting in hybrid search mode - - top_k (integer) Number of results to return (optional) - - score_threshold_enabled (bool) Whether to enable score threshold - - score_threshold (float) Score threshold + Retrieval parameters (optional, if not filled, it will be recalled according to the default method) + - search_method (text) Search method: One of the following four keywords is required + - keyword_search Keyword search + - semantic_search Semantic search + - full_text_search Full-text search + - hybrid_search Hybrid search + - reranking_enable (bool) Whether to enable reranking, required if the search mode is semantic_search or hybrid_search (optional) + - reranking_mode (object) Rerank model configuration, required if reranking is enabled + - reranking_provider_name (string) Rerank model provider + - reranking_model_name (string) Rerank model name + - weights (float) Semantic search weight setting in hybrid search mode + - top_k (integer) Number of results to return (optional) + - score_threshold_enabled (bool) Whether to enable score threshold + - score_threshold (float) Score threshold + - metadata_filtering_conditions (object) Metadata filtering conditions + - logical_operator (string) Logical operator: and | or + - conditions (array[object]) Conditions list + - name (string) Metadata field name + - comparison_operator (string) Comparison operator, allowed values: + - String comparison: + - contains: Contains + - not contains: Does not contain + - start with: Starts with + - end with: Ends with + - is: Equals + - is not: Does not equal + - empty: Is empty + - not empty: Is not empty + - Numeric comparison: + - =: Equals + - : Does not equal + - >: Greater than + - < : Less than + - : Greater than or equal + - : Less than or equal + - Time comparison: + - before: Before + - after: After + - value (string|number|null) Comparison value Unused field @@ -1809,7 +1904,17 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi "weights": null, "top_k": 1, "score_threshold_enabled": false, - "score_threshold": null + "score_threshold": null, + "metadata_filtering_conditions": { + "logical_operator": "and", + "conditions": [ + { + "name": "document_name", + "comparison_operator": "contains", + "value": "test" + } + ] + } } }'`} > @@ -2089,9 +2194,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi label="/datasets/{dataset_id}/documents/metadata" targetCode={`curl --location --request POST '${props.apiBaseUrl}/datasets/{dataset_id}/documents/metadata' \\\n--header 'Authorization: Bearer {api_key}' \\\n--header 'Content-Type: application/json'\\\n--data-raw '{"operation_data": [{"document_id": "document_id", "metadata_list": [{"id": "id", "value": "value", "name": "name"}]}]}'`} > - ```bash {{ title: 'cURL' }} - ``` - + ```bash {{ title: 'cURL' 
}} @@ -2246,6 +2349,316 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi
+ + + + +
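The `metadata_filtering_conditions` object added to the retrieval settings above can be exercised end to end. Below is a minimal sketch, assuming the `requests` library, placeholder base URL, API key, and dataset ID, and the knowledge-base retrieval endpoint (`POST /datasets/{dataset_id}/retrieve`) described elsewhere in these docs; adjust the names to your own instance.

```python
# Hedged sketch: retrieval request using the new metadata_filtering_conditions field.
# API_BASE, API_KEY, and DATASET_ID are placeholders, not values taken from this PR.
import requests

API_BASE = "https://api.dify.ai/v1"
API_KEY = "dataset-xxxxxxxxxxxx"
DATASET_ID = "your-dataset-id"

payload = {
    "query": "test",
    "retrieval_model": {
        "search_method": "hybrid_search",
        "reranking_enable": False,
        "top_k": 1,
        "score_threshold_enabled": False,
        "score_threshold": None,
        # New in this change: only recall chunks whose document metadata matches.
        "metadata_filtering_conditions": {
            "logical_operator": "and",
            "conditions": [
                {
                    "name": "document_name",
                    "comparison_operator": "contains",
                    "value": "test",
                }
            ],
        },
    },
}

resp = requests.post(
    f"{API_BASE}/datasets/{DATASET_ID}/retrieve",
    headers={"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"},
    json=payload,
    timeout=30,
)
resp.raise_for_status()
print(resp.json())
```

The JSON body mirrors the cURL example in the hunk above; `comparison_operator` accepts the string, numeric, and time operators listed in the parameter table.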
+ ### Request Body + + + (text) New tag name, required, maximum length 50 + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request POST '${props.apiBaseUrl}/datasets/tags' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{"name": "testtag1"}' + ``` + + + ```json {{ title: 'Response' }} + { + "id": "eddb66c2-04a1-4e3a-8cb2-75abd01e12a6", + "name": "testtag1", + "type": "knowledge", + "binding_count": 0 + } + ``` + + + + + +
+ + + + + ### Request Body + + + + ```bash {{ title: 'cURL' }} + curl --location --request GET '${props.apiBaseUrl}/datasets/tags' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' + ``` + + + ```json {{ title: 'Response' }} + [ + { + "id": "39d6934c-ed36-463d-b4a7-377fa1503dc0", + "name": "testtag1", + "type": "knowledge", + "binding_count": "0" + }, + ... + ] + ``` + + + + +
+ + + + + ### Request Body + + + (text) Modified tag name, required, maximum length 50 + + + (text) Tag ID, required + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request PATCH '${props.apiBaseUrl}/datasets/tags' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{"name": "testtag2", "tag_id": "e1a0a3db-ee34-4e04-842a-81555d5316fd"}' + ``` + + + ```json {{ title: 'Response' }} + { + "id": "eddb66c2-04a1-4e3a-8cb2-75abd01e12a6", + "name": "tag-renamed", + "type": "knowledge", + "binding_count": 0 + } + ``` + + + + +
+ + + + + + ### Request Body + + + (text) Tag ID, required + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request DELETE '${props.apiBaseUrl}/datasets/tags' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{"tag_id": "e1a0a3db-ee34-4e04-842a-81555d5316fd"}' + ``` + + + ```json {{ title: 'Response' }} + + {"result": "success"} + + ``` + + + + +
+ + + + + ### Request Body + + + (list) List of Tag IDs, required + + + (text) Dataset ID, required + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request POST '${props.apiBaseUrl}/datasets/tags/binding' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{"tag_ids": ["65cc29be-d072-4e26-adf4-2f727644da29","1e5348f3-d3ff-42b8-a1b7-0a86d518001a"], "target_id": "a932ea9f-fae1-4b2c-9b65-71c56e2cacd6"}' + ``` + + + ```json {{ title: 'Response' }} + {"result": "success"} + ``` + + + + +
+ + + + + ### Request Body + + + (text) Tag ID, required + + + (text) Dataset ID, required + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request POST '${props.apiBaseUrl}/datasets/tags/unbinding' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{"tag_id": "1e5348f3-d3ff-42b8-a1b7-0a86d518001a", "target_id": "a932ea9f-fae1-4b2c-9b65-71c56e2cacd6"}' + ``` + + + ```json {{ title: 'Response' }} + {"result": "success"} + ``` + + + + + +
+ + + + + ### Path + + + (text) Dataset ID + + + + + /tags' \\\n--header 'Authorization: Bearer {api_key}' \\\n--header 'Content-Type: application/json' \\\n`} + > + ```bash {{ title: 'cURL' }} + curl --location --request POST '${props.apiBaseUrl}/datasets//tags' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + ``` + + + ```json {{ title: 'Response' }} + { + "data": + [ + {"id": "4a601f4f-f8a2-4166-ae7c-58c3b252a524", + "name": "123" + }, + ... + ], + "total": 3 + } + ``` + + + + + +
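Taken together, the tag endpoints documented above support a create-then-bind flow. A minimal Python sketch follows; the base URL, API key, and dataset ID are placeholders, and only the paths and payloads shown in the docs above are assumed.

```python
# Hedged sketch: create a knowledge-type tag, then bind it to a dataset.
# API_BASE, the API key, and DATASET_ID are placeholders for your own instance.
import requests

API_BASE = "https://api.dify.ai/v1"
HEADERS = {
    "Authorization": "Bearer {api_key}",  # replace with a real dataset API key
    "Content-Type": "application/json",
}
DATASET_ID = "your-dataset-id"

# 1. Create the tag (name is required, maximum length 50).
tag = requests.post(
    f"{API_BASE}/datasets/tags",
    headers=HEADERS,
    json={"name": "testtag1"},
    timeout=30,
).json()

# 2. Bind the new tag to a dataset; tag_ids is a list, target_id is the dataset ID.
binding = requests.post(
    f"{API_BASE}/datasets/tags/binding",
    headers=HEADERS,
    json={"tag_ids": [tag["id"]], "target_id": DATASET_ID},
    timeout=30,
)
print(binding.json())  # expected: {"result": "success"}
```

Unbinding and deletion follow the same pattern via `POST /datasets/tags/unbinding` and `DELETE /datasets/tags`, as documented above.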
+ diff --git a/web/app/components/datasets/list/template/template.ja.mdx b/web/app/components/datasets/list/template/template.ja.mdx index defd48816d..b9fab19948 100644 --- a/web/app/components/datasets/list/template/template.ja.mdx +++ b/web/app/components/datasets/list/template/template.ja.mdx @@ -1057,6 +1057,75 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi + + + 指定されたナレッジベース内の特定のドキュメントセグメントの詳細を表示します + + ### パス + + + ナレッジベースID + + + ドキュメントID + + + セグメントID + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request GET '${props.apiBaseUrl}/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}' \ + --header 'Authorization: Bearer {api_key}' + ``` + + + ```json {{ title: 'Response' }} + { + "data": { + "id": "セグメントID", + "position": 2, + "document_id": "ドキュメントID", + "content": "セグメント内容テキスト", + "sign_content": "署名内容テキスト", + "answer": "回答内容(Q&Aモードの場合)", + "word_count": 470, + "tokens": 382, + "keywords": ["キーワード1", "キーワード2"], + "index_node_id": "インデックスノードID", + "index_node_hash": "インデックスノードハッシュ", + "hit_count": 0, + "enabled": true, + "status": "completed", + "created_by": "作成者ID", + "created_at": 作成タイムスタンプ, + "updated_at": 更新タイムスタンプ, + "indexing_at": インデックス作成タイムスタンプ, + "completed_at": 完了タイムスタンプ, + "error": null, + "child_chunks": [] + }, + "doc_form": "text_model" + } + ``` + + + + +
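The segment-detail endpoint added in the hunk above is a plain authenticated GET. A minimal sketch, assuming the `requests` library and placeholder identifiers and API key:

```python
# Hedged sketch: fetch one chunk (segment) of a document in a knowledge base.
# All identifiers and the API key below are placeholders.
import requests

API_BASE = "https://api.dify.ai/v1"
API_KEY = "dataset-xxxxxxxxxxxx"
DATASET_ID = "your-dataset-id"
DOCUMENT_ID = "your-document-id"
SEGMENT_ID = "your-segment-id"

resp = requests.get(
    f"{API_BASE}/datasets/{DATASET_ID}/documents/{DOCUMENT_ID}/segments/{SEGMENT_ID}",
    headers={"Authorization": f"Bearer {API_KEY}"},
    timeout=30,
)
resp.raise_for_status()
segment = resp.json()["data"]
# The response carries the fields shown in the docs above (content, status, counts, ...).
print(segment["position"], segment["status"], segment["word_count"])
```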
+ + - 検索モデル (オプション、入力されない場合はデフォルトの方法でリコールされます) - - search_method (text) 検索方法: 以下の 4 つのキーワードのいずれかが必要です - - keyword_search キーワード検索 - - semantic_search セマンティック検索 - - full_text_search 全文検索 - - hybrid_search ハイブリッド検索 - - reranking_enable (bool) 再ランキングを有効にするかどうか、検索モードが semantic_search または hybrid_search の場合に必須 (オプション) - - reranking_mode (object) 再ランキングモデル構成、再ランキングが有効な場合に必須 - - reranking_provider_name (string) 再ランキングモデルプロバイダー - - reranking_model_name (string) 再ランキングモデル名 - - weights (float) ハイブリッド検索モードでのセマンティック検索の重み設定 - - top_k (integer) 返される結果の数 (オプション) - - score_threshold_enabled (bool) スコア閾値を有効にするかどうか - - score_threshold (float) スコア閾値 + 検索パラメータ(オプション、入力されない場合はデフォルトの方法でリコールされます) + - search_method (text) 検索方法: 以下の4つのキーワードのいずれかが必要です + - keyword_search キーワード検索 + - semantic_search セマンティック検索 + - full_text_search 全文検索 + - hybrid_search ハイブリッド検索 + - reranking_enable (bool) 再ランキングを有効にするかどうか、検索モードがsemantic_searchまたはhybrid_searchの場合に必須(オプション) + - reranking_mode (object) 再ランキングモデル構成、再ランキングが有効な場合に必須 + - reranking_provider_name (string) 再ランキングモデルプロバイダー + - reranking_model_name (string) 再ランキングモデル名 + - weights (float) ハイブリッド検索モードでのセマンティック検索の重み設定 + - top_k (integer) 返される結果の数(オプション) + - score_threshold_enabled (bool) スコア閾値を有効にするかどうか + - score_threshold (float) スコア閾値 + - metadata_filtering_conditions (object) メタデータフィルタリング条件 + - logical_operator (string) 論理演算子: and | or + - conditions (array[object]) 条件リスト + - name (string) メタデータフィールド名 + - comparison_operator (string) 比較演算子、許可される値: + - 文字列比較: + - contains: 含む + - not contains: 含まない + - start with: で始まる + - end with: で終わる + - is: 等しい + - is not: 等しくない + - empty: 空 + - not empty: 空でない + - 数値比較: + - =: 等しい + - ≠: 等しくない + - >: より大きい + - <: より小さい + - ≥: 以上 + - ≤: 以下 + - 時間比較: + - before: より前 + - after: より後 + - value (string|number|null) 比較値 未使用フィールド @@ -1566,7 +1659,17 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi "weights": null, "top_k": 1, "score_threshold_enabled": false, - "score_threshold": null + "score_threshold": null, + "metadata_filtering_conditions": { + "logical_operator": "and", + "conditions": [ + { + "name": "document_name", + "comparison_operator": "contains", + "value": "test" + } + ] + } } }'`} > @@ -1898,6 +2001,313 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi +
+ + +
+ ### Request Body + + + (text) 新しいタグ名、必須、最大長50文字 + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request POST '${props.apiBaseUrl}/datasets/tags' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{"name": "testtag1"}' + ``` + + + ```json {{ title: 'Response' }} + { + "id": "eddb66c2-04a1-4e3a-8cb2-75abd01e12a6", + "name": "testtag1", + "type": "knowledge", + "binding_count": 0 + } + ``` + + + + + +
+ + + + + ### Request Body + + + + ```bash {{ title: 'cURL' }} + curl --location --request GET '${props.apiBaseUrl}/datasets/tags' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' + ``` + + + ```json {{ title: 'Response' }} + [ + { + "id": "39d6934c-ed36-463d-b4a7-377fa1503dc0", + "name": "testtag1", + "type": "knowledge", + "binding_count": "0" + }, + ... + ] + ``` + + + + +
+ + + + + ### Request Body + + + (text) 変更後のタグ名、必須、最大長50文字 + + + (text) タグID、必須 + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request PATCH '${props.apiBaseUrl}/datasets/tags' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{"name": "testtag2", "tag_id": "e1a0a3db-ee34-4e04-842a-81555d5316fd"}' + ``` + + + ```json {{ title: 'Response' }} + { + "id": "eddb66c2-04a1-4e3a-8cb2-75abd01e12a6", + "name": "tag-renamed", + "type": "knowledge", + "binding_count": 0 + } + ``` + + + + +
+ + + + + + ### Request Body + + + (text) タグID、必須 + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request DELETE '${props.apiBaseUrl}/datasets/tags' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{"tag_id": "e1a0a3db-ee34-4e04-842a-81555d5316fd"}' + ``` + + + ```json {{ title: 'Response' }} + + {"result": "success"} + + ``` + + + + +
+ + + + + ### Request Body + + + (list) タグIDリスト、必須 + + + (text) ナレッジベースID、必須 + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request POST '${props.apiBaseUrl}/datasets/tags/binding' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{"tag_ids": ["65cc29be-d072-4e26-adf4-2f727644da29","1e5348f3-d3ff-42b8-a1b7-0a86d518001a"], "target_id": "a932ea9f-fae1-4b2c-9b65-71c56e2cacd6"}' + ``` + + + ```json {{ title: 'Response' }} + {"result": "success"} + ``` + + + + +
+ + + + + ### Request Body + + + (text) タグID、必須 + + + (text) ナレッジベースID、必須 + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request POST '${props.apiBaseUrl}/datasets/tags/unbinding' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{"tag_id": "1e5348f3-d3ff-42b8-a1b7-0a86d518001a", "target_id": "a932ea9f-fae1-4b2c-9b65-71c56e2cacd6"}' + ``` + + + ```json {{ title: 'Response' }} + {"result": "success"} + ``` + + + + + +
+ + + + + ### Path + + + (text) ナレッジベースID + + + + + /tags' \\\n--header 'Authorization: Bearer {api_key}' \\\n--header 'Content-Type: application/json' \\\n`} + > + ```bash {{ title: 'cURL' }} + curl --location --request POST '${props.apiBaseUrl}/datasets//tags' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + ``` + + + ```json {{ title: 'Response' }} + { + "data": + [ + {"id": "4a601f4f-f8a2-4166-ae7c-58c3b252a524", + "name": "123" + }, + ... + ], + "total": 3 + } + ``` + + + + +
diff --git a/web/app/components/datasets/list/template/template.zh.mdx b/web/app/components/datasets/list/template/template.zh.mdx index 04b5837651..b10f22002a 100644 --- a/web/app/components/datasets/list/template/template.zh.mdx +++ b/web/app/components/datasets/list/template/template.zh.mdx @@ -1351,6 +1351,75 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi + + + 查看指定知识库中特定文档的分段详情 + + ### Path + + + 知识库 ID + + + 文档 ID + + + 分段 ID + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request GET '${props.apiBaseUrl}/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}' \ + --header 'Authorization: Bearer {api_key}' + ``` + + + ```json {{ title: 'Response' }} + { + "data": { + "id": "分段唯一ID", + "position": 2, + "document_id": "所属文档ID", + "content": "分段内容文本", + "sign_content": "签名内容文本", + "answer": "答案内容(如果有)", + "word_count": 470, + "tokens": 382, + "keywords": ["关键词1", "关键词2"], + "index_node_id": "索引节点ID", + "index_node_hash": "索引节点哈希值", + "hit_count": 0, + "enabled": true, + "status": "completed", + "created_by": "创建者ID", + "created_at": 创建时间戳, + "updated_at": 更新时间戳, + "indexing_at": 索引时间戳, + "completed_at": 完成时间戳, + "error": null, + "child_chunks": [] + }, + "doc_form": "text_model" + } + ``` + + + + +
+ +top_k (integer) 返回结果数量,非必填 - score_threshold_enabled (bool) 是否开启 score 阈值 - score_threshold (float) Score 阈值 + - metadata_filtering_conditions (object) 元数据过滤条件 + - logical_operator (string) 逻辑运算符: and | or + - conditions (array[object]) 条件列表 + - name (string) 元数据字段名 + - comparison_operator (string) 比较运算符,可选值: + - 字符串比较: + - contains: 包含 + - not contains: 不包含 + - start with: 以...开头 + - end with: 以...结尾 + - is: 等于 + - is not: 不等于 + - empty: 为空 + - not empty: 不为空 + - 数值比较: + - =: 等于 + - ≠: 不等于 + - >: 大于 + - <: 小于 + - ≥: 大于等于 + - ≤: 小于等于 + - 时间比较: + - before: 早于 + - after: 晚于 + - value (string|number|null) 比较值 未启用字段 @@ -1851,7 +1945,17 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi "weights": null, "top_k": 1, "score_threshold_enabled": false, - "score_threshold": null + "score_threshold": null, + "metadata_filtering_conditions": { + "logical_operator": "and", + "conditions": [ + { + "name": "document_name", + "comparison_operator": "contains", + "value": "test" + } + ] + } } }'`} > @@ -2287,6 +2391,314 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi +
+ + + +
+ ### Request Body + + + (text) 新标签名称,必填,最大长度为50 + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request POST '${props.apiBaseUrl}/datasets/tags' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{"name": "testtag1"}' + ``` + + + ```json {{ title: 'Response' }} + { + "id": "eddb66c2-04a1-4e3a-8cb2-75abd01e12a6", + "name": "testtag1", + "type": "knowledge", + "binding_count": 0 + } + ``` + + + + + +
+ + + + + ### Request Body + + + + ```bash {{ title: 'cURL' }} + curl --location --request GET '${props.apiBaseUrl}/datasets/tags' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' + ``` + + + ```json {{ title: 'Response' }} + [ + { + "id": "39d6934c-ed36-463d-b4a7-377fa1503dc0", + "name": "testtag1", + "type": "knowledge", + "binding_count": "0" + }, + ... + ] + ``` + + + + +
+ + + + + ### Request Body + + + (text) 修改后的标签名称,必填,最大长度为50 + + + (text) 标签ID,必填 + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request PATCH '${props.apiBaseUrl}/datasets/tags' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{"name": "testtag2", "tag_id": "e1a0a3db-ee34-4e04-842a-81555d5316fd"}' + ``` + + + ```json {{ title: 'Response' }} + { + "id": "eddb66c2-04a1-4e3a-8cb2-75abd01e12a6", + "name": "tag-renamed", + "type": "knowledge", + "binding_count": 0 + } + ``` + + + + +
+ + + + + + ### Request Body + + + (text) 标签ID,必填 + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request DELETE '${props.apiBaseUrl}/datasets/tags' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{"tag_id": "e1a0a3db-ee34-4e04-842a-81555d5316fd"}' + ``` + + + ```json {{ title: 'Response' }} + + {"result": "success"} + + ``` + + + + +
+ + + + + ### Request Body + + + (list) 标签ID列表,必填 + + + (text) 知识库ID,必填 + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request POST '${props.apiBaseUrl}/datasets/tags/binding' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{"tag_ids": ["65cc29be-d072-4e26-adf4-2f727644da29","1e5348f3-d3ff-42b8-a1b7-0a86d518001a"], "target_id": "a932ea9f-fae1-4b2c-9b65-71c56e2cacd6"}' + ``` + + + ```json {{ title: 'Response' }} + {"result": "success"} + ``` + + + + +
+ + + + + ### Request Body + + + (text) 标签ID,必填 + + + (text) 知识库ID,必填 + + + + + + ```bash {{ title: 'cURL' }} + curl --location --request POST '${props.apiBaseUrl}/datasets/tags/unbinding' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{"tag_id": "1e5348f3-d3ff-42b8-a1b7-0a86d518001a", "target_id": "a932ea9f-fae1-4b2c-9b65-71c56e2cacd6"}' + ``` + + + ```json {{ title: 'Response' }} + {"result": "success"} + ``` + + + + + +
+ + + + + ### Path + + + (text) 知识库ID + + + + + /tags' \\\n--header 'Authorization: Bearer {api_key}' \\\n--header 'Content-Type: application/json' \\\n`} + > + ```bash {{ title: 'cURL' }} + curl --location --request POST '${props.apiBaseUrl}/datasets//tags' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + ``` + + + ```json {{ title: 'Response' }} + { + "data": + [ + {"id": "4a601f4f-f8a2-4166-ae7c-58c3b252a524", + "name": "123" + }, + ... + ], + "total": 3 + } + ``` + + + + +
diff --git a/web/app/components/header/account-setting/model-provider-page/model-icon/index.tsx b/web/app/components/header/account-setting/model-provider-page/model-icon/index.tsx index d302defcfe..6f93e2871d 100644 --- a/web/app/components/header/account-setting/model-provider-page/model-icon/index.tsx +++ b/web/app/components/header/account-setting/model-provider-page/model-icon/index.tsx @@ -5,7 +5,7 @@ import type { } from '../declarations' import { useLanguage } from '../hooks' import { Group } from '@/app/components/base/icons/src/vender/other' -import { OpenaiBlue, OpenaiViolet } from '@/app/components/base/icons/src/public/llm' +import { OpenaiBlue, OpenaiTale, OpenaiViolet, OpenaiYellow } from '@/app/components/base/icons/src/public/llm' import cn from '@/utils/classnames' import { renderI18nObject } from '@/i18n' @@ -22,6 +22,10 @@ const ModelIcon: FC = ({ isDeprecated = false, }) => { const language = useLanguage() + if (provider?.provider && ['openai', 'langgenius/openai/openai'].includes(provider.provider) && modelName?.startsWith('o')) + return
+ if (provider?.provider && ['openai', 'langgenius/openai/openai'].includes(provider.provider) && modelName?.includes('gpt-4.1')) + return
if (provider?.provider && ['openai', 'langgenius/openai/openai'].includes(provider.provider) && modelName?.includes('gpt-4o')) return
if (provider?.provider && ['openai', 'langgenius/openai/openai'].includes(provider.provider) && modelName?.startsWith('gpt-4')) diff --git a/web/app/components/plugins/install-plugin/install-bundle/steps/install.tsx b/web/app/components/plugins/install-plugin/install-bundle/steps/install.tsx index ee2699b4bb..db24bdd97a 100644 --- a/web/app/components/plugins/install-plugin/install-bundle/steps/install.tsx +++ b/web/app/components/plugins/install-plugin/install-bundle/steps/install.tsx @@ -9,6 +9,7 @@ import InstallMulti from './install-multi' import { useInstallOrUpdate } from '@/service/use-plugins' import useRefreshPluginList from '../../hooks/use-refresh-plugin-list' import { useCanInstallPluginFromMarketplace } from '@/app/components/plugins/plugin-page/use-permission' +import { useMittContextSelector } from '@/context/mitt-context' const i18nPrefix = 'plugin.installModal' type Props = { @@ -29,6 +30,7 @@ const Install: FC = ({ isHideButton, }) => { const { t } = useTranslation() + const emit = useMittContextSelector(s => s.emit) const [selectedPlugins, setSelectedPlugins] = React.useState([]) const [selectedIndexes, setSelectedIndexes] = React.useState([]) const selectedPluginsNum = selectedPlugins.length @@ -63,8 +65,12 @@ const Install: FC = ({ }) })) const hasInstallSuccess = res.some(r => r.success) - if (hasInstallSuccess) + if (hasInstallSuccess) { refreshPluginList(undefined, true) + emit('plugin:install:success', selectedPlugins.map((p) => { + return `${p.plugin_id}/${p.name}` + })) + } }, }) const handleInstall = () => { diff --git a/web/app/components/workflow/nodes/http/components/curl-panel.tsx b/web/app/components/workflow/nodes/http/components/curl-panel.tsx index 52e28d7336..a5339a1f39 100644 --- a/web/app/components/workflow/nodes/http/components/curl-panel.tsx +++ b/web/app/components/workflow/nodes/http/components/curl-panel.tsx @@ -2,7 +2,7 @@ import type { FC } from 'react' import React, { useCallback, useState } from 'react' import { useTranslation } from 'react-i18next' -import { BodyType, type HttpNodeType, Method } from '../types' +import { BodyPayloadValueType, BodyType, type HttpNodeType, Method } from '../types' import Modal from '@/app/components/base/modal' import Button from '@/app/components/base/button' import Textarea from '@/app/components/base/textarea' @@ -51,11 +51,16 @@ const parseCurl = (curlCommand: string): { node: HttpNodeType | null; error: str case '-d': case '--data': case '--data-raw': - case '--data-binary': + case '--data-binary': { if (i + 1 >= args.length) return { node: null, error: 'Missing data value after -d, --data, --data-raw, or --data-binary.' 
} - node.body = { type: BodyType.rawText, data: args[++i].replace(/^['"]|['"]$/g, '') } + const bodyPayload = [{ + type: BodyPayloadValueType.text, + value: args[++i].replace(/^['"]|['"]$/g, ''), + }] + node.body = { type: BodyType.rawText, data: bodyPayload } break + } case '-F': case '--form': { if (i + 1 >= args.length) diff --git a/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/code-editor.tsx b/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/code-editor.tsx index a3c2552b45..2ae7fec78d 100644 --- a/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/code-editor.tsx +++ b/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/code-editor.tsx @@ -28,6 +28,7 @@ const CodeEditor: FC = ({ const { theme } = useTheme() const monacoRef = useRef(null) const editorRef = useRef(null) + const containerRef = useRef(null) useEffect(() => { if (monacoRef.current) { @@ -74,6 +75,19 @@ const CodeEditor: FC = ({ onUpdate?.(value) }, [onUpdate]) + useEffect(() => { + const resizeObserver = new ResizeObserver(() => { + editorRef.current?.layout() + }) + + if (containerRef.current) + resizeObserver.observe(containerRef.current) + + return () => { + resizeObserver.disconnect() + } + }, []) + return (
@@ -102,9 +116,11 @@ const CodeEditor: FC = ({
-
+
= ({ scrollBeyondLastLine: false, wordWrap: 'on', wrappingIndent: 'same', - // Add these options overviewRulerBorder: false, hideCursorInOverviewRuler: true, renderLineHighlightOnlyWhenFocus: false, diff --git a/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/json-schema-config.tsx b/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/json-schema-config.tsx index 344d02c011..2b8574b285 100644 --- a/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/json-schema-config.tsx +++ b/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/json-schema-config.tsx @@ -21,7 +21,7 @@ import { MittProvider, VisualEditorContextProvider, useMittContext } from './vis import ErrorMessage from './error-message' import { useVisualEditorStore } from './visual-editor/store' import Toast from '@/app/components/base/toast' -import { useGetLanguage } from '@/context/i18n' +import { useGetDocLanguage } from '@/context/i18n' import { JSON_SCHEMA_MAX_DEPTH } from '@/config' type JsonSchemaConfigProps = { @@ -47,21 +47,13 @@ const DEFAULT_SCHEMA: SchemaRoot = { additionalProperties: false, } -const HELP_DOC_URL = { - zh_Hans: 'https://docs.dify.ai/zh-hans/guides/workflow/structured-outputs', - en_US: 'https://docs.dify.ai/en/guides/workflow/structured-outputs', - ja_JP: 'https://docs.dify.ai/ja-jp/guides/workflow/structured-outputs', -} - -type LocaleKey = keyof typeof HELP_DOC_URL - const JsonSchemaConfig: FC = ({ defaultSchema, onSave, onClose, }) => { const { t } = useTranslation() - const locale = useGetLanguage() as LocaleKey + const docLanguage = useGetDocLanguage() const [currentTab, setCurrentTab] = useState(SchemaView.VisualEditor) const [jsonSchema, setJsonSchema] = useState(defaultSchema || DEFAULT_SCHEMA) const [json, setJson] = useState(JSON.stringify(jsonSchema, null, 2)) @@ -260,7 +252,7 @@ const JsonSchemaConfig: FC = ({
diff --git a/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/schema-editor.tsx b/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/schema-editor.tsx index e78b9224b2..05a429ff72 100644 --- a/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/schema-editor.tsx +++ b/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/schema-editor.tsx @@ -12,7 +12,7 @@ const SchemaEditor: FC = ({ }) => { return ( { return null return ( -
+
{visibleVariables.map((variable, index) => (
+export const MittContext = createContext({ + emit: noop, + useSubscribe: noop, +}) + +export const MittProvider = ({ children }: { children: React.ReactNode }) => { + const mitt = useMitt() + + return ( + + {children} + + ) +} + +export const useMittContext = () => { + return useContext(MittContext) +} + +export function useMittContextSelector(selector: (value: ContextValueType) => T): T { + return useContextSelector(MittContext, selector) +} diff --git a/web/i18n/de-DE/app.ts b/web/i18n/de-DE/app.ts index 5ae5c39b51..b9fdde58ff 100644 --- a/web/i18n/de-DE/app.ts +++ b/web/i18n/de-DE/app.ts @@ -216,6 +216,41 @@ const translation = { moreFillTip: 'Maximal 10 Ebenen der Verschachtelung anzeigen', LLMResponse: 'LLM-Antwort', }, + accessItemsDescription: { + anyone: 'Jeder kann auf die Webanwendung zugreifen.', + specific: 'Nur bestimmte Gruppen oder Mitglieder können auf die Webanwendung zugreifen.', + organization: 'Jeder in der Organisation kann auf die Webanwendung zugreifen.', + }, + accessControlDialog: { + accessItems: { + anyone: 'Jeder mit dem Link', + specific: 'Spezifische Gruppen oder Mitglieder', + organization: 'Nur Mitglieder innerhalb des Unternehmens', + }, + operateGroupAndMember: { + searchPlaceholder: 'Gruppen und Mitglieder suchen', + allMembers: 'Alle Mitglieder', + expand: 'Erweitern', + noResult: 'Kein Ergebnis', + }, + title: 'Zugriffskontrolle für Webanwendungen', + description: 'Webanwendungszugriffsberechtigungen festlegen', + accessLabel: 'Wer hat Zugang', + groups_one: '{{count}} GRUPPE', + members_one: '{{count}} MITGLIED', + members_other: '{{count}} MITGLIEDER', + noGroupsOrMembers: 'Keine Gruppen oder Mitglieder ausgewählt', + webAppSSONotEnabledTip: 'Bitte kontaktieren Sie den Unternehmensadministrator, um die Authentifizierungsmethode der Webanwendung zu konfigurieren.', + updateSuccess: 'Erfolgreich aktualisiert', + groups_other: '{{count}} GRUPPEN', + }, + publishApp: { + title: 'Wer kann auf die Webanwendung zugreifen?', + notSetDesc: 'Derzeit kann niemand auf die Webanwendung zugreifen. 
Bitte setzen Sie die Berechtigungen.', + notSet: 'Nicht festgelegt', + }, + accessControl: 'Zugriffskontrolle für Webanwendungen', + noAccessPermission: 'Keine Berechtigung zum Zugriff auf die Webanwendung', } export default translation diff --git a/web/i18n/de-DE/common.ts b/web/i18n/de-DE/common.ts index 54bde0b67e..3e00cfcaed 100644 --- a/web/i18n/de-DE/common.ts +++ b/web/i18n/de-DE/common.ts @@ -145,6 +145,8 @@ const translation = { newDataset: 'Wissen erstellen', tools: 'Werkzeuge', exploreMarketplace: 'Marketplace erkunden', + appDetail: 'App-Details', + account: 'Konto', }, userProfile: { settings: 'Einstellungen', @@ -550,7 +552,7 @@ const translation = { vectorHash: 'Vektorhash:', hitScore: 'Abrufwertung:', }, - inputPlaceholder: 'Sprechen Sie mit dem Bot', + inputPlaceholder: 'Sprechen Sie mit dem {{botName}}', thought: 'Gedanke', thinking: 'Denken...', resend: 'Erneut senden', @@ -644,6 +646,7 @@ const translation = { license: { expiring: 'Läuft an einem Tag ab', expiring_plural: 'Läuft in {{count}} Tagen ab', + unlimited: 'Unbegrenzt', }, pagination: { perPage: 'Artikel pro Seite', @@ -667,6 +670,7 @@ const translation = { browse: 'blättern', supportedFormats: 'Unterstützt PNG, JPG, JPEG, WEBP und GIF', }, + you: 'Du', } export default translation diff --git a/web/i18n/de-DE/login.ts b/web/i18n/de-DE/login.ts index 2e0d51cf85..23cd7ce11c 100644 --- a/web/i18n/de-DE/login.ts +++ b/web/i18n/de-DE/login.ts @@ -105,6 +105,11 @@ const translation = { licenseInactiveTip: 'Die Dify Enterprise-Lizenz für Ihren Arbeitsbereich ist inaktiv. Wenden Sie sich an Ihren Administrator, um Dify weiterhin zu verwenden.', licenseExpiredTip: 'Die Dify Enterprise-Lizenz für Ihren Arbeitsbereich ist abgelaufen. Wenden Sie sich an Ihren Administrator, um Dify weiterhin zu verwenden.', licenseLost: 'Lizenz verloren', + webapp: { + noLoginMethod: 'Authentifizierungsmethode ist nicht für die Webanwendung konfiguriert', + noLoginMethodTip: 'Bitte kontaktieren Sie den Systemadministrator, um eine Authentifizierungsmethode hinzuzufügen.', + disabled: 'Die Webanmeldeauthentifizierung ist deaktiviert. Bitte kontaktieren Sie den Systemadministrator, um sie zu aktivieren. Sie können versuchen, die App direkt zu verwenden.', + }, } export default translation diff --git a/web/i18n/de-DE/plugin.ts b/web/i18n/de-DE/plugin.ts index 9202c224de..498ac86573 100644 --- a/web/i18n/de-DE/plugin.ts +++ b/web/i18n/de-DE/plugin.ts @@ -62,6 +62,7 @@ const translation = { uninstalledTitle: 'Tool nicht installiert', toolLabel: 'Werkzeug', uninstalledContent: 'Dieses Plugin wird aus dem lokalen/GitHub-Repository installiert. 
Bitte nach der Installation verwenden.', + toolSetting: 'Werkzeugs Einstellungen', }, strategyNum: '{{num}} {{Strategie}} IINKLUSIVE', configureApp: 'App konfigurieren', @@ -210,6 +211,7 @@ const translation = { title: 'Plugins', }, difyVersionNotCompatible: 'Die aktuelle Dify-Version ist mit diesem Plugin nicht kompatibel, bitte aktualisieren Sie auf die erforderliche Mindestversion: {{minimalDifyVersion}}', + requestAPlugin: 'Ein Plugin anfordern', } export default translation diff --git a/web/i18n/de-DE/workflow.ts b/web/i18n/de-DE/workflow.ts index a87e58f75b..1eb7804271 100644 --- a/web/i18n/de-DE/workflow.ts +++ b/web/i18n/de-DE/workflow.ts @@ -660,6 +660,7 @@ const translation = { }, json: 'von einem Tool generiertes JSON', }, + authorize: 'Autorisieren', }, questionClassifiers: { model: 'Modell', diff --git a/web/i18n/en-US/common.ts b/web/i18n/en-US/common.ts index dfe1b25d29..8b52fbcf64 100644 --- a/web/i18n/en-US/common.ts +++ b/web/i18n/en-US/common.ts @@ -573,7 +573,7 @@ const translation = { vectorHash: 'Vector hash:', hitScore: 'Retrieval Score:', }, - inputPlaceholder: 'Talk to Bot', + inputPlaceholder: 'Talk to {{botName}}', thinking: 'Thinking...', thought: 'Thought', resend: 'Resend', diff --git a/web/i18n/es-ES/app.ts b/web/i18n/es-ES/app.ts index de3a458d2b..c183485294 100644 --- a/web/i18n/es-ES/app.ts +++ b/web/i18n/es-ES/app.ts @@ -208,6 +208,41 @@ const translation = { structuredTip: 'Las Salidas Estructuradas son una función que garantiza que el modelo siempre generará respuestas que se ajusten a su esquema JSON proporcionado.', modelNotSupported: 'Modelo no soportado', }, + accessItemsDescription: { + anyone: 'Cualquiera puede acceder a la aplicación web.', + specific: 'Solo grupos o miembros específicos pueden acceder a la aplicación web', + organization: 'Cualquiera en la organización puede acceder a la aplicación web', + }, + accessControlDialog: { + accessItems: { + anyone: 'Cualquiera con el enlace', + specific: 'Grupos o miembros específicos', + organization: 'Solo miembros dentro de la empresa', + }, + operateGroupAndMember: { + searchPlaceholder: 'Buscar grupos y miembros', + allMembers: 'Todos los miembros', + expand: 'Expandir', + noResult: 'Sin resultado', + }, + title: 'Control de Acceso a la Aplicación Web', + description: 'Establecer permisos de acceso a la aplicación web', + accessLabel: '¿Quién tiene acceso?', + groups_one: '{{count}} GRUPO', + groups_other: '{{count}} GRUPOS', + members_one: '{{count}} MIEMBRO', + members_other: '{{count}} MIEMBROS', + noGroupsOrMembers: 'No grupos o miembros seleccionados', + webAppSSONotEnabledTip: 'Por favor, contacte al administrador de la empresa para configurar el método de autenticación de la aplicación web.', + updateSuccess: 'Actualización exitosa', + }, + publishApp: { + title: '¿Quién puede acceder a la aplicación web?', + notSet: 'No establecido', + notSetDesc: 'Actualmente nadie puede acceder a la aplicación web. 
Por favor, configure los permisos.', + }, + accessControl: 'Control de Acceso a la Aplicación Web', + noAccessPermission: 'No se permite el acceso a la aplicación web', } export default translation diff --git a/web/i18n/es-ES/common.ts b/web/i18n/es-ES/common.ts index 2c9e69320a..22c70f6bff 100644 --- a/web/i18n/es-ES/common.ts +++ b/web/i18n/es-ES/common.ts @@ -149,6 +149,8 @@ const translation = { newDataset: 'Crear Conocimiento', tools: 'Herramientas', exploreMarketplace: 'Explora el mercado', + appDetail: 'Detalles de la aplicación', + account: 'Cuenta', }, userProfile: { settings: 'Configuraciones', @@ -554,7 +556,7 @@ const translation = { vectorHash: 'Hash de vector:', hitScore: 'Puntuación de recuperación:', }, - inputPlaceholder: 'Hablar con el bot', + inputPlaceholder: 'Hablar con el {{botName}}', thinking: 'Pensamiento...', thought: 'Pensamiento', resend: 'Reenviar', @@ -666,6 +668,7 @@ const translation = { browse: 'navegar', dropImageHere: 'Deja tu imagen aquí, o', }, + you: 'Tú', } export default translation diff --git a/web/i18n/es-ES/login.ts b/web/i18n/es-ES/login.ts index bb465ac1be..8c575e58ee 100644 --- a/web/i18n/es-ES/login.ts +++ b/web/i18n/es-ES/login.ts @@ -105,6 +105,11 @@ const translation = { licenseLost: 'Licencia perdida', licenseExpiredTip: 'La licencia de Dify Enterprise para su espacio de trabajo ha caducado. Póngase en contacto con su administrador para seguir utilizando Dify.', licenseLostTip: 'No se pudo conectar el servidor de licencias de Dife. Póngase en contacto con su administrador para seguir utilizando Dify.', + webapp: { + disabled: 'La autenticación de la aplicación web está desactivada. Por favor, contacte al administrador del sistema para habilitarla. Puede intentar usar la aplicación directamente.', + noLoginMethodTip: 'Por favor, contacta al administrador del sistema para agregar un método de autenticación.', + noLoginMethod: 'Método de autenticación no configurado para la aplicación web', + }, } export default translation diff --git a/web/i18n/es-ES/plugin.ts b/web/i18n/es-ES/plugin.ts index 3e62e185c3..4c1f148235 100644 --- a/web/i18n/es-ES/plugin.ts +++ b/web/i18n/es-ES/plugin.ts @@ -62,6 +62,7 @@ const translation = { unsupportedTitle: 'Acción no admitida', params: 'CONFIGURACIÓN DE RAZONAMIENTO', uninstalledLink: 'Administrar en Plugins', + toolSetting: 'Configuraciones de la herramienta', }, endpointDeleteContent: '¿Te gustaría eliminar {{nombre}}?', endpointDisableTip: 'Deshabilitar punto de conexión', @@ -210,6 +211,7 @@ const translation = { title: 'Complementos', }, difyVersionNotCompatible: 'La versión actual de Dify no es compatible con este plugin, por favor actualiza a la versión mínima requerida: {{minimalDifyVersion}}', + requestAPlugin: 'Solicitar un plugin', } export default translation diff --git a/web/i18n/es-ES/workflow.ts b/web/i18n/es-ES/workflow.ts index 4f98b6ace6..7881728c89 100644 --- a/web/i18n/es-ES/workflow.ts +++ b/web/i18n/es-ES/workflow.ts @@ -658,6 +658,7 @@ const translation = { }, json: 'JSON generado por la herramienta', }, + authorize: 'autorizar', }, questionClassifiers: { model: 'modelo', diff --git a/web/i18n/fa-IR/app.ts b/web/i18n/fa-IR/app.ts index f048dfca1f..d37f4e8f90 100644 --- a/web/i18n/fa-IR/app.ts +++ b/web/i18n/fa-IR/app.ts @@ -209,6 +209,41 @@ const translation = { modelNotSupportedTip: 'مدل فعلی این ویژگی را پشتیبانی نمی‌کند و به‌طور خودکار به تزریق درخواست تنزل پیدا می‌کند.', structuredTip: 'خروجی‌های ساختاری یک ویژگی است که تضمین می‌کند مدل همیشه پاسخ‌هایی تولید می‌کند که به طرح JSON 
ارائه شده شما پایبند باشد.', }, + accessItemsDescription: { + specific: 'فقط گروه‌ها یا اعضای خاصی می‌توانند به اپلیکیشن وب دسترسی پیدا کنند.', + anyone: 'هر کسی می‌تواند به وب‌اپلیکیشن دسترسی پیدا کند', + organization: 'هر کسی در سازمان می‌تواند به اپلیکیشن وب دسترسی پیدا کند.', + }, + accessControlDialog: { + accessItems: { + specific: 'گروه‌ها یا اعضای خاص', + organization: 'فقط اعضای داخل سازمان', + anyone: 'هر کسی که لینک را داشته باشد', + }, + operateGroupAndMember: { + searchPlaceholder: 'گروه‌ها و اعضا را جستجو کنید', + allMembers: 'تمام اعضا', + noResult: 'نتیجه‌ای نیست', + expand: 'گسترش', + }, + description: 'مجوزهای دسترسی به برنامه وب را تنظیم کنید', + accessLabel: 'چه کسی به آن دسترسی دارد', + groups_one: '{{count}} گروه', + groups_other: '{{count}} گروه', + members_one: '{{count}} عضو', + members_other: '{{count}} عضو', + noGroupsOrMembers: 'هیچ گروه یا عضوی انتخاب نشده است', + title: 'کنترل دسترسی به وب اپلیکیشن', + updateSuccess: 'به‌روز رسانی با موفقیت انجام شد', + webAppSSONotEnabledTip: 'لطفاً با مدیر شرکت تماس بگیرید تا روش احراز هویت برنامه وب را پیکربندی کند.', + }, + publishApp: { + notSet: 'تنظیم نشده است', + notSetDesc: 'در حال حاضر هیچ‌کس نمی‌تواند به برنامه وب دسترسی پیدا کند. لطفاً مجوزها را تنظیم کنید.', + title: 'چه کسی می‌تواند به برنامه وب دسترسی داشته باشد؟', + }, + accessControl: 'کنترل دسترسی به وب اپلیکیشن', + noAccessPermission: 'دسترسی به برنامه وب مجاز نیست', } export default translation diff --git a/web/i18n/fa-IR/common.ts b/web/i18n/fa-IR/common.ts index 64a5c3b73d..7cd9a89684 100644 --- a/web/i18n/fa-IR/common.ts +++ b/web/i18n/fa-IR/common.ts @@ -149,6 +149,8 @@ const translation = { newDataset: 'ایجاد دانش', tools: 'ابزارها', exploreMarketplace: 'بازار را کاوش کنید', + appDetail: 'جزئیات برنامه', + account: 'حساب', }, userProfile: { settings: 'تنظیمات', @@ -644,6 +646,7 @@ const translation = { license: { expiring_plural: 'انقضا در {{count}} روز', expiring: 'انقضا در یک روز', + unlimited: 'نامحدود', }, pagination: { perPage: 'موارد در هر صفحه', @@ -667,6 +670,7 @@ const translation = { supportedFormats: 'از فرمت‌های PNG، JPG، JPEG، WEBP و GIF پشتیبانی می‌کند', browse: 'مرورگر', }, + you: 'تو', } export default translation diff --git a/web/i18n/fa-IR/login.ts b/web/i18n/fa-IR/login.ts index 7394ab325f..7d853c7b2d 100644 --- a/web/i18n/fa-IR/login.ts +++ b/web/i18n/fa-IR/login.ts @@ -105,6 +105,11 @@ const translation = { licenseExpiredTip: 'مجوز Dify Enterprise برای فضای کاری شما منقضی شده است. لطفا برای ادامه استفاده از Dify با سرپرست خود تماس بگیرید.', licenseInactiveTip: 'مجوز Dify Enterprise برای فضای کاری شما غیرفعال است. لطفا برای ادامه استفاده از Dify با سرپرست خود تماس بگیرید.', licenseLostTip: 'اتصال سرور مجوز Dify انجام نشد. لطفا برای ادامه استفاده از Dify با سرپرست خود تماس بگیرید.', + webapp: { + disabled: 'احراز هویت وب اپ غیرفعال است. لطفاً با مدیر سیستم تماس بگیرید تا آن را فعال کند. می‌توانید سعی کنید مستقیماً از اپلیکیشن استفاده کنید.', + noLoginMethodTip: 'لطفاً با مدیر سیستم تماس بگیرید تا یک روش احراز هویت اضافه کند.', + noLoginMethod: 'روش احراز هویت برای برنامه وب پیکربندی نشده است', + }, } export default translation diff --git a/web/i18n/fa-IR/plugin.ts b/web/i18n/fa-IR/plugin.ts index 81aa61ae84..80433900c1 100644 --- a/web/i18n/fa-IR/plugin.ts +++ b/web/i18n/fa-IR/plugin.ts @@ -62,6 +62,7 @@ const translation = { uninstalledContent: 'این افزونه از مخزن local/GitHub نصب شده است. 
لطفا پس از نصب استفاده کنید.', unsupportedTitle: 'اکشن پشتیبانی نشده', unsupportedContent2: 'برای تغییر نسخه کلیک کنید.', + toolSetting: 'تنظیمات ابزار', }, endpointDeleteTip: 'حذف نقطه پایانی', disabled: 'غیر فعال', @@ -210,6 +211,7 @@ const translation = { title: 'پلاگین ها', }, difyVersionNotCompatible: 'نسخه فعلی دیفی با این پلاگین سازگار نیست، لطفاً به نسخه حداقل مورد نیاز به‌روزرسانی کنید: {{minimalDifyVersion}}', + requestAPlugin: 'درخواست یک افزونه', } export default translation diff --git a/web/i18n/fa-IR/workflow.ts b/web/i18n/fa-IR/workflow.ts index 4fb6ad8f37..e548dc8ecb 100644 --- a/web/i18n/fa-IR/workflow.ts +++ b/web/i18n/fa-IR/workflow.ts @@ -660,6 +660,7 @@ const translation = { }, json: 'json تولید شده توسط ابزار', }, + authorize: 'مجوز دادن', }, questionClassifiers: { model: 'مدل', diff --git a/web/i18n/fr-FR/app.ts b/web/i18n/fr-FR/app.ts index beea355ffc..ffa00c758a 100644 --- a/web/i18n/fr-FR/app.ts +++ b/web/i18n/fr-FR/app.ts @@ -208,6 +208,41 @@ const translation = { moreFillTip: 'Affichage d\'un maximum de 10 niveaux d\'imbrication', configure: 'Configurer', }, + accessItemsDescription: { + anyone: 'Tout le monde peut accéder à l\'application web.', + specific: 'Seules des groupes ou membres spécifiques peuvent accéder à l\'application web.', + organization: 'Toute personne dans l\'organisation peut accéder à l\'application web.', + }, + accessControlDialog: { + accessItems: { + anyone: 'Quiconque avec le lien', + specific: 'Groupes ou membres spécifiques', + organization: 'Seuls les membres au sein de l\'entreprise', + }, + operateGroupAndMember: { + searchPlaceholder: 'Rechercher des groupes et des membres', + allMembers: 'Tous les membres', + expand: 'Développer', + noResult: 'Aucun résultat', + }, + title: 'Contrôle d\'accès à l\'application Web', + description: 'Définir les autorisations d\'accès à l\'application web', + accessLabel: 'Qui a accès', + groups_one: '{{count}} GROUPE', + groups_other: '{{count}} GROUPES', + members_one: '{{count}} MEMBRE', + members_other: '{{count}} MEMBRES', + noGroupsOrMembers: 'Aucun groupe ou membre sélectionné', + webAppSSONotEnabledTip: 'Veuillez contacter l\'administrateur de l\'entreprise pour configurer la méthode d\'authentification de l\'application web.', + updateSuccess: 'Mise à jour réussie', + }, + publishApp: { + title: 'Qui peut accéder à l\'application web', + notSet: 'Non défini', + notSetDesc: 'Actuellement, personne ne peut accéder à l\'application web. 
Veuillez définir les autorisations.', + }, + accessControl: 'Contrôle d\'accès à l\'application Web', + noAccessPermission: 'Pas de permission d\'accéder à l\'application web', } export default translation diff --git a/web/i18n/fr-FR/common.ts b/web/i18n/fr-FR/common.ts index d26deb3a9f..f08ef40c4d 100644 --- a/web/i18n/fr-FR/common.ts +++ b/web/i18n/fr-FR/common.ts @@ -145,6 +145,8 @@ const translation = { newDataset: 'Créer des Connaissances', tools: 'Outils', exploreMarketplace: 'Explorer Marketplace', + appDetail: 'Détails de l\'application', + account: 'Compte', }, userProfile: { settings: 'Paramètres', @@ -550,7 +552,7 @@ const translation = { vectorHash: 'Hachage vectoriel:', hitScore: 'Score de Récupération:', }, - inputPlaceholder: 'Parler au bot', + inputPlaceholder: 'Parler au {{botName}}', thinking: 'Pensée...', thought: 'Pensée', resend: 'Renvoyer', @@ -644,6 +646,7 @@ const translation = { license: { expiring: 'Expirant dans un jour', expiring_plural: 'Expirant dans {{count}} jours', + unlimited: 'Illimité', }, pagination: { perPage: 'Articles par page', @@ -667,6 +670,7 @@ const translation = { dropImageHere: 'Déposez votre image ici, ou', supportedFormats: 'Prend en charge PNG, JPG, JPEG, WEBP et GIF', }, + you: 'Vous', } export default translation diff --git a/web/i18n/fr-FR/dataset.ts b/web/i18n/fr-FR/dataset.ts index ba2985ca2b..c9739e20a1 100644 --- a/web/i18n/fr-FR/dataset.ts +++ b/web/i18n/fr-FR/dataset.ts @@ -203,6 +203,7 @@ const translation = { values: '{{num}} Valeurs', deleteContent: 'Êtes-vous sûr de vouloir supprimer les métadonnées "{{name}}" ?', name: 'Nom', + disabled: 'handicapés', }, documentMetadata: { technicalParameters: 'Paramètres techniques', diff --git a/web/i18n/fr-FR/login.ts b/web/i18n/fr-FR/login.ts index a7a633f330..68a642b3ea 100644 --- a/web/i18n/fr-FR/login.ts +++ b/web/i18n/fr-FR/login.ts @@ -105,6 +105,11 @@ const translation = { licenseLost: 'Licence perdue', licenseExpiredTip: 'La licence Dify Enterprise de votre espace de travail a expiré. Veuillez contacter votre administrateur pour continuer à utiliser Dify.', licenseInactive: 'Licence inactive', + webapp: { + noLoginMethodTip: 'Veuillez contacter l\'administrateur système pour ajouter une méthode d\'authentification.', + noLoginMethod: 'Méthode d\'authentification non configurée pour l\'application web', + disabled: 'L\'authentification de l\'application web est désactivée. Veuillez contacter l\'administrateur du système pour l\'activer. 
Vous pouvez essayer d\'utiliser l\'application directement.', + }, } export default translation diff --git a/web/i18n/fr-FR/plugin.ts b/web/i18n/fr-FR/plugin.ts index 04269e75ed..52930bd249 100644 --- a/web/i18n/fr-FR/plugin.ts +++ b/web/i18n/fr-FR/plugin.ts @@ -62,6 +62,7 @@ const translation = { settings: 'PARAMÈTRES UTILISATEUR', paramsTip2: 'Lorsque « Automatique » est désactivé, la valeur par défaut est utilisée.', paramsTip1: 'Contrôle les paramètres d’inférence LLM.', + toolSetting: 'Paramètres de l\'outil', }, modelNum: '{{num}} MODÈLES INCLUS', endpointDeleteTip: 'Supprimer le point de terminaison', @@ -210,6 +211,7 @@ const translation = { title: 'Plugins', }, difyVersionNotCompatible: 'La version actuelle de Dify n\'est pas compatible avec ce plugin, veuillez mettre à niveau vers la version minimale requise : {{minimalDifyVersion}}', + requestAPlugin: 'Demander un plugin', } export default translation diff --git a/web/i18n/fr-FR/workflow.ts b/web/i18n/fr-FR/workflow.ts index 12061705de..7a15bcaa4d 100644 --- a/web/i18n/fr-FR/workflow.ts +++ b/web/i18n/fr-FR/workflow.ts @@ -659,6 +659,7 @@ const translation = { }, json: 'JSON généré par un outil', }, + authorize: 'Autoriser', }, questionClassifiers: { model: 'modèle', diff --git a/web/i18n/hi-IN/app.ts b/web/i18n/hi-IN/app.ts index e5db983f45..f9486b93ec 100644 --- a/web/i18n/hi-IN/app.ts +++ b/web/i18n/hi-IN/app.ts @@ -209,6 +209,41 @@ const translation = { structuredTip: 'संरचित आउटपुट एक विशेषता है जो यह सुनिश्चित करती है कि मॉडल हमेशा आपके प्रदान किए गए JSON स्कीमा के अनुसार प्रतिक्रियाएँ生成 करेगा।', modelNotSupportedTip: 'वर्तमान मॉडल इस सुविधा का समर्थन नहीं करता है और स्वचालित रूप से प्रॉम्प्ट इंजेक्शन में डाउनग्रेड किया जाता है।', }, + accessItemsDescription: { + anyone: 'कोई भी वेब ऐप तक पहुँच सकता है', + organization: 'संस्थान के किसी भी व्यक्ति को वेब ऐप तक पहुंच प्राप्त है', + specific: 'केवल विशेष समूह या सदस्य ही वेब ऐप तक पहुंच सकते हैं', + }, + accessControlDialog: { + accessItems: { + anyone: 'लिंक के साथ कोई भी', + specific: 'विशिष्ट समूह या सदस्य', + organization: 'केवल उद्यम के भीतर के सदस्य', + }, + operateGroupAndMember: { + searchPlaceholder: 'समूहों और सदस्यों की खोज करें', + allMembers: 'सभी सदस्य', + expand: 'व्याप्त करें', + noResult: 'कोई परिणाम नहीं', + }, + title: 'वेब एप्लिकेशन पहुँच नियंत्रण', + description: 'वेब ऐप एक्सेस अनुमतियाँ सेट करें', + groups_one: '{{count}} समूह', + groups_other: '{{count}} समूह', + members_one: '{{count}} सदस्य', + members_other: '{{count}} सदस्य', + noGroupsOrMembers: 'कोई समूह या सदस्य चयनित नहीं किया गया', + updateSuccess: 'सफलता से अपडेट किया गया', + accessLabel: 'किसके पास पहुँच है', + webAppSSONotEnabledTip: 'कृपया वेब ऐप प्रमाणीकरण विधि कॉन्फ़िगर करने के लिए उद्यम प्रशासक से संपर्क करें।', + }, + publishApp: { + title: 'वेब ऐप तक कौन पहुँच सकता है', + notSet: 'अनुबंधित नहीं किया गया', + notSetDesc: 'वर्तमान में कोई भी वेब ऐप तक पहुंच नहीं बना सकता। कृपया अनुमतियाँ सेट करें।', + }, + accessControl: 'वेब एप्लिकेशन पहुँच नियंत्रण', + noAccessPermission: 'वेब एप्लिकेशन तक पहुँचने की अनुमति नहीं है', } export default translation diff --git a/web/i18n/hi-IN/common.ts b/web/i18n/hi-IN/common.ts index 4964bcbcc4..65565f9955 100644 --- a/web/i18n/hi-IN/common.ts +++ b/web/i18n/hi-IN/common.ts @@ -154,6 +154,8 @@ const translation = { newDataset: 'ज्ञान बनाएं', tools: 'उपकरण', exploreMarketplace: 'मार्केटप्लेस का अन्वेषण करें', + appDetail: 'ऐप विवरण', + account: 'खाता', }, userProfile: { settings: 'सेटिंग्स', @@ -666,6 +668,7 @@ const translation = { license: { expiring: 
'एक दिन में समाप्त हो रहा है', expiring_plural: '{{गिनती}} दिनों में समाप्त हो रहा है', + unlimited: 'असीमित', }, pagination: { perPage: 'प्रति पृष्ठ आइटम', @@ -689,6 +692,7 @@ const translation = { browse: 'ब्राउज़ करें', dropImageHere: 'अपनी छवि यहाँ छोड़ें, या', }, + you: 'आप', } export default translation diff --git a/web/i18n/hi-IN/login.ts b/web/i18n/hi-IN/login.ts index 0be8cbc3ab..0c9f4451b6 100644 --- a/web/i18n/hi-IN/login.ts +++ b/web/i18n/hi-IN/login.ts @@ -110,6 +110,11 @@ const translation = { licenseLostTip: 'Dify लायसेंस सर्वर से कनेक्ट करने में विफल. Dify का उपयोग जारी रखने के लिए कृपया अपने व्यवस्थापक से संपर्क करें.', licenseInactiveTip: 'आपके कार्यस्थल के लिए डिफाई एंटरप्राइज लाइसेंस निष्क्रिय है। कृपया डिफाई का उपयोग जारी रखने के लिए अपने प्रशासक से संपर्क करें।', licenseExpiredTip: 'आपके कार्यस्थल के लिए डिफाई एंटरप्राइज लाइसेंस समाप्त हो गया है। कृपया डिफाई का उपयोग जारी रखने के लिए अपने प्रशासक से संपर्क करें।', + webapp: { + noLoginMethodTip: 'कृपया एक प्रमाणीकरण विधि जोड़ने के लिए सिस्टम प्रशासक से संपर्क करें।', + noLoginMethod: 'वेब ऐप के लिए प्रमाणीकरण विधि कॉन्फ़िगर नहीं की गई है', + disabled: 'वेब ऐप प्रमाणीकरण अक्षम है। कृपया इसे सक्षम करने के लिए सिस्टम प्रशासक से संपर्क करें। आप सीधे ऐप का उपयोग करने की कोशिश कर सकते हैं।', + }, } export default translation diff --git a/web/i18n/hi-IN/plugin.ts b/web/i18n/hi-IN/plugin.ts index 075e9a1da4..76eb84246a 100644 --- a/web/i18n/hi-IN/plugin.ts +++ b/web/i18n/hi-IN/plugin.ts @@ -62,6 +62,7 @@ const translation = { paramsTip2: 'जब \'स्वचालित\' बंद होता है, तो डिफ़ॉल्ट मान का उपयोग किया जाता है।', descriptionPlaceholder: 'उपकरण के उद्देश्य का संक्षिप्त विवरण, जैसे, किसी विशेष स्थान के लिए तापमान प्राप्त करना।', paramsTip1: 'एलएलएम अनुमान पैरामीटर को नियंत्रित करता है।', + toolSetting: 'टूल सेटिंग्स', }, switchVersion: 'स्विच संस्करण', endpointModalDesc: 'एक बार कॉन्फ़िगर होने के बाद, प्लगइन द्वारा API एंडपॉइंट्स के माध्यम से प्रदान की गई सुविधाओं का उपयोग किया जा सकता है।', @@ -210,6 +211,7 @@ const translation = { title: 'प्लगइन्स', }, difyVersionNotCompatible: 'वर्तमान डिफाई संस्करण इस प्लगइन के साथ संगत नहीं है, कृपया आवश्यक न्यूनतम संस्करण में अपग्रेड करें: {{minimalDifyVersion}}', + requestAPlugin: 'एक प्लगइन का अनुरोध करें', } export default translation diff --git a/web/i18n/hi-IN/workflow.ts b/web/i18n/hi-IN/workflow.ts index 8d8c929156..c295b16603 100644 --- a/web/i18n/hi-IN/workflow.ts +++ b/web/i18n/hi-IN/workflow.ts @@ -676,6 +676,7 @@ const translation = { }, json: 'उपकरण द्वारा उत्पन्न JSON', }, + authorize: 'अधिकृत करें', }, questionClassifiers: { model: 'मॉडल', diff --git a/web/i18n/it-IT/app.ts b/web/i18n/it-IT/app.ts index a1762bdea2..f6855873db 100644 --- a/web/i18n/it-IT/app.ts +++ b/web/i18n/it-IT/app.ts @@ -220,6 +220,41 @@ const translation = { notConfiguredTip: 'L\'output strutturato non è stato ancora configurato.', modelNotSupportedTip: 'Il modello attuale non supporta questa funzione e viene automaticamente downgradato a iniezione di prompt.', }, + accessItemsDescription: { + anyone: 'Chiunque può accedere all\'app web', + specific: 'Solo gruppi o membri specifici possono accedere all\'app web.', + organization: 'Qualsiasi persona nell\'organizzazione può accedere all\'app web', + }, + accessControlDialog: { + accessItems: { + anyone: 'Chiunque con il link', + specific: 'Gruppi o membri specifici', + organization: 'Solo i membri all\'interno dell\'impresa', + }, + operateGroupAndMember: { + searchPlaceholder: 'Cerca gruppi e membri', + allMembers: 'Tutti i membri', + expand: 'Espandere', + 
noResult: 'Nessun risultato', + }, + title: 'Controllo di accesso all\'app web', + description: 'Imposta le autorizzazioni di accesso all\'app web', + accessLabel: 'Chi ha accesso', + groups_one: '{{count}} GRUPPO', + groups_other: '{{count}} GRUPPI', + members_one: '{{count}} MEMBRO', + members_other: '{{count}} MEMBRI', + noGroupsOrMembers: 'Nessun gruppo o membro selezionato', + webAppSSONotEnabledTip: 'Si prega di contattare l\'amministratore dell\'impresa per configurare il metodo di autenticazione dell\'app web.', + updateSuccess: 'Aggiornamento avvenuto con successo', + }, + publishApp: { + title: 'Chi può accedere all\'app web', + notSet: 'Non impostato', + notSetDesc: 'Attualmente nessuno può accedere all\'app web. Si prega di impostare i permessi.', + }, + accessControl: 'Controllo di accesso all\'app web', + noAccessPermission: 'Nessun permesso per accedere all\'app web', } export default translation diff --git a/web/i18n/it-IT/common.ts b/web/i18n/it-IT/common.ts index f337990aab..b47bb23854 100644 --- a/web/i18n/it-IT/common.ts +++ b/web/i18n/it-IT/common.ts @@ -154,6 +154,8 @@ const translation = { newDataset: 'Crea Conoscenza', tools: 'Strumenti', exploreMarketplace: 'Esplora il Marketplace', + appDetail: 'Dettagli dell\'app', + account: 'Account', }, userProfile: { settings: 'Impostazioni', @@ -581,7 +583,7 @@ const translation = { vectorHash: 'Hash del vettore:', hitScore: 'Punteggio di recupero:', }, - inputPlaceholder: 'Parla con il bot', + inputPlaceholder: 'Parla con il {{botName}}', thinking: 'Pensante...', thought: 'Pensiero', resend: 'Reinvia', @@ -675,6 +677,7 @@ const translation = { license: { expiring_plural: 'Scadenza tra {{count}} giorni', expiring: 'Scadenza in un giorno', + unlimited: 'Illimitato', }, pagination: { perPage: 'Articoli per pagina', @@ -698,6 +701,7 @@ const translation = { browse: 'sfogliare', dropImageHere: 'Trascina la tua immagine qui, oppure', }, + you: 'Tu', } export default translation diff --git a/web/i18n/it-IT/login.ts b/web/i18n/it-IT/login.ts index 350424259e..cbc05d60c1 100644 --- a/web/i18n/it-IT/login.ts +++ b/web/i18n/it-IT/login.ts @@ -115,6 +115,11 @@ const translation = { licenseExpiredTip: 'La licenza Dify Enterprise per la tua area di lavoro è scaduta. Contatta il tuo amministratore per continuare a utilizzare Dify.', licenseInactiveTip: 'La licenza Dify Enterprise per la tua area di lavoro è inattiva. Contatta il tuo amministratore per continuare a utilizzare Dify.', licenseInactive: 'Licenza inattiva', + webapp: { + noLoginMethod: 'Metodo di autenticazione non configurato per l\'app web', + noLoginMethodTip: 'Si prega di contattare l\'amministratore del sistema per aggiungere un metodo di autenticazione.', + disabled: 'L\'autenticazione dell\'app web è disabilitata. Si prega di contattare l\'amministratore di sistema per abilitarla. 
Puoi provare a utilizzare l\'app direttamente.', + }, } export default translation diff --git a/web/i18n/it-IT/plugin.ts b/web/i18n/it-IT/plugin.ts index 2832776b9d..1c84b61fda 100644 --- a/web/i18n/it-IT/plugin.ts +++ b/web/i18n/it-IT/plugin.ts @@ -62,6 +62,7 @@ const translation = { descriptionLabel: 'Descrizione dell\'utensile', auto: 'Automatico', paramsTip2: 'Quando \'Automatico\' è disattivato, viene utilizzato il valore predefinito.', + toolSetting: 'Impostazioni degli strumenti', }, modelNum: '{{num}} MODELLI INCLUSI', endpointModalTitle: 'Endpoint di configurazione', @@ -210,6 +211,7 @@ const translation = { title: 'Plugin', }, difyVersionNotCompatible: 'L\'attuale versione di Dify non è compatibile con questo plugin, si prega di aggiornare alla versione minima richiesta: {{minimalDifyVersion}}', + requestAPlugin: 'Richiedi un plugin', } export default translation diff --git a/web/i18n/it-IT/workflow.ts b/web/i18n/it-IT/workflow.ts index 532dbec3b0..26c790f4a0 100644 --- a/web/i18n/it-IT/workflow.ts +++ b/web/i18n/it-IT/workflow.ts @@ -679,6 +679,7 @@ const translation = { }, json: 'json generato dallo strumento', }, + authorize: 'Autorizza', }, questionClassifiers: { model: 'modello', diff --git a/web/i18n/ja-JP/common.ts b/web/i18n/ja-JP/common.ts index f9184332de..882e00cec8 100644 --- a/web/i18n/ja-JP/common.ts +++ b/web/i18n/ja-JP/common.ts @@ -570,7 +570,7 @@ const translation = { vectorHash: 'ベクトルハッシュ:', hitScore: '検索スコア:', }, - inputPlaceholder: 'ボットと話す', + inputPlaceholder: '{{botName}} と話す', thought: '思考', thinking: '考え中...', resend: '再送信してください', @@ -660,6 +660,7 @@ const translation = { license: { expiring_plural: '有効期限 {{count}} 日', expiring: '1日で有効期限が切れます', + unlimited: '無制限', }, pagination: { perPage: 'ページあたりのアイテム数', diff --git a/web/i18n/ja-JP/plugin.ts b/web/i18n/ja-JP/plugin.ts index 6a27048797..dfb4efe679 100644 --- a/web/i18n/ja-JP/plugin.ts +++ b/web/i18n/ja-JP/plugin.ts @@ -62,6 +62,7 @@ const translation = { paramsTip1: 'LLM推論パラメータを制御します。', toolLabel: '道具', unsupportedTitle: 'サポートされていないアクション', + toolSetting: 'ツール設定', }, endpointDisableTip: 'エンドポイントを無効にする', endpointModalDesc: '設定が完了すると、APIエンドポイントを介してプラグインが提供する機能を使用できます。', @@ -210,6 +211,7 @@ const translation = { metadata: { title: 'プラグイン', }, + requestAPlugin: 'プラグインをリクエストする', } export default translation diff --git a/web/i18n/ja-JP/workflow.ts b/web/i18n/ja-JP/workflow.ts index 3c3e8342a7..8495a7dfd5 100644 --- a/web/i18n/ja-JP/workflow.ts +++ b/web/i18n/ja-JP/workflow.ts @@ -666,6 +666,7 @@ const translation = { }, json: 'ツールで生成されたJSON', }, + authorize: '認証する', }, questionClassifiers: { model: 'モデル', diff --git a/web/i18n/ko-KR/app.ts b/web/i18n/ko-KR/app.ts index b4ee4cf6ac..d800968eaa 100644 --- a/web/i18n/ko-KR/app.ts +++ b/web/i18n/ko-KR/app.ts @@ -205,6 +205,41 @@ const translation = { modelNotSupportedTip: '현재 모델은 이 기능을 지원하지 않으며 자동으로 프롬프트 주입으로 다운그레이드됩니다.', structuredTip: '구조화된 출력은 모델이 제공한 JSON 스키마를 항상 준수하는 응답을 생성하도록 보장하는 기능입니다.', }, + accessItemsDescription: { + anyone: '누구나 웹 앱에 접근할 수 있습니다.', + specific: '특정 그룹이나 회원만 웹 앱에 접근할 수 있습니다.', + organization: '조직 내 모든 사람이 웹 애플리케이션에 접근할 수 있습니다.', + }, + accessControlDialog: { + accessItems: { + anyone: '링크가 있는 누구나', + specific: '특정 그룹 또는 구성원', + organization: '기업 내의 회원만', + }, + operateGroupAndMember: { + searchPlaceholder: '그룹 및 구성원 검색', + allMembers: '모든 멤버들', + expand: '확장하다', + noResult: '결과 없음', + }, + title: '웹 애플리케이션 접근 제어', + accessLabel: '누가 접근할 수 있습니까?', + groups_one: '{{count}} 그룹', + groups_other: '{{count}} 그룹', + members_one: '{{count}} 회원', + 
members_other: '{{count}} 회원', + noGroupsOrMembers: '선택된 그룹 또는 멤버가 없습니다.', + webAppSSONotEnabledTip: '웹 앱 인증 방법을 구성하려면 엔터프라이즈 관리자인에게 문의하십시오.', + updateSuccess: '업데이트가 성공적으로 완료되었습니다.', + description: '웹 앱 접근 권한 설정', + }, + publishApp: { + title: '누가 웹 애플리케이션에 접근할 수 있나요?', + notSet: '설정되지 않음', + notSetDesc: '현재 아무도 웹 앱에 접근할 수 없습니다. 권한을 설정해 주세요.', + }, + accessControl: '웹 애플리케이션 접근 제어', + noAccessPermission: '웹 앱에 대한 접근 권한이 없습니다.', } export default translation diff --git a/web/i18n/ko-KR/common.ts b/web/i18n/ko-KR/common.ts index 1d31761ebb..4fa8ec74e4 100644 --- a/web/i18n/ko-KR/common.ts +++ b/web/i18n/ko-KR/common.ts @@ -141,6 +141,8 @@ const translation = { newDataset: '지식 만들기', tools: '도구', exploreMarketplace: 'Marketplace 둘러보기', + appDetail: '앱 세부정보', + account: '계정', }, userProfile: { settings: '설정', @@ -640,6 +642,7 @@ const translation = { license: { expiring_plural: '{{count}}일 후에 만료', expiring: '하루 후에 만료', + unlimited: '무제한', }, pagination: { perPage: '페이지당 항목 수', @@ -663,6 +666,7 @@ const translation = { browse: '브라우즈', dropImageHere: '여기에 이미지를 드롭하거나', }, + you: '너', } export default translation diff --git a/web/i18n/ko-KR/login.ts b/web/i18n/ko-KR/login.ts index 05a60c7b68..4fbd5f5522 100644 --- a/web/i18n/ko-KR/login.ts +++ b/web/i18n/ko-KR/login.ts @@ -105,6 +105,11 @@ const translation = { licenseInactive: 'License Inactive(라이선스 비활성)', licenseExpired: '라이센스가 만료되었습니다.', licenseExpiredTip: '작업 영역에 대한 Dify Enterprise 라이선스가 만료되었습니다. Dify를 계속 사용하려면 관리자에게 문의하십시오.', + webapp: { + noLoginMethod: '웹 애플리케이션에 대한 인증 방법이 구성되어 있지 않습니다.', + disabled: '웹앱 인증이 비활성화되었습니다. 이를 활성화하려면 시스템 관리자에게 문의하십시오. 앱을 직접 사용해 볼 수 있습니다.', + noLoginMethodTip: '인증 방법을 추가하려면 시스템 관리자에게 연락하십시오.', + }, } export default translation diff --git a/web/i18n/ko-KR/plugin.ts b/web/i18n/ko-KR/plugin.ts index fd3603a07f..8d823136d0 100644 --- a/web/i18n/ko-KR/plugin.ts +++ b/web/i18n/ko-KR/plugin.ts @@ -62,6 +62,7 @@ const translation = { placeholder: '도구 선택...', paramsTip2: '\'자동\'이 꺼져 있으면 기본값이 사용됩니다.', unsupportedContent: '설치된 플러그인 버전은 이 작업을 제공하지 않습니다.', + toolSetting: '도구 설정', }, configureApp: '앱 구성', strategyNum: '{{번호}} {{전략}} 포함', @@ -210,6 +211,7 @@ const translation = { title: '플러그인', }, difyVersionNotCompatible: '현재 Dify 버전이 이 플러그인과 호환되지 않습니다. 
필요한 최소 버전으로 업그레이드하십시오: {{minimalDifyVersion}}', + requestAPlugin: '플러그인을 요청하세요', } export default translation diff --git a/web/i18n/ko-KR/workflow.ts b/web/i18n/ko-KR/workflow.ts index b2c3e2d422..3cf22dfe13 100644 --- a/web/i18n/ko-KR/workflow.ts +++ b/web/i18n/ko-KR/workflow.ts @@ -660,6 +660,7 @@ const translation = { }, json: '도구로 생성된 JSON', }, + authorize: '권한 부여', }, questionClassifiers: { model: '모델', diff --git a/web/i18n/pl-PL/app.ts b/web/i18n/pl-PL/app.ts index c60b34f860..54759154ca 100644 --- a/web/i18n/pl-PL/app.ts +++ b/web/i18n/pl-PL/app.ts @@ -216,6 +216,41 @@ const translation = { modelNotSupported: 'Model nie jest obsługiwany', modelNotSupportedTip: 'Aktualny model nie obsługuje tej funkcji i zostaje automatycznie obniżony do wstrzyknięcia zapytania.', }, + accessItemsDescription: { + anyone: 'Każdy może uzyskać dostęp do aplikacji webowej', + specific: 'Tylko określone grupy lub członkowie mogą uzyskać dostęp do aplikacji internetowej', + organization: 'Każdy w organizacji ma dostęp do aplikacji internetowej.', + }, + accessControlDialog: { + accessItems: { + anyone: 'Każdy z linkiem', + specific: 'Specyficzne grupy lub członkowie', + organization: 'Tylko członkowie w obrębie przedsiębiorstwa', + }, + operateGroupAndMember: { + searchPlaceholder: 'Szukaj grup i członków', + allMembers: 'Wszyscy członkowie', + expand: 'Rozszerz', + noResult: 'Brak wyniku', + }, + title: 'Kontrola dostępu do aplikacji internetowej', + description: 'Ustaw uprawnienia dostępu do aplikacji webowej', + accessLabel: 'Kto ma dostęp', + groups_one: '{{count}} GRUPA', + groups_other: '{{count}} GRUPY', + members_one: '{{count}} CZŁONEK', + members_other: '{{count}} CZŁONKÓW', + noGroupsOrMembers: 'Nie wybrano żadnych grup ani członków', + webAppSSONotEnabledTip: 'Proszę skontaktować się z administratorem przedsiębiorstwa, aby skonfigurować metodę uwierzytelniania aplikacji internetowej.', + updateSuccess: 'Aktualizacja powiodła się', + }, + publishApp: { + title: 'Kto ma dostęp do aplikacji internetowej', + notSet: 'Nie ustawiono', + notSetDesc: 'Obecnie nikt nie może uzyskać dostępu do aplikacji internetowej. 
Proszę ustawić uprawnienia.', + }, + accessControl: 'Kontrola dostępu do aplikacji internetowej', + noAccessPermission: 'Brak uprawnień do dostępu do aplikacji internetowej', } export default translation diff --git a/web/i18n/pl-PL/common.ts b/web/i18n/pl-PL/common.ts index fb94c8caaf..e081a1ed9e 100644 --- a/web/i18n/pl-PL/common.ts +++ b/web/i18n/pl-PL/common.ts @@ -150,6 +150,8 @@ const translation = { newDataset: 'Utwórz Wiedzę', tools: 'Narzędzia', exploreMarketplace: 'Zapoznaj się z Marketplace', + appDetail: 'Szczegóły aplikacji', + account: 'klient', }, userProfile: { settings: 'Ustawienia', @@ -565,7 +567,7 @@ const translation = { vectorHash: 'Wektor hash:', hitScore: 'Wynik trafień:', }, - inputPlaceholder: 'Porozmawiaj z botem', + inputPlaceholder: 'Porozmawiaj z {{botName}}', thought: 'Myśl', thinking: 'Myślenie...', resend: 'Prześlij ponownie', @@ -662,6 +664,7 @@ const translation = { license: { expiring_plural: 'Wygasa za {{count}} dni', expiring: 'Wygasa w ciągu jednego dnia', + unlimited: 'Nieograniczony', }, pagination: { perPage: 'Ilość elementów na stronie', @@ -685,6 +688,7 @@ const translation = { browse: 'przeglądaj', supportedFormats: 'Obsługuje PNG, JPG, JPEG, WEBP i GIF', }, + you: 'Ty', } export default translation diff --git a/web/i18n/pl-PL/login.ts b/web/i18n/pl-PL/login.ts index 99719fe71a..b1bb0b93c6 100644 --- a/web/i18n/pl-PL/login.ts +++ b/web/i18n/pl-PL/login.ts @@ -110,6 +110,11 @@ const translation = { licenseExpiredTip: 'Licencja Dify Enterprise dla Twojego obszaru roboczego wygasła. Skontaktuj się z administratorem, aby kontynuować korzystanie z Dify.', licenseLostTip: 'Nie udało się nawiązać połączenia z serwerem licencji Dify. Skontaktuj się z administratorem, aby kontynuować korzystanie z Dify.', licenseInactiveTip: 'Licencja Dify Enterprise dla Twojego obszaru roboczego jest nieaktywna. Skontaktuj się z administratorem, aby kontynuować korzystanie z Dify.', + webapp: { + noLoginMethod: 'Metoda uwierzytelniania nie jest skonfigurowana dla aplikacji internetowej', + noLoginMethodTip: 'Proszę skontaktować się z administratorem systemu, aby dodać metodę uwierzytelniania.', + disabled: 'Uwierzytelnianie aplikacji internetowej jest wyłączone. Proszę skontaktować się z administratorem systemu, aby je włączyć. 
Możesz spróbować użyć aplikacji bezpośrednio.', + }, } export default translation diff --git a/web/i18n/pl-PL/plugin.ts b/web/i18n/pl-PL/plugin.ts index 0883a98e07..f02575919e 100644 --- a/web/i18n/pl-PL/plugin.ts +++ b/web/i18n/pl-PL/plugin.ts @@ -62,6 +62,7 @@ const translation = { uninstalledTitle: 'Narzędzie nie jest zainstalowane', paramsTip2: 'Gdy opcja "Automatycznie" jest wyłączona, używana jest wartość domyślna.', toolLabel: 'Narzędzie', + toolSetting: 'Ustawienia narzędzi', }, strategyNum: '{{liczba}} {{strategia}} ZAWARTE', endpointsEmpty: 'Kliknij przycisk "+", aby dodać punkt końcowy', @@ -210,6 +211,7 @@ const translation = { title: 'Wtyczki', }, difyVersionNotCompatible: 'Obecna wersja Dify nie jest kompatybilna z tym wtyczką, proszę zaktualizować do minimalnej wymaganej wersji: {{minimalDifyVersion}}', + requestAPlugin: 'Poproś o wtyczkę', } export default translation diff --git a/web/i18n/pl-PL/workflow.ts b/web/i18n/pl-PL/workflow.ts index 0b8b12aa2c..c91af84e3a 100644 --- a/web/i18n/pl-PL/workflow.ts +++ b/web/i18n/pl-PL/workflow.ts @@ -660,6 +660,7 @@ const translation = { }, json: 'JSON wygenerowany przez narzędzien', }, + authorize: 'Autoryzuj', }, questionClassifiers: { model: 'model', diff --git a/web/i18n/pt-BR/app.ts b/web/i18n/pt-BR/app.ts index 9e48b72895..5dd1753cac 100644 --- a/web/i18n/pt-BR/app.ts +++ b/web/i18n/pt-BR/app.ts @@ -209,6 +209,41 @@ const translation = { moreFillTip: 'Mostrando um máximo de 10 níveis de aninhamento', notConfiguredTip: 'A saída estruturada ainda não foi configurada.', }, + accessItemsDescription: { + anyone: 'Qualquer pessoa pode acessar o aplicativo web', + specific: 'Apenas grupos ou membros específicos podem acessar o aplicativo web', + organization: 'Qualquer pessoa na organização pode acessar o aplicativo web', + }, + accessControlDialog: { + accessItems: { + anyone: 'Qualquer pessoa com o link', + specific: 'Grupos específicos ou membros', + organization: 'Apenas membros dentro da empresa', + }, + operateGroupAndMember: { + searchPlaceholder: 'Pesquisar grupos e membros', + allMembers: 'Todos os membros', + expand: 'Expandir', + noResult: 'Nenhum resultado', + }, + title: 'Controle de Acesso do Aplicativo Web', + description: 'Defina as permissões de acesso do aplicativo da web', + accessLabel: 'Quem tem acesso', + groups_one: '{{count}} GRUPO', + groups_other: '{{count}} GRUPOS', + members_other: '{{count}} MEMBROS', + noGroupsOrMembers: 'Nenhum grupo ou membro selecionado', + updateSuccess: 'Atualização bem-sucedida', + members_one: '{{count}} MEMBRO', + webAppSSONotEnabledTip: 'Por favor, entre em contato com o administrador da empresa para configurar o método de autenticação da aplicação web.', + }, + publishApp: { + title: 'Quem pode acessar o aplicativo web', + notSet: 'Não definido', + notSetDesc: 'Atualmente, ninguém pode acessar o aplicativo web. 
Por favor, defina as permissões.', + }, + accessControl: 'Controle de Acesso do Aplicativo Web', + noAccessPermission: 'Sem permissão para acessar o aplicativo web', } export default translation diff --git a/web/i18n/pt-BR/common.ts b/web/i18n/pt-BR/common.ts index 7b7eeafbe7..d9409b5dd0 100644 --- a/web/i18n/pt-BR/common.ts +++ b/web/i18n/pt-BR/common.ts @@ -145,6 +145,8 @@ const translation = { newDataset: 'Criar Conhecimento', tools: 'Ferramentas', exploreMarketplace: 'Explorar Mercado', + appDetail: 'Detalhes do aplicativo', + account: 'Conta', }, userProfile: { settings: 'Configurações', @@ -550,7 +552,7 @@ const translation = { vectorHash: 'Hash de vetor:', hitScore: 'Pontuação de recuperação:', }, - inputPlaceholder: 'Fale com o bot', + inputPlaceholder: 'Fale com o {{botName}}', thinking: 'Pensante...', thought: 'Pensamento', resend: 'Reenviar', @@ -644,6 +646,7 @@ const translation = { license: { expiring: 'Expirando em um dia', expiring_plural: 'Expirando em {{count}} dias', + unlimited: 'Ilimitado', }, pagination: { perPage: 'Itens por página', @@ -667,6 +670,7 @@ const translation = { supportedFormats: 'Suporta PNG, JPG, JPEG, WEBP e GIF', browse: 'navegar', }, + you: 'Você', } export default translation diff --git a/web/i18n/pt-BR/login.ts b/web/i18n/pt-BR/login.ts index 7af5181bb9..0880b4776e 100644 --- a/web/i18n/pt-BR/login.ts +++ b/web/i18n/pt-BR/login.ts @@ -105,6 +105,11 @@ const translation = { licenseLost: 'Licença perdida', licenseInactive: 'Licença inativa', licenseExpiredTip: 'A licença do Dify Enterprise para seu espaço de trabalho expirou. Entre em contato com o administrador para continuar usando o Dify.', + webapp: { + noLoginMethod: 'Método de autenticação não configurado para o aplicativo web', + disabled: 'A autenticação do aplicativo da web está desativada. Por favor, entre em contato com o administrador do sistema para habilitá-la. 
Você pode tentar usar o aplicativo diretamente.', + noLoginMethodTip: 'Por favor, entre em contato com o administrador do sistema para adicionar um método de autenticação.', + }, } export default translation diff --git a/web/i18n/pt-BR/plugin.ts b/web/i18n/pt-BR/plugin.ts index c02f9cb7e3..8eb44f83d6 100644 --- a/web/i18n/pt-BR/plugin.ts +++ b/web/i18n/pt-BR/plugin.ts @@ -62,6 +62,7 @@ const translation = { descriptionPlaceholder: 'Breve descrição da finalidade da ferramenta, por exemplo, obter a temperatura para um local específico.', uninstalledTitle: 'Ferramenta não instalada', unsupportedTitle: 'Ação sem suporte', + toolSetting: 'Configurações da Ferramenta', }, serviceOk: 'Serviço OK', endpointsTip: 'Este plug-in fornece funcionalidades específicas por meio de endpoints e você pode configurar vários conjuntos de endpoints para o workspace atual.', @@ -210,6 +211,7 @@ const translation = { title: 'Plugins', }, difyVersionNotCompatible: 'A versão atual do Dify não é compatível com este plugin, por favor atualize para a versão mínima exigida: {{minimalDifyVersion}}', + requestAPlugin: 'Solicitar um plugin', } export default translation diff --git a/web/i18n/pt-BR/workflow.ts b/web/i18n/pt-BR/workflow.ts index 7724dfcf6a..11ee0ed9a1 100644 --- a/web/i18n/pt-BR/workflow.ts +++ b/web/i18n/pt-BR/workflow.ts @@ -660,6 +660,7 @@ const translation = { }, json: 'JSON gerado por ferramenta', }, + authorize: 'Autorizar', }, questionClassifiers: { model: 'modelo', diff --git a/web/i18n/ro-RO/app.ts b/web/i18n/ro-RO/app.ts index a96c94d02f..adf82aa38e 100644 --- a/web/i18n/ro-RO/app.ts +++ b/web/i18n/ro-RO/app.ts @@ -209,6 +209,41 @@ const translation = { configure: 'Configurează', modelNotSupportedTip: 'Modelul actual nu suportă această funcție și este downgradat automat la injecția de prompt.', }, + accessItemsDescription: { + specific: 'Numai grupuri sau membri specifici pot accesa aplicația web.', + organization: 'Oricine din organizație poate accesa aplicația web', + anyone: 'Oricine poate accesa aplicația web', + }, + accessControlDialog: { + accessItems: { + anyone: 'Oricine are linkul', + specific: 'Grupuri sau membri specifici', + organization: 'Numai membrii din cadrul întreprinderii', + }, + operateGroupAndMember: { + searchPlaceholder: 'Caută grupuri și membri', + allMembers: 'Toți membrii', + expand: 'Expandează', + noResult: 'Niciun rezultat', + }, + title: 'Controlul Accesului la Aplicația Web', + description: 'Setați permisiunile de acces la aplicația web', + accessLabel: 'Cine are acces', + groups_one: '{{count}} GRUP', + groups_other: '{{count}} GRUPURI', + members_one: '{{count}} MEMBRU', + members_other: '{{count}} MEMBRI', + noGroupsOrMembers: 'Niciun grup sau membri selectați', + webAppSSONotEnabledTip: 'Vă rugăm să contactați administratorul de întreprindere pentru a configura metoda de autentificare a aplicației web.', + updateSuccess: 'Actualizare reușită', + }, + publishApp: { + title: 'Cine poate accesa aplicația web', + notSet: 'Nu este setat', + notSetDesc: 'În prezent, nimeni nu poate accesa aplicația web. 
Vă rugăm să setați permisiunile.', + }, + accessControl: 'Controlul Accesului la Aplicația Web', + noAccessPermission: 'Nici o permisiune pentru a accesa aplicația web', } export default translation diff --git a/web/i18n/ro-RO/common.ts b/web/i18n/ro-RO/common.ts index 13243404bd..f736240a78 100644 --- a/web/i18n/ro-RO/common.ts +++ b/web/i18n/ro-RO/common.ts @@ -145,6 +145,8 @@ const translation = { newDataset: 'Creează Cunoștințe', tools: 'Instrumente', exploreMarketplace: 'Explorați Marketplace', + appDetail: 'Detalii aplicație', + account: 'Cont', }, userProfile: { settings: 'Setări', @@ -550,7 +552,7 @@ const translation = { vectorHash: 'Hash vector:', hitScore: 'Scor de recuperare:', }, - inputPlaceholder: 'Vorbește cu Bot', + inputPlaceholder: 'Vorbește cu {{botName}}', thinking: 'Gândire...', thought: 'Gând', resend: 'Reexpediați', @@ -644,6 +646,7 @@ const translation = { license: { expiring: 'Expiră într-o zi', expiring_plural: 'Expiră în {{count}} zile', + unlimited: 'Nelimitat', }, pagination: { perPage: 'Articole pe pagină', @@ -667,6 +670,7 @@ const translation = { browse: 'naviga', dropImageHere: 'Trageți imaginea aici sau', }, + you: 'Tu', } export default translation diff --git a/web/i18n/ro-RO/login.ts b/web/i18n/ro-RO/login.ts index 12878d46c0..6a6a6edc64 100644 --- a/web/i18n/ro-RO/login.ts +++ b/web/i18n/ro-RO/login.ts @@ -105,6 +105,11 @@ const translation = { licenseExpired: 'Licență expirată', licenseLost: 'Licență pierdută', licenseExpiredTip: 'Licența Dify Enterprise pentru spațiul de lucru a expirat. Contactați administratorul pentru a continua să utilizați Dify.', + webapp: { + noLoginMethod: 'Metoda de autentificare nu este configurată pentru aplicația web', + noLoginMethodTip: 'Vă rugăm să contactați administratorul sistemului pentru a adăuga o metodă de autentificare.', + disabled: 'Autentificarea webapp-ului este dezactivată. Vă rugăm să contactați administratorul sistemului pentru a o activa. 
Puteți încerca să folosiți aplicația direct.', + }, } export default translation diff --git a/web/i18n/ro-RO/plugin.ts b/web/i18n/ro-RO/plugin.ts index 4fe0782496..11dc6b8f59 100644 --- a/web/i18n/ro-RO/plugin.ts +++ b/web/i18n/ro-RO/plugin.ts @@ -62,6 +62,7 @@ const translation = { descriptionPlaceholder: 'Scurtă descriere a scopului instrumentului, de exemplu, obțineți temperatura pentru o anumită locație.', toolLabel: 'Unealtă', uninstalledTitle: 'Instrumentul nu este instalat', + toolSetting: 'Setările instrumentului', }, endpointDeleteContent: 'Doriți să eliminați {{name}}?', strategyNum: '{{num}} {{strategie}} INCLUS', @@ -210,6 +211,7 @@ const translation = { title: 'Pluginuri', }, difyVersionNotCompatible: 'Versiunea curentă Dify nu este compatibilă cu acest plugin, vă rugăm să faceți upgrade la versiunea minimă necesară: {{minimalDifyVersion}}', + requestAPlugin: 'Solicitați un plugin', } export default translation diff --git a/web/i18n/ro-RO/workflow.ts b/web/i18n/ro-RO/workflow.ts index a93b7fcd14..fabea57556 100644 --- a/web/i18n/ro-RO/workflow.ts +++ b/web/i18n/ro-RO/workflow.ts @@ -660,6 +660,7 @@ const translation = { }, json: 'JSON generat de instrument', }, + authorize: 'Autorizați', }, questionClassifiers: { model: 'model', diff --git a/web/i18n/ru-RU/app.ts b/web/i18n/ru-RU/app.ts index 609b891c5c..fa73e33197 100644 --- a/web/i18n/ru-RU/app.ts +++ b/web/i18n/ru-RU/app.ts @@ -209,6 +209,41 @@ const translation = { modelNotSupportedTip: 'Текущая модель не поддерживает эту функцию и автоматически понижается до инъекции подсказок.', structuredTip: 'Структурированные выходные данные — это функция, которая гарантирует, что модель всегда будет генерировать ответы, соответствующие вашей предоставленной JSON-схеме.', }, + accessItemsDescription: { + anyone: 'Любой может получить доступ к веб-приложению', + specific: 'Только определенные группы или участники могут получить доступ к веб-приложению.', + organization: 'Любой в организации может получить доступ к веб-приложению', + }, + accessControlDialog: { + accessItems: { + anyone: 'Кто угодно с ссылкой', + specific: 'Конкретные группы или члены', + organization: 'Только члены внутри предприятия', + }, + operateGroupAndMember: { + searchPlaceholder: 'Искать группы и участников', + expand: 'Расширить', + noResult: 'Нет результата', + allMembers: 'Все члены', + }, + title: 'Управление доступом к веб-приложению', + description: 'Установите разрешения на доступ к веб-приложению', + accessLabel: 'Кто имеет доступ', + groups_one: '{{count}} ГРУППА', + groups_other: '{{count}} ГРУПП', + members_one: '{{count}} УЧАСТНИК', + members_other: '{{count}} УЧАСТНИКИ', + noGroupsOrMembers: 'Группы или участники не выбраны', + updateSuccess: 'Обновление прошло успешно', + webAppSSONotEnabledTip: 'Пожалуйста, свяжитесь с администратором предприятия, чтобы настроить метод аутентификации веб-приложения.', + }, + publishApp: { + title: 'Кто может получить доступ к веб-приложению', + notSet: 'Не установлено', + notSetDesc: 'В настоящее время никто не может получить доступ к веб-приложению. 
Пожалуйста, установите права доступа.', + }, + accessControl: 'Управление доступом к веб-приложению', + noAccessPermission: 'Нет разрешения на доступ к веб-приложению', } export default translation diff --git a/web/i18n/ru-RU/common.ts b/web/i18n/ru-RU/common.ts index 1c7b41169b..37d207d357 100644 --- a/web/i18n/ru-RU/common.ts +++ b/web/i18n/ru-RU/common.ts @@ -149,6 +149,8 @@ const translation = { newDataset: 'Создать знания', tools: 'Инструменты', exploreMarketplace: 'Подробнее о Marketplace', + appDetail: 'Детали приложения', + account: 'Учетная запись', }, userProfile: { settings: 'Настройки', @@ -644,6 +646,7 @@ const translation = { license: { expiring: 'Срок действия истекает за один день', expiring_plural: 'Срок действия истекает через {{count}} дней', + unlimited: 'Неограниченный', }, pagination: { perPage: 'Элементов на странице', @@ -667,6 +670,7 @@ const translation = { dropImageHere: 'Перетащите ваше изображение сюда или', supportedFormats: 'Поддерживает PNG, JPG, JPEG, WEBP и GIF', }, + you: 'Ты', } export default translation diff --git a/web/i18n/ru-RU/login.ts b/web/i18n/ru-RU/login.ts index 5c46cb7ff9..9c623fe5b6 100644 --- a/web/i18n/ru-RU/login.ts +++ b/web/i18n/ru-RU/login.ts @@ -105,6 +105,11 @@ const translation = { licenseLost: 'Утеряна лицензия', licenseInactiveTip: 'Лицензия Dify Enterprise для рабочего пространства неактивна. Обратитесь к своему администратору, чтобы продолжить использование Dify.', licenseExpiredTip: 'Срок действия лицензии Dify Enterprise для рабочего пространства истек. Обратитесь к своему администратору, чтобы продолжить использование Dify.', + webapp: { + noLoginMethod: 'Метод аутентификации не настроен для веб-приложения', + noLoginMethodTip: 'Пожалуйста, свяжитесь с администратором системы, чтобы добавить метод аутентификации.', + disabled: 'Аутентификация веб-приложения отключена. Пожалуйста, свяжитесь с администратором системы, чтобы включить ее. 
Вы можете попробовать использовать приложение напрямую.', + }, } export default translation diff --git a/web/i18n/ru-RU/plugin.ts b/web/i18n/ru-RU/plugin.ts index 9d99bc11d9..3dc48a0327 100644 --- a/web/i18n/ru-RU/plugin.ts +++ b/web/i18n/ru-RU/plugin.ts @@ -62,6 +62,7 @@ const translation = { params: 'КОНФИГУРАЦИЯ РАССУЖДЕНИЙ', unsupportedContent2: 'Нажмите, чтобы переключить версию.', uninstalledLink: 'Управление в плагинах', + toolSetting: 'Настройки инструмента', }, configureTool: 'Инструмент настройки', endpointsTip: 'Этот плагин предоставляет определенные функциональные возможности через конечные точки, и вы можете настроить несколько наборов конечных точек для текущей рабочей области.', @@ -210,6 +211,7 @@ const translation = { title: 'Плагины', }, difyVersionNotCompatible: 'Текущая версия Dify не совместима с этим плагином, пожалуйста, обновите до минимально необходимой версии: {{minimalDifyVersion}}', + requestAPlugin: 'Запросите плагин', } export default translation diff --git a/web/i18n/ru-RU/workflow.ts b/web/i18n/ru-RU/workflow.ts index e170e64b83..7b22876f5a 100644 --- a/web/i18n/ru-RU/workflow.ts +++ b/web/i18n/ru-RU/workflow.ts @@ -660,6 +660,7 @@ const translation = { }, json: 'json, сгенерированный инструментом', }, + authorize: 'Авторизовать', }, questionClassifiers: { model: 'модель', diff --git a/web/i18n/sl-SI/app.ts b/web/i18n/sl-SI/app.ts index f988114acd..6241d40f30 100644 --- a/web/i18n/sl-SI/app.ts +++ b/web/i18n/sl-SI/app.ts @@ -209,6 +209,41 @@ const translation = { modelNotSupportedTip: 'Trenutni model ne podpira te funkcije in se samodejno zniža na vbrizgavanje pozivov.', structuredTip: 'Strukturirani izhodi so funkcija, ki zagotavlja, da bo model vedno generiral odgovore, ki se držijo vašega posredovanega JSON sheme.', }, + accessItemsDescription: { + anyone: 'Vsakdo lahko dostopa do spletne aplikacije', + specific: 'Samo določenim skupinam ali članom je omogočen dostop do spletne aplikacije', + organization: 'Vsakdo v organizaciji lahko dostopa do spletne aplikacije', + }, + accessControlDialog: { + accessItems: { + anyone: 'Kdorkoli s povezavo', + specific: 'Specifične skupine ali člani', + organization: 'Samo člani znotraj podjetja', + }, + operateGroupAndMember: { + searchPlaceholder: 'Išči skupine in člane', + allMembers: 'Vsi člani', + expand: 'Razširi', + noResult: 'Brez rezultata', + }, + title: 'Nadzor dostopa do spletne aplikacije', + description: 'Nastavite dovoljenja za dostop do spletne aplikacije', + accessLabel: 'Kdo ima dostop', + groups_one: '{{count}} SKUPINA', + groups_other: '{{count}} SKUPIN', + members_one: '{{count}} ČLAN', + members_other: '{{count}} ČLANOV', + updateSuccess: 'Posodobitev uspešna', + noGroupsOrMembers: 'Nobene skupine ali članov ni izbranih', + webAppSSONotEnabledTip: 'Prosimo, da se obrnete na skrbnika podjetja, da konfigurira način avtentikacije spletne aplikacije.', + }, + publishApp: { + title: 'Kdo lahko dostopa do spletne aplikacije', + notSet: 'Ni nastavljeno', + notSetDesc: 'Trenutno nihče ne more dostopati do spletne aplikacije. 
Prosimo, nastavite dovoljenja.', + }, + accessControl: 'Nadzor dostopa do spletne aplikacije', + noAccessPermission: 'Brez dovoljenja za dostop do spletne aplikacije', } export default translation diff --git a/web/i18n/sl-SI/common.ts b/web/i18n/sl-SI/common.ts index d4b78d41e5..b43a2dbbb2 100644 --- a/web/i18n/sl-SI/common.ts +++ b/web/i18n/sl-SI/common.ts @@ -149,6 +149,8 @@ const translation = { newDataset: 'Ustvari znanje', tools: 'Orodja', exploreMarketplace: 'Raziščite Marketplace', + appDetail: 'Podrobnosti o aplikaciji', + account: 'Račun', }, userProfile: { settings: 'Nastavitve', @@ -843,6 +845,7 @@ const translation = { license: { expiring_plural: 'Poteče v {{count}} dneh', expiring: 'Poteče v enem dnevu', + unlimited: 'Brez omejitev', }, pagination: { perPage: 'Elementi na stran', @@ -866,6 +869,7 @@ const translation = { browse: 'brskati', dropImageHere: 'Tukaj spustite svojo sliko ali', }, + you: 'Ti', } export default translation diff --git a/web/i18n/sl-SI/login.ts b/web/i18n/sl-SI/login.ts index 70350021bc..12b424b0d7 100644 --- a/web/i18n/sl-SI/login.ts +++ b/web/i18n/sl-SI/login.ts @@ -105,6 +105,11 @@ const translation = { withSSO: 'Nadaljujte z enotno prijavo', licenseLostTip: 'Povezava z licenčnim strežnikom Dify ni uspela. Če želite še naprej uporabljati Dify, se obrnite na skrbnika.', licenseInactiveTip: 'Licenca Dify Enterprise za vaš delovni prostor je neaktivna. Če želite še naprej uporabljati Dify, se obrnite na skrbnika.', + webapp: { + noLoginMethod: 'Metoda overjanja ni nastavljena za spletno aplikacijo', + noLoginMethodTip: 'Prosimo, da se obrnete na sistemskega skrbnika, da dodate metodo za avtentikacijo.', + disabled: 'Avtentikacija v spletni aplikaciji je onemogočena. Prosimo, kontaktirajte skrbnika sistema, da jo omogoči. Poskusite lahko neposredno uporabljati aplikacijo.', + }, } export default translation diff --git a/web/i18n/sl-SI/plugin.ts b/web/i18n/sl-SI/plugin.ts index 848ef39170..5580d4b1f4 100644 --- a/web/i18n/sl-SI/plugin.ts +++ b/web/i18n/sl-SI/plugin.ts @@ -65,6 +65,7 @@ const translation = { empty: 'Kliknite gumb \' \' za dodajanje orodij. 
Dodate lahko več orodij.', paramsTip1: 'Nadzoruje parametre sklepanja LLM.', paramsTip2: 'Ko je \'Avtomatsko\' izklopljeno, se uporablja privzeta vrednost.', + toolSetting: 'Nastavitve orodja', }, endpointDisableContent: 'Ali želite onemogočiti {{name}}?', serviceOk: 'Storitve so v redu', @@ -210,6 +211,7 @@ const translation = { allCategories: 'Vse kategorije', submitPlugin: 'Oddajte vtičnik', difyVersionNotCompatible: 'Trenutna različica Dify ni združljiva s to vtičnico, prosimo, posodobite na minimalno zahtevano različico: {{minimalDifyVersion}}', + requestAPlugin: 'Zahtevajte vtičnik', } export default translation diff --git a/web/i18n/sl-SI/workflow.ts b/web/i18n/sl-SI/workflow.ts index 1aa639222e..a0f4f8d95e 100644 --- a/web/i18n/sl-SI/workflow.ts +++ b/web/i18n/sl-SI/workflow.ts @@ -1096,6 +1096,7 @@ const translation = { }, inputVars: 'Vhodne spremenljivke', toAuthorize: 'Za odobritev', + authorize: 'Pooblasti', }, questionClassifiers: { outputVars: { diff --git a/web/i18n/th-TH/app.ts b/web/i18n/th-TH/app.ts index f4999e9aff..9204c71d32 100644 --- a/web/i18n/th-TH/app.ts +++ b/web/i18n/th-TH/app.ts @@ -205,6 +205,41 @@ const translation = { modelNotSupported: 'โมเดลไม่ได้รับการสนับสนุน', modelNotSupportedTip: 'โมเดลปัจจุบันไม่รองรับฟีเจอร์นี้และจะถูกลดระดับเป็นการฉีดคำสั่งโดยอัตโนมัติ.', }, + accessItemsDescription: { + anyone: 'ใครก็สามารถเข้าถึงเว็บแอปได้', + specific: 'สมาชิกหรือกลุ่มเฉพาะเท่านั้นที่สามารถเข้าถึงแอปเว็บได้', + organization: 'ใครก็ได้ในองค์กรสามารถเข้าถึงแอปเว็บได้', + }, + accessControlDialog: { + accessItems: { + specific: 'กลุ่มหรือสมาชิกเฉพาะ', + organization: 'เฉพาะสมาชิกภายในองค์กร', + anyone: 'ใครก็ตามที่มีลิงก์', + }, + operateGroupAndMember: { + searchPlaceholder: 'ค้นหากลุ่มและสมาชิก', + allMembers: 'สมาชิกทั้งหมด', + noResult: 'ไม่มีผลลัพธ์', + expand: 'ขยาย', + }, + title: 'การควบคุมการเข้าถึงเว็บแอปพลิเคชัน', + description: 'ตั้งค่าสิทธิ์การเข้าถึงเว็บแอป', + accessLabel: 'ใครมีสิทธิ์เข้าถึง', + groups_one: '{{count}} กลุ่ม', + groups_other: '{{count}} กลุ่ม', + members_one: '{{count}} สมาชิก', + noGroupsOrMembers: 'ไม่มีกลุ่มหรือสมาชิกที่เลือก', + webAppSSONotEnabledTip: 'กรุณาติดต่อผู้ดูแลระบบองค์กรเพื่อกำหนดวิธีการตรวจสอบสิทธิ์แอปเว็บ.', + updateSuccess: 'อัปเดตสำเร็จแล้ว', + members_other: '{{count}} สมาชิก', + }, + publishApp: { + title: 'ใครสามารถเข้าถึงแอปเว็บได้', + notSet: 'ยังไม่ได้ตั้งค่า', + notSetDesc: 'ขณะนี้ไม่มีใครสามารถเข้าถึงแอปเว็บได้ กรุณาเพิ่มสิทธิ์การเข้าถึง.', + }, + accessControl: 'การควบคุมการเข้าถึงเว็บแอปพลิเคชัน', + noAccessPermission: 'ไม่มีสิทธิ์เข้าถึงเว็บแอป', } export default translation diff --git a/web/i18n/th-TH/common.ts b/web/i18n/th-TH/common.ts index 9b72257a1f..7425a178d3 100644 --- a/web/i18n/th-TH/common.ts +++ b/web/i18n/th-TH/common.ts @@ -144,6 +144,8 @@ const translation = { newDataset: 'สร้างความรู้', tools: 'เครื่อง มือ', exploreMarketplace: 'สํารวจ Marketplace', + appDetail: 'รายละเอียดแอป', + account: 'บัญชี', }, userProfile: { settings: 'การตั้งค่า', @@ -639,6 +641,7 @@ const translation = { license: { expiring: 'หมดอายุในหนึ่งวัน', expiring_plural: 'หมดอายุใน {{count}} วัน', + unlimited: 'ไม่มีขีดจำกัด', }, pagination: { perPage: 'รายการต่อหน้า', @@ -662,6 +665,7 @@ const translation = { browse: 'ท่องเว็บ', supportedFormats: 'รองรับ PNG, JPG, JPEG, WEBP และ GIF', }, + you: 'คุณ', } export default translation diff --git a/web/i18n/th-TH/login.ts b/web/i18n/th-TH/login.ts index 75f569d3a2..da24be7ea5 100644 --- a/web/i18n/th-TH/login.ts +++ b/web/i18n/th-TH/login.ts @@ -104,6 +104,11 @@ const 
translation = { licenseLostTip: 'เชื่อมต่อเซิร์ฟเวอร์ใบอนุญาต Dify ไม่สําเร็จ โปรดติดต่อผู้ดูแลระบบของคุณเพื่อใช้ Dify ต่อไป', licenseInactive: 'ใบอนุญาตไม่ใช้งาน', licenseInactiveTip: 'สิทธิ์การใช้งาน Dify Enterprise สําหรับพื้นที่ทํางานของคุณไม่ได้ใช้งาน โปรดติดต่อผู้ดูแลระบบของคุณเพื่อใช้ Dify ต่อไป', + webapp: { + noLoginMethodTip: 'กรุณาติดต่อผู้ดูแลระบบเพื่อเพิ่มวิธีการตรวจสอบสิทธิ์.', + noLoginMethod: 'ไม่ได้กำหนดวิธีการตรวจสอบสิทธิ์สำหรับเว็บแอป', + disabled: 'การรับรองความถูกต้องของเว็บแอปถูกปิดใช้งาน โปรดติดต่อผู้ดูแลระบบเพื่อเปิดใช้งาน คุณสามารถลองใช้แอปโดยตรงได้', + }, } export default translation diff --git a/web/i18n/th-TH/plugin.ts b/web/i18n/th-TH/plugin.ts index eb42371fbe..de7ed8c84e 100644 --- a/web/i18n/th-TH/plugin.ts +++ b/web/i18n/th-TH/plugin.ts @@ -62,6 +62,7 @@ const translation = { uninstalledTitle: 'ไม่ได้ติดตั้งเครื่องมือ', descriptionPlaceholder: 'คําอธิบายสั้น ๆ เกี่ยวกับวัตถุประสงค์ของเครื่องมือ เช่น รับอุณหภูมิสําหรับตําแหน่งเฉพาะ', uninstalledContent: 'ปลั๊กอินนี้ติดตั้งจากที่เก็บในเครื่อง/GitHub กรุณาใช้หลังการติดตั้ง', + toolSetting: 'การตั้งค่าเครื่องมือ', }, endpointDisableContent: 'คุณต้องการปิดการใช้งาน {{name}} หรือไม่?', configureApp: 'กําหนดค่าแอป', @@ -210,6 +211,7 @@ const translation = { title: 'ปลั๊กอิน', }, difyVersionNotCompatible: 'เวอร์ชั่นปัจจุบันของ Dify ไม่สามารถใช้งานร่วมกับปลั๊กอินนี้ได้ กรุณาอัปเกรดไปยังเวอร์ชั่นขั้นต่ำที่ต้องการ: {{minimalDifyVersion}}', + requestAPlugin: 'ขอปลั๊กอิน', } export default translation diff --git a/web/i18n/th-TH/workflow.ts b/web/i18n/th-TH/workflow.ts index d1f9084c81..cc2cee418f 100644 --- a/web/i18n/th-TH/workflow.ts +++ b/web/i18n/th-TH/workflow.ts @@ -659,6 +659,7 @@ const translation = { }, json: 'เครื่องมือสร้าง JSON', }, + authorize: 'อนุญาต', }, questionClassifiers: { model: 'แบบ', diff --git a/web/i18n/tr-TR/app.ts b/web/i18n/tr-TR/app.ts index 0dbc52bf36..995cc9c795 100644 --- a/web/i18n/tr-TR/app.ts +++ b/web/i18n/tr-TR/app.ts @@ -205,6 +205,41 @@ const translation = { modelNotSupportedTip: 'Mevcut model bu özelliği desteklemiyor ve otomatik olarak prompt enjeksiyonuna düşürülüyor.', structuredTip: 'Yapılandırılmış Çıktılar, modelin sağladığınız JSON Şemasına uyacak şekilde her zaman yanıtlar üretmesini sağlayan bir özelliktir.', }, + accessItemsDescription: { + anyone: 'Herkes web uygulamasına erişebilir', + organization: 'Kuruluşta herkes web uygulamasına erişebilir.', + specific: 'Sadece belirli gruplar veya üyeler web uygulamasına erişebilir.', + }, + accessControlDialog: { + accessItems: { + anyone: 'Bağlantıya sahip olan herkes', + organization: 'Sadece işletme içindeki üyeler', + specific: 'Belirli gruplar veya üyeler', + }, + operateGroupAndMember: { + searchPlaceholder: 'Grupları ve üyeleri ara', + expand: 'Genişlet', + allMembers: 'Tüm üyeler', + noResult: 'Sonuç yok', + }, + title: 'Web Uygulaması Erişim Kontrolü', + description: 'Web uygulaması erişim izinlerini ayarlayın', + accessLabel: 'Kimin erişimi var', + groups_other: '{{count}} GRUP', + members_one: '{{count}} ÜYE', + members_other: '{{count}} ÜYE', + noGroupsOrMembers: 'Seçilen grup veya üye yok', + webAppSSONotEnabledTip: 'Lütfen web uygulaması kimlik doğrulama yöntemini yapılandırmak için kurumsal yöneticinizle iletişime geçin.', + updateSuccess: 'Başarıyla güncellendi', + groups_one: '{{count}} GRUP', + }, + publishApp: { + title: 'Web uygulamasına kim erişebilir', + notSet: 'Ayar yapılmamış', + notSetDesc: 'Şu anda kimse web uygulamasına erişemiyor. 
Lütfen izinleri ayarlayın.', + }, + accessControl: 'Web Uygulaması Erişim Kontrolü', + noAccessPermission: 'Web uygulamasına erişim izni yok', } export default translation diff --git a/web/i18n/tr-TR/common.ts b/web/i18n/tr-TR/common.ts index 584c0a8096..62ce150986 100644 --- a/web/i18n/tr-TR/common.ts +++ b/web/i18n/tr-TR/common.ts @@ -149,6 +149,8 @@ const translation = { newDataset: 'Bilgi Oluştur', tools: 'Araçlar', exploreMarketplace: 'Marketplace\'i Keşfedin', + appDetail: 'Uygulama Detayı', + account: 'Hesap', }, userProfile: { settings: 'Ayarlar', @@ -554,7 +556,7 @@ const translation = { vectorHash: 'Vektör Hash:', hitScore: 'Geri Alım Skoru:', }, - inputPlaceholder: 'Bot ile konuş', + inputPlaceholder: '{{botName}} ile konuş', thought: 'Düşünce', thinking: 'Düşünü...', resend: 'Yeniden gönder', @@ -644,6 +646,7 @@ const translation = { license: { expiring_plural: '{{count}} gün içinde sona eriyor', expiring: 'Bir günde sona eriyor', + unlimited: 'Sınırsız', }, pagination: { perPage: 'Sayfa başına öğe sayısı', @@ -667,6 +670,7 @@ const translation = { dropImageHere: 'Görüntünüzü buraya bırakın veya', browse: 'tarayıcı', }, + you: 'Sen', } export default translation diff --git a/web/i18n/tr-TR/login.ts b/web/i18n/tr-TR/login.ts index e742548dc5..e6471d935f 100644 --- a/web/i18n/tr-TR/login.ts +++ b/web/i18n/tr-TR/login.ts @@ -105,6 +105,11 @@ const translation = { licenseExpired: 'Lisansın Süresi Doldu', licenseLost: 'Lisans Kaybedildi', licenseInactive: 'Lisans Etkin Değil', + webapp: { + disabled: 'Web uygulaması kimlik doğrulaması devre dışı. Lütfen bu özelliği etkinleştirmesi için sistem yöneticisi ile iletişime geçin. Uygulamayı doğrudan kullanmayı deneyebilirsiniz.', + noLoginMethod: 'Web uygulaması için kimlik doğrulama yöntemi yapılandırılmamış', + noLoginMethodTip: 'Lütfen bir kimlik doğrulama yöntemi eklemek için sistem yöneticisi ile iletişime geçin.', + }, } export default translation diff --git a/web/i18n/tr-TR/plugin.ts b/web/i18n/tr-TR/plugin.ts index c434052081..aef546589c 100644 --- a/web/i18n/tr-TR/plugin.ts +++ b/web/i18n/tr-TR/plugin.ts @@ -62,6 +62,7 @@ const translation = { params: 'AKIL YÜRÜTME YAPILANDIRMASI', paramsTip2: '\'Otomatik\' kapalıyken, varsayılan değer kullanılır.', unsupportedTitle: 'Desteklenmeyen Eylem', + toolSetting: 'Araç Ayarları', }, strategyNum: '{{sayı}} {{strateji}} DAHİL', switchVersion: 'Sürümü Değiştir', @@ -210,6 +211,7 @@ const translation = { title: 'Eklentiler', }, difyVersionNotCompatible: 'Mevcut Dify sürümü bu eklentiyle uyumlu değil, lütfen gerekli minimum sürüme güncelleyin: {{minimalDifyVersion}}', + requestAPlugin: 'Bir eklenti iste', } export default translation diff --git a/web/i18n/tr-TR/workflow.ts b/web/i18n/tr-TR/workflow.ts index 36bd101cbf..1358bbf645 100644 --- a/web/i18n/tr-TR/workflow.ts +++ b/web/i18n/tr-TR/workflow.ts @@ -661,6 +661,7 @@ const translation = { }, json: 'araç tarafından oluşturulan json', }, + authorize: 'Yetkilendirmek', }, questionClassifiers: { model: 'model', diff --git a/web/i18n/uk-UA/app.ts b/web/i18n/uk-UA/app.ts index a3834aa32b..4bbb0dcbf1 100644 --- a/web/i18n/uk-UA/app.ts +++ b/web/i18n/uk-UA/app.ts @@ -209,6 +209,41 @@ const translation = { modelNotSupportedTip: 'Поточна модель не підтримує цю функцію та автоматично знижується до ін\'єкції запитів.', structuredTip: 'Структуровані виходи - це функція, яка забезпечує, що модель завжди генеруватиме відповіді, що відповідають наданій вами схемі JSON.', }, + accessItemsDescription: { + anyone: 'Будь-хто може отримати доступ до 
веб-додатку', + specific: 'Тільки окремі групи або члени можуть отримати доступ до веб-додатку.', + organization: 'Будь-хто в організації може отримати доступ до веб-додатку.', + }, + accessControlDialog: { + accessItems: { + anyone: 'Кожен, у кого є посилання', + specific: 'Конкретні групи або члени', + organization: 'Тільки члени підприємства', + }, + operateGroupAndMember: { + searchPlaceholder: 'Шукати групи та учасників', + allMembers: 'Всі члени', + expand: 'розвивати', + noResult: 'Немає результату', + }, + title: 'Контроль доступу до веб-додатка', + description: 'Встановіть дозволи доступу до веб-додатку', + accessLabel: 'Хто має доступ', + groups_one: '{{count}} ГРУПА', + groups_other: '{{count}} ГРУП', + members_one: '{{count}} ЧЛЕН', + members_other: '{{count}} ЧЛЕНІ', + noGroupsOrMembers: 'Не вибрано групи чи учасників', + updateSuccess: 'Оновлення успішно', + webAppSSONotEnabledTip: 'Будь ласка, зв\'яжіться з адміністратором підприємства для налаштування методу аутентифікації веб-додатку.', + }, + publishApp: { + title: 'Хто може отримати доступ до веб-додатку', + notSet: 'Не встановлено', + notSetDesc: 'На даний момент ніхто не може отримати доступ до веб-додатку. Будь ласка, налаштуйте дозволи.', + }, + accessControl: 'Контроль доступу до веб-додатків', + noAccessPermission: 'Немає дозволу на доступ до веб-додатку', } export default translation diff --git a/web/i18n/uk-UA/common.ts b/web/i18n/uk-UA/common.ts index dee558ad44..a80e308b78 100644 --- a/web/i18n/uk-UA/common.ts +++ b/web/i18n/uk-UA/common.ts @@ -145,6 +145,8 @@ const translation = { newDataset: 'Створити знання', tools: 'Інструменти', exploreMarketplace: 'Дізнайтеся більше про Marketplace', + appDetail: 'Деталі програми', + account: 'Обліковий запис', }, userProfile: { settings: 'Налаштування', @@ -645,6 +647,7 @@ const translation = { license: { expiring: 'Термін дії закінчується за один день', expiring_plural: 'Термін дії закінчується за {{count}} днів', + unlimited: 'Безмежний', }, pagination: { perPage: 'Елементів на сторінці', @@ -668,6 +671,7 @@ const translation = { supportedFormats: 'Підтримує PNG, JPG, JPEG, WEBP і GIF', dropImageHere: 'Перетягніть зображення сюди або', }, + you: 'Ти', } export default translation diff --git a/web/i18n/uk-UA/login.ts b/web/i18n/uk-UA/login.ts index e6d1d15dd5..13c71c32c0 100644 --- a/web/i18n/uk-UA/login.ts +++ b/web/i18n/uk-UA/login.ts @@ -105,6 +105,11 @@ const translation = { licenseLost: 'Ліцензію втрачено', licenseInactiveTip: 'Ліцензія Dify Enterprise для вашої робочої області неактивна. Будь ласка, зверніться до свого адміністратора, щоб продовжити користуватися Dify.', licenseExpiredTip: 'Термін дії ліцензії Dify Enterprise для вашого робочого простору закінчився. Будь ласка, зверніться до свого адміністратора, щоб продовжити користуватися Dify.', + webapp: { + noLoginMethod: 'Метод аутентифікації не налаштований для веб-додатку', + noLoginMethodTip: 'Будь ласка, зв\'яжіться з адміністратором системи, щоб додати метод автентифікації.', + disabled: 'Аутентифікацію вебдодатка вимкнено. Будь ласка, зв\'яжіться з адміністратором системи, щоб увімкнути її. Ви можете спробувати використовувати додаток безпосередньо.', + }, } export default translation diff --git a/web/i18n/uk-UA/plugin.ts b/web/i18n/uk-UA/plugin.ts index 465624f2d3..ebc3d927d3 100644 --- a/web/i18n/uk-UA/plugin.ts +++ b/web/i18n/uk-UA/plugin.ts @@ -62,6 +62,7 @@ const translation = { auto: 'Автоматичний', uninstalledContent: 'Цей плагін встановлюється з локального/GitHub репозиторію. 
Будь ласка, використовуйте після встановлення.', unsupportedContent: 'Встановлена версія плагіна не передбачає цієї дії.', + toolSetting: 'Налаштування інструментів', }, modelNum: '{{num}} МОДЕЛІ В КОМПЛЕКТІ', switchVersion: 'Версія перемикача', @@ -210,6 +211,7 @@ const translation = { title: 'Плагіни', }, difyVersionNotCompatible: 'Поточна версія Dify не сумісна з цим плагіном, будь ласка, оновіть до мінімальної версії: {{minimalDifyVersion}}', + requestAPlugin: 'Запросити плагін', } export default translation diff --git a/web/i18n/uk-UA/workflow.ts b/web/i18n/uk-UA/workflow.ts index ff6a75242f..bbe32c22c7 100644 --- a/web/i18n/uk-UA/workflow.ts +++ b/web/i18n/uk-UA/workflow.ts @@ -660,6 +660,7 @@ const translation = { }, json: 'JSON, згенерований інструментом', }, + authorize: 'Уповноважити', }, questionClassifiers: { model: 'модель', diff --git a/web/i18n/vi-VN/app.ts b/web/i18n/vi-VN/app.ts index c01c00c45c..243454d011 100644 --- a/web/i18n/vi-VN/app.ts +++ b/web/i18n/vi-VN/app.ts @@ -209,6 +209,41 @@ const translation = { modelNotSupportedTip: 'Mô hình hiện tại không hỗ trợ tính năng này và tự động bị hạ cấp xuống việc tiêm lệnh.', moreFillTip: 'Hiển thị tối đa 10 cấp độ lồng ghép', }, + accessItemsDescription: { + anyone: 'Mọi người đều có thể truy cập ứng dụng web.', + specific: 'Chỉ những nhóm hoặc thành viên cụ thể mới có thể truy cập ứng dụng web.', + organization: 'Bất kỳ ai trong tổ chức đều có thể truy cập ứng dụng web.', + }, + accessControlDialog: { + accessItems: { + anyone: 'Ai có liên kết', + specific: 'Các nhóm hoặc thành viên cụ thể', + organization: 'Chỉ các thành viên trong doanh nghiệp', + }, + operateGroupAndMember: { + searchPlaceholder: 'Tìm kiếm nhóm và thành viên', + allMembers: 'Tất cả các thành viên', + expand: 'Mở rộng', + noResult: 'Không có kết quả', + }, + title: 'Kiểm soát truy cập ứng dụng web', + description: 'Cài đặt quyền truy cập ứng dụng web', + accessLabel: 'Ai có quyền truy cập', + groups_one: '{{count}} NHÓM', + groups_other: '{{count}} NHÓM', + members_one: '{{count}} THÀNH VIÊN', + members_other: '{{count}} THÀNH VIÊN', + noGroupsOrMembers: 'Không có nhóm hoặc thành viên nào được chọn', + webAppSSONotEnabledTip: 'Vui lòng liên hệ với quản trị viên doanh nghiệp để cấu hình phương thức xác thực ứng dụng web.', + updateSuccess: 'Cập nhật thành công', + }, + publishApp: { + title: 'Ai có thể truy cập ứng dụng web', + notSet: 'Chưa đặt', + notSetDesc: 'Hiện tại không ai có thể truy cập ứng dụng web. 
Vui lòng thiết lập quyền truy cập.', + }, + noAccessPermission: 'Không được phép truy cập ứng dụng web', + accessControl: 'Kiểm soát truy cập ứng dụng web', } export default translation diff --git a/web/i18n/vi-VN/common.ts b/web/i18n/vi-VN/common.ts index 6023fe7285..323ca60152 100644 --- a/web/i18n/vi-VN/common.ts +++ b/web/i18n/vi-VN/common.ts @@ -145,6 +145,8 @@ const translation = { newDataset: 'Tạo Kiến thức', tools: 'Công cụ', exploreMarketplace: 'Khám phá Marketplace', + appDetail: 'Chi tiết ứng dụng', + account: 'báo cáo', }, userProfile: { settings: 'Cài đặt', @@ -550,7 +552,7 @@ const translation = { vectorHash: 'Vector hash:', hitScore: 'Điểm truy xuất:', }, - inputPlaceholder: 'Nói chuyện với Bot', + inputPlaceholder: 'Nói chuyện với {{botName}}', thought: 'Tư duy', thinking: 'Suy nghĩ...', resend: 'Gửi lại', @@ -644,6 +646,7 @@ const translation = { license: { expiring_plural: 'Hết hạn sau {{count}} ngày', expiring: 'Hết hạn trong một ngày', + unlimited: 'Vô hạn', }, pagination: { perPage: 'Mục trên mỗi trang', @@ -667,6 +670,7 @@ const translation = { dropImageHere: 'Kéo hình ảnh của bạn vào đây, hoặc', browse: 'duyệt', }, + you: 'Bạn', } export default translation diff --git a/web/i18n/vi-VN/login.ts b/web/i18n/vi-VN/login.ts index ab4ab68f48..cc81bd8193 100644 --- a/web/i18n/vi-VN/login.ts +++ b/web/i18n/vi-VN/login.ts @@ -105,6 +105,11 @@ const translation = { licenseExpired: 'Giấy phép đã hết hạn', licenseExpiredTip: 'Giấy phép Dify Enterprise cho không gian làm việc của bạn đã hết hạn. Vui lòng liên hệ với quản trị viên của bạn để tiếp tục sử dụng Dify.', licenseLostTip: 'Không thể kết nối máy chủ cấp phép Dify. Vui lòng liên hệ với quản trị viên của bạn để tiếp tục sử dụng Dify.', + webapp: { + noLoginMethod: 'Phương thức xác thực chưa được cấu hình cho ứng dụng web', + noLoginMethodTip: 'Vui lòng liên hệ với quản trị viên hệ thống để thêm phương thức xác thực.', + disabled: 'Xác thực webapp đã bị vô hiệu hóa. Vui lòng liên hệ với quản trị hệ thống để kích hoạt nó. Bạn có thể thử sử dụng ứng dụng trực tiếp.', + }, } export default translation diff --git a/web/i18n/vi-VN/plugin.ts b/web/i18n/vi-VN/plugin.ts index 5e5147c69e..889d79f0ca 100644 --- a/web/i18n/vi-VN/plugin.ts +++ b/web/i18n/vi-VN/plugin.ts @@ -62,6 +62,7 @@ const translation = { settings: 'CÀI ĐẶT NGƯỜI DÙNG', empty: 'Nhấp vào nút \'+\' để thêm công cụ. 
Bạn có thể thêm nhiều công cụ.', unsupportedTitle: 'Hành động không được hỗ trợ', + toolSetting: 'Cài đặt công cụ', }, switchVersion: 'Chuyển đổi phiên bản', endpointDisableTip: 'Tắt điểm cuối', @@ -210,6 +211,7 @@ const translation = { title: 'Plugin', }, difyVersionNotCompatible: 'Phiên bản Dify hiện tại không tương thích với plugin này, vui lòng nâng cấp lên phiên bản tối thiểu cần thiết: {{minimalDifyVersion}}', + requestAPlugin: 'Yêu cầu một plugin', } export default translation diff --git a/web/i18n/vi-VN/workflow.ts b/web/i18n/vi-VN/workflow.ts index 92e94ee5ab..2e0acf0c4d 100644 --- a/web/i18n/vi-VN/workflow.ts +++ b/web/i18n/vi-VN/workflow.ts @@ -660,6 +660,7 @@ const translation = { }, json: 'JSON được tạo bởi công cụ', }, + authorize: 'Ủy quyền', }, questionClassifiers: { model: 'mô hình', diff --git a/web/i18n/zh-Hans/common.ts b/web/i18n/zh-Hans/common.ts index daef93a137..376e46e9f8 100644 --- a/web/i18n/zh-Hans/common.ts +++ b/web/i18n/zh-Hans/common.ts @@ -573,7 +573,7 @@ const translation = { vectorHash: '向量哈希:', hitScore: '召回得分:', }, - inputPlaceholder: '和机器人聊天', + inputPlaceholder: '和 {{botName}} 聊天', thinking: '深度思考中...', thought: '已深度思考', resend: '重新发送', diff --git a/web/i18n/zh-Hant/app.ts b/web/i18n/zh-Hant/app.ts index 6b3868745f..f4135d3e73 100644 --- a/web/i18n/zh-Hant/app.ts +++ b/web/i18n/zh-Hant/app.ts @@ -208,6 +208,41 @@ const translation = { structuredTip: '結構化輸出是一項功能,確保模型始終生成符合您提供的 JSON 架構的響應。', notConfiguredTip: '結構化輸出尚未配置', }, + accessItemsDescription: { + anyone: '任何人都可以訪問這個網絡應用程式', + specific: '只有特定的群體或成員可以訪問這個網絡應用程序', + organization: '組織中的任何人都可以訪問該網絡應用程序', + }, + accessControlDialog: { + accessItems: { + anyone: '擁有鏈接的人', + specific: '特定群體或成員', + organization: '只有企業內部成員', + }, + operateGroupAndMember: { + searchPlaceholder: '搜尋群組和成員', + allMembers: '所有成員', + expand: '擴大', + noResult: '沒有結果', + }, + title: '網頁應用程式存取控制', + description: '設定網頁應用程式訪問權限', + accessLabel: '誰可以訪問', + groups_one: '{{count}} 群組', + groups_other: '{{count}} 組', + members_one: '{{count}} 成員', + members_other: '{{count}} 成員', + noGroupsOrMembers: '未選擇任何群組或成員', + webAppSSONotEnabledTip: '請聯絡企業管理員配置網頁應用程式的身份驗證方法。', + updateSuccess: '更新成功', + }, + publishApp: { + title: '誰可以訪問網絡應用程序', + notSet: '未設定', + notSetDesc: '目前沒有人能夠訪問網絡應用程序。請設置權限。', + }, + accessControl: '網頁應用程式存取控制', + noAccessPermission: '沒有權限訪問網絡應用程式', } export default translation diff --git a/web/i18n/zh-Hant/common.ts b/web/i18n/zh-Hant/common.ts index 266880b940..08510c286b 100644 --- a/web/i18n/zh-Hant/common.ts +++ b/web/i18n/zh-Hant/common.ts @@ -552,7 +552,7 @@ const translation = { vectorHash: '向量雜湊:', hitScore: '召回得分:', }, - inputPlaceholder: '與 Bot 對話', + inputPlaceholder: '與 {{botName}} 對話', thinking: '思維。。。', thought: '思想', resend: '重新發送', @@ -646,6 +646,7 @@ const translation = { license: { expiring: '將在1天內過期', expiring_plural: '將在 {{count}} 天后過期', + unlimited: '無限制', }, pagination: { perPage: '每頁項目數', @@ -669,6 +670,7 @@ const translation = { browse: '瀏覽', dropImageHere: '將您的圖片放在這裡,或', }, + you: '你', } export default translation diff --git a/web/i18n/zh-Hant/login.ts b/web/i18n/zh-Hant/login.ts index 6f2b834118..ada0e1bf89 100644 --- a/web/i18n/zh-Hant/login.ts +++ b/web/i18n/zh-Hant/login.ts @@ -105,6 +105,11 @@ const translation = { licenseInactive: '許可證處於非活動狀態', licenseInactiveTip: '您的工作區的 Dify Enterprise 許可證處於非活動狀態。請聯繫您的管理員以繼續使用 Dify。', licenseLostTip: '無法連接 Dify 許可證伺服器。請聯繫您的管理員以繼續使用 Dify。', + webapp: { + noLoginMethod: '未為網絡應用程序配置身份驗證方法', + noLoginMethodTip: '請聯絡系統管理員以添加身份驗證方法。', + disabled: 
'網頁應用程序身份驗證已被禁用。請聯繫系統管理員以啟用它。您可以嘗試直接使用應用程序。', + }, } export default translation diff --git a/web/i18n/zh-Hant/plugin.ts b/web/i18n/zh-Hant/plugin.ts index 5e93925a6d..3b9040dc91 100644 --- a/web/i18n/zh-Hant/plugin.ts +++ b/web/i18n/zh-Hant/plugin.ts @@ -62,6 +62,7 @@ const translation = { empty: '點擊 『+』 按鈕添加工具。您可以新增多個工具。', unsupportedContent2: '按兩下以切換版本。', paramsTip1: '控制 LLM 推理參數。', + toolSetting: '工具設定', }, actionNum: '{{num}}{{作}}包括', switchVersion: 'Switch 版本', diff --git a/web/models/debug.ts b/web/models/debug.ts index 18c2c02b62..e582e8c18a 100644 --- a/web/models/debug.ts +++ b/web/models/debug.ts @@ -59,6 +59,7 @@ export type PromptVariable = { config?: Record icon?: string icon_background?: string + hide?: boolean // used in frontend to hide variable } export type CompletionParams = { diff --git a/web/themes/dark.css b/web/themes/dark.css index edbd9de7b0..fbfacf1f45 100644 --- a/web/themes/dark.css +++ b/web/themes/dark.css @@ -386,6 +386,7 @@ html[data-theme="dark"] { --color-background-gradient-bg-fill-chat-bg-2: #1d1d20; --color-background-gradient-bg-fill-chat-bubble-bg-1: #c8ceda14; --color-background-gradient-bg-fill-chat-bubble-bg-2: #c8ceda05; + --color-background-gradient-bg-fill-chat-bubble-bg-3: #a5bddb; --color-background-gradient-bg-fill-debug-bg-1: #c8ceda14; --color-background-gradient-bg-fill-debug-bg-2: #18181b0a; diff --git a/web/themes/light.css b/web/themes/light.css index 1d96658963..8a3f9d9d48 100644 --- a/web/themes/light.css +++ b/web/themes/light.css @@ -386,6 +386,7 @@ html[data-theme="light"] { --color-background-gradient-bg-fill-chat-bg-2: #f2f4f7; --color-background-gradient-bg-fill-chat-bubble-bg-1: #ffffff; --color-background-gradient-bg-fill-chat-bubble-bg-2: #ffffff99; + --color-background-gradient-bg-fill-chat-bubble-bg-3: #e1effe; --color-background-gradient-bg-fill-debug-bg-1: #ffffff00; --color-background-gradient-bg-fill-debug-bg-2: #c8ceda24; diff --git a/web/themes/tailwind-theme-var-define.ts b/web/themes/tailwind-theme-var-define.ts index 62c0ed82c7..11189441ee 100644 --- a/web/themes/tailwind-theme-var-define.ts +++ b/web/themes/tailwind-theme-var-define.ts @@ -386,6 +386,7 @@ const vars = { 'background-gradient-bg-fill-chat-bg-2': 'var(--color-background-gradient-bg-fill-chat-bg-2)', 'background-gradient-bg-fill-chat-bubble-bg-1': 'var(--color-background-gradient-bg-fill-chat-bubble-bg-1)', 'background-gradient-bg-fill-chat-bubble-bg-2': 'var(--color-background-gradient-bg-fill-chat-bubble-bg-2)', + 'background-gradient-bg-fill-chat-bubble-bg-3': 'var(--color-background-gradient-bg-fill-chat-bubble-bg-3)', 'background-gradient-bg-fill-debug-bg-1': 'var(--color-background-gradient-bg-fill-debug-bg-1)', 'background-gradient-bg-fill-debug-bg-2': 'var(--color-background-gradient-bg-fill-debug-bg-2)',
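The `hide?: boolean` field added to `PromptVariable` in `web/models/debug.ts` is only described by its inline comment ("used in frontend to hide variable"). A minimal consumption sketch in TypeScript follows; the helper name and the `@/` import alias are assumptions, not part of this diff:

    // Hypothetical helper (not in this PR): drop variables flagged with the new
    // optional `hide` field before rendering them as form inputs.
    import type { PromptVariable } from '@/models/debug' // '@/' alias assumed

    export const visiblePromptVariables = (vars: PromptVariable[]): PromptVariable[] =>
      vars.filter(v => !v.hide)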
{t('datasetDocuments.list.batchModal.contentTitle')}
{t('datasetDocuments.list.batchModal.content')} 1
{t('datasetDocuments.list.batchModal.content')} 2
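Taken together, the locale changes above rely on two i18next mechanisms: `{{botName}}` interpolation in `inputPlaceholder`, and the `_one`/`_other` suffixes on keys such as `groups` and `members` for pluralization. A rough TSX sketch of how such keys are typically consumed with react-i18next; the exact key paths, namespace wiring, and component are assumptions, since the consuming components are not shown in this diff:

    import { useTranslation } from 'react-i18next'

    // Illustrative component, not part of this PR.
    function AccessSummary({ botName, groupCount }: { botName: string; groupCount: number }) {
      const { t } = useTranslation()
      // Interpolation: {{botName}} is substituted at render time,
      // e.g. ja-JP '{{botName}} と話す' -> 'Dify と話す'.
      const placeholder = t('common.chat.inputPlaceholder', { botName })
      // Pluralization: i18next picks `groups_one` or `groups_other` from `count`,
      // e.g. it-IT with count 3 -> '3 GRUPPI'.
      const groups = t('app.accessControlDialog.groups', { count: groupCount })
      return <input placeholder={placeholder} aria-label={groups} />
    }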