diff --git a/api/.env.example b/api/.env.example
index 1eb790df08..b659a1b3bf 100644
--- a/api/.env.example
+++ b/api/.env.example
@@ -437,6 +437,9 @@ CODE_EXECUTION_SSL_VERIFY=True
 CODE_EXECUTION_POOL_MAX_CONNECTIONS=100
 CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS=20
 CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY=5.0
+CODE_EXECUTION_CONNECT_TIMEOUT=10
+CODE_EXECUTION_READ_TIMEOUT=60
+CODE_EXECUTION_WRITE_TIMEOUT=10
 CODE_MAX_NUMBER=9223372036854775807
 CODE_MIN_NUMBER=-9223372036854775808
 CODE_MAX_STRING_LENGTH=400000
diff --git a/api/constants/__init__.py b/api/constants/__init__.py
index 248cdfc09f..e441395afc 100644
--- a/api/constants/__init__.py
+++ b/api/constants/__init__.py
@@ -56,11 +56,15 @@ else:
     }
 DOCUMENT_EXTENSIONS: set[str] = convert_to_lower_and_upper_set(_doc_extensions)
 
+# console
 COOKIE_NAME_ACCESS_TOKEN = "access_token"
 COOKIE_NAME_REFRESH_TOKEN = "refresh_token"
-COOKIE_NAME_PASSPORT = "passport"
 COOKIE_NAME_CSRF_TOKEN = "csrf_token"
 
+# webapp
+COOKIE_NAME_WEBAPP_ACCESS_TOKEN = "webapp_access_token"
+COOKIE_NAME_PASSPORT = "passport"
+
 HEADER_NAME_CSRF_TOKEN = "X-CSRF-Token"
 HEADER_NAME_APP_CODE = "X-App-Code"
 HEADER_NAME_PASSPORT = "X-App-Passport"
diff --git a/api/constants/languages.py b/api/constants/languages.py
index a509ddcf5d..0312a558c9 100644
--- a/api/constants/languages.py
+++ b/api/constants/languages.py
@@ -31,3 +31,9 @@ def supported_language(lang):
 
     error = f"{lang} is not a valid language."
     raise ValueError(error)
+
+
+def get_valid_language(lang: str | None) -> str:
+    if lang and lang in languages:
+        return lang
+    return languages[0]
diff --git a/api/controllers/console/auth/login.py b/api/controllers/console/auth/login.py
index f371613bee..c0a565b5da 100644
--- a/api/controllers/console/auth/login.py
+++ b/api/controllers/console/auth/login.py
@@ -4,7 +4,7 @@ from flask_restx import Resource, reqparse
 
 import services
 from configs import dify_config
-from constants.languages import languages
+from constants.languages import get_valid_language
 from controllers.console import console_ns
 from controllers.console.auth.error import (
     AuthenticationFailedError,
@@ -204,10 +204,12 @@ class EmailCodeLoginApi(Resource):
             .add_argument("email", type=str, required=True, location="json")
             .add_argument("code", type=str, required=True, location="json")
             .add_argument("token", type=str, required=True, location="json")
+            .add_argument("language", type=str, required=False, location="json")
         )
         args = parser.parse_args()
 
         user_email = args["email"]
+        language = args["language"]
 
         token_data = AccountService.get_email_code_login_data(args["token"])
         if token_data is None:
@@ -241,7 +243,9 @@ class EmailCodeLoginApi(Resource):
         if account is None:
             try:
                 account = AccountService.create_account_and_tenant(
-                    email=user_email, name=user_email, interface_language=languages[0]
+                    email=user_email,
+                    name=user_email,
+                    interface_language=get_valid_language(language),
                 )
             except WorkSpaceNotAllowedCreateError:
                 raise NotAllowedCreateWorkspace()
diff --git a/api/controllers/console/setup.py b/api/controllers/console/setup.py
index 6d2b22bde3..1200349e2d 100644
--- a/api/controllers/console/setup.py
+++ b/api/controllers/console/setup.py
@@ -74,12 +74,17 @@ class SetupApi(Resource):
             .add_argument("email", type=email, required=True, location="json")
             .add_argument("name", type=StrLen(30), required=True, location="json")
             .add_argument("password", type=valid_password, required=True, location="json")
+            .add_argument("language", type=str, required=False, location="json")
        )
         args = parser.parse_args()
 
         # setup
         RegisterService.setup(
-            email=args["email"], name=args["name"], password=args["password"], ip_address=extract_remote_ip(request)
+            email=args["email"],
+            name=args["name"],
+            password=args["password"],
+            ip_address=extract_remote_ip(request),
+            language=args["language"],
         )
 
         return {"result": "success"}, 201
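The `language` field threaded through `SetupApi` and `EmailCodeLoginApi` is optional, and `get_valid_language` (added in `constants/languages.py` above) is what makes that safe: it falls back to the first configured language instead of raising. A minimal sketch of the contract, assuming the default build where `languages[0]` is `"en-US"`:

```python
from constants.languages import get_valid_language

assert get_valid_language("ja-JP") == "ja-JP"  # supported code is kept
assert get_valid_language("xx-XX") == "en-US"  # unknown code falls back
assert get_valid_language(None) == "en-US"     # omitted field falls back
```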
diff --git a/api/controllers/mcp/mcp.py b/api/controllers/mcp/mcp.py
index edadf42f12..8d8fe6b3a8 100644
--- a/api/controllers/mcp/mcp.py
+++ b/api/controllers/mcp/mcp.py
@@ -9,9 +9,10 @@ from controllers.console.app.mcp_server import AppMCPServerStatus
 from controllers.mcp import mcp_ns
 from core.app.app_config.entities import VariableEntity
 from core.mcp import types as mcp_types
+from core.mcp.server.streamable_http import handle_mcp_request
 from extensions.ext_database import db
 from libs import helper
-from models.model import App, AppMCPServer, AppMode
+from models.model import App, AppMCPServer, AppMode, EndUser
 
 
 class MCPRequestError(Exception):
@@ -192,6 +193,51 @@
         except ValidationError as e:
             raise MCPRequestError(mcp_types.INVALID_PARAMS, f"Invalid MCP request: {str(e)}")
 
-        mcp_server_handler = MCPServerStreamableHTTPRequestHandler(app, request, converted_user_input_form)
-        response = mcp_server_handler.handle()
-        return helper.compact_generate_response(response)
+    def _retrieve_end_user(self, tenant_id: str, mcp_server_id: str) -> EndUser | None:
+        """Get end user - manages its own database session"""
+        with Session(db.engine, expire_on_commit=False) as session, session.begin():
+            return (
+                session.query(EndUser)
+                .where(EndUser.tenant_id == tenant_id)
+                .where(EndUser.session_id == mcp_server_id)
+                .where(EndUser.type == "mcp")
+                .first()
+            )
+
+    def _create_end_user(
+        self, client_name: str, tenant_id: str, app_id: str, mcp_server_id: str, session: Session
+    ) -> EndUser:
+        """Create end user in existing session"""
+        end_user = EndUser(
+            tenant_id=tenant_id,
+            app_id=app_id,
+            type="mcp",
+            name=client_name,
+            session_id=mcp_server_id,
+        )
+        session.add(end_user)
+        session.flush()  # Use flush instead of commit to keep transaction open
+        session.refresh(end_user)
+        return end_user
+
+    def _handle_mcp_request(
+        self,
+        app: App,
+        mcp_server: AppMCPServer,
+        mcp_request: mcp_types.ClientRequest,
+        user_input_form: list[VariableEntity],
+        session: Session,
+        request_id: Union[int, str],
+    ) -> mcp_types.JSONRPCResponse | mcp_types.JSONRPCError | None:
+        """Handle MCP request and return response"""
+        end_user = self._retrieve_end_user(mcp_server.tenant_id, mcp_server.id)
+
+        if not end_user and isinstance(mcp_request.root, mcp_types.InitializeRequest):
+            client_info = mcp_request.root.params.clientInfo
+            client_name = f"{client_info.name}@{client_info.version}"
+            # Commit the session before creating end user to avoid transaction conflicts
+            session.commit()
+            with Session(db.engine, expire_on_commit=False) as create_session, create_session.begin():
+                end_user = self._create_end_user(client_name, app.tenant_id, app.id, mcp_server.id, create_session)
+
+        return handle_mcp_request(app, mcp_request, user_input_form, mcp_server, end_user, request_id)
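`_retrieve_end_user` opens its own short-lived session, while `_create_end_user` takes a caller-owned session and uses `flush()` so the transaction stays open. The same session-per-operation idiom recurs throughout this PR; here it is in isolation, with a hypothetical `Thing` model standing in for `EndUser`:

```python
from sqlalchemy import create_engine
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column

class Base(DeclarativeBase):
    pass

class Thing(Base):  # hypothetical model, stands in for EndUser
    __tablename__ = "things"
    id: Mapped[int] = mapped_column(primary_key=True)
    name: Mapped[str]

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

# expire_on_commit=False lets the returned object be read after the session
# closes; session.begin() commits on success and rolls back on error.
with Session(engine, expire_on_commit=False) as session, session.begin():
    thing = Thing(name="demo")
    session.add(thing)
    session.flush()    # assign the primary key now, but keep the transaction open
    session.refresh(thing)

print(thing.id)  # still usable outside the session
```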
diff --git a/api/controllers/web/login.py b/api/controllers/web/login.py
index f213fd8c90..244ef47982 100644
--- a/api/controllers/web/login.py
+++ b/api/controllers/web/login.py
@@ -17,8 +17,8 @@ from libs.helper import email
 from libs.passport import PassportService
 from libs.password import valid_password
 from libs.token import (
-    clear_access_token_from_cookie,
-    extract_access_token,
+    clear_webapp_access_token_from_cookie,
+    extract_webapp_access_token,
 )
 from services.account_service import AccountService
 from services.app_service import AppService
@@ -81,7 +81,7 @@
     )
     def get(self):
         app_code = request.args.get("app_code")
-        token = extract_access_token(request)
+        token = extract_webapp_access_token(request)
         if not app_code:
             return {
                 "logged_in": bool(token),
@@ -128,7 +128,7 @@
         response = make_response({"result": "success"})
         # enterprise SSO sets same site to None in https deployment
         # so we need to logout by calling api
-        clear_access_token_from_cookie(response, samesite="None")
+        clear_webapp_access_token_from_cookie(response, samesite="None")
         return response
diff --git a/api/controllers/web/passport.py b/api/controllers/web/passport.py
index 776b743e92..6a2e0b65fb 100644
--- a/api/controllers/web/passport.py
+++ b/api/controllers/web/passport.py
@@ -12,10 +12,8 @@ from controllers.web import web_ns
 from controllers.web.error import WebAppAuthRequiredError
 from extensions.ext_database import db
 from libs.passport import PassportService
-from libs.token import extract_access_token
+from libs.token import extract_webapp_access_token
 from models.model import App, EndUser, Site
-from services.app_service import AppService
-from services.enterprise.enterprise_service import EnterpriseService
 from services.feature_service import FeatureService
 from services.webapp_auth_service import WebAppAuthService, WebAppAuthType
@@ -37,23 +35,18 @@ class PassportResource(Resource):
         system_features = FeatureService.get_system_features()
         app_code = request.headers.get(HEADER_NAME_APP_CODE)
         user_id = request.args.get("user_id")
-        access_token = extract_access_token(request)
-
+        access_token = extract_webapp_access_token(request)
         if app_code is None:
             raise Unauthorized("X-App-Code header is missing.")
-        app_id = AppService.get_app_id_by_code(app_code)
 
-        # exchange token for enterprise logined web user
-        enterprise_user_decoded = decode_enterprise_webapp_user_id(access_token)
-        if enterprise_user_decoded:
-            # a web user has already logged in, exchange a token for this app without redirecting to the login page
-            return exchange_token_for_existing_web_user(
-                app_code=app_code, enterprise_user_decoded=enterprise_user_decoded
-            )
-        if system_features.webapp_auth.enabled:
-            app_settings = EnterpriseService.WebAppAuth.get_app_access_mode_by_id(app_id=app_id)
-            if not app_settings or not app_settings.access_mode == "public":
-                raise WebAppAuthRequiredError()
+        enterprise_user_decoded = decode_enterprise_webapp_user_id(access_token)
+        app_auth_type = WebAppAuthService.get_app_auth_type(app_code=app_code)
+        if app_auth_type != WebAppAuthType.PUBLIC:
+            if not enterprise_user_decoded:
+                raise WebAppAuthRequiredError()
+            return exchange_token_for_existing_web_user(
+                app_code=app_code, enterprise_user_decoded=enterprise_user_decoded, auth_type=app_auth_type
+            )
 
         # get site from db and check if it is normal
         site = db.session.scalar(select(Site).where(Site.code == app_code, Site.status == "normal"))
@@ -124,7 +117,7 @@
     return decoded
 
 
-def exchange_token_for_existing_web_user(app_code: str, enterprise_user_decoded: dict):
+def exchange_token_for_existing_web_user(app_code: str, enterprise_user_decoded: dict, auth_type: WebAppAuthType):
     """
     Exchange a token for an existing web user session.
     """
@@ -145,13 +138,11 @@ def exchange_token_for_existing_web_user(
     if not app_model or app_model.status != "normal" or not app_model.enable_site:
         raise NotFound()
 
-    app_auth_type = WebAppAuthService.get_app_auth_type(app_code=app_code)
-
-    if app_auth_type == WebAppAuthType.PUBLIC:
+    if auth_type == WebAppAuthType.PUBLIC:
         return _exchange_for_public_app_token(app_model, site, enterprise_user_decoded)
-    elif app_auth_type == WebAppAuthType.EXTERNAL and user_auth_type != "external":
+    elif auth_type == WebAppAuthType.EXTERNAL and user_auth_type != "external":
         raise WebAppAuthRequiredError("Please login as external user.")
-    elif app_auth_type == WebAppAuthType.INTERNAL and user_auth_type != "internal":
+    elif auth_type == WebAppAuthType.INTERNAL and user_auth_type != "internal":
         raise WebAppAuthRequiredError("Please login as internal user.")
 
     end_user = None
""" @@ -145,13 +138,11 @@ def exchange_token_for_existing_web_user(app_code: str, enterprise_user_decoded: if not app_model or app_model.status != "normal" or not app_model.enable_site: raise NotFound() - app_auth_type = WebAppAuthService.get_app_auth_type(app_code=app_code) - - if app_auth_type == WebAppAuthType.PUBLIC: + if auth_type == WebAppAuthType.PUBLIC: return _exchange_for_public_app_token(app_model, site, enterprise_user_decoded) - elif app_auth_type == WebAppAuthType.EXTERNAL and user_auth_type != "external": + elif auth_type == WebAppAuthType.EXTERNAL and user_auth_type != "external": raise WebAppAuthRequiredError("Please login as external user.") - elif app_auth_type == WebAppAuthType.INTERNAL and user_auth_type != "internal": + elif auth_type == WebAppAuthType.INTERNAL and user_auth_type != "internal": raise WebAppAuthRequiredError("Please login as internal user.") end_user = None diff --git a/api/core/app/apps/pipeline/pipeline_generator.py b/api/core/app/apps/pipeline/pipeline_generator.py index 1fb076b685..f8bfbce37a 100644 --- a/api/core/app/apps/pipeline/pipeline_generator.py +++ b/api/core/app/apps/pipeline/pipeline_generator.py @@ -255,7 +255,7 @@ class PipelineGenerator(BaseAppGenerator): json_text = json.dumps(text) upload_file = FileService(db.engine).upload_text(json_text, name, user.id, dataset.tenant_id) features = FeatureService.get_features(dataset.tenant_id) - if features.billing.subscription.plan == "sandbox": + if features.billing.enabled and features.billing.subscription.plan == "sandbox": tenant_pipeline_task_key = f"tenant_pipeline_task:{dataset.tenant_id}" tenant_self_pipeline_task_queue = f"tenant_self_pipeline_task_queue:{dataset.tenant_id}" diff --git a/api/core/plugin/entities/parameters.py b/api/core/plugin/entities/parameters.py index 51d74ef186..88a3a7bd43 100644 --- a/api/core/plugin/entities/parameters.py +++ b/api/core/plugin/entities/parameters.py @@ -76,7 +76,7 @@ class PluginParameter(BaseModel): auto_generate: PluginParameterAutoGenerate | None = None template: PluginParameterTemplate | None = None required: bool = False - default: Union[float, int, str] | None = None + default: Union[float, int, str, bool] | None = None min: Union[float, int] | None = None max: Union[float, int] | None = None precision: int | None = None diff --git a/api/core/plugin/impl/exc.py b/api/core/plugin/impl/exc.py index 5738c22ced..4cabdc1732 100644 --- a/api/core/plugin/impl/exc.py +++ b/api/core/plugin/impl/exc.py @@ -40,7 +40,7 @@ class PluginDaemonBadRequestError(PluginDaemonClientSideError): description: str = "Bad Request" -class PluginInvokeError(PluginDaemonClientSideError): +class PluginInvokeError(PluginDaemonClientSideError, ValueError): description: str = "Invoke Error" def _get_error_object(self) -> Mapping: diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index 99bbe615fb..45b19f25a0 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -72,6 +72,19 @@ default_retrieval_model: dict[str, Any] = { class DatasetRetrieval: def __init__(self, application_generate_entity=None): self.application_generate_entity = application_generate_entity + self._llm_usage = LLMUsage.empty_usage() + + @property + def llm_usage(self) -> LLMUsage: + return self._llm_usage.model_copy() + + def _record_usage(self, usage: LLMUsage | None) -> None: + if usage is None or usage.total_tokens <= 0: + return + if self._llm_usage.total_tokens == 0: + self._llm_usage = usage 
diff --git a/api/core/rag/retrieval/router/multi_dataset_function_call_router.py b/api/core/rag/retrieval/router/multi_dataset_function_call_router.py
index de59c6380e..5f3e1a8cae 100644
--- a/api/core/rag/retrieval/router/multi_dataset_function_call_router.py
+++ b/api/core/rag/retrieval/router/multi_dataset_function_call_router.py
@@ -2,7 +2,7 @@ from typing import Union
 
 from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
 from core.model_manager import ModelInstance
-from core.model_runtime.entities.llm_entities import LLMResult
+from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
 from core.model_runtime.entities.message_entities import PromptMessageTool, SystemPromptMessage, UserPromptMessage
 
 
@@ -13,15 +13,15 @@ class FunctionCallMultiDatasetRouter:
         dataset_tools: list[PromptMessageTool],
         model_config: ModelConfigWithCredentialsEntity,
         model_instance: ModelInstance,
-    ) -> Union[str, None]:
+    ) -> tuple[Union[str, None], LLMUsage]:
        """Given input, decide what to do.
 
         Returns:
             Action specifying what tool to use.
         """
         if len(dataset_tools) == 0:
-            return None
+            return None, LLMUsage.empty_usage()
         elif len(dataset_tools) == 1:
-            return dataset_tools[0].name
+            return dataset_tools[0].name, LLMUsage.empty_usage()
 
         try:
             prompt_messages = [
@@ -34,9 +34,10 @@
                 stream=False,
                 model_parameters={"temperature": 0.2, "top_p": 0.3, "max_tokens": 1500},
             )
+            usage = result.usage or LLMUsage.empty_usage()
             if result.message.tool_calls:
                 # get retrieval model config
-                return result.message.tool_calls[0].function.name
-            return None
+                return result.message.tool_calls[0].function.name, usage
+            return None, usage
         except Exception:
-            return None
+            return None, LLMUsage.empty_usage()
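Both routers now return a `(tool_name, usage)` pair so callers can record token spend even on short-circuit paths. With zero or one candidate tool no model call is made, so the usage half is empty; a sketch (the two `None` placeholders stand in for the unused model config and instance on this path):

```python
from core.rag.retrieval.router.multi_dataset_function_call_router import (
    FunctionCallMultiDatasetRouter,
)

router = FunctionCallMultiDatasetRouter()

# No candidate tools: nothing to route, no LLM call, empty usage returned.
name, usage = router.invoke("which dataset?", [], None, None)
assert name is None and usage.total_tokens == 0
```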
diff --git a/api/core/rag/retrieval/router/multi_dataset_react_route.py b/api/core/rag/retrieval/router/multi_dataset_react_route.py
index 59d36229b3..8f3bec2704 100644
--- a/api/core/rag/retrieval/router/multi_dataset_react_route.py
+++ b/api/core/rag/retrieval/router/multi_dataset_react_route.py
@@ -58,15 +58,15 @@ class ReactMultiDatasetRouter:
         model_instance: ModelInstance,
         user_id: str,
         tenant_id: str,
-    ) -> Union[str, None]:
+    ) -> tuple[Union[str, None], LLMUsage]:
         """Given input, decide what to do.
 
         Returns:
             Action specifying what tool to use.
         """
         if len(dataset_tools) == 0:
-            return None
+            return None, LLMUsage.empty_usage()
         elif len(dataset_tools) == 1:
-            return dataset_tools[0].name
+            return dataset_tools[0].name, LLMUsage.empty_usage()
 
         try:
             return self._react_invoke(
@@ -78,7 +78,7 @@
                 tenant_id=tenant_id,
             )
         except Exception:
-            return None
+            return None, LLMUsage.empty_usage()
 
     def _react_invoke(
         self,
@@ -91,7 +91,7 @@
         prefix: str = PREFIX,
         suffix: str = SUFFIX,
         format_instructions: str = FORMAT_INSTRUCTIONS,
-    ) -> Union[str, None]:
+    ) -> tuple[Union[str, None], LLMUsage]:
         prompt: Union[list[ChatModelMessage], CompletionModelPromptTemplate]
         if model_config.mode == "chat":
             prompt = self.create_chat_prompt(
@@ -120,7 +120,7 @@
             memory=None,
             model_config=model_config,
         )
-        result_text, _ = self._invoke_llm(
+        result_text, usage = self._invoke_llm(
             completion_param=model_config.parameters,
             model_instance=model_instance,
             prompt_messages=prompt_messages,
@@ -131,8 +131,8 @@
         output_parser = StructuredChatOutputParser()
         react_decision = output_parser.parse(result_text)
         if isinstance(react_decision, ReactAction):
-            return react_decision.tool
-        return None
+            return react_decision.tool, usage
+        return None, usage
 
     def _invoke_llm(
         self,
diff --git a/api/core/tools/tool_manager.py b/api/core/tools/tool_manager.py
index 04fdb24f65..440744462e 100644
--- a/api/core/tools/tool_manager.py
+++ b/api/core/tools/tool_manager.py
@@ -326,7 +326,8 @@ class ToolManager:
             workflow_provider_stmt = select(WorkflowToolProvider).where(
                 WorkflowToolProvider.tenant_id == tenant_id, WorkflowToolProvider.id == provider_id
             )
-            workflow_provider = db.session.scalar(workflow_provider_stmt)
+            with Session(db.engine, expire_on_commit=False) as session, session.begin():
+                workflow_provider = session.scalar(workflow_provider_stmt)
             if workflow_provider is None:
                 raise ToolProviderNotFoundError(f"workflow provider {provider_id} not found")
diff --git a/api/core/tools/utils/parser.py b/api/core/tools/utils/parser.py
index c7ac3387e5..6eabde3991 100644
--- a/api/core/tools/utils/parser.py
+++ b/api/core/tools/utils/parser.py
@@ -62,6 +62,11 @@ class ApiBasedToolSchemaParser:
                         root = root[ref]
                     interface["operation"]["parameters"][i] = root
             for parameter in interface["operation"]["parameters"]:
+                # Handle complex type defaults that are not supported by PluginParameter
+                default_value = None
+                if "schema" in parameter and "default" in parameter["schema"]:
+                    default_value = ApiBasedToolSchemaParser._sanitize_default_value(parameter["schema"]["default"])
+
                 tool_parameter = ToolParameter(
                     name=parameter["name"],
                     label=I18nObject(en_US=parameter["name"], zh_Hans=parameter["name"]),
@@ -72,9 +77,7 @@
                     required=parameter.get("required", False),
                     form=ToolParameter.ToolParameterForm.LLM,
                     llm_description=parameter.get("description"),
-                    default=parameter["schema"]["default"]
-                    if "schema" in parameter and "default" in parameter["schema"]
-                    else None,
+                    default=default_value,
                     placeholder=I18nObject(
                         en_US=parameter.get("description", ""), zh_Hans=parameter.get("description", "")
                     ),
@@ -134,6 +137,11 @@
                     required = body_schema.get("required", [])
                     properties = body_schema.get("properties", {})
                     for name, property in properties.items():
+                        # Handle complex type defaults that are not supported by PluginParameter
+                        default_value = ApiBasedToolSchemaParser._sanitize_default_value(
+                            property.get("default", None)
+                        )
+
                         tool = ToolParameter(
                             name=name,
                             label=I18nObject(en_US=name, zh_Hans=name),
@@ -144,12 +152,11 @@
                             required=name in required,
                             form=ToolParameter.ToolParameterForm.LLM,
                             llm_description=property.get("description", ""),
-                            default=property.get("default", None),
+                            default=default_value,
                             placeholder=I18nObject(
                                 en_US=property.get("description", ""), zh_Hans=property.get("description", "")
                             ),
                         )
-
                         # check if there is a type
                         typ = ApiBasedToolSchemaParser._get_tool_parameter_type(property)
                         if typ:
@@ -197,6 +204,22 @@
 
         return bundles
 
+    @staticmethod
+    def _sanitize_default_value(value):
+        """
+        Sanitize default values for PluginParameter compatibility.
+        Complex types (list, dict) are converted to None to avoid validation errors.
+
+        Args:
+            value: The default value from OpenAPI schema
+
+        Returns:
+            None for complex types (list, dict), otherwise the original value
+        """
+        if isinstance(value, (list, dict)):
+            return None
+        return value
+
     @staticmethod
     def _get_tool_parameter_type(parameter: dict) -> ToolParameter.ToolParameterType | None:
         parameter = parameter or {}
@@ -217,7 +240,11 @@
             return ToolParameter.ToolParameterType.STRING
         elif typ == "array":
             items = parameter.get("items") or parameter.get("schema", {}).get("items")
-            return ToolParameter.ToolParameterType.FILES if items and items.get("format") == "binary" else None
+            if items and items.get("format") == "binary":
+                return ToolParameter.ToolParameterType.FILES
+            else:
+                # For regular arrays, return ARRAY type instead of None
+                return ToolParameter.ToolParameterType.ARRAY
         else:
             return None
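`_sanitize_default_value` exists because `PluginParameter.default` (widened above to accept `bool`) still cannot represent `list` or `dict` values, which are legal defaults in OpenAPI schemas. Scalars pass through unchanged:

```python
from core.tools.utils.parser import ApiBasedToolSchemaParser

sanitize = ApiBasedToolSchemaParser._sanitize_default_value

assert sanitize(["a", "b"]) is None      # array default from the schema is dropped
assert sanitize({"k": "v"}) is None      # object default is dropped
assert sanitize("celsius") == "celsius"  # scalar defaults pass through
assert sanitize(False) is False          # bool is now representable too
```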
diff --git a/api/core/tools/workflow_as_tool/provider.py b/api/core/tools/workflow_as_tool/provider.py
index e514c8c57b..d7afbc7389 100644
--- a/api/core/tools/workflow_as_tool/provider.py
+++ b/api/core/tools/workflow_as_tool/provider.py
@@ -1,6 +1,7 @@
 from collections.abc import Mapping
 
 from pydantic import Field
+from sqlalchemy.orm import Session
 
 from core.app.app_config.entities import VariableEntity, VariableEntityType
 from core.app.apps.workflow.app_config_manager import WorkflowAppConfigManager
@@ -20,6 +21,7 @@ from core.tools.entities.tool_entities import (
 from core.tools.utils.workflow_configuration_sync import WorkflowToolConfigurationUtils
 from core.tools.workflow_as_tool.tool import WorkflowTool
 from extensions.ext_database import db
+from models.account import Account
 from models.model import App, AppMode
 from models.tools import WorkflowToolProvider
 from models.workflow import Workflow
@@ -44,29 +46,34 @@ class WorkflowToolProviderController(ToolProviderController):
 
     @classmethod
     def from_db(cls, db_provider: WorkflowToolProvider) -> "WorkflowToolProviderController":
-        app = db_provider.app
+        with Session(db.engine, expire_on_commit=False) as session, session.begin():
+            provider = session.get(WorkflowToolProvider, db_provider.id) if db_provider.id else None
+            if not provider:
+                raise ValueError("workflow provider not found")
+            app = session.get(App, provider.app_id)
+            if not app:
+                raise ValueError("app not found")
 
-        if not app:
-            raise ValueError("app not found")
+            user = session.get(Account, provider.user_id) if provider.user_id else None
 
-        controller = WorkflowToolProviderController(
-            entity=ToolProviderEntity(
-                identity=ToolProviderIdentity(
-                    author=db_provider.user.name if db_provider.user_id and db_provider.user else "",
-                    name=db_provider.label,
-                    label=I18nObject(en_US=db_provider.label, zh_Hans=db_provider.label),
-                    description=I18nObject(en_US=db_provider.description, zh_Hans=db_provider.description),
-                    icon=db_provider.icon,
+            controller = WorkflowToolProviderController(
+                entity=ToolProviderEntity(
+                    identity=ToolProviderIdentity(
+                        author=user.name if user else "",
+                        name=provider.label,
+                        label=I18nObject(en_US=provider.label, zh_Hans=provider.label),
+                        description=I18nObject(en_US=provider.description, zh_Hans=provider.description),
+                        icon=provider.icon,
+                    ),
+                    credentials_schema=[],
+                    plugin_id=None,
                 ),
-                credentials_schema=[],
-                plugin_id=None,
-            ),
-            provider_id=db_provider.id or "",
-        )
+                provider_id=provider.id or "",
+            )
 
-        # init tools
-
-        controller.tools = [controller._get_db_provider_tool(db_provider, app)]
+            controller.tools = [
+                controller._get_db_provider_tool(provider, app, session=session, user=user),
+            ]
 
         return controller
 
@@ -74,7 +81,14 @@
     def provider_type(self) -> ToolProviderType:
         return ToolProviderType.WORKFLOW
 
-    def _get_db_provider_tool(self, db_provider: WorkflowToolProvider, app: App) -> WorkflowTool:
+    def _get_db_provider_tool(
+        self,
+        db_provider: WorkflowToolProvider,
+        app: App,
+        *,
+        session: Session,
+        user: Account | None = None,
+    ) -> WorkflowTool:
         """
         get db provider tool
         :param db_provider: the db provider
@@ -82,7 +96,7 @@
         :return: the tool
         """
         workflow: Workflow | None = (
-            db.session.query(Workflow)
+            session.query(Workflow)
            .where(Workflow.app_id == db_provider.app_id, Workflow.version == db_provider.version)
             .first()
         )
@@ -101,8 +115,6 @@
         def fetch_workflow_variable(variable_name: str) -> VariableEntity | None:
             return next(filter(lambda x: x.variable == variable_name, variables), None)
 
-        user = db_provider.user
-
         workflow_tool_parameters = []
         for parameter in parameters:
             variable = fetch_workflow_variable(parameter.name)
@@ -187,22 +199,25 @@
         if self.tools is not None:
             return self.tools
 
-        db_providers: WorkflowToolProvider | None = (
-            db.session.query(WorkflowToolProvider)
-            .where(
-                WorkflowToolProvider.tenant_id == tenant_id,
-                WorkflowToolProvider.app_id == self.provider_id,
+        with Session(db.engine, expire_on_commit=False) as session, session.begin():
+            db_provider: WorkflowToolProvider | None = (
+                session.query(WorkflowToolProvider)
+                .where(
+                    WorkflowToolProvider.tenant_id == tenant_id,
+                    WorkflowToolProvider.app_id == self.provider_id,
+                )
+                .first()
             )
-            .first()
-        )
 
-        if not db_providers:
-            return []
-        if not db_providers.app:
-            raise ValueError("app not found")
+            if not db_provider:
+                return []
 
-        app = db_providers.app
-        self.tools = [self._get_db_provider_tool(db_providers, app)]
+            app = session.get(App, db_provider.app_id)
+            if not app:
+                raise ValueError("app not found")
+
+            user = session.get(Account, db_provider.user_id) if db_provider.user_id else None
+            self.tools = [self._get_db_provider_tool(db_provider, app, session=session, user=user)]
 
         return self.tools
diff --git a/api/core/tools/workflow_as_tool/tool.py b/api/core/tools/workflow_as_tool/tool.py
index 50c2327004..2cd46647a0 100644
--- a/api/core/tools/workflow_as_tool/tool.py
+++ b/api/core/tools/workflow_as_tool/tool.py
@@ -1,12 +1,14 @@
 import json
 import logging
-from collections.abc import Generator
-from typing import Any
+from collections.abc import Generator, Mapping, Sequence
+from typing import Any, cast
 
 from flask import has_request_context
 from sqlalchemy import select
+from sqlalchemy.orm import Session
 
 from core.file import FILE_MODEL_IDENTITY, File, FileTransferMethod
+from core.model_runtime.entities.llm_entities import LLMUsage, LLMUsageMetadata
 from core.tools.__base.tool import Tool
 from core.tools.__base.tool_runtime import ToolRuntime
 from core.tools.entities.tool_entities import (
@@ -48,6 +50,7 @@ class WorkflowTool(Tool):
         self.workflow_entities = workflow_entities
         self.workflow_call_depth = workflow_call_depth
         self.label = label
+        self._latest_usage = LLMUsage.empty_usage()
 
         super().__init__(entity=entity, runtime=runtime)
 
@@ -83,10 +86,11 @@
         assert self.runtime.invoke_from is not None
 
         user = self._resolve_user(user_id=user_id)
-
         if user is None:
             raise ToolInvokeError("User not found")
 
+        self._latest_usage = LLMUsage.empty_usage()
+
         result = generator.generate(
             app_model=app,
             workflow=workflow,
@@ -110,9 +114,68 @@
             for file in files:
                 yield self.create_file_message(file)  # type: ignore
 
+        self._latest_usage = self._derive_usage_from_result(data)
+
         yield self.create_text_message(json.dumps(outputs, ensure_ascii=False))
         yield self.create_json_message(outputs)
 
+    @property
+    def latest_usage(self) -> LLMUsage:
+        return self._latest_usage
+
+    @classmethod
+    def _derive_usage_from_result(cls, data: Mapping[str, Any]) -> LLMUsage:
+        usage_dict = cls._extract_usage_dict(data)
+        if usage_dict is not None:
+            return LLMUsage.from_metadata(cast(LLMUsageMetadata, dict(usage_dict)))
+
+        total_tokens = data.get("total_tokens")
+        total_price = data.get("total_price")
+        if total_tokens is None and total_price is None:
+            return LLMUsage.empty_usage()
+
+        usage_metadata: dict[str, Any] = {}
+        if total_tokens is not None:
+            try:
+                usage_metadata["total_tokens"] = int(str(total_tokens))
+            except (TypeError, ValueError):
+                pass
+        if total_price is not None:
+            usage_metadata["total_price"] = str(total_price)
+        currency = data.get("currency")
+        if currency is not None:
+            usage_metadata["currency"] = currency
+
+        if not usage_metadata:
+            return LLMUsage.empty_usage()
+
+        return LLMUsage.from_metadata(cast(LLMUsageMetadata, usage_metadata))
+
+    @classmethod
+    def _extract_usage_dict(cls, payload: Mapping[str, Any]) -> Mapping[str, Any] | None:
+        usage_candidate = payload.get("usage")
+        if isinstance(usage_candidate, Mapping):
+            return usage_candidate
+
+        metadata_candidate = payload.get("metadata")
+        if isinstance(metadata_candidate, Mapping):
+            usage_candidate = metadata_candidate.get("usage")
+            if isinstance(usage_candidate, Mapping):
+                return usage_candidate
+
+        for value in payload.values():
+            if isinstance(value, Mapping):
+                found = cls._extract_usage_dict(value)
+                if found is not None:
+                    return found
+            elif isinstance(value, Sequence) and not isinstance(value, (str, bytes, bytearray)):
+                for item in value:
+                    if isinstance(item, Mapping):
+                        found = cls._extract_usage_dict(item)
+                        if found is not None:
+                            return found
+        return None
+
     def fork_tool_runtime(self, runtime: ToolRuntime) -> "WorkflowTool":
         """
         fork a new tool with metadata
@@ -179,16 +242,17 @@
         """
         get the workflow by app id and version
         """
-        if not version:
-            workflow = (
-                db.session.query(Workflow)
-                .where(Workflow.app_id == app_id, Workflow.version != Workflow.VERSION_DRAFT)
-                .order_by(Workflow.created_at.desc())
-                .first()
-            )
-        else:
-            stmt = select(Workflow).where(Workflow.app_id == app_id, Workflow.version == version)
-            workflow = db.session.scalar(stmt)
+        with Session(db.engine, expire_on_commit=False) as session, session.begin():
+            if not version:
+                stmt = (
+                    select(Workflow)
+                    .where(Workflow.app_id == app_id, Workflow.version != Workflow.VERSION_DRAFT)
+                    .order_by(Workflow.created_at.desc())
+                )
+                workflow = session.scalars(stmt).first()
+            else:
+                stmt = select(Workflow).where(Workflow.app_id == app_id, Workflow.version == version)
+                workflow = session.scalar(stmt)
 
         if not workflow:
             raise ValueError("workflow not found or not published")
@@ -200,7 +264,8 @@
         get the app by app id
         """
         stmt = select(App).where(App.id == app_id)
-        app = db.session.scalar(stmt)
+        with Session(db.engine, expire_on_commit=False) as session, session.begin():
+            app = session.scalar(stmt)
 
         if not app:
             raise ValueError("app not found")
diff --git a/api/core/workflow/nodes/agent/entities.py b/api/core/workflow/nodes/agent/entities.py
index ce6eb33ecc..985ee5eef2 100644
--- a/api/core/workflow/nodes/agent/entities.py
+++ b/api/core/workflow/nodes/agent/entities.py
@@ -26,8 +26,8 @@ class AgentNodeData(BaseNodeData):
 
 
 class ParamsAutoGenerated(IntEnum):
-    CLOSE = auto()
-    OPEN = auto()
+    CLOSE = 0
+    OPEN = 1
 
 
 class AgentOldVersionModelFeatures(StrEnum):
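Replacing `auto()` with explicit values is a behavioral fix, not a style change: `enum.auto()` numbers `IntEnum` members from 1, so `CLOSE`/`OPEN` previously serialized as 1/2, while the pinned values keep 0/1 (presumably the values the stored node data expects). The difference in isolation:

```python
from enum import IntEnum, auto

class WithAuto(IntEnum):
    CLOSE = auto()  # auto() starts at 1
    OPEN = auto()   # -> 2

class Pinned(IntEnum):
    CLOSE = 0
    OPEN = 1

assert (WithAuto.CLOSE, WithAuto.OPEN) == (1, 2)
assert (Pinned.CLOSE, Pinned.OPEN) == (0, 1)
```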
diff --git a/api/core/workflow/nodes/base/__init__.py b/api/core/workflow/nodes/base/__init__.py
index 8cf31dc342..f83df0e323 100644
--- a/api/core/workflow/nodes/base/__init__.py
+++ b/api/core/workflow/nodes/base/__init__.py
@@ -1,4 +1,5 @@
 from .entities import BaseIterationNodeData, BaseIterationState, BaseLoopNodeData, BaseLoopState, BaseNodeData
+from .usage_tracking_mixin import LLMUsageTrackingMixin
 
 __all__ = [
     "BaseIterationNodeData",
@@ -6,4 +7,5 @@ __all__ = [
     "BaseLoopNodeData",
     "BaseLoopState",
     "BaseNodeData",
+    "LLMUsageTrackingMixin",
 ]
diff --git a/api/core/workflow/nodes/base/usage_tracking_mixin.py b/api/core/workflow/nodes/base/usage_tracking_mixin.py
new file mode 100644
index 0000000000..d9a0ef8972
--- /dev/null
+++ b/api/core/workflow/nodes/base/usage_tracking_mixin.py
@@ -0,0 +1,28 @@
+from core.model_runtime.entities.llm_entities import LLMUsage
+from core.workflow.runtime import GraphRuntimeState
+
+
+class LLMUsageTrackingMixin:
+    """Provides shared helpers for merging and recording LLM usage within workflow nodes."""
+
+    graph_runtime_state: GraphRuntimeState
+
+    @staticmethod
+    def _merge_usage(current: LLMUsage, new_usage: LLMUsage | None) -> LLMUsage:
+        """Return a combined usage snapshot, preserving zero-value inputs."""
+        if new_usage is None or new_usage.total_tokens <= 0:
+            return current
+        if current.total_tokens == 0:
+            return new_usage
+        return current.plus(new_usage)
+
+    def _accumulate_usage(self, usage: LLMUsage) -> None:
+        """Push usage into the graph runtime accumulator for downstream reporting."""
+        if usage.total_tokens <= 0:
+            return
+
+        current_usage = self.graph_runtime_state.llm_usage
+        if current_usage.total_tokens == 0:
+            self.graph_runtime_state.llm_usage = usage.model_copy()
+        else:
+            self.graph_runtime_state.llm_usage = current_usage.plus(usage)
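`_merge_usage` treats an empty accumulator specially (adopt the first non-empty snapshot) and otherwise sums via `LLMUsage.plus`, while zero-token usage is ignored; the iteration, loop, knowledge-retrieval, and tool nodes below all rely on this contract. A small sketch of the semantics, building inputs with pydantic's `model_copy`:

```python
from core.model_runtime.entities.llm_entities import LLMUsage
from core.workflow.nodes.base import LLMUsageTrackingMixin

merge = LLMUsageTrackingMixin._merge_usage

acc = LLMUsage.empty_usage()
first = LLMUsage.empty_usage().model_copy(update={"total_tokens": 120})
second = LLMUsage.empty_usage().model_copy(update={"total_tokens": 80})

acc = merge(acc, None)    # None is ignored
acc = merge(acc, first)   # empty accumulator adopts the snapshot -> 120
acc = merge(acc, second)  # afterwards values are summed via plus() -> 200
assert acc.total_tokens == 200
```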
diff --git a/api/core/workflow/nodes/iteration/iteration_node.py b/api/core/workflow/nodes/iteration/iteration_node.py
index 41060bd569..3a3a2290be 100644
--- a/api/core/workflow/nodes/iteration/iteration_node.py
+++ b/api/core/workflow/nodes/iteration/iteration_node.py
@@ -8,6 +8,7 @@ from typing import TYPE_CHECKING, Any, NewType, cast
 from flask import Flask, current_app
 from typing_extensions import TypeIs
 
+from core.model_runtime.entities.llm_entities import LLMUsage
 from core.variables import IntegerVariable, NoneSegment
 from core.variables.segments import ArrayAnySegment, ArraySegment
 from core.variables.variables import VariableUnion
@@ -34,6 +35,7 @@ from core.workflow.node_events import (
     NodeRunResult,
     StreamCompletedEvent,
 )
+from core.workflow.nodes.base import LLMUsageTrackingMixin
 from core.workflow.nodes.base.entities import BaseNodeData, RetryConfig
 from core.workflow.nodes.base.node import Node
 from core.workflow.nodes.iteration.entities import ErrorHandleMode, IterationNodeData
@@ -58,7 +60,7 @@ logger = logging.getLogger(__name__)
 EmptyArraySegment = NewType("EmptyArraySegment", ArraySegment)
 
 
-class IterationNode(Node):
+class IterationNode(LLMUsageTrackingMixin, Node):
     """
     Iteration Node.
     """
@@ -118,6 +120,7 @@
         started_at = naive_utc_now()
         iter_run_map: dict[str, float] = {}
         outputs: list[object] = []
+        usage_accumulator = [LLMUsage.empty_usage()]
 
         yield IterationStartedEvent(
             start_at=started_at,
@@ -130,22 +133,27 @@
                 iterator_list_value=iterator_list_value,
                 outputs=outputs,
                 iter_run_map=iter_run_map,
+                usage_accumulator=usage_accumulator,
             )
 
+            self._accumulate_usage(usage_accumulator[0])
             yield from self._handle_iteration_success(
                 started_at=started_at,
                 inputs=inputs,
                 outputs=outputs,
                 iterator_list_value=iterator_list_value,
                 iter_run_map=iter_run_map,
+                usage=usage_accumulator[0],
             )
         except IterationNodeError as e:
+            self._accumulate_usage(usage_accumulator[0])
             yield from self._handle_iteration_failure(
                 started_at=started_at,
                 inputs=inputs,
                 outputs=outputs,
                 iterator_list_value=iterator_list_value,
                 iter_run_map=iter_run_map,
+                usage=usage_accumulator[0],
                 error=e,
             )
 
@@ -196,6 +204,7 @@
         iterator_list_value: Sequence[object],
         outputs: list[object],
         iter_run_map: dict[str, float],
+        usage_accumulator: list[LLMUsage],
     ) -> Generator[GraphNodeEventBase | NodeEventBase, None, None]:
         if self._node_data.is_parallel:
             # Parallel mode execution
@@ -203,6 +212,7 @@
                 iterator_list_value=iterator_list_value,
                 outputs=outputs,
                 iter_run_map=iter_run_map,
+                usage_accumulator=usage_accumulator,
             )
         else:
             # Sequential mode execution
@@ -228,6 +238,9 @@
 
             # Update the total tokens from this iteration
             self.graph_runtime_state.total_tokens += graph_engine.graph_runtime_state.total_tokens
+            usage_accumulator[0] = self._merge_usage(
+                usage_accumulator[0], graph_engine.graph_runtime_state.llm_usage
+            )
             iter_run_map[str(index)] = (datetime.now(UTC).replace(tzinfo=None) - iter_start_at).total_seconds()
 
     def _execute_parallel_iterations(
@@ -235,6 +248,7 @@
         iterator_list_value: Sequence[object],
         outputs: list[object],
         iter_run_map: dict[str, float],
+        usage_accumulator: list[LLMUsage],
     ) -> Generator[GraphNodeEventBase | NodeEventBase, None, None]:
         # Initialize outputs list with None values to maintain order
         outputs.extend([None] * len(iterator_list_value))
@@ -245,7 +259,16 @@
         with ThreadPoolExecutor(max_workers=max_workers) as executor:
             # Submit all iteration tasks
             future_to_index: dict[
-                Future[tuple[datetime, list[GraphNodeEventBase], object | None, int, dict[str, VariableUnion]]],
+                Future[
+                    tuple[
+                        datetime,
+                        list[GraphNodeEventBase],
+                        object | None,
+                        int,
+                        dict[str, VariableUnion],
+                        LLMUsage,
+                    ]
+                ],
                 int,
             ] = {}
             for index, item in enumerate(iterator_list_value):
@@ -264,7 +287,14 @@
                 index = future_to_index[future]
                 try:
                     result = future.result()
-                    iter_start_at, events, output_value, tokens_used, conversation_snapshot = result
+                    (
+                        iter_start_at,
+                        events,
+                        output_value,
+                        tokens_used,
+                        conversation_snapshot,
+                        iteration_usage,
+                    ) = result
 
                     # Update outputs at the correct index
                     outputs[index] = output_value
@@ -276,6 +306,8 @@
                     self.graph_runtime_state.total_tokens += tokens_used
                     iter_run_map[str(index)] = (datetime.now(UTC).replace(tzinfo=None) - iter_start_at).total_seconds()
 
+                    usage_accumulator[0] = self._merge_usage(usage_accumulator[0], iteration_usage)
+
                     # Sync conversation variables after iteration completion
                     self._sync_conversation_variables_from_snapshot(conversation_snapshot)
 
@@ -303,7 +335,7 @@
         item: object,
         flask_app: Flask,
         context_vars: contextvars.Context,
-    ) -> tuple[datetime, list[GraphNodeEventBase], object | None, int, dict[str, VariableUnion]]:
+    ) -> tuple[datetime, list[GraphNodeEventBase], object | None, int, dict[str, VariableUnion], LLMUsage]:
         """Execute a single iteration in parallel mode and return results."""
         with preserve_flask_contexts(flask_app=flask_app, context_vars=context_vars):
             iter_start_at = datetime.now(UTC).replace(tzinfo=None)
@@ -332,6 +364,7 @@
                 output_value,
                 graph_engine.graph_runtime_state.total_tokens,
                 conversation_snapshot,
+                graph_engine.graph_runtime_state.llm_usage,
             )
 
     def _handle_iteration_success(
@@ -341,6 +374,8 @@
         outputs: list[object],
         iterator_list_value: Sequence[object],
         iter_run_map: dict[str, float],
+        *,
+        usage: LLMUsage,
     ) -> Generator[NodeEventBase, None, None]:
         # Flatten the list of lists if all outputs are lists
         flattened_outputs = self._flatten_outputs_if_needed(outputs)
@@ -351,7 +386,9 @@
             outputs={"output": flattened_outputs},
             steps=len(iterator_list_value),
             metadata={
-                WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens,
+                WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: usage.total_tokens,
+                WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: usage.total_price,
+                WorkflowNodeExecutionMetadataKey.CURRENCY: usage.currency,
                 WorkflowNodeExecutionMetadataKey.ITERATION_DURATION_MAP: iter_run_map,
             },
         )
@@ -362,8 +399,11 @@
                 status=WorkflowNodeExecutionStatus.SUCCEEDED,
                 outputs={"output": flattened_outputs},
                 metadata={
-                    WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens,
+                    WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: usage.total_tokens,
+                    WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: usage.total_price,
+                    WorkflowNodeExecutionMetadataKey.CURRENCY: usage.currency,
                 },
+                llm_usage=usage,
             )
         )
 
@@ -400,6 +440,8 @@
         outputs: list[object],
         iterator_list_value: Sequence[object],
         iter_run_map: dict[str, float],
+        *,
+        usage: LLMUsage,
         error: IterationNodeError,
     ) -> Generator[NodeEventBase, None, None]:
         # Flatten the list of lists if all outputs are lists (even in failure case)
@@ -411,7 +453,9 @@
             outputs={"output": flattened_outputs},
             steps=len(iterator_list_value),
             metadata={
-                WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens,
+                WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: usage.total_tokens,
+                WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: usage.total_price,
+                WorkflowNodeExecutionMetadataKey.CURRENCY: usage.currency,
                 WorkflowNodeExecutionMetadataKey.ITERATION_DURATION_MAP: iter_run_map,
             },
             error=str(error),
@@ -420,6 +464,12 @@
             node_run_result=NodeRunResult(
                 status=WorkflowNodeExecutionStatus.FAILED,
                 error=str(error),
+                metadata={
+                    WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: usage.total_tokens,
+                    WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: usage.total_price,
+                    WorkflowNodeExecutionMetadataKey.CURRENCY: usage.currency,
+                },
+                llm_usage=usage,
             )
         )
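`usage_accumulator` is a one-element list rather than a bare `LLMUsage` because `_execute_loop` and `_execute_parallel_iterations` need to rebind the merged value in a way the outer generator can observe; mutating `acc[0]` is visible across call boundaries where plain rebinding would not be. The trick in isolation:

```python
def add_tokens(acc: list[int], spent: int) -> None:
    # Rebinding through the list cell is visible to the caller;
    # `acc = acc + spent` on a bare int would not be.
    acc[0] = acc[0] + spent

total = [0]
add_tokens(total, 120)
add_tokens(total, 80)
assert total[0] == 200
```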
diff --git a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py
index ba5134f9e6..4a63900527 100644
--- a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py
+++ b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py
@@ -15,14 +15,11 @@ from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
 from core.entities.agent_entities import PlanningStrategy
 from core.entities.model_entities import ModelStatus
 from core.model_manager import ModelInstance, ModelManager
-from core.model_runtime.entities.message_entities import (
-    PromptMessageRole,
-)
-from core.model_runtime.entities.model_entities import (
-    ModelFeature,
-    ModelType,
-)
+from core.model_runtime.entities.llm_entities import LLMUsage
+from core.model_runtime.entities.message_entities import PromptMessageRole
+from core.model_runtime.entities.model_entities import ModelFeature, ModelType
 from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
+from core.model_runtime.utils.encoders import jsonable_encoder
 from core.prompt.simple_prompt_transform import ModelMode
 from core.rag.datasource.retrieval_service import RetrievalService
 from core.rag.entities.metadata_entities import Condition, MetadataCondition
@@ -33,8 +30,14 @@ from core.variables import (
 )
 from core.variables.segments import ArrayObjectSegment
 from core.workflow.entities import GraphInitParams
-from core.workflow.enums import ErrorStrategy, NodeType, WorkflowNodeExecutionStatus
+from core.workflow.enums import (
+    ErrorStrategy,
+    NodeType,
+    WorkflowNodeExecutionMetadataKey,
+    WorkflowNodeExecutionStatus,
+)
 from core.workflow.node_events import ModelInvokeCompletedEvent, NodeRunResult
+from core.workflow.nodes.base import LLMUsageTrackingMixin
 from core.workflow.nodes.base.entities import BaseNodeData, RetryConfig
 from core.workflow.nodes.base.node import Node
 from core.workflow.nodes.knowledge_retrieval.template_prompts import (
@@ -80,7 +83,7 @@ default_retrieval_model = {
 }
 
 
-class KnowledgeRetrievalNode(Node):
+class KnowledgeRetrievalNode(LLMUsageTrackingMixin, Node):
     node_type = NodeType.KNOWLEDGE_RETRIEVAL
 
     _node_data: KnowledgeRetrievalNodeData
@@ -182,14 +185,21 @@
         )
 
         # retrieve knowledge
+        usage = LLMUsage.empty_usage()
         try:
-            results = self._fetch_dataset_retriever(node_data=self._node_data, query=query)
+            results, usage = self._fetch_dataset_retriever(node_data=self._node_data, query=query)
             outputs = {"result": ArrayObjectSegment(value=results)}
             return NodeRunResult(
                 status=WorkflowNodeExecutionStatus.SUCCEEDED,
                 inputs=variables,
-                process_data={},
+                process_data={"usage": jsonable_encoder(usage)},
                 outputs=outputs,  # type: ignore
+                metadata={
+                    WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: usage.total_tokens,
+                    WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: usage.total_price,
+                    WorkflowNodeExecutionMetadataKey.CURRENCY: usage.currency,
+                },
+                llm_usage=usage,
             )
 
         except KnowledgeRetrievalNodeError as e:
@@ -199,6 +209,7 @@
                 inputs=variables,
                 error=str(e),
                 error_type=type(e).__name__,
+                llm_usage=usage,
             )
         # Temporary handle all exceptions from DatasetRetrieval class here.
         except Exception as e:
@@ -207,11 +218,15 @@
                 inputs=variables,
                 error=str(e),
                 error_type=type(e).__name__,
+                llm_usage=usage,
             )
         finally:
             db.session.close()
 
-    def _fetch_dataset_retriever(self, node_data: KnowledgeRetrievalNodeData, query: str) -> list[dict[str, Any]]:
+    def _fetch_dataset_retriever(
+        self, node_data: KnowledgeRetrievalNodeData, query: str
+    ) -> tuple[list[dict[str, Any]], LLMUsage]:
+        usage = LLMUsage.empty_usage()
         available_datasets = []
         dataset_ids = node_data.dataset_ids
 
@@ -245,9 +260,10 @@
             if not dataset:
                 continue
             available_datasets.append(dataset)
-        metadata_filter_document_ids, metadata_condition = self._get_metadata_filter_condition(
+        metadata_filter_document_ids, metadata_condition, metadata_usage = self._get_metadata_filter_condition(
             [dataset.id for dataset in available_datasets], query, node_data
         )
+        usage = self._merge_usage(usage, metadata_usage)
         all_documents = []
         dataset_retrieval = DatasetRetrieval()
         if node_data.retrieval_mode == DatasetRetrieveConfigEntity.RetrieveStrategy.SINGLE:
@@ -330,6 +346,8 @@
                 metadata_filter_document_ids=metadata_filter_document_ids,
                 metadata_condition=metadata_condition,
             )
+            usage = self._merge_usage(usage, dataset_retrieval.llm_usage)
+
         dify_documents = [item for item in all_documents if item.provider == "dify"]
         external_documents = [item for item in all_documents if item.provider == "external"]
         retrieval_resource_list = []
@@ -406,11 +424,12 @@
             )
             for position, item in enumerate(retrieval_resource_list, start=1):
                 item["metadata"]["position"] = position
-        return retrieval_resource_list
+        return retrieval_resource_list, usage
 
     def _get_metadata_filter_condition(
         self, dataset_ids: list, query: str, node_data: KnowledgeRetrievalNodeData
-    ) -> tuple[dict[str, list[str]] | None, MetadataCondition | None]:
+    ) -> tuple[dict[str, list[str]] | None, MetadataCondition | None, LLMUsage]:
+        usage = LLMUsage.empty_usage()
         document_query = db.session.query(Document).where(
             Document.dataset_id.in_(dataset_ids),
             Document.indexing_status == "completed",
@@ -420,9 +439,12 @@
         filters: list[Any] = []
         metadata_condition = None
         if node_data.metadata_filtering_mode == "disabled":
-            return None, None
+            return None, None, usage
         elif node_data.metadata_filtering_mode == "automatic":
-            automatic_metadata_filters = self._automatic_metadata_filter_func(dataset_ids, query, node_data)
+            automatic_metadata_filters, automatic_usage = self._automatic_metadata_filter_func(
+                dataset_ids, query, node_data
+            )
+            usage = self._merge_usage(usage, automatic_usage)
             if automatic_metadata_filters:
                 conditions = []
                 for sequence, filter in enumerate(automatic_metadata_filters):
@@ -496,11 +518,12 @@
         metadata_filter_document_ids = defaultdict(list) if documents else None  # type: ignore
         for document in documents:
             metadata_filter_document_ids[document.dataset_id].append(document.id)  # type: ignore
-        return metadata_filter_document_ids, metadata_condition
+        return metadata_filter_document_ids, metadata_condition, usage
 
     def _automatic_metadata_filter_func(
         self, dataset_ids: list, query: str, node_data: KnowledgeRetrievalNodeData
-    ) -> list[dict[str, Any]]:
+    ) -> tuple[list[dict[str, Any]], LLMUsage]:
+        usage = LLMUsage.empty_usage()
         # get all metadata field
         stmt = select(DatasetMetadata).where(DatasetMetadata.dataset_id.in_(dataset_ids))
         metadata_fields = db.session.scalars(stmt).all()
@@ -548,6 +571,7 @@
             for event in generator:
                 if isinstance(event, ModelInvokeCompletedEvent):
                     result_text = event.text
+                    usage = self._merge_usage(usage, event.usage)
                     break
 
             result_text_json = parse_and_check_json_markdown(result_text, [])
@@ -564,8 +588,8 @@
                 }
             )
         except Exception:
-            return []
-        return automatic_metadata_filters
+            return [], usage
+        return automatic_metadata_filters, usage
 
     def _process_metadata_filter_func(
         self, sequence: int, condition: str, metadata_name: str, value: Any, filters: list[Any]
""" @@ -108,7 +110,7 @@ class LoopNode(Node): raise ValueError(f"Invalid value for loop variable {loop_variable.label}") variable_selector = [self._node_id, loop_variable.label] variable = segment_to_variable(segment=processed_segment, selector=variable_selector) - self.graph_runtime_state.variable_pool.add(variable_selector, variable) + self.graph_runtime_state.variable_pool.add(variable_selector, variable.value) loop_variable_selectors[loop_variable.label] = variable_selector inputs[loop_variable.label] = processed_segment.value @@ -117,6 +119,7 @@ class LoopNode(Node): loop_duration_map: dict[str, float] = {} single_loop_variable_map: dict[str, dict[str, Any]] = {} # single loop variable output + loop_usage = LLMUsage.empty_usage() # Start Loop event yield LoopStartedEvent( @@ -163,6 +166,9 @@ class LoopNode(Node): # Update the total tokens from this iteration cost_tokens += graph_engine.graph_runtime_state.total_tokens + # Accumulate usage from the sub-graph execution + loop_usage = self._merge_usage(loop_usage, graph_engine.graph_runtime_state.llm_usage) + # Collect loop variable values after iteration single_loop_variable = {} for key, selector in loop_variable_selectors.items(): @@ -189,6 +195,7 @@ class LoopNode(Node): ) self.graph_runtime_state.total_tokens += cost_tokens + self._accumulate_usage(loop_usage) # Loop completed successfully yield LoopSucceededEvent( start_at=start_at, @@ -196,7 +203,9 @@ class LoopNode(Node): outputs=self._node_data.outputs, steps=loop_count, metadata={ - WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: cost_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: loop_usage.total_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: loop_usage.total_price, + WorkflowNodeExecutionMetadataKey.CURRENCY: loop_usage.currency, "completed_reason": "loop_break" if reach_break_condition else "loop_completed", WorkflowNodeExecutionMetadataKey.LOOP_DURATION_MAP: loop_duration_map, WorkflowNodeExecutionMetadataKey.LOOP_VARIABLE_MAP: single_loop_variable_map, @@ -207,22 +216,28 @@ class LoopNode(Node): node_run_result=NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, metadata={ - WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: loop_usage.total_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: loop_usage.total_price, + WorkflowNodeExecutionMetadataKey.CURRENCY: loop_usage.currency, WorkflowNodeExecutionMetadataKey.LOOP_DURATION_MAP: loop_duration_map, WorkflowNodeExecutionMetadataKey.LOOP_VARIABLE_MAP: single_loop_variable_map, }, outputs=self._node_data.outputs, inputs=inputs, + llm_usage=loop_usage, ) ) except Exception as e: + self._accumulate_usage(loop_usage) yield LoopFailedEvent( start_at=start_at, inputs=inputs, steps=loop_count, metadata={ - WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: loop_usage.total_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: loop_usage.total_price, + WorkflowNodeExecutionMetadataKey.CURRENCY: loop_usage.currency, "completed_reason": "error", WorkflowNodeExecutionMetadataKey.LOOP_DURATION_MAP: loop_duration_map, WorkflowNodeExecutionMetadataKey.LOOP_VARIABLE_MAP: single_loop_variable_map, @@ -235,10 +250,13 @@ class LoopNode(Node): status=WorkflowNodeExecutionStatus.FAILED, error=str(e), metadata={ - WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: 
@@ -235,10 +250,13 @@
                     status=WorkflowNodeExecutionStatus.FAILED,
                     error=str(e),
                     metadata={
-                        WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens,
+                        WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: loop_usage.total_tokens,
+                        WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: loop_usage.total_price,
+                        WorkflowNodeExecutionMetadataKey.CURRENCY: loop_usage.currency,
                         WorkflowNodeExecutionMetadataKey.LOOP_DURATION_MAP: loop_duration_map,
                         WorkflowNodeExecutionMetadataKey.LOOP_VARIABLE_MAP: single_loop_variable_map,
                     },
+                    llm_usage=loop_usage,
                 )
             )
diff --git a/api/core/workflow/nodes/tool/tool_node.py b/api/core/workflow/nodes/tool/tool_node.py
index 1fd9139405..799ad9b92f 100644
--- a/api/core/workflow/nodes/tool/tool_node.py
+++ b/api/core/workflow/nodes/tool/tool_node.py
@@ -6,10 +6,13 @@ from sqlalchemy.orm import Session
 
 from core.callback_handler.workflow_tool_callback_handler import DifyWorkflowCallbackHandler
 from core.file import File, FileTransferMethod
+from core.model_runtime.entities.llm_entities import LLMUsage
+from core.tools.__base.tool import Tool
 from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter
 from core.tools.errors import ToolInvokeError
 from core.tools.tool_engine import ToolEngine
 from core.tools.utils.message_transformer import ToolFileMessageTransformer
+from core.tools.workflow_as_tool.tool import WorkflowTool
 from core.variables.segments import ArrayAnySegment, ArrayFileSegment
 from core.variables.variables import ArrayAnyVariable
 from core.workflow.enums import (
@@ -136,13 +139,14 @@
 
         try:
             # convert tool messages
-            yield from self._transform_message(
+            _ = yield from self._transform_message(
                 messages=message_stream,
                 tool_info=tool_info,
                 parameters_for_log=parameters_for_log,
                 user_id=self.user_id,
                 tenant_id=self.tenant_id,
                 node_id=self._node_id,
+                tool_runtime=tool_runtime,
             )
         except ToolInvokeError as e:
             yield StreamCompletedEvent(
@@ -233,7 +237,8 @@
         user_id: str,
         tenant_id: str,
         node_id: str,
-    ) -> Generator:
+        tool_runtime: Tool,
+    ) -> Generator[NodeEventBase, None, LLMUsage]:
         """
         Convert ToolInvokeMessages into tuple[plain_text, files]
         """
@@ -421,17 +426,34 @@
             is_final=True,
         )
 
+        usage = self._extract_tool_usage(tool_runtime)
+
+        metadata: dict[WorkflowNodeExecutionMetadataKey, Any] = {
+            WorkflowNodeExecutionMetadataKey.TOOL_INFO: tool_info,
+        }
+        if usage.total_tokens > 0:
+            metadata[WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS] = usage.total_tokens
+            metadata[WorkflowNodeExecutionMetadataKey.TOTAL_PRICE] = usage.total_price
+            metadata[WorkflowNodeExecutionMetadataKey.CURRENCY] = usage.currency
+
         yield StreamCompletedEvent(
             node_run_result=NodeRunResult(
                 status=WorkflowNodeExecutionStatus.SUCCEEDED,
                 outputs={"text": text, "files": ArrayFileSegment(value=files), "json": json_output, **variables},
-                metadata={
-                    WorkflowNodeExecutionMetadataKey.TOOL_INFO: tool_info,
-                },
+                metadata=metadata,
                 inputs=parameters_for_log,
+                llm_usage=usage,
             )
         )
 
+        return usage
+
+    @staticmethod
+    def _extract_tool_usage(tool_runtime: Tool) -> LLMUsage:
+        if isinstance(tool_runtime, WorkflowTool):
+            return tool_runtime.latest_usage
+        return LLMUsage.empty_usage()
+
     @classmethod
     def _extract_variable_selector_to_variable_mapping(
         cls,
diff --git a/api/docker/entrypoint.sh b/api/docker/entrypoint.sh
index 97f5df388f..93708b5c87 100755
--- a/api/docker/entrypoint.sh
+++ b/api/docker/entrypoint.sh
@@ -64,7 +64,8 @@ if [[ "${MODE}" == "worker" ]]; then
 
   exec celery -A app.celery worker -P ${WORKER_POOL} $CONCURRENCY_OPTION \
     --max-tasks-per-child ${MAX_TASKS_PER_CHILD:-50} --loglevel ${LOG_LEVEL:-INFO} \
-    -Q ${DEFAULT_QUEUES}
+    -Q ${DEFAULT_QUEUES} \
+    --prefetch-multiplier=${CELERY_PREFETCH_MULTIPLIER:-1}
 
 elif [[ "${MODE}" == "beat" ]]; then
   exec celery -A app.celery beat --loglevel ${LOG_LEVEL:-INFO}
--prefetch-multiplier=${CELERY_PREFETCH_MULTIPLIER:-1} elif [[ "${MODE}" == "beat" ]]; then exec celery -A app.celery beat --loglevel ${LOG_LEVEL:-INFO} diff --git a/api/libs/helper.py b/api/libs/helper.py index b878141d8e..60484dd40b 100644 --- a/api/libs/helper.py +++ b/api/libs/helper.py @@ -81,6 +81,8 @@ class AvatarUrlField(fields.Raw): from models import Account if isinstance(obj, Account) and obj.avatar is not None: + if obj.avatar.startswith(("http://", "https://")): + return obj.avatar return file_helpers.get_signed_file_url(obj.avatar) return None diff --git a/api/libs/token.py b/api/libs/token.py index 4be25696e7..0b40f18143 100644 --- a/api/libs/token.py +++ b/api/libs/token.py @@ -12,6 +12,7 @@ from constants import ( COOKIE_NAME_CSRF_TOKEN, COOKIE_NAME_PASSPORT, COOKIE_NAME_REFRESH_TOKEN, + COOKIE_NAME_WEBAPP_ACCESS_TOKEN, HEADER_NAME_CSRF_TOKEN, HEADER_NAME_PASSPORT, ) @@ -81,6 +82,14 @@ def extract_access_token(request: Request) -> str | None: return _try_extract_from_cookie(request) or _try_extract_from_header(request) +def extract_webapp_access_token(request: Request) -> str | None: + """ + Try to extract webapp access token from cookie, then header. + """ + + return request.cookies.get(_real_cookie_name(COOKIE_NAME_WEBAPP_ACCESS_TOKEN)) or _try_extract_from_header(request) + + def extract_webapp_passport(app_code: str, request: Request) -> str | None: """ Try to extract app token from header or params. @@ -155,6 +164,10 @@ def clear_access_token_from_cookie(response: Response, samesite: str = "Lax"): _clear_cookie(response, COOKIE_NAME_ACCESS_TOKEN, samesite) +def clear_webapp_access_token_from_cookie(response: Response, samesite: str = "Lax"): + _clear_cookie(response, COOKIE_NAME_WEBAPP_ACCESS_TOKEN, samesite) + + def clear_refresh_token_from_cookie(response: Response): _clear_cookie(response, COOKIE_NAME_REFRESH_TOKEN) diff --git a/api/migrations/versions/2025_10_21_1430-ae662b25d9bc_remove_builtin_template_user.py b/api/migrations/versions/2025_10_21_1430-ae662b25d9bc_remove_builtin_template_user.py new file mode 100644 index 0000000000..086a02e7c3 --- /dev/null +++ b/api/migrations/versions/2025_10_21_1430-ae662b25d9bc_remove_builtin_template_user.py @@ -0,0 +1,36 @@ +"""remove-builtin-template-user + +Revision ID: ae662b25d9bc +Revises: d98acf217d43 +Create Date: 2025-10-21 14:30:28.566192 + +""" +from alembic import op +import models as models +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = 'ae662b25d9bc' +down_revision = 'd98acf217d43' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + + with op.batch_alter_table('pipeline_built_in_templates', schema=None) as batch_op: + batch_op.drop_column('updated_by') + batch_op.drop_column('created_by') + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table('pipeline_built_in_templates', schema=None) as batch_op: + batch_op.add_column(sa.Column('created_by', sa.UUID(), autoincrement=False, nullable=False)) + batch_op.add_column(sa.Column('updated_by', sa.UUID(), autoincrement=False, nullable=True)) + + # ### end Alembic commands ### diff --git a/api/models/dataset.py b/api/models/dataset.py index 5653445f2b..4a9e2688b8 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -1239,15 +1239,6 @@ class PipelineBuiltInTemplate(Base): # type: ignore[name-defined] language = mapped_column(db.String(255), nullable=False) created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) - created_by = mapped_column(StringUUID, nullable=False) - updated_by = mapped_column(StringUUID, nullable=True) - - @property - def created_user_name(self): - account = db.session.query(Account).where(Account.id == self.created_by).first() - if account: - return account.name - return "" class PipelineCustomizedTemplate(Base): # type: ignore[name-defined] diff --git a/api/models/tools.py b/api/models/tools.py index 667e049d0b..cc80ddcf51 100644 --- a/api/models/tools.py +++ b/api/models/tools.py @@ -222,7 +222,7 @@ class WorkflowToolProvider(TypeBase): sa.UniqueConstraint("tenant_id", "app_id", name="unique_workflow_tool_provider_app_id"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) # name of the workflow provider name: Mapped[str] = mapped_column(String(255), nullable=False) # label of the workflow provider diff --git a/api/pyproject.toml b/api/pyproject.toml index 544ec9b16e..91d45587d5 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "dify-api" -version = "1.9.1" +version = "1.9.2" requires-python = ">=3.11,<3.13" dependencies = [ diff --git a/api/services/account_service.py b/api/services/account_service.py index cb0eb7a9dd..13c3993fb5 100644 --- a/api/services/account_service.py +++ b/api/services/account_service.py @@ -13,7 +13,7 @@ from sqlalchemy.orm import Session from werkzeug.exceptions import Unauthorized from configs import dify_config -from constants.languages import language_timezone_mapping, languages +from constants.languages import get_valid_language, language_timezone_mapping from events.tenant_event import tenant_was_created from extensions.ext_database import db from extensions.ext_redis import redis_client, redis_fallback @@ -1259,7 +1259,7 @@ class RegisterService: return f"member_invite:token:{token}" @classmethod - def setup(cls, email: str, name: str, password: str, ip_address: str): + def setup(cls, email: str, name: str, password: str, ip_address: str, language: str): """ Setup dify @@ -1269,11 +1269,10 @@ class RegisterService: :param ip_address: ip address """ try: - # Register account = AccountService.create_account( email=email, name=name, - interface_language=languages[0], + interface_language=get_valid_language(language), password=password, is_setup=True, ) @@ -1315,7 +1314,7 @@ class RegisterService: account = AccountService.create_account( email=email, name=name, - interface_language=language or languages[0], + interface_language=get_valid_language(language), password=password, is_setup=is_setup, ) diff --git a/api/services/feature_service.py 
b/api/services/feature_service.py index 19d96cb972..148442f76e 100644 --- a/api/services/feature_service.py +++ b/api/services/feature_service.py @@ -174,6 +174,7 @@ class FeatureService: if dify_config.ENTERPRISE_ENABLED: features.webapp_copyright_enabled = True + features.knowledge_pipeline.publish_enabled = True cls._fulfill_params_from_workspace_info(features, tenant_id) return features diff --git a/api/services/rag_pipeline/pipeline_template/database/database_retrieval.py b/api/services/rag_pipeline/pipeline_template/database/database_retrieval.py index ec91f79606..908f9a2684 100644 --- a/api/services/rag_pipeline/pipeline_template/database/database_retrieval.py +++ b/api/services/rag_pipeline/pipeline_template/database/database_retrieval.py @@ -74,5 +74,4 @@ class DatabasePipelineTemplateRetrieval(PipelineTemplateRetrievalBase): "chunk_structure": pipeline_template.chunk_structure, "export_data": pipeline_template.yaml_content, "graph": graph_data, - "created_by": pipeline_template.created_user_name, } diff --git a/api/services/tools/workflow_tools_manage_service.py b/api/services/tools/workflow_tools_manage_service.py index 2449536d5c..b1cc963681 100644 --- a/api/services/tools/workflow_tools_manage_service.py +++ b/api/services/tools/workflow_tools_manage_service.py @@ -4,6 +4,7 @@ from datetime import datetime from typing import Any from sqlalchemy import or_, select +from sqlalchemy.orm import Session from core.model_runtime.utils.encoders import jsonable_encoder from core.tools.__base.tool_provider import ToolProviderController @@ -13,6 +14,7 @@ from core.tools.utils.workflow_configuration_sync import WorkflowToolConfigurati from core.tools.workflow_as_tool.provider import WorkflowToolProviderController from core.tools.workflow_as_tool.tool import WorkflowTool from extensions.ext_database import db +from libs.uuid_utils import uuidv7 from models.model import App from models.tools import WorkflowToolProvider from models.workflow import Workflow @@ -63,27 +65,27 @@ class WorkflowToolManageService: if workflow is None: raise ValueError(f"Workflow not found for app {workflow_app_id}") - workflow_tool_provider = WorkflowToolProvider( - tenant_id=tenant_id, - user_id=user_id, - app_id=workflow_app_id, - name=name, - label=label, - icon=json.dumps(icon), - description=description, - parameter_configuration=json.dumps(parameters), - privacy_policy=privacy_policy, - version=workflow.version, - ) + with Session(db.engine, expire_on_commit=False) as session, session.begin(): + workflow_tool_provider = WorkflowToolProvider( + id=str(uuidv7()), + tenant_id=tenant_id, + user_id=user_id, + app_id=workflow_app_id, + name=name, + label=label, + icon=json.dumps(icon), + description=description, + parameter_configuration=json.dumps(parameters), + privacy_policy=privacy_policy, + version=workflow.version, + ) + session.add(workflow_tool_provider) try: WorkflowToolProviderController.from_db(workflow_tool_provider) except Exception as e: raise ValueError(str(e)) - db.session.add(workflow_tool_provider) - db.session.commit() - if labels is not None: ToolLabelManager.update_tool_labels( ToolTransformService.workflow_provider_to_controller(workflow_tool_provider), labels @@ -168,7 +170,6 @@ class WorkflowToolManageService: except Exception as e: raise ValueError(str(e)) - db.session.add(workflow_tool_provider) db.session.commit() if labels is not None: diff --git a/api/services/variable_truncator.py b/api/services/variable_truncator.py index a8f37c31c8..4e13d2d964 100644 --- 
a/api/services/variable_truncator.py
+++ b/api/services/variable_truncator.py
@@ -17,6 +17,7 @@ from core.variables.segments import (
     StringSegment,
 )
 from core.variables.utils import dumps_with_segments
+from core.workflow.nodes.variable_assigner.common.helpers import UpdatedVariable
 
 _MAX_DEPTH = 100
@@ -56,7 +57,7 @@ class UnknownTypeError(Exception):
     pass
 
-JSONTypes: TypeAlias = int | float | str | list | dict | None | bool
+JSONTypes: TypeAlias = int | float | str | list[object] | dict[str, object] | None | bool
 
 @dataclasses.dataclass(frozen=True)
@@ -202,6 +203,9 @@ class VariableTruncator:
         """Recursively calculate JSON size without serialization."""
         if isinstance(value, Segment):
             return VariableTruncator.calculate_json_size(value.value)
+        if isinstance(value, UpdatedVariable):
+            # TODO(Workflow): migrate UpdatedVariable serialization upstream and drop this fallback.
+            return VariableTruncator.calculate_json_size(value.model_dump(), depth=depth + 1)
         if depth > _MAX_DEPTH:
             raise MaxDepthExceededError()
         if isinstance(value, str):
@@ -248,14 +252,14 @@ class VariableTruncator:
             truncated_value = value[:truncated_size] + "..."
         return _PartResult(truncated_value, self.calculate_json_size(truncated_value), True)
 
-    def _truncate_array(self, value: list, target_size: int) -> _PartResult[list]:
+    def _truncate_array(self, value: list[object], target_size: int) -> _PartResult[list[object]]:
         """
         Truncate array with correct strategy:
 
         1. First limit to 20 items
         2. If still too large, truncate individual items
         """
-        truncated_value: list[Any] = []
+        truncated_value: list[object] = []
         truncated = False
 
         used_size = self.calculate_json_size([])
@@ -278,7 +282,11 @@ class VariableTruncator:
             if used_size > target_size:
                 break
 
-            part_result = self._truncate_json_primitives(item, target_size - used_size)
+            remaining_budget = target_size - used_size
+            if item is None or isinstance(item, (str, list, dict, bool, int, float)):
+                part_result = self._truncate_json_primitives(item, remaining_budget)
+            else:
+                raise UnknownTypeError(f"got unknown type {type(item)} in array truncation")
             truncated_value.append(part_result.value)
             used_size += part_result.value_size
             truncated = part_result.truncated
@@ -369,10 +377,10 @@ class VariableTruncator:
     def _truncate_json_primitives(self, val: str, target_size: int) -> _PartResult[str]: ...
 
     @overload
-    def _truncate_json_primitives(self, val: list, target_size: int) -> _PartResult[list]: ...
+    def _truncate_json_primitives(self, val: list[object], target_size: int) -> _PartResult[list[object]]: ...
 
     @overload
-    def _truncate_json_primitives(self, val: dict, target_size: int) -> _PartResult[dict]: ...
+    def _truncate_json_primitives(self, val: dict[str, object], target_size: int) -> _PartResult[dict[str, object]]: ...
 
     @overload
     def _truncate_json_primitives(self, val: bool, target_size: int) -> _PartResult[bool]: ...  # type: ignore
@@ -387,10 +395,15 @@ class VariableTruncator:
     def _truncate_json_primitives(self, val: None, target_size: int) -> _PartResult[None]: ...
 
     def _truncate_json_primitives(
-        self, val: str | list | dict | bool | int | float | None, target_size: int
+        self,
+        val: UpdatedVariable | str | list[object] | dict[str, object] | bool | int | float | None,
+        target_size: int,
     ) -> _PartResult[Any]:
         """Truncate a value within an object to fit within budget."""
-        if isinstance(val, str):
+        if isinstance(val, UpdatedVariable):
+            # TODO(Workflow): push UpdatedVariable normalization closer to its producer.
+            return self._truncate_object(val.model_dump(), target_size)
+        elif isinstance(val, str):
             return self._truncate_string(val, target_size)
         elif isinstance(val, list):
             return self._truncate_array(val, target_size)
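Both `UpdatedVariable` branches above follow the same pattern: normalize the pydantic model to plain JSON types with `model_dump()` at the entry points, then let the existing size and truncation logic handle the result. A self-contained sketch of that idea (the names below are invented for the demo, not Dify's API):

    from pydantic import BaseModel

    class UpdatedVariableDemo(BaseModel):
        name: str
        value: str

    def json_size(value: object) -> int:
        # Approximate the serialized JSON size without serializing.
        if isinstance(value, BaseModel):
            # Normalize models once; the generic branches below never see pydantic.
            return json_size(value.model_dump())
        if isinstance(value, str):
            return len(value) + 2  # surrounding quotes
        if isinstance(value, bool) or value is None:
            return 4  # "true"/"null", close enough for budgeting
        if isinstance(value, (int, float)):
            return len(str(value))
        if isinstance(value, dict):
            return 2 + sum(json_size(k) + json_size(v) + 2 for k, v in value.items())
        if isinstance(value, list):
            return 2 + sum(json_size(v) + 1 for v in value)
        raise TypeError(f"unsupported type: {type(value)}")

    print(json_size(UpdatedVariableDemo(name="x", value="hello")))  # 29

Keeping the normalization at the boundaries means the recursive core stays free of pydantic concerns, which is also what the TODOs suggest pushing further upstream.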
diff --git a/api/tests/integration_tests/conftest.py b/api/tests/integration_tests/conftest.py
index 9dc7b76e04..4395a9815a 100644
--- a/api/tests/integration_tests/conftest.py
+++ b/api/tests/integration_tests/conftest.py
@@ -58,6 +58,7 @@ def setup_account(request) -> Generator[Account, None, None]:
         name=name,
         password=secrets.token_hex(16),
         ip_address="localhost",
+        language="en-US",
     )
 
     with _CACHED_APP.test_request_context():
diff --git a/api/tests/test_containers_integration_tests/services/test_account_service.py b/api/tests/test_containers_integration_tests/services/test_account_service.py
index c59fc50f08..4d4e77a802 100644
--- a/api/tests/test_containers_integration_tests/services/test_account_service.py
+++ b/api/tests/test_containers_integration_tests/services/test_account_service.py
@@ -2299,6 +2299,7 @@ class TestRegisterService:
                 name=admin_name,
                 password=admin_password,
                 ip_address=ip_address,
+                language="en-US",
             )
 
             # Verify account was created
@@ -2348,6 +2349,7 @@ class TestRegisterService:
                 name=admin_name,
                 password=admin_password,
                 ip_address=ip_address,
+                language="en-US",
             )
 
             # Verify no entities were created (rollback worked)
diff --git a/api/tests/test_containers_integration_tests/services/tools/test_tools_transform_service.py b/api/tests/test_containers_integration_tests/services/tools/test_tools_transform_service.py
index a6b7695672..08c7d07620 100644
--- a/api/tests/test_containers_integration_tests/services/tools/test_tools_transform_service.py
+++ b/api/tests/test_containers_integration_tests/services/tools/test_tools_transform_service.py
@@ -6,6 +6,7 @@ from faker import Faker
 from core.tools.entities.api_entities import ToolProviderApiEntity
 from core.tools.entities.common_entities import I18nObject
 from core.tools.entities.tool_entities import ToolProviderType
+from libs.uuid_utils import uuidv7
 from models.tools import ApiToolProvider, BuiltinToolProvider, MCPToolProvider, WorkflowToolProvider
 from services.plugin.plugin_service import PluginService
 from services.tools.tools_transform_service import ToolTransformService
@@ -67,6 +68,7 @@ class TestToolTransformService:
             )
         elif provider_type == "workflow":
             provider = WorkflowToolProvider(
+                id=str(uuidv7()),
                 name=fake.company(),
                 description=fake.text(max_nb_chars=100),
                 icon='{"background": "#FF6B6B", "content": "🔧"}',
@@ -759,6 +761,7 @@ class TestToolTransformService:
 
         # Create workflow tool provider
         provider = WorkflowToolProvider(
+            id=str(uuidv7()),
             name=fake.company(),
             description=fake.text(max_nb_chars=100),
             icon='{"background": "#FF6B6B", "content": "🔧"}',
diff --git a/api/tests/unit_tests/core/tools/utils/test_parser.py b/api/tests/unit_tests/core/tools/utils/test_parser.py
index e1eab21ca4..f39158aa59 100644
--- a/api/tests/unit_tests/core/tools/utils/test_parser.py
+++ b/api/tests/unit_tests/core/tools/utils/test_parser.py
@@ -109,3 +109,83 @@ def test_parse_openapi_to_tool_bundle_properties_all_of(app):
     assert tool_bundles[0].parameters[0].llm_description == "desc prop1"
     # TODO: support enum in OpenAPI
     # assert set(tool_bundles[0].parameters[0].options) == {"option1", "option2", "option3"}
+
+
+def test_parse_openapi_to_tool_bundle_default_value_type_casting(app):
+    """
+    Test that default values are properly cast to match parameter types.
+ This addresses the issue where array default values like [] cause validation errors + when parameter type is inferred as string/number/boolean. + """ + openapi = { + "openapi": "3.0.0", + "info": {"title": "Test API", "version": "1.0.0"}, + "servers": [{"url": "https://example.com"}], + "paths": { + "/product/create": { + "post": { + "operationId": "createProduct", + "summary": "Create a product", + "requestBody": { + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "categories": { + "description": "List of category identifiers", + "default": [], + "type": "array", + "items": {"type": "string"}, + }, + "name": { + "description": "Product name", + "default": "Default Product", + "type": "string", + }, + "price": {"description": "Product price", "default": 0.0, "type": "number"}, + "available": { + "description": "Product availability", + "default": True, + "type": "boolean", + }, + }, + } + } + } + }, + "responses": {"200": {"description": "Default Response"}}, + } + } + }, + } + + with app.test_request_context(): + tool_bundles = ApiBasedToolSchemaParser.parse_openapi_to_tool_bundle(openapi) + + assert len(tool_bundles) == 1 + bundle = tool_bundles[0] + assert len(bundle.parameters) == 4 + + # Find parameters by name + params_by_name = {param.name: param for param in bundle.parameters} + + # Check categories parameter (array type with [] default) + categories_param = params_by_name["categories"] + assert categories_param.type == "array" # Will be detected by _get_tool_parameter_type + assert categories_param.default is None # Array default [] is converted to None + + # Check name parameter (string type with string default) + name_param = params_by_name["name"] + assert name_param.type == "string" + assert name_param.default == "Default Product" + + # Check price parameter (number type with number default) + price_param = params_by_name["price"] + assert price_param.type == "number" + assert price_param.default == 0.0 + + # Check available parameter (boolean type with boolean default) + available_param = params_by_name["available"] + assert available_param.type == "boolean" + assert available_param.default is True diff --git a/api/tests/unit_tests/libs/test_token.py b/api/tests/unit_tests/libs/test_token.py index 22790fa4a6..a611d3eb0e 100644 --- a/api/tests/unit_tests/libs/test_token.py +++ b/api/tests/unit_tests/libs/test_token.py @@ -1,5 +1,5 @@ -from constants import COOKIE_NAME_ACCESS_TOKEN -from libs.token import extract_access_token +from constants import COOKIE_NAME_ACCESS_TOKEN, COOKIE_NAME_WEBAPP_ACCESS_TOKEN +from libs.token import extract_access_token, extract_webapp_access_token class MockRequest: @@ -14,10 +14,12 @@ def test_extract_access_token(): return MockRequest(headers, cookies, args) test_cases = [ - (_mock_request({"Authorization": "Bearer 123"}, {}, {}), "123"), - (_mock_request({}, {COOKIE_NAME_ACCESS_TOKEN: "123"}, {}), "123"), - (_mock_request({}, {}, {}), None), - (_mock_request({"Authorization": "Bearer_aaa 123"}, {}, {}), None), + (_mock_request({"Authorization": "Bearer 123"}, {}, {}), "123", "123"), + (_mock_request({}, {COOKIE_NAME_ACCESS_TOKEN: "123"}, {}), "123", None), + (_mock_request({}, {}, {}), None, None), + (_mock_request({"Authorization": "Bearer_aaa 123"}, {}, {}), None, None), + (_mock_request({}, {COOKIE_NAME_WEBAPP_ACCESS_TOKEN: "123"}, {}), None, "123"), ] - for request, expected in test_cases: - assert extract_access_token(request) == expected # pyright: ignore[reportArgumentType] + for request, 
expected_console, expected_webapp in test_cases: + assert extract_access_token(request) == expected_console # pyright: ignore[reportArgumentType] + assert extract_webapp_access_token(request) == expected_webapp # pyright: ignore[reportArgumentType] diff --git a/api/tests/unit_tests/services/test_account_service.py b/api/tests/unit_tests/services/test_account_service.py index 737202f8de..627a04bcd0 100644 --- a/api/tests/unit_tests/services/test_account_service.py +++ b/api/tests/unit_tests/services/test_account_service.py @@ -893,7 +893,7 @@ class TestRegisterService: mock_dify_setup.return_value = mock_dify_setup_instance # Execute test - RegisterService.setup("admin@example.com", "Admin User", "password123", "192.168.1.1") + RegisterService.setup("admin@example.com", "Admin User", "password123", "192.168.1.1", "en-US") # Verify results mock_create_account.assert_called_once_with( @@ -925,6 +925,7 @@ class TestRegisterService: "Admin User", "password123", "192.168.1.1", + "en-US", ) # Verify rollback operations were called diff --git a/api/uv.lock b/api/uv.lock index 2bed6821bb..3703425c02 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1305,7 +1305,7 @@ wheels = [ [[package]] name = "dify-api" -version = "1.9.1" +version = "1.9.2" source = { virtual = "." } dependencies = [ { name = "apscheduler" }, diff --git a/dev/basedpyright-check b/dev/basedpyright-check index ef58ed1f57..1c87b27d6f 100755 --- a/dev/basedpyright-check +++ b/dev/basedpyright-check @@ -10,7 +10,7 @@ PATH_TO_CHECK="$1" # run basedpyright checks if [ -n "$PATH_TO_CHECK" ]; then - uv run --directory api --dev basedpyright "$PATH_TO_CHECK" + uv run --directory api --dev -- basedpyright --threads $(nproc) "$PATH_TO_CHECK" else - uv run --directory api --dev basedpyright + uv run --directory api --dev -- basedpyright --threads $(nproc) fi diff --git a/docker/.env.example b/docker/.env.example index 2c6104809c..56c4c70294 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -259,6 +259,18 @@ POSTGRES_MAINTENANCE_WORK_MEM=64MB # Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB +# Sets the maximum allowed duration of any statement before termination. +# Default is 60000 milliseconds. +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-STATEMENT-TIMEOUT +POSTGRES_STATEMENT_TIMEOUT=60000 + +# Sets the maximum allowed duration of any idle in-transaction session before termination. +# Default is 60000 milliseconds. +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT +POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=60000 + # ------------------------------ # Redis Configuration # This Redis configuration is used for caching and for pub/sub during conversation. diff --git a/docker/docker-compose-template.yaml b/docker/docker-compose-template.yaml index 12dc3aa48d..8e62acfa60 100644 --- a/docker/docker-compose-template.yaml +++ b/docker/docker-compose-template.yaml @@ -2,7 +2,7 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:1.9.1 + image: langgenius/dify-api:1.9.2 restart: always environment: # Use the shared environment variables. @@ -24,13 +24,6 @@ services: volumes: # Mount the storage directory to the container, for storing user files. 
- ./volumes/app/storage:/app/api/storage - # TODO: Remove this entrypoint override when weaviate-client 4.17.0 is included in the next Dify release - entrypoint: - - /bin/bash - - -c - - | - uv pip install --system weaviate-client==4.17.0 - exec /bin/bash /app/api/docker/entrypoint.sh networks: - ssrf_proxy_network - default @@ -38,7 +31,7 @@ services: # worker service # The Celery worker for processing all queues (dataset, workflow, mail, etc.) worker: - image: langgenius/dify-api:1.9.1 + image: langgenius/dify-api:1.9.2 restart: always environment: # Use the shared environment variables. @@ -58,13 +51,6 @@ services: volumes: # Mount the storage directory to the container, for storing user files. - ./volumes/app/storage:/app/api/storage - # TODO: Remove this entrypoint override when weaviate-client 4.17.0 is included in the next Dify release - entrypoint: - - /bin/bash - - -c - - | - uv pip install --system weaviate-client==4.17.0 - exec /bin/bash /app/api/docker/entrypoint.sh networks: - ssrf_proxy_network - default @@ -72,7 +58,7 @@ services: # worker_beat service # Celery beat for scheduling periodic tasks. worker_beat: - image: langgenius/dify-api:1.9.1 + image: langgenius/dify-api:1.9.2 restart: always environment: # Use the shared environment variables. @@ -90,7 +76,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:1.9.1 + image: langgenius/dify-web:1.9.2 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} @@ -129,6 +115,8 @@ services: -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}' -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}' -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}' + -c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-60000}' + -c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-60000}' volumes: - ./volumes/db/data:/var/lib/postgresql/data healthcheck: @@ -191,7 +179,7 @@ services: # plugin daemon plugin_daemon: - image: langgenius/dify-plugin-daemon:0.3.0-local + image: langgenius/dify-plugin-daemon:0.3.3-local restart: always environment: # Use the shared environment variables. 
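The Postgres services above now accept `POSTGRES_STATEMENT_TIMEOUT` and `POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT` (both 60000 ms by default). A quick way to confirm the limits are live after `docker compose up`, sketched with SQLAlchemy and assuming the default credentials and exposed port from docker/.env.example (adjust if yours differ):

    from sqlalchemy import create_engine, text

    # Default local setup: user "postgres", password "difyai123456", database "dify".
    engine = create_engine("postgresql://postgres:difyai123456@localhost:5432/dify")

    with engine.connect() as conn:
        for guc in ("statement_timeout", "idle_in_transaction_session_timeout"):
            value = conn.execute(text(f"SHOW {guc}")).scalar_one()
            print(f"{guc} = {value}")

Postgres reports these settings in human-readable units, so the shipped 60000 ms defaults print as `1min`.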
diff --git a/docker/docker-compose.middleware.yaml b/docker/docker-compose.middleware.yaml index ebc619a50f..9a1b9b53ba 100644 --- a/docker/docker-compose.middleware.yaml +++ b/docker/docker-compose.middleware.yaml @@ -15,6 +15,8 @@ services: -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}' -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}' -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}' + -c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-60000}' + -c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-60000}' volumes: - ${PGDATA_HOST_VOLUME:-./volumes/db/data}:/var/lib/postgresql/data ports: @@ -85,7 +87,7 @@ services: # plugin daemon plugin_daemon: - image: langgenius/dify-plugin-daemon:0.3.0-local + image: langgenius/dify-plugin-daemon:0.3.3-local restart: always env_file: - ./middleware.env diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 5c5448a33a..dd2428ce06 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -68,6 +68,8 @@ x-shared-env: &shared-api-worker-env POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB} POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB} POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB} + POSTGRES_STATEMENT_TIMEOUT: ${POSTGRES_STATEMENT_TIMEOUT:-60000} + POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT: ${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-60000} REDIS_HOST: ${REDIS_HOST:-redis} REDIS_PORT: ${REDIS_PORT:-6379} REDIS_USERNAME: ${REDIS_USERNAME:-} @@ -614,7 +616,7 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:1.9.1 + image: langgenius/dify-api:1.9.2 restart: always environment: # Use the shared environment variables. @@ -636,13 +638,6 @@ services: volumes: # Mount the storage directory to the container, for storing user files. - ./volumes/app/storage:/app/api/storage - # TODO: Remove this entrypoint override when weaviate-client 4.17.0 is included in the next Dify release - entrypoint: - - /bin/bash - - -c - - | - uv pip install --system weaviate-client==4.17.0 - exec /bin/bash /app/api/docker/entrypoint.sh networks: - ssrf_proxy_network - default @@ -650,7 +645,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:1.9.1 + image: langgenius/dify-api:1.9.2 restart: always environment: # Use the shared environment variables. @@ -670,13 +665,6 @@ services: volumes: # Mount the storage directory to the container, for storing user files. - ./volumes/app/storage:/app/api/storage - # TODO: Remove this entrypoint override when weaviate-client 4.17.0 is included in the next Dify release - entrypoint: - - /bin/bash - - -c - - | - uv pip install --system weaviate-client==4.17.0 - exec /bin/bash /app/api/docker/entrypoint.sh networks: - ssrf_proxy_network - default @@ -684,7 +672,7 @@ services: # worker_beat service # Celery beat for scheduling periodic tasks. worker_beat: - image: langgenius/dify-api:1.9.1 + image: langgenius/dify-api:1.9.2 restart: always environment: # Use the shared environment variables. @@ -702,7 +690,7 @@ services: # Frontend web application. 
web: - image: langgenius/dify-web:1.9.1 + image: langgenius/dify-web:1.9.2 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} @@ -741,6 +729,8 @@ services: -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}' -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}' -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}' + -c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-60000}' + -c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-60000}' volumes: - ./volumes/db/data:/var/lib/postgresql/data healthcheck: @@ -803,7 +793,7 @@ services: # plugin daemon plugin_daemon: - image: langgenius/dify-plugin-daemon:0.3.0-local + image: langgenius/dify-plugin-daemon:0.3.3-local restart: always environment: # Use the shared environment variables. diff --git a/docker/middleware.env.example b/docker/middleware.env.example index 2eba62f594..c9bb8c0528 100644 --- a/docker/middleware.env.example +++ b/docker/middleware.env.example @@ -40,6 +40,18 @@ POSTGRES_MAINTENANCE_WORK_MEM=64MB # Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB +# Sets the maximum allowed duration of any statement before termination. +# Default is 60000 milliseconds. +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-STATEMENT-TIMEOUT +POSTGRES_STATEMENT_TIMEOUT=60000 + +# Sets the maximum allowed duration of any idle in-transaction session before termination. +# Default is 60000 milliseconds. +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT +POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=60000 + # ----------------------------- # Environment Variables for redis Service # ----------------------------- diff --git a/web/.storybook/main.ts b/web/.storybook/main.ts index e656115ceb..ca56261431 100644 --- a/web/.storybook/main.ts +++ b/web/.storybook/main.ts @@ -2,8 +2,7 @@ import type { StorybookConfig } from '@storybook/nextjs' import path from 'node:path' import { fileURLToPath } from 'node:url' -const __filename = fileURLToPath(import.meta.url) -const __dirname = path.dirname(__filename) +const storybookDir = path.dirname(fileURLToPath(import.meta.url)) const config: StorybookConfig = { stories: ['../app/components/**/*.stories.@(js|jsx|mjs|ts|tsx)'], @@ -36,9 +35,9 @@ const config: StorybookConfig = { config.resolve.alias = { ...config.resolve.alias, // Mock the plugin index files to avoid circular dependencies - [path.resolve(__dirname, '../app/components/base/prompt-editor/plugins/context-block/index.tsx')]: path.resolve(__dirname, '__mocks__/context-block.tsx'), - [path.resolve(__dirname, '../app/components/base/prompt-editor/plugins/history-block/index.tsx')]: path.resolve(__dirname, '__mocks__/history-block.tsx'), - [path.resolve(__dirname, '../app/components/base/prompt-editor/plugins/query-block/index.tsx')]: path.resolve(__dirname, '__mocks__/query-block.tsx'), + [path.resolve(storybookDir, '../app/components/base/prompt-editor/plugins/context-block/index.tsx')]: path.resolve(storybookDir, '__mocks__/context-block.tsx'), + [path.resolve(storybookDir, '../app/components/base/prompt-editor/plugins/history-block/index.tsx')]: path.resolve(storybookDir, '__mocks__/history-block.tsx'), + [path.resolve(storybookDir, '../app/components/base/prompt-editor/plugins/query-block/index.tsx')]: path.resolve(storybookDir, '__mocks__/query-block.tsx'), } return config }, diff --git 
a/web/.storybook/utils/audio-player-manager.mock.ts b/web/.storybook/utils/audio-player-manager.mock.ts
new file mode 100644
index 0000000000..aca8b56b76
--- /dev/null
+++ b/web/.storybook/utils/audio-player-manager.mock.ts
@@ -0,0 +1,64 @@
+import { AudioPlayerManager } from '@/app/components/base/audio-btn/audio.player.manager'
+
+type PlayerCallback = ((event: string) => void) | null
+
+class MockAudioPlayer {
+  private callback: PlayerCallback = null
+  private finishTimer?: ReturnType<typeof setTimeout>
+
+  public setCallback(callback: PlayerCallback) {
+    this.callback = callback
+  }
+
+  public playAudio() {
+    this.clearTimer()
+    this.callback?.('play')
+    this.finishTimer = setTimeout(() => {
+      this.callback?.('ended')
+    }, 2000)
+  }
+
+  public pauseAudio() {
+    this.clearTimer()
+    this.callback?.('paused')
+  }
+
+  private clearTimer() {
+    if (this.finishTimer)
+      clearTimeout(this.finishTimer)
+  }
+}
+
+class MockAudioPlayerManager {
+  private readonly player = new MockAudioPlayer()
+
+  public getAudioPlayer(
+    _url: string,
+    _isPublic: boolean,
+    _id: string | undefined,
+    _msgContent: string | null | undefined,
+    _voice: string | undefined,
+    callback: PlayerCallback,
+  ) {
+    this.player.setCallback(callback)
+    return this.player
+  }
+
+  public resetMsgId() {
+    // No-op for the mock
+  }
+}
+
+export const ensureMockAudioManager = () => {
+  const managerAny = AudioPlayerManager as unknown as {
+    getInstance: () => AudioPlayerManager
+    __isStorybookMockInstalled?: boolean
+  }
+
+  if (managerAny.__isStorybookMockInstalled)
+    return
+
+  const mock = new MockAudioPlayerManager()
+  managerAny.getInstance = () => mock as unknown as AudioPlayerManager
+  managerAny.__isStorybookMockInstalled = true
+}
diff --git a/web/app/(commonLayout)/layout.tsx b/web/app/(commonLayout)/layout.tsx
index ed1c995e25..be9c4fe49a 100644
--- a/web/app/(commonLayout)/layout.tsx
+++ b/web/app/(commonLayout)/layout.tsx
@@ -9,6 +9,7 @@
 import { EventEmitterContextProvider } from '@/context/event-emitter'
 import { ProviderContextProvider } from '@/context/provider-context'
 import { ModalContextProvider } from '@/context/modal-context'
 import GotoAnything from '@/app/components/goto-anything'
+import Zendesk from '@/app/components/base/zendesk'
 
 const Layout = ({ children }: { children: ReactNode }) => {
   return (
@@ -28,6 +29,7 @@ const Layout = ({ children }: { children: ReactNode }) => {
       <GotoAnything />
+      <Zendesk />
     </>
   )
diff --git a/web/app/components/app/annotation/index.tsx b/web/app/components/app/annotation/index.tsx
index b450c106ee..8718890e35 100644
--- a/web/app/components/app/annotation/index.tsx
+++ b/web/app/components/app/annotation/index.tsx
@@ -53,7 +53,6 @@ const Annotation: FC<Props> = (props) => {
   const [isShowViewModal, setIsShowViewModal] = useState(false)
   const [selectedIds, setSelectedIds] = useState<string[]>([])
   const debouncedQueryParams = useDebounce(queryParams, { wait: 500 })
-  const [isBatchDeleting, setIsBatchDeleting] = useState(false)
 
   const fetchAnnotationConfig = async () => {
     const res = await doFetchAnnotationConfig(appDetail.id)
@@ -108,9 +107,6 @@ const Annotation: FC<Props> = (props) => {
   }
 
   const handleBatchDelete = async () => {
-    if (isBatchDeleting)
-      return
-    setIsBatchDeleting(true)
     try {
       await delAnnotations(appDetail.id, selectedIds)
       Toast.notify({ message: t('common.api.actionSuccess'), type: 'success' })
@@ -121,9 +117,6 @@ const Annotation: FC<Props> = (props) => {
     }
     catch (e: any) {
       Toast.notify({ type: 'error', message: e.message || t('common.api.actionFailed') })
     }
-    finally {
-      setIsBatchDeleting(false)
-    }
   }
 
   const handleView = (item: AnnotationItem) => {
@@ -213,7 +206,6 @@ const Annotation: FC<Props> = (props) => {
             onSelectedIdsChange={setSelectedIds}
             onBatchDelete={handleBatchDelete}
             onCancel={() => setSelectedIds([])}
-            isBatchDeleting={isBatchDeleting}
           />
         : <EmptyElement />
       }
diff --git a/web/app/components/app/annotation/list.tsx b/web/app/components/app/annotation/list.tsx
index 6705ac5768..70ecedb869 100644
--- a/web/app/components/app/annotation/list.tsx
+++ b/web/app/components/app/annotation/list.tsx
@@ -19,7 +19,6 @@ type Props = {
   onSelectedIdsChange: (selectedIds: string[]) => void
   onBatchDelete: () => Promise<void>
   onCancel: () => void
-  isBatchDeleting?: boolean
 }
 
 const List: FC<Props> = ({
@@ -30,7 +29,6 @@ const List: FC<Props> = ({
   onSelectedIdsChange,
   onBatchDelete,
   onCancel,
-  isBatchDeleting,
 }) => {
   const { t } = useTranslation()
   const { formatTime } = useTimestamp()
@@ -142,7 +140,6 @@ const List: FC<Props> = ({
           selectedIds={selectedIds}
           onBatchDelete={onBatchDelete}
           onCancel={onCancel}
-          isBatchDeleting={isBatchDeleting}
         />
       )}
diff --git a/web/app/components/app/configuration/config-prompt/advanced-prompt-input.tsx b/web/app/components/app/configuration/config-prompt/advanced-prompt-input.tsx
index 6e38007b8d..5bf2f177ff 100644
--- a/web/app/components/app/configuration/config-prompt/advanced-prompt-input.tsx
+++ b/web/app/components/app/configuration/config-prompt/advanced-prompt-input.tsx
@@ -78,7 +78,9 @@ const AdvancedPromptInput: FC = ({
   const handleOpenExternalDataToolModal = () => {
     setShowExternalDataToolModal({
       payload: {},
-      onSaveCallback: (newExternalDataTool: ExternalDataTool) => {
+      onSaveCallback: (newExternalDataTool?: ExternalDataTool) => {
+        if (!newExternalDataTool)
+          return
         eventEmitter?.emit({
           type: ADD_EXTERNAL_DATA_TOOL,
           payload: newExternalDataTool,
diff --git a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
index d9ba533dcf..68bf6dd7c2 100644
--- a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
+++ b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
@@ -76,7 +76,9 @@ const Prompt: FC = ({
   const handleOpenExternalDataToolModal = () => {
     setShowExternalDataToolModal({
       payload: {},
-      onSaveCallback: (newExternalDataTool: ExternalDataTool) => {
+      onSaveCallback: (newExternalDataTool?: ExternalDataTool) => {
+        if (!newExternalDataTool)
+          return
         eventEmitter?.emit({
           type: ADD_EXTERNAL_DATA_TOOL,
           payload: newExternalDataTool,
diff --git a/web/app/components/app/configuration/config-var/config-modal/index.tsx b/web/app/components/app/configuration/config-var/config-modal/index.tsx
index f225f3142c..bab77e61b0 100644
--- a/web/app/components/app/configuration/config-var/config-modal/index.tsx
+++ b/web/app/components/app/configuration/config-var/config-modal/index.tsx
@@ -320,7 +320,7 @@ const ConfigModal: FC = ({
         {type === InputVarType.paragraph && (