From acba135de130ceeb40efcc6ed1048ff442a22c9a Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Thu, 14 Aug 2025 00:52:19 -0700 Subject: [PATCH 01/36] Revert "feat: support to upload files for visual model call when running LLM node for debugging in a single step" (#23922) --- api/core/workflow/nodes/llm/node.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index c0c0cb405c..dfc2a0000b 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -5,7 +5,7 @@ import logging from collections.abc import Generator, Mapping, Sequence from typing import TYPE_CHECKING, Any, Optional -from core.app.entities.app_invoke_entities import InvokeFrom, ModelConfigWithCredentialsEntity +from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity from core.file import FileType, file_manager from core.helper.code_executor import CodeExecutor, CodeLanguage from core.llm_generator.output_parser.errors import OutputParserError @@ -194,17 +194,6 @@ class LLMNode(BaseNode): else [] ) - # single step run fetch file from sys files - if not files and self.invoke_from == InvokeFrom.DEBUGGER and not self.previous_node_id: - files = ( - llm_utils.fetch_files( - variable_pool=variable_pool, - selector=["sys", "files"], - ) - if self._node_data.vision.enabled - else [] - ) - if files: node_inputs["#files#"] = [file.to_dict() for file in files] From 4a2e6af9b59b4905122bee8e49aa2b1fb0279892 Mon Sep 17 00:00:00 2001 From: Alex Chim <132866042+AlexChim1231@users.noreply.github.com> Date: Thu, 14 Aug 2025 15:54:25 +0800 Subject: [PATCH 02/36] Fixes #23921 (#23924) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/controllers/service_api/app/conversation.py | 9 +++++---- api/services/conversation_service.py | 5 +++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/api/controllers/service_api/app/conversation.py b/api/controllers/service_api/app/conversation.py index 79c860e6b8..073307ac4a 100644 --- a/api/controllers/service_api/app/conversation.py +++ b/api/controllers/service_api/app/conversation.py @@ -1,5 +1,3 @@ -import json - from flask_restful import Resource, marshal_with, reqparse from flask_restful.inputs import int_range from sqlalchemy.orm import Session @@ -136,12 +134,15 @@ class ConversationVariableDetailApi(Resource): variable_id = str(variable_id) parser = reqparse.RequestParser() - parser.add_argument("value", required=True, location="json") + # using lambda is for passing the already-typed value without modification + # if no lambda, it will be converted to string + # the string cannot be converted using json.loads + parser.add_argument("value", required=True, location="json", type=lambda x: x) args = parser.parse_args() try: return ConversationService.update_conversation_variable( - app_model, conversation_id, variable_id, end_user, json.loads(args["value"]) + app_model, conversation_id, variable_id, end_user, args["value"] ) except services.errors.conversation.ConversationNotExistsError: raise NotFound("Conversation Not Exists.") diff --git a/api/services/conversation_service.py b/api/services/conversation_service.py index 713c4c6782..d76981a23f 100644 --- a/api/services/conversation_service.py +++ b/api/services/conversation_service.py @@ -277,6 +277,11 @@ class ConversationService: # Validate that the new value type matches the expected 
variable type expected_type = SegmentType(current_variable.value_type) + + # There is showing number in web ui but int in db + if expected_type == SegmentType.INTEGER: + expected_type = SegmentType.NUMBER + if not expected_type.is_valid(new_value): inferred_type = SegmentType.infer_segment_type(new_value) raise ConversationVariableTypeMismatchError( From e340fccafb84bfe15b4bc2b905aca70c79386ad5 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Thu, 14 Aug 2025 19:50:59 +0800 Subject: [PATCH 03/36] feat: integrate flask-orjson for improved JSON serialization performance (#23935) --- api/app_factory.py | 2 ++ .../code_executor/template_transformer.py | 4 +-- .../rag/datasource/keyword/jieba/jieba.py | 26 ++++++++------- api/core/variables/utils.py | 33 +++++++++++-------- api/extensions/ext_orjson.py | 8 +++++ api/models/workflow.py | 2 +- api/pyproject.toml | 1 + api/uv.lock | 15 +++++++++ 8 files changed, 64 insertions(+), 27 deletions(-) create mode 100644 api/extensions/ext_orjson.py diff --git a/api/app_factory.py b/api/app_factory.py index 81155cbacd..032d6b17fc 100644 --- a/api/app_factory.py +++ b/api/app_factory.py @@ -51,6 +51,7 @@ def initialize_extensions(app: DifyApp): ext_login, ext_mail, ext_migrate, + ext_orjson, ext_otel, ext_proxy_fix, ext_redis, @@ -67,6 +68,7 @@ def initialize_extensions(app: DifyApp): ext_logging, ext_warnings, ext_import_modules, + ext_orjson, ext_set_secretkey, ext_compress, ext_code_based_extension, diff --git a/api/core/helper/code_executor/template_transformer.py b/api/core/helper/code_executor/template_transformer.py index b416e48ce4..3965f8cb31 100644 --- a/api/core/helper/code_executor/template_transformer.py +++ b/api/core/helper/code_executor/template_transformer.py @@ -5,7 +5,7 @@ from base64 import b64encode from collections.abc import Mapping from typing import Any -from core.variables.utils import SegmentJSONEncoder +from core.variables.utils import dumps_with_segments class TemplateTransformer(ABC): @@ -93,7 +93,7 @@ class TemplateTransformer(ABC): @classmethod def serialize_inputs(cls, inputs: Mapping[str, Any]) -> str: - inputs_json_str = json.dumps(inputs, ensure_ascii=False, cls=SegmentJSONEncoder).encode() + inputs_json_str = dumps_with_segments(inputs, ensure_ascii=False).encode() input_base64_encoded = b64encode(inputs_json_str).decode("utf-8") return input_base64_encoded diff --git a/api/core/rag/datasource/keyword/jieba/jieba.py b/api/core/rag/datasource/keyword/jieba/jieba.py index 7c5f47006f..c98306ea4b 100644 --- a/api/core/rag/datasource/keyword/jieba/jieba.py +++ b/api/core/rag/datasource/keyword/jieba/jieba.py @@ -1,7 +1,7 @@ -import json from collections import defaultdict from typing import Any, Optional +import orjson from pydantic import BaseModel from configs import dify_config @@ -134,13 +134,13 @@ class Jieba(BaseKeyword): dataset_keyword_table = self.dataset.dataset_keyword_table keyword_data_source_type = dataset_keyword_table.data_source_type if keyword_data_source_type == "database": - dataset_keyword_table.keyword_table = json.dumps(keyword_table_dict, cls=SetEncoder) + dataset_keyword_table.keyword_table = dumps_with_sets(keyword_table_dict) db.session.commit() else: file_key = "keyword_files/" + self.dataset.tenant_id + "/" + self.dataset.id + ".txt" if storage.exists(file_key): storage.delete(file_key) - storage.save(file_key, json.dumps(keyword_table_dict, cls=SetEncoder).encode("utf-8")) + storage.save(file_key, dumps_with_sets(keyword_table_dict).encode("utf-8")) def _get_dataset_keyword_table(self) -> 
Optional[dict]: dataset_keyword_table = self.dataset.dataset_keyword_table @@ -156,12 +156,11 @@ class Jieba(BaseKeyword): data_source_type=keyword_data_source_type, ) if keyword_data_source_type == "database": - dataset_keyword_table.keyword_table = json.dumps( + dataset_keyword_table.keyword_table = dumps_with_sets( { "__type__": "keyword_table", "__data__": {"index_id": self.dataset.id, "summary": None, "table": {}}, - }, - cls=SetEncoder, + } ) db.session.add(dataset_keyword_table) db.session.commit() @@ -252,8 +251,13 @@ class Jieba(BaseKeyword): self._save_dataset_keyword_table(keyword_table) -class SetEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, set): - return list(obj) - return super().default(obj) +def set_orjson_default(obj: Any) -> Any: + """Default function for orjson serialization of set types""" + if isinstance(obj, set): + return list(obj) + raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable") + + +def dumps_with_sets(obj: Any) -> str: + """JSON dumps with set support using orjson""" + return orjson.dumps(obj, default=set_orjson_default).decode("utf-8") diff --git a/api/core/variables/utils.py b/api/core/variables/utils.py index 692db3502e..7ebd29f865 100644 --- a/api/core/variables/utils.py +++ b/api/core/variables/utils.py @@ -1,5 +1,7 @@ -import json from collections.abc import Iterable, Sequence +from typing import Any + +import orjson from .segment_group import SegmentGroup from .segments import ArrayFileSegment, FileSegment, Segment @@ -12,15 +14,20 @@ def to_selector(node_id: str, name: str, paths: Iterable[str] = ()) -> Sequence[ return selectors -class SegmentJSONEncoder(json.JSONEncoder): - def default(self, o): - if isinstance(o, ArrayFileSegment): - return [v.model_dump() for v in o.value] - elif isinstance(o, FileSegment): - return o.value.model_dump() - elif isinstance(o, SegmentGroup): - return [self.default(seg) for seg in o.value] - elif isinstance(o, Segment): - return o.value - else: - super().default(o) +def segment_orjson_default(o: Any) -> Any: + """Default function for orjson serialization of Segment types""" + if isinstance(o, ArrayFileSegment): + return [v.model_dump() for v in o.value] + elif isinstance(o, FileSegment): + return o.value.model_dump() + elif isinstance(o, SegmentGroup): + return [segment_orjson_default(seg) for seg in o.value] + elif isinstance(o, Segment): + return o.value + raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable") + + +def dumps_with_segments(obj: Any, ensure_ascii: bool = False) -> str: + """JSON dumps with segment support using orjson""" + option = orjson.OPT_NON_STR_KEYS + return orjson.dumps(obj, default=segment_orjson_default, option=option).decode("utf-8") diff --git a/api/extensions/ext_orjson.py b/api/extensions/ext_orjson.py new file mode 100644 index 0000000000..659784a585 --- /dev/null +++ b/api/extensions/ext_orjson.py @@ -0,0 +1,8 @@ +from flask_orjson import OrjsonProvider + +from dify_app import DifyApp + + +def init_app(app: DifyApp) -> None: + """Initialize Flask-Orjson extension for faster JSON serialization""" + app.json = OrjsonProvider(app) diff --git a/api/models/workflow.py b/api/models/workflow.py index 453a650f84..7ff463e08f 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -1153,7 +1153,7 @@ class WorkflowDraftVariable(Base): value: The Segment object to store as the variable's value. 
""" self.__value = value - self.value = json.dumps(value, cls=variable_utils.SegmentJSONEncoder) + self.value = variable_utils.dumps_with_segments(value) self.value_type = value.value_type def get_node_id(self) -> str | None: diff --git a/api/pyproject.toml b/api/pyproject.toml index de472c870a..61a725a830 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -18,6 +18,7 @@ dependencies = [ "flask-cors~=6.0.0", "flask-login~=0.6.3", "flask-migrate~=4.0.7", + "flask-orjson~=2.0.0", "flask-restful~=0.3.10", "flask-sqlalchemy~=3.1.1", "gevent~=24.11.1", diff --git a/api/uv.lock b/api/uv.lock index 870975418f..cecce2bc43 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1253,6 +1253,7 @@ dependencies = [ { name = "flask-cors" }, { name = "flask-login" }, { name = "flask-migrate" }, + { name = "flask-orjson" }, { name = "flask-restful" }, { name = "flask-sqlalchemy" }, { name = "gevent" }, @@ -1440,6 +1441,7 @@ requires-dist = [ { name = "flask-cors", specifier = "~=6.0.0" }, { name = "flask-login", specifier = "~=0.6.3" }, { name = "flask-migrate", specifier = "~=4.0.7" }, + { name = "flask-orjson", specifier = "~=2.0.0" }, { name = "flask-restful", specifier = "~=0.3.10" }, { name = "flask-sqlalchemy", specifier = "~=3.1.1" }, { name = "gevent", specifier = "~=24.11.1" }, @@ -1859,6 +1861,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/93/01/587023575286236f95d2ab8a826c320375ed5ea2102bb103ed89704ffa6b/Flask_Migrate-4.0.7-py3-none-any.whl", hash = "sha256:5c532be17e7b43a223b7500d620edae33795df27c75811ddf32560f7d48ec617", size = 21127, upload-time = "2024-03-11T18:42:59.462Z" }, ] +[[package]] +name = "flask-orjson" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "flask" }, + { name = "orjson" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/49/575796f6ddca171d82dbb12762e33166c8b8f8616c946f0a6dfbb9bc3cd6/flask_orjson-2.0.0.tar.gz", hash = "sha256:6df6631437f9bc52cf9821735f896efa5583b5f80712f7d29d9ef69a79986a9c", size = 2974, upload-time = "2024-01-15T00:03:22.236Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/ca/53e14be018a2284acf799830e8cd8e0b263c0fd3dff1ad7b35f8417e7067/flask_orjson-2.0.0-py3-none-any.whl", hash = "sha256:5d15f2ba94b8d6c02aee88fc156045016e83db9eda2c30545fabd640aebaec9d", size = 3622, upload-time = "2024-01-15T00:03:17.511Z" }, +] + [[package]] name = "flask-restful" version = "0.3.10" From 05e071bf2f2d5ff2c6b6f21abcd0c3828c11e834 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Thu, 14 Aug 2025 19:51:28 +0800 Subject: [PATCH 04/36] fix: resolve user profile dropdown cache sync issue across layouts (#23937) --- web/app/components/swr-initializer.tsx | 3 +++ web/context/app-context.tsx | 29 ++++++++++++++++++-------- 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/web/app/components/swr-initializer.tsx b/web/app/components/swr-initializer.tsx index 3592a0e017..a3f6e011d8 100644 --- a/web/app/components/swr-initializer.tsx +++ b/web/app/components/swr-initializer.tsx @@ -79,6 +79,9 @@ const SwrInitializer = ({ new Map(), }}> {children} diff --git a/web/context/app-context.tsx b/web/context/app-context.tsx index f941cb43b4..4ba9e3492d 100644 --- a/web/context/app-context.tsx +++ b/web/context/app-context.tsx @@ -75,7 +75,7 @@ export type AppContextProviderProps = { } export const AppContextProvider: FC = ({ children }) => { - const { data: userProfileResponse, mutate: mutateUserProfile } = useSWR({ url: 
'/account/profile', params: {} }, fetchUserProfile) + const { data: userProfileResponse, mutate: mutateUserProfile, error: userProfileError } = useSWR({ url: '/account/profile', params: {} }, fetchUserProfile) const { data: currentWorkspaceResponse, mutate: mutateCurrentWorkspace, isLoading: isLoadingCurrentWorkspace } = useSWR({ url: '/workspaces/current', params: {} }, fetchCurrentWorkspace) const [userProfile, setUserProfile] = useState(userProfilePlaceholder) @@ -86,15 +86,26 @@ export const AppContextProvider: FC = ({ children }) => const isCurrentWorkspaceEditor = useMemo(() => ['owner', 'admin', 'editor'].includes(currentWorkspace.role), [currentWorkspace.role]) const isCurrentWorkspaceDatasetOperator = useMemo(() => currentWorkspace.role === 'dataset_operator', [currentWorkspace.role]) const updateUserProfileAndVersion = useCallback(async () => { - if (userProfileResponse && !userProfileResponse.bodyUsed) { - const result = await userProfileResponse.json() - setUserProfile(result) - const current_version = userProfileResponse.headers.get('x-version') - const current_env = process.env.NODE_ENV === 'development' ? 'DEVELOPMENT' : userProfileResponse.headers.get('x-env') - const versionData = await fetchLangGeniusVersion({ url: '/version', params: { current_version } }) - setLangGeniusVersionInfo({ ...versionData, current_version, latest_version: versionData.version, current_env }) + if (userProfileResponse) { + try { + const clonedResponse = (userProfileResponse as Response).clone() + const result = await clonedResponse.json() + setUserProfile(result) + const current_version = userProfileResponse.headers.get('x-version') + const current_env = process.env.NODE_ENV === 'development' ? 'DEVELOPMENT' : userProfileResponse.headers.get('x-env') + const versionData = await fetchLangGeniusVersion({ url: '/version', params: { current_version } }) + setLangGeniusVersionInfo({ ...versionData, current_version, latest_version: versionData.version, current_env }) + } + catch (error) { + console.error('Failed to update user profile:', error) + if (userProfile.id === '') + setUserProfile(userProfilePlaceholder) + } } - }, [userProfileResponse]) + else if (userProfileError && userProfile.id === '') { + setUserProfile(userProfilePlaceholder) + } + }, [userProfileResponse, userProfileError, userProfile.id]) useEffect(() => { updateUserProfileAndVersion() From f40e2cf98a2c82e107580d79259f7f67f258e1e4 Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Thu, 14 Aug 2025 19:52:07 +0800 Subject: [PATCH 05/36] Fix: remove redundant allowed_keys check in jsonable_encoder (#23931) Signed-off-by: Yongtao Huang Co-authored-by: Yongtao Huang <99629139+hyongtao-db@users.noreply.github.com> Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/core/model_runtime/utils/encoders.py | 7 ++----- api/core/provider_manager.py | 2 +- api/core/workflow/workflow_engine_manager.py | 0 3 files changed, 3 insertions(+), 6 deletions(-) delete mode 100644 api/core/workflow/workflow_engine_manager.py diff --git a/api/core/model_runtime/utils/encoders.py b/api/core/model_runtime/utils/encoders.py index a5c11aeeba..f65339fbfc 100644 --- a/api/core/model_runtime/utils/encoders.py +++ b/api/core/model_runtime/utils/encoders.py @@ -151,12 +151,9 @@ def jsonable_encoder( return format(obj, "f") if isinstance(obj, dict): encoded_dict = {} - allowed_keys = set(obj.keys()) for key, value in obj.items(): - if ( - (not sqlalchemy_safe or (not isinstance(key, str)) or (not key.startswith("_sa"))) - and 
(value is not None or not exclude_none) - and key in allowed_keys + if (not sqlalchemy_safe or (not isinstance(key, str)) or (not key.startswith("_sa"))) and ( + value is not None or not exclude_none ): encoded_key = jsonable_encoder( key, diff --git a/api/core/provider_manager.py b/api/core/provider_manager.py index 6de4f3a303..9250497d29 100644 --- a/api/core/provider_manager.py +++ b/api/core/provider_manager.py @@ -523,7 +523,7 @@ class ProviderManager: # Init trial provider records if not exists if ProviderQuotaType.TRIAL not in provider_quota_to_provider_record_dict: try: - # FIXME ignore the type errork, onyl TrialHostingQuota has limit need to change the logic + # FIXME ignore the type error, only TrialHostingQuota has limit need to change the logic new_provider_record = Provider( tenant_id=tenant_id, # TODO: Use provider name with prefix after the data migration. diff --git a/api/core/workflow/workflow_engine_manager.py b/api/core/workflow/workflow_engine_manager.py deleted file mode 100644 index e69de29bb2..0000000000 From 8d472135294aadaced6f0b71288738f6c3a4e5da Mon Sep 17 00:00:00 2001 From: Rajhans Jadhao Date: Thu, 14 Aug 2025 17:25:18 +0530 Subject: [PATCH 06/36] fix(workflow/if-else): keep conditions in sync on variable rename (#23611) Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: crazywoola <427733928@qq.com> --- .../components/workflow/hooks/use-workflow.ts | 10 +++---- .../nodes/_base/components/variable/utils.ts | 30 ++++++++++++++++++- 2 files changed, 34 insertions(+), 6 deletions(-) diff --git a/web/app/components/workflow/hooks/use-workflow.ts b/web/app/components/workflow/hooks/use-workflow.ts index a6435ec632..387567da0a 100644 --- a/web/app/components/workflow/hooks/use-workflow.ts +++ b/web/app/components/workflow/hooks/use-workflow.ts @@ -259,11 +259,11 @@ export const useWorkflow = () => { const handleOutVarRenameChange = useCallback((nodeId: string, oldValeSelector: ValueSelector, newVarSelector: ValueSelector) => { const { getNodes, setNodes } = store.getState() - const afterNodes = getAfterNodesInSameBranch(nodeId) - const effectNodes = findUsedVarNodes(oldValeSelector, afterNodes) - if (effectNodes.length > 0) { - const newNodes = getNodes().map((node) => { - if (effectNodes.find(n => n.id === node.id)) + const allNodes = getNodes() + const affectedNodes = findUsedVarNodes(oldValeSelector, allNodes) + if (affectedNodes.length > 0) { + const newNodes = allNodes.map((node) => { + if (affectedNodes.find(n => n.id === node.id)) return updateNodeVars(node, oldValeSelector, newVarSelector) return node diff --git a/web/app/components/workflow/nodes/_base/components/variable/utils.ts b/web/app/components/workflow/nodes/_base/components/variable/utils.ts index 8c3ffb8810..ac9432f69e 100644 --- a/web/app/components/workflow/nodes/_base/components/variable/utils.ts +++ b/web/app/components/workflow/nodes/_base/components/variable/utils.ts @@ -1022,7 +1022,15 @@ export const getNodeUsedVars = (node: Node): ValueSelector[] => { res = (data as IfElseNodeType).conditions?.map((c) => { return c.variable_selector || [] }) || [] - res.push(...((data as IfElseNodeType).cases || []).flatMap(c => (c.conditions || [])).map(c => c.variable_selector || [])) + res.push(...((data as IfElseNodeType).cases || []).flatMap(c => (c.conditions || [])).flatMap((c) => { + const selectors: ValueSelector[] = [] + if (c.variable_selector) + selectors.push(c.variable_selector) + // 
Handle sub-variable conditions + if (c.sub_variable_condition && c.sub_variable_condition.conditions) + selectors.push(...c.sub_variable_condition.conditions.map(subC => subC.variable_selector || []).filter(sel => sel.length > 0)) + return selectors + })) break } case BlockEnum.Code: { @@ -1259,6 +1267,26 @@ export const updateNodeVars = (oldNode: Node, oldVarSelector: ValueSelector, new return c }) } + if (payload.cases) { + payload.cases = payload.cases.map((caseItem) => { + if (caseItem.conditions) { + caseItem.conditions = caseItem.conditions.map((c) => { + if (c.variable_selector?.join('.') === oldVarSelector.join('.')) + c.variable_selector = newVarSelector + // Handle sub-variable conditions + if (c.sub_variable_condition && c.sub_variable_condition.conditions) { + c.sub_variable_condition.conditions = c.sub_variable_condition.conditions.map((subC) => { + if (subC.variable_selector?.join('.') === oldVarSelector.join('.')) + subC.variable_selector = newVarSelector + return subC + }) + } + return c + }) + } + return caseItem + }) + } break } case BlockEnum.Code: { From 01f233338177667784a095c20c78372e227d40e1 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Fri, 15 Aug 2025 08:59:49 +0800 Subject: [PATCH 07/36] chore: remove redundant .env.example from root directory (#23948) --- .env.example | 1197 -------------------------------------------------- 1 file changed, 1197 deletions(-) delete mode 100644 .env.example diff --git a/.env.example b/.env.example deleted file mode 100644 index 3e95f2e982..0000000000 --- a/.env.example +++ /dev/null @@ -1,1197 +0,0 @@ -# ------------------------------ -# Environment Variables for API service & worker -# ------------------------------ - -# ------------------------------ -# Common Variables -# ------------------------------ - -# The backend URL of the console API, -# used to concatenate the authorization callback. -# If empty, it is the same domain. -# Example: https://api.console.dify.ai -CONSOLE_API_URL= - -# The front-end URL of the console web, -# used to concatenate some front-end addresses and for CORS configuration use. -# If empty, it is the same domain. -# Example: https://console.dify.ai -CONSOLE_WEB_URL= - -# Service API Url, -# used to display Service API Base Url to the front-end. -# If empty, it is the same domain. -# Example: https://api.dify.ai -SERVICE_API_URL= - -# WebApp API backend Url, -# used to declare the back-end URL for the front-end API. -# If empty, it is the same domain. -# Example: https://api.app.dify.ai -APP_API_URL= - -# WebApp Url, -# used to display WebAPP API Base Url to the front-end. -# If empty, it is the same domain. -# Example: https://app.dify.ai -APP_WEB_URL= - -# File preview or download Url prefix. -# used to display File preview or download Url to the front-end or as Multi-model inputs; -# Url is signed and has expiration time. -# Setting FILES_URL is required for file processing plugins. -# - For https://example.com, use FILES_URL=https://example.com -# - For http://example.com, use FILES_URL=http://example.com -# Recommendation: use a dedicated domain (e.g., https://upload.example.com). -# Alternatively, use http://:5001 or http://api:5001, -# ensuring port 5001 is externally accessible (see docker-compose.yaml). -FILES_URL= - -# INTERNAL_FILES_URL is used for plugin daemon communication within Docker network. -# Set this to the internal Docker service URL for proper plugin file access. 
-# Example: INTERNAL_FILES_URL=http://api:5001 -INTERNAL_FILES_URL= - -# ------------------------------ -# Server Configuration -# ------------------------------ - -# The log level for the application. -# Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` -LOG_LEVEL=INFO -# Log file path -LOG_FILE=/app/logs/server.log -# Log file max size, the unit is MB -LOG_FILE_MAX_SIZE=20 -# Log file max backup count -LOG_FILE_BACKUP_COUNT=5 -# Log dateformat -LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S -# Log Timezone -LOG_TZ=UTC - -# Debug mode, default is false. -# It is recommended to turn on this configuration for local development -# to prevent some problems caused by monkey patch. -DEBUG=false - -# Flask debug mode, it can output trace information at the interface when turned on, -# which is convenient for debugging. -FLASK_DEBUG=false - -# Enable request logging, which will log the request and response information. -# And the log level is DEBUG -ENABLE_REQUEST_LOGGING=False - -# A secret key that is used for securely signing the session cookie -# and encrypting sensitive information on the database. -# You can generate a strong key using `openssl rand -base64 42`. -SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U - -# Password for admin user initialization. -# If left unset, admin user will not be prompted for a password -# when creating the initial admin account. -# The length of the password cannot exceed 30 characters. -INIT_PASSWORD= - -# Deployment environment. -# Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`. -# Testing environment. There will be a distinct color label on the front-end page, -# indicating that this environment is a testing environment. -DEPLOY_ENV=PRODUCTION - -# Whether to enable the version check policy. -# If set to empty, https://updates.dify.ai will be called for version check. -CHECK_UPDATE_URL=https://updates.dify.ai - -# Used to change the OpenAI base address, default is https://api.openai.com/v1. -# When OpenAI cannot be accessed in China, replace it with a domestic mirror address, -# or when a local model provides OpenAI compatible API, it can be replaced. -OPENAI_API_BASE=https://api.openai.com/v1 - -# When enabled, migrations will be executed prior to application startup -# and the application will start after the migrations have completed. -MIGRATION_ENABLED=true - -# File Access Time specifies a time interval in seconds for the file to be accessed. -# The default value is 300 seconds. -FILES_ACCESS_TIMEOUT=300 - -# Access token expiration time in minutes -ACCESS_TOKEN_EXPIRE_MINUTES=60 - -# Refresh token expiration time in days -REFRESH_TOKEN_EXPIRE_DAYS=30 - -# The maximum number of active requests for the application, where 0 means unlimited, should be a non-negative integer. -APP_MAX_ACTIVE_REQUESTS=0 -APP_MAX_EXECUTION_TIME=1200 - -# ------------------------------ -# Container Startup Related Configuration -# Only effective when starting with docker image or docker-compose. -# ------------------------------ - -# API service binding address, default: 0.0.0.0, i.e., all addresses can be accessed. -DIFY_BIND_ADDRESS=0.0.0.0 - -# API service binding port number, default 5001. -DIFY_PORT=5001 - -# The number of API server workers, i.e., the number of workers. -# Formula: number of cpu cores x 2 + 1 for sync, 1 for Gevent -# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers -SERVER_WORKER_AMOUNT=1 - -# Defaults to gevent. If using windows, it can be switched to sync or solo. 
-SERVER_WORKER_CLASS=gevent - -# Default number of worker connections, the default is 10. -SERVER_WORKER_CONNECTIONS=10 - -# Similar to SERVER_WORKER_CLASS. -# If using windows, it can be switched to sync or solo. -CELERY_WORKER_CLASS= - -# Request handling timeout. The default is 200, -# it is recommended to set it to 360 to support a longer sse connection time. -GUNICORN_TIMEOUT=360 - -# The number of Celery workers. The default is 1, and can be set as needed. -CELERY_WORKER_AMOUNT= - -# Flag indicating whether to enable autoscaling of Celery workers. -# -# Autoscaling is useful when tasks are CPU intensive and can be dynamically -# allocated and deallocated based on the workload. -# -# When autoscaling is enabled, the maximum and minimum number of workers can -# be specified. The autoscaling algorithm will dynamically adjust the number -# of workers within the specified range. -# -# Default is false (i.e., autoscaling is disabled). -# -# Example: -# CELERY_AUTO_SCALE=true -CELERY_AUTO_SCALE=false - -# The maximum number of Celery workers that can be autoscaled. -# This is optional and only used when autoscaling is enabled. -# Default is not set. -CELERY_MAX_WORKERS= - -# The minimum number of Celery workers that can be autoscaled. -# This is optional and only used when autoscaling is enabled. -# Default is not set. -CELERY_MIN_WORKERS= - -# API Tool configuration -API_TOOL_DEFAULT_CONNECT_TIMEOUT=10 -API_TOOL_DEFAULT_READ_TIMEOUT=60 - -# ------------------------------- -# Datasource Configuration -# -------------------------------- -ENABLE_WEBSITE_JINAREADER=true -ENABLE_WEBSITE_FIRECRAWL=true -ENABLE_WEBSITE_WATERCRAWL=true - -# ------------------------------ -# Database Configuration -# The database uses PostgreSQL. Please use the public schema. -# It is consistent with the configuration in the 'db' service below. -# ------------------------------ - -DB_USERNAME=postgres -DB_PASSWORD=difyai123456 -DB_HOST=db -DB_PORT=5432 -DB_DATABASE=dify -# The size of the database connection pool. -# The default is 30 connections, which can be appropriately increased. -SQLALCHEMY_POOL_SIZE=30 -# Database connection pool recycling time, the default is 3600 seconds. -SQLALCHEMY_POOL_RECYCLE=3600 -# Whether to print SQL, default is false. -SQLALCHEMY_ECHO=false -# If True, will test connections for liveness upon each checkout -SQLALCHEMY_POOL_PRE_PING=false -# Whether to enable the Last in first out option or use default FIFO queue if is false -SQLALCHEMY_POOL_USE_LIFO=false - -# Maximum number of connections to the database -# Default is 100 -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS -POSTGRES_MAX_CONNECTIONS=100 - -# Sets the amount of shared memory used for postgres's shared buffers. -# Default is 128MB -# Recommended value: 25% of available memory -# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS -POSTGRES_SHARED_BUFFERS=128MB - -# Sets the amount of memory used by each database worker for working space. -# Default is 4MB -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM -POSTGRES_WORK_MEM=4MB - -# Sets the amount of memory reserved for maintenance activities. -# Default is 64MB -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM -POSTGRES_MAINTENANCE_WORK_MEM=64MB - -# Sets the planner's assumption about the effective cache size. 
-# Default is 4096MB -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE -POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB - -# ------------------------------ -# Redis Configuration -# This Redis configuration is used for caching and for pub/sub during conversation. -# ------------------------------ - -REDIS_HOST=redis -REDIS_PORT=6379 -REDIS_USERNAME= -REDIS_PASSWORD=difyai123456 -REDIS_USE_SSL=false -REDIS_DB=0 - -# Whether to use Redis Sentinel mode. -# If set to true, the application will automatically discover and connect to the master node through Sentinel. -REDIS_USE_SENTINEL=false - -# List of Redis Sentinel nodes. If Sentinel mode is enabled, provide at least one Sentinel IP and port. -# Format: `:,:,:` -REDIS_SENTINELS= -REDIS_SENTINEL_SERVICE_NAME= -REDIS_SENTINEL_USERNAME= -REDIS_SENTINEL_PASSWORD= -REDIS_SENTINEL_SOCKET_TIMEOUT=0.1 - -# List of Redis Cluster nodes. If Cluster mode is enabled, provide at least one Cluster IP and port. -# Format: `:,:,:` -REDIS_USE_CLUSTERS=false -REDIS_CLUSTERS= -REDIS_CLUSTERS_PASSWORD= - -# ------------------------------ -# Celery Configuration -# ------------------------------ - -# Use redis as the broker, and redis db 1 for celery broker. -# Format as follows: `redis://:@:/` -# Example: redis://:difyai123456@redis:6379/1 -# If use Redis Sentinel, format as follows: `sentinel://:@:/` -# Example: sentinel://localhost:26379/1;sentinel://localhost:26380/1;sentinel://localhost:26381/1 -CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1 -BROKER_USE_SSL=false - -# If you are using Redis Sentinel for high availability, configure the following settings. -CELERY_USE_SENTINEL=false -CELERY_SENTINEL_MASTER_NAME= -CELERY_SENTINEL_PASSWORD= -CELERY_SENTINEL_SOCKET_TIMEOUT=0.1 - -# ------------------------------ -# CORS Configuration -# Used to set the front-end cross-domain access policy. -# ------------------------------ - -# Specifies the allowed origins for cross-origin requests to the Web API, -# e.g. https://dify.app or * for all origins. -WEB_API_CORS_ALLOW_ORIGINS=* - -# Specifies the allowed origins for cross-origin requests to the console API, -# e.g. https://cloud.dify.ai or * for all origins. -CONSOLE_CORS_ALLOW_ORIGINS=* - -# ------------------------------ -# File Storage Configuration -# ------------------------------ - -# The type of storage to use for storing user files. -STORAGE_TYPE=opendal - -# Apache OpenDAL Configuration -# The configuration for OpenDAL consists of the following format: OPENDAL__. -# You can find all the service configurations (CONFIG_NAME) in the repository at: https://github.com/apache/opendal/tree/main/core/src/services. -# Dify will scan configurations starting with OPENDAL_ and automatically apply them. -# The scheme name for the OpenDAL storage. -OPENDAL_SCHEME=fs -# Configurations for OpenDAL Local File System. 
-OPENDAL_FS_ROOT=storage - -# ClickZetta Volume Configuration (for storage backend) -# To use ClickZetta Volume as storage backend, set STORAGE_TYPE=clickzetta-volume -# Note: ClickZetta Volume will reuse the existing CLICKZETTA_* connection parameters - -# Volume type selection (three types available): -# - user: Personal/small team use, simple config, user-level permissions -# - table: Enterprise multi-tenant, smart routing, table-level + user-level permissions -# - external: Data lake integration, external storage connection, volume-level + storage-level permissions -CLICKZETTA_VOLUME_TYPE=user - -# External Volume name (required only when TYPE=external) -CLICKZETTA_VOLUME_NAME= - -# Table Volume table prefix (used only when TYPE=table) -CLICKZETTA_VOLUME_TABLE_PREFIX=dataset_ - -# Dify file directory prefix (isolates from other apps, recommended to keep default) -CLICKZETTA_VOLUME_DIFY_PREFIX=dify_km - -# S3 Configuration -# -S3_ENDPOINT= -S3_REGION=us-east-1 -S3_BUCKET_NAME=difyai -S3_ACCESS_KEY= -S3_SECRET_KEY= -# Whether to use AWS managed IAM roles for authenticating with the S3 service. -# If set to false, the access key and secret key must be provided. -S3_USE_AWS_MANAGED_IAM=false - -# Azure Blob Configuration -# -AZURE_BLOB_ACCOUNT_NAME=difyai -AZURE_BLOB_ACCOUNT_KEY=difyai -AZURE_BLOB_CONTAINER_NAME=difyai-container -AZURE_BLOB_ACCOUNT_URL=https://.blob.core.windows.net - -# Google Storage Configuration -# -GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name -GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64= - -# The Alibaba Cloud OSS configurations, -# -ALIYUN_OSS_BUCKET_NAME=your-bucket-name -ALIYUN_OSS_ACCESS_KEY=your-access-key -ALIYUN_OSS_SECRET_KEY=your-secret-key -ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com -ALIYUN_OSS_REGION=ap-southeast-1 -ALIYUN_OSS_AUTH_VERSION=v4 -# Don't start with '/'. OSS doesn't support leading slash in object names. -ALIYUN_OSS_PATH=your-path - -# Tencent COS Configuration -# -TENCENT_COS_BUCKET_NAME=your-bucket-name -TENCENT_COS_SECRET_KEY=your-secret-key -TENCENT_COS_SECRET_ID=your-secret-id -TENCENT_COS_REGION=your-region -TENCENT_COS_SCHEME=your-scheme - -# Oracle Storage Configuration -# -OCI_ENDPOINT=https://your-object-storage-namespace.compat.objectstorage.us-ashburn-1.oraclecloud.com -OCI_BUCKET_NAME=your-bucket-name -OCI_ACCESS_KEY=your-access-key -OCI_SECRET_KEY=your-secret-key -OCI_REGION=us-ashburn-1 - -# Huawei OBS Configuration -# -HUAWEI_OBS_BUCKET_NAME=your-bucket-name -HUAWEI_OBS_SECRET_KEY=your-secret-key -HUAWEI_OBS_ACCESS_KEY=your-access-key -HUAWEI_OBS_SERVER=your-server-url - -# Volcengine TOS Configuration -# -VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name -VOLCENGINE_TOS_SECRET_KEY=your-secret-key -VOLCENGINE_TOS_ACCESS_KEY=your-access-key -VOLCENGINE_TOS_ENDPOINT=your-server-url -VOLCENGINE_TOS_REGION=your-region - -# Baidu OBS Storage Configuration -# -BAIDU_OBS_BUCKET_NAME=your-bucket-name -BAIDU_OBS_SECRET_KEY=your-secret-key -BAIDU_OBS_ACCESS_KEY=your-access-key -BAIDU_OBS_ENDPOINT=your-server-url - -# Supabase Storage Configuration -# -SUPABASE_BUCKET_NAME=your-bucket-name -SUPABASE_API_KEY=your-access-key -SUPABASE_URL=your-server-url - -# ------------------------------ -# Vector Database Configuration -# ------------------------------ - -# The type of vector store to use. 
-# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`, `matrixone`. -VECTOR_STORE=weaviate - -# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`. -WEAVIATE_ENDPOINT=http://weaviate:8080 -WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih - -# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`. -QDRANT_URL=http://qdrant:6333 -QDRANT_API_KEY=difyai123456 -QDRANT_CLIENT_TIMEOUT=20 -QDRANT_GRPC_ENABLED=false -QDRANT_GRPC_PORT=6334 -QDRANT_REPLICATION_FACTOR=1 - -# Milvus configuration. Only available when VECTOR_STORE is `milvus`. -# The milvus uri. -MILVUS_URI=http://host.docker.internal:19530 -MILVUS_DATABASE= -MILVUS_TOKEN= -MILVUS_USER= -MILVUS_PASSWORD= -MILVUS_ENABLE_HYBRID_SEARCH=False -MILVUS_ANALYZER_PARAMS= - -# MyScale configuration, only available when VECTOR_STORE is `myscale` -# For multi-language support, please set MYSCALE_FTS_PARAMS with referring to: -# https://myscale.com/docs/en/text-search/#understanding-fts-index-parameters -MYSCALE_HOST=myscale -MYSCALE_PORT=8123 -MYSCALE_USER=default -MYSCALE_PASSWORD= -MYSCALE_DATABASE=dify -MYSCALE_FTS_PARAMS= - -# Couchbase configurations, only available when VECTOR_STORE is `couchbase` -# The connection string must include hostname defined in the docker-compose file (couchbase-server in this case) -COUCHBASE_CONNECTION_STRING=couchbase://couchbase-server -COUCHBASE_USER=Administrator -COUCHBASE_PASSWORD=password -COUCHBASE_BUCKET_NAME=Embeddings -COUCHBASE_SCOPE_NAME=_default - -# pgvector configurations, only available when VECTOR_STORE is `pgvector` -PGVECTOR_HOST=pgvector -PGVECTOR_PORT=5432 -PGVECTOR_USER=postgres -PGVECTOR_PASSWORD=difyai123456 -PGVECTOR_DATABASE=dify -PGVECTOR_MIN_CONNECTION=1 -PGVECTOR_MAX_CONNECTION=5 -PGVECTOR_PG_BIGM=false -PGVECTOR_PG_BIGM_VERSION=1.2-20240606 - -# vastbase configurations, only available when VECTOR_STORE is `vastbase` -VASTBASE_HOST=vastbase -VASTBASE_PORT=5432 -VASTBASE_USER=dify -VASTBASE_PASSWORD=Difyai123456 -VASTBASE_DATABASE=dify -VASTBASE_MIN_CONNECTION=1 -VASTBASE_MAX_CONNECTION=5 - -# pgvecto-rs configurations, only available when VECTOR_STORE is `pgvecto-rs` -PGVECTO_RS_HOST=pgvecto-rs -PGVECTO_RS_PORT=5432 -PGVECTO_RS_USER=postgres -PGVECTO_RS_PASSWORD=difyai123456 -PGVECTO_RS_DATABASE=dify - -# analyticdb configurations, only available when VECTOR_STORE is `analyticdb` -ANALYTICDB_KEY_ID=your-ak -ANALYTICDB_KEY_SECRET=your-sk -ANALYTICDB_REGION_ID=cn-hangzhou -ANALYTICDB_INSTANCE_ID=gp-ab123456 -ANALYTICDB_ACCOUNT=testaccount -ANALYTICDB_PASSWORD=testpassword -ANALYTICDB_NAMESPACE=dify -ANALYTICDB_NAMESPACE_PASSWORD=difypassword -ANALYTICDB_HOST=gp-test.aliyuncs.com -ANALYTICDB_PORT=5432 -ANALYTICDB_MIN_CONNECTION=1 -ANALYTICDB_MAX_CONNECTION=5 - -# TiDB vector configurations, only available when VECTOR_STORE is `tidb_vector` -TIDB_VECTOR_HOST=tidb -TIDB_VECTOR_PORT=4000 -TIDB_VECTOR_USER= -TIDB_VECTOR_PASSWORD= -TIDB_VECTOR_DATABASE=dify - -# Matrixone vector configurations. 
-MATRIXONE_HOST=matrixone -MATRIXONE_PORT=6001 -MATRIXONE_USER=dump -MATRIXONE_PASSWORD=111 -MATRIXONE_DATABASE=dify - -# Tidb on qdrant configuration, only available when VECTOR_STORE is `tidb_on_qdrant` -TIDB_ON_QDRANT_URL=http://127.0.0.1 -TIDB_ON_QDRANT_API_KEY=dify -TIDB_ON_QDRANT_CLIENT_TIMEOUT=20 -TIDB_ON_QDRANT_GRPC_ENABLED=false -TIDB_ON_QDRANT_GRPC_PORT=6334 -TIDB_PUBLIC_KEY=dify -TIDB_PRIVATE_KEY=dify -TIDB_API_URL=http://127.0.0.1 -TIDB_IAM_API_URL=http://127.0.0.1 -TIDB_REGION=regions/aws-us-east-1 -TIDB_PROJECT_ID=dify -TIDB_SPEND_LIMIT=100 - -# Chroma configuration, only available when VECTOR_STORE is `chroma` -CHROMA_HOST=127.0.0.1 -CHROMA_PORT=8000 -CHROMA_TENANT=default_tenant -CHROMA_DATABASE=default_database -CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider -CHROMA_AUTH_CREDENTIALS= - -# Oracle configuration, only available when VECTOR_STORE is `oracle` -ORACLE_USER=dify -ORACLE_PASSWORD=dify -ORACLE_DSN=oracle:1521/FREEPDB1 -ORACLE_CONFIG_DIR=/app/api/storage/wallet -ORACLE_WALLET_LOCATION=/app/api/storage/wallet -ORACLE_WALLET_PASSWORD=dify -ORACLE_IS_AUTONOMOUS=false - -# relyt configurations, only available when VECTOR_STORE is `relyt` -RELYT_HOST=db -RELYT_PORT=5432 -RELYT_USER=postgres -RELYT_PASSWORD=difyai123456 -RELYT_DATABASE=postgres - -# open search configuration, only available when VECTOR_STORE is `opensearch` -OPENSEARCH_HOST=opensearch -OPENSEARCH_PORT=9200 -OPENSEARCH_SECURE=true -OPENSEARCH_VERIFY_CERTS=true -OPENSEARCH_AUTH_METHOD=basic -OPENSEARCH_USER=admin -OPENSEARCH_PASSWORD=admin -# If using AWS managed IAM, e.g. Managed Cluster or OpenSearch Serverless -OPENSEARCH_AWS_REGION=ap-southeast-1 -OPENSEARCH_AWS_SERVICE=aoss - -# tencent vector configurations, only available when VECTOR_STORE is `tencent` -TENCENT_VECTOR_DB_URL=http://127.0.0.1 -TENCENT_VECTOR_DB_API_KEY=dify -TENCENT_VECTOR_DB_TIMEOUT=30 -TENCENT_VECTOR_DB_USERNAME=dify -TENCENT_VECTOR_DB_DATABASE=dify -TENCENT_VECTOR_DB_SHARD=1 -TENCENT_VECTOR_DB_REPLICAS=2 -TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH=false - -# ElasticSearch configuration, only available when VECTOR_STORE is `elasticsearch` -ELASTICSEARCH_HOST=0.0.0.0 -ELASTICSEARCH_PORT=9200 -ELASTICSEARCH_USERNAME=elastic -ELASTICSEARCH_PASSWORD=elastic -KIBANA_PORT=5601 - -# baidu vector configurations, only available when VECTOR_STORE is `baidu` -BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287 -BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000 -BAIDU_VECTOR_DB_ACCOUNT=root -BAIDU_VECTOR_DB_API_KEY=dify -BAIDU_VECTOR_DB_DATABASE=dify -BAIDU_VECTOR_DB_SHARD=1 -BAIDU_VECTOR_DB_REPLICAS=3 - -# VikingDB configurations, only available when VECTOR_STORE is `vikingdb` -VIKINGDB_ACCESS_KEY=your-ak -VIKINGDB_SECRET_KEY=your-sk -VIKINGDB_REGION=cn-shanghai -VIKINGDB_HOST=api-vikingdb.xxx.volces.com -VIKINGDB_SCHEMA=http -VIKINGDB_CONNECTION_TIMEOUT=30 -VIKINGDB_SOCKET_TIMEOUT=30 - -# Lindorm configuration, only available when VECTOR_STORE is `lindorm` -LINDORM_URL=http://lindorm:30070 -LINDORM_USERNAME=lindorm -LINDORM_PASSWORD=lindorm -LINDORM_QUERY_TIMEOUT=1 - -# OceanBase Vector configuration, only available when VECTOR_STORE is `oceanbase` -OCEANBASE_VECTOR_HOST=oceanbase -OCEANBASE_VECTOR_PORT=2881 -OCEANBASE_VECTOR_USER=root@test -OCEANBASE_VECTOR_PASSWORD=difyai123456 -OCEANBASE_VECTOR_DATABASE=test -OCEANBASE_CLUSTER_NAME=difyai -OCEANBASE_MEMORY_LIMIT=6G -OCEANBASE_ENABLE_HYBRID_SEARCH=false - -# opengauss configurations, only available when VECTOR_STORE is `opengauss` -OPENGAUSS_HOST=opengauss -OPENGAUSS_PORT=6600 
-OPENGAUSS_USER=postgres -OPENGAUSS_PASSWORD=Dify@123 -OPENGAUSS_DATABASE=dify -OPENGAUSS_MIN_CONNECTION=1 -OPENGAUSS_MAX_CONNECTION=5 -OPENGAUSS_ENABLE_PQ=false - -# huawei cloud search service vector configurations, only available when VECTOR_STORE is `huawei_cloud` -HUAWEI_CLOUD_HOSTS=https://127.0.0.1:9200 -HUAWEI_CLOUD_USER=admin -HUAWEI_CLOUD_PASSWORD=admin - -# Upstash Vector configuration, only available when VECTOR_STORE is `upstash` -UPSTASH_VECTOR_URL=https://xxx-vector.upstash.io -UPSTASH_VECTOR_TOKEN=dify - -# TableStore Vector configuration -# (only used when VECTOR_STORE is tablestore) -TABLESTORE_ENDPOINT=https://instance-name.cn-hangzhou.ots.aliyuncs.com -TABLESTORE_INSTANCE_NAME=instance-name -TABLESTORE_ACCESS_KEY_ID=xxx -TABLESTORE_ACCESS_KEY_SECRET=xxx - -# Clickzetta configuration, only available when VECTOR_STORE is `clickzetta` -CLICKZETTA_USERNAME= -CLICKZETTA_PASSWORD= -CLICKZETTA_INSTANCE= -CLICKZETTA_SERVICE=api.clickzetta.com -CLICKZETTA_WORKSPACE=quick_start -CLICKZETTA_VCLUSTER=default_ap -CLICKZETTA_SCHEMA=dify -CLICKZETTA_BATCH_SIZE=100 -CLICKZETTA_ENABLE_INVERTED_INDEX=true -CLICKZETTA_ANALYZER_TYPE=chinese -CLICKZETTA_ANALYZER_MODE=smart -CLICKZETTA_VECTOR_DISTANCE_FUNCTION=cosine_distance - -# ------------------------------ -# Knowledge Configuration -# ------------------------------ - -# Upload file size limit, default 15M. -UPLOAD_FILE_SIZE_LIMIT=15 - -# The maximum number of files that can be uploaded at a time, default 5. -UPLOAD_FILE_BATCH_LIMIT=5 - -# ETL type, support: `dify`, `Unstructured` -# `dify` Dify's proprietary file extraction scheme -# `Unstructured` Unstructured.io file extraction scheme -ETL_TYPE=dify - -# Unstructured API path and API key, needs to be configured when ETL_TYPE is Unstructured -# Or using Unstructured for document extractor node for pptx. -# For example: http://unstructured:8000/general/v0/general -UNSTRUCTURED_API_URL= -UNSTRUCTURED_API_KEY= -SCARF_NO_ANALYTICS=true - -# ------------------------------ -# Model Configuration -# ------------------------------ - -# The maximum number of tokens allowed for prompt generation. -# This setting controls the upper limit of tokens that can be used by the LLM -# when generating a prompt in the prompt generation tool. -# Default: 512 tokens. -PROMPT_GENERATION_MAX_TOKENS=512 - -# The maximum number of tokens allowed for code generation. -# This setting controls the upper limit of tokens that can be used by the LLM -# when generating code in the code generation tool. -# Default: 1024 tokens. -CODE_GENERATION_MAX_TOKENS=1024 - -# Enable or disable plugin based token counting. If disabled, token counting will return 0. -# This can improve performance by skipping token counting operations. -# Default: false (disabled). -PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false - -# ------------------------------ -# Multi-modal Configuration -# ------------------------------ - -# The format of the image/video/audio/document sent when the multi-modal model is input, -# the default is base64, optional url. -# The delay of the call in url mode will be lower than that in base64 mode. -# It is generally recommended to use the more compatible base64 mode. -# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video/audio/document. -MULTIMODAL_SEND_FORMAT=base64 -# Upload image file size limit, default 10M. -UPLOAD_IMAGE_FILE_SIZE_LIMIT=10 -# Upload video file size limit, default 100M. 
-UPLOAD_VIDEO_FILE_SIZE_LIMIT=100 -# Upload audio file size limit, default 50M. -UPLOAD_AUDIO_FILE_SIZE_LIMIT=50 - -# ------------------------------ -# Sentry Configuration -# Used for application monitoring and error log tracking. -# ------------------------------ -SENTRY_DSN= - -# API Service Sentry DSN address, default is empty, when empty, -# all monitoring information is not reported to Sentry. -# If not set, Sentry error reporting will be disabled. -API_SENTRY_DSN= -# API Service The reporting ratio of Sentry events, if it is 0.01, it is 1%. -API_SENTRY_TRACES_SAMPLE_RATE=1.0 -# API Service The reporting ratio of Sentry profiles, if it is 0.01, it is 1%. -API_SENTRY_PROFILES_SAMPLE_RATE=1.0 - -# Web Service Sentry DSN address, default is empty, when empty, -# all monitoring information is not reported to Sentry. -# If not set, Sentry error reporting will be disabled. -WEB_SENTRY_DSN= - -# ------------------------------ -# Notion Integration Configuration -# Variables can be obtained by applying for Notion integration: https://www.notion.so/my-integrations -# ------------------------------ - -# Configure as "public" or "internal". -# Since Notion's OAuth redirect URL only supports HTTPS, -# if deploying locally, please use Notion's internal integration. -NOTION_INTEGRATION_TYPE=public -# Notion OAuth client secret (used for public integration type) -NOTION_CLIENT_SECRET= -# Notion OAuth client id (used for public integration type) -NOTION_CLIENT_ID= -# Notion internal integration secret. -# If the value of NOTION_INTEGRATION_TYPE is "internal", -# you need to configure this variable. -NOTION_INTERNAL_SECRET= - -# ------------------------------ -# Mail related configuration -# ------------------------------ - -# Mail type, support: resend, smtp, sendgrid -MAIL_TYPE=resend - -# Default send from email address, if not specified -# If using SendGrid, use the 'from' field for authentication if necessary. -MAIL_DEFAULT_SEND_FROM= - -# API-Key for the Resend email provider, used when MAIL_TYPE is `resend`. -RESEND_API_URL=https://api.resend.com -RESEND_API_KEY=your-resend-api-key - - -# SMTP server configuration, used when MAIL_TYPE is `smtp` -SMTP_SERVER= -SMTP_PORT=465 -SMTP_USERNAME= -SMTP_PASSWORD= -SMTP_USE_TLS=true -SMTP_OPPORTUNISTIC_TLS=false - -# Sendgid configuration -SENDGRID_API_KEY= - -# ------------------------------ -# Others Configuration -# ------------------------------ - -# Maximum length of segmentation tokens for indexing -INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000 - -# Member invitation link valid time (hours), -# Default: 72. -INVITE_EXPIRY_HOURS=72 - -# Reset password token valid time (minutes), -RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 - -# The sandbox service endpoint. 
-CODE_EXECUTION_ENDPOINT=http://sandbox:8194 -CODE_EXECUTION_API_KEY=dify-sandbox -CODE_MAX_NUMBER=9223372036854775807 -CODE_MIN_NUMBER=-9223372036854775808 -CODE_MAX_DEPTH=5 -CODE_MAX_PRECISION=20 -CODE_MAX_STRING_LENGTH=80000 -CODE_MAX_STRING_ARRAY_LENGTH=30 -CODE_MAX_OBJECT_ARRAY_LENGTH=30 -CODE_MAX_NUMBER_ARRAY_LENGTH=1000 -CODE_EXECUTION_CONNECT_TIMEOUT=10 -CODE_EXECUTION_READ_TIMEOUT=60 -CODE_EXECUTION_WRITE_TIMEOUT=10 -TEMPLATE_TRANSFORM_MAX_LENGTH=80000 - -# Workflow runtime configuration -WORKFLOW_MAX_EXECUTION_STEPS=500 -WORKFLOW_MAX_EXECUTION_TIME=1200 -WORKFLOW_CALL_MAX_DEPTH=5 -MAX_VARIABLE_SIZE=204800 -WORKFLOW_PARALLEL_DEPTH_LIMIT=3 -WORKFLOW_FILE_UPLOAD_LIMIT=10 - -# Workflow storage configuration -# Options: rdbms, hybrid -# rdbms: Use only the relational database (default) -# hybrid: Save new data to object storage, read from both object storage and RDBMS -WORKFLOW_NODE_EXECUTION_STORAGE=rdbms - -# Repository configuration -# Core workflow execution repository implementation -CORE_WORKFLOW_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository - -# Core workflow node execution repository implementation -CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository - -# API workflow node execution repository implementation -API_WORKFLOW_NODE_EXECUTION_REPOSITORY=repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository - -# API workflow run repository implementation -API_WORKFLOW_RUN_REPOSITORY=repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository - -# HTTP request node in workflow configuration -HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760 -HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576 -HTTP_REQUEST_NODE_SSL_VERIFY=True - -# Respect X-* headers to redirect clients -RESPECT_XFORWARD_HEADERS_ENABLED=false - -# SSRF Proxy server HTTP URL -SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 -# SSRF Proxy server HTTPS URL -SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 - -# Maximum loop count in the workflow -LOOP_NODE_MAX_COUNT=100 - -# The maximum number of tools that can be used in the agent. -MAX_TOOLS_NUM=10 - -# Maximum number of Parallelism branches in the workflow -MAX_PARALLEL_LIMIT=10 - -# The maximum number of iterations for agent setting -MAX_ITERATIONS_NUM=99 - -# ------------------------------ -# Environment Variables for web Service -# ------------------------------ - -# The timeout for the text generation in millisecond -TEXT_GENERATION_TIMEOUT_MS=60000 - -# Allow rendering unsafe URLs which have "data:" scheme. -ALLOW_UNSAFE_DATA_SCHEME=false - -# ------------------------------ -# Environment Variables for db Service -# ------------------------------ - -# The name of the default postgres user. -POSTGRES_USER=${DB_USERNAME} -# The password for the default postgres user. -POSTGRES_PASSWORD=${DB_PASSWORD} -# The name of the default postgres database. 
-POSTGRES_DB=${DB_DATABASE} -# postgres data directory -PGDATA=/var/lib/postgresql/data/pgdata - -# ------------------------------ -# Environment Variables for sandbox Service -# ------------------------------ - -# The API key for the sandbox service -SANDBOX_API_KEY=dify-sandbox -# The mode in which the Gin framework runs -SANDBOX_GIN_MODE=release -# The timeout for the worker in seconds -SANDBOX_WORKER_TIMEOUT=15 -# Enable network for the sandbox service -SANDBOX_ENABLE_NETWORK=true -# HTTP proxy URL for SSRF protection -SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128 -# HTTPS proxy URL for SSRF protection -SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128 -# The port on which the sandbox service runs -SANDBOX_PORT=8194 - -# ------------------------------ -# Environment Variables for weaviate Service -# (only used when VECTOR_STORE is weaviate) -# ------------------------------ -WEAVIATE_PERSISTENCE_DATA_PATH=/var/lib/weaviate -WEAVIATE_QUERY_DEFAULTS_LIMIT=25 -WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true -WEAVIATE_DEFAULT_VECTORIZER_MODULE=none -WEAVIATE_CLUSTER_HOSTNAME=node1 -WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true -WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih -WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai -WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true -WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai - -# ------------------------------ -# Environment Variables for Chroma -# (only used when VECTOR_STORE is chroma) -# ------------------------------ - -# Authentication credentials for Chroma server -CHROMA_SERVER_AUTHN_CREDENTIALS=difyai123456 -# Authentication provider for Chroma server -CHROMA_SERVER_AUTHN_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider -# Persistence setting for Chroma server -CHROMA_IS_PERSISTENT=TRUE - -# ------------------------------ -# Environment Variables for Oracle Service -# (only used when VECTOR_STORE is oracle) -# ------------------------------ -ORACLE_PWD=Dify123456 -ORACLE_CHARACTERSET=AL32UTF8 - -# ------------------------------ -# Environment Variables for milvus Service -# (only used when VECTOR_STORE is milvus) -# ------------------------------ -# ETCD configuration for auto compaction mode -ETCD_AUTO_COMPACTION_MODE=revision -# ETCD configuration for auto compaction retention in terms of number of revisions -ETCD_AUTO_COMPACTION_RETENTION=1000 -# ETCD configuration for backend quota in bytes -ETCD_QUOTA_BACKEND_BYTES=4294967296 -# ETCD configuration for the number of changes before triggering a snapshot -ETCD_SNAPSHOT_COUNT=50000 -# MinIO access key for authentication -MINIO_ACCESS_KEY=minioadmin -# MinIO secret key for authentication -MINIO_SECRET_KEY=minioadmin -# ETCD service endpoints -ETCD_ENDPOINTS=etcd:2379 -# MinIO service address -MINIO_ADDRESS=minio:9000 -# Enable or disable security authorization -MILVUS_AUTHORIZATION_ENABLED=true - -# ------------------------------ -# Environment Variables for pgvector / pgvector-rs Service -# (only used when VECTOR_STORE is pgvector / pgvector-rs) -# ------------------------------ -PGVECTOR_PGUSER=postgres -# The password for the default postgres user. -PGVECTOR_POSTGRES_PASSWORD=difyai123456 -# The name of the default postgres database. 
-PGVECTOR_POSTGRES_DB=dify -# postgres data directory -PGVECTOR_PGDATA=/var/lib/postgresql/data/pgdata - -# ------------------------------ -# Environment Variables for opensearch -# (only used when VECTOR_STORE is opensearch) -# ------------------------------ -OPENSEARCH_DISCOVERY_TYPE=single-node -OPENSEARCH_BOOTSTRAP_MEMORY_LOCK=true -OPENSEARCH_JAVA_OPTS_MIN=512m -OPENSEARCH_JAVA_OPTS_MAX=1024m -OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123 -OPENSEARCH_MEMLOCK_SOFT=-1 -OPENSEARCH_MEMLOCK_HARD=-1 -OPENSEARCH_NOFILE_SOFT=65536 -OPENSEARCH_NOFILE_HARD=65536 - -# ------------------------------ -# Environment Variables for Nginx reverse proxy -# ------------------------------ -NGINX_SERVER_NAME=_ -NGINX_HTTPS_ENABLED=false -# HTTP port -NGINX_PORT=80 -# SSL settings are only applied when HTTPS_ENABLED is true -NGINX_SSL_PORT=443 -# if HTTPS_ENABLED is true, you're required to add your own SSL certificates/keys to the `./nginx/ssl` directory -# and modify the env vars below accordingly. -NGINX_SSL_CERT_FILENAME=dify.crt -NGINX_SSL_CERT_KEY_FILENAME=dify.key -NGINX_SSL_PROTOCOLS=TLSv1.1 TLSv1.2 TLSv1.3 - -# Nginx performance tuning -NGINX_WORKER_PROCESSES=auto -NGINX_CLIENT_MAX_BODY_SIZE=100M -NGINX_KEEPALIVE_TIMEOUT=65 - -# Proxy settings -NGINX_PROXY_READ_TIMEOUT=3600s -NGINX_PROXY_SEND_TIMEOUT=3600s - -# Set true to accept requests for /.well-known/acme-challenge/ -NGINX_ENABLE_CERTBOT_CHALLENGE=false - -# ------------------------------ -# Certbot Configuration -# ------------------------------ - -# Email address (required to get certificates from Let's Encrypt) -CERTBOT_EMAIL=your_email@example.com - -# Domain name -CERTBOT_DOMAIN=your_domain.com - -# certbot command options -# i.e: --force-renewal --dry-run --test-cert --debug -CERTBOT_OPTIONS= - -# ------------------------------ -# Environment Variables for SSRF Proxy -# ------------------------------ -SSRF_HTTP_PORT=3128 -SSRF_COREDUMP_DIR=/var/spool/squid -SSRF_REVERSE_PROXY_PORT=8194 -SSRF_SANDBOX_HOST=sandbox -SSRF_DEFAULT_TIME_OUT=5 -SSRF_DEFAULT_CONNECT_TIME_OUT=5 -SSRF_DEFAULT_READ_TIME_OUT=5 -SSRF_DEFAULT_WRITE_TIME_OUT=5 - -# ------------------------------ -# docker env var for specifying vector db type at startup -# (based on the vector db type, the corresponding docker -# compose profile will be used) -# if you want to use unstructured, add ',unstructured' to the end -# ------------------------------ -COMPOSE_PROFILES=${VECTOR_STORE:-weaviate} - -# ------------------------------ -# Docker Compose Service Expose Host Port Configurations -# ------------------------------ -EXPOSE_NGINX_PORT=80 -EXPOSE_NGINX_SSL_PORT=443 - -# ---------------------------------------------------------------------------- -# ModelProvider & Tool Position Configuration -# Used to specify the model providers and tools that can be used in the app. -# ---------------------------------------------------------------------------- - -# Pin, include, and exclude tools -# Use comma-separated values with no spaces between items. -# Example: POSITION_TOOL_PINS=bing,google -POSITION_TOOL_PINS= -POSITION_TOOL_INCLUDES= -POSITION_TOOL_EXCLUDES= - -# Pin, include, and exclude model providers -# Use comma-separated values with no spaces between items. 
-# Example: POSITION_PROVIDER_PINS=openai,openllm -POSITION_PROVIDER_PINS= -POSITION_PROVIDER_INCLUDES= -POSITION_PROVIDER_EXCLUDES= - -# CSP https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP -CSP_WHITELIST= - -# Enable or disable create tidb service job -CREATE_TIDB_SERVICE_JOB_ENABLED=false - -# Maximum number of submitted thread count in a ThreadPool for parallel node execution -MAX_SUBMIT_COUNT=100 - -# The maximum number of top-k value for RAG. -TOP_K_MAX_VALUE=10 - -# ------------------------------ -# Plugin Daemon Configuration -# ------------------------------ - -DB_PLUGIN_DATABASE=dify_plugin -EXPOSE_PLUGIN_DAEMON_PORT=5002 -PLUGIN_DAEMON_PORT=5002 -PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi -PLUGIN_DAEMON_URL=http://plugin_daemon:5002 -PLUGIN_MAX_PACKAGE_SIZE=52428800 -PLUGIN_PPROF_ENABLED=false - -PLUGIN_DEBUGGING_HOST=0.0.0.0 -PLUGIN_DEBUGGING_PORT=5003 -EXPOSE_PLUGIN_DEBUGGING_HOST=localhost -EXPOSE_PLUGIN_DEBUGGING_PORT=5003 - -# If this key is changed, DIFY_INNER_API_KEY in plugin_daemon service must also be updated or agent node will fail. -PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 -PLUGIN_DIFY_INNER_API_URL=http://api:5001 - -ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id} - -MARKETPLACE_ENABLED=true -MARKETPLACE_API_URL=https://marketplace.dify.ai - -FORCE_VERIFYING_SIGNATURE=true - -PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120 -PLUGIN_MAX_EXECUTION_TIMEOUT=600 -# PIP_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple -PIP_MIRROR_URL= - -# https://github.com/langgenius/dify-plugin-daemon/blob/main/.env.example -# Plugin storage type, local aws_s3 tencent_cos azure_blob aliyun_oss volcengine_tos -PLUGIN_STORAGE_TYPE=local -PLUGIN_STORAGE_LOCAL_ROOT=/app/storage -PLUGIN_WORKING_PATH=/app/storage/cwd -PLUGIN_INSTALLED_PATH=plugin -PLUGIN_PACKAGE_CACHE_PATH=plugin_packages -PLUGIN_MEDIA_CACHE_PATH=assets -# Plugin oss bucket -PLUGIN_STORAGE_OSS_BUCKET= -# Plugin oss s3 credentials -PLUGIN_S3_USE_AWS=false -PLUGIN_S3_USE_AWS_MANAGED_IAM=false -PLUGIN_S3_ENDPOINT= -PLUGIN_S3_USE_PATH_STYLE=false -PLUGIN_AWS_ACCESS_KEY= -PLUGIN_AWS_SECRET_KEY= -PLUGIN_AWS_REGION= -# Plugin oss azure blob -PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME= -PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING= -# Plugin oss tencent cos -PLUGIN_TENCENT_COS_SECRET_KEY= -PLUGIN_TENCENT_COS_SECRET_ID= -PLUGIN_TENCENT_COS_REGION= -# Plugin oss aliyun oss -PLUGIN_ALIYUN_OSS_REGION= -PLUGIN_ALIYUN_OSS_ENDPOINT= -PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID= -PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET= -PLUGIN_ALIYUN_OSS_AUTH_VERSION=v4 -PLUGIN_ALIYUN_OSS_PATH= -# Plugin oss volcengine tos -PLUGIN_VOLCENGINE_TOS_ENDPOINT= -PLUGIN_VOLCENGINE_TOS_ACCESS_KEY= -PLUGIN_VOLCENGINE_TOS_SECRET_KEY= -PLUGIN_VOLCENGINE_TOS_REGION= - -# ------------------------------ -# OTLP Collector Configuration -# ------------------------------ -ENABLE_OTEL=false -OTLP_TRACE_ENDPOINT= -OTLP_METRIC_ENDPOINT= -OTLP_BASE_ENDPOINT=http://localhost:4318 -OTLP_API_KEY= -OTEL_EXPORTER_OTLP_PROTOCOL= -OTEL_EXPORTER_TYPE=otlp -OTEL_SAMPLING_RATE=0.1 -OTEL_BATCH_EXPORT_SCHEDULE_DELAY=5000 -OTEL_MAX_QUEUE_SIZE=2048 -OTEL_MAX_EXPORT_BATCH_SIZE=512 -OTEL_METRIC_EXPORT_INTERVAL=60000 -OTEL_BATCH_EXPORT_TIMEOUT=10000 -OTEL_METRIC_EXPORT_TIMEOUT=30000 - -# Prevent Clickjacking -ALLOW_EMBED=false - -# Dataset queue monitor configuration -QUEUE_MONITOR_THRESHOLD=200 -# You can configure multiple ones, separated by commas. 
eg: test1@dify.ai,test2@dify.ai -QUEUE_MONITOR_ALERT_EMAILS= -# Monitor interval in minutes, default is 30 minutes -QUEUE_MONITOR_INTERVAL=30 From 62c34c4bc2ac801bf48f09a535e21c030e2c1b83 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Fri, 15 Aug 2025 09:01:18 +0800 Subject: [PATCH 08/36] refactor: unify pnpm version management with packageManager field (#23943) --- .github/workflows/style.yml | 4 +++- .github/workflows/translate-i18n-base-on-english.yml | 4 +++- .github/workflows/web-tests.yml | 4 +++- web/Dockerfile | 5 ++++- web/package.json | 1 + 5 files changed, 14 insertions(+), 4 deletions(-) diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index 54f3f42a25..9aad9558b0 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -82,7 +82,7 @@ jobs: - name: Install pnpm uses: pnpm/action-setup@v4 with: - version: 10 + package_json_file: web/package.json run_install: false - name: Setup NodeJS @@ -95,10 +95,12 @@ jobs: - name: Web dependencies if: steps.changed-files.outputs.any_changed == 'true' + working-directory: ./web run: pnpm install --frozen-lockfile - name: Web style check if: steps.changed-files.outputs.any_changed == 'true' + working-directory: ./web run: pnpm run lint docker-compose-template: diff --git a/.github/workflows/translate-i18n-base-on-english.yml b/.github/workflows/translate-i18n-base-on-english.yml index 4b06174ee1..c004836808 100644 --- a/.github/workflows/translate-i18n-base-on-english.yml +++ b/.github/workflows/translate-i18n-base-on-english.yml @@ -46,7 +46,7 @@ jobs: - name: Install pnpm uses: pnpm/action-setup@v4 with: - version: 10 + package_json_file: web/package.json run_install: false - name: Set up Node.js @@ -59,10 +59,12 @@ jobs: - name: Install dependencies if: env.FILES_CHANGED == 'true' + working-directory: ./web run: pnpm install --frozen-lockfile - name: Generate i18n translations if: env.FILES_CHANGED == 'true' + working-directory: ./web run: pnpm run auto-gen-i18n ${{ env.FILE_ARGS }} - name: Create Pull Request diff --git a/.github/workflows/web-tests.yml b/.github/workflows/web-tests.yml index c3f8fdbaf6..d104d69947 100644 --- a/.github/workflows/web-tests.yml +++ b/.github/workflows/web-tests.yml @@ -35,7 +35,7 @@ jobs: if: steps.changed-files.outputs.any_changed == 'true' uses: pnpm/action-setup@v4 with: - version: 10 + package_json_file: web/package.json run_install: false - name: Setup Node.js @@ -48,8 +48,10 @@ jobs: - name: Install dependencies if: steps.changed-files.outputs.any_changed == 'true' + working-directory: ./web run: pnpm install --frozen-lockfile - name: Run tests if: steps.changed-files.outputs.any_changed == 'true' + working-directory: ./web run: pnpm test diff --git a/web/Dockerfile b/web/Dockerfile index d59039528c..d284efca87 100644 --- a/web/Dockerfile +++ b/web/Dockerfile @@ -6,7 +6,7 @@ LABEL maintainer="takatost@gmail.com" # RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories RUN apk add --no-cache tzdata -RUN npm install -g pnpm@10.13.1 +RUN corepack enable ENV PNPM_HOME="/pnpm" ENV PATH="$PNPM_HOME:$PATH" @@ -19,6 +19,9 @@ WORKDIR /app/web COPY package.json . COPY pnpm-lock.yaml . 
+# Use packageManager from package.json +RUN corepack install + # if you located in China, you can use taobao registry to speed up # RUN pnpm install --frozen-lockfile --registry https://registry.npmmirror.com/ diff --git a/web/package.json b/web/package.json index a492104906..385ae6662d 100644 --- a/web/package.json +++ b/web/package.json @@ -2,6 +2,7 @@ "name": "dify-web", "version": "1.7.2", "private": true, + "packageManager": "pnpm@10.14.0", "engines": { "node": ">=v22.11.0" }, From 11fdcb18c6c5254156fb575e1499653933375f4d Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Fri, 15 Aug 2025 09:12:29 +0800 Subject: [PATCH 09/36] feature: add test for tool engine serialization (#23951) --- .../utils/test_tool_engine_serialization.py | 481 ++++++++++++++++++ 1 file changed, 481 insertions(+) create mode 100644 api/tests/unit_tests/core/tools/utils/test_tool_engine_serialization.py diff --git a/api/tests/unit_tests/core/tools/utils/test_tool_engine_serialization.py b/api/tests/unit_tests/core/tools/utils/test_tool_engine_serialization.py new file mode 100644 index 0000000000..4029edfb68 --- /dev/null +++ b/api/tests/unit_tests/core/tools/utils/test_tool_engine_serialization.py @@ -0,0 +1,481 @@ +import json +from datetime import date, datetime +from decimal import Decimal +from uuid import uuid4 + +import numpy as np +import pytest +import pytz + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.utils.message_transformer import ToolFileMessageTransformer, safe_json_dict, safe_json_value + + +class TestSafeJsonValue: + """Test suite for safe_json_value function to ensure proper serialization of complex types""" + + def test_datetime_conversion(self): + """Test datetime conversion with timezone handling""" + # Test datetime with UTC timezone + dt = datetime(2024, 1, 1, 12, 0, 0, tzinfo=pytz.UTC) + result = safe_json_value(dt) + assert isinstance(result, str) + assert "2024-01-01T12:00:00+00:00" in result + + # Test datetime without timezone (should default to UTC) + dt_no_tz = datetime(2024, 1, 1, 12, 0, 0) + result = safe_json_value(dt_no_tz) + assert isinstance(result, str) + # The exact time will depend on the system's timezone, so we check the format + assert "T" in result # ISO format separator + # Check that it's a valid ISO format datetime string + assert len(result) >= 19 # At least YYYY-MM-DDTHH:MM:SS + + def test_date_conversion(self): + """Test date conversion to ISO format""" + test_date = date(2024, 1, 1) + result = safe_json_value(test_date) + assert result == "2024-01-01" + + def test_uuid_conversion(self): + """Test UUID conversion to string""" + test_uuid = uuid4() + result = safe_json_value(test_uuid) + assert isinstance(result, str) + assert result == str(test_uuid) + + def test_decimal_conversion(self): + """Test Decimal conversion to float""" + test_decimal = Decimal("123.456") + result = safe_json_value(test_decimal) + assert result == 123.456 + assert isinstance(result, float) + + def test_bytes_conversion(self): + """Test bytes conversion with UTF-8 decoding""" + # Test valid UTF-8 bytes + test_bytes = b"Hello, World!" + result = safe_json_value(test_bytes) + assert result == "Hello, World!" 
+ + # Test invalid UTF-8 bytes (should fall back to hex) + invalid_bytes = b"\xff\xfe\xfd" + result = safe_json_value(invalid_bytes) + assert result == "fffefd" + + def test_memoryview_conversion(self): + """Test memoryview conversion to hex string""" + test_bytes = b"test data" + test_memoryview = memoryview(test_bytes) + result = safe_json_value(test_memoryview) + assert result == "746573742064617461" # hex of "test data" + + def test_numpy_ndarray_conversion(self): + """Test numpy ndarray conversion to list""" + # Test 1D array + test_array = np.array([1, 2, 3, 4]) + result = safe_json_value(test_array) + assert result == [1, 2, 3, 4] + + # Test 2D array + test_2d_array = np.array([[1, 2], [3, 4]]) + result = safe_json_value(test_2d_array) + assert result == [[1, 2], [3, 4]] + + # Test array with float values + test_float_array = np.array([1.5, 2.7, 3.14]) + result = safe_json_value(test_float_array) + assert result == [1.5, 2.7, 3.14] + + def test_dict_conversion(self): + """Test dictionary conversion using safe_json_dict""" + test_dict = { + "string": "value", + "number": 42, + "float": 3.14, + "boolean": True, + "list": [1, 2, 3], + "nested": {"key": "value"}, + } + result = safe_json_value(test_dict) + assert isinstance(result, dict) + assert result == test_dict + + def test_list_conversion(self): + """Test list conversion with mixed types""" + test_list = [ + "string", + 42, + 3.14, + True, + [1, 2, 3], + {"key": "value"}, + datetime(2024, 1, 1, 12, 0, 0, tzinfo=pytz.UTC), + Decimal("123.456"), + uuid4(), + ] + result = safe_json_value(test_list) + assert isinstance(result, list) + assert len(result) == len(test_list) + assert isinstance(result[6], str) # datetime should be converted to string + assert isinstance(result[7], float) # Decimal should be converted to float + assert isinstance(result[8], str) # UUID should be converted to string + + def test_tuple_conversion(self): + """Test tuple conversion to list""" + test_tuple = (1, "string", 3.14) + result = safe_json_value(test_tuple) + assert isinstance(result, list) + assert result == [1, "string", 3.14] + + def test_set_conversion(self): + """Test set conversion to list""" + test_set = {1, "string", 3.14} + result = safe_json_value(test_set) + assert isinstance(result, list) + # Note: set order is not guaranteed, so we check length and content + assert len(result) == 3 + assert 1 in result + assert "string" in result + assert 3.14 in result + + def test_basic_types_passthrough(self): + """Test that basic types are passed through unchanged""" + assert safe_json_value("string") == "string" + assert safe_json_value(42) == 42 + assert safe_json_value(3.14) == 3.14 + assert safe_json_value(True) is True + assert safe_json_value(False) is False + assert safe_json_value(None) is None + + def test_nested_complex_structure(self): + """Test complex nested structure with all types""" + complex_data = { + "dates": [date(2024, 1, 1), date(2024, 1, 2)], + "timestamps": [ + datetime(2024, 1, 1, 12, 0, 0, tzinfo=pytz.UTC), + datetime(2024, 1, 2, 12, 0, 0, tzinfo=pytz.UTC), + ], + "numbers": [Decimal("123.456"), Decimal("789.012")], + "identifiers": [uuid4(), uuid4()], + "binary_data": [b"hello", b"world"], + "arrays": [np.array([1, 2, 3]), np.array([4, 5, 6])], + } + + result = safe_json_value(complex_data) + + # Verify structure is maintained + assert isinstance(result, dict) + assert "dates" in result + assert "timestamps" in result + assert "numbers" in result + assert "identifiers" in result + assert "binary_data" in result + assert 
"arrays" in result + + # Verify conversions + assert all(isinstance(d, str) for d in result["dates"]) + assert all(isinstance(t, str) for t in result["timestamps"]) + assert all(isinstance(n, float) for n in result["numbers"]) + assert all(isinstance(i, str) for i in result["identifiers"]) + assert all(isinstance(b, str) for b in result["binary_data"]) + assert all(isinstance(a, list) for a in result["arrays"]) + + +class TestSafeJsonDict: + """Test suite for safe_json_dict function""" + + def test_valid_dict_conversion(self): + """Test valid dictionary conversion""" + test_dict = { + "string": "value", + "number": 42, + "datetime": datetime(2024, 1, 1, 12, 0, 0, tzinfo=pytz.UTC), + "decimal": Decimal("123.456"), + } + result = safe_json_dict(test_dict) + assert isinstance(result, dict) + assert result["string"] == "value" + assert result["number"] == 42 + assert isinstance(result["datetime"], str) + assert isinstance(result["decimal"], float) + + def test_invalid_input_type(self): + """Test that invalid input types raise TypeError""" + with pytest.raises(TypeError, match="safe_json_dict\\(\\) expects a dictionary \\(dict\\) as input"): + safe_json_dict("not a dict") + + with pytest.raises(TypeError, match="safe_json_dict\\(\\) expects a dictionary \\(dict\\) as input"): + safe_json_dict([1, 2, 3]) + + with pytest.raises(TypeError, match="safe_json_dict\\(\\) expects a dictionary \\(dict\\) as input"): + safe_json_dict(42) + + def test_empty_dict(self): + """Test empty dictionary handling""" + result = safe_json_dict({}) + assert result == {} + + def test_nested_dict_conversion(self): + """Test nested dictionary conversion""" + test_dict = { + "level1": { + "level2": {"datetime": datetime(2024, 1, 1, 12, 0, 0, tzinfo=pytz.UTC), "decimal": Decimal("123.456")} + } + } + result = safe_json_dict(test_dict) + assert isinstance(result["level1"]["level2"]["datetime"], str) + assert isinstance(result["level1"]["level2"]["decimal"], float) + + +class TestToolInvokeMessageJsonSerialization: + """Test suite for ToolInvokeMessage JSON serialization through safe_json_value""" + + def test_json_message_serialization(self): + """Test JSON message serialization with complex data""" + complex_data = { + "timestamp": datetime(2024, 1, 1, 12, 0, 0, tzinfo=pytz.UTC), + "amount": Decimal("123.45"), + "id": uuid4(), + "binary": b"test data", + "array": np.array([1, 2, 3]), + } + + # Create JSON message + json_message = ToolInvokeMessage.JsonMessage(json_object=complex_data) + message = ToolInvokeMessage(type=ToolInvokeMessage.MessageType.JSON, message=json_message) + + # Apply safe_json_value transformation + transformed_data = safe_json_value(message.message.json_object) + + # Verify transformations + assert isinstance(transformed_data["timestamp"], str) + assert isinstance(transformed_data["amount"], float) + assert isinstance(transformed_data["id"], str) + assert isinstance(transformed_data["binary"], str) + assert isinstance(transformed_data["array"], list) + + # Verify JSON serialization works + json_string = json.dumps(transformed_data, ensure_ascii=False) + assert isinstance(json_string, str) + + # Verify we can deserialize back + deserialized = json.loads(json_string) + assert deserialized["amount"] == 123.45 + assert deserialized["array"] == [1, 2, 3] + + def test_json_message_with_nested_structures(self): + """Test JSON message with deeply nested complex structures""" + nested_data = { + "level1": { + "level2": { + "level3": { + "dates": [date(2024, 1, 1), date(2024, 1, 2)], + "timestamps": 
[datetime(2024, 1, 1, 12, 0, 0, tzinfo=pytz.UTC)], + "numbers": [Decimal("1.1"), Decimal("2.2")], + "arrays": [np.array([1, 2]), np.array([3, 4])], + } + } + } + } + + json_message = ToolInvokeMessage.JsonMessage(json_object=nested_data) + message = ToolInvokeMessage(type=ToolInvokeMessage.MessageType.JSON, message=json_message) + + # Transform the data + transformed_data = safe_json_value(message.message.json_object) + + # Verify nested transformations + level3 = transformed_data["level1"]["level2"]["level3"] + assert all(isinstance(d, str) for d in level3["dates"]) + assert all(isinstance(t, str) for t in level3["timestamps"]) + assert all(isinstance(n, float) for n in level3["numbers"]) + assert all(isinstance(a, list) for a in level3["arrays"]) + + # Test JSON serialization + json_string = json.dumps(transformed_data, ensure_ascii=False) + assert isinstance(json_string, str) + + # Verify deserialization + deserialized = json.loads(json_string) + assert deserialized["level1"]["level2"]["level3"]["numbers"] == [1.1, 2.2] + + def test_json_message_transformer_integration(self): + """Test integration with ToolFileMessageTransformer for JSON messages""" + complex_data = { + "metadata": { + "created_at": datetime(2024, 1, 1, 12, 0, 0, tzinfo=pytz.UTC), + "version": Decimal("1.0"), + "tags": ["tag1", "tag2"], + }, + "data": {"values": np.array([1.1, 2.2, 3.3]), "binary": b"binary content"}, + } + + # Create message generator + def message_generator(): + json_message = ToolInvokeMessage.JsonMessage(json_object=complex_data) + message = ToolInvokeMessage(type=ToolInvokeMessage.MessageType.JSON, message=json_message) + yield message + + # Transform messages + transformed_messages = list( + ToolFileMessageTransformer.transform_tool_invoke_messages( + message_generator(), user_id="test_user", tenant_id="test_tenant" + ) + ) + + assert len(transformed_messages) == 1 + transformed_message = transformed_messages[0] + assert transformed_message.type == ToolInvokeMessage.MessageType.JSON + + # Verify the JSON object was transformed + json_obj = transformed_message.message.json_object + assert isinstance(json_obj["metadata"]["created_at"], str) + assert isinstance(json_obj["metadata"]["version"], float) + assert isinstance(json_obj["data"]["values"], list) + assert isinstance(json_obj["data"]["binary"], str) + + # Test final JSON serialization + final_json = json.dumps(json_obj, ensure_ascii=False) + assert isinstance(final_json, str) + + # Verify we can deserialize + deserialized = json.loads(final_json) + assert deserialized["metadata"]["version"] == 1.0 + assert deserialized["data"]["values"] == [1.1, 2.2, 3.3] + + def test_edge_cases_and_error_handling(self): + """Test edge cases and error handling in JSON serialization""" + # Test with None values + data_with_none = {"null_value": None, "empty_string": "", "zero": 0, "false_value": False} + + json_message = ToolInvokeMessage.JsonMessage(json_object=data_with_none) + message = ToolInvokeMessage(type=ToolInvokeMessage.MessageType.JSON, message=json_message) + + transformed_data = safe_json_value(message.message.json_object) + json_string = json.dumps(transformed_data, ensure_ascii=False) + + # Verify serialization works with edge cases + assert json_string is not None + deserialized = json.loads(json_string) + assert deserialized["null_value"] is None + assert deserialized["empty_string"] == "" + assert deserialized["zero"] == 0 + assert deserialized["false_value"] is False + + # Test with very large numbers + large_data = { + "large_int": 2**63 - 1, 
+ "large_float": 1.7976931348623157e308, + "small_float": 2.2250738585072014e-308, + } + + json_message = ToolInvokeMessage.JsonMessage(json_object=large_data) + message = ToolInvokeMessage(type=ToolInvokeMessage.MessageType.JSON, message=json_message) + + transformed_data = safe_json_value(message.message.json_object) + json_string = json.dumps(transformed_data, ensure_ascii=False) + + # Verify large numbers are handled correctly + deserialized = json.loads(json_string) + assert deserialized["large_int"] == 2**63 - 1 + assert deserialized["large_float"] == 1.7976931348623157e308 + assert deserialized["small_float"] == 2.2250738585072014e-308 + + +class TestEndToEndSerialization: + """Test suite for end-to-end serialization workflow""" + + def test_complete_workflow_with_real_data(self): + """Test complete workflow from complex data to JSON string and back""" + # Simulate real-world complex data structure + real_world_data = { + "user_profile": { + "id": uuid4(), + "name": "John Doe", + "email": "john@example.com", + "created_at": datetime(2024, 1, 1, 12, 0, 0, tzinfo=pytz.UTC), + "last_login": datetime(2024, 1, 15, 14, 30, 0, tzinfo=pytz.UTC), + "preferences": {"theme": "dark", "language": "en", "timezone": "UTC"}, + }, + "analytics": { + "session_count": 42, + "total_time": Decimal("123.45"), + "metrics": np.array([1.1, 2.2, 3.3, 4.4, 5.5]), + "events": [ + { + "timestamp": datetime(2024, 1, 1, 10, 0, 0, tzinfo=pytz.UTC), + "action": "login", + "duration": Decimal("5.67"), + }, + { + "timestamp": datetime(2024, 1, 1, 11, 0, 0, tzinfo=pytz.UTC), + "action": "logout", + "duration": Decimal("3600.0"), + }, + ], + }, + "files": [ + { + "id": uuid4(), + "name": "document.pdf", + "size": 1024, + "uploaded_at": datetime(2024, 1, 1, 9, 0, 0, tzinfo=pytz.UTC), + "checksum": b"abc123def456", + } + ], + } + + # Step 1: Create ToolInvokeMessage + json_message = ToolInvokeMessage.JsonMessage(json_object=real_world_data) + message = ToolInvokeMessage(type=ToolInvokeMessage.MessageType.JSON, message=json_message) + + # Step 2: Apply safe_json_value transformation + transformed_data = safe_json_value(message.message.json_object) + + # Step 3: Serialize to JSON string + json_string = json.dumps(transformed_data, ensure_ascii=False) + + # Step 4: Verify the string is valid JSON + assert isinstance(json_string, str) + assert json_string.startswith("{") + assert json_string.endswith("}") + + # Step 5: Deserialize back to Python object + deserialized_data = json.loads(json_string) + + # Step 6: Verify data integrity + assert deserialized_data["user_profile"]["name"] == "John Doe" + assert deserialized_data["user_profile"]["email"] == "john@example.com" + assert isinstance(deserialized_data["user_profile"]["created_at"], str) + assert isinstance(deserialized_data["analytics"]["total_time"], float) + assert deserialized_data["analytics"]["total_time"] == 123.45 + assert isinstance(deserialized_data["analytics"]["metrics"], list) + assert deserialized_data["analytics"]["metrics"] == [1.1, 2.2, 3.3, 4.4, 5.5] + assert isinstance(deserialized_data["files"][0]["checksum"], str) + + # Step 7: Verify all complex types were properly converted + self._verify_all_complex_types_converted(deserialized_data) + + def _verify_all_complex_types_converted(self, data): + """Helper method to verify all complex types were properly converted""" + if isinstance(data, dict): + for key, value in data.items(): + if key in ["id", "checksum"]: + # These should be strings (UUID/bytes converted) + assert isinstance(value, str) + elif key 
in ["created_at", "last_login", "timestamp", "uploaded_at"]: + # These should be strings (datetime converted) + assert isinstance(value, str) + elif key in ["total_time", "duration"]: + # These should be floats (Decimal converted) + assert isinstance(value, float) + elif key == "metrics": + # This should be a list (ndarray converted) + assert isinstance(value, list) + else: + # Recursively check nested structures + self._verify_all_complex_types_converted(value) + elif isinstance(data, list): + for item in data: + self._verify_all_complex_types_converted(item) From aa71173dbb7670146a188711ee0d1d087656bdb4 Mon Sep 17 00:00:00 2001 From: heyszt <270985384@qq.com> Date: Fri, 15 Aug 2025 09:13:41 +0800 Subject: [PATCH 10/36] Feat: External_trace_id compatible with OpenTelemetry (#23918) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/controllers/console/app/completion.py | 6 ++ api/controllers/console/app/workflow.py | 9 +++ api/core/helper/trace_id_helper.py | 66 ++++++++++++++++++- api/core/ops/aliyun_trace/aliyun_trace.py | 31 ++++++--- .../aliyun_trace/data_exporter/traceclient.py | 11 ++++ 5 files changed, 113 insertions(+), 10 deletions(-) diff --git a/api/controllers/console/app/completion.py b/api/controllers/console/app/completion.py index 732f5b799a..ad94112f05 100644 --- a/api/controllers/console/app/completion.py +++ b/api/controllers/console/app/completion.py @@ -1,6 +1,7 @@ import logging import flask_login +from flask import request from flask_restful import Resource, reqparse from werkzeug.exceptions import InternalServerError, NotFound @@ -24,6 +25,7 @@ from core.errors.error import ( ProviderTokenNotInitError, QuotaExceededError, ) +from core.helper.trace_id_helper import get_external_trace_id from core.model_runtime.errors.invoke import InvokeError from libs import helper from libs.helper import uuid_value @@ -115,6 +117,10 @@ class ChatMessageApi(Resource): streaming = args["response_mode"] != "blocking" args["auto_generate_name"] = False + external_trace_id = get_external_trace_id(request) + if external_trace_id: + args["external_trace_id"] = external_trace_id + account = flask_login.current_user try: diff --git a/api/controllers/console/app/workflow.py b/api/controllers/console/app/workflow.py index a9f088a276..c58301b300 100644 --- a/api/controllers/console/app/workflow.py +++ b/api/controllers/console/app/workflow.py @@ -23,6 +23,7 @@ from core.app.app_config.features.file_upload.manager import FileUploadConfigMan from core.app.apps.base_app_queue_manager import AppQueueManager from core.app.entities.app_invoke_entities import InvokeFrom from core.file.models import File +from core.helper.trace_id_helper import get_external_trace_id from extensions.ext_database import db from factories import file_factory, variable_factory from fields.workflow_fields import workflow_fields, workflow_pagination_fields @@ -185,6 +186,10 @@ class AdvancedChatDraftWorkflowRunApi(Resource): args = parser.parse_args() + external_trace_id = get_external_trace_id(request) + if external_trace_id: + args["external_trace_id"] = external_trace_id + try: response = AppGenerateService.generate( app_model=app_model, user=current_user, args=args, invoke_from=InvokeFrom.DEBUGGER, streaming=True @@ -373,6 +378,10 @@ class DraftWorkflowRunApi(Resource): parser.add_argument("files", type=list, required=False, location="json") args = parser.parse_args() + external_trace_id = get_external_trace_id(request) + if external_trace_id: + args["external_trace_id"] = 
external_trace_id + try: response = AppGenerateService.generate( app_model=app_model, diff --git a/api/core/helper/trace_id_helper.py b/api/core/helper/trace_id_helper.py index e90c3194f2..df42837796 100644 --- a/api/core/helper/trace_id_helper.py +++ b/api/core/helper/trace_id_helper.py @@ -16,15 +16,33 @@ def get_external_trace_id(request: Any) -> Optional[str]: """ Retrieve the trace_id from the request. - Priority: header ('X-Trace-Id'), then parameters, then JSON body. Returns None if not provided or invalid. + Priority: + 1. header ('X-Trace-Id') + 2. parameters + 3. JSON body + 4. Current OpenTelemetry context (if enabled) + 5. OpenTelemetry traceparent header (if present and valid) + + Returns None if no valid trace_id is provided. """ trace_id = request.headers.get("X-Trace-Id") + if not trace_id: trace_id = request.args.get("trace_id") + if not trace_id and getattr(request, "is_json", False): json_data = getattr(request, "json", None) if json_data: trace_id = json_data.get("trace_id") + + if not trace_id: + trace_id = get_trace_id_from_otel_context() + + if not trace_id: + traceparent = request.headers.get("traceparent") + if traceparent: + trace_id = parse_traceparent_header(traceparent) + if isinstance(trace_id, str) and is_valid_trace_id(trace_id): return trace_id return None @@ -40,3 +58,49 @@ def extract_external_trace_id_from_args(args: Mapping[str, Any]) -> dict: if trace_id: return {"external_trace_id": trace_id} return {} + + +def get_trace_id_from_otel_context() -> Optional[str]: + """ + Retrieve the current trace ID from the active OpenTelemetry trace context. + Returns None if: + 1. OpenTelemetry SDK is not installed or enabled. + 2. There is no active span or trace context. + """ + try: + from opentelemetry.trace import SpanContext, get_current_span + from opentelemetry.trace.span import INVALID_TRACE_ID + + span = get_current_span() + if not span: + return None + + span_context: SpanContext = span.get_span_context() + + if not span_context or span_context.trace_id == INVALID_TRACE_ID: + return None + + trace_id_hex = f"{span_context.trace_id:032x}" + return trace_id_hex + + except Exception: + return None + + +def parse_traceparent_header(traceparent: str) -> Optional[str]: + """ + Parse the `traceparent` header to extract the trace_id. 
+ + Expected format: + 'version-trace_id-span_id-flags' + + Reference: + W3C Trace Context Specification: https://www.w3.org/TR/trace-context/ + """ + try: + parts = traceparent.split("-") + if len(parts) == 4 and len(parts[1]) == 32: + return parts[1] + except Exception: + pass + return None diff --git a/api/core/ops/aliyun_trace/aliyun_trace.py b/api/core/ops/aliyun_trace/aliyun_trace.py index 06050619e9..82f54582ed 100644 --- a/api/core/ops/aliyun_trace/aliyun_trace.py +++ b/api/core/ops/aliyun_trace/aliyun_trace.py @@ -4,15 +4,15 @@ from collections.abc import Sequence from typing import Optional from urllib.parse import urljoin -from opentelemetry.trace import Status, StatusCode +from opentelemetry.trace import Link, Status, StatusCode from sqlalchemy.orm import Session, sessionmaker from core.ops.aliyun_trace.data_exporter.traceclient import ( TraceClient, convert_datetime_to_nanoseconds, - convert_string_to_id, convert_to_span_id, convert_to_trace_id, + create_link, generate_span_id, ) from core.ops.aliyun_trace.entities.aliyun_trace_entity import SpanData @@ -103,10 +103,11 @@ class AliyunDataTrace(BaseTraceInstance): def workflow_trace(self, trace_info: WorkflowTraceInfo): trace_id = convert_to_trace_id(trace_info.workflow_run_id) + links = [] if trace_info.trace_id: - trace_id = convert_string_to_id(trace_info.trace_id) + links.append(create_link(trace_id_str=trace_info.trace_id)) workflow_span_id = convert_to_span_id(trace_info.workflow_run_id, "workflow") - self.add_workflow_span(trace_id, workflow_span_id, trace_info) + self.add_workflow_span(trace_id, workflow_span_id, trace_info, links) workflow_node_executions = self.get_workflow_node_executions(trace_info) for node_execution in workflow_node_executions: @@ -132,8 +133,9 @@ class AliyunDataTrace(BaseTraceInstance): status = Status(StatusCode.ERROR, trace_info.error) trace_id = convert_to_trace_id(message_id) + links = [] if trace_info.trace_id: - trace_id = convert_string_to_id(trace_info.trace_id) + links.append(create_link(trace_id_str=trace_info.trace_id)) message_span_id = convert_to_span_id(message_id, "message") message_span = SpanData( @@ -152,6 +154,7 @@ class AliyunDataTrace(BaseTraceInstance): OUTPUT_VALUE: str(trace_info.outputs), }, status=status, + links=links, ) self.trace_client.add_span(message_span) @@ -192,8 +195,9 @@ class AliyunDataTrace(BaseTraceInstance): message_id = trace_info.message_id trace_id = convert_to_trace_id(message_id) + links = [] if trace_info.trace_id: - trace_id = convert_string_to_id(trace_info.trace_id) + links.append(create_link(trace_id_str=trace_info.trace_id)) documents_data = extract_retrieval_documents(trace_info.documents) dataset_retrieval_span = SpanData( @@ -211,6 +215,7 @@ class AliyunDataTrace(BaseTraceInstance): INPUT_VALUE: str(trace_info.inputs), OUTPUT_VALUE: json.dumps(documents_data, ensure_ascii=False), }, + links=links, ) self.trace_client.add_span(dataset_retrieval_span) @@ -224,8 +229,9 @@ class AliyunDataTrace(BaseTraceInstance): status = Status(StatusCode.ERROR, trace_info.error) trace_id = convert_to_trace_id(message_id) + links = [] if trace_info.trace_id: - trace_id = convert_string_to_id(trace_info.trace_id) + links.append(create_link(trace_id_str=trace_info.trace_id)) tool_span = SpanData( trace_id=trace_id, @@ -244,6 +250,7 @@ class AliyunDataTrace(BaseTraceInstance): OUTPUT_VALUE: str(trace_info.tool_outputs), }, status=status, + links=links, ) self.trace_client.add_span(tool_span) @@ -413,7 +420,9 @@ class AliyunDataTrace(BaseTraceInstance): 
status=self.get_workflow_node_status(node_execution), ) - def add_workflow_span(self, trace_id: int, workflow_span_id: int, trace_info: WorkflowTraceInfo): + def add_workflow_span( + self, trace_id: int, workflow_span_id: int, trace_info: WorkflowTraceInfo, links: Sequence[Link] + ): message_span_id = None if trace_info.message_id: message_span_id = convert_to_span_id(trace_info.message_id, "message") @@ -438,6 +447,7 @@ class AliyunDataTrace(BaseTraceInstance): OUTPUT_VALUE: json.dumps(trace_info.workflow_run_outputs, ensure_ascii=False), }, status=status, + links=links, ) self.trace_client.add_span(message_span) @@ -456,6 +466,7 @@ class AliyunDataTrace(BaseTraceInstance): OUTPUT_VALUE: json.dumps(trace_info.workflow_run_outputs, ensure_ascii=False), }, status=status, + links=links, ) self.trace_client.add_span(workflow_span) @@ -466,8 +477,9 @@ class AliyunDataTrace(BaseTraceInstance): status = Status(StatusCode.ERROR, trace_info.error) trace_id = convert_to_trace_id(message_id) + links = [] if trace_info.trace_id: - trace_id = convert_string_to_id(trace_info.trace_id) + links.append(create_link(trace_id_str=trace_info.trace_id)) suggested_question_span = SpanData( trace_id=trace_id, @@ -487,6 +499,7 @@ class AliyunDataTrace(BaseTraceInstance): OUTPUT_VALUE: json.dumps(trace_info.suggested_question, ensure_ascii=False), }, status=status, + links=links, ) self.trace_client.add_span(suggested_question_span) diff --git a/api/core/ops/aliyun_trace/data_exporter/traceclient.py b/api/core/ops/aliyun_trace/data_exporter/traceclient.py index bd19c8a503..3eb7c30d55 100644 --- a/api/core/ops/aliyun_trace/data_exporter/traceclient.py +++ b/api/core/ops/aliyun_trace/data_exporter/traceclient.py @@ -16,6 +16,7 @@ from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import ReadableSpan from opentelemetry.sdk.util.instrumentation import InstrumentationScope from opentelemetry.semconv.resource import ResourceAttributes +from opentelemetry.trace import Link, SpanContext, TraceFlags from configs import dify_config from core.ops.aliyun_trace.entities.aliyun_trace_entity import SpanData @@ -166,6 +167,16 @@ class SpanBuilder: return span +def create_link(trace_id_str: str) -> Link: + placeholder_span_id = 0x0000000000000000 + trace_id = int(trace_id_str, 16) + span_context = SpanContext( + trace_id=trace_id, span_id=placeholder_span_id, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED) + ) + + return Link(span_context) + + def generate_span_id() -> int: span_id = random.getrandbits(64) while span_id == INVALID_SPAN_ID: From e7a5268fddd4965fd39ff322afba5b6d762028d2 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Fri, 15 Aug 2025 10:38:03 +0800 Subject: [PATCH 11/36] Fix hover button contrast in dark mode for app and dataset cards (#23955) --- web/app/(commonLayout)/datasets/dataset-card.tsx | 4 ++-- web/app/components/apps/app-card.tsx | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/web/app/(commonLayout)/datasets/dataset-card.tsx b/web/app/(commonLayout)/datasets/dataset-card.tsx index 2f0563d47e..3e913ca52f 100644 --- a/web/app/(commonLayout)/datasets/dataset-card.tsx +++ b/web/app/(commonLayout)/datasets/dataset-card.tsx @@ -216,8 +216,8 @@ const DatasetCard = ({ } btnClassName={open => cn( - open ? '!bg-black/5 !shadow-none' : '!bg-transparent', - 'h-8 w-8 rounded-md border-none !p-2 hover:!bg-black/5', + open ? 
'!bg-state-base-hover !shadow-none' : '!bg-transparent', + 'h-8 w-8 rounded-md border-none !p-2 hover:!bg-state-base-hover', ) } className={'!z-20 h-fit !w-[128px]'} diff --git a/web/app/components/apps/app-card.tsx b/web/app/components/apps/app-card.tsx index 688da4c25d..ee9230af12 100644 --- a/web/app/components/apps/app-card.tsx +++ b/web/app/components/apps/app-card.tsx @@ -407,8 +407,8 @@ const AppCard = ({ app, onRefresh }: AppCardProps) => { } btnClassName={open => cn( - open ? '!bg-black/5 !shadow-none' : '!bg-transparent', - 'h-8 w-8 rounded-md border-none !p-2 hover:!bg-black/5', + open ? '!bg-state-base-hover !shadow-none' : '!bg-transparent', + 'h-8 w-8 rounded-md border-none !p-2 hover:!bg-state-base-hover', ) } popupClassName={ From f560116fb27a6b3edadb658e1a38033be31d7a7e Mon Sep 17 00:00:00 2001 From: xubo <51738289+xb565517830@users.noreply.github.com> Date: Fri, 15 Aug 2025 11:25:25 +0800 Subject: [PATCH 12/36] fix: 504 Gateway Time-out error on /console/api/version endpoint (#23961) --- api/controllers/console/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/controllers/console/version.py b/api/controllers/console/version.py index 8237ea3cdc..894785abc8 100644 --- a/api/controllers/console/version.py +++ b/api/controllers/console/version.py @@ -32,7 +32,7 @@ class VersionApi(Resource): return result try: - response = requests.get(check_update_url, {"current_version": args.get("current_version")}) + response = requests.get(check_update_url, {"current_version": args.get("current_version")}, timeout=(3, 10)) except Exception as error: logging.warning("Check update version error: %s.", str(error)) result["version"] = args.get("current_version") From 352776ba7719b0a437639c236fb8882f525b4b5e Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Fri, 15 Aug 2025 11:25:50 +0800 Subject: [PATCH 13/36] update: GitHub star fallback count to current value (#23957) --- web/app/components/header/github-star/index.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/app/components/header/github-star/index.tsx b/web/app/components/header/github-star/index.tsx index e825dcdd14..6d87328c25 100644 --- a/web/app/components/header/github-star/index.tsx +++ b/web/app/components/header/github-star/index.tsx @@ -5,7 +5,7 @@ import type { GithubRepo } from '@/models/common' import { RiLoader2Line } from '@remixicon/react' const defaultData = { - stargazers_count: 98570, + stargazers_count: 110918, } const getStar = async () => { From 821fe26b5641e31d074cb39efc98bfecd227a4fa Mon Sep 17 00:00:00 2001 From: Will Date: Fri, 15 Aug 2025 15:03:00 +0800 Subject: [PATCH 14/36] fix comparison with callable (#23978) --- api/services/conversation_service.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/api/services/conversation_service.py b/api/services/conversation_service.py index d76981a23f..4f3dd3c762 100644 --- a/api/services/conversation_service.py +++ b/api/services/conversation_service.py @@ -103,10 +103,10 @@ class ConversationService: @classmethod def _build_filter_condition(cls, sort_field: str, sort_direction: Callable, reference_conversation: Conversation): field_value = getattr(reference_conversation, sort_field) - if sort_direction == desc: + if sort_direction is desc: return getattr(Conversation, sort_field) < field_value - else: - return getattr(Conversation, sort_field) > field_value + + return getattr(Conversation, sort_field) > field_value @classmethod def rename( @@ -147,7 
+147,7 @@ class ConversationService: app_model.tenant_id, message.query, conversation.id, app_model.id ) conversation.name = name - except: + except Exception: pass db.session.commit() From 4031a46572d3969a140b9f9218705f88157c7ff1 Mon Sep 17 00:00:00 2001 From: Masashi Tomooka Date: Fri, 15 Aug 2025 16:18:53 +0900 Subject: [PATCH 15/36] doc: add deployment pattern using Amazon ECS and CDK (#23985) --- README.md | 3 ++- README_AR.md | 3 ++- README_BN.md | 3 ++- README_CN.md | 3 ++- README_DE.md | 3 ++- README_ES.md | 3 ++- README_FR.md | 3 ++- README_JA.md | 3 ++- README_KL.md | 3 ++- README_KR.md | 3 ++- README_PT.md | 3 ++- README_SI.md | 3 ++- README_TR.md | 3 ++- README_TW.md | 3 ++- README_VI.md | 3 ++- 15 files changed, 30 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 775f6f351f..80e44b0728 100644 --- a/README.md +++ b/README.md @@ -225,7 +225,8 @@ Deploy Dify to AWS with [CDK](https://aws.amazon.com/cdk/) ##### AWS -- [AWS CDK by @KevinZhao](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK by @KevinZhao (EKS based)](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK by @tmokmss (ECS based)](https://github.com/aws-samples/dify-self-hosted-on-aws) #### Using Alibaba Cloud Computing Nest diff --git a/README_AR.md b/README_AR.md index e7a4dbdb27..9c8378d087 100644 --- a/README_AR.md +++ b/README_AR.md @@ -208,7 +208,8 @@ docker compose up -d ##### AWS -- [AWS CDK بواسطة @KevinZhao](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK بواسطة @KevinZhao (EKS based)](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK بواسطة @tmokmss (ECS based)](https://github.com/aws-samples/dify-self-hosted-on-aws) #### استخدام Alibaba Cloud للنشر [بسرعة نشر Dify إلى سحابة علي بابا مع عش الحوسبة السحابية علي بابا](https://computenest.console.aliyun.com/service/instance/create/default?type=user&ServiceName=Dify%E7%A4%BE%E5%8C%BA%E7%89%88) diff --git a/README_BN.md b/README_BN.md index e4da437eff..a31aafdf56 100644 --- a/README_BN.md +++ b/README_BN.md @@ -225,7 +225,8 @@ GitHub-এ ডিফাইকে স্টার দিয়ে রাখুন ##### AWS -- [AWS CDK by @KevinZhao](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK by @KevinZhao (EKS based)](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK by @tmokmss (ECS based)](https://github.com/aws-samples/dify-self-hosted-on-aws) #### Alibaba Cloud ব্যবহার করে ডিপ্লয় diff --git a/README_CN.md b/README_CN.md index 82149519d3..0698693429 100644 --- a/README_CN.md +++ b/README_CN.md @@ -223,7 +223,8 @@ docker compose up -d 使用 [CDK](https://aws.amazon.com/cdk/) 将 Dify 部署到 AWS ##### AWS -- [AWS CDK by @KevinZhao](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK by @KevinZhao (EKS based)](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK by @tmokmss (ECS based)](https://github.com/aws-samples/dify-self-hosted-on-aws) #### 使用 阿里云计算巢 部署 diff --git a/README_DE.md b/README_DE.md index 2420ac0392..392cc7885e 100644 --- a/README_DE.md +++ b/README_DE.md @@ -220,7 +220,8 @@ Stellen Sie Dify mit nur einem Klick mithilfe von [terraform](https://www.terraf Bereitstellung von Dify auf AWS mit [CDK](https://aws.amazon.com/cdk/) ##### AWS -- [AWS CDK by @KevinZhao](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK by @KevinZhao (EKS based)](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK by @tmokmss 
(ECS based)](https://github.com/aws-samples/dify-self-hosted-on-aws) #### Alibaba Cloud diff --git a/README_ES.md b/README_ES.md index 4fa59dc18f..859da5bfd7 100644 --- a/README_ES.md +++ b/README_ES.md @@ -220,7 +220,8 @@ Despliega Dify en una plataforma en la nube con un solo clic utilizando [terrafo Despliegue Dify en AWS usando [CDK](https://aws.amazon.com/cdk/) ##### AWS -- [AWS CDK por @KevinZhao](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK por @KevinZhao (EKS based)](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK por @tmokmss (ECS based)](https://github.com/aws-samples/dify-self-hosted-on-aws) #### Alibaba Cloud diff --git a/README_FR.md b/README_FR.md index dcbc869620..fcadad419b 100644 --- a/README_FR.md +++ b/README_FR.md @@ -218,7 +218,8 @@ Déployez Dify sur une plateforme cloud en un clic en utilisant [terraform](http Déployez Dify sur AWS en utilisant [CDK](https://aws.amazon.com/cdk/) ##### AWS -- [AWS CDK par @KevinZhao](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK par @KevinZhao (EKS based)](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK par @tmokmss (ECS based)](https://github.com/aws-samples/dify-self-hosted-on-aws) #### Alibaba Cloud diff --git a/README_JA.md b/README_JA.md index d840fd6419..6ddc30789c 100644 --- a/README_JA.md +++ b/README_JA.md @@ -219,7 +219,8 @@ docker compose up -d [CDK](https://aws.amazon.com/cdk/) を使用して、DifyをAWSにデプロイします ##### AWS -- [@KevinZhaoによるAWS CDK](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [@KevinZhaoによるAWS CDK (EKS based)](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [@tmokmssによるAWS CDK (ECS based)](https://github.com/aws-samples/dify-self-hosted-on-aws) #### Alibaba Cloud [Alibaba Cloud Computing Nest](https://computenest.console.aliyun.com/service/instance/create/default?type=user&ServiceName=Dify%E7%A4%BE%E5%8C%BA%E7%89%88) diff --git a/README_KL.md b/README_KL.md index 41c7969e1c..7232da8003 100644 --- a/README_KL.md +++ b/README_KL.md @@ -218,7 +218,8 @@ wa'logh nIqHom neH ghun deployment toy'wI' [terraform](https://www.terraform.io/ wa'logh nIqHom neH ghun deployment toy'wI' [CDK](https://aws.amazon.com/cdk/) lo'laH. 
##### AWS -- [AWS CDK qachlot @KevinZhao](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK qachlot @KevinZhao (EKS based)](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK qachlot @tmokmss (ECS based)](https://github.com/aws-samples/dify-self-hosted-on-aws) #### Alibaba Cloud diff --git a/README_KR.md b/README_KR.md index d4b31a8928..74010d43ed 100644 --- a/README_KR.md +++ b/README_KR.md @@ -212,7 +212,8 @@ Dify를 Kubernetes에 배포하고 프리미엄 스케일링 설정을 구성했 [CDK](https://aws.amazon.com/cdk/)를 사용하여 AWS에 Dify 배포 ##### AWS -- [KevinZhao의 AWS CDK](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [KevinZhao의 AWS CDK (EKS based)](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [tmokmss의 AWS CDK (ECS based)](https://github.com/aws-samples/dify-self-hosted-on-aws) #### Alibaba Cloud diff --git a/README_PT.md b/README_PT.md index 94452cb233..f9e3ef7f4b 100644 --- a/README_PT.md +++ b/README_PT.md @@ -217,7 +217,8 @@ Implante o Dify na Plataforma Cloud com um único clique usando [terraform](http Implante o Dify na AWS usando [CDK](https://aws.amazon.com/cdk/) ##### AWS -- [AWS CDK por @KevinZhao](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK por @KevinZhao (EKS based)](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK por @tmokmss (ECS based)](https://github.com/aws-samples/dify-self-hosted-on-aws) #### Alibaba Cloud diff --git a/README_SI.md b/README_SI.md index d840e9155f..ac16df798b 100644 --- a/README_SI.md +++ b/README_SI.md @@ -218,7 +218,8 @@ namestite Dify v Cloud Platform z enim klikom z uporabo [terraform](https://www. Uvedite Dify v AWS z uporabo [CDK](https://aws.amazon.com/cdk/) ##### AWS -- [AWS CDK by @KevinZhao](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK by @KevinZhao (EKS based)](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK by @tmokmss (ECS based)](https://github.com/aws-samples/dify-self-hosted-on-aws) #### Alibaba Cloud diff --git a/README_TR.md b/README_TR.md index 470a7570e0..8065ec908c 100644 --- a/README_TR.md +++ b/README_TR.md @@ -211,7 +211,8 @@ Dify'ı bulut platformuna tek tıklamayla dağıtın [terraform](https://www.ter [CDK](https://aws.amazon.com/cdk/) kullanarak Dify'ı AWS'ye dağıtın ##### AWS -- [AWS CDK tarafından @KevinZhao](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK tarafından @KevinZhao (EKS based)](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK tarafından @tmokmss (ECS based)](https://github.com/aws-samples/dify-self-hosted-on-aws) #### Alibaba Cloud diff --git a/README_TW.md b/README_TW.md index 18f1d2754a..c36027183c 100644 --- a/README_TW.md +++ b/README_TW.md @@ -223,7 +223,8 @@ Dify 的所有功能都提供相應的 API,因此您可以輕鬆地將 Dify ### AWS -- [由 @KevinZhao 提供的 AWS CDK](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [由 @KevinZhao 提供的 AWS CDK (EKS based)](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [由 @tmokmss 提供的 AWS CDK (ECS based)](https://github.com/aws-samples/dify-self-hosted-on-aws) #### 使用 阿里云计算巢進行部署 diff --git a/README_VI.md b/README_VI.md index 2ab6da80fc..958a70114a 100644 --- a/README_VI.md +++ b/README_VI.md @@ -213,7 +213,8 @@ Triển khai Dify lên nền tảng đám mây với một cú nhấp chuột b Triển khai Dify trên AWS bằng [CDK](https://aws.amazon.com/cdk/) ##### AWS -- [AWS CDK bởi 
@KevinZhao](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK bởi @KevinZhao (EKS based)](https://github.com/aws-samples/solution-for-deploying-dify-on-aws) +- [AWS CDK bởi @tmokmss (ECS based)](https://github.com/aws-samples/dify-self-hosted-on-aws) #### Alibaba Cloud From 658157e9a1fded47732688083fbdefadeafba784 Mon Sep 17 00:00:00 2001 From: Will Date: Fri, 15 Aug 2025 15:19:30 +0800 Subject: [PATCH 16/36] chore: improved type annotations in MCP-related codes (#23984) --- api/core/mcp/client/sse_client.py | 7 +++-- api/core/mcp/server/streamable_http.py | 7 +++-- api/core/mcp/utils.py | 42 +++++++++++++++++++++----- 3 files changed, 43 insertions(+), 13 deletions(-) diff --git a/api/core/mcp/client/sse_client.py b/api/core/mcp/client/sse_client.py index 2d3a3f5344..c6fe768a60 100644 --- a/api/core/mcp/client/sse_client.py +++ b/api/core/mcp/client/sse_client.py @@ -7,6 +7,7 @@ from typing import Any, TypeAlias, final from urllib.parse import urljoin, urlparse import httpx +from httpx_sse import EventSource, ServerSentEvent from sseclient import SSEClient from core.mcp import types @@ -114,7 +115,7 @@ class SSETransport: logger.exception("Error parsing server message") read_queue.put(exc) - def _handle_sse_event(self, sse, read_queue: ReadQueue, status_queue: StatusQueue) -> None: + def _handle_sse_event(self, sse: ServerSentEvent, read_queue: ReadQueue, status_queue: StatusQueue) -> None: """Handle a single SSE event. Args: @@ -130,7 +131,7 @@ class SSETransport: case _: logger.warning("Unknown SSE event: %s", sse.event) - def sse_reader(self, event_source, read_queue: ReadQueue, status_queue: StatusQueue) -> None: + def sse_reader(self, event_source: EventSource, read_queue: ReadQueue, status_queue: StatusQueue) -> None: """Read and process SSE events. Args: @@ -225,7 +226,7 @@ class SSETransport: self, executor: ThreadPoolExecutor, client: httpx.Client, - event_source, + event_source: EventSource, ) -> tuple[ReadQueue, WriteQueue]: """Establish connection and start worker threads. diff --git a/api/core/mcp/server/streamable_http.py b/api/core/mcp/server/streamable_http.py index 496b5432a0..efe91bbff4 100644 --- a/api/core/mcp/server/streamable_http.py +++ b/api/core/mcp/server/streamable_http.py @@ -16,13 +16,14 @@ from extensions.ext_database import db from models.model import App, AppMCPServer, AppMode, EndUser from services.app_generate_service import AppGenerateService -""" -Apply to MCP HTTP streamable server with stateless http -""" logger = logging.getLogger(__name__) class MCPServerStreamableHTTPRequestHandler: + """ + Apply to MCP HTTP streamable server with stateless http + """ + def __init__( self, app: App, request: types.ClientRequest | types.ClientNotification, user_input_form: list[VariableEntity] ): diff --git a/api/core/mcp/utils.py b/api/core/mcp/utils.py index a54badcd4c..80912bc4c1 100644 --- a/api/core/mcp/utils.py +++ b/api/core/mcp/utils.py @@ -1,6 +1,10 @@ import json +from collections.abc import Generator +from contextlib import AbstractContextManager import httpx +import httpx_sse +from httpx_sse import connect_sse from configs import dify_config from core.mcp.types import ErrorData, JSONRPCError @@ -55,20 +59,42 @@ def create_ssrf_proxy_mcp_http_client( ) -def ssrf_proxy_sse_connect(url, **kwargs): +def ssrf_proxy_sse_connect(url: str, **kwargs) -> AbstractContextManager[httpx_sse.EventSource]: """Connect to SSE endpoint with SSRF proxy protection. 
This function creates an SSE connection using the configured proxy settings - to prevent SSRF attacks when connecting to external endpoints. + to prevent SSRF attacks when connecting to external endpoints. It returns + a context manager that yields an EventSource object for SSE streaming. + + The function handles HTTP client creation and cleanup automatically, but + also accepts a pre-configured client via kwargs. Args: - url: The SSE endpoint URL - **kwargs: Additional arguments passed to the SSE connection + url (str): The SSE endpoint URL to connect to + **kwargs: Additional arguments passed to the SSE connection, including: + - client (httpx.Client, optional): Pre-configured HTTP client. + If not provided, one will be created with SSRF protection. + - method (str, optional): HTTP method to use, defaults to "GET" + - headers (dict, optional): HTTP headers to include in the request + - timeout (httpx.Timeout, optional): Timeout configuration for the connection Returns: - EventSource object for SSE streaming + AbstractContextManager[httpx_sse.EventSource]: A context manager that yields an EventSource + object for SSE streaming. The EventSource provides access to server-sent events. + + Example: + ```python + with ssrf_proxy_sse_connect(url, headers=headers) as event_source: + for sse in event_source.iter_sse(): + print(sse.event, sse.data) + ``` + + Note: + If a client is not provided in kwargs, one will be automatically created + with SSRF protection based on the application's configuration. If an + exception occurs during connection, any automatically created client + will be cleaned up automatically. """ - from httpx_sse import connect_sse # Extract client if provided, otherwise create one client = kwargs.pop("client", None) @@ -101,7 +127,9 @@ def ssrf_proxy_sse_connect(url, **kwargs): raise -def create_mcp_error_response(request_id: int | str | None, code: int, message: str, data=None): +def create_mcp_error_response( + request_id: int | str | None, code: int, message: str, data=None +) -> Generator[bytes, None, None]: """Create MCP error response""" error_data = ErrorData(code=code, message=message, data=data) json_response = JSONRPCError( From 462ba8f41667154f840bf939227b5f89a2f1d901 Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Fri, 15 Aug 2025 00:25:11 -0700 Subject: [PATCH 17/36] chore: add configurable stdio buffer sizes for plugins in compose file (#23980) --- docker/.env.example | 5 ++++- docker/docker-compose-template.yaml | 2 ++ docker/docker-compose.yaml | 4 ++++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/docker/.env.example b/docker/.env.example index 7a435ad66c..bd614f78f1 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -861,7 +861,7 @@ WORKFLOW_NODE_EXECUTION_STORAGE=rdbms # Repository configuration # Core workflow execution repository implementation -# Options: +# Options: # - core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository (default) # - core.repositories.celery_workflow_execution_repository.CeleryWorkflowExecutionRepository CORE_WORKFLOW_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository @@ -1157,6 +1157,9 @@ MARKETPLACE_API_URL=https://marketplace.dify.ai FORCE_VERIFYING_SIGNATURE=true +PLUGIN_STDIO_BUFFER_SIZE=1024 +PLUGIN_STDIO_MAX_BUFFER_SIZE=5242880 + PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120 PLUGIN_MAX_EXECUTION_TIMEOUT=600 # 
PIP_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple diff --git a/docker/docker-compose-template.yaml b/docker/docker-compose-template.yaml index 1dbd9b3993..04981f6b7f 100644 --- a/docker/docker-compose-template.yaml +++ b/docker/docker-compose-template.yaml @@ -181,6 +181,8 @@ services: FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true} PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120} PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600} + PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024} + PLUGIN_STDIO_MAX_BUFFER_SIZE: ${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880} PIP_MIRROR_URL: ${PIP_MIRROR_URL:-} PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local} PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage} diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 101f8eb323..afb20cb53b 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -506,6 +506,8 @@ x-shared-env: &shared-api-worker-env MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true} MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true} + PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024} + PLUGIN_STDIO_MAX_BUFFER_SIZE: ${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880} PLUGIN_PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120} PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600} PIP_MIRROR_URL: ${PIP_MIRROR_URL:-} @@ -747,6 +749,8 @@ services: FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true} PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120} PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600} + PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024} + PLUGIN_STDIO_MAX_BUFFER_SIZE: ${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880} PIP_MIRROR_URL: ${PIP_MIRROR_URL:-} PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local} PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage} From c9e18346ce89b201c9ce7d6d588ad3441c410c34 Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Fri, 15 Aug 2025 16:28:41 +0800 Subject: [PATCH 18/36] Chore: remove empty files and unused code (#23990) Co-authored-by: Yongtao Huang <99629139+hyongtao-db@users.noreply.github.com> --- api/core/mcp/auth/auth_provider.py | 2 -- api/core/mcp/client/sse_client.py | 5 ----- api/core/tools/entities/agent_entities.py | 0 api/core/tools/entities/file_entities.py | 1 - .../unit_tests/core/mcp/client/test_sse.py | 20 ------------------- 5 files changed, 28 deletions(-) delete mode 100644 api/core/tools/entities/agent_entities.py delete mode 100644 api/core/tools/entities/file_entities.py diff --git a/api/core/mcp/auth/auth_provider.py b/api/core/mcp/auth/auth_provider.py index 00d5a25956..bad99fc092 100644 --- a/api/core/mcp/auth/auth_provider.py +++ b/api/core/mcp/auth/auth_provider.py @@ -10,8 +10,6 @@ from core.mcp.types import ( from models.tools import MCPToolProvider from services.tools.mcp_tools_manage_service import MCPToolManageService -LATEST_PROTOCOL_VERSION = "1.0" - class OAuthClientProvider: mcp_provider: MCPToolProvider diff --git a/api/core/mcp/client/sse_client.py b/api/core/mcp/client/sse_client.py index c6fe768a60..cc38954eca 100644 --- a/api/core/mcp/client/sse_client.py +++ b/api/core/mcp/client/sse_client.py @@ -38,11 +38,6 @@ WriteQueue: TypeAlias = queue.Queue[SessionMessage | Exception | None] StatusQueue: TypeAlias = queue.Queue[_StatusReady | _StatusError] -def remove_request_params(url: str) -> 
str: - """Remove request parameters from URL, keeping only the path.""" - return urljoin(url, urlparse(url).path) - - class SSETransport: """SSE client transport implementation.""" diff --git a/api/core/tools/entities/agent_entities.py b/api/core/tools/entities/agent_entities.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/tools/entities/file_entities.py b/api/core/tools/entities/file_entities.py deleted file mode 100644 index 8b13789179..0000000000 --- a/api/core/tools/entities/file_entities.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/api/tests/unit_tests/core/mcp/client/test_sse.py b/api/tests/unit_tests/core/mcp/client/test_sse.py index 8122cd08eb..880a0d4940 100644 --- a/api/tests/unit_tests/core/mcp/client/test_sse.py +++ b/api/tests/unit_tests/core/mcp/client/test_sse.py @@ -262,26 +262,6 @@ def test_sse_client_queue_cleanup(): # Note: In real implementation, cleanup should put None to signal shutdown -def test_sse_client_url_processing(): - """Test SSE client URL processing functions.""" - from core.mcp.client.sse_client import remove_request_params - - # Test URL with parameters - url_with_params = "http://example.com/sse?param1=value1¶m2=value2" - cleaned_url = remove_request_params(url_with_params) - assert cleaned_url == "http://example.com/sse" - - # Test URL without parameters - url_without_params = "http://example.com/sse" - cleaned_url = remove_request_params(url_without_params) - assert cleaned_url == "http://example.com/sse" - - # Test URL with path and parameters - complex_url = "http://example.com/path/to/sse?session=123&token=abc" - cleaned_url = remove_request_params(complex_url) - assert cleaned_url == "http://example.com/path/to/sse" - - def test_sse_client_headers_propagation(): """Test that custom headers are properly propagated in SSE client.""" test_url = "http://test.example/sse" From 4b9812ce6a32b4f6524ca661ea4798cacc25f5d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=80=90=E5=B0=8F=E5=BF=83?= Date: Fri, 15 Aug 2025 18:23:42 +0800 Subject: [PATCH 19/36] fix: move database service call inside session context in workflow draft variable API (#23996) --- api/controllers/console/app/workflow_draft_variable.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/api/controllers/console/app/workflow_draft_variable.py b/api/controllers/console/app/workflow_draft_variable.py index ba93f82756..414c07ef50 100644 --- a/api/controllers/console/app/workflow_draft_variable.py +++ b/api/controllers/console/app/workflow_draft_variable.py @@ -163,11 +163,11 @@ class WorkflowVariableCollectionApi(Resource): draft_var_srv = WorkflowDraftVariableService( session=session, ) - workflow_vars = draft_var_srv.list_variables_without_values( - app_id=app_model.id, - page=args.page, - limit=args.limit, - ) + workflow_vars = draft_var_srv.list_variables_without_values( + app_id=app_model.id, + page=args.page, + limit=args.limit, + ) return workflow_vars From 8b601a983cf5402c1c5895d27a7e4e7ed33f2132 Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Sat, 16 Aug 2025 11:08:30 +0800 Subject: [PATCH 20/36] Fix missing user_id in trace_manager (#24024) --- api/core/app/apps/chat/app_generator.py | 4 +++- api/core/app/apps/completion/app_generator.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/api/core/app/apps/chat/app_generator.py b/api/core/app/apps/chat/app_generator.py index 0c76cc39ae..c273776eb1 100644 --- a/api/core/app/apps/chat/app_generator.py +++ b/api/core/app/apps/chat/app_generator.py @@ -140,7 
+140,9 @@ class ChatAppGenerator(MessageBasedAppGenerator): ) # get tracing instance - trace_manager = TraceQueueManager(app_id=app_model.id) + trace_manager = TraceQueueManager( + app_id=app_model.id, user_id=user.id if isinstance(user, Account) else user.session_id + ) # init application generate entity application_generate_entity = ChatAppGenerateEntity( diff --git a/api/core/app/apps/completion/app_generator.py b/api/core/app/apps/completion/app_generator.py index 9356bd1cea..64dade2968 100644 --- a/api/core/app/apps/completion/app_generator.py +++ b/api/core/app/apps/completion/app_generator.py @@ -124,7 +124,9 @@ class CompletionAppGenerator(MessageBasedAppGenerator): ) # get tracing instance - trace_manager = TraceQueueManager(app_model.id) + trace_manager = TraceQueueManager( + app_id=app_model.id, user_id=user.id if isinstance(user, Account) else user.session_id + ) # init application generate entity application_generate_entity = CompletionAppGenerateEntity( From af10b3c5fa49b93b56468370fd3986ea3c7586d3 Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Sat, 16 Aug 2025 19:28:31 +0800 Subject: [PATCH 21/36] Fix: add 'api_key' alias for backward compatibility (#24022) Signed-off-by: Yongtao Huang Co-authored-by: Yongtao Huang <99629139+hyongtao-db@users.noreply.github.com> --- api/core/tools/entities/tool_entities.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/api/core/tools/entities/tool_entities.py b/api/core/tools/entities/tool_entities.py index 5377cbbb69..5ffba07b44 100644 --- a/api/core/tools/entities/tool_entities.py +++ b/api/core/tools/entities/tool_entities.py @@ -108,10 +108,18 @@ class ApiProviderAuthType(Enum): :param value: mode value :return: mode """ + # 'api_key' deprecated in PR #21656 + # normalize & tiny alias for backward compatibility + v = (value or "").strip().lower() + if v == "api_key": + v = cls.API_KEY_HEADER.value + for mode in cls: - if mode.value == value: + if mode.value == v: return mode - raise ValueError(f"invalid mode value {value}") + + valid = ", ".join(m.value for m in cls) + raise ValueError(f"invalid mode value '{value}', expected one of: {valid}") class ToolInvokeMessage(BaseModel): From 32fa817eaa3642b8d1d4d6f65c605c6f8cf6e9cd Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Sat, 16 Aug 2025 19:29:19 +0800 Subject: [PATCH 22/36] Update mypy.ini (#24014) Co-authored-by: Yongtao Huang <99629139+hyongtao-db@users.noreply.github.com> Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/events/{event_handlers => }/document_index_event.py | 0 api/events/event_handlers/create_document_index.py | 2 +- api/mypy.ini | 3 +-- 3 files changed, 2 insertions(+), 3 deletions(-) rename api/events/{event_handlers => }/document_index_event.py (100%) diff --git a/api/events/event_handlers/document_index_event.py b/api/events/document_index_event.py similarity index 100% rename from api/events/event_handlers/document_index_event.py rename to api/events/document_index_event.py diff --git a/api/events/event_handlers/create_document_index.py b/api/events/event_handlers/create_document_index.py index bdb69945f0..c607161e2a 100644 --- a/api/events/event_handlers/create_document_index.py +++ b/api/events/event_handlers/create_document_index.py @@ -5,7 +5,7 @@ import click from werkzeug.exceptions import NotFound from core.indexing_runner import DocumentIsPausedError, IndexingRunner -from events.event_handlers.document_index_event import document_index_created +from 
events.document_index_event import document_index_created from extensions.ext_database import db from libs.datetime_utils import naive_utc_now from models.dataset import Document diff --git a/api/mypy.ini b/api/mypy.ini index 6836b2602b..3a6a54afe1 100644 --- a/api/mypy.ini +++ b/api/mypy.ini @@ -5,8 +5,7 @@ check_untyped_defs = True cache_fine_grained = True sqlite_cache = True exclude = (?x)( - core/model_runtime/model_providers/ - | tests/ + tests/ | migrations/ ) From d92ddc4dd4f4ee0cda17b956c0142ce08d8859f4 Mon Sep 17 00:00:00 2001 From: kurokobo Date: Sun, 17 Aug 2025 10:32:57 +0900 Subject: [PATCH 23/36] chore(i18n): correct japanese translation (#24041) --- web/i18n/ja-JP/app-annotation.ts | 2 +- web/i18n/ja-JP/common.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/web/i18n/ja-JP/app-annotation.ts b/web/i18n/ja-JP/app-annotation.ts index 6d7edf7077..3ccbcc30c6 100644 --- a/web/i18n/ja-JP/app-annotation.ts +++ b/web/i18n/ja-JP/app-annotation.ts @@ -17,7 +17,7 @@ const translation = { bulkImport: '一括インポート', bulkExport: '一括エクスポート', clearAll: 'すべて削除', - clearAllConfirm: 'すべての寸法を削除?', + clearAllConfirm: 'すべての注釈を削除しますか?', }, }, editModal: { diff --git a/web/i18n/ja-JP/common.ts b/web/i18n/ja-JP/common.ts index d0a6b64d6e..74246270e8 100644 --- a/web/i18n/ja-JP/common.ts +++ b/web/i18n/ja-JP/common.ts @@ -565,7 +565,7 @@ const translation = { overview: '監視', promptEng: 'オーケストレート', apiAccess: 'API アクセス', - logAndAnn: 'ログ&アナウンス', + logAndAnn: 'ログ&注釈', logs: 'ログ', }, environment: { From 0a9af4519462e78548142e689f2a6998ea63687e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=B9=9B=E9=9C=B2=E5=85=88=E7=94=9F?= Date: Sun, 17 Aug 2025 09:33:22 +0800 Subject: [PATCH 24/36] no used function for message_queue. (#24027) Signed-off-by: zhanluxianshen Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/core/app/apps/message_based_app_queue_manager.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/api/core/app/apps/message_based_app_queue_manager.py b/api/core/app/apps/message_based_app_queue_manager.py index 8507f23f17..4100a0d5a9 100644 --- a/api/core/app/apps/message_based_app_queue_manager.py +++ b/api/core/app/apps/message_based_app_queue_manager.py @@ -6,7 +6,6 @@ from core.app.entities.queue_entities import ( MessageQueueMessage, QueueAdvancedChatMessageEndEvent, QueueErrorEvent, - QueueMessage, QueueMessageEndEvent, QueueStopEvent, ) @@ -22,15 +21,6 @@ class MessageBasedAppQueueManager(AppQueueManager): self._app_mode = app_mode self._message_id = str(message_id) - def construct_queue_message(self, event: AppQueueEvent) -> QueueMessage: - return MessageQueueMessage( - task_id=self._task_id, - message_id=self._message_id, - conversation_id=self._conversation_id, - app_mode=self._app_mode, - event=event, - ) - def _publish(self, event: AppQueueEvent, pub_from: PublishFrom) -> None: """ Publish event to queue From c69634085deb300ed9c8927405dc95718c880597 Mon Sep 17 00:00:00 2001 From: kurokobo Date: Mon, 18 Aug 2025 00:14:37 +0900 Subject: [PATCH 25/36] Revert "Fix: Correct file variable handling for custom tools in workflow (#24061) --- api/core/workflow/nodes/tool/tool_node.py | 27 ----------------------- 1 file changed, 27 deletions(-) diff --git a/api/core/workflow/nodes/tool/tool_node.py b/api/core/workflow/nodes/tool/tool_node.py index df89b2476d..4c8e13de70 100644 --- a/api/core/workflow/nodes/tool/tool_node.py +++ b/api/core/workflow/nodes/tool/tool_node.py @@ -318,33 +318,6 @@ class ToolNode(BaseNode): 
json.append(message.message.json_object) elif message.type == ToolInvokeMessage.MessageType.LINK: assert isinstance(message.message, ToolInvokeMessage.TextMessage) - - if message.meta: - transfer_method = message.meta.get("transfer_method", FileTransferMethod.TOOL_FILE) - else: - transfer_method = FileTransferMethod.TOOL_FILE - - tool_file_id = message.message.text.split("/")[-1].split(".")[0] - - with Session(db.engine) as session: - stmt = select(ToolFile).where(ToolFile.id == tool_file_id) - tool_file = session.scalar(stmt) - if tool_file is None: - raise ToolFileError(f"Tool file {tool_file_id} does not exist") - - mapping = { - "tool_file_id": tool_file_id, - "type": file_factory.get_file_type_by_mime_type(tool_file.mimetype), - "transfer_method": transfer_method, - "url": message.message.text, - } - - file = file_factory.build_from_mapping( - mapping=mapping, - tenant_id=self.tenant_id, - ) - files.append(file) - stream_text = f"Link: {message.message.text}\n" text += stream_text yield RunStreamChunkEvent(chunk_content=stream_text, from_variable_selector=[node_id, "text"]) From ff52a54fef8d6bb9da539f840490b9a398734acd Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Mon, 18 Aug 2025 09:22:59 +0800 Subject: [PATCH 26/36] Restore useLabelStore mistakenly removed in commit 403e2d58 (#24052) Co-authored-by: Yongtao Huang <99629139+hyongtao-db@users.noreply.github.com> Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- web/app/components/tools/labels/store.ts | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 web/app/components/tools/labels/store.ts diff --git a/web/app/components/tools/labels/store.ts b/web/app/components/tools/labels/store.ts new file mode 100644 index 0000000000..c19991dfd4 --- /dev/null +++ b/web/app/components/tools/labels/store.ts @@ -0,0 +1,15 @@ +import { create } from 'zustand' +import type { Label } from './constant' + +type State = { + labelList: Label[] +} + +type Action = { + setLabelList: (labelList?: Label[]) => void +} + +export const useStore = create(set => ({ + labelList: [], + setLabelList: labelList => set(() => ({ labelList })), +})) From b475a6b257b6d5d609b6bdaf1e9e49d6b97f32cb Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Mon, 18 Aug 2025 09:29:52 +0800 Subject: [PATCH 27/36] chore: synchronize translations (#24044) --- web/i18n/fa-IR/billing.ts | 40 +++++++++++++---------------------- web/i18n/fr-FR/billing.ts | 34 +++++++++++------------------- web/i18n/hi-IN/billing.ts | 26 +++++++---------------- web/i18n/it-IT/billing.ts | 42 +++++++++++++------------------------ web/i18n/ja-JP/workflow.ts | 1 + web/i18n/pl-PL/billing.ts | 42 +++++++++++++------------------------ web/i18n/pt-BR/billing.ts | 39 +++++++++++++--------------------- web/i18n/ro-RO/billing.ts | 32 ++++++++++------------------ web/i18n/ru-RU/billing.ts | 34 +++++++++++------------------- web/i18n/sl-SI/billing.ts | 38 +++++++++++++-------------------- web/i18n/th-TH/billing.ts | 40 +++++++++++++---------------------- web/i18n/tr-TR/billing.ts | 34 +++++++++++------------------- web/i18n/uk-UA/billing.ts | 36 ++++++++++++------------------- web/i18n/vi-VN/billing.ts | 36 ++++++++++++------------------- web/i18n/zh-Hant/billing.ts | 16 ++++++++++++++ 15 files changed, 185 insertions(+), 305 deletions(-) diff --git a/web/i18n/fa-IR/billing.ts b/web/i18n/fa-IR/billing.ts index e4de29ced5..68eff70426 100644 --- a/web/i18n/fa-IR/billing.ts +++ b/web/i18n/fa-IR/billing.ts @@ -23,19 +23,14 @@ const 
translation = { contractSales: 'تماس با فروش', contractOwner: 'تماس با مدیر تیم', startForFree: 'رایگان شروع کنید', - getStartedWith: 'شروع کنید با ', contactSales: 'تماس با فروش', talkToSales: 'صحبت با فروش', modelProviders: 'ارائه‌دهندگان مدل', - teamMembers: 'اعضای تیم', annotationQuota: 'سهمیه حاشیه‌نویسی', buildApps: 'ساخت اپلیکیشن‌ها', vectorSpace: 'فضای وکتور', - vectorSpaceBillingTooltip: 'هر 1 مگابایت می‌تواند حدود 1.2 میلیون کاراکتر از داده‌های وکتور شده را ذخیره کند (براساس تخمین با استفاده از OpenAI Embeddings، متفاوت بر اساس مدل‌ها).', vectorSpaceTooltip: 'فضای وکتور سیستم حافظه بلند مدت است که برای درک داده‌های شما توسط LLM‌ها مورد نیاز است.', - documentsUploadQuota: 'سهمیه بارگذاری مستندات', documentProcessingPriority: 'اولویت پردازش مستندات', - documentProcessingPriorityTip: 'برای اولویت پردازش بالاتر مستندات، لطفاً طرح خود را ارتقاء دهید.', documentProcessingPriorityUpgrade: 'داده‌های بیشتری را با دقت بالاتر و سرعت بیشتر پردازش کنید.', priority: { 'standard': 'استاندارد', @@ -103,19 +98,16 @@ const translation = { sandbox: { name: 'محیط آزمایشی', description: '200 بار آزمایش رایگان GPT', - includesTitle: 'شامل:', for: 'دوره آزمایشی رایگان قابلیت‌های اصلی', }, professional: { name: 'حرفه‌ای', description: 'برای افراد و تیم‌های کوچک برای باز کردن قدرت بیشتر به طور مقرون به صرفه.', - includesTitle: 'همه چیز در طرح رایگان، به علاوه:', for: 'برای توسعه‌دهندگان مستقل/تیم‌های کوچک', }, team: { name: 'تیم', description: 'همکاری بدون محدودیت و لذت بردن از عملکرد برتر.', - includesTitle: 'همه چیز در طرح حرفه‌ای، به علاوه:', for: 'برای تیم‌های متوسط', }, enterprise: { @@ -123,15 +115,15 @@ const translation = { description: 'دریافت کامل‌ترین قابلیت‌ها و پشتیبانی برای سیستم‌های بزرگ و بحرانی.', includesTitle: 'همه چیز در طرح تیم، به علاوه:', features: { - 0: 'راهکارهای استقرار مقیاس‌پذیر در سطح سازمانی', - 8: 'پشتیبانی فنی حرفه‌ای', - 3: 'چندین فضای کاری و مدیریت سازمانی', - 5: 'SLA های توافق شده توسط شرکای Dify', - 4: 'SSO', - 2: 'ویژگی‌های انحصاری سازمانی', - 1: 'مجوز صدور مجوز تجاری', - 6: 'امنیت و کنترل‌های پیشرفته', - 7: 'به‌روزرسانی‌ها و نگهداری توسط دیفی به‌طور رسمی', + 4: 'Sso', + 1: 'مجوز جواز تجاری', + 2: 'ویژگی های انحصاری سازمانی', + 8: 'پشتیبانی فنی حرفه ای', + 5: 'SLA های مذاکره شده توسط Dify Partners', + 6: 'امنیت و کنترل پیشرفته', + 3: 'فضاهای کاری چندگانه و مدیریت سازمانی', + 7: 'به روز رسانی و نگهداری توسط Dify به طور رسمی', + 0: 'راه حل های استقرار مقیاس پذیر در سطح سازمانی', }, price: 'سفارشی', btnText: 'تماس با فروش', @@ -140,9 +132,9 @@ const translation = { }, community: { features: { - 0: 'تمام ویژگی‌های اصلی منتشر شده در مخزن عمومی', - 2: 'با رعایت مجوز منبع باز دیفی', 1: 'فضای کاری واحد', + 2: 'با مجوز منبع باز Dify مطابقت دارد', + 0: 'تمام ویژگی های اصلی در مخزن عمومی منتشر شده است', }, btnText: 'شروع کنید با جامعه', price: 'رایگان', @@ -153,10 +145,10 @@ const translation = { }, premium: { features: { - 1: 'محل کار واحد', - 0: 'قابل اطمینان خودمدیریتی توسط ارائه‌دهندگان مختلف ابر', - 2: 'شعار و سفارشی‌سازی برند وب‌اپلیکیشن', - 3: 'پشتیبانی اولویت ایمیل و چت', + 1: 'فضای کاری واحد', + 3: 'پشتیبانی از ایمیل و چت اولویت دار', + 2: 'لوگوی وب اپلیکیشن و سفارشی سازی برندینگ', + 0: 'قابلیت اطمینان خود مدیریت شده توسط ارائه دهندگان مختلف ابر', }, btnText: 'گرفتن نسخه پریمیوم در', description: 'برای سازمان‌ها و تیم‌های میان‌رده', @@ -173,8 +165,6 @@ const translation = { fullSolution: 'طرح خود را ارتقاء دهید تا فضای بیشتری دریافت کنید.', }, apps: { - fullTipLine1: 'طرح خود را ارتقاء دهید تا', - fullTipLine2: 'اپلیکیشن‌های بیشتری بسازید.', fullTip2: 'محدودیت طرح به 
پایان رسید', contactUs: 'با ما تماس بگیرید', fullTip1: 'به‌روزرسانی کنید تا برنامه‌های بیشتری ایجاد کنید', diff --git a/web/i18n/fr-FR/billing.ts b/web/i18n/fr-FR/billing.ts index 879a067941..262df37954 100644 --- a/web/i18n/fr-FR/billing.ts +++ b/web/i18n/fr-FR/billing.ts @@ -23,18 +23,13 @@ const translation = { contractSales: 'Contactez les ventes', contractOwner: 'Contacter le chef d\'équipe', startForFree: 'Commencez gratuitement', - getStartedWith: 'Commencez avec', contactSales: 'Contacter les ventes', talkToSales: 'Parlez aux Ventes', modelProviders: 'Fournisseurs de Modèles', - teamMembers: 'Membres de l\'équipe', buildApps: 'Construire des Applications', vectorSpace: 'Espace Vectoriel', - vectorSpaceBillingTooltip: 'Chaque 1MB peut stocker environ 1,2 million de caractères de données vectorisées (estimé en utilisant les embeddings OpenAI, varie selon les modèles).', vectorSpaceTooltip: 'L\'espace vectoriel est le système de mémoire à long terme nécessaire pour que les LLMs comprennent vos données.', - documentsUploadQuota: 'Quota de téléchargement de documents', documentProcessingPriority: 'Priorité de Traitement de Document', - documentProcessingPriorityTip: 'Pour une priorité de traitement de documents plus élevée, veuillez mettre à niveau votre plan.', documentProcessingPriorityUpgrade: 'Traitez plus de données avec une précision plus élevée à des vitesses plus rapides.', priority: { 'standard': 'Standard', @@ -103,19 +98,16 @@ const translation = { sandbox: { name: 'Bac à sable', description: '200 essais gratuits de GPT', - includesTitle: 'Inclus :', for: 'Essai gratuit des fonctionnalités principales', }, professional: { name: 'Professionnel', description: 'Pour les individus et les petites équipes afin de débloquer plus de puissance à un prix abordable.', - includesTitle: 'Tout ce qui est dans le plan gratuit, plus :', for: 'Pour les développeurs indépendants / petites équipes', }, team: { name: 'Équipe', description: 'Collaborez sans limites et profitez d\'une performance de premier ordre.', - includesTitle: 'Tout ce qui est inclus dans le plan Professionnel, plus :', for: 'Pour les équipes de taille moyenne', }, enterprise: { @@ -123,14 +115,14 @@ const translation = { description: 'Obtenez toutes les capacités et le support pour les systèmes à grande échelle et critiques pour la mission.', includesTitle: 'Tout ce qui est inclus dans le plan Équipe, plus :', features: { - 5: 'SLA négociés par Dify Partners', - 1: 'Autorisation de Licence Commerciale', - 2: 'Fonctionnalités exclusives pour les entreprises', - 4: 'SSO', - 8: 'Support Technique Professionnel', - 3: 'Gestion de plusieurs espaces de travail et d\'entreprise', 6: 'Sécurité et contrôles avancés', - 7: 'Mises à jour et maintenance par Dify Officiellement', + 3: 'Espaces de travail multiples et gestion d’entreprise', + 4: 'SSO', + 1: 'Autorisation de licence commerciale', + 2: 'Fonctionnalités exclusives à l’entreprise', + 5: 'SLA négociés par les partenaires Dify', + 8: 'Assistance technique professionnelle', + 7: 'Mises à jour et maintenance par Dify officiellement', 0: 'Solutions de déploiement évolutives de niveau entreprise', }, for: 'Pour les équipes de grande taille', @@ -140,9 +132,9 @@ const translation = { }, community: { features: { - 2: 'Conforme à la licence open source de Dify', 1: 'Espace de travail unique', - 0: 'Toutes les fonctionnalités principales publiées dans le référentiel public', + 0: 'Toutes les fonctionnalités de base publiées dans le dépôt public', + 2: 'Conforme à la licence Open 
Source Dify', }, name: 'Communauté', btnText: 'Commencez avec la communauté', @@ -153,10 +145,10 @@ const translation = { }, premium: { features: { - 3: 'Support par e-mail et chat prioritaire', + 2: 'Personnalisation du logo et de l’image de marque WebApp', 1: 'Espace de travail unique', - 0: 'Fiabilité autogérée par divers fournisseurs de cloud', - 2: 'Personnalisation du logo et de la marque de l\'application Web', + 3: 'Assistance prioritaire par e-mail et chat', + 0: 'Fiabilité autogérée par différents fournisseurs de cloud', }, for: 'Pour les organisations et les équipes de taille moyenne', includesTitle: 'Tout de la communauté, en plus :', @@ -173,8 +165,6 @@ const translation = { fullSolution: 'Mettez à niveau votre plan pour obtenir plus d\'espace.', }, apps: { - fullTipLine1: 'Mettez à jour votre plan pour', - fullTipLine2: 'construire plus d\'applications.', fullTip2: 'Limite de plan atteinte', contactUs: 'Contactez-nous', fullTip1: 'Mettez à niveau pour créer plus d\'applications', diff --git a/web/i18n/hi-IN/billing.ts b/web/i18n/hi-IN/billing.ts index 1f8b29587c..25c4298628 100644 --- a/web/i18n/hi-IN/billing.ts +++ b/web/i18n/hi-IN/billing.ts @@ -24,22 +24,15 @@ const translation = { contractSales: 'बिक्री से संपर्क करें', contractOwner: 'टीम प्रबंधक से संपर्क करें', startForFree: 'मुफ्त में शुरू करें', - getStartedWith: 'इसके साथ शुरू करें ', contactSales: 'बिक्री से संपर्क करें', talkToSales: 'बिक्री से बात करें', modelProviders: 'मॉडल प्रदाता', - teamMembers: 'टीम के सदस्य', annotationQuota: 'एनोटेशन कोटा', buildApps: 'ऐप्स बनाएं', vectorSpace: 'वेक्टर स्पेस', - vectorSpaceBillingTooltip: - 'प्रत्येक 1MB लगभग 1.2 मिलियन वर्णों के वेक्टराइज्ड डेटा को संग्रहीत कर सकता है (OpenAI एम्बेडिंग का उपयोग करके अनुमानित, मॉडल में भिन्नता होती है)।', vectorSpaceTooltip: 'वेक्टर स्पेस वह दीर्घकालिक स्मृति प्रणाली है जिसकी आवश्यकता LLMs को आपके डेटा को समझने के लिए होती है।', - documentsUploadQuota: 'दस्तावेज़ अपलोड कोटा', documentProcessingPriority: 'दस्तावेज़ प्रसंस्करण प्राथमिकता', - documentProcessingPriorityTip: - 'उच्च दस्तावेज़ प्रसंस्करण प्राथमिकता के लिए, कृपया अपनी योजना अपग्रेड करें।', documentProcessingPriorityUpgrade: 'तेजी से गति पर उच्च सटीकता के साथ अधिक डेटा संसाधित करें।', priority: { @@ -113,21 +106,18 @@ const translation = { sandbox: { name: 'सैंडबॉक्स', description: '200 बार GPT मुफ्त ट्रायल', - includesTitle: 'शामिल हैं:', for: 'कोर क्षमताओं का मुफ्त परीक्षण', }, professional: { name: 'प्रोफेशनल', description: 'व्यक्तियों और छोटे टीमों के लिए अधिक शक्ति सस्ती दर पर खोलें।', - includesTitle: 'मुफ्त योजना में सब कुछ, साथ में:', for: 'स्वतंत्र डेवलपर्स/छोटे टीमों के लिए', }, team: { name: 'टीम', description: 'बिना सीमा के सहयोग करें और शीर्ष स्तरीय प्रदर्शन का आनंद लें।', - includesTitle: 'प्रोफेशनल योजना में सब कुछ, साथ में:', for: 'मध्यम आकार की टीमों के लिए', }, enterprise: { @@ -136,15 +126,15 @@ const translation = { 'बड़े पैमाने पर मिशन-क्रिटिकल सिस्टम के लिए पूर्ण क्षमताएं और समर्थन प्राप्त करें।', includesTitle: 'टीम योजना में सब कुछ, साथ में:', features: { - 0: 'उद्योग स्तर के बड़े पैमाने पर वितरण समाधान', - 3: 'अनेक कार्यक्षेत्र और उद्यम प्रबंधक', - 8: 'प्रोफेशनल तकनीकी समर्थन', - 6: 'उन्नत सुरक्षा और नियंत्रण', - 2: 'विशेष उद्यम सुविधाएँ', 1: 'Commercial License Authorization', 4: 'SSO', + 6: 'उन्नत सुरक्षा और नियंत्रण', + 2: 'विशेष उद्यम सुविधाएँ', + 3: 'अनेक कार्यक्षेत्र और उद्यम प्रबंधक', 5: 'डिफाई पार्टनर्स द्वारा बातचीत किए गए एसएलए', + 8: 'प्रोफेशनल तकनीकी समर्थन', 7: 'डीफाई द्वारा आधिकारिक रूप से अपडेट और रखरखाव', + 0: 'उद्योग स्तर के बड़े पैमाने 
पर वितरण समाधान', }, price: 'कस्टम', btnText: 'बिक्री से संपर्क करें', @@ -153,9 +143,9 @@ const translation = { }, community: { features: { + 1: 'एकल कार्यक्षेत्र', 2: 'डिफी ओपन सोर्स लाइसेंस के अनुपालन में', 0: 'सभी मुख्य सुविधाएं सार्वजनिक संग्रह के तहत जारी की गई हैं।', - 1: 'एकल कार्यक्षेत्र', }, description: 'व्यक्तिगत उपयोगकर्ताओं, छोटे टीमों, या गैर-व्यावसायिक परियोजनाओं के लिए', for: 'व्यक्तिगत उपयोगकर्ताओं, छोटे टीमों, या गैर-व्यावसायिक परियोजनाओं के लिए', @@ -166,9 +156,9 @@ const translation = { }, premium: { features: { + 1: 'एकल कार्यक्षेत्र', 2: 'वेब ऐप लोगो और ब्रांडिंग कस्टमाइजेशन', 3: 'प्राथमिकता ईमेल और चैट समर्थन', - 1: 'एकल कार्यक्षेत्र', 0: 'विभिन्न क्लाउड प्रदाताओं द्वारा आत्म-प्रबंधित विश्वसनीयता', }, priceTip: 'क्लाउड मार्केटप्लेस के आधार पर', @@ -186,8 +176,6 @@ const translation = { fullSolution: 'अधिक स्थान प्राप्त करने के लिए अपनी योजना अपग्रेड करें।', }, apps: { - fullTipLine1: 'अधिक ऐप्स बनाने के लिए', - fullTipLine2: 'अपनी योजना अपग्रेड करें।', fullTip1: 'अधिक ऐप्स बनाने के लिए अपग्रेड करें', fullTip2: 'योजना की सीमा पहुँच गई', contactUs: 'हमसे संपर्क करें', diff --git a/web/i18n/it-IT/billing.ts b/web/i18n/it-IT/billing.ts index 69adc34569..43d285f652 100644 --- a/web/i18n/it-IT/billing.ts +++ b/web/i18n/it-IT/billing.ts @@ -24,22 +24,15 @@ const translation = { contractSales: 'Contatta vendite', contractOwner: 'Contatta il responsabile del team', startForFree: 'Inizia gratis', - getStartedWith: 'Inizia con ', contactSales: 'Contatta le vendite', talkToSales: 'Parla con le vendite', modelProviders: 'Fornitori di Modelli', - teamMembers: 'Membri del Team', annotationQuota: 'Quota di Annotazione', buildApps: 'Crea App', vectorSpace: 'Spazio Vettoriale', - vectorSpaceBillingTooltip: - 'Ogni 1MB può memorizzare circa 1,2 milioni di caratteri di dati vettoriali (stimato utilizzando OpenAI Embeddings, varia tra i modelli).', vectorSpaceTooltip: 'Lo Spazio Vettoriale è il sistema di memoria a lungo termine necessario per permettere agli LLM di comprendere i tuoi dati.', - documentsUploadQuota: 'Quota di Caricamento Documenti', documentProcessingPriority: 'Priorità di Elaborazione Documenti', - documentProcessingPriorityTip: - 'Per una maggiore priorità di elaborazione dei documenti, aggiorna il tuo piano.', documentProcessingPriorityUpgrade: 'Elabora più dati con maggiore precisione a velocità più elevate.', priority: { @@ -113,21 +106,18 @@ const translation = { sandbox: { name: 'Sandbox', description: '200 prove gratuite di GPT', - includesTitle: 'Include:', for: 'Prova gratuita delle capacità principali', }, professional: { name: 'Professional', description: 'Per individui e piccoli team per sbloccare più potenza a prezzi accessibili.', - includesTitle: 'Tutto nel piano gratuito, più:', for: 'Per sviluppatori indipendenti / piccoli team', }, team: { name: 'Team', description: 'Collabora senza limiti e goditi prestazioni di alto livello.', - includesTitle: 'Tutto nel piano Professional, più:', for: 'Per team di medie dimensioni', }, enterprise: { @@ -136,15 +126,15 @@ const translation = { 'Ottieni tutte le capacità e il supporto per sistemi mission-critical su larga scala.', includesTitle: 'Tutto nel piano Team, più:', features: { - 6: 'Sicurezza e Controlli Avanzati', - 2: 'Funzionalità esclusive per le imprese', + 3: 'Spazi di lavoro multipli e gestione aziendale', + 2: 'Funzionalità esclusive per le aziende', + 1: 'Autorizzazione Licenza Commerciale', + 5: 'SLA negoziati dai partner Dify', 4: 'SSO', - 8: 'Supporto Tecnico Professionale', - 5: 'SLA negoziati 
da Dify Partners', - 0: 'Soluzioni di distribuzione scalabili di livello enterprise', - 7: 'Aggiornamenti e manutenzione di Dify ufficialmente', - 1: 'Autorizzazione alla Licenza Commerciale', - 3: 'Gestione di più spazi di lavoro e imprese', + 6: 'Sicurezza e controlli avanzati', + 8: 'Supporto tecnico professionale', + 7: 'Aggiornamenti e manutenzione da parte di Dify ufficialmente', + 0: 'Soluzioni di distribuzione scalabili di livello aziendale', }, price: 'Personalizzato', for: 'Per team di grandi dimensioni', @@ -153,9 +143,9 @@ const translation = { }, community: { features: { - 1: 'Spazio di Lavoro Unico', - 2: 'Rispetta la Licenza Open Source di Dify', - 0: 'Tutte le funzionalità principali rilasciate sotto il repository pubblico', + 1: 'Area di lavoro singola', + 2: 'Conforme alla licenza Open Source Dify', + 0: 'Tutte le funzionalità principali rilasciate nel repository pubblico', }, name: 'Comunità', btnText: 'Inizia con la comunità', @@ -166,10 +156,10 @@ const translation = { }, premium: { features: { - 0: 'Affidabilità autogestita da vari fornitori di cloud', - 3: 'Supporto prioritario via Email e Chat', - 2: 'Personalizzazione del logo e del marchio dell\'app web', - 1: 'Spazio di Lavoro Unico', + 3: 'Supporto prioritario via e-mail e chat', + 1: 'Area di lavoro singola', + 2: 'Personalizzazione del logo e del marchio WebApp', + 0: 'Affidabilità autogestita da vari fornitori di servizi cloud', }, name: 'Premium', priceTip: 'Basato su Cloud Marketplace', @@ -186,8 +176,6 @@ const translation = { fullSolution: 'Aggiorna il tuo piano per ottenere più spazio.', }, apps: { - fullTipLine1: 'Aggiorna il tuo piano per', - fullTipLine2: 'creare più app.', fullTip1des: 'Hai raggiunto il limite di costruzione delle app su questo piano.', fullTip2des: 'Si consiglia di disinstallare le applicazioni inattive per liberare spazio, o contattarci.', contactUs: 'Contattaci', diff --git a/web/i18n/ja-JP/workflow.ts b/web/i18n/ja-JP/workflow.ts index 2c88b54b8a..99da1bc6c6 100644 --- a/web/i18n/ja-JP/workflow.ts +++ b/web/i18n/ja-JP/workflow.ts @@ -995,6 +995,7 @@ const translation = { noLastRunFound: '以前の実行が見つかりませんでした。', copyLastRunError: '最後の実行の入力をコピーできませんでした', noMatchingInputsFound: '前回の実行から一致する入力が見つかりませんでした。', + lastRunInputsCopied: '前回の実行から{{count}}個の入力をコピーしました', }, } diff --git a/web/i18n/pl-PL/billing.ts b/web/i18n/pl-PL/billing.ts index 00284109e8..09e213df8d 100644 --- a/web/i18n/pl-PL/billing.ts +++ b/web/i18n/pl-PL/billing.ts @@ -24,21 +24,14 @@ const translation = { contractSales: 'Skontaktuj się z działem sprzedaży', contractOwner: 'Skontaktuj się z zarządcą zespołu', startForFree: 'Zacznij za darmo', - getStartedWith: 'Rozpocznij z ', contactSales: 'Kontakt z działem sprzedaży', talkToSales: 'Porozmawiaj z działem sprzedaży', modelProviders: 'Dostawcy modeli', - teamMembers: 'Członkowie zespołu', buildApps: 'Twórz aplikacje', vectorSpace: 'Przestrzeń wektorowa', - vectorSpaceBillingTooltip: - 'Każdy 1MB może przechowywać około 1,2 miliona znaków z wektoryzowanych danych (szacowane na podstawie OpenAI Embeddings, różni się w zależności od modelu).', vectorSpaceTooltip: 'Przestrzeń wektorowa jest systemem pamięci długoterminowej wymaganym dla LLM, aby zrozumieć Twoje dane.', - documentsUploadQuota: 'Limit przesyłanych dokumentów', documentProcessingPriority: 'Priorytet przetwarzania dokumentów', - documentProcessingPriorityTip: - 'Dla wyższego priorytetu przetwarzania dokumentów, ulepsz swój plan.', documentProcessingPriorityUpgrade: 'Przetwarzaj więcej danych z większą dokładnością 
i w szybszym tempie.', priority: { @@ -112,21 +105,18 @@ const translation = { sandbox: { name: 'Sandbox', description: '200 razy darmowa próba GPT', - includesTitle: 'Zawiera:', for: 'Darmowy okres próbny podstawowych funkcji', }, professional: { name: 'Profesjonalny', description: 'Dla osób fizycznych i małych zespołów, aby odblokować więcej mocy w przystępnej cenie.', - includesTitle: 'Wszystko w darmowym planie, plus:', for: 'Dla niezależnych deweloperów/małych zespołów', }, team: { name: 'Zespół', description: 'Współpracuj bez ograniczeń i ciesz się najwyższą wydajnością.', - includesTitle: 'Wszystko w planie Profesjonalnym, plus:', for: 'Dla średniej wielkości zespołów', }, enterprise: { @@ -135,15 +125,15 @@ const translation = { 'Uzyskaj pełne możliwości i wsparcie dla systemów o kluczowym znaczeniu dla misji.', includesTitle: 'Wszystko w planie Zespołowym, plus:', features: { - 3: 'Wiele przestrzeni roboczych i zarządzanie przedsiębiorstwem', - 5: 'Wynegocjowane SLA przez Dify Partners', - 0: 'Rozwiązania do wdrożeń na dużą skalę klasy przedsiębiorstw', - 8: 'Profesjonalne wsparcie techniczne', - 2: 'Ekskluzywne funkcje przedsiębiorstwa', - 6: 'Zaawansowane zabezpieczenia i kontrola', - 7: 'Aktualizacje i konserwacja przez Dify Oficjalnie', - 4: 'SSO', + 2: 'Wyjątkowe funkcje dla przedsiębiorstw', + 7: 'Aktualizacje i konserwacja przez Dify oficjalnie', + 4: 'Usługi rejestracji jednokrotnej', 1: 'Autoryzacja licencji komercyjnej', + 0: 'Skalowalne rozwiązania wdrożeniowe klasy korporacyjnej', + 5: 'Umowy SLA wynegocjowane przez Dify Partners', + 8: 'Profesjonalne wsparcie techniczne', + 3: 'Wiele przestrzeni roboczych i zarządzanie przedsiębiorstwem', + 6: 'Zaawansowane zabezpieczenia i kontrola', }, priceTip: 'Tylko roczne fakturowanie', btnText: 'Skontaktuj się z działem sprzedaży', @@ -152,9 +142,9 @@ const translation = { }, community: { features: { - 0: 'Wszystkie funkcje podstawowe wydane w publicznym repozytorium', - 1: 'Jedno Miejsce Pracy', - 2: 'Zgodne z licencją Dify Open Source', + 1: 'Pojedyncza przestrzeń robocza', + 2: 'Zgodny z licencją Dify Open Source', + 0: 'Wszystkie podstawowe funkcje udostępnione w repozytorium publicznym', }, includesTitle: 'Darmowe funkcje:', name: 'Społeczność', @@ -165,10 +155,10 @@ const translation = { }, premium: { features: { - 0: 'Samozarządzana niezawodność różnych dostawców chmury', - 1: 'Jedno miejsce pracy', - 3: 'Priorytetowe wsparcie przez e-mail i czat', - 2: 'Logo aplikacji internetowej i dostosowanie marki', + 1: 'Pojedyncza przestrzeń robocza', + 2: 'Personalizacja logo i brandingu aplikacji internetowej', + 3: 'Priorytetowa pomoc techniczna przez e-mail i czat', + 0: 'Niezawodność samodzielnego zarządzania przez różnych dostawców usług w chmurze', }, description: 'Dla średnich organizacji i zespołów', for: 'Dla średnich organizacji i zespołów', @@ -185,8 +175,6 @@ const translation = { fullSolution: 'Ulepsz swój plan, aby uzyskać więcej miejsca.', }, apps: { - fullTipLine1: 'Ulepsz swój plan, aby', - fullTipLine2: 'tworzyć więcej aplikacji.', fullTip1des: 'Osiągnąłeś limit tworzenia aplikacji w tym planie.', fullTip1: 'Zaktualizuj, aby stworzyć więcej aplikacji', fullTip2: 'Osiągnięto limit planu', diff --git a/web/i18n/pt-BR/billing.ts b/web/i18n/pt-BR/billing.ts index f26008c718..3ef93d9f91 100644 --- a/web/i18n/pt-BR/billing.ts +++ b/web/i18n/pt-BR/billing.ts @@ -22,17 +22,13 @@ const translation = { currentPlan: 'Plano Atual', contractOwner: 'Entre em contato com o gerente da equipe', startForFree: 'Comece de graça', - 
getStartedWith: 'Comece com', contactSales: 'Fale com a equipe de Vendas', talkToSales: 'Fale com a equipe de Vendas', modelProviders: 'Fornecedores de Modelos', - teamMembers: 'Membros da Equipe', buildApps: 'Construir Aplicações', vectorSpace: 'Espaço Vetorial', - vectorSpaceBillingTooltip: 'Cada 1MB pode armazenar cerca de 1,2 milhão de caracteres de dados vetorizados (estimado usando OpenAI Embeddings, varia entre os modelos).', vectorSpaceTooltip: 'O Espaço Vetorial é o sistema de memória de longo prazo necessário para que LLMs compreendam seus dados.', documentProcessingPriority: 'Prioridade no Processamento de Documentos', - documentProcessingPriorityTip: 'Para maior prioridade no processamento de documentos, faça o upgrade do seu plano.', documentProcessingPriorityUpgrade: 'Processe mais dados com maior precisão e velocidade.', priority: { 'standard': 'Padrão', @@ -53,7 +49,6 @@ const translation = { dedicatedAPISupport: 'Suporte dedicado à API', customIntegration: 'Integração e suporte personalizados', ragAPIRequest: 'Solicitações API RAG', - agentModel: 'Modelo de Agente', workflow: 'Fluxo de trabalho', llmLoadingBalancing: 'Balanceamento de carga LLM', bulkUpload: 'Upload em massa de documentos', @@ -75,7 +70,6 @@ const translation = { ragAPIRequestTooltip: 'Refere-se ao número de chamadas de API que invocam apenas as capacidades de processamento da base de conhecimento do Dify.', receiptInfo: 'Somente proprietários e administradores de equipe podem se inscrever e visualizar informações de cobrança', customTools: 'Ferramentas personalizadas', - documentsUploadQuota: 'Cota de upload de documentos', annotationQuota: 'Cota de anotação', contractSales: 'Entre em contato com a equipe de vendas', unavailable: 'Indisponível', @@ -104,19 +98,16 @@ const translation = { sandbox: { name: 'Sandbox', description: '200 vezes GPT de teste gratuito', - includesTitle: 'Inclui:', for: 'Teste gratuito das capacidades principais', }, professional: { name: 'Profissional', description: 'Para indivíduos e pequenas equipes desbloquearem mais poder de forma acessível.', - includesTitle: 'Tudo no plano gratuito, além de:', for: 'Para Desenvolvedores Independentes/Pequenas Equipes', }, team: { name: 'Equipe', description: 'Colabore sem limites e aproveite o desempenho de primeira linha.', - includesTitle: 'Tudo no plano Profissional, além de:', for: 'Para Equipes de Médio Porte', }, enterprise: { @@ -124,15 +115,15 @@ const translation = { description: 'Obtenha capacidades completas e suporte para sistemas críticos em larga escala.', includesTitle: 'Tudo no plano Equipe, além de:', features: { - 6: 'Segurança e Controles Avançados', - 7: 'Atualizações e Manutenção por Dify Oficialmente', - 5: 'Acordos de Nível de Serviço negociados pelos Parceiros Dify', - 1: 'Autorização de Licença Comercial', - 8: 'Suporte Técnico Profissional', + 3: 'Vários espaços de trabalho e gerenciamento corporativo', + 2: 'Recursos exclusivos da empresa', + 6: 'Segurança e controles avançados', 4: 'SSO', - 2: 'Recursos Exclusivos da Empresa', - 3: 'Múltiplos Espaços de Trabalho e Gestão Empresarial', - 0: 'Soluções de Implantação Escaláveis de Nível Empresarial', + 8: 'Suporte Técnico Profissional', + 0: 'Soluções de implantação escaláveis de nível empresarial', + 7: 'Atualizações e manutenção por Dify oficialmente', + 1: 'Autorização de Licença Comercial', + 5: 'SLAs negociados pela Dify Partners', }, btnText: 'Contate Vendas', priceTip: 'Faturamento Anual Apenas', @@ -141,9 +132,9 @@ const translation = { }, community: { 
features: { - 1: 'Espaço de Trabalho Único', - 0: 'Todos os recursos principais lançados sob o repositório público', - 2: 'Cumpre a Licença de Código Aberto Dify', + 0: 'Todos os principais recursos lançados no repositório público', + 2: 'Está em conformidade com a licença de código aberto Dify', + 1: 'Espaço de trabalho individual', }, name: 'Comunidade', description: 'Para Usuários Individuais, Pequenas Equipes ou Projetos Não Comerciais', @@ -154,10 +145,10 @@ const translation = { }, premium: { features: { - 1: 'Espaço de Trabalho Único', - 3: 'Suporte prioritário por e-mail e chat', - 2: 'Customização de Logo e Branding do WebApp', + 2: 'Personalização do logotipo e da marca do WebApp', + 1: 'Espaço de trabalho individual', 0: 'Confiabilidade autogerenciada por vários provedores de nuvem', + 3: 'Suporte prioritário por e-mail e bate-papo', }, includesTitle: 'Tudo da Comunidade, além de:', for: 'Para organizações e equipes de médio porte', @@ -174,8 +165,6 @@ const translation = { fullSolution: 'Faça o upgrade do seu plano para obter mais espaço.', }, apps: { - fullTipLine1: 'Faça o upgrade do seu plano para', - fullTipLine2: 'construir mais aplicativos.', fullTip1: 'Atualize para criar mais aplicativos', fullTip2: 'Limite do plano alcançado', fullTip1des: 'Você atingiu o limite de criar aplicativos neste plano.', diff --git a/web/i18n/ro-RO/billing.ts b/web/i18n/ro-RO/billing.ts index 682641372d..df35ec26fb 100644 --- a/web/i18n/ro-RO/billing.ts +++ b/web/i18n/ro-RO/billing.ts @@ -23,18 +23,13 @@ const translation = { contractSales: 'Contactați vânzările', contractOwner: 'Contactați managerul echipei', startForFree: 'Începe gratuit', - getStartedWith: 'Începe cu ', contactSales: 'Contactați vânzările', talkToSales: 'Vorbiți cu vânzările', modelProviders: 'Furnizori de modele', - teamMembers: 'Membri ai echipei', buildApps: 'Construiește aplicații', vectorSpace: 'Spațiu vectorial', - vectorSpaceBillingTooltip: 'Fiecare 1MB poate stoca aproximativ 1,2 milioane de caractere de date vectorizate (estimat folosind OpenAI Embeddings, variază în funcție de modele).', vectorSpaceTooltip: 'Spațiul vectorial este sistemul de memorie pe termen lung necesar pentru ca LLM-urile să înțeleagă datele dvs.', - documentsUploadQuota: 'Cotă de încărcare a documentelor', documentProcessingPriority: 'Prioritatea procesării documentelor', - documentProcessingPriorityTip: 'Pentru o prioritate mai mare a procesării documentelor, vă rugăm să actualizați planul.', documentProcessingPriorityUpgrade: 'Procesați mai multe date cu o acuratețe mai mare și la viteze mai rapide.', priority: { 'standard': 'Standard', @@ -103,19 +98,16 @@ const translation = { sandbox: { name: 'Sandbox', description: '200 de încercări gratuite GPT', - includesTitle: 'Include:', for: 'Proba gratuită a capacităților de bază', }, professional: { name: 'Professional', description: 'Pentru persoane fizice și echipe mici pentru a debloca mai multă putere la un preț accesibil.', - includesTitle: 'Tot ce este în planul gratuit, plus:', for: 'Pentru dezvoltatori independenți / echipe mici', }, team: { name: 'Echipă', description: 'Colaborați fără limite și bucurați-vă de performanțe de top.', - includesTitle: 'Tot ce este în planul Professional, plus:', for: 'Pentru echipe de dimensiuni medii', }, enterprise: { @@ -123,15 +115,15 @@ const translation = { description: 'Obțineți capacități și asistență complete pentru sisteme critice la scară largă.', includesTitle: 'Tot ce este în planul Echipă, plus:', features: { - 3: 'Multiple Spații de lucru 
și Management Enterprise', - 6: 'Securitate avansată și control', + 6: 'Securitate și controale avansate', + 1: 'Autorizare licență comercială', 2: 'Funcții exclusive pentru întreprinderi', + 0: 'Soluții de implementare scalabile la nivel de întreprindere', + 5: 'SLA-uri negociate de partenerii Dify', + 3: 'Mai multe spații de lucru și managementul întreprinderii', + 7: 'Actualizări și întreținere de către Dify oficial', 8: 'Asistență tehnică profesională', 4: 'SSO', - 7: 'Actualizări și întreținere de către Dify Oficial', - 1: 'Autorizare pentru licență comercială', - 5: 'SLA-uri negociate de partenerii Dify', - 0: 'Soluții de desfășurare scalabile de nivel enterprise', }, for: 'Pentru echipe de mari dimensiuni', price: 'Personalizat', @@ -140,9 +132,9 @@ const translation = { }, community: { features: { - 2: 'Se conformează Licenței Open Source Dify', + 0: 'Toate caracteristicile de bază lansate în depozitul public', + 2: 'Respectă licența Dify Open Source', 1: 'Spațiu de lucru unic', - 0: 'Toate funcțiile de bază lansate sub depozitul public', }, description: 'Pentru utilizatori individuali, echipe mici sau proiecte necomerciale', btnText: 'Începe cu Comunitatea', @@ -153,10 +145,10 @@ const translation = { }, premium: { features: { + 3: 'Asistență prioritară prin e-mail și chat', 1: 'Spațiu de lucru unic', - 0: 'Fiabilitate autogestionată de diferiți furnizori de cloud', - 2: 'Personalizarea logo-ului și branding-ului aplicației web', - 3: 'Suport prioritar prin email și chat', + 0: 'Fiabilitate autogestionată de diverși furnizori de cloud', + 2: 'Personalizarea logo-ului și brandingului WebApp', }, btnText: 'Obține Premium în', description: 'Pentru organizații și echipe de dimensiuni medii', @@ -173,8 +165,6 @@ const translation = { fullSolution: 'Actualizați-vă planul pentru a obține mai mult spațiu.', }, apps: { - fullTipLine1: 'Actualizați-vă planul pentru a', - fullTipLine2: 'construi mai multe aplicații.', fullTip2des: 'Se recomandă curățarea aplicațiilor inactive pentru a elibera resurse, sau contactați-ne.', fullTip2: 'Limita planului a fost atinsă', fullTip1des: 'Ați atins limita de aplicații construite pe acest plan', diff --git a/web/i18n/ru-RU/billing.ts b/web/i18n/ru-RU/billing.ts index 7a0560334c..7af47ee00b 100644 --- a/web/i18n/ru-RU/billing.ts +++ b/web/i18n/ru-RU/billing.ts @@ -23,19 +23,14 @@ const translation = { contractSales: 'Связаться с отделом продаж', contractOwner: 'Связаться с руководителем команды', startForFree: 'Начать бесплатно', - getStartedWith: 'Начать с ', contactSales: 'Связаться с отделом продаж', talkToSales: 'Поговорить с отделом продаж', modelProviders: 'Поставщики моделей', - teamMembers: 'Участники команды', annotationQuota: 'Квота аннотаций', buildApps: 'Создать приложения', vectorSpace: 'Векторное пространство', - vectorSpaceBillingTooltip: 'Каждый 1 МБ может хранить около 1,2 миллиона символов векторизованных данных (оценка с использованием Embeddings OpenAI, варьируется в зависимости от модели).', vectorSpaceTooltip: 'Векторное пространство - это система долговременной памяти, необходимая LLM для понимания ваших данных.', - documentsUploadQuota: 'Квота загрузки документов', documentProcessingPriority: 'Приоритет обработки документов', - documentProcessingPriorityTip: 'Для более высокого приоритета обработки документов, пожалуйста, обновите свой тарифный план.', documentProcessingPriorityUpgrade: 'Обрабатывайте больше данных с большей точностью и на более высоких скоростях.', priority: { 'standard': 'Стандартный', @@ -103,19 +98,16 @@ const 
translation = { sandbox: { name: 'Песочница', description: '200 бесплатных пробных использований GPT', - includesTitle: 'Включает:', for: 'Бесплатная пробная версия основных возможностей', }, professional: { name: 'Профессиональный', description: 'Для частных лиц и небольших команд, чтобы разблокировать больше возможностей по доступной цене.', - includesTitle: 'Все в бесплатном плане, плюс:', for: 'Для независимых разработчиков/малых команд', }, team: { name: 'Команда', description: 'Сотрудничайте без ограничений и наслаждайтесь высочайшей производительностью.', - includesTitle: 'Все в профессиональном плане, плюс:', for: 'Для команд среднего размера', }, enterprise: { @@ -123,15 +115,15 @@ const translation = { description: 'Получите полный набор возможностей и поддержку для крупномасштабных критически важных систем.', includesTitle: 'Все в командном плане, плюс:', features: { - 7: 'Обновления и обслуживание от Dify официально', 4: 'ССО', + 5: 'Согласованные SLA от Dify Partners', 8: 'Профессиональная техническая поддержка', - 6: 'Современная безопасность и контроль', - 2: 'Эксклюзивные функции для предприятий', - 1: 'Коммерческая лицензия', - 3: 'Множественные рабочие области и управление предприятием', - 0: 'Решения для масштабируемого развертывания корпоративного уровня', - 5: 'Согласованные Соглашения об Уровне Услуг от Dify Partners', + 2: 'Эксклюзивные корпоративные функции', + 6: 'Расширенная безопасность и контроль', + 7: 'Обновления и обслуживание от Dify официально', + 3: 'Несколько рабочих пространств и управление предприятием', + 0: 'Масштабируемые решения для развертывания корпоративного уровня', + 1: 'Разрешение на коммерческую лицензию', }, price: 'Пользовательский', priceTip: 'Только годовая подписка', @@ -140,9 +132,9 @@ const translation = { }, community: { features: { - 0: 'Все основные функции выпущены в публичном репозитории', 1: 'Единое рабочее пространство', - 2: 'Соблюдает Лицензию на открытое программное обеспечение Dify', + 2: 'Соответствует лицензии Dify с открытым исходным кодом', + 0: 'Все основные функции выпущены в общедоступном репозитории', }, name: 'Сообщество', btnText: 'Начните с сообщества', @@ -153,10 +145,10 @@ const translation = { }, premium: { features: { - 3: 'Приоритетная поддержка по электронной почте и чату', + 2: 'Настройка логотипа и брендинга WebApp', 1: 'Единое рабочее пространство', - 2: 'Настройка логотипа и брендинга веб-приложения', - 0: 'Самостоятельное управление надежностью различными облачными провайдерами', + 3: 'Приоритетная поддержка по электронной почте и в чате', + 0: 'Самостоятельное управление надежностью от различных поставщиков облачных услуг', }, description: 'Для средних организаций и команд', includesTitle: 'Всё из Сообщества, плюс:', @@ -173,8 +165,6 @@ const translation = { fullSolution: 'Обновите свой тарифный план, чтобы получить больше места.', }, apps: { - fullTipLine1: 'Обновите свой тарифный план, чтобы', - fullTipLine2: 'создавать больше приложений.', fullTip2des: 'Рекомендуется удалить неактивные приложения, чтобы освободить место, или свяжитесь с нами.', fullTip2: 'Достигнут лимит плана', contactUs: 'Свяжитесь с нами', diff --git a/web/i18n/sl-SI/billing.ts b/web/i18n/sl-SI/billing.ts index 9b7987293f..ffaa1b56e2 100644 --- a/web/i18n/sl-SI/billing.ts +++ b/web/i18n/sl-SI/billing.ts @@ -23,19 +23,14 @@ const translation = { contractSales: 'Kontaktirajte prodajo', contractOwner: 'Kontaktirajte upravitelja ekipe', startForFree: 'Začnite brezplačno', - getStartedWith: 'Začnite z ', contactSales: 'Kontaktirajte 
prodajo', talkToSales: 'Pogovorite se s prodajo', modelProviders: 'Ponudniki modelov', - teamMembers: 'Člani ekipe', annotationQuota: 'Kvote za označevanje', buildApps: 'Gradite aplikacije', vectorSpace: 'Prostor za vektorje', - vectorSpaceBillingTooltip: 'Vsak 1 MB lahko shrani približno 1,2 milijona znakov vektoriziranih podatkov (ocenjeno z uporabo OpenAI Embeddings, odvisno od modelov).', vectorSpaceTooltip: 'Prostor za vektorje je dolgoročni pomnilniški sistem, potreben za to, da LLM-ji razumejo vaše podatke.', - documentsUploadQuota: 'Kvote za nalaganje dokumentov', documentProcessingPriority: 'Prioriteta obdelave dokumentov', - documentProcessingPriorityTip: 'Za višjo prioriteto obdelave dokumentov nadgradite svoj načrt.', documentProcessingPriorityUpgrade: 'Obdelujte več podatkov z večjo natančnostjo in hitrostjo.', priority: { 'standard': 'Standard', @@ -103,19 +98,16 @@ const translation = { sandbox: { name: 'Peskovnik', description: '200 brezplačnih poskusov GPT', - includesTitle: 'Vključuje:', for: 'Brezplačno preizkušanje osnovnih zmogljivosti', }, professional: { name: 'Profesionalni', description: 'Za posameznike in male ekipe, da odklenete več zmogljivosti po ugodni ceni.', - includesTitle: 'Vse v brezplačnem načrtu, plus:', for: 'Za neodvisne razvijalce/male ekipe', }, team: { name: 'Ekipa', description: 'Sodelujte brez omejitev in uživajte v vrhunski zmogljivosti.', - includesTitle: 'Vse v profesionalnem načrtu, plus:', for: 'Za srednje velike ekipe', }, enterprise: { @@ -123,15 +115,15 @@ const translation = { description: 'Pridobite vse zmogljivosti in podporo za velike sisteme kritične za misijo.', includesTitle: 'Vse v načrtu Ekipa, plus:', features: { - 5: 'Pogajali smo se o SLAs s partnerji Dify', - 4: 'SSO', - 0: 'Rešitve za razširljivo uvedbo na ravni podjetij', - 1: 'Avtorizacija za komercialno licenco', + 0: 'Prilagodljive rešitve za uvajanje na ravni podjetij', 2: 'Ekskluzivne funkcije za podjetja', - 7: 'Posodobitve in vzdrževanje s strani Dify uradno', - 3: 'Več delovnih prostorov in upravljanje podjetij', + 7: 'Posodobitve in vzdrževanje s strani Dify Official', + 8: 'Strokovna tehnična podpora', + 1: 'Dovoljenje za komercialno licenco', + 3: 'Več delovnih prostorov in upravljanje podjetja', + 5: 'Dogovorjene pogodbe o ravni storitev s strani Dify Partners', 6: 'Napredna varnost in nadzor', - 8: 'Profesionalna tehnična podpora', + 4: 'SSO', }, priceTip: 'Letno zaračunavanje samo', price: 'Po meri', @@ -140,9 +132,9 @@ const translation = { }, community: { features: { - 2: 'Upošteva Dify odprtokodno licenco', - 0: 'Vse ključne funkcije so bile objavljene v javnem repozitoriju', - 1: 'Enotno delovno okolje', + 1: 'En delovni prostor', + 0: 'Vse osnovne funkcije, izdane v javnem repozitoriju', + 2: 'Skladen z odprtokodno licenco Dify', }, includesTitle: 'Brezplačne funkcije:', price: 'Brezplačno', @@ -153,10 +145,10 @@ const translation = { }, premium: { features: { - 2: 'Prilagoditev logotipa in blagovne znamke spletne aplikacije', - 1: 'Enotno delovno okolje', - 0: 'Samoobvladovana zanesljivost različnih ponudnikov oblačnih storitev', - 3: 'Prednostna e-pošta in podpora za klepet', + 1: 'En delovni prostor', + 3: 'Prednostna podpora po e-pošti in klepetu', + 2: 'Prilagajanje logotipa in blagovne znamke WebApp', + 0: 'Samostojna zanesljivost različnih ponudnikov storitev v oblaku', }, name: 'Premium', priceTip: 'Na podlagi oblaka Marketplace', @@ -173,8 +165,6 @@ const translation = { fullSolution: 'Nadgradite svoj načrt za več prostora.', }, apps: { - 
fullTipLine1: 'Nadgradite svoj načrt, da', - fullTipLine2: 'gradite več aplikacij.', fullTip1des: 'Dosegli ste omejitev za izdelavo aplikacij v tem načrtu.', fullTip1: 'Nadgradite za ustvarjanje več aplikacij', fullTip2: 'Dosežena meja načrta', diff --git a/web/i18n/th-TH/billing.ts b/web/i18n/th-TH/billing.ts index c58d61c112..afbe9318c4 100644 --- a/web/i18n/th-TH/billing.ts +++ b/web/i18n/th-TH/billing.ts @@ -23,19 +23,14 @@ const translation = { contractSales: 'ติดต่อฝ่ายขาย', contractOwner: 'ติดต่อผู้จัดการทีม', startForFree: 'เริ่มฟรี', - getStartedWith: 'เริ่มต้นใช้งาน', contactSales: 'ติดต่อฝ่ายขาย', talkToSales: 'พูดคุยกับฝ่ายขาย', modelProviders: 'ผู้ให้บริการโมเดล', - teamMembers: 'สมาชิกในทีม', annotationQuota: 'โควต้าคําอธิบายประกอบ', buildApps: 'สร้างแอพ', vectorSpace: 'พื้นที่เวกเตอร์', - vectorSpaceBillingTooltip: 'แต่ละ 1MB สามารถจัดเก็บข้อมูลแบบเวกเตอร์ได้ประมาณ 1.2 ล้านอักขระ (โดยประมาณโดยใช้ OpenAI Embeddings แตกต่างกันไปตามรุ่น)', vectorSpaceTooltip: 'Vector Space เป็นระบบหน่วยความจําระยะยาวที่จําเป็นสําหรับ LLM ในการทําความเข้าใจข้อมูลของคุณ', - documentsUploadQuota: 'โควต้าการอัปโหลดเอกสาร', documentProcessingPriority: 'ลําดับความสําคัญในการประมวลผลเอกสาร', - documentProcessingPriorityTip: 'สําหรับลําดับความสําคัญในการประมวลผลเอกสารที่สูงขึ้น โปรดอัปเกรดแผนของคุณ', documentProcessingPriorityUpgrade: 'ประมวลผลข้อมูลได้มากขึ้นด้วยความแม่นยําที่สูงขึ้นด้วยความเร็วที่เร็วขึ้น', priority: { 'standard': 'มาตรฐาน', @@ -103,19 +98,16 @@ const translation = { sandbox: { name: 'กระบะทราย', description: 'ทดลองใช้ GPT ฟรี 200 ครั้ง', - includesTitle: 'มี:', for: 'ทดลองใช้ฟรีของความสามารถหลัก', }, professional: { name: 'มืออาชีพ', description: 'สําหรับบุคคลและทีมขนาดเล็กเพื่อปลดล็อกพลังงานมากขึ้นในราคาย่อมเยา', - includesTitle: 'ทุกอย่างในแผนฟรี รวมถึง:', for: 'สำหรับนักพัฒนาที่เป็นอิสระ/ทีมขนาดเล็ก', }, team: { name: 'ทีม', description: 'ทํางานร่วมกันอย่างไร้ขีดจํากัดและเพลิดเพลินไปกับประสิทธิภาพระดับสูงสุด', - includesTitle: 'ทุกอย่างในแผน Professional รวมถึง:', for: 'สำหรับทีมขนาดกลาง', }, enterprise: { @@ -123,15 +115,15 @@ const translation = { description: 'รับความสามารถและการสนับสนุนเต็มรูปแบบสําหรับระบบที่สําคัญต่อภารกิจขนาดใหญ่', includesTitle: 'ทุกอย่างในแผนทีม รวมถึง:', features: { - 8: 'การสนับสนุนทางเทคนิคระดับมืออาชีพ', - 2: 'คุณสมบัติพิเศษขององค์กร', - 3: 'หลายพื้นที่ทำงานและการบริหารจัดการองค์กร', 4: 'SSO', - 6: 'ความปลอดภัยและการควบคุมขั้นสูง', - 5: 'เจรจาข้อตกลงบริการ (SLA) โดย Dify Partners', - 7: 'การอัปเดตและการบำรุงรักษาโดย Dify อย่างเป็นทางการ', - 1: 'ใบอนุญาตการใช้เชิงพาณิชย์', - 0: 'โซลูชันการปรับใช้ที่มีขนาดใหญ่และมีคุณภาพระดับองค์กร', + 2: 'คุณสมบัติพิเศษสําหรับองค์กร', + 5: 'SLA ที่เจรจาโดย Dify Partners', + 1: 'การอนุญาตใบอนุญาตเชิงพาณิชย์', + 8: 'การสนับสนุนด้านเทคนิคอย่างมืออาชีพ', + 0: 'โซลูชันการปรับใช้ที่ปรับขนาดได้ระดับองค์กร', + 7: 'การอัปเดตและบํารุงรักษาโดย Dify อย่างเป็นทางการ', + 3: 'พื้นที่ทํางานหลายแห่งและการจัดการองค์กร', + 6: 'การรักษาความปลอดภัยและการควบคุมขั้นสูง', }, btnText: 'ติดต่อฝ่ายขาย', price: 'ที่กำหนดเอง', @@ -140,9 +132,9 @@ const translation = { }, community: { features: { - 2: 'ปฏิบัติตามใบอนุญาตโอเพ่นซอร์สของ Dify', - 0: 'ฟีเจอร์หลักทั้งหมดถูกปล่อยออกภายใต้ที่เก็บสาธารณะ', - 1: 'พื้นที่ทำงานเดียว', + 1: 'พื้นที่ทํางานเดียว', + 2: 'สอดคล้องกับใบอนุญาตโอเพ่นซอร์ส Dify', + 0: 'คุณสมบัติหลักทั้งหมดที่เผยแพร่ภายใต้ที่เก็บสาธารณะ', }, name: 'ชุมชน', price: 'ฟรี', @@ -153,10 +145,10 @@ const translation = { }, premium: { features: { - 3: 'การสนับสนุนทางอีเมลและแชทที่มีความสำคัญ', - 1: 'พื้นที่ทำงานเดียว', - 2: 
'การปรับแต่งโลโก้และแบรนดิ้งของเว็บแอป', - 0: 'การจัดการความน่าเชื่อถือด้วยตนเองโดยผู้ให้บริการคลาวด์ต่าง ๆ', + 2: 'โลโก้ WebApp และการปรับแต่งแบรนด์', + 3: 'การสนับสนุนทางอีเมลและแชทลําดับความสําคัญ', + 1: 'พื้นที่ทํางานเดียว', + 0: 'ความน่าเชื่อถือที่จัดการด้วยตนเองโดยผู้ให้บริการคลาวด์ต่างๆ', }, priceTip: 'อิงตามตลาดคลาวด์', for: 'สำหรับองค์กรและทีมขนาดกลาง', @@ -173,8 +165,6 @@ const translation = { fullSolution: 'อัปเกรดแผนของคุณเพื่อเพิ่มพื้นที่', }, apps: { - fullTipLine1: 'อัปเกรดแผนของคุณเป็น', - fullTipLine2: 'สร้างแอปเพิ่มเติม', contactUs: 'ติดต่อเรา', fullTip2: 'ถึงขีดจำกัดของแผนแล้ว', fullTip1: 'อัปเกรดเพื่อสร้างแอปเพิ่มเติม', diff --git a/web/i18n/tr-TR/billing.ts b/web/i18n/tr-TR/billing.ts index fd51bae648..d85de6b5a2 100644 --- a/web/i18n/tr-TR/billing.ts +++ b/web/i18n/tr-TR/billing.ts @@ -23,19 +23,14 @@ const translation = { contractSales: 'Satışla iletişime geçin', contractOwner: 'Takım yöneticisine başvurun', startForFree: 'Ücretsiz Başla', - getStartedWith: 'ile başlayın', contactSales: 'Satışlarla İletişime Geçin', talkToSales: 'Satışlarla Konuşun', modelProviders: 'Model Sağlayıcılar', - teamMembers: 'Takım Üyeleri', annotationQuota: 'Ek Açıklama Kotası', buildApps: 'Uygulamalar Oluştur', vectorSpace: 'Vektör Alanı', - vectorSpaceBillingTooltip: 'Her 1MB yaklaşık 1.2 milyon karakter vektörize veri depolayabilir (OpenAI Embeddings ile tahmin edilmiştir, modellere göre farklılık gösterebilir).', vectorSpaceTooltip: 'Vektör Alanı, LLM\'lerin verilerinizi anlaması için gerekli uzun süreli hafıza sistemidir.', - documentsUploadQuota: 'Doküman Yükleme Kotası', documentProcessingPriority: 'Doküman İşleme Önceliği', - documentProcessingPriorityTip: 'Daha yüksek doküman işleme önceliği için planınızı yükseltin.', documentProcessingPriorityUpgrade: 'Daha fazla veriyi daha yüksek doğrulukla ve daha hızlı işleyin.', priority: { 'standard': 'Standart', @@ -103,19 +98,16 @@ const translation = { sandbox: { name: 'Sandbox', description: '200 kez GPT ücretsiz deneme', - includesTitle: 'İçerdikleri:', for: 'Temel Yeteneklerin Ücretsiz Denemesi', }, professional: { name: 'Profesyonel', description: 'Bireyler ve küçük takımlar için daha fazla güç açın.', - includesTitle: 'Ücretsiz plandaki her şey, artı:', for: 'Bağımsız Geliştiriciler/Küçük Takımlar için', }, team: { name: 'Takım', description: 'Sınırsız işbirliği ve en üst düzey performans.', - includesTitle: 'Profesyonel plandaki her şey, artı:', for: 'Orta Boyutlu Takımlar İçin', }, enterprise: { @@ -123,15 +115,15 @@ const translation = { description: 'Büyük ölçekli kritik sistemler için tam yetenekler ve destek.', includesTitle: 'Takım plandaki her şey, artı:', features: { - 3: 'Birden Fazla Çalışma Alanı ve Kurumsal Yönetim', 8: 'Profesyonel Teknik Destek', - 4: 'SSO', - 2: 'Özel Şirket Özellikleri', 1: 'Ticari Lisans Yetkilendirmesi', - 7: 'Dify Tarafından Resmi Güncellemeler ve Bakım', - 5: 'Dify Ortakları tarafından müzakere edilen SLA\'lar', 6: 'Gelişmiş Güvenlik ve Kontroller', + 5: 'Dify Partners tarafından müzakere edilen SLA\'lar', + 4: 'SSO', + 2: 'Özel Kurumsal Özellikler', 0: 'Kurumsal Düzeyde Ölçeklenebilir Dağıtım Çözümleri', + 7: 'Resmi olarak Dify tarafından Güncellemeler ve Bakım', + 3: 'Çoklu Çalışma Alanları ve Kurumsal Yönetim', }, priceTip: 'Yıllık Faturalama Sadece', for: 'Büyük boyutlu Takımlar için', @@ -140,9 +132,9 @@ const translation = { }, community: { features: { - 1: 'Tek İş Alanı', - 0: 'Tüm Temel Özellikler Kamu Deposu Altında Yayınlandı', - 2: 'Dify Açık Kaynak Lisansına uyar', + 1: 'Tek 
Çalışma Alanı', + 0: 'Genel depo altında yayınlanan tüm temel özellikler', + 2: 'Dify Açık Kaynak Lisansı ile uyumludur', }, price: 'Ücretsiz', includesTitle: 'Ücretsiz Özellikler:', @@ -153,10 +145,10 @@ const translation = { }, premium: { features: { - 1: 'Tek İş Alanı', - 0: 'Çeşitli Bulut Sağlayıcıları Tarafından Kendiliğinden Yönetilen Güvenilirlik', - 3: 'Öncelikli Email ve Sohbet Desteği', - 2: 'Web Uygulaması Logo ve Markalaşma Özelleştirmesi', + 1: 'Tek Çalışma Alanı', + 0: 'Çeşitli Bulut Sağlayıcıları Tarafından Kendi Kendini Yöneten Güvenilirlik', + 2: 'WebApp Logosu ve Marka Özelleştirmesi', + 3: 'Öncelikli E-posta ve Sohbet Desteği', }, name: 'Premium', includesTitle: 'Topluluktan her şey, artı:', @@ -173,8 +165,6 @@ const translation = { fullSolution: 'Daha fazla alan için planınızı yükseltin.', }, apps: { - fullTipLine1: 'Daha fazla uygulama oluşturmak için', - fullTipLine2: 'planınızı yükseltin.', contactUs: 'Bizimle iletişime geçin', fullTip2des: 'Kullanımı serbest bırakmak için etkisiz uygulamaların temizlenmesi önerilir veya bizimle iletişime geçin.', fullTip1des: 'Bu planda uygulama oluşturma limitine ulaştınız.', diff --git a/web/i18n/uk-UA/billing.ts b/web/i18n/uk-UA/billing.ts index 56888531b0..a048fe67cd 100644 --- a/web/i18n/uk-UA/billing.ts +++ b/web/i18n/uk-UA/billing.ts @@ -23,17 +23,13 @@ const translation = { contractSales: 'Зв\'язатися з відділом продажів', contractOwner: 'Зв\'язатися з керівником команди', startForFree: 'Почніть безкоштовно', - getStartedWith: 'Почніть роботу з ', contactSales: 'Зв\'язатися з відділом продажів', talkToSales: 'Поговоріть зі службою продажів', modelProviders: 'Постачальники моделей', - teamMembers: 'Члени команди', buildApps: 'Створювати додатки', vectorSpace: 'Векторний простір', - vectorSpaceBillingTooltip: 'Кожен 1 МБ може зберігати близько 1,2 мільйона символів векторизованих даних (оцінка з використанням OpenAI Embeddings, відрізняється в залежності від моделей).', vectorSpaceTooltip: 'Векторний простір – це система довгострокової пам\'яті, необхідна LLM для розуміння ваших даних.', documentProcessingPriority: 'Пріоритет обробки документів', - documentProcessingPriorityTip: 'Для вищого пріоритету обробки документів оновіть свій план.', documentProcessingPriorityUpgrade: 'Обробляйте більше даних із вищою точністю та на більших швидкостях.', priority: { 'standard': 'Стандартний', @@ -77,7 +73,6 @@ const translation = { ragAPIRequestTooltip: 'Відноситься до кількості викликів API, що викликають лише можливості обробки бази знань Dify.', receiptInfo: 'Лише власник команди та адміністратор команди можуть підписуватися та переглядати інформацію про виставлення рахунків', annotationQuota: 'Квота анотацій', - documentsUploadQuota: 'Квота завантаження документів', teamMember_one: '{{count,number}} член команди', teamWorkspace: '{{count,number}} Командний Простір', apiRateLimit: 'Обмеження швидкості API', @@ -103,19 +98,16 @@ const translation = { sandbox: { name: 'Пісочниця', description: '200 безкоштовних пробних версій GPT', - includesTitle: 'Включає в себе:', for: 'Безкоштовна пробна версія основних функцій', }, professional: { name: 'Професійний', description: 'Щоб окремі особи та невеликі команди могли отримати більше можливостей за доступною ціною.', - includesTitle: 'Все у безкоштовному плані, плюс:', for: 'Для незалежних розробників/малих команд', }, team: { name: 'Команда', description: 'Співпрацюйте без обмежень і користуйтеся продуктивністю найвищого рівня.', - includesTitle: 'Все, що входить до плану Professional, 
плюс:', for: 'Для середніх команд', }, enterprise: { @@ -123,15 +115,15 @@ const translation = { description: 'Отримайте повні можливості та підтримку для масштабних критично важливих систем.', includesTitle: 'Все, що входить до плану Team, плюс:', features: { - 5: 'Угоди про рівень обслуговування, узгоджені партнерами Dify', - 2: 'Ексклюзивні підприємницькі функції', - 6: 'Розвинена безпека та контроль', + 4: 'Єдиний вхід', + 7: 'Оновлення та обслуговування від Dify Official', + 1: 'Авторизація комерційної ліцензії', 8: 'Професійна технічна підтримка', - 1: 'Комерційна ліцензія на авторизацію', - 3: 'Кілька робочих просторів та управління підприємством', - 4: 'ССО', - 0: 'Рішення для масштабованого розгортання підприємств', - 7: 'Оновлення та обслуговування від Dify Офіційно', + 2: 'Ексклюзивні функції підприємства', + 6: 'Розширені функції безпеки та керування', + 3: 'Кілька робочих областей і управління підприємством', + 5: 'Угода про рівень обслуговування за домовленістю від Dify Partners', + 0: 'Масштабовані рішення для розгортання корпоративного рівня', }, btnText: 'Зв\'язатися з відділом продажу', priceTip: 'Тільки річна оплата', @@ -140,9 +132,9 @@ const translation = { }, community: { features: { - 2: 'Відповідає ліцензії Dify Open Source', 1: 'Єдине робоче місце', - 0: 'Усі основні функції випущені під публічним репозиторієм', + 2: 'Відповідає ліцензії Dify з відкритим вихідним кодом', + 0: 'Усі основні функції випущено в загальнодоступному репозиторії', }, btnText: 'Розпочніть з громади', includesTitle: 'Безкоштовні можливості:', @@ -153,10 +145,10 @@ const translation = { }, premium: { features: { - 2: 'Логотип веб-додатку та налаштування брендингу', 1: 'Єдине робоче місце', - 3: 'Пріоритетна email та чат підтримка', - 0: 'Самостійно керовані надійність різних хмарних постачальників', + 2: 'Налаштування логотипу WebApp та брендингу', + 3: 'Пріоритетна підтримка електронною поштою та в чаті', + 0: 'Самокерована надійність різними хмарними провайдерами', }, description: 'Для середніх підприємств та команд', btnText: 'Отримайте Преміум у', @@ -173,8 +165,6 @@ const translation = { fullSolution: 'Оновіть свій план, щоб отримати більше місця.', }, apps: { - fullTipLine1: 'Оновіть свій план, щоб', - fullTipLine2: 'створити більше програм.', fullTip1des: 'Ви досягли межі створення додатків за цим планом.', fullTip2: 'Досягнуто ліміту плану', fullTip1: 'Оновіть, щоб створити більше додатків', diff --git a/web/i18n/vi-VN/billing.ts b/web/i18n/vi-VN/billing.ts index 3a8ac03ffb..69035dc595 100644 --- a/web/i18n/vi-VN/billing.ts +++ b/web/i18n/vi-VN/billing.ts @@ -23,18 +23,13 @@ const translation = { contractSales: 'Liên hệ bộ phận bán hàng', contractOwner: 'Liên hệ quản lý nhóm', startForFree: 'Bắt đầu miễn phí', - getStartedWith: 'Bắt đầu với ', contactSales: 'Liên hệ Bán hàng', talkToSales: 'Nói chuyện với Bộ phận Bán hàng', modelProviders: 'Nhà cung cấp Mô hình', - teamMembers: 'Thành viên Nhóm', buildApps: 'Xây dựng Ứng dụng', vectorSpace: 'Không gian Vector', - vectorSpaceBillingTooltip: 'Mỗi 1MB có thể lưu trữ khoảng 1.2 triệu ký tự dữ liệu vector hóa (ước tính sử dụng OpenAI Embeddings, thay đổi tùy theo các mô hình).', vectorSpaceTooltip: 'Không gian Vector là hệ thống bộ nhớ dài hạn cần thiết cho LLMs để hiểu dữ liệu của bạn.', - documentsUploadQuota: 'Hạn mức Tải lên Tài liệu', documentProcessingPriority: 'Ưu tiên Xử lý Tài liệu', - documentProcessingPriorityTip: 'Để có ưu tiên xử lý tài liệu cao hơn, vui lòng nâng cấp kế hoạch của bạn.', documentProcessingPriorityUpgrade: 'Xử 
lý nhiều dữ liệu với độ chính xác cao và tốc độ nhanh hơn.', priority: { 'standard': 'Tiêu chuẩn', @@ -103,19 +98,16 @@ const translation = { sandbox: { name: 'Hộp Cát', description: 'Thử nghiệm miễn phí 200 lần GPT', - includesTitle: 'Bao gồm:', for: 'Dùng thử miễn phí các tính năng cốt lõi', }, professional: { name: 'Chuyên nghiệp', description: 'Dành cho cá nhân và các nhóm nhỏ để mở khóa nhiều sức mạnh với giá cả phải chăng.', - includesTitle: 'Tất cả trong kế hoạch miễn phí, cộng thêm:', for: 'Dành cho các nhà phát triển độc lập/nhóm nhỏ', }, team: { name: 'Nhóm', description: 'Hợp tác mà không giới hạn và tận hưởng hiệu suất hạng nhất.', - includesTitle: 'Tất cả trong kế hoạch Chuyên nghiệp, cộng thêm:', for: 'Dành cho các đội nhóm vừa', }, enterprise: { @@ -123,15 +115,15 @@ const translation = { description: 'Nhận toàn bộ khả năng và hỗ trợ cho các hệ thống quan trọng cho nhiệm vụ quy mô lớn.', includesTitle: 'Tất cả trong kế hoạch Nhóm, cộng thêm:', features: { - 2: 'Tính năng Doanh nghiệp Độc quyền', - 1: 'Giấy phép kinh doanh', - 8: 'Hỗ trợ kỹ thuật chuyên nghiệp', - 7: 'Cập nhật và Bảo trì bởi Dify Chính thức', - 5: 'Thỏa thuận SLA bởi các đối tác Dify', - 6: 'An ninh nâng cao và kiểm soát', - 3: 'Nhiều không gian làm việc & Quản lý doanh nghiệp', - 0: 'Giải pháp triển khai mở rộng quy mô cấp doanh nghiệp', + 2: 'Các tính năng dành riêng cho doanh nghiệp', + 3: 'Nhiều không gian làm việc & quản lý doanh nghiệp', + 7: 'Cập nhật và bảo trì bởi Dify chính thức', 4: 'SSO', + 8: 'Hỗ trợ kỹ thuật chuyên nghiệp', + 5: 'SLA được đàm phán bởi Dify Partners', + 1: 'Ủy quyền giấy phép thương mại', + 6: 'Bảo mật & Kiểm soát nâng cao', + 0: 'Giải pháp triển khai có thể mở rộng cấp doanh nghiệp', }, price: 'Tùy chỉnh', for: 'Dành cho các đội lớn', @@ -140,9 +132,9 @@ const translation = { }, community: { features: { - 2: 'Tuân thủ Giấy phép Mã nguồn Mở Dify', - 0: 'Tất cả các tính năng cốt lõi được phát hành dưới Kho lưu trữ công khai', 1: 'Không gian làm việc đơn', + 0: 'Tất cả các tính năng cốt lõi được phát hành trong kho lưu trữ công cộng', + 2: 'Tuân thủ Giấy phép nguồn mở Dify', }, description: 'Dành cho người dùng cá nhân, nhóm nhỏ hoặc các dự án phi thương mại', name: 'Cộng đồng', @@ -153,10 +145,10 @@ const translation = { }, premium: { features: { - 3: 'Hỗ trợ qua Email & Chat Ưu tiên', - 2: 'Tùy chỉnh Logo & Thương hiệu Ứng dụng Web', 1: 'Không gian làm việc đơn', - 0: 'Độ tin cậy tự quản lý bởi các nhà cung cấp đám mây khác nhau', + 2: 'Logo WebApp & Tùy chỉnh thương hiệu', + 3: 'Hỗ trợ email & trò chuyện ưu tiên', + 0: 'Độ tin cậy tự quản lý của các nhà cung cấp đám mây khác nhau', }, comingSoon: 'Hỗ trợ Microsoft Azure & Google Cloud Sẽ Đến Sớm', priceTip: 'Dựa trên Thị trường Đám mây', @@ -173,8 +165,6 @@ const translation = { fullSolution: 'Nâng cấp kế hoạch của bạn để có thêm không gian.', }, apps: { - fullTipLine1: 'Nâng cấp kế hoạch của bạn để', - fullTipLine2: 'xây dựng thêm ứng dụng.', contactUs: 'Liên hệ với chúng tôi', fullTip2: 'Đã đạt giới hạn kế hoạch', fullTip1des: 'Bạn đã đạt đến giới hạn xây dựng ứng dụng trên kế hoạch này.', diff --git a/web/i18n/zh-Hant/billing.ts b/web/i18n/zh-Hant/billing.ts index f957bc4eab..bedf4550f8 100644 --- a/web/i18n/zh-Hant/billing.ts +++ b/web/i18n/zh-Hant/billing.ts @@ -115,6 +115,15 @@ const translation = { description: '獲得大規模關鍵任務系統的完整功能和支援。', includesTitle: 'Team 計劃中的一切,加上:', features: { + 8: '專業技術支持', + 3: '多個工作區和企業管理', + 0: '企業級可擴展部署解決方案', + 1: '商業許可證授權', + 7: 'Dify 官方更新和維護', + 6: '進階安全與控制', + 4: '單一登入', + 5: 'Dify 合作夥伴協商的 
SLA', + 2: '獨家企業功能', }, price: '自訂', btnText: '聯繫銷售', @@ -123,6 +132,9 @@ const translation = { }, community: { features: { + 0: '所有核心功能在公共存儲庫下發布', + 1: '單一工作區', + 2: '符合 Dify 開源許可證', }, includesTitle: '免費功能:', btnText: '開始使用社區', @@ -133,6 +145,10 @@ const translation = { }, premium: { features: { + 3: '優先電子郵件和聊天支持', + 2: 'WebApp 標誌和品牌定制', + 0: '各種雲端供應商的自我管理可靠性', + 1: '單一工作區', }, for: '適用於中型組織和團隊', comingSoon: '微軟 Azure 與 Google Cloud 支持即將推出', From 97b24f48d587656e6c21f99ee67ff8e81ac6486f Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Mon, 18 Aug 2025 09:43:20 +0800 Subject: [PATCH 28/36] feat: add testcontainers based tests for metadata service (#24048) --- .../services/test_metadata_service.py | 1144 +++++++++++++++++ 1 file changed, 1144 insertions(+) create mode 100644 api/tests/test_containers_integration_tests/services/test_metadata_service.py diff --git a/api/tests/test_containers_integration_tests/services/test_metadata_service.py b/api/tests/test_containers_integration_tests/services/test_metadata_service.py new file mode 100644 index 0000000000..7fef572c14 --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/test_metadata_service.py @@ -0,0 +1,1144 @@ +from unittest.mock import patch + +import pytest +from faker import Faker + +from core.rag.index_processor.constant.built_in_field import BuiltInField +from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models.dataset import Dataset, DatasetMetadata, DatasetMetadataBinding, Document +from services.entities.knowledge_entities.knowledge_entities import MetadataArgs +from services.metadata_service import MetadataService + + +class TestMetadataService: + """Integration tests for MetadataService using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.metadata_service.current_user") as mock_current_user, + patch("services.metadata_service.redis_client") as mock_redis_client, + patch("services.dataset_service.DocumentService") as mock_document_service, + ): + # Setup default mock returns + mock_redis_client.get.return_value = None + mock_redis_client.set.return_value = True + mock_redis_client.delete.return_value = 1 + + yield { + "current_user": mock_current_user, + "redis_client": mock_redis_client, + "document_service": mock_document_service, + } + + def _create_test_account_and_tenant(self, db_session_with_containers, mock_external_service_dependencies): + """ + Helper method to create a test account and tenant for testing. 
+ + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + + Returns: + tuple: (account, tenant) - Created account and tenant instances + """ + fake = Faker() + + # Create account + account = Account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + status="active", + ) + + from extensions.ext_database import db + + db.session.add(account) + db.session.commit() + + # Create tenant for the account + tenant = Tenant( + name=fake.company(), + status="normal", + ) + db.session.add(tenant) + db.session.commit() + + # Create tenant-account join + join = TenantAccountJoin( + tenant_id=tenant.id, + account_id=account.id, + role=TenantAccountRole.OWNER.value, + current=True, + ) + db.session.add(join) + db.session.commit() + + # Set current tenant for account + account.current_tenant = tenant + + return account, tenant + + def _create_test_dataset(self, db_session_with_containers, mock_external_service_dependencies, account, tenant): + """ + Helper method to create a test dataset for testing. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + account: Account instance + tenant: Tenant instance + + Returns: + Dataset: Created dataset instance + """ + fake = Faker() + + dataset = Dataset( + tenant_id=tenant.id, + name=fake.company(), + description=fake.text(max_nb_chars=100), + data_source_type="upload_file", + created_by=account.id, + built_in_field_enabled=False, + ) + + from extensions.ext_database import db + + db.session.add(dataset) + db.session.commit() + + return dataset + + def _create_test_document(self, db_session_with_containers, mock_external_service_dependencies, dataset, account): + """ + Helper method to create a test document for testing. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + dataset: Dataset instance + account: Account instance + + Returns: + Document: Created document instance + """ + fake = Faker() + + document = Document( + tenant_id=dataset.tenant_id, + dataset_id=dataset.id, + position=1, + data_source_type="upload_file", + data_source_info="{}", + batch="test-batch", + name=fake.file_name(), + created_from="web", + created_by=account.id, + doc_form="text", + doc_language="en", + ) + + from extensions.ext_database import db + + db.session.add(document) + db.session.commit() + + return document + + def test_create_metadata_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful metadata creation with valid parameters. 
+ """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + metadata_args = MetadataArgs(type="string", name="test_metadata") + + # Act: Execute the method under test + result = MetadataService.create_metadata(dataset.id, metadata_args) + + # Assert: Verify the expected outcomes + assert result is not None + assert result.name == "test_metadata" + assert result.type == "string" + assert result.dataset_id == dataset.id + assert result.tenant_id == tenant.id + assert result.created_by == account.id + + # Verify database state + from extensions.ext_database import db + + db.session.refresh(result) + assert result.id is not None + assert result.created_at is not None + + def test_create_metadata_name_too_long(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test metadata creation fails when name exceeds 255 characters. + """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + long_name = "a" * 256 # 256 characters, exceeding 255 limit + metadata_args = MetadataArgs(type="string", name=long_name) + + # Act & Assert: Verify proper error handling + with pytest.raises(ValueError, match="Metadata name cannot exceed 255 characters."): + MetadataService.create_metadata(dataset.id, metadata_args) + + def test_create_metadata_name_already_exists(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test metadata creation fails when name already exists in the same dataset. + """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Create first metadata + first_metadata_args = MetadataArgs(type="string", name="duplicate_name") + MetadataService.create_metadata(dataset.id, first_metadata_args) + + # Try to create second metadata with same name + second_metadata_args = MetadataArgs(type="number", name="duplicate_name") + + # Act & Assert: Verify proper error handling + with pytest.raises(ValueError, match="Metadata name already exists."): + MetadataService.create_metadata(dataset.id, second_metadata_args) + + def test_create_metadata_name_conflicts_with_built_in_field( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test metadata creation fails when name conflicts with built-in field names. 
+ """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Try to create metadata with built-in field name + built_in_field_name = BuiltInField.document_name.value + metadata_args = MetadataArgs(type="string", name=built_in_field_name) + + # Act & Assert: Verify proper error handling + with pytest.raises(ValueError, match="Metadata name already exists in Built-in fields."): + MetadataService.create_metadata(dataset.id, metadata_args) + + def test_update_metadata_name_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful metadata name update with valid parameters. + """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Create metadata first + metadata_args = MetadataArgs(type="string", name="old_name") + metadata = MetadataService.create_metadata(dataset.id, metadata_args) + + # Act: Execute the method under test + new_name = "new_name" + result = MetadataService.update_metadata_name(dataset.id, metadata.id, new_name) + + # Assert: Verify the expected outcomes + assert result is not None + assert result.name == new_name + assert result.updated_by == account.id + assert result.updated_at is not None + + # Verify database state + from extensions.ext_database import db + + db.session.refresh(result) + assert result.name == new_name + + def test_update_metadata_name_too_long(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test metadata name update fails when new name exceeds 255 characters. + """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Create metadata first + metadata_args = MetadataArgs(type="string", name="old_name") + metadata = MetadataService.create_metadata(dataset.id, metadata_args) + + # Try to update with too long name + long_name = "a" * 256 # 256 characters, exceeding 255 limit + + # Act & Assert: Verify proper error handling + with pytest.raises(ValueError, match="Metadata name cannot exceed 255 characters."): + MetadataService.update_metadata_name(dataset.id, metadata.id, long_name) + + def test_update_metadata_name_already_exists(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test metadata name update fails when new name already exists in the same dataset. 
+ """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Create two metadata entries + first_metadata_args = MetadataArgs(type="string", name="first_metadata") + first_metadata = MetadataService.create_metadata(dataset.id, first_metadata_args) + + second_metadata_args = MetadataArgs(type="number", name="second_metadata") + second_metadata = MetadataService.create_metadata(dataset.id, second_metadata_args) + + # Try to update first metadata with second metadata's name + with pytest.raises(ValueError, match="Metadata name already exists."): + MetadataService.update_metadata_name(dataset.id, first_metadata.id, "second_metadata") + + def test_update_metadata_name_conflicts_with_built_in_field( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test metadata name update fails when new name conflicts with built-in field names. + """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Create metadata first + metadata_args = MetadataArgs(type="string", name="old_name") + metadata = MetadataService.create_metadata(dataset.id, metadata_args) + + # Try to update with built-in field name + built_in_field_name = BuiltInField.document_name.value + + with pytest.raises(ValueError, match="Metadata name already exists in Built-in fields."): + MetadataService.update_metadata_name(dataset.id, metadata.id, built_in_field_name) + + def test_update_metadata_name_not_found(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test metadata name update fails when metadata ID does not exist. + """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Try to update non-existent metadata + import uuid + + fake_metadata_id = str(uuid.uuid4()) # Use valid UUID format + new_name = "new_name" + + # Act: Execute the method under test + result = MetadataService.update_metadata_name(dataset.id, fake_metadata_id, new_name) + + # Assert: Verify the method returns None when metadata is not found + assert result is None + + def test_delete_metadata_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful metadata deletion with valid parameters. 
+ """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Create metadata first + metadata_args = MetadataArgs(type="string", name="to_be_deleted") + metadata = MetadataService.create_metadata(dataset.id, metadata_args) + + # Act: Execute the method under test + result = MetadataService.delete_metadata(dataset.id, metadata.id) + + # Assert: Verify the expected outcomes + assert result is not None + assert result.id == metadata.id + + # Verify metadata was deleted from database + from extensions.ext_database import db + + deleted_metadata = db.session.query(DatasetMetadata).filter_by(id=metadata.id).first() + assert deleted_metadata is None + + def test_delete_metadata_not_found(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test metadata deletion fails when metadata ID does not exist. + """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Try to delete non-existent metadata + import uuid + + fake_metadata_id = str(uuid.uuid4()) # Use valid UUID format + + # Act: Execute the method under test + result = MetadataService.delete_metadata(dataset.id, fake_metadata_id) + + # Assert: Verify the method returns None when metadata is not found + assert result is None + + def test_delete_metadata_with_document_bindings( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test metadata deletion successfully removes document metadata bindings. 
+ """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + document = self._create_test_document( + db_session_with_containers, mock_external_service_dependencies, dataset, account + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Create metadata + metadata_args = MetadataArgs(type="string", name="test_metadata") + metadata = MetadataService.create_metadata(dataset.id, metadata_args) + + # Create metadata binding + binding = DatasetMetadataBinding( + tenant_id=tenant.id, + dataset_id=dataset.id, + metadata_id=metadata.id, + document_id=document.id, + created_by=account.id, + ) + + from extensions.ext_database import db + + db.session.add(binding) + db.session.commit() + + # Set document metadata + document.doc_metadata = {"test_metadata": "test_value"} + db.session.add(document) + db.session.commit() + + # Act: Execute the method under test + result = MetadataService.delete_metadata(dataset.id, metadata.id) + + # Assert: Verify the expected outcomes + assert result is not None + + # Verify metadata was deleted from database + deleted_metadata = db.session.query(DatasetMetadata).filter_by(id=metadata.id).first() + assert deleted_metadata is None + + # Note: The service attempts to update document metadata but may not succeed + # due to mock configuration. The main functionality (metadata deletion) is verified. + + def test_get_built_in_fields_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful retrieval of built-in metadata fields. + """ + # Act: Execute the method under test + result = MetadataService.get_built_in_fields() + + # Assert: Verify the expected outcomes + assert result is not None + assert len(result) == 5 + + # Verify all expected built-in fields are present + field_names = [field["name"] for field in result] + field_types = [field["type"] for field in result] + + assert BuiltInField.document_name.value in field_names + assert BuiltInField.uploader.value in field_names + assert BuiltInField.upload_date.value in field_names + assert BuiltInField.last_update_date.value in field_names + assert BuiltInField.source.value in field_names + + # Verify field types + assert "string" in field_types + assert "time" in field_types + + def test_enable_built_in_field_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful enabling of built-in fields for a dataset. 
+ """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + document = self._create_test_document( + db_session_with_containers, mock_external_service_dependencies, dataset, account + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Mock DocumentService.get_working_documents_by_dataset_id + mock_external_service_dependencies["document_service"].get_working_documents_by_dataset_id.return_value = [ + document + ] + + # Verify dataset starts with built-in fields disabled + assert dataset.built_in_field_enabled is False + + # Act: Execute the method under test + MetadataService.enable_built_in_field(dataset) + + # Assert: Verify the expected outcomes + from extensions.ext_database import db + + db.session.refresh(dataset) + assert dataset.built_in_field_enabled is True + + # Note: Document metadata update depends on DocumentService mock working correctly + # The main functionality (enabling built-in fields) is verified + + def test_enable_built_in_field_already_enabled( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test enabling built-in fields when they are already enabled. + """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Enable built-in fields first + dataset.built_in_field_enabled = True + from extensions.ext_database import db + + db.session.add(dataset) + db.session.commit() + + # Mock DocumentService.get_working_documents_by_dataset_id + mock_external_service_dependencies["document_service"].get_working_documents_by_dataset_id.return_value = [] + + # Act: Execute the method under test + MetadataService.enable_built_in_field(dataset) + + # Assert: Verify the method returns early without changes + db.session.refresh(dataset) + assert dataset.built_in_field_enabled is True + + def test_enable_built_in_field_with_no_documents( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test enabling built-in fields for a dataset with no documents. 
+ """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Mock DocumentService.get_working_documents_by_dataset_id to return empty list + mock_external_service_dependencies["document_service"].get_working_documents_by_dataset_id.return_value = [] + + # Act: Execute the method under test + MetadataService.enable_built_in_field(dataset) + + # Assert: Verify the expected outcomes + from extensions.ext_database import db + + db.session.refresh(dataset) + assert dataset.built_in_field_enabled is True + + def test_disable_built_in_field_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful disabling of built-in fields for a dataset. + """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + document = self._create_test_document( + db_session_with_containers, mock_external_service_dependencies, dataset, account + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Enable built-in fields first + dataset.built_in_field_enabled = True + from extensions.ext_database import db + + db.session.add(dataset) + db.session.commit() + + # Set document metadata with built-in fields + document.doc_metadata = { + BuiltInField.document_name.value: document.name, + BuiltInField.uploader.value: "test_uploader", + BuiltInField.upload_date.value: 1234567890.0, + BuiltInField.last_update_date.value: 1234567890.0, + BuiltInField.source.value: "test_source", + } + db.session.add(document) + db.session.commit() + + # Mock DocumentService.get_working_documents_by_dataset_id + mock_external_service_dependencies["document_service"].get_working_documents_by_dataset_id.return_value = [ + document + ] + + # Act: Execute the method under test + MetadataService.disable_built_in_field(dataset) + + # Assert: Verify the expected outcomes + db.session.refresh(dataset) + assert dataset.built_in_field_enabled is False + + # Note: Document metadata update depends on DocumentService mock working correctly + # The main functionality (disabling built-in fields) is verified + + def test_disable_built_in_field_already_disabled( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test disabling built-in fields when they are already disabled. 
+ """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Verify dataset starts with built-in fields disabled + assert dataset.built_in_field_enabled is False + + # Mock DocumentService.get_working_documents_by_dataset_id + mock_external_service_dependencies["document_service"].get_working_documents_by_dataset_id.return_value = [] + + # Act: Execute the method under test + MetadataService.disable_built_in_field(dataset) + + # Assert: Verify the method returns early without changes + from extensions.ext_database import db + + db.session.refresh(dataset) + assert dataset.built_in_field_enabled is False + + def test_disable_built_in_field_with_no_documents( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test disabling built-in fields for a dataset with no documents. + """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Enable built-in fields first + dataset.built_in_field_enabled = True + from extensions.ext_database import db + + db.session.add(dataset) + db.session.commit() + + # Mock DocumentService.get_working_documents_by_dataset_id to return empty list + mock_external_service_dependencies["document_service"].get_working_documents_by_dataset_id.return_value = [] + + # Act: Execute the method under test + MetadataService.disable_built_in_field(dataset) + + # Assert: Verify the expected outcomes + db.session.refresh(dataset) + assert dataset.built_in_field_enabled is False + + def test_update_documents_metadata_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful update of documents metadata. 
+ """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + document = self._create_test_document( + db_session_with_containers, mock_external_service_dependencies, dataset, account + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Create metadata + metadata_args = MetadataArgs(type="string", name="test_metadata") + metadata = MetadataService.create_metadata(dataset.id, metadata_args) + + # Mock DocumentService.get_document + mock_external_service_dependencies["document_service"].get_document.return_value = document + + # Create metadata operation data + from services.entities.knowledge_entities.knowledge_entities import ( + DocumentMetadataOperation, + MetadataDetail, + MetadataOperationData, + ) + + metadata_detail = MetadataDetail(id=metadata.id, name=metadata.name, value="test_value") + + operation = DocumentMetadataOperation(document_id=document.id, metadata_list=[metadata_detail]) + + operation_data = MetadataOperationData(operation_data=[operation]) + + # Act: Execute the method under test + MetadataService.update_documents_metadata(dataset, operation_data) + + # Assert: Verify the expected outcomes + from extensions.ext_database import db + + # Verify document metadata was updated + db.session.refresh(document) + assert document.doc_metadata is not None + assert "test_metadata" in document.doc_metadata + assert document.doc_metadata["test_metadata"] == "test_value" + + # Verify metadata binding was created + binding = ( + db.session.query(DatasetMetadataBinding).filter_by(metadata_id=metadata.id, document_id=document.id).first() + ) + assert binding is not None + assert binding.tenant_id == tenant.id + assert binding.dataset_id == dataset.id + + def test_update_documents_metadata_with_built_in_fields_enabled( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test update of documents metadata when built-in fields are enabled. 
+ """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + document = self._create_test_document( + db_session_with_containers, mock_external_service_dependencies, dataset, account + ) + + # Enable built-in fields + dataset.built_in_field_enabled = True + from extensions.ext_database import db + + db.session.add(dataset) + db.session.commit() + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Create metadata + metadata_args = MetadataArgs(type="string", name="test_metadata") + metadata = MetadataService.create_metadata(dataset.id, metadata_args) + + # Mock DocumentService.get_document + mock_external_service_dependencies["document_service"].get_document.return_value = document + + # Create metadata operation data + from services.entities.knowledge_entities.knowledge_entities import ( + DocumentMetadataOperation, + MetadataDetail, + MetadataOperationData, + ) + + metadata_detail = MetadataDetail(id=metadata.id, name=metadata.name, value="test_value") + + operation = DocumentMetadataOperation(document_id=document.id, metadata_list=[metadata_detail]) + + operation_data = MetadataOperationData(operation_data=[operation]) + + # Act: Execute the method under test + MetadataService.update_documents_metadata(dataset, operation_data) + + # Assert: Verify the expected outcomes + # Verify document metadata was updated with both custom and built-in fields + db.session.refresh(document) + assert document.doc_metadata is not None + assert "test_metadata" in document.doc_metadata + assert document.doc_metadata["test_metadata"] == "test_value" + + # Note: Built-in fields would be added if DocumentService mock works correctly + # The main functionality (custom metadata update) is verified + + def test_update_documents_metadata_document_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test update of documents metadata when document is not found. 
+ """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Create metadata + metadata_args = MetadataArgs(type="string", name="test_metadata") + metadata = MetadataService.create_metadata(dataset.id, metadata_args) + + # Mock DocumentService.get_document to return None (document not found) + mock_external_service_dependencies["document_service"].get_document.return_value = None + + # Create metadata operation data + from services.entities.knowledge_entities.knowledge_entities import ( + DocumentMetadataOperation, + MetadataDetail, + MetadataOperationData, + ) + + metadata_detail = MetadataDetail(id=metadata.id, name=metadata.name, value="test_value") + + operation = DocumentMetadataOperation(document_id="non-existent-document-id", metadata_list=[metadata_detail]) + + operation_data = MetadataOperationData(operation_data=[operation]) + + # Act: Execute the method under test + # The method should handle the error gracefully and continue + MetadataService.update_documents_metadata(dataset, operation_data) + + # Assert: Verify the method completes without raising exceptions + # The main functionality (error handling) is verified + + def test_knowledge_base_metadata_lock_check_dataset_id( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test metadata lock check for dataset operations. + """ + # Arrange: Setup mocks + mock_external_service_dependencies["redis_client"].get.return_value = None + mock_external_service_dependencies["redis_client"].set.return_value = True + + dataset_id = "test-dataset-id" + + # Act: Execute the method under test + MetadataService.knowledge_base_metadata_lock_check(dataset_id, None) + + # Assert: Verify the expected outcomes + # Verify Redis lock was set + mock_external_service_dependencies["redis_client"].set.assert_called_once() + + # Verify lock key format + call_args = mock_external_service_dependencies["redis_client"].set.call_args + assert call_args[0][0] == f"dataset_metadata_lock_{dataset_id}" + + def test_knowledge_base_metadata_lock_check_document_id( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test metadata lock check for document operations. + """ + # Arrange: Setup mocks + mock_external_service_dependencies["redis_client"].get.return_value = None + mock_external_service_dependencies["redis_client"].set.return_value = True + + document_id = "test-document-id" + + # Act: Execute the method under test + MetadataService.knowledge_base_metadata_lock_check(None, document_id) + + # Assert: Verify the expected outcomes + # Verify Redis lock was set + mock_external_service_dependencies["redis_client"].set.assert_called_once() + + # Verify lock key format + call_args = mock_external_service_dependencies["redis_client"].set.call_args + assert call_args[0][0] == f"document_metadata_lock_{document_id}" + + def test_knowledge_base_metadata_lock_check_lock_exists( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test metadata lock check when lock already exists. 
+ """ + # Arrange: Setup mocks to simulate existing lock + mock_external_service_dependencies["redis_client"].get.return_value = "1" # Lock exists + + dataset_id = "test-dataset-id" + + # Act & Assert: Verify proper error handling + with pytest.raises( + ValueError, match="Another knowledge base metadata operation is running, please wait a moment." + ): + MetadataService.knowledge_base_metadata_lock_check(dataset_id, None) + + def test_knowledge_base_metadata_lock_check_document_lock_exists( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test metadata lock check when document lock already exists. + """ + # Arrange: Setup mocks to simulate existing lock + mock_external_service_dependencies["redis_client"].get.return_value = "1" # Lock exists + + document_id = "test-document-id" + + # Act & Assert: Verify proper error handling + with pytest.raises(ValueError, match="Another document metadata operation is running, please wait a moment."): + MetadataService.knowledge_base_metadata_lock_check(None, document_id) + + def test_get_dataset_metadatas_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful retrieval of dataset metadata information. + """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Create metadata + metadata_args = MetadataArgs(type="string", name="test_metadata") + metadata = MetadataService.create_metadata(dataset.id, metadata_args) + + # Create document and metadata binding + document = self._create_test_document( + db_session_with_containers, mock_external_service_dependencies, dataset, account + ) + + binding = DatasetMetadataBinding( + tenant_id=tenant.id, + dataset_id=dataset.id, + metadata_id=metadata.id, + document_id=document.id, + created_by=account.id, + ) + + from extensions.ext_database import db + + db.session.add(binding) + db.session.commit() + + # Act: Execute the method under test + result = MetadataService.get_dataset_metadatas(dataset) + + # Assert: Verify the expected outcomes + assert result is not None + assert "doc_metadata" in result + assert "built_in_field_enabled" in result + + # Verify metadata information + doc_metadata = result["doc_metadata"] + assert len(doc_metadata) == 1 + assert doc_metadata[0]["id"] == metadata.id + assert doc_metadata[0]["name"] == metadata.name + assert doc_metadata[0]["type"] == metadata.type + assert doc_metadata[0]["count"] == 1 # One document bound to this metadata + + # Verify built-in field status + assert result["built_in_field_enabled"] is False + + def test_get_dataset_metadatas_with_built_in_fields_enabled( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test retrieval of dataset metadata when built-in fields are enabled. 
+ """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Enable built-in fields + dataset.built_in_field_enabled = True + from extensions.ext_database import db + + db.session.add(dataset) + db.session.commit() + + # Setup mocks + mock_external_service_dependencies["current_user"].current_tenant_id = tenant.id + mock_external_service_dependencies["current_user"].id = account.id + + # Create metadata + metadata_args = MetadataArgs(type="string", name="test_metadata") + metadata = MetadataService.create_metadata(dataset.id, metadata_args) + + # Act: Execute the method under test + result = MetadataService.get_dataset_metadatas(dataset) + + # Assert: Verify the expected outcomes + assert result is not None + assert "doc_metadata" in result + assert "built_in_field_enabled" in result + + # Verify metadata information + doc_metadata = result["doc_metadata"] + assert len(doc_metadata) == 1 # Only custom metadata, built-in fields are not included in this list + + # Verify built-in field status + assert result["built_in_field_enabled"] is True + + def test_get_dataset_metadatas_no_metadata(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test retrieval of dataset metadata when no metadata exists. + """ + # Arrange: Create test data + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + dataset = self._create_test_dataset( + db_session_with_containers, mock_external_service_dependencies, account, tenant + ) + + # Act: Execute the method under test + result = MetadataService.get_dataset_metadatas(dataset) + + # Assert: Verify the expected outcomes + assert result is not None + assert "doc_metadata" in result + assert "built_in_field_enabled" in result + + # Verify metadata information + doc_metadata = result["doc_metadata"] + assert len(doc_metadata) == 0 # No metadata exists + + # Verify built-in field status + assert result["built_in_field_enabled"] is False From 80f0594f4bc232a3ffebafa59893761e6001ddf2 Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Mon, 18 Aug 2025 09:54:22 +0800 Subject: [PATCH 29/36] feat: add testcontainers based tests for model loadbalancing service (#24066) --- .../test_model_load_balancing_service.py | 474 ++++++++++++++++++ 1 file changed, 474 insertions(+) create mode 100644 api/tests/test_containers_integration_tests/services/test_model_load_balancing_service.py diff --git a/api/tests/test_containers_integration_tests/services/test_model_load_balancing_service.py b/api/tests/test_containers_integration_tests/services/test_model_load_balancing_service.py new file mode 100644 index 0000000000..a8a36b2565 --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/test_model_load_balancing_service.py @@ -0,0 +1,474 @@ +from unittest.mock import MagicMock, patch + +import pytest +from faker import Faker + +from models.account import TenantAccountJoin, TenantAccountRole +from models.model import Account, Tenant +from models.provider import LoadBalancingModelConfig, Provider, ProviderModelSetting +from services.model_load_balancing_service import ModelLoadBalancingService + + +class TestModelLoadBalancingService: + """Integration tests for ModelLoadBalancingService using 
testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.model_load_balancing_service.ProviderManager") as mock_provider_manager, + patch("services.model_load_balancing_service.LBModelManager") as mock_lb_model_manager, + patch("services.model_load_balancing_service.ModelProviderFactory") as mock_model_provider_factory, + patch("services.model_load_balancing_service.encrypter") as mock_encrypter, + ): + # Setup default mock returns + mock_provider_manager_instance = mock_provider_manager.return_value + + # Mock provider configuration + mock_provider_config = MagicMock() + mock_provider_config.provider.provider = "openai" + mock_provider_config.custom_configuration.provider = None + + # Mock provider model setting + mock_provider_model_setting = MagicMock() + mock_provider_model_setting.load_balancing_enabled = False + + mock_provider_config.get_provider_model_setting.return_value = mock_provider_model_setting + + # Mock provider configurations dict + mock_provider_configs = {"openai": mock_provider_config} + mock_provider_manager_instance.get_configurations.return_value = mock_provider_configs + + # Mock LBModelManager + mock_lb_model_manager.get_config_in_cooldown_and_ttl.return_value = (False, 0) + + # Mock ModelProviderFactory + mock_model_provider_factory_instance = mock_model_provider_factory.return_value + + # Mock credential schemas + mock_credential_schema = MagicMock() + mock_credential_schema.credential_form_schemas = [] + + # Mock provider configuration methods + mock_provider_config.extract_secret_variables.return_value = [] + mock_provider_config.obfuscated_credentials.return_value = {} + mock_provider_config._get_credential_schema.return_value = mock_credential_schema + + yield { + "provider_manager": mock_provider_manager, + "lb_model_manager": mock_lb_model_manager, + "model_provider_factory": mock_model_provider_factory, + "encrypter": mock_encrypter, + "provider_config": mock_provider_config, + "provider_model_setting": mock_provider_model_setting, + "credential_schema": mock_credential_schema, + } + + def _create_test_account_and_tenant(self, db_session_with_containers, mock_external_service_dependencies): + """ + Helper method to create a test account and tenant for testing. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + + Returns: + tuple: (account, tenant) - Created account and tenant instances + """ + fake = Faker() + + # Create account + account = Account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + status="active", + ) + + from extensions.ext_database import db + + db.session.add(account) + db.session.commit() + + # Create tenant for the account + tenant = Tenant( + name=fake.company(), + status="normal", + ) + db.session.add(tenant) + db.session.commit() + + # Create tenant-account join + join = TenantAccountJoin( + tenant_id=tenant.id, + account_id=account.id, + role=TenantAccountRole.OWNER.value, + current=True, + ) + db.session.add(join) + db.session.commit() + + # Set current tenant for account + account.current_tenant = tenant + + return account, tenant + + def _create_test_provider_and_setting( + self, db_session_with_containers, tenant_id, mock_external_service_dependencies + ): + """ + Helper method to create a test provider and provider model setting. 
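+ The provider is created as a custom provider, and the model setting uses the
+ origin model type ("text-generation") so that later lookups by model type match.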
+ + Args: + db_session_with_containers: Database session from testcontainers infrastructure + tenant_id: Tenant ID for the provider + mock_external_service_dependencies: Mock dependencies + + Returns: + tuple: (provider, provider_model_setting) - Created provider and setting instances + """ + fake = Faker() + + from extensions.ext_database import db + + # Create provider + provider = Provider( + tenant_id=tenant_id, + provider_name="openai", + provider_type="custom", + is_valid=True, + ) + db.session.add(provider) + db.session.commit() + + # Create provider model setting + provider_model_setting = ProviderModelSetting( + tenant_id=tenant_id, + provider_name="openai", + model_name="gpt-3.5-turbo", + model_type="text-generation", # Use the origin model type that matches the query + enabled=True, + load_balancing_enabled=False, + ) + db.session.add(provider_model_setting) + db.session.commit() + + return provider, provider_model_setting + + def test_enable_model_load_balancing_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful model load balancing enablement. + + This test verifies: + - Proper provider configuration retrieval + - Successful enablement of model load balancing + - Correct method calls to provider configuration + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + provider, provider_model_setting = self._create_test_provider_and_setting( + db_session_with_containers, tenant.id, mock_external_service_dependencies + ) + + # Setup mocks for enable method + mock_provider_config = mock_external_service_dependencies["provider_config"] + mock_provider_config.enable_model_load_balancing = MagicMock() + + # Act: Execute the method under test + service = ModelLoadBalancingService() + service.enable_model_load_balancing( + tenant_id=tenant.id, provider="openai", model="gpt-3.5-turbo", model_type="llm" + ) + + # Assert: Verify the expected outcomes + mock_provider_config.enable_model_load_balancing.assert_called_once() + call_args = mock_provider_config.enable_model_load_balancing.call_args + assert call_args.kwargs["model"] == "gpt-3.5-turbo" + assert call_args.kwargs["model_type"].value == "llm" # ModelType enum value + + # Verify database state + from extensions.ext_database import db + + db.session.refresh(provider) + db.session.refresh(provider_model_setting) + assert provider.id is not None + assert provider_model_setting.id is not None + + def test_disable_model_load_balancing_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful model load balancing disablement. 
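+ Mirrors the enable test: disable_model_load_balancing should be called exactly once
+ with the expected model name and model type.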
+ + This test verifies: + - Proper provider configuration retrieval + - Successful disablement of model load balancing + - Correct method calls to provider configuration + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + provider, provider_model_setting = self._create_test_provider_and_setting( + db_session_with_containers, tenant.id, mock_external_service_dependencies + ) + + # Setup mocks for disable method + mock_provider_config = mock_external_service_dependencies["provider_config"] + mock_provider_config.disable_model_load_balancing = MagicMock() + + # Act: Execute the method under test + service = ModelLoadBalancingService() + service.disable_model_load_balancing( + tenant_id=tenant.id, provider="openai", model="gpt-3.5-turbo", model_type="llm" + ) + + # Assert: Verify the expected outcomes + mock_provider_config.disable_model_load_balancing.assert_called_once() + call_args = mock_provider_config.disable_model_load_balancing.call_args + assert call_args.kwargs["model"] == "gpt-3.5-turbo" + assert call_args.kwargs["model_type"].value == "llm" # ModelType enum value + + # Verify database state + from extensions.ext_database import db + + db.session.refresh(provider) + db.session.refresh(provider_model_setting) + assert provider.id is not None + assert provider_model_setting.id is not None + + def test_enable_model_load_balancing_provider_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error handling when provider does not exist. + + This test verifies: + - Proper error handling for non-existent provider + - Correct exception type and message + - No database state changes + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Setup mocks to return empty provider configurations + mock_provider_manager = mock_external_service_dependencies["provider_manager"] + mock_provider_manager_instance = mock_provider_manager.return_value + mock_provider_manager_instance.get_configurations.return_value = {} + + # Act & Assert: Verify proper error handling + service = ModelLoadBalancingService() + with pytest.raises(ValueError) as exc_info: + service.enable_model_load_balancing( + tenant_id=tenant.id, provider="nonexistent_provider", model="gpt-3.5-turbo", model_type="llm" + ) + + # Verify correct error message + assert "Provider nonexistent_provider does not exist." in str(exc_info.value) + + # Verify no database state changes occurred + from extensions.ext_database import db + + db.session.rollback() + + def test_get_load_balancing_configs_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful retrieval of load balancing configurations. 
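+ A LoadBalancingModelConfig row is inserted directly into the database and is expected
+ to come back as the single enabled config, with cooldown reported as inactive.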
+ + This test verifies: + - Proper provider configuration retrieval + - Successful database query for load balancing configs + - Correct return format and data structure + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + provider, provider_model_setting = self._create_test_provider_and_setting( + db_session_with_containers, tenant.id, mock_external_service_dependencies + ) + + # Create load balancing config + from extensions.ext_database import db + + load_balancing_config = LoadBalancingModelConfig( + tenant_id=tenant.id, + provider_name="openai", + model_name="gpt-3.5-turbo", + model_type="text-generation", # Use the origin model type that matches the query + name="config1", + encrypted_config='{"api_key": "test_key"}', + enabled=True, + ) + db.session.add(load_balancing_config) + db.session.commit() + + # Verify the config was created + db.session.refresh(load_balancing_config) + assert load_balancing_config.id is not None + + # Setup mocks for get_load_balancing_configs method + mock_provider_config = mock_external_service_dependencies["provider_config"] + mock_provider_model_setting = mock_external_service_dependencies["provider_model_setting"] + mock_provider_model_setting.load_balancing_enabled = True + + # Mock credential schema methods + mock_credential_schema = mock_external_service_dependencies["credential_schema"] + mock_credential_schema.credential_form_schemas = [] + + # Mock encrypter + mock_encrypter = mock_external_service_dependencies["encrypter"] + mock_encrypter.get_decrypt_decoding.return_value = ("key", "cipher") + + # Mock _get_credential_schema method + mock_provider_config._get_credential_schema.return_value = mock_credential_schema + + # Mock extract_secret_variables method + mock_provider_config.extract_secret_variables.return_value = [] + + # Mock obfuscated_credentials method + mock_provider_config.obfuscated_credentials.return_value = {} + + # Mock LBModelManager.get_config_in_cooldown_and_ttl + mock_lb_model_manager = mock_external_service_dependencies["lb_model_manager"] + mock_lb_model_manager.get_config_in_cooldown_and_ttl.return_value = (False, 0) + + # Act: Execute the method under test + service = ModelLoadBalancingService() + is_enabled, configs = service.get_load_balancing_configs( + tenant_id=tenant.id, provider="openai", model="gpt-3.5-turbo", model_type="llm" + ) + + # Assert: Verify the expected outcomes + assert is_enabled is True + assert len(configs) == 1 + assert configs[0]["id"] == load_balancing_config.id + assert configs[0]["name"] == "config1" + assert configs[0]["enabled"] is True + assert configs[0]["in_cooldown"] is False + assert configs[0]["ttl"] == 0 + + # Verify database state + db.session.refresh(load_balancing_config) + assert load_balancing_config.id is not None + + def test_get_load_balancing_configs_provider_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error handling when provider does not exist in get_load_balancing_configs. 
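+ The provider manager is mocked to return no configurations, so the call should
+ raise ValueError before any load balancing configs are queried.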
+ + This test verifies: + - Proper error handling for non-existent provider + - Correct exception type and message + - No database state changes + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Setup mocks to return empty provider configurations + mock_provider_manager = mock_external_service_dependencies["provider_manager"] + mock_provider_manager_instance = mock_provider_manager.return_value + mock_provider_manager_instance.get_configurations.return_value = {} + + # Act & Assert: Verify proper error handling + service = ModelLoadBalancingService() + with pytest.raises(ValueError) as exc_info: + service.get_load_balancing_configs( + tenant_id=tenant.id, provider="nonexistent_provider", model="gpt-3.5-turbo", model_type="llm" + ) + + # Verify correct error message + assert "Provider nonexistent_provider does not exist." in str(exc_info.value) + + # Verify no database state changes occurred + from extensions.ext_database import db + + db.session.rollback() + + def test_get_load_balancing_configs_with_inherit_config( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test load balancing configs retrieval with inherit configuration. + + This test verifies: + - Proper handling of inherit configuration + - Correct ordering of configurations + - Inherit config initialization when needed + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + provider, provider_model_setting = self._create_test_provider_and_setting( + db_session_with_containers, tenant.id, mock_external_service_dependencies + ) + + # Create load balancing config + from extensions.ext_database import db + + load_balancing_config = LoadBalancingModelConfig( + tenant_id=tenant.id, + provider_name="openai", + model_name="gpt-3.5-turbo", + model_type="text-generation", # Use the origin model type that matches the query + name="config1", + encrypted_config='{"api_key": "test_key"}', + enabled=True, + ) + db.session.add(load_balancing_config) + db.session.commit() + + # Setup mocks for inherit config scenario + mock_provider_config = mock_external_service_dependencies["provider_config"] + mock_provider_config.custom_configuration.provider = MagicMock() # Enable custom config + + mock_provider_model_setting = mock_external_service_dependencies["provider_model_setting"] + mock_provider_model_setting.load_balancing_enabled = True + + # Mock credential schema methods + mock_credential_schema = mock_external_service_dependencies["credential_schema"] + mock_credential_schema.credential_form_schemas = [] + + # Mock encrypter + mock_encrypter = mock_external_service_dependencies["encrypter"] + mock_encrypter.get_decrypt_decoding.return_value = ("key", "cipher") + + # Act: Execute the method under test + service = ModelLoadBalancingService() + is_enabled, configs = service.get_load_balancing_configs( + tenant_id=tenant.id, provider="openai", model="gpt-3.5-turbo", model_type="llm" + ) + + # Assert: Verify the expected outcomes + assert is_enabled is True + assert len(configs) == 2 # inherit config + existing config + + # First config should be inherit config + assert configs[0]["name"] == "__inherit__" + assert configs[0]["enabled"] is True + + # Second config should be the existing config + assert configs[1]["id"] == load_balancing_config.id + assert 
configs[1]["name"] == "config1" + + # Verify database state + db.session.refresh(load_balancing_config) + assert load_balancing_config.id is not None + + # Verify inherit config was created in database + inherit_configs = ( + db.session.query(LoadBalancingModelConfig).filter(LoadBalancingModelConfig.name == "__inherit__").all() + ) + assert len(inherit_configs) == 1 From f8fc9f8c794f43dc0dea1eaae2a27c2fac9ce59d Mon Sep 17 00:00:00 2001 From: Zhehao Peng <32246435+Zhehao-P@users.noreply.github.com> Date: Sun, 17 Aug 2025 19:01:29 -0700 Subject: [PATCH 30/36] feat: add select input support to the conversation opener (#24043) --- .../conversation-opener/modal.tsx | 26 ++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/web/app/components/base/features/new-feature-panel/conversation-opener/modal.tsx b/web/app/components/base/features/new-feature-panel/conversation-opener/modal.tsx index 51e33c43d2..53db991e71 100644 --- a/web/app/components/base/features/new-feature-panel/conversation-opener/modal.tsx +++ b/web/app/components/base/features/new-feature-panel/conversation-opener/modal.tsx @@ -8,6 +8,7 @@ import Modal from '@/app/components/base/modal' import Button from '@/app/components/base/button' import Divider from '@/app/components/base/divider' import ConfirmAddVar from '@/app/components/app/configuration/config-prompt/confirm-add-var' +import PromptEditor from '@/app/components/base/prompt-editor' import type { OpeningStatement } from '@/app/components/base/features/types' import { getInputKeys } from '@/app/components/base/block-input' import type { PromptVariable } from '@/models/debug' @@ -101,7 +102,7 @@ const OpeningSettingModal = ({