From f06025a3425b9580bfc72b7b6aa4938e615ce335 Mon Sep 17 00:00:00 2001 From: KVOJJJin Date: Mon, 27 Oct 2025 13:35:54 +0800 Subject: [PATCH 01/15] Fix: upload limit in knowledge (#27480) Co-authored-by: jyong <718720800@qq.com> --- api/controllers/console/files.py | 1 + .../datasets/create/file-uploader/index.tsx | 23 +++++++++++-------- web/i18n/en-US/dataset-creation.ts | 2 +- web/i18n/zh-Hans/dataset-creation.ts | 2 +- web/models/common.ts | 1 + 5 files changed, 17 insertions(+), 12 deletions(-) diff --git a/api/controllers/console/files.py b/api/controllers/console/files.py index 1cd193f7ad..36fcd460bb 100644 --- a/api/controllers/console/files.py +++ b/api/controllers/console/files.py @@ -39,6 +39,7 @@ class FileApi(Resource): return { "file_size_limit": dify_config.UPLOAD_FILE_SIZE_LIMIT, "batch_count_limit": dify_config.UPLOAD_FILE_BATCH_LIMIT, + "file_upload_limit": dify_config.BATCH_UPLOAD_LIMIT, "image_file_size_limit": dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT, "video_file_size_limit": dify_config.UPLOAD_VIDEO_FILE_SIZE_LIMIT, "audio_file_size_limit": dify_config.UPLOAD_AUDIO_FILE_SIZE_LIMIT, diff --git a/web/app/components/datasets/create/file-uploader/index.tsx b/web/app/components/datasets/create/file-uploader/index.tsx index 43d69d1889..463715bb62 100644 --- a/web/app/components/datasets/create/file-uploader/index.tsx +++ b/web/app/components/datasets/create/file-uploader/index.tsx @@ -19,8 +19,6 @@ import { IS_CE_EDITION } from '@/config' import { Theme } from '@/types/app' import useTheme from '@/hooks/use-theme' -const FILES_NUMBER_LIMIT = 20 - type IFileUploaderProps = { fileList: FileItem[] titleClassName?: string @@ -72,6 +70,7 @@ const FileUploader = ({ const fileUploadConfig = useMemo(() => fileUploadConfigResponse ?? { file_size_limit: 15, batch_count_limit: 5, + file_upload_limit: 5, }, [fileUploadConfigResponse]) const fileListRef = useRef([]) @@ -121,10 +120,10 @@ const FileUploader = ({ data: formData, onprogress: onProgress, }, false, undefined, '?source=datasets') - .then((res: File) => { + .then((res) => { const completeFile = { fileID: fileItem.fileID, - file: res, + file: res as unknown as File, progress: -1, } const index = fileListRef.current.findIndex(item => item.fileID === fileItem.fileID) @@ -163,11 +162,12 @@ const FileUploader = ({ }, [fileUploadConfig, uploadBatchFiles]) const initialUpload = useCallback((files: File[]) => { + const filesCountLimit = fileUploadConfig.file_upload_limit if (!files.length) return false - if (files.length + fileList.length > FILES_NUMBER_LIMIT && !IS_CE_EDITION) { - notify({ type: 'error', message: t('datasetCreation.stepOne.uploader.validation.filesNumber', { filesNumber: FILES_NUMBER_LIMIT }) }) + if (files.length + fileList.length > filesCountLimit && !IS_CE_EDITION) { + notify({ type: 'error', message: t('datasetCreation.stepOne.uploader.validation.filesNumber', { filesNumber: filesCountLimit }) }) return false } @@ -180,7 +180,7 @@ const FileUploader = ({ prepareFileList(newFiles) fileListRef.current = newFiles uploadMultipleFiles(preparedFiles) - }, [prepareFileList, uploadMultipleFiles, notify, t, fileList]) + }, [prepareFileList, uploadMultipleFiles, notify, t, fileList, fileUploadConfig]) const handleDragEnter = (e: DragEvent) => { e.preventDefault() @@ -255,10 +255,11 @@ const FileUploader = ({ ) let files = nested.flat() if (notSupportBatchUpload) files = files.slice(0, 1) + files = files.slice(0, fileUploadConfig.batch_count_limit) const valid = files.filter(isValid) initialUpload(valid) }, - [initialUpload, 
isValid, notSupportBatchUpload, traverseFileEntry], + [initialUpload, isValid, notSupportBatchUpload, traverseFileEntry, fileUploadConfig], ) const selectHandle = () => { if (fileUploader.current) @@ -273,9 +274,10 @@ const FileUploader = ({ onFileListUpdate?.([...fileListRef.current]) } const fileChangeHandle = useCallback((e: React.ChangeEvent) => { - const files = [...(e.target.files ?? [])] as File[] + let files = [...(e.target.files ?? [])] as File[] + files = files.slice(0, fileUploadConfig.batch_count_limit) initialUpload(files.filter(isValid)) - }, [isValid, initialUpload]) + }, [isValid, initialUpload, fileUploadConfig]) const { theme } = useTheme() const chartColor = useMemo(() => theme === Theme.dark ? '#5289ff' : '#296dff', [theme]) @@ -325,6 +327,7 @@ const FileUploader = ({ size: fileUploadConfig.file_size_limit, supportTypes: supportTypesShowNames, batchCount: notSupportBatchUpload ? 1 : fileUploadConfig.batch_count_limit, + totalCount: fileUploadConfig.file_upload_limit, })} {dragging &&
}
diff --git a/web/i18n/en-US/dataset-creation.ts b/web/i18n/en-US/dataset-creation.ts index 54d5a54fb4..f32639a6b4 100644 --- a/web/i18n/en-US/dataset-creation.ts +++ b/web/i18n/en-US/dataset-creation.ts @@ -38,7 +38,7 @@ const translation = { button: 'Drag and drop file or folder, or', buttonSingleFile: 'Drag and drop file, or', browse: 'Browse', - tip: 'Supports {{supportTypes}}. Max {{batchCount}} in a batch and {{size}} MB each.', + tip: 'Supports {{supportTypes}}. Max {{batchCount}} in a batch and {{size}} MB each. Max total {{totalCount}} files.', validation: { typeError: 'File type not supported', size: 'File too large. Maximum is {{size}}MB', diff --git a/web/i18n/zh-Hans/dataset-creation.ts b/web/i18n/zh-Hans/dataset-creation.ts index 5b1ff2435c..f780269914 100644 --- a/web/i18n/zh-Hans/dataset-creation.ts +++ b/web/i18n/zh-Hans/dataset-creation.ts @@ -38,7 +38,7 @@ const translation = { button: '拖拽文件或文件夹至此,或者', buttonSingleFile: '拖拽文件至此,或者', browse: '选择文件', - tip: '已支持 {{supportTypes}},每批最多 {{batchCount}} 个文件,每个文件不超过 {{size}} MB。', + tip: '已支持 {{supportTypes}},每批最多 {{batchCount}} 个文件,每个文件不超过 {{size}} MB ,总数不超过 {{totalCount}} 个文件。', validation: { typeError: '文件类型不支持', size: '文件太大了,不能超过 {{size}}MB', diff --git a/web/models/common.ts b/web/models/common.ts index aa6372e36f..d83ae5fb98 100644 --- a/web/models/common.ts +++ b/web/models/common.ts @@ -236,6 +236,7 @@ export type FileUploadConfigResponse = { audio_file_size_limit?: number // default is 50MB video_file_size_limit?: number // default is 100MB workflow_file_upload_limit?: number // default is 10 + file_upload_limit: number // default is 5 } export type InvitationResult = { From 43bcf40f809d1d546fd3f396418514d0a63afc9c Mon Sep 17 00:00:00 2001 From: GuanMu Date: Mon, 27 Oct 2025 14:38:58 +0800 Subject: [PATCH 02/15] refactor: update installed app component to handle missing params and improve type safety (#27331) --- .../explore/installed/[appId]/page.tsx | 6 +- web/app/account/oauth/authorize/constants.ts | 3 + web/app/account/oauth/authorize/page.tsx | 10 +- .../components/app/app-publisher/index.tsx | 13 +- .../text-generation-item.tsx | 8 +- .../debug/debug-with-single-model/index.tsx | 4 +- .../app/configuration/debug/hooks.tsx | 18 +- .../app/configuration/debug/index.tsx | 15 +- .../components/app/configuration/index.tsx | 311 +++++++++--------- .../chat/chat-with-history/chat-wrapper.tsx | 3 +- .../chat/embedded-chatbot/chat-wrapper.tsx | 3 +- web/app/components/base/chat/types.ts | 2 +- .../base/content-dialog/index.stories.tsx | 5 + .../time-picker/index.spec.tsx | 4 +- .../base/date-and-time-picker/utils/dayjs.ts | 4 +- .../components/base/dialog/index.stories.tsx | 4 + web/app/components/base/form/types.ts | 2 +- .../base/markdown-blocks/think-block.tsx | 19 +- .../base/modal-like-wrap/index.stories.tsx | 6 + web/app/components/base/popover/index.tsx | 22 +- .../base/portal-to-follow-elem/index.tsx | 7 +- .../components/base/prompt-editor/hooks.ts | 4 +- .../prompt-editor/plugins/placeholder.tsx | 3 +- web/app/components/base/voice-input/utils.ts | 12 +- web/app/components/billing/pricing/index.tsx | 1 - .../billing/pricing/plans/index.tsx | 9 +- .../create/embedding-process/index.tsx | 18 +- .../datasets/create/file-uploader/index.tsx | 2 + .../data-source/local-file/index.tsx | 12 +- .../detail/batch-modal/csv-uploader.tsx | 12 +- .../datasets/documents/detail/index.tsx | 16 +- .../documents/detail/metadata/index.tsx | 36 +- .../detail/settings/document-settings.tsx | 109 ++++-- .../model-selector/index.tsx | 3 
+- .../model-selector/popup.tsx | 2 +- .../plugins/install-plugin/utils.ts | 13 +- .../plugin-detail-panel/endpoint-modal.tsx | 2 +- .../model-selector/index.tsx | 5 +- .../tools/add-tool-modal/category.tsx | 25 +- .../components/tools/add-tool-modal/index.tsx | 3 +- .../components/tools/add-tool-modal/tools.tsx | 18 +- .../panel/debug-and-preview/chat-wrapper.tsx | 4 +- .../utils/{layout.ts => elk-layout.ts} | 12 +- web/app/components/workflow/utils/index.ts | 2 +- web/app/signin/utils/post-login-redirect.ts | 2 +- web/context/debug-configuration.ts | 10 + web/models/datasets.ts | 2 + web/models/debug.ts | 11 + web/types/app.ts | 16 +- 49 files changed, 531 insertions(+), 302 deletions(-) create mode 100644 web/app/account/oauth/authorize/constants.ts rename web/app/components/workflow/utils/{layout.ts => elk-layout.ts} (97%) diff --git a/web/app/(commonLayout)/explore/installed/[appId]/page.tsx b/web/app/(commonLayout)/explore/installed/[appId]/page.tsx index e288c62b5d..983fdb9d23 100644 --- a/web/app/(commonLayout)/explore/installed/[appId]/page.tsx +++ b/web/app/(commonLayout)/explore/installed/[appId]/page.tsx @@ -2,14 +2,14 @@ import React from 'react' import Main from '@/app/components/explore/installed-app' export type IInstalledAppProps = { - params: { + params?: Promise<{ appId: string - } + }> } // Using Next.js page convention for async server components async function InstalledApp({ params }: IInstalledAppProps) { - const appId = (await params).appId + const { appId } = await (params ?? Promise.reject(new Error('Missing params'))) return (
) diff --git a/web/app/account/oauth/authorize/constants.ts b/web/app/account/oauth/authorize/constants.ts new file mode 100644 index 0000000000..f1d8b98ef4 --- /dev/null +++ b/web/app/account/oauth/authorize/constants.ts @@ -0,0 +1,3 @@ +export const OAUTH_AUTHORIZE_PENDING_KEY = 'oauth_authorize_pending' +export const REDIRECT_URL_KEY = 'oauth_redirect_url' +export const OAUTH_AUTHORIZE_PENDING_TTL = 60 * 3 diff --git a/web/app/account/oauth/authorize/page.tsx b/web/app/account/oauth/authorize/page.tsx index 4aa5fa0b8e..c9b26b97c1 100644 --- a/web/app/account/oauth/authorize/page.tsx +++ b/web/app/account/oauth/authorize/page.tsx @@ -19,11 +19,11 @@ import { } from '@remixicon/react' import dayjs from 'dayjs' import { useIsLogin } from '@/service/use-common' - -export const OAUTH_AUTHORIZE_PENDING_KEY = 'oauth_authorize_pending' -export const REDIRECT_URL_KEY = 'oauth_redirect_url' - -const OAUTH_AUTHORIZE_PENDING_TTL = 60 * 3 +import { + OAUTH_AUTHORIZE_PENDING_KEY, + OAUTH_AUTHORIZE_PENDING_TTL, + REDIRECT_URL_KEY, +} from './constants' function setItemWithExpiry(key: string, value: string, ttl: number) { const item = { diff --git a/web/app/components/app/app-publisher/index.tsx b/web/app/components/app/app-publisher/index.tsx index df2618b49c..d3306ac141 100644 --- a/web/app/components/app/app-publisher/index.tsx +++ b/web/app/components/app/app-publisher/index.tsx @@ -44,7 +44,7 @@ import { appDefaultIconBackground } from '@/config' import type { PublishWorkflowParams } from '@/types/workflow' import { useAppWhiteListSubjects, useGetUserCanAccessApp } from '@/service/access-control' import { AccessMode } from '@/models/access-control' -import { fetchAppDetail } from '@/service/apps' +import { fetchAppDetailDirect } from '@/service/apps' import { useGlobalPublicStore } from '@/context/global-public-context' import { useFormatTimeFromNow } from '@/hooks/use-format-time-from-now' @@ -162,11 +162,16 @@ const AppPublisher = ({ } }, [appDetail?.id]) - const handleAccessControlUpdate = useCallback(() => { - fetchAppDetail({ url: '/apps', id: appDetail!.id }).then((res) => { + const handleAccessControlUpdate = useCallback(async () => { + if (!appDetail) + return + try { + const res = await fetchAppDetailDirect({ url: '/apps', id: appDetail.id }) setAppDetail(res) + } + finally { setShowAppAccessControl(false) - }) + } }, [appDetail, setAppDetail]) const [embeddingModalOpen, setEmbeddingModalOpen] = useState(false) diff --git a/web/app/components/app/configuration/debug/debug-with-multiple-model/text-generation-item.tsx b/web/app/components/app/configuration/debug/debug-with-multiple-model/text-generation-item.tsx index 8f8555efa4..670e5a1467 100644 --- a/web/app/components/app/configuration/debug/debug-with-multiple-model/text-generation-item.tsx +++ b/web/app/components/app/configuration/debug/debug-with-multiple-model/text-generation-item.tsx @@ -14,7 +14,8 @@ import { TransferMethod } from '@/app/components/base/chat/types' import { useEventEmitterContextContext } from '@/context/event-emitter' import { useProviderContext } from '@/context/provider-context' import { useFeatures } from '@/app/components/base/features/hooks' -import { noop } from 'lodash-es' +import { cloneDeep, noop } from 'lodash-es' +import { DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG } from '@/config' type TextGenerationItemProps = { modelAndParameter: ModelAndParameter @@ -50,8 +51,8 @@ const TextGenerationItem: FC = ({ const config: TextGenerationConfig = { pre_prompt: !isAdvancedMode ? 
modelConfig.configs.prompt_template : '', prompt_type: promptMode, - chat_prompt_config: isAdvancedMode ? chatPromptConfig : {}, - completion_prompt_config: isAdvancedMode ? completionPromptConfig : {}, + chat_prompt_config: isAdvancedMode ? chatPromptConfig : cloneDeep(DEFAULT_CHAT_PROMPT_CONFIG), + completion_prompt_config: isAdvancedMode ? completionPromptConfig : cloneDeep(DEFAULT_COMPLETION_PROMPT_CONFIG), user_input_form: promptVariablesToUserInputsForm(modelConfig.configs.prompt_variables), dataset_query_variable: contextVar || '', // features @@ -74,6 +75,7 @@ const TextGenerationItem: FC = ({ datasets: [...postDatasets], } as any, }, + system_parameters: modelConfig.system_parameters, } const { completion, diff --git a/web/app/components/app/configuration/debug/debug-with-single-model/index.tsx b/web/app/components/app/configuration/debug/debug-with-single-model/index.tsx index d439b00939..506e18cc62 100644 --- a/web/app/components/app/configuration/debug/debug-with-single-model/index.tsx +++ b/web/app/components/app/configuration/debug/debug-with-single-model/index.tsx @@ -6,7 +6,7 @@ import { import Chat from '@/app/components/base/chat/chat' import { useChat } from '@/app/components/base/chat/chat/hooks' import { useDebugConfigurationContext } from '@/context/debug-configuration' -import type { ChatConfig, ChatItem, ChatItemInTree, OnSend } from '@/app/components/base/chat/types' +import type { ChatConfig, ChatItem, OnSend } from '@/app/components/base/chat/types' import { useProviderContext } from '@/context/provider-context' import { fetchConversationMessages, @@ -126,7 +126,7 @@ const DebugWithSingleModel = ( ) }, [appId, chatList, checkCanSend, completionParams, config, handleSend, inputs, modelConfig.mode, modelConfig.model_id, modelConfig.provider, textGenerationModelList]) - const doRegenerate = useCallback((chatItem: ChatItemInTree, editedQuestion?: { message: string, files?: FileEntity[] }) => { + const doRegenerate = useCallback((chatItem: ChatItem, editedQuestion?: { message: string, files?: FileEntity[] }) => { const question = editedQuestion ? chatItem : chatList.find(item => item.id === chatItem.parentMessageId)! const parentAnswer = chatList.find(item => item.id === question.parentMessageId) doSend(editedQuestion ? editedQuestion.message : question.content, diff --git a/web/app/components/app/configuration/debug/hooks.tsx b/web/app/components/app/configuration/debug/hooks.tsx index 12022e706a..9f628c46af 100644 --- a/web/app/components/app/configuration/debug/hooks.tsx +++ b/web/app/components/app/configuration/debug/hooks.tsx @@ -12,12 +12,15 @@ import type { ChatConfig, ChatItem, } from '@/app/components/base/chat/types' +import cloneDeep from 'lodash-es/cloneDeep' import { AgentStrategy, } from '@/types/app' +import { SupportUploadFileTypes } from '@/app/components/workflow/types' import { promptVariablesToUserInputsForm } from '@/utils/model-config' import { useDebugConfigurationContext } from '@/context/debug-configuration' import { useEventEmitterContextContext } from '@/context/event-emitter' +import { DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG } from '@/config' export const useDebugWithSingleOrMultipleModel = (appId: string) => { const localeDebugWithSingleOrMultipleModelConfigs = localStorage.getItem('app-debug-with-single-or-multiple-models') @@ -95,16 +98,14 @@ export const useConfigFromDebugContext = () => { const config: ChatConfig = { pre_prompt: !isAdvancedMode ? 
modelConfig.configs.prompt_template : '', prompt_type: promptMode, - chat_prompt_config: isAdvancedMode ? chatPromptConfig : {}, - completion_prompt_config: isAdvancedMode ? completionPromptConfig : {}, + chat_prompt_config: isAdvancedMode ? chatPromptConfig : cloneDeep(DEFAULT_CHAT_PROMPT_CONFIG), + completion_prompt_config: isAdvancedMode ? completionPromptConfig : cloneDeep(DEFAULT_COMPLETION_PROMPT_CONFIG), user_input_form: promptVariablesToUserInputsForm(modelConfig.configs.prompt_variables), dataset_query_variable: contextVar || '', opening_statement: introduction, - more_like_this: { - enabled: false, - }, + more_like_this: modelConfig.more_like_this ?? { enabled: false }, suggested_questions: openingSuggestedQuestions, - suggested_questions_after_answer: suggestedQuestionsAfterAnswerConfig, + suggested_questions_after_answer: suggestedQuestionsAfterAnswerConfig ?? { enabled: false }, text_to_speech: textToSpeechConfig, speech_to_text: speechToTextConfig, retriever_resource: citationConfig, @@ -121,8 +122,13 @@ export const useConfigFromDebugContext = () => { }, file_upload: { image: visionConfig, + allowed_file_upload_methods: visionConfig.transfer_methods ?? [], + allowed_file_types: [SupportUploadFileTypes.image], + max_length: visionConfig.number_limits ?? 0, + number_limits: visionConfig.number_limits, }, annotation_reply: annotationConfig, + system_parameters: modelConfig.system_parameters, supportAnnotation: true, appId, diff --git a/web/app/components/app/configuration/debug/index.tsx b/web/app/components/app/configuration/debug/index.tsx index 23e1fdf9c4..ef3b9355b9 100644 --- a/web/app/components/app/configuration/debug/index.tsx +++ b/web/app/components/app/configuration/debug/index.tsx @@ -3,6 +3,7 @@ import type { FC } from 'react' import { useTranslation } from 'react-i18next' import React, { useCallback, useEffect, useRef, useState } from 'react' import { produce, setAutoFreeze } from 'immer' +import cloneDeep from 'lodash-es/cloneDeep' import { useBoolean } from 'ahooks' import { RiAddLine, @@ -36,7 +37,7 @@ import ActionButton, { ActionButtonState } from '@/app/components/base/action-bu import type { ModelConfig as BackendModelConfig, VisionFile, VisionSettings } from '@/types/app' import { formatBooleanInputs, promptVariablesToUserInputsForm } from '@/utils/model-config' import TextGeneration from '@/app/components/app/text-generate/item' -import { IS_CE_EDITION } from '@/config' +import { DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG, IS_CE_EDITION } from '@/config' import type { Inputs } from '@/models/debug' import { useDefaultModel } from '@/app/components/header/account-setting/model-provider-page/hooks' import { ModelFeatureEnum, ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations' @@ -90,6 +91,7 @@ const Debug: FC = ({ completionParams, hasSetContextVar, datasetConfigs, + externalDataToolsConfig, } = useContext(ConfigContext) const { eventEmitter } = useEventEmitterContextContext() const { data: text2speechDefaultModel } = useDefaultModel(ModelTypeEnum.textEmbedding) @@ -223,8 +225,8 @@ const Debug: FC = ({ const postModelConfig: BackendModelConfig = { pre_prompt: !isAdvancedMode ? modelConfig.configs.prompt_template : '', prompt_type: promptMode, - chat_prompt_config: {}, - completion_prompt_config: {}, + chat_prompt_config: isAdvancedMode ? chatPromptConfig : cloneDeep(DEFAULT_CHAT_PROMPT_CONFIG), + completion_prompt_config: isAdvancedMode ? 
completionPromptConfig : cloneDeep(DEFAULT_COMPLETION_PROMPT_CONFIG), user_input_form: promptVariablesToUserInputsForm(modelConfig.configs.prompt_variables), dataset_query_variable: contextVar || '', dataset_configs: { @@ -251,11 +253,8 @@ const Debug: FC = ({ suggested_questions_after_answer: suggestedQuestionsAfterAnswerConfig, speech_to_text: speechToTextConfig, retriever_resource: citationConfig, - } - - if (isAdvancedMode) { - postModelConfig.chat_prompt_config = chatPromptConfig - postModelConfig.completion_prompt_config = completionPromptConfig + system_parameters: modelConfig.system_parameters, + external_data_tools: externalDataToolsConfig, } const data: Record = { diff --git a/web/app/components/app/configuration/index.tsx b/web/app/components/app/configuration/index.tsx index a1710c8f39..4f47bfd883 100644 --- a/web/app/components/app/configuration/index.tsx +++ b/web/app/components/app/configuration/index.tsx @@ -36,14 +36,14 @@ import type { } from '@/models/debug' import type { ExternalDataTool } from '@/models/common' import type { DataSet } from '@/models/datasets' -import type { ModelConfig as BackendModelConfig, VisionSettings } from '@/types/app' +import type { ModelConfig as BackendModelConfig, UserInputFormItem, VisionSettings } from '@/types/app' import ConfigContext from '@/context/debug-configuration' import Config from '@/app/components/app/configuration/config' import Debug from '@/app/components/app/configuration/debug' import Confirm from '@/app/components/base/confirm' import { ModelFeatureEnum, ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations' import { ToastContext } from '@/app/components/base/toast' -import { fetchAppDetail, updateAppModelConfig } from '@/service/apps' +import { fetchAppDetailDirect, updateAppModelConfig } from '@/service/apps' import { promptVariablesToUserInputsForm, userInputsFormToPromptVariables } from '@/utils/model-config' import { fetchDatasets } from '@/service/datasets' import { useProviderContext } from '@/context/provider-context' @@ -186,6 +186,8 @@ const Configuration: FC = () => { prompt_template: '', prompt_variables: [] as PromptVariable[], }, + chat_prompt_config: clone(DEFAULT_CHAT_PROMPT_CONFIG), + completion_prompt_config: clone(DEFAULT_COMPLETION_PROMPT_CONFIG), more_like_this: null, opening_statement: '', suggested_questions: [], @@ -196,6 +198,14 @@ const Configuration: FC = () => { suggested_questions_after_answer: null, retriever_resource: null, annotation_reply: null, + external_data_tools: [], + system_parameters: { + audio_file_size_limit: 0, + file_size_limit: 0, + image_file_size_limit: 0, + video_file_size_limit: 0, + workflow_file_upload_limit: 0, + }, dataSets: [], agentConfig: DEFAULT_AGENT_SETTING, }) @@ -543,169 +553,169 @@ const Configuration: FC = () => { }) } setCollectionList(collectionList) - fetchAppDetail({ url: '/apps', id: appId }).then(async (res: any) => { - setMode(res.mode) - const modelConfig = res.model_config - const promptMode = modelConfig.prompt_type === PromptMode.advanced ? 
PromptMode.advanced : PromptMode.simple - doSetPromptMode(promptMode) - if (promptMode === PromptMode.advanced) { - if (modelConfig.chat_prompt_config && modelConfig.chat_prompt_config.prompt.length > 0) - setChatPromptConfig(modelConfig.chat_prompt_config) - else - setChatPromptConfig(clone(DEFAULT_CHAT_PROMPT_CONFIG)) - setCompletionPromptConfig(modelConfig.completion_prompt_config || clone(DEFAULT_COMPLETION_PROMPT_CONFIG) as any) - setCanReturnToSimpleMode(false) - } + const res = await fetchAppDetailDirect({ url: '/apps', id: appId }) + setMode(res.mode) + const modelConfig = res.model_config as BackendModelConfig + const promptMode = modelConfig.prompt_type === PromptMode.advanced ? PromptMode.advanced : PromptMode.simple + doSetPromptMode(promptMode) + if (promptMode === PromptMode.advanced) { + if (modelConfig.chat_prompt_config && modelConfig.chat_prompt_config.prompt.length > 0) + setChatPromptConfig(modelConfig.chat_prompt_config) + else + setChatPromptConfig(clone(DEFAULT_CHAT_PROMPT_CONFIG)) + setCompletionPromptConfig(modelConfig.completion_prompt_config || clone(DEFAULT_COMPLETION_PROMPT_CONFIG) as any) + setCanReturnToSimpleMode(false) + } - const model = res.model_config.model + const model = modelConfig.model - let datasets: any = null + let datasets: any = null // old dataset struct - if (modelConfig.agent_mode?.tools?.find(({ dataset }: any) => dataset?.enabled)) - datasets = modelConfig.agent_mode?.tools.filter(({ dataset }: any) => dataset?.enabled) + if (modelConfig.agent_mode?.tools?.find(({ dataset }: any) => dataset?.enabled)) + datasets = modelConfig.agent_mode?.tools.filter(({ dataset }: any) => dataset?.enabled) // new dataset struct - else if (modelConfig.dataset_configs.datasets?.datasets?.length > 0) - datasets = modelConfig.dataset_configs?.datasets?.datasets + else if (modelConfig.dataset_configs.datasets?.datasets?.length > 0) + datasets = modelConfig.dataset_configs?.datasets?.datasets - if (dataSets && datasets?.length && datasets?.length > 0) { - const { data: dataSetsWithDetail } = await fetchDatasets({ url: '/datasets', params: { page: 1, ids: datasets.map(({ dataset }: any) => dataset.id) } }) - datasets = dataSetsWithDetail - setDataSets(datasets) - } + if (dataSets && datasets?.length && datasets?.length > 0) { + const { data: dataSetsWithDetail } = await fetchDatasets({ url: '/datasets', params: { page: 1, ids: datasets.map(({ dataset }: any) => dataset.id) } }) + datasets = dataSetsWithDetail + setDataSets(datasets) + } - setIntroduction(modelConfig.opening_statement) - setSuggestedQuestions(modelConfig.suggested_questions || []) - if (modelConfig.more_like_this) - setMoreLikeThisConfig(modelConfig.more_like_this) + setIntroduction(modelConfig.opening_statement) + setSuggestedQuestions(modelConfig.suggested_questions || []) + if (modelConfig.more_like_this) + setMoreLikeThisConfig(modelConfig.more_like_this) - if (modelConfig.suggested_questions_after_answer) - setSuggestedQuestionsAfterAnswerConfig(modelConfig.suggested_questions_after_answer) + if (modelConfig.suggested_questions_after_answer) + setSuggestedQuestionsAfterAnswerConfig(modelConfig.suggested_questions_after_answer) - if (modelConfig.speech_to_text) - setSpeechToTextConfig(modelConfig.speech_to_text) + if (modelConfig.speech_to_text) + setSpeechToTextConfig(modelConfig.speech_to_text) - if (modelConfig.text_to_speech) - setTextToSpeechConfig(modelConfig.text_to_speech) + if (modelConfig.text_to_speech) + setTextToSpeechConfig(modelConfig.text_to_speech) - if 
(modelConfig.retriever_resource) - setCitationConfig(modelConfig.retriever_resource) + if (modelConfig.retriever_resource) + setCitationConfig(modelConfig.retriever_resource) - if (modelConfig.annotation_reply) { - let annotationConfig = modelConfig.annotation_reply - if (modelConfig.annotation_reply.enabled) { - annotationConfig = { - ...modelConfig.annotation_reply, - embedding_model: { - ...modelConfig.annotation_reply.embedding_model, - embedding_provider_name: correctModelProvider(modelConfig.annotation_reply.embedding_model.embedding_provider_name), - }, - } + if (modelConfig.annotation_reply) { + let annotationConfig = modelConfig.annotation_reply + if (modelConfig.annotation_reply.enabled) { + annotationConfig = { + ...modelConfig.annotation_reply, + embedding_model: { + ...modelConfig.annotation_reply.embedding_model, + embedding_provider_name: correctModelProvider(modelConfig.annotation_reply.embedding_model.embedding_provider_name), + }, } - setAnnotationConfig(annotationConfig, true) } + setAnnotationConfig(annotationConfig, true) + } - if (modelConfig.sensitive_word_avoidance) - setModerationConfig(modelConfig.sensitive_word_avoidance) + if (modelConfig.sensitive_word_avoidance) + setModerationConfig(modelConfig.sensitive_word_avoidance) - if (modelConfig.external_data_tools) - setExternalDataToolsConfig(modelConfig.external_data_tools) + if (modelConfig.external_data_tools) + setExternalDataToolsConfig(modelConfig.external_data_tools) - const config = { - modelConfig: { - provider: correctModelProvider(model.provider), - model_id: model.name, - mode: model.mode, - configs: { - prompt_template: modelConfig.pre_prompt || '', - prompt_variables: userInputsFormToPromptVariables( - [ - ...modelConfig.user_input_form, - ...( - modelConfig.external_data_tools?.length - ? modelConfig.external_data_tools.map((item: any) => { - return { - external_data_tool: { - variable: item.variable as string, - label: item.label as string, - enabled: item.enabled, - type: item.type as string, - config: item.config, - required: true, - icon: item.icon, - icon_background: item.icon_background, - }, - } - }) - : [] - ), - ], - modelConfig.dataset_query_variable, - ), - }, - more_like_this: modelConfig.more_like_this, - opening_statement: modelConfig.opening_statement, - suggested_questions: modelConfig.suggested_questions, - sensitive_word_avoidance: modelConfig.sensitive_word_avoidance, - speech_to_text: modelConfig.speech_to_text, - text_to_speech: modelConfig.text_to_speech, - file_upload: modelConfig.file_upload, - suggested_questions_after_answer: modelConfig.suggested_questions_after_answer, - retriever_resource: modelConfig.retriever_resource, - annotation_reply: modelConfig.annotation_reply, - external_data_tools: modelConfig.external_data_tools, - dataSets: datasets || [], - agentConfig: res.mode === 'agent-chat' ? { - max_iteration: DEFAULT_AGENT_SETTING.max_iteration, - ...modelConfig.agent_mode, + const config: PublishConfig = { + modelConfig: { + provider: correctModelProvider(model.provider), + model_id: model.name, + mode: model.mode, + configs: { + prompt_template: modelConfig.pre_prompt || '', + prompt_variables: userInputsFormToPromptVariables( + ([ + ...modelConfig.user_input_form, + ...( + modelConfig.external_data_tools?.length + ? 
modelConfig.external_data_tools.map((item: any) => { + return { + external_data_tool: { + variable: item.variable as string, + label: item.label as string, + enabled: item.enabled, + type: item.type as string, + config: item.config, + required: true, + icon: item.icon, + icon_background: item.icon_background, + }, + } + }) + : [] + ), + ]) as unknown as UserInputFormItem[], + modelConfig.dataset_query_variable, + ), + }, + more_like_this: modelConfig.more_like_this ?? { enabled: false }, + opening_statement: modelConfig.opening_statement, + suggested_questions: modelConfig.suggested_questions ?? [], + sensitive_word_avoidance: modelConfig.sensitive_word_avoidance, + speech_to_text: modelConfig.speech_to_text, + text_to_speech: modelConfig.text_to_speech, + file_upload: modelConfig.file_upload ?? null, + suggested_questions_after_answer: modelConfig.suggested_questions_after_answer ?? { enabled: false }, + retriever_resource: modelConfig.retriever_resource, + annotation_reply: modelConfig.annotation_reply ?? null, + external_data_tools: modelConfig.external_data_tools ?? [], + system_parameters: modelConfig.system_parameters, + dataSets: datasets || [], + agentConfig: res.mode === 'agent-chat' ? { + max_iteration: DEFAULT_AGENT_SETTING.max_iteration, + ...modelConfig.agent_mode, // remove dataset - enabled: true, // modelConfig.agent_mode?.enabled is not correct. old app: the value of app with dataset's is always true - tools: modelConfig.agent_mode?.tools.filter((tool: any) => { - return !tool.dataset - }).map((tool: any) => { - const toolInCollectionList = collectionList.find(c => tool.provider_id === c.id) - return { - ...tool, - isDeleted: res.deleted_tools?.some((deletedTool: any) => deletedTool.id === tool.id && deletedTool.tool_name === tool.tool_name), - notAuthor: toolInCollectionList?.is_team_authorization === false, - ...(tool.provider_type === 'builtin' ? { - provider_id: correctToolProvider(tool.provider_name, !!toolInCollectionList), - provider_name: correctToolProvider(tool.provider_name, !!toolInCollectionList), - } : {}), - } - }), - } : DEFAULT_AGENT_SETTING, - }, - completionParams: model.completion_params, - } + enabled: true, // modelConfig.agent_mode?.enabled is not correct. old app: the value of app with dataset's is always true + tools: (modelConfig.agent_mode?.tools ?? []).filter((tool: any) => { + return !tool.dataset + }).map((tool: any) => { + const toolInCollectionList = collectionList.find(c => tool.provider_id === c.id) + return { + ...tool, + isDeleted: res.deleted_tools?.some((deletedTool: any) => deletedTool.id === tool.id && deletedTool.tool_name === tool.tool_name) ?? false, + notAuthor: toolInCollectionList?.is_team_authorization === false, + ...(tool.provider_type === 'builtin' ? { + provider_id: correctToolProvider(tool.provider_name, !!toolInCollectionList), + provider_name: correctToolProvider(tool.provider_name, !!toolInCollectionList), + } : {}), + } + }), + strategy: modelConfig.agent_mode?.strategy ?? 
AgentStrategy.react, + } : DEFAULT_AGENT_SETTING, + }, + completionParams: model.completion_params, + } - if (modelConfig.file_upload) - handleSetVisionConfig(modelConfig.file_upload.image, true) + if (modelConfig.file_upload) + handleSetVisionConfig(modelConfig.file_upload.image, true) - syncToPublishedConfig(config) - setPublishedConfig(config) - const retrievalConfig = getMultipleRetrievalConfig({ - ...modelConfig.dataset_configs, - reranking_model: modelConfig.dataset_configs.reranking_model && { - provider: modelConfig.dataset_configs.reranking_model.reranking_provider_name, - model: modelConfig.dataset_configs.reranking_model.reranking_model_name, - }, - }, datasets, datasets, { - provider: currentRerankProvider?.provider, - model: currentRerankModel?.model, - }) - setDatasetConfigs({ - retrieval_model: RETRIEVE_TYPE.multiWay, - ...modelConfig.dataset_configs, - ...retrievalConfig, - ...(retrievalConfig.reranking_model ? { - reranking_model: { - reranking_model_name: retrievalConfig.reranking_model.model, - reranking_provider_name: correctModelProvider(retrievalConfig.reranking_model.provider), - }, - } : {}), - }) - setHasFetchedDetail(true) + syncToPublishedConfig(config) + setPublishedConfig(config) + const retrievalConfig = getMultipleRetrievalConfig({ + ...modelConfig.dataset_configs, + reranking_model: modelConfig.dataset_configs.reranking_model && { + provider: modelConfig.dataset_configs.reranking_model.reranking_provider_name, + model: modelConfig.dataset_configs.reranking_model.reranking_model_name, + }, + }, datasets, datasets, { + provider: currentRerankProvider?.provider, + model: currentRerankModel?.model, }) + setDatasetConfigs({ + ...modelConfig.dataset_configs, + ...retrievalConfig, + ...(retrievalConfig.reranking_model ? { + reranking_model: { + reranking_model_name: retrievalConfig.reranking_model.model, + reranking_provider_name: correctModelProvider(retrievalConfig.reranking_model.provider), + }, + } : {}), + } as DatasetConfigs) + setHasFetchedDetail(true) })() }, [appId]) @@ -780,8 +790,8 @@ const Configuration: FC = () => { // Simple Mode prompt pre_prompt: !isAdvancedMode ? promptTemplate : '', prompt_type: promptMode, - chat_prompt_config: {}, - completion_prompt_config: {}, + chat_prompt_config: isAdvancedMode ? chatPromptConfig : clone(DEFAULT_CHAT_PROMPT_CONFIG), + completion_prompt_config: isAdvancedMode ? completionPromptConfig : clone(DEFAULT_COMPLETION_PROMPT_CONFIG), user_input_form: promptVariablesToUserInputsForm(promptVariables), dataset_query_variable: contextVar || '', // features @@ -798,6 +808,7 @@ const Configuration: FC = () => { ...modelConfig.agentConfig, strategy: isFunctionCall ? 
AgentStrategy.functionCall : AgentStrategy.react, }, + external_data_tools: externalDataToolsConfig, model: { provider: modelAndParameter?.provider || modelConfig.provider, name: modelId, @@ -810,11 +821,7 @@ const Configuration: FC = () => { datasets: [...postDatasets], } as any, }, - } - - if (isAdvancedMode) { - data.chat_prompt_config = chatPromptConfig - data.completion_prompt_config = completionPromptConfig + system_parameters: modelConfig.system_parameters, } await updateAppModelConfig({ url: `/apps/${appId}/model-config`, body: data }) diff --git a/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx b/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx index 29b27a60ad..302fb9a3c7 100644 --- a/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx +++ b/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx @@ -3,7 +3,6 @@ import Chat from '../chat' import type { ChatConfig, ChatItem, - ChatItemInTree, OnSend, } from '../types' import { useChat } from '../chat/hooks' @@ -149,7 +148,7 @@ const ChatWrapper = () => { ) }, [chatList, handleNewConversationCompleted, handleSend, currentConversationId, currentConversationInputs, newConversationInputs, isInstalledApp, appId]) - const doRegenerate = useCallback((chatItem: ChatItemInTree, editedQuestion?: { message: string, files?: FileEntity[] }) => { + const doRegenerate = useCallback((chatItem: ChatItem, editedQuestion?: { message: string, files?: FileEntity[] }) => { const question = editedQuestion ? chatItem : chatList.find(item => item.id === chatItem.parentMessageId)! const parentAnswer = chatList.find(item => item.id === question.parentMessageId) doSend(editedQuestion ? editedQuestion.message : question.content, diff --git a/web/app/components/base/chat/embedded-chatbot/chat-wrapper.tsx b/web/app/components/base/chat/embedded-chatbot/chat-wrapper.tsx index 1bb3dbf56f..5fba104d35 100644 --- a/web/app/components/base/chat/embedded-chatbot/chat-wrapper.tsx +++ b/web/app/components/base/chat/embedded-chatbot/chat-wrapper.tsx @@ -3,7 +3,6 @@ import Chat from '../chat' import type { ChatConfig, ChatItem, - ChatItemInTree, OnSend, } from '../types' import { useChat } from '../chat/hooks' @@ -147,7 +146,7 @@ const ChatWrapper = () => { ) }, [currentConversationId, currentConversationInputs, newConversationInputs, chatList, handleSend, isInstalledApp, appId, handleNewConversationCompleted]) - const doRegenerate = useCallback((chatItem: ChatItemInTree, editedQuestion?: { message: string, files?: FileEntity[] }) => { + const doRegenerate = useCallback((chatItem: ChatItem, editedQuestion?: { message: string, files?: FileEntity[] }) => { const question = editedQuestion ? chatItem : chatList.find(item => item.id === chatItem.parentMessageId)! const parentAnswer = chatList.find(item => item.id === question.parentMessageId) doSend(editedQuestion ? 
editedQuestion.message : question.content, diff --git a/web/app/components/base/chat/types.ts b/web/app/components/base/chat/types.ts index f7f7aa4dce..5b0fe1f248 100644 --- a/web/app/components/base/chat/types.ts +++ b/web/app/components/base/chat/types.ts @@ -85,7 +85,7 @@ export type OnSend = { (message: string, files: FileEntity[] | undefined, isRegenerate: boolean, lastAnswer?: ChatItem | null): void } -export type OnRegenerate = (chatItem: ChatItem) => void +export type OnRegenerate = (chatItem: ChatItem, editedQuestion?: { message: string; files?: FileEntity[] }) => void export type Callback = { onSuccess: () => void diff --git a/web/app/components/base/content-dialog/index.stories.tsx b/web/app/components/base/content-dialog/index.stories.tsx index 29b3914704..67781a17a0 100644 --- a/web/app/components/base/content-dialog/index.stories.tsx +++ b/web/app/components/base/content-dialog/index.stories.tsx @@ -32,6 +32,7 @@ const meta = { }, args: { show: false, + children: null, }, } satisfies Meta @@ -92,6 +93,9 @@ const DemoWrapper = (props: Props) => { } export const Default: Story = { + args: { + children: null, + }, render: args => , } @@ -99,6 +103,7 @@ export const NarrowPanel: Story = { render: args => , args: { className: 'max-w-[420px]', + children: null, }, parameters: { docs: { diff --git a/web/app/components/base/date-and-time-picker/time-picker/index.spec.tsx b/web/app/components/base/date-and-time-picker/time-picker/index.spec.tsx index 40bc2928c8..bd4468e82d 100644 --- a/web/app/components/base/date-and-time-picker/time-picker/index.spec.tsx +++ b/web/app/components/base/date-and-time-picker/time-picker/index.spec.tsx @@ -3,6 +3,7 @@ import { fireEvent, render, screen } from '@testing-library/react' import TimePicker from './index' import dayjs from '../utils/dayjs' import { isDayjsObject } from '../utils/dayjs' +import type { TimePickerProps } from '../types' jest.mock('react-i18next', () => ({ useTranslation: () => ({ @@ -30,9 +31,10 @@ jest.mock('./options', () => () =>
) jest.mock('./header', () => () =>
) describe('TimePicker', () => { - const baseProps = { + const baseProps: Pick = { onChange: jest.fn(), onClear: jest.fn(), + value: undefined, } beforeEach(() => { diff --git a/web/app/components/base/date-and-time-picker/utils/dayjs.ts b/web/app/components/base/date-and-time-picker/utils/dayjs.ts index 808b50247a..4f53c766ea 100644 --- a/web/app/components/base/date-and-time-picker/utils/dayjs.ts +++ b/web/app/components/base/date-and-time-picker/utils/dayjs.ts @@ -150,7 +150,7 @@ export const toDayjs = (value: string | Dayjs | undefined, options: ToDayjsOptio if (format) { const parsedWithFormat = tzName - ? dayjs.tz(trimmed, format, tzName, true) + ? dayjs(trimmed, format, true).tz(tzName, true) : dayjs(trimmed, format, true) if (parsedWithFormat.isValid()) return parsedWithFormat @@ -191,7 +191,7 @@ export const toDayjs = (value: string | Dayjs | undefined, options: ToDayjsOptio const candidateFormats = formats ?? COMMON_PARSE_FORMATS for (const fmt of candidateFormats) { const parsed = tzName - ? dayjs.tz(trimmed, fmt, tzName, true) + ? dayjs(trimmed, fmt, true).tz(tzName, true) : dayjs(trimmed, fmt, true) if (parsed.isValid()) return parsed diff --git a/web/app/components/base/dialog/index.stories.tsx b/web/app/components/base/dialog/index.stories.tsx index 62ae7c00ce..94998c6d21 100644 --- a/web/app/components/base/dialog/index.stories.tsx +++ b/web/app/components/base/dialog/index.stories.tsx @@ -47,6 +47,7 @@ const meta = { args: { title: 'Manage API Keys', show: false, + children: null, }, } satisfies Meta @@ -102,6 +103,7 @@ export const Default: Story = { ), + children: null, }, } @@ -110,6 +112,7 @@ export const WithoutFooter: Story = { args: { footer: undefined, title: 'Read-only summary', + children: null, }, parameters: { docs: { @@ -140,6 +143,7 @@ export const CustomStyling: Story = {
), + children: null, }, parameters: { docs: { diff --git a/web/app/components/base/form/types.ts b/web/app/components/base/form/types.ts index d18c166186..ce3b5ec965 100644 --- a/web/app/components/base/form/types.ts +++ b/web/app/components/base/form/types.ts @@ -42,7 +42,7 @@ export type FormOption = { icon?: string } -export type AnyValidators = FieldValidators +export type AnyValidators = FieldValidators export type FormSchema = { type: FormTypeEnum diff --git a/web/app/components/base/markdown-blocks/think-block.tsx b/web/app/components/base/markdown-blocks/think-block.tsx index a3b0561677..9c43578e4c 100644 --- a/web/app/components/base/markdown-blocks/think-block.tsx +++ b/web/app/components/base/markdown-blocks/think-block.tsx @@ -1,6 +1,7 @@ import React, { useEffect, useRef, useState } from 'react' import { useTranslation } from 'react-i18next' import { useChatContext } from '../chat/chat/context' +import cn from '@/utils/classnames' const hasEndThink = (children: any): boolean => { if (typeof children === 'string') @@ -40,7 +41,7 @@ const useThinkTimer = (children: any) => { const [startTime] = useState(() => Date.now()) const [elapsedTime, setElapsedTime] = useState(0) const [isComplete, setIsComplete] = useState(false) - const timerRef = useRef() + const timerRef = useRef(null) useEffect(() => { if (isComplete) return @@ -63,16 +64,26 @@ const useThinkTimer = (children: any) => { return { elapsedTime, isComplete } } -const ThinkBlock = ({ children, ...props }: React.ComponentProps<'details'>) => { +type ThinkBlockProps = React.ComponentProps<'details'> & { + 'data-think'?: boolean +} + +const ThinkBlock = ({ children, ...props }: ThinkBlockProps) => { const { elapsedTime, isComplete } = useThinkTimer(children) const displayContent = removeEndThink(children) const { t } = useTranslation() + const { 'data-think': isThink = false, className, open, ...rest } = props - if (!(props['data-think'] ?? false)) + if (!isThink) return (
{children}
) return ( -
+
console.log('close'), onConfirm: () => console.log('confirm'), + children: null, }, } satisfies Meta @@ -68,6 +69,9 @@ export const Default: Story = { ), + args: { + children: null, + }, } export const WithBackLink: Story = { @@ -90,6 +94,7 @@ export const WithBackLink: Story = { ), args: { title: 'Select metadata type', + children: null, }, parameters: { docs: { @@ -114,6 +119,7 @@ export const CustomWidth: Story = { ), args: { title: 'Advanced configuration', + children: null, }, parameters: { docs: { diff --git a/web/app/components/base/popover/index.tsx b/web/app/components/base/popover/index.tsx index 41df06f43a..2387737d02 100644 --- a/web/app/components/base/popover/index.tsx +++ b/web/app/components/base/popover/index.tsx @@ -1,5 +1,5 @@ import { Popover, PopoverButton, PopoverPanel, Transition } from '@headlessui/react' -import { Fragment, cloneElement, useRef } from 'react' +import { Fragment, cloneElement, isValidElement, useRef } from 'react' import cn from '@/utils/classnames' export type HtmlContentProps = { @@ -103,15 +103,17 @@ export default function CustomPopover({ }) } > - {cloneElement(htmlContent as React.ReactElement, { - open, - onClose: close, - ...(manualClose - ? { - onClick: close, - } - : {}), - })} + {isValidElement(htmlContent) + ? cloneElement(htmlContent as React.ReactElement, { + open, + onClose: close, + ...(manualClose + ? { + onClick: close, + } + : {}), + }) + : htmlContent}
)} diff --git a/web/app/components/base/portal-to-follow-elem/index.tsx b/web/app/components/base/portal-to-follow-elem/index.tsx index 71ee251edd..e1192fe73b 100644 --- a/web/app/components/base/portal-to-follow-elem/index.tsx +++ b/web/app/components/base/portal-to-follow-elem/index.tsx @@ -125,7 +125,7 @@ export const PortalToFollowElemTrigger = ( children, asChild = false, ...props - }: React.HTMLProps & { ref?: React.RefObject, asChild?: boolean }, + }: React.HTMLProps & { ref?: React.RefObject, asChild?: boolean }, ) => { const context = usePortalToFollowElemContext() const childrenRef = (children as any).props?.ref @@ -133,12 +133,13 @@ export const PortalToFollowElemTrigger = ( // `asChild` allows the user to pass any element as the anchor if (asChild && React.isValidElement(children)) { + const childProps = (children.props ?? {}) as Record return React.cloneElement( children, context.getReferenceProps({ ref, ...props, - ...children.props, + ...childProps, 'data-state': context.open ? 'open' : 'closed', } as React.HTMLProps), ) @@ -164,7 +165,7 @@ export const PortalToFollowElemContent = ( style, ...props }: React.HTMLProps & { - ref?: React.RefObject; + ref?: React.RefObject; }, ) => { const context = usePortalToFollowElemContext() diff --git a/web/app/components/base/prompt-editor/hooks.ts b/web/app/components/base/prompt-editor/hooks.ts index 87119f8b49..b3d2b22236 100644 --- a/web/app/components/base/prompt-editor/hooks.ts +++ b/web/app/components/base/prompt-editor/hooks.ts @@ -35,7 +35,7 @@ import { DELETE_QUERY_BLOCK_COMMAND } from './plugins/query-block' import type { CustomTextNode } from './plugins/custom-text/node' import { registerLexicalTextEntity } from './utils' -export type UseSelectOrDeleteHandler = (nodeKey: string, command?: LexicalCommand) => [RefObject, boolean] +export type UseSelectOrDeleteHandler = (nodeKey: string, command?: LexicalCommand) => [RefObject, boolean] export const useSelectOrDelete: UseSelectOrDeleteHandler = (nodeKey: string, command?: LexicalCommand) => { const ref = useRef(null) const [editor] = useLexicalComposerContext() @@ -110,7 +110,7 @@ export const useSelectOrDelete: UseSelectOrDeleteHandler = (nodeKey: string, com return [ref, isSelected] } -export type UseTriggerHandler = () => [RefObject, boolean, Dispatch>] +export type UseTriggerHandler = () => [RefObject, boolean, Dispatch>] export const useTrigger: UseTriggerHandler = () => { const triggerRef = useRef(null) const [open, setOpen] = useState(false) diff --git a/web/app/components/base/prompt-editor/plugins/placeholder.tsx b/web/app/components/base/prompt-editor/plugins/placeholder.tsx index c2c2623992..187b574cea 100644 --- a/web/app/components/base/prompt-editor/plugins/placeholder.tsx +++ b/web/app/components/base/prompt-editor/plugins/placeholder.tsx @@ -1,4 +1,5 @@ import { memo } from 'react' +import type { ReactNode } from 'react' import { useTranslation } from 'react-i18next' import cn from '@/utils/classnames' @@ -8,7 +9,7 @@ const Placeholder = ({ className, }: { compact?: boolean - value?: string | JSX.Element + value?: ReactNode className?: string }) => { const { t } = useTranslation() diff --git a/web/app/components/base/voice-input/utils.ts b/web/app/components/base/voice-input/utils.ts index 70133f459f..a8ac9eba03 100644 --- a/web/app/components/base/voice-input/utils.ts +++ b/web/app/components/base/voice-input/utils.ts @@ -14,13 +14,19 @@ export const convertToMp3 = (recorder: any) => { const { channels, sampleRate } = wav const mp3enc = new 
lamejs.Mp3Encoder(channels, sampleRate, 128) const result = recorder.getChannelData() - const buffer = [] + const buffer: BlobPart[] = [] const leftData = result.left && new Int16Array(result.left.buffer, 0, result.left.byteLength / 2) const rightData = result.right && new Int16Array(result.right.buffer, 0, result.right.byteLength / 2) const remaining = leftData.length + (rightData ? rightData.length : 0) const maxSamples = 1152 + const toArrayBuffer = (bytes: Int8Array) => { + const arrayBuffer = new ArrayBuffer(bytes.length) + new Uint8Array(arrayBuffer).set(bytes) + return arrayBuffer + } + for (let i = 0; i < remaining; i += maxSamples) { const left = leftData.subarray(i, i + maxSamples) let right = null @@ -35,13 +41,13 @@ export const convertToMp3 = (recorder: any) => { } if (mp3buf.length > 0) - buffer.push(mp3buf) + buffer.push(toArrayBuffer(mp3buf)) } const enc = mp3enc.flush() if (enc.length > 0) - buffer.push(enc) + buffer.push(toArrayBuffer(enc)) return new Blob(buffer, { type: 'audio/mp3' }) } diff --git a/web/app/components/billing/pricing/index.tsx b/web/app/components/billing/pricing/index.tsx index 8b678ab272..ae8cb2056f 100644 --- a/web/app/components/billing/pricing/index.tsx +++ b/web/app/components/billing/pricing/index.tsx @@ -32,7 +32,6 @@ const Pricing: FC = ({ const [planRange, setPlanRange] = React.useState(PlanRange.monthly) const [currentCategory, setCurrentCategory] = useState(CategoryEnum.CLOUD) const canPay = isCurrentWorkspaceManager - useKeyPress(['esc'], onCancel) const pricingPageLanguage = useGetPricingPageLanguage() diff --git a/web/app/components/billing/pricing/plans/index.tsx b/web/app/components/billing/pricing/plans/index.tsx index 0d6d61b690..d648613c8f 100644 --- a/web/app/components/billing/pricing/plans/index.tsx +++ b/web/app/components/billing/pricing/plans/index.tsx @@ -6,7 +6,7 @@ import SelfHostedPlanItem from './self-hosted-plan-item' type PlansProps = { plan: { - type: BasicPlan + type: Plan usage: UsagePlanInfo total: UsagePlanInfo } @@ -21,6 +21,7 @@ const Plans = ({ planRange, canPay, }: PlansProps) => { + const currentPlanType: BasicPlan = plan.type === Plan.enterprise ? Plan.team : plan.type return (
@@ -28,21 +29,21 @@ const Plans = ({ currentPlan === 'cloud' && ( <> = ({ datasetId, batchId, documents = [], index return doc?.data_source_type as DataSourceType } + const isLegacyDataSourceInfo = (info: DataSourceInfo): info is LegacyDataSourceInfo => { + return info != null && typeof (info as LegacyDataSourceInfo).upload_file === 'object' + } + const getIcon = (id: string) => { const doc = documents.find(document => document.id === id) - - return doc?.data_source_info.notion_page_icon + const info = doc?.data_source_info + if (info && isLegacyDataSourceInfo(info)) + return info.notion_page_icon + return undefined } const isSourceEmbedding = (detail: IndexingStatusResponse) => ['indexing', 'splitting', 'parsing', 'cleaning', 'waiting'].includes(detail.indexing_status || '') diff --git a/web/app/components/datasets/create/file-uploader/index.tsx b/web/app/components/datasets/create/file-uploader/index.tsx index 463715bb62..75557b37c9 100644 --- a/web/app/components/datasets/create/file-uploader/index.tsx +++ b/web/app/components/datasets/create/file-uploader/index.tsx @@ -105,6 +105,8 @@ const FileUploader = ({ return isValidType && isValidSize }, [fileUploadConfig, notify, t, ACCEPTS]) + type UploadResult = Awaited> + const fileUpload = useCallback(async (fileItem: FileItem): Promise => { const formData = new FormData() formData.append('file', fileItem.file) diff --git a/web/app/components/datasets/documents/create-from-pipeline/data-source/local-file/index.tsx b/web/app/components/datasets/documents/create-from-pipeline/data-source/local-file/index.tsx index 47da96c2de..361378362e 100644 --- a/web/app/components/datasets/documents/create-from-pipeline/data-source/local-file/index.tsx +++ b/web/app/components/datasets/documents/create-from-pipeline/data-source/local-file/index.tsx @@ -121,6 +121,8 @@ const LocalFile = ({ return isValidType && isValidSize }, [fileUploadConfig, notify, t, ACCEPTS]) + type UploadResult = Awaited> + const fileUpload = useCallback(async (fileItem: FileItem): Promise => { const formData = new FormData() formData.append('file', fileItem.file) @@ -136,10 +138,14 @@ const LocalFile = ({ data: formData, onprogress: onProgress, }, false, undefined, '?source=datasets') - .then((res: File) => { - const completeFile = { + .then((res: UploadResult) => { + const updatedFile = Object.assign({}, fileItem.file, { + id: res.id, + ...(res as Partial), + }) as File + const completeFile: FileItem = { fileID: fileItem.fileID, - file: res, + file: updatedFile, progress: -1, } const index = fileListRef.current.findIndex(item => item.fileID === fileItem.fileID) diff --git a/web/app/components/datasets/documents/detail/batch-modal/csv-uploader.tsx b/web/app/components/datasets/documents/detail/batch-modal/csv-uploader.tsx index 7e8749f0bf..96cab11c9c 100644 --- a/web/app/components/datasets/documents/detail/batch-modal/csv-uploader.tsx +++ b/web/app/components/datasets/documents/detail/batch-modal/csv-uploader.tsx @@ -38,6 +38,8 @@ const CSVUploader: FC = ({ file_size_limit: 15, }, [fileUploadConfigResponse]) + type UploadResult = Awaited> + const fileUpload = useCallback(async (fileItem: FileItem): Promise => { fileItem.progress = 0 @@ -58,10 +60,14 @@ const CSVUploader: FC = ({ data: formData, onprogress: onProgress, }, false, undefined, '?source=datasets') - .then((res: File) => { - const completeFile = { + .then((res: UploadResult) => { + const updatedFile = Object.assign({}, fileItem.file, { + id: res.id, + ...(res as Partial), + }) as File + const completeFile: FileItem = { 
fileID: fileItem.fileID, - file: res, + file: updatedFile, progress: 100, } updateFile(completeFile) diff --git a/web/app/components/datasets/documents/detail/index.tsx b/web/app/components/datasets/documents/detail/index.tsx index b4f47253fb..ddec9b6dbe 100644 --- a/web/app/components/datasets/documents/detail/index.tsx +++ b/web/app/components/datasets/documents/detail/index.tsx @@ -17,7 +17,7 @@ import Divider from '@/app/components/base/divider' import Loading from '@/app/components/base/loading' import Toast from '@/app/components/base/toast' import { ChunkingMode } from '@/models/datasets' -import type { FileItem } from '@/models/datasets' +import type { DataSourceInfo, FileItem, LegacyDataSourceInfo } from '@/models/datasets' import { useDatasetDetailContextWithSelector } from '@/context/dataset-detail' import FloatRightContainer from '@/app/components/base/float-right-container' import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints' @@ -109,6 +109,18 @@ const DocumentDetail: FC = ({ datasetId, documentId }) => { const embedding = ['queuing', 'indexing', 'paused'].includes((documentDetail?.display_status || '').toLowerCase()) + const isLegacyDataSourceInfo = (info?: DataSourceInfo): info is LegacyDataSourceInfo => { + return !!info && 'upload_file' in info + } + + const documentUploadFile = useMemo(() => { + if (!documentDetail?.data_source_info) + return undefined + if (isLegacyDataSourceInfo(documentDetail.data_source_info)) + return documentDetail.data_source_info.upload_file + return undefined + }, [documentDetail?.data_source_info]) + const invalidChunkList = useInvalid(useSegmentListKey) const invalidChildChunkList = useInvalid(useChildSegmentListKey) const invalidDocumentList = useInvalidDocumentList(datasetId) @@ -153,7 +165,7 @@ const DocumentDetail: FC = ({ datasetId, documentId }) => {
void } +type MetadataState = { + documentType?: DocType | '' + metadata: Record +} + const Metadata: FC = ({ docDetail, loading, onUpdate }) => { const { doc_metadata = {} } = docDetail || {} - const doc_type = docDetail?.doc_type || '' + const rawDocType = docDetail?.doc_type ?? '' + const doc_type = rawDocType === 'others' ? '' : rawDocType const { t } = useTranslation() const metadataMap = useMetadataMap() @@ -143,18 +149,16 @@ const Metadata: FC = ({ docDetail, loading, onUpdate }) => { const businessDocCategoryMap = useBusinessDocCategories() const [editStatus, setEditStatus] = useState(!doc_type) // if no documentType, in editing status by default // the initial values are according to the documentType - const [metadataParams, setMetadataParams] = useState<{ - documentType?: DocType | '' - metadata: { [key: string]: string } - }>( + const [metadataParams, setMetadataParams] = useState( doc_type ? { - documentType: doc_type, - metadata: doc_metadata || {}, + documentType: doc_type as DocType, + metadata: (doc_metadata || {}) as Record, } - : { metadata: {} }) + : { metadata: {} }, + ) const [showDocTypes, setShowDocTypes] = useState(!doc_type) // whether show doc types - const [tempDocType, setTempDocType] = useState('') // for remember icon click + const [tempDocType, setTempDocType] = useState('') // for remember icon click const [saveLoading, setSaveLoading] = useState(false) const { notify } = useContext(ToastContext) @@ -165,13 +169,13 @@ const Metadata: FC = ({ docDetail, loading, onUpdate }) => { if (docDetail?.doc_type) { setEditStatus(false) setShowDocTypes(false) - setTempDocType(docDetail?.doc_type) + setTempDocType(doc_type as DocType | '') setMetadataParams({ - documentType: docDetail?.doc_type, - metadata: docDetail?.doc_metadata || {}, + documentType: doc_type as DocType | '', + metadata: (docDetail?.doc_metadata || {}) as Record, }) } - }, [docDetail?.doc_type]) + }, [docDetail?.doc_type, docDetail?.doc_metadata, doc_type]) // confirm doc type const confirmDocType = () => { @@ -179,7 +183,7 @@ const Metadata: FC = ({ docDetail, loading, onUpdate }) => { return setMetadataParams({ documentType: tempDocType, - metadata: tempDocType === metadataParams.documentType ? metadataParams.metadata : {}, // change doc type, clear metadata + metadata: tempDocType === metadataParams.documentType ? metadataParams.metadata : {} as Record, // change doc type, clear metadata }) setEditStatus(true) setShowDocTypes(false) @@ -187,7 +191,7 @@ const Metadata: FC = ({ docDetail, loading, onUpdate }) => { // cancel doc type const cancelDocType = () => { - setTempDocType(metadataParams.documentType) + setTempDocType(metadataParams.documentType ?? '') setEditStatus(true) setShowDocTypes(false) } @@ -209,7 +213,7 @@ const Metadata: FC = ({ docDetail, loading, onUpdate }) => { {t('datasetDocuments.metadata.docTypeChangeTitle')} {t('datasetDocuments.metadata.docTypeSelectWarning')} } - + {CUSTOMIZABLE_DOC_TYPES.map((type, index) => { const currValue = tempDocType ?? 
documentType return diff --git a/web/app/components/datasets/documents/detail/settings/document-settings.tsx b/web/app/components/datasets/documents/detail/settings/document-settings.tsx index 048645c9cf..3bcb8ef3aa 100644 --- a/web/app/components/datasets/documents/detail/settings/document-settings.tsx +++ b/web/app/components/datasets/documents/detail/settings/document-settings.tsx @@ -4,7 +4,17 @@ import { useBoolean } from 'ahooks' import { useContext } from 'use-context-selector' import { useRouter } from 'next/navigation' import DatasetDetailContext from '@/context/dataset-detail' -import type { CrawlOptions, CustomFile, DataSourceType } from '@/models/datasets' +import type { + CrawlOptions, + CustomFile, + DataSourceInfo, + DataSourceType, + LegacyDataSourceInfo, + LocalFileInfo, + OnlineDocumentInfo, + WebsiteCrawlInfo, +} from '@/models/datasets' +import type { DataSourceProvider } from '@/models/common' import Loading from '@/app/components/base/loading' import StepTwo from '@/app/components/datasets/create/step-two' import AccountSetting from '@/app/components/header/account-setting' @@ -42,15 +52,78 @@ const DocumentSettings = ({ datasetId, documentId }: DocumentSettingsProps) => { params: { metadata: 'without' }, }) + const dataSourceInfo = documentDetail?.data_source_info + + const isLegacyDataSourceInfo = (info: DataSourceInfo | undefined): info is LegacyDataSourceInfo => { + return !!info && 'upload_file' in info + } + const isWebsiteCrawlInfo = (info: DataSourceInfo | undefined): info is WebsiteCrawlInfo => { + return !!info && 'source_url' in info && 'title' in info + } + const isOnlineDocumentInfo = (info: DataSourceInfo | undefined): info is OnlineDocumentInfo => { + return !!info && 'page' in info + } + const isLocalFileInfo = (info: DataSourceInfo | undefined): info is LocalFileInfo => { + return !!info && 'related_id' in info && 'transfer_method' in info + } + const legacyInfo = isLegacyDataSourceInfo(dataSourceInfo) ? dataSourceInfo : undefined + const websiteInfo = isWebsiteCrawlInfo(dataSourceInfo) ? dataSourceInfo : undefined + const onlineDocumentInfo = isOnlineDocumentInfo(dataSourceInfo) ? dataSourceInfo : undefined + const localFileInfo = isLocalFileInfo(dataSourceInfo) ? dataSourceInfo : undefined + const currentPage = useMemo(() => { - return { - workspace_id: documentDetail?.data_source_info.notion_workspace_id, - page_id: documentDetail?.data_source_info.notion_page_id, - page_name: documentDetail?.name, - page_icon: documentDetail?.data_source_info.notion_page_icon, - type: documentDetail?.data_source_type, + if (legacyInfo) { + return { + workspace_id: legacyInfo.notion_workspace_id ?? '', + page_id: legacyInfo.notion_page_id ?? 
'', + page_name: documentDetail?.name, + page_icon: legacyInfo.notion_page_icon, + type: documentDetail?.data_source_type, + } } - }, [documentDetail]) + if (onlineDocumentInfo) { + return { + workspace_id: onlineDocumentInfo.workspace_id, + page_id: onlineDocumentInfo.page.page_id, + page_name: onlineDocumentInfo.page.page_name, + page_icon: onlineDocumentInfo.page.page_icon, + type: onlineDocumentInfo.page.type, + } + } + return undefined + }, [documentDetail?.data_source_type, documentDetail?.name, legacyInfo, onlineDocumentInfo]) + + const files = useMemo(() => { + if (legacyInfo?.upload_file) + return [legacyInfo.upload_file as CustomFile] + if (localFileInfo) { + const { related_id, name, extension } = localFileInfo + return [{ + id: related_id, + name, + extension, + } as unknown as CustomFile] + } + return [] + }, [legacyInfo?.upload_file, localFileInfo]) + + const websitePages = useMemo(() => { + if (!websiteInfo) + return [] + return [{ + title: websiteInfo.title, + source_url: websiteInfo.source_url, + content: websiteInfo.content, + description: websiteInfo.description, + }] + }, [websiteInfo]) + + const crawlOptions = (dataSourceInfo && typeof dataSourceInfo === 'object' && 'includes' in dataSourceInfo && 'excludes' in dataSourceInfo) + ? dataSourceInfo as unknown as CrawlOptions + : undefined + + const websiteCrawlProvider = (websiteInfo?.provider ?? legacyInfo?.provider) as DataSourceProvider | undefined + const websiteCrawlJobId = websiteInfo?.job_id ?? legacyInfo?.job_id if (error) return @@ -65,22 +138,16 @@ const DocumentSettings = ({ datasetId, documentId }: DocumentSettingsProps) => { onSetting={showSetAPIKey} datasetId={datasetId} dataSourceType={documentDetail.data_source_type as DataSourceType} - notionPages={[currentPage as unknown as NotionPage]} - websitePages={[ - { - title: documentDetail.name, - source_url: documentDetail.data_source_info?.url, - content: '', - description: '', - }, - ]} - websiteCrawlProvider={documentDetail.data_source_info?.provider} - websiteCrawlJobId={documentDetail.data_source_info?.job_id} - crawlOptions={documentDetail.data_source_info as unknown as CrawlOptions} + notionPages={currentPage ? 
[currentPage as unknown as NotionPage] : []} + notionCredentialId={legacyInfo?.credential_id || onlineDocumentInfo?.credential_id || ''} + websitePages={websitePages} + websiteCrawlProvider={websiteCrawlProvider} + websiteCrawlJobId={websiteCrawlJobId || ''} + crawlOptions={crawlOptions} indexingType={indexingTechnique} isSetting documentDetail={documentDetail} - files={[documentDetail.data_source_info.upload_file as CustomFile]} + files={files} onSave={saveHandler} onCancel={cancelHandler} /> diff --git a/web/app/components/header/account-setting/model-provider-page/model-selector/index.tsx b/web/app/components/header/account-setting/model-provider-page/model-selector/index.tsx index d28959a509..58e96fde69 100644 --- a/web/app/components/header/account-setting/model-provider-page/model-selector/index.tsx +++ b/web/app/components/header/account-setting/model-provider-page/model-selector/index.tsx @@ -5,6 +5,7 @@ import type { Model, ModelItem, } from '../declarations' +import type { ModelFeatureEnum } from '../declarations' import { useCurrentProviderAndModel } from '../hooks' import ModelTrigger from './model-trigger' import EmptyTrigger from './empty-trigger' @@ -24,7 +25,7 @@ type ModelSelectorProps = { popupClassName?: string onSelect?: (model: DefaultModel) => void readonly?: boolean - scopeFeatures?: string[] + scopeFeatures?: ModelFeatureEnum[] deprecatedClassName?: string showDeprecatedWarnIcon?: boolean } diff --git a/web/app/components/header/account-setting/model-provider-page/model-selector/popup.tsx b/web/app/components/header/account-setting/model-provider-page/model-selector/popup.tsx index ff32b438ed..b43fcd6301 100644 --- a/web/app/components/header/account-setting/model-provider-page/model-selector/popup.tsx +++ b/web/app/components/header/account-setting/model-provider-page/model-selector/popup.tsx @@ -22,7 +22,7 @@ type PopupProps = { defaultModel?: DefaultModel modelList: Model[] onSelect: (provider: string, model: ModelItem) => void - scopeFeatures?: string[] + scopeFeatures?: ModelFeatureEnum[] onHide: () => void } const Popup: FC = ({ diff --git a/web/app/components/plugins/install-plugin/utils.ts b/web/app/components/plugins/install-plugin/utils.ts index f19a7fd287..79c6d7b031 100644 --- a/web/app/components/plugins/install-plugin/utils.ts +++ b/web/app/components/plugins/install-plugin/utils.ts @@ -5,15 +5,17 @@ import { isEmpty } from 'lodash-es' export const pluginManifestToCardPluginProps = (pluginManifest: PluginDeclaration): Plugin => { return { plugin_id: pluginManifest.plugin_unique_identifier, - type: pluginManifest.category, + type: pluginManifest.category as Plugin['type'], category: pluginManifest.category, name: pluginManifest.name, version: pluginManifest.version, latest_version: '', latest_package_identifier: '', org: pluginManifest.author, + author: pluginManifest.author, label: pluginManifest.label, brief: pluginManifest.description, + description: pluginManifest.description, icon: pluginManifest.icon, verified: pluginManifest.verified, introduction: '', @@ -22,14 +24,17 @@ export const pluginManifestToCardPluginProps = (pluginManifest: PluginDeclaratio endpoint: { settings: [], }, - tags: [], + tags: pluginManifest.tags.map(tag => ({ name: tag })), + badges: [], + verification: { authorized_category: 'langgenius' }, + from: 'package', } } export const pluginManifestInMarketToPluginProps = (pluginManifest: PluginManifestInMarket): Plugin => { return { plugin_id: pluginManifest.plugin_unique_identifier, - type: pluginManifest.category, + type: 
pluginManifest.category as Plugin['type'], category: pluginManifest.category, name: pluginManifest.name, version: pluginManifest.latest_version, @@ -38,6 +43,7 @@ export const pluginManifestInMarketToPluginProps = (pluginManifest: PluginManife org: pluginManifest.org, label: pluginManifest.label, brief: pluginManifest.brief, + description: pluginManifest.brief, icon: pluginManifest.icon, verified: true, introduction: pluginManifest.introduction, @@ -49,6 +55,7 @@ export const pluginManifestInMarketToPluginProps = (pluginManifest: PluginManife tags: [], badges: pluginManifest.badges, verification: isEmpty(pluginManifest.verification) ? { authorized_category: 'langgenius' } : pluginManifest.verification, + from: pluginManifest.from, } } diff --git a/web/app/components/plugins/plugin-detail-panel/endpoint-modal.tsx b/web/app/components/plugins/plugin-detail-panel/endpoint-modal.tsx index 3041f13f2f..d4c0bc2d92 100644 --- a/web/app/components/plugins/plugin-detail-panel/endpoint-modal.tsx +++ b/web/app/components/plugins/plugin-detail-panel/endpoint-modal.tsx @@ -50,7 +50,7 @@ const EndpointModal: FC = ({ // Fix: Process boolean fields to ensure they are sent as proper boolean values const processedCredential = { ...tempCredential } - formSchemas.forEach((field) => { + formSchemas.forEach((field: any) => { if (field.type === 'boolean' && processedCredential[field.name] !== undefined) { const value = processedCredential[field.name] if (typeof value === 'string') diff --git a/web/app/components/plugins/plugin-detail-panel/model-selector/index.tsx b/web/app/components/plugins/plugin-detail-panel/model-selector/index.tsx index 873f187e8f..1393a1844f 100644 --- a/web/app/components/plugins/plugin-detail-panel/model-selector/index.tsx +++ b/web/app/components/plugins/plugin-detail-panel/model-selector/index.tsx @@ -7,6 +7,7 @@ import { useTranslation } from 'react-i18next' import type { DefaultModel, FormValue, + ModelFeatureEnum, } from '@/app/components/header/account-setting/model-provider-page/declarations' import { ModelStatusEnum, ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations' import ModelSelector from '@/app/components/header/account-setting/model-provider-page/model-selector' @@ -57,7 +58,7 @@ const ModelParameterModal: FC = ({ const { isAPIKeySet } = useProviderContext() const [open, setOpen] = useState(false) const scopeArray = scope.split('&') - const scopeFeatures = useMemo(() => { + const scopeFeatures = useMemo((): ModelFeatureEnum[] => { if (scopeArray.includes('all')) return [] return scopeArray.filter(item => ![ @@ -67,7 +68,7 @@ const ModelParameterModal: FC = ({ ModelTypeEnum.moderation, ModelTypeEnum.speech2text, ModelTypeEnum.tts, - ].includes(item as ModelTypeEnum)) + ].includes(item as ModelTypeEnum)).map(item => item as ModelFeatureEnum) }, [scopeArray]) const { data: textGenerationList } = useModelList(ModelTypeEnum.textGeneration) diff --git a/web/app/components/tools/add-tool-modal/category.tsx b/web/app/components/tools/add-tool-modal/category.tsx index 270b4fc2bf..c1467a0ff4 100644 --- a/web/app/components/tools/add-tool-modal/category.tsx +++ b/web/app/components/tools/add-tool-modal/category.tsx @@ -9,6 +9,7 @@ import I18n from '@/context/i18n' import { getLanguage } from '@/i18n-config/language' import { useStore as useLabelStore } from '@/app/components/tools/labels/store' import { fetchLabelList } from '@/service/tools' +import { renderI18nObject } from '@/i18n-config' type Props = { value: string @@ -55,14 +56,24 @@ 
const Category = ({ {t('tools.type.all')}
- {labelList.map(label => ( -
onSelect(label.name)}> -
- + {labelList.map((label) => { + const labelText = typeof label.label === 'string' + ? label.label + : (label.label ? renderI18nObject(label.label, language) : '') + return ( +
onSelect(label.name)} + > +
+ +
+ {labelText}
- {label.label[language]} -
- ))} + ) + })}
) } diff --git a/web/app/components/tools/add-tool-modal/index.tsx b/web/app/components/tools/add-tool-modal/index.tsx index e12ba3e334..392fa02f3a 100644 --- a/web/app/components/tools/add-tool-modal/index.tsx +++ b/web/app/components/tools/add-tool-modal/index.tsx @@ -10,6 +10,7 @@ import { } from '@remixicon/react' import { useMount } from 'ahooks' import type { Collection, CustomCollectionBackend, Tool } from '../types' +import type { CollectionType } from '../types' import Type from './type' import Category from './category' import Tools from './tools' @@ -129,7 +130,7 @@ const AddToolModal: FC = ({ const nexModelConfig = produce(modelConfig, (draft: ModelConfig) => { draft.agentConfig.tools.push({ provider_id: collection.id || collection.name, - provider_type: collection.type, + provider_type: collection.type as CollectionType, provider_name: collection.name, tool_name: tool.name, tool_label: tool.label[locale] || tool.label[locale.replaceAll('-', '_')], diff --git a/web/app/components/tools/add-tool-modal/tools.tsx b/web/app/components/tools/add-tool-modal/tools.tsx index 17a3df8357..20f7e6b0da 100644 --- a/web/app/components/tools/add-tool-modal/tools.tsx +++ b/web/app/components/tools/add-tool-modal/tools.tsx @@ -23,6 +23,14 @@ import type { Tool } from '@/app/components/tools/types' import { CollectionType } from '@/app/components/tools/types' import type { AgentTool } from '@/types/app' import { MAX_TOOLS_NUM } from '@/config' +import type { TypeWithI18N } from '@/app/components/header/account-setting/model-provider-page/declarations' +import { renderI18nObject } from '@/i18n-config' + +const resolveI18nText = (value: TypeWithI18N | string | undefined, language: string): string => { + if (!value) + return '' + return typeof value === 'string' ? value : renderI18nObject(value, language) +} type ToolsProps = { showWorkflowEmpty: boolean @@ -53,7 +61,7 @@ const Blocks = ({ className='group mb-1 last-of-type:mb-0' >
- {toolWithProvider.label[language]} + {resolveI18nText(toolWithProvider.label, language)} {t('tools.addToolModal.manageInTools')}
{list.map((tool) => { @@ -62,7 +70,7 @@ const Blocks = ({ return '' return tool.labels.map((name) => { const label = labelList.find(item => item.name === name) - return label?.label[language] + return resolveI18nText(label?.label, language) }).filter(Boolean).join(', ') })() const added = !!addedTools?.find(v => v.provider_id === toolWithProvider.id && v.provider_type === toolWithProvider.type && v.tool_name === tool.name) @@ -79,8 +87,8 @@ const Blocks = ({ type={BlockEnum.Tool} toolIcon={toolWithProvider.icon} /> -
{tool.label[language]}
-
{tool.description[language]}
+
{resolveI18nText(tool.label, language)}
+
{resolveI18nText(tool.description, language)}
{tool.labels?.length > 0 && (
@@ -98,7 +106,7 @@ const Blocks = ({ type={BlockEnum.Tool} toolIcon={toolWithProvider.icon} /> -
{tool.label[language]}
+
{resolveI18nText(tool.label, language)}
{!needAuth && added && (
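Editor's note on the tools.tsx change above: the new resolveI18nText helper lets tool and label text arrive either as a plain string or as an i18n object without breaking the tool list. A minimal sketch of the intended behaviour follows; the inline fallback stands in for renderI18nObject from '@/i18n-config', and the locale keys and label values are illustrative assumptions, not code from this PR.

```ts
// Simplified stand-in for the resolveI18nText helper added in tools.tsx.
// The real helper delegates to renderI18nObject; this inline fallback only
// illustrates the string-or-object handling.
type TypeWithI18N = Record<string, string>

const resolveI18nText = (value: TypeWithI18N | string | undefined, language: string): string => {
  if (!value)
    return ''
  return typeof value === 'string' ? value : (value[language] ?? value.en_US ?? '')
}

resolveI18nText({ en_US: 'Web Search', zh_Hans: '网页搜索' }, 'zh_Hans') // -> '网页搜索'
resolveI18nText('Web Search', 'en_US') // plain strings pass through unchanged
resolveI18nText(undefined, 'en_US')    // missing labels render as '' instead of throwing
```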
diff --git a/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx b/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx index 1a97357da5..6fba10bf81 100644 --- a/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx +++ b/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx @@ -12,7 +12,7 @@ import ConversationVariableModal from './conversation-variable-modal' import { useChat } from './hooks' import type { ChatWrapperRefType } from './index' import Chat from '@/app/components/base/chat/chat' -import type { ChatItem, ChatItemInTree, OnSend } from '@/app/components/base/chat/types' +import type { ChatItem, OnSend } from '@/app/components/base/chat/types' import { useFeatures } from '@/app/components/base/features/hooks' import { fetchSuggestedQuestions, @@ -117,7 +117,7 @@ const ChatWrapper = ( ) }, [handleSend, workflowStore, conversationId, chatList, appDetail]) - const doRegenerate = useCallback((chatItem: ChatItemInTree, editedQuestion?: { message: string, files?: FileEntity[] }) => { + const doRegenerate = useCallback((chatItem: ChatItem, editedQuestion?: { message: string, files?: FileEntity[] }) => { const question = editedQuestion ? chatItem : chatList.find(item => item.id === chatItem.parentMessageId)! const parentAnswer = chatList.find(item => item.id === question.parentMessageId) doSend(editedQuestion ? editedQuestion.message : question.content, diff --git a/web/app/components/workflow/utils/layout.ts b/web/app/components/workflow/utils/elk-layout.ts similarity index 97% rename from web/app/components/workflow/utils/layout.ts rename to web/app/components/workflow/utils/elk-layout.ts index b3cf3b0d88..69acbf9aff 100644 --- a/web/app/components/workflow/utils/layout.ts +++ b/web/app/components/workflow/utils/elk-layout.ts @@ -4,18 +4,18 @@ import { cloneDeep } from 'lodash-es' import type { Edge, Node, -} from '../types' +} from '@/app/components/workflow/types' import { BlockEnum, -} from '../types' +} from '@/app/components/workflow/types' import { CUSTOM_NODE, NODE_LAYOUT_HORIZONTAL_PADDING, NODE_LAYOUT_VERTICAL_PADDING, -} from '../constants' -import { CUSTOM_ITERATION_START_NODE } from '../nodes/iteration-start/constants' -import { CUSTOM_LOOP_START_NODE } from '../nodes/loop-start/constants' -import type { CaseItem, IfElseNodeType } from '../nodes/if-else/types' +} from '@/app/components/workflow/constants' +import { CUSTOM_ITERATION_START_NODE } from '@/app/components/workflow/nodes/iteration-start/constants' +import { CUSTOM_LOOP_START_NODE } from '@/app/components/workflow/nodes/loop-start/constants' +import type { CaseItem, IfElseNodeType } from '@/app/components/workflow/nodes/if-else/types' // Although the file name refers to Dagre, the implementation now relies on ELK's layered algorithm. // Keep the export signatures unchanged to minimise the blast radius while we migrate the layout stack. 
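Editor's note on the rename above: elk-layout.ts keeps the old export signatures while delegating to ELK's layered algorithm, as its trailing comment states. The sketch below shows roughly what a layered ELK pass looks like; the elkjs import path, node ids, dimensions, and layout options are assumptions for illustration and are not taken from this diff.

```ts
// Minimal elkjs layered-layout sketch (illustrative; not the project's actual helper).
import ELK from 'elkjs/lib/elk.bundled.js'

const elk = new ELK()

async function layoutExample() {
  const result = await elk.layout({
    id: 'root',
    layoutOptions: {
      'elk.algorithm': 'layered', // the layered algorithm referenced in the comment above
      'elk.direction': 'RIGHT',   // left-to-right flow, matching a typical workflow canvas
    },
    children: [
      { id: 'start', width: 244, height: 54 },
      { id: 'llm', width: 244, height: 54 },
    ],
    edges: [{ id: 'start->llm', sources: ['start'], targets: ['llm'] }],
  })
  // Each child in `result.children` now carries x/y coordinates that callers
  // can copy back onto their own node objects.
  return result
}
```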
diff --git a/web/app/components/workflow/utils/index.ts b/web/app/components/workflow/utils/index.ts index e9ae2d1ef0..53a423de34 100644 --- a/web/app/components/workflow/utils/index.ts +++ b/web/app/components/workflow/utils/index.ts @@ -1,7 +1,7 @@ export * from './node' export * from './edge' export * from './workflow-init' -export * from './layout' +export * from './elk-layout' export * from './common' export * from './tool' export * from './workflow' diff --git a/web/app/signin/utils/post-login-redirect.ts b/web/app/signin/utils/post-login-redirect.ts index 37ab122dfa..45e2c55941 100644 --- a/web/app/signin/utils/post-login-redirect.ts +++ b/web/app/signin/utils/post-login-redirect.ts @@ -1,4 +1,4 @@ -import { OAUTH_AUTHORIZE_PENDING_KEY, REDIRECT_URL_KEY } from '@/app/account/oauth/authorize/page' +import { OAUTH_AUTHORIZE_PENDING_KEY, REDIRECT_URL_KEY } from '@/app/account/oauth/authorize/constants' import dayjs from 'dayjs' import type { ReadonlyURLSearchParams } from 'next/navigation' diff --git a/web/context/debug-configuration.ts b/web/context/debug-configuration.ts index dba2e7a231..1358940e39 100644 --- a/web/context/debug-configuration.ts +++ b/web/context/debug-configuration.ts @@ -210,6 +210,8 @@ const DebugConfigurationContext = createContext({ prompt_template: '', prompt_variables: [], }, + chat_prompt_config: DEFAULT_CHAT_PROMPT_CONFIG, + completion_prompt_config: DEFAULT_COMPLETION_PROMPT_CONFIG, more_like_this: null, opening_statement: '', suggested_questions: [], @@ -220,6 +222,14 @@ const DebugConfigurationContext = createContext({ suggested_questions_after_answer: null, retriever_resource: null, annotation_reply: null, + external_data_tools: [], + system_parameters: { + audio_file_size_limit: 0, + file_size_limit: 0, + image_file_size_limit: 0, + video_file_size_limit: 0, + workflow_file_upload_limit: 0, + }, dataSets: [], agentConfig: DEFAULT_AGENT_SETTING, }, diff --git a/web/models/datasets.ts b/web/models/datasets.ts index aeeb5c161a..39313d68a3 100644 --- a/web/models/datasets.ts +++ b/web/models/datasets.ts @@ -344,6 +344,8 @@ export type WebsiteCrawlInfo = { description: string source_url: string title: string + provider?: string + job_id?: string } export type OnlineDocumentInfo = { diff --git a/web/models/debug.ts b/web/models/debug.ts index 630c48a970..90f79cbf8d 100644 --- a/web/models/debug.ts +++ b/web/models/debug.ts @@ -9,6 +9,7 @@ import type { MetadataFilteringModeEnum, } from '@/app/components/workflow/nodes/knowledge-retrieval/types' import type { ModelConfig as NodeModelConfig } from '@/app/components/workflow/types' +import type { ExternalDataTool } from '@/models/common' export type Inputs = Record export enum PromptMode { @@ -133,6 +134,8 @@ export type ModelConfig = { model_id: string mode: ModelModeType configs: PromptConfig + chat_prompt_config?: ChatPromptConfig | null + completion_prompt_config?: CompletionPromptConfig | null opening_statement: string | null more_like_this: MoreLikeThisConfig | null suggested_questions: string[] | null @@ -143,6 +146,14 @@ export type ModelConfig = { retriever_resource: RetrieverResourceConfig | null sensitive_word_avoidance: ModerationConfig | null annotation_reply: AnnotationReplyConfig | null + external_data_tools?: ExternalDataTool[] | null + system_parameters: { + audio_file_size_limit: number + file_size_limit: number + image_file_size_limit: number + video_file_size_limit: number + workflow_file_upload_limit: number + } dataSets: any[] agentConfig: AgentConfig } diff --git a/web/types/app.ts 
b/web/types/app.ts index abc5b34ca5..591bbf5e31 100644 --- a/web/types/app.ts +++ b/web/types/app.ts @@ -8,6 +8,7 @@ import type { } from '@/models/datasets' import type { UploadFileSetting } from '@/app/components/workflow/types' import type { AccessMode } from '@/models/access-control' +import type { ExternalDataTool } from '@/models/common' export enum Theme { light = 'light', @@ -206,12 +207,12 @@ export type ModelConfig = { suggested_questions?: string[] pre_prompt: string prompt_type: PromptMode - chat_prompt_config: ChatPromptConfig | {} - completion_prompt_config: CompletionPromptConfig | {} + chat_prompt_config?: ChatPromptConfig | null + completion_prompt_config?: CompletionPromptConfig | null user_input_form: UserInputFormItem[] dataset_query_variable?: string more_like_this: { - enabled?: boolean + enabled: boolean } suggested_questions_after_answer: { enabled: boolean @@ -237,12 +238,20 @@ export type ModelConfig = { strategy?: AgentStrategy tools: ToolItem[] } + external_data_tools?: ExternalDataTool[] model: Model dataset_configs: DatasetConfigs file_upload?: { image: VisionSettings } & UploadFileSetting files?: VisionFile[] + system_parameters: { + audio_file_size_limit: number + file_size_limit: number + image_file_size_limit: number + video_file_size_limit: number + workflow_file_upload_limit: number + } created_at?: number updated_at?: number } @@ -360,6 +369,7 @@ export type App = { updated_at: number updated_by?: string } + deleted_tools?: Array<{ id: string; tool_name: string }> /** access control */ access_mode: AccessMode max_active_requests?: number | null From b6e0abadabf7cb84b852d0afdf3911b48046f9c4 Mon Sep 17 00:00:00 2001 From: Novice Date: Mon, 27 Oct 2025 16:04:24 +0800 Subject: [PATCH 03/15] feat: add flatten_output configuration to iteration node (#27502) --- api/core/workflow/nodes/iteration/entities.py | 1 + .../nodes/iteration/iteration_node.py | 8 + ...ation_flatten_output_disabled_workflow.yml | 258 ++++++++++++++++++ ...ration_flatten_output_enabled_workflow.yml | 258 ++++++++++++++++++ .../test_iteration_flatten_output.py | 96 +++++++ .../workflow/nodes/iteration/default.ts | 1 + .../workflow/nodes/iteration/panel.tsx | 13 + .../workflow/nodes/iteration/types.ts | 1 + .../workflow/nodes/iteration/use-config.ts | 9 + web/i18n/en-US/workflow.ts | 2 + web/i18n/zh-Hans/workflow.ts | 2 + 11 files changed, 649 insertions(+) create mode 100644 api/tests/fixtures/workflow/iteration_flatten_output_disabled_workflow.yml create mode 100644 api/tests/fixtures/workflow/iteration_flatten_output_enabled_workflow.yml create mode 100644 api/tests/unit_tests/core/workflow/graph_engine/test_iteration_flatten_output.py diff --git a/api/core/workflow/nodes/iteration/entities.py b/api/core/workflow/nodes/iteration/entities.py index ed4ab2c11c..63a41ec755 100644 --- a/api/core/workflow/nodes/iteration/entities.py +++ b/api/core/workflow/nodes/iteration/entities.py @@ -23,6 +23,7 @@ class IterationNodeData(BaseIterationNodeData): is_parallel: bool = False # open the parallel mode or not parallel_nums: int = 10 # the numbers of parallel error_handle_mode: ErrorHandleMode = ErrorHandleMode.TERMINATED # how to handle the error + flatten_output: bool = True # whether to flatten the output array if all elements are lists class IterationStartNodeData(BaseNodeData): diff --git a/api/core/workflow/nodes/iteration/iteration_node.py b/api/core/workflow/nodes/iteration/iteration_node.py index 3a3a2290be..ce83352dcb 100644 --- a/api/core/workflow/nodes/iteration/iteration_node.py +++ 
b/api/core/workflow/nodes/iteration/iteration_node.py @@ -98,6 +98,7 @@ class IterationNode(LLMUsageTrackingMixin, Node): "is_parallel": False, "parallel_nums": 10, "error_handle_mode": ErrorHandleMode.TERMINATED, + "flatten_output": True, }, } @@ -411,7 +412,14 @@ class IterationNode(LLMUsageTrackingMixin, Node): """ Flatten the outputs list if all elements are lists. This maintains backward compatibility with version 1.8.1 behavior. + + If flatten_output is False, returns outputs as-is (nested structure). + If flatten_output is True (default), flattens the list if all elements are lists. """ + # If flatten_output is disabled, return outputs as-is + if not self._node_data.flatten_output: + return outputs + if not outputs: return outputs diff --git a/api/tests/fixtures/workflow/iteration_flatten_output_disabled_workflow.yml b/api/tests/fixtures/workflow/iteration_flatten_output_disabled_workflow.yml new file mode 100644 index 0000000000..9cae6385c8 --- /dev/null +++ b/api/tests/fixtures/workflow/iteration_flatten_output_disabled_workflow.yml @@ -0,0 +1,258 @@ +app: + description: 'This workflow tests the iteration node with flatten_output=False. + + + It processes [1, 2, 3], outputs [item, item*2] for each iteration. + + + With flatten_output=False, it should output nested arrays: + + + ``` + + {"output": [[1, 2], [2, 4], [3, 6]]} + + ```' + icon: 🤖 + icon_background: '#FFEAD5' + mode: workflow + name: test_iteration_flatten_disabled + use_icon_as_answer_icon: false +dependencies: [] +kind: app +version: 0.3.1 +workflow: + conversation_variables: [] + environment_variables: [] + features: + file_upload: + enabled: false + opening_statement: '' + retriever_resource: + enabled: true + sensitive_word_avoidance: + enabled: false + speech_to_text: + enabled: false + suggested_questions: [] + suggested_questions_after_answer: + enabled: false + text_to_speech: + enabled: false + graph: + edges: + - data: + isInIteration: false + isInLoop: false + sourceType: start + targetType: code + id: start-source-code-target + source: start_node + sourceHandle: source + target: code_node + targetHandle: target + type: custom + zIndex: 0 + - data: + isInIteration: false + isInLoop: false + sourceType: code + targetType: iteration + id: code-source-iteration-target + source: code_node + sourceHandle: source + target: iteration_node + targetHandle: target + type: custom + zIndex: 0 + - data: + isInIteration: true + isInLoop: false + iteration_id: iteration_node + sourceType: iteration-start + targetType: code + id: iteration-start-source-code-inner-target + source: iteration_nodestart + sourceHandle: source + target: code_inner_node + targetHandle: target + type: custom + zIndex: 1002 + - data: + isInIteration: false + isInLoop: false + sourceType: iteration + targetType: end + id: iteration-source-end-target + source: iteration_node + sourceHandle: source + target: end_node + targetHandle: target + type: custom + zIndex: 0 + nodes: + - data: + desc: '' + selected: false + title: Start + type: start + variables: [] + height: 54 + id: start_node + position: + x: 80 + y: 282 + positionAbsolute: + x: 80 + y: 282 + sourcePosition: right + targetPosition: left + type: custom + width: 244 + - data: + code: "\ndef main() -> dict:\n return {\n \"result\": [1, 2, 3],\n\ + \ }\n" + code_language: python3 + desc: '' + outputs: + result: + children: null + type: array[number] + selected: false + title: Generate Array + type: code + variables: [] + height: 54 + id: code_node + position: + x: 384 + y: 282 + 
positionAbsolute: + x: 384 + y: 282 + selected: false + sourcePosition: right + targetPosition: left + type: custom + width: 244 + - data: + desc: '' + error_handle_mode: terminated + flatten_output: false + height: 178 + is_parallel: false + iterator_input_type: array[number] + iterator_selector: + - code_node + - result + output_selector: + - code_inner_node + - result + output_type: array[array[number]] + parallel_nums: 10 + selected: false + start_node_id: iteration_nodestart + title: Iteration with Flatten Disabled + type: iteration + width: 388 + height: 178 + id: iteration_node + position: + x: 684 + y: 282 + positionAbsolute: + x: 684 + y: 282 + selected: false + sourcePosition: right + targetPosition: left + type: custom + width: 388 + zIndex: 1 + - data: + desc: '' + isInIteration: true + selected: false + title: '' + type: iteration-start + draggable: false + height: 48 + id: iteration_nodestart + parentId: iteration_node + position: + x: 24 + y: 68 + positionAbsolute: + x: 708 + y: 350 + selectable: false + sourcePosition: right + targetPosition: left + type: custom-iteration-start + width: 44 + zIndex: 1002 + - data: + code: "\ndef main(arg1: int) -> dict:\n return {\n \"result\": [arg1,\ + \ arg1 * 2],\n }\n" + code_language: python3 + desc: '' + isInIteration: true + isInLoop: false + iteration_id: iteration_node + outputs: + result: + children: null + type: array[number] + selected: false + title: Generate Pair + type: code + variables: + - value_selector: + - iteration_node + - item + value_type: number + variable: arg1 + height: 54 + id: code_inner_node + parentId: iteration_node + position: + x: 128 + y: 68 + positionAbsolute: + x: 812 + y: 350 + selected: false + sourcePosition: right + targetPosition: left + type: custom + width: 244 + zIndex: 1002 + - data: + desc: '' + outputs: + - value_selector: + - iteration_node + - output + value_type: array[array[number]] + variable: output + selected: false + title: End + type: end + height: 90 + id: end_node + position: + x: 1132 + y: 282 + positionAbsolute: + x: 1132 + y: 282 + selected: true + sourcePosition: right + targetPosition: left + type: custom + width: 244 + viewport: + x: -476 + y: 3 + zoom: 1 + diff --git a/api/tests/fixtures/workflow/iteration_flatten_output_enabled_workflow.yml b/api/tests/fixtures/workflow/iteration_flatten_output_enabled_workflow.yml new file mode 100644 index 0000000000..0fc76df768 --- /dev/null +++ b/api/tests/fixtures/workflow/iteration_flatten_output_enabled_workflow.yml @@ -0,0 +1,258 @@ +app: + description: 'This workflow tests the iteration node with flatten_output=True. + + + It processes [1, 2, 3], outputs [item, item*2] for each iteration. 
+ + + With flatten_output=True (default), it should output: + + + ``` + + {"output": [1, 2, 2, 4, 3, 6]} + + ```' + icon: 🤖 + icon_background: '#FFEAD5' + mode: workflow + name: test_iteration_flatten_enabled + use_icon_as_answer_icon: false +dependencies: [] +kind: app +version: 0.3.1 +workflow: + conversation_variables: [] + environment_variables: [] + features: + file_upload: + enabled: false + opening_statement: '' + retriever_resource: + enabled: true + sensitive_word_avoidance: + enabled: false + speech_to_text: + enabled: false + suggested_questions: [] + suggested_questions_after_answer: + enabled: false + text_to_speech: + enabled: false + graph: + edges: + - data: + isInIteration: false + isInLoop: false + sourceType: start + targetType: code + id: start-source-code-target + source: start_node + sourceHandle: source + target: code_node + targetHandle: target + type: custom + zIndex: 0 + - data: + isInIteration: false + isInLoop: false + sourceType: code + targetType: iteration + id: code-source-iteration-target + source: code_node + sourceHandle: source + target: iteration_node + targetHandle: target + type: custom + zIndex: 0 + - data: + isInIteration: true + isInLoop: false + iteration_id: iteration_node + sourceType: iteration-start + targetType: code + id: iteration-start-source-code-inner-target + source: iteration_nodestart + sourceHandle: source + target: code_inner_node + targetHandle: target + type: custom + zIndex: 1002 + - data: + isInIteration: false + isInLoop: false + sourceType: iteration + targetType: end + id: iteration-source-end-target + source: iteration_node + sourceHandle: source + target: end_node + targetHandle: target + type: custom + zIndex: 0 + nodes: + - data: + desc: '' + selected: false + title: Start + type: start + variables: [] + height: 54 + id: start_node + position: + x: 80 + y: 282 + positionAbsolute: + x: 80 + y: 282 + sourcePosition: right + targetPosition: left + type: custom + width: 244 + - data: + code: "\ndef main() -> dict:\n return {\n \"result\": [1, 2, 3],\n\ + \ }\n" + code_language: python3 + desc: '' + outputs: + result: + children: null + type: array[number] + selected: false + title: Generate Array + type: code + variables: [] + height: 54 + id: code_node + position: + x: 384 + y: 282 + positionAbsolute: + x: 384 + y: 282 + selected: false + sourcePosition: right + targetPosition: left + type: custom + width: 244 + - data: + desc: '' + error_handle_mode: terminated + flatten_output: true + height: 178 + is_parallel: false + iterator_input_type: array[number] + iterator_selector: + - code_node + - result + output_selector: + - code_inner_node + - result + output_type: array[array[number]] + parallel_nums: 10 + selected: false + start_node_id: iteration_nodestart + title: Iteration with Flatten Enabled + type: iteration + width: 388 + height: 178 + id: iteration_node + position: + x: 684 + y: 282 + positionAbsolute: + x: 684 + y: 282 + selected: false + sourcePosition: right + targetPosition: left + type: custom + width: 388 + zIndex: 1 + - data: + desc: '' + isInIteration: true + selected: false + title: '' + type: iteration-start + draggable: false + height: 48 + id: iteration_nodestart + parentId: iteration_node + position: + x: 24 + y: 68 + positionAbsolute: + x: 708 + y: 350 + selectable: false + sourcePosition: right + targetPosition: left + type: custom-iteration-start + width: 44 + zIndex: 1002 + - data: + code: "\ndef main(arg1: int) -> dict:\n return {\n \"result\": [arg1,\ + \ arg1 * 2],\n }\n" + code_language: python3 
+ desc: '' + isInIteration: true + isInLoop: false + iteration_id: iteration_node + outputs: + result: + children: null + type: array[number] + selected: false + title: Generate Pair + type: code + variables: + - value_selector: + - iteration_node + - item + value_type: number + variable: arg1 + height: 54 + id: code_inner_node + parentId: iteration_node + position: + x: 128 + y: 68 + positionAbsolute: + x: 812 + y: 350 + selected: false + sourcePosition: right + targetPosition: left + type: custom + width: 244 + zIndex: 1002 + - data: + desc: '' + outputs: + - value_selector: + - iteration_node + - output + value_type: array[number] + variable: output + selected: false + title: End + type: end + height: 90 + id: end_node + position: + x: 1132 + y: 282 + positionAbsolute: + x: 1132 + y: 282 + selected: true + sourcePosition: right + targetPosition: left + type: custom + width: 244 + viewport: + x: -476 + y: 3 + zoom: 1 + diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_iteration_flatten_output.py b/api/tests/unit_tests/core/workflow/graph_engine/test_iteration_flatten_output.py new file mode 100644 index 0000000000..f2095a8a70 --- /dev/null +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_iteration_flatten_output.py @@ -0,0 +1,96 @@ +""" +Test cases for the Iteration node's flatten_output functionality. + +This module tests the iteration node's ability to: +1. Flatten array outputs when flatten_output=True (default) +2. Preserve nested array structure when flatten_output=False +""" + +from .test_table_runner import TableTestRunner, WorkflowTestCase + + +def test_iteration_with_flatten_output_enabled(): + """ + Test iteration node with flatten_output=True (default behavior). + + The fixture implements an iteration that: + 1. Iterates over [1, 2, 3] + 2. For each item, outputs [item, item*2] + 3. With flatten_output=True, should output [1, 2, 2, 4, 3, 6] + """ + runner = TableTestRunner() + + test_case = WorkflowTestCase( + fixture_path="iteration_flatten_output_enabled_workflow", + inputs={}, + expected_outputs={"output": [1, 2, 2, 4, 3, 6]}, + description="Iteration with flatten_output=True flattens nested arrays", + use_auto_mock=False, # Run code nodes directly + ) + + result = runner.run_test_case(test_case) + + assert result.success, f"Test failed: {result.error}" + assert result.actual_outputs is not None, "Should have outputs" + assert result.actual_outputs == {"output": [1, 2, 2, 4, 3, 6]}, ( + f"Expected flattened output [1, 2, 2, 4, 3, 6], got {result.actual_outputs}" + ) + + +def test_iteration_with_flatten_output_disabled(): + """ + Test iteration node with flatten_output=False. + + The fixture implements an iteration that: + 1. Iterates over [1, 2, 3] + 2. For each item, outputs [item, item*2] + 3. 
With flatten_output=False, should output [[1, 2], [2, 4], [3, 6]] + """ + runner = TableTestRunner() + + test_case = WorkflowTestCase( + fixture_path="iteration_flatten_output_disabled_workflow", + inputs={}, + expected_outputs={"output": [[1, 2], [2, 4], [3, 6]]}, + description="Iteration with flatten_output=False preserves nested structure", + use_auto_mock=False, # Run code nodes directly + ) + + result = runner.run_test_case(test_case) + + assert result.success, f"Test failed: {result.error}" + assert result.actual_outputs is not None, "Should have outputs" + assert result.actual_outputs == {"output": [[1, 2], [2, 4], [3, 6]]}, ( + f"Expected nested output [[1, 2], [2, 4], [3, 6]], got {result.actual_outputs}" + ) + + +def test_iteration_flatten_output_comparison(): + """ + Run both flatten_output configurations in parallel to verify the difference. + """ + runner = TableTestRunner() + + test_cases = [ + WorkflowTestCase( + fixture_path="iteration_flatten_output_enabled_workflow", + inputs={}, + expected_outputs={"output": [1, 2, 2, 4, 3, 6]}, + description="flatten_output=True: Flattened output", + use_auto_mock=False, # Run code nodes directly + ), + WorkflowTestCase( + fixture_path="iteration_flatten_output_disabled_workflow", + inputs={}, + expected_outputs={"output": [[1, 2], [2, 4], [3, 6]]}, + description="flatten_output=False: Nested output", + use_auto_mock=False, # Run code nodes directly + ), + ] + + suite_result = runner.run_table_tests(test_cases, parallel=True) + + # Assert all tests passed + assert suite_result.passed_tests == 2, f"Expected 2 passed tests, got {suite_result.passed_tests}" + assert suite_result.failed_tests == 0, f"Expected 0 failed tests, got {suite_result.failed_tests}" + assert suite_result.success_rate == 100.0, f"Expected 100% success rate, got {suite_result.success_rate}" diff --git a/web/app/components/workflow/nodes/iteration/default.ts b/web/app/components/workflow/nodes/iteration/default.ts index 450379ec6b..c375dbdcbf 100644 --- a/web/app/components/workflow/nodes/iteration/default.ts +++ b/web/app/components/workflow/nodes/iteration/default.ts @@ -22,6 +22,7 @@ const nodeDefault: NodeDefault = { is_parallel: false, parallel_nums: 10, error_handle_mode: ErrorHandleMode.Terminated, + flatten_output: true, }, checkValid(payload: IterationNodeType, t: any) { let errorMessages = '' diff --git a/web/app/components/workflow/nodes/iteration/panel.tsx b/web/app/components/workflow/nodes/iteration/panel.tsx index 23e93b0dd5..63e0d5f8cd 100644 --- a/web/app/components/workflow/nodes/iteration/panel.tsx +++ b/web/app/components/workflow/nodes/iteration/panel.tsx @@ -46,6 +46,7 @@ const Panel: FC> = ({ changeParallel, changeErrorResponseMode, changeParallelNums, + changeFlattenOutput, } = useConfig(id, data) return ( @@ -117,6 +118,18 @@ const Panel: FC> = ({ - {!readonly && headerItems.length > 1 && ( + {!readonly && !!headersItems.length && ( handleRemoveItem(index)} className='mr-2' diff --git a/web/app/components/tools/mcp/modal.tsx b/web/app/components/tools/mcp/modal.tsx index 1d888c57e8..987a517ef5 100644 --- a/web/app/components/tools/mcp/modal.tsx +++ b/web/app/components/tools/mcp/modal.tsx @@ -1,6 +1,7 @@ 'use client' -import React, { useRef, useState } from 'react' +import React, { useCallback, useRef, useState } from 'react' import { useTranslation } from 'react-i18next' +import { v4 as uuid } from 'uuid' import { getDomain } from 'tldts' import { RiCloseLine, RiEditLine } from '@remixicon/react' import { Mcp } from 
'@/app/components/base/icons/src/vender/other' @@ -11,6 +12,7 @@ import Modal from '@/app/components/base/modal' import Button from '@/app/components/base/button' import Input from '@/app/components/base/input' import HeadersInput from './headers-input' +import type { HeaderItem } from './headers-input' import type { AppIconType } from '@/types/app' import type { ToolWithProvider } from '@/app/components/workflow/types' import { noop } from 'lodash-es' @@ -19,6 +21,9 @@ import { uploadRemoteFileInfo } from '@/service/common' import cn from '@/utils/classnames' import { useHover } from 'ahooks' import { shouldUseMcpIconForAppIcon } from '@/utils/mcp' +import TabSlider from '@/app/components/base/tab-slider' +import { MCPAuthMethod } from '@/app/components/tools/types' +import Switch from '@/app/components/base/switch' export type DuplicateAppModalProps = { data?: ToolWithProvider @@ -30,9 +35,17 @@ export type DuplicateAppModalProps = { icon: string icon_background?: string | null server_identifier: string - timeout: number - sse_read_timeout: number headers?: Record + is_dynamic_registration?: boolean + authentication?: { + client_id?: string + client_secret?: string + grant_type?: string + } + configuration: { + timeout: number + sse_read_timeout: number + } }) => void onHide: () => void } @@ -63,6 +76,20 @@ const MCPModal = ({ const { t } = useTranslation() const isCreate = !data + const authMethods = [ + { + text: t('tools.mcp.modal.authentication'), + value: MCPAuthMethod.authentication, + }, + { + text: t('tools.mcp.modal.headers'), + value: MCPAuthMethod.headers, + }, + { + text: t('tools.mcp.modal.configurations'), + value: MCPAuthMethod.configurations, + }, + ] const originalServerUrl = data?.server_url const originalServerID = data?.server_identifier const [url, setUrl] = React.useState(data?.server_url || '') @@ -72,12 +99,16 @@ const MCPModal = ({ const [serverIdentifier, setServerIdentifier] = React.useState(data?.server_identifier || '') const [timeout, setMcpTimeout] = React.useState(data?.timeout || 30) const [sseReadTimeout, setSseReadTimeout] = React.useState(data?.sse_read_timeout || 300) - const [headers, setHeaders] = React.useState>( - data?.masked_headers || {}, + const [headers, setHeaders] = React.useState( + Object.entries(data?.masked_headers || {}).map(([key, value]) => ({ id: uuid(), key, value })), ) const [isFetchingIcon, setIsFetchingIcon] = useState(false) const appIconRef = useRef(null) const isHovering = useHover(appIconRef) + const [authMethod, setAuthMethod] = useState(MCPAuthMethod.authentication) + const [isDynamicRegistration, setIsDynamicRegistration] = useState(isCreate ? 
true : data?.is_dynamic_registration) + const [clientID, setClientID] = useState(data?.authentication?.client_id || '') + const [credentials, setCredentials] = useState(data?.authentication?.client_secret || '') // Update states when data changes (for edit mode) React.useEffect(() => { @@ -87,8 +118,11 @@ const MCPModal = ({ setServerIdentifier(data.server_identifier || '') setMcpTimeout(data.timeout || 30) setSseReadTimeout(data.sse_read_timeout || 300) - setHeaders(data.masked_headers || {}) + setHeaders(Object.entries(data.masked_headers || {}).map(([key, value]) => ({ id: uuid(), key, value }))) setAppIcon(getIcon(data)) + setIsDynamicRegistration(data.is_dynamic_registration) + setClientID(data.authentication?.client_id || '') + setCredentials(data.authentication?.client_secret || '') } else { // Reset for create mode @@ -97,8 +131,11 @@ const MCPModal = ({ setServerIdentifier('') setMcpTimeout(30) setSseReadTimeout(300) - setHeaders({}) + setHeaders([]) setAppIcon(DEFAULT_ICON as AppIconSelection) + setIsDynamicRegistration(true) + setClientID('') + setCredentials('') } }, [data]) @@ -150,6 +187,11 @@ const MCPModal = ({ Toast.notify({ type: 'error', message: 'invalid server identifier' }) return } + const formattedHeaders = headers.reduce((acc, item) => { + if (item.key.trim()) + acc[item.key.trim()] = item.value + return acc + }, {} as Record) await onConfirm({ server_url: originalServerUrl === url ? '[__HIDDEN__]' : url.trim(), name, @@ -157,14 +199,25 @@ const MCPModal = ({ icon: appIcon.type === 'emoji' ? appIcon.icon : appIcon.fileId, icon_background: appIcon.type === 'emoji' ? appIcon.background : undefined, server_identifier: serverIdentifier.trim(), - timeout: timeout || 30, - sse_read_timeout: sseReadTimeout || 300, - headers: Object.keys(headers).length > 0 ? headers : undefined, + headers: Object.keys(formattedHeaders).length > 0 ? formattedHeaders : undefined, + is_dynamic_registration: isDynamicRegistration, + authentication: { + client_id: clientID, + client_secret: credentials, + }, + configuration: { + timeout: timeout || 30, + sse_read_timeout: sseReadTimeout || 300, + }, }) if(isCreate) onHide() } + const handleAuthMethodChange = useCallback((value: string) => { + setAuthMethod(value as MCPAuthMethod) + }, []) + return ( <> )}
-
-
- {t('tools.mcp.modal.timeout')} -
- setMcpTimeout(Number(e.target.value))} - onBlur={e => handleBlur(e.target.value.trim())} - placeholder={t('tools.mcp.modal.timeoutPlaceholder')} - /> -
-
-
- {t('tools.mcp.modal.sseReadTimeout')} -
- setSseReadTimeout(Number(e.target.value))} - onBlur={e => handleBlur(e.target.value.trim())} - placeholder={t('tools.mcp.modal.timeoutPlaceholder')} - /> -
-
-
- {t('tools.mcp.modal.headers')} -
-
{t('tools.mcp.modal.headersTip')}
- 0} - /> -
+ { + return `flex-1 ${isActive && 'text-text-accent-light-mode-only'}` + }} + value={authMethod} + onChange={handleAuthMethodChange} + options={authMethods} + /> + { + authMethod === MCPAuthMethod.authentication && ( + <> +
+
+ + {t('tools.mcp.modal.useDynamicClientRegistration')} +
+
+
+
+ {t('tools.mcp.modal.clientID')} +
+ setClientID(e.target.value)} + onBlur={e => handleBlur(e.target.value.trim())} + placeholder={t('tools.mcp.modal.clientID')} + disabled={isDynamicRegistration} + /> +
+
+
+ {t('tools.mcp.modal.clientSecret')} +
+ setCredentials(e.target.value)} + onBlur={e => handleBlur(e.target.value.trim())} + placeholder={t('tools.mcp.modal.clientSecretPlaceholder')} + disabled={isDynamicRegistration} + /> +
+ + ) + } + { + authMethod === MCPAuthMethod.headers && ( +
+
+ {t('tools.mcp.modal.headers')} +
+
{t('tools.mcp.modal.headersTip')}
+ item.key.trim()).length > 0} + /> +
+ ) + } + { + authMethod === MCPAuthMethod.configurations && ( + <> +
+
+ {t('tools.mcp.modal.timeout')} +
+ setMcpTimeout(Number(e.target.value))} + onBlur={e => handleBlur(e.target.value.trim())} + placeholder={t('tools.mcp.modal.timeoutPlaceholder')} + /> +
+
+
+ {t('tools.mcp.modal.sseReadTimeout')} +
+ setSseReadTimeout(Number(e.target.value))} + onBlur={e => handleBlur(e.target.value.trim())} + placeholder={t('tools.mcp.modal.timeoutPlaceholder')} + /> +
+ + ) + }
diff --git a/web/app/components/tools/types.ts b/web/app/components/tools/types.ts index 623a7b6d8a..1bfccc04e5 100644 --- a/web/app/components/tools/types.ts +++ b/web/app/components/tools/types.ts @@ -65,6 +65,15 @@ export type Collection = { masked_headers?: Record is_authorized?: boolean provider?: string + is_dynamic_registration?: boolean + authentication?: { + client_id?: string + client_secret?: string + } + configuration?: { + timeout?: number + sse_read_timeout?: number + } } export type ToolParameter = { @@ -192,3 +201,9 @@ export type MCPServerDetail = { parameters?: Record headers?: Record } + +export enum MCPAuthMethod { + authentication = 'authentication', + headers = 'headers', + configurations = 'configurations', +} diff --git a/web/i18n/en-US/tools.ts b/web/i18n/en-US/tools.ts index 3fba10447f..ec78aa2084 100644 --- a/web/i18n/en-US/tools.ts +++ b/web/i18n/en-US/tools.ts @@ -203,6 +203,12 @@ const translation = { timeout: 'Timeout', sseReadTimeout: 'SSE Read Timeout', timeoutPlaceholder: '30', + authentication: 'Authentication', + useDynamicClientRegistration: 'Use Dynamic Client Registration', + clientID: 'Client ID', + clientSecret: 'Client Secret', + clientSecretPlaceholder: 'Client Secret', + configurations: 'Configurations', }, delete: 'Remove MCP Server', deleteConfirmTitle: 'Would you like to remove {{mcp}}?', diff --git a/web/i18n/zh-Hans/tools.ts b/web/i18n/zh-Hans/tools.ts index 15b1c7f592..8382d192f6 100644 --- a/web/i18n/zh-Hans/tools.ts +++ b/web/i18n/zh-Hans/tools.ts @@ -203,6 +203,12 @@ const translation = { timeout: '超时时间', sseReadTimeout: 'SSE 读取超时时间', timeoutPlaceholder: '30', + authentication: '认证', + useDynamicClientRegistration: '使用动态客户端注册', + clientID: '客户端 ID', + clientSecret: '客户端密钥', + clientSecretPlaceholder: '客户端密钥', + configurations: '配置', }, delete: '删除 MCP 服务', deleteConfirmTitle: '你想要删除 {{mcp}} 吗?', From d6bd2a9bdb5439565689c5fc4168559d330fd7a7 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 27 Oct 2025 17:39:43 +0800 Subject: [PATCH 06/15] chore: translate i18n files and update type definitions (#27503) Co-authored-by: Nov1c444 <66365942+Nov1c444@users.noreply.github.com> --- web/i18n/de-DE/tools.ts | 6 ++++++ web/i18n/es-ES/tools.ts | 6 ++++++ web/i18n/fa-IR/tools.ts | 6 ++++++ web/i18n/fr-FR/tools.ts | 6 ++++++ web/i18n/hi-IN/tools.ts | 6 ++++++ web/i18n/id-ID/tools.ts | 6 ++++++ web/i18n/it-IT/tools.ts | 6 ++++++ web/i18n/ja-JP/tools.ts | 6 ++++++ web/i18n/ko-KR/tools.ts | 6 ++++++ web/i18n/pl-PL/tools.ts | 6 ++++++ web/i18n/pt-BR/tools.ts | 6 ++++++ web/i18n/ro-RO/tools.ts | 6 ++++++ web/i18n/ru-RU/tools.ts | 6 ++++++ web/i18n/sl-SI/tools.ts | 6 ++++++ web/i18n/th-TH/tools.ts | 6 ++++++ web/i18n/tr-TR/tools.ts | 6 ++++++ web/i18n/uk-UA/tools.ts | 6 ++++++ web/i18n/vi-VN/tools.ts | 6 ++++++ web/i18n/zh-Hant/tools.ts | 6 ++++++ 19 files changed, 114 insertions(+) diff --git a/web/i18n/de-DE/tools.ts b/web/i18n/de-DE/tools.ts index 8cef76b732..8aef8e87f9 100644 --- a/web/i18n/de-DE/tools.ts +++ b/web/i18n/de-DE/tools.ts @@ -203,6 +203,12 @@ const translation = { noHeaders: 'Keine benutzerdefinierten Header konfiguriert', maskedHeadersTip: 'Headerwerte sind zum Schutz maskiert. 
Änderungen werden die tatsächlichen Werte aktualisieren.', headersTip: 'Zusätzliche HTTP-Header, die mit MCP-Serveranfragen gesendet werden sollen', + clientSecret: 'Client-Geheimnis', + clientSecretPlaceholder: 'Client-Geheimnis', + clientID: 'Kunden-ID', + authentication: 'Authentifizierung', + useDynamicClientRegistration: 'Dynamische Client-Registrierung verwenden', + configurations: 'Konfigurationen', }, delete: 'MCP-Server entfernen', deleteConfirmTitle: 'Möchten Sie {{mcp}} entfernen?', diff --git a/web/i18n/es-ES/tools.ts b/web/i18n/es-ES/tools.ts index 10584c41ca..304247aee9 100644 --- a/web/i18n/es-ES/tools.ts +++ b/web/i18n/es-ES/tools.ts @@ -203,6 +203,12 @@ const translation = { headerValue: 'Valor del encabezado', noHeaders: 'No se han configurado encabezados personalizados', headerKey: 'Nombre del encabezado', + authentication: 'Autenticación', + clientID: 'ID del Cliente', + clientSecretPlaceholder: 'Secreto del Cliente', + useDynamicClientRegistration: 'Usar registro dinámico de clientes', + clientSecret: 'Secreto del Cliente', + configurations: 'Configuraciones', }, delete: 'Eliminar servidor MCP', deleteConfirmTitle: '¿Eliminar {{mcp}}?', diff --git a/web/i18n/fa-IR/tools.ts b/web/i18n/fa-IR/tools.ts index 587c16d960..a6be1d0d42 100644 --- a/web/i18n/fa-IR/tools.ts +++ b/web/i18n/fa-IR/tools.ts @@ -203,6 +203,12 @@ const translation = { noHeaders: 'هیچ هدر سفارشی پیکربندی نشده است', headersTip: 'هدرهای HTTP اضافی برای ارسال با درخواست‌های سرور MCP', maskedHeadersTip: 'مقدارهای هدر به خاطر امنیت مخفی شده‌اند. تغییرات مقادیر واقعی را به‌روزرسانی خواهد کرد.', + authentication: 'احراز هویت', + configurations: 'تنظیمات', + clientSecretPlaceholder: 'رمز مشتری', + clientID: 'شناسه مشتری', + clientSecret: 'رمز مشتری', + useDynamicClientRegistration: 'استفاده از ثبت‌نام پویا برای مشتری', }, delete: 'حذف سرور MCP', deleteConfirmTitle: 'آیا مایل به حذف {mcp} هستید؟', diff --git a/web/i18n/fr-FR/tools.ts b/web/i18n/fr-FR/tools.ts index c91952d6c5..7c0e4db020 100644 --- a/web/i18n/fr-FR/tools.ts +++ b/web/i18n/fr-FR/tools.ts @@ -203,6 +203,12 @@ const translation = { headersTip: 'En-têtes HTTP supplémentaires à envoyer avec les requêtes au serveur MCP', addHeader: 'Ajouter un en-tête', maskedHeadersTip: 'Les valeurs d\'en-tête sont masquées pour des raisons de sécurité. 
Les modifications mettront à jour les valeurs réelles.', + clientSecretPlaceholder: 'Secret client', + configurations: 'Configurations', + clientID: 'ID client', + authentication: 'Authentification', + useDynamicClientRegistration: 'Utiliser l\'enregistrement dynamique des clients', + clientSecret: 'Secret client', }, delete: 'Supprimer le Serveur MCP', deleteConfirmTitle: 'Souhaitez-vous supprimer {mcp}?', diff --git a/web/i18n/hi-IN/tools.ts b/web/i18n/hi-IN/tools.ts index 7279d3bcbe..6e6dcf0ff6 100644 --- a/web/i18n/hi-IN/tools.ts +++ b/web/i18n/hi-IN/tools.ts @@ -208,6 +208,12 @@ const translation = { noHeaders: 'कोई कस्टम हेडर कॉन्फ़िगर नहीं किए गए हैं', maskedHeadersTip: 'सुरक्षा के लिए हेडर मानों को छिपाया गया है। परिवर्तन वास्तविक मानों को अपडेट करेगा।', headersTip: 'MCP सर्वर अनुरोधों के साथ भेजने के लिए अतिरिक्त HTTP हेडर्स', + clientSecretPlaceholder: 'क्लाइंट सीक्रेट', + clientSecret: 'क्लाइंट सीक्रेट', + clientID: 'क्लाइंट आईडी', + configurations: 'संरचनाएँ', + authentication: 'प्रमाणीकरण', + useDynamicClientRegistration: 'डायनामिक क्लाइंट पंजीकरण का उपयोग करें', }, delete: 'MCP सर्वर हटाएँ', deleteConfirmTitle: '{mcp} हटाना चाहते हैं?', diff --git a/web/i18n/id-ID/tools.ts b/web/i18n/id-ID/tools.ts index e3817e0111..707594446d 100644 --- a/web/i18n/id-ID/tools.ts +++ b/web/i18n/id-ID/tools.ts @@ -185,6 +185,12 @@ const translation = { headerValuePlaceholder: 'Bearer 123', noHeaders: 'Tidak ada header kustom yang dikonfigurasi', maskedHeadersTip: 'Nilai header disembunyikan untuk keamanan. Perubahan akan memperbarui nilai yang sebenarnya.', + clientSecretPlaceholder: 'Rahasia Klien', + authentication: 'Otentikasi', + useDynamicClientRegistration: 'Gunakan Pendaftaran Klien Dinamis', + configurations: 'Konfigurasi', + clientSecret: 'Rahasia Klien', + clientID: 'ID Klien', }, operation: { edit: 'Mengedit', diff --git a/web/i18n/it-IT/tools.ts b/web/i18n/it-IT/tools.ts index 5e54b8f837..0b8b122518 100644 --- a/web/i18n/it-IT/tools.ts +++ b/web/i18n/it-IT/tools.ts @@ -213,6 +213,12 @@ const translation = { headerValuePlaceholder: 'ad esempio, Token di accesso123', headersTip: 'Intestazioni HTTP aggiuntive da inviare con le richieste al server MCP', maskedHeadersTip: 'I valori dell\'intestazione sono mascherati per motivi di sicurezza. 
Le modifiche aggiorneranno i valori effettivi.', + clientID: 'ID cliente', + clientSecret: 'Segreto del Cliente', + useDynamicClientRegistration: 'Usa la Registrazione Dinamica del Client', + clientSecretPlaceholder: 'Segreto del Cliente', + authentication: 'Autenticazione', + configurations: 'Configurazioni', }, delete: 'Rimuovi Server MCP', deleteConfirmTitle: 'Vuoi rimuovere {mcp}?', diff --git a/web/i18n/ja-JP/tools.ts b/web/i18n/ja-JP/tools.ts index 2fed3768c0..812b5f3c92 100644 --- a/web/i18n/ja-JP/tools.ts +++ b/web/i18n/ja-JP/tools.ts @@ -203,6 +203,12 @@ const translation = { noHeaders: 'カスタムヘッダーは設定されていません', headersTip: 'MCPサーバーへのリクエストに送信する追加のHTTPヘッダー', maskedHeadersTip: 'ヘッダー値はセキュリティのためマスクされています。変更は実際の値を更新します。', + configurations: '設定', + authentication: '認証', + clientID: 'クライアントID', + useDynamicClientRegistration: '動的クライアント登録を使用する', + clientSecretPlaceholder: 'クライアントシークレット', + clientSecret: 'クライアントシークレット', }, delete: 'MCP サーバーを削除', deleteConfirmTitle: '{{mcp}} を削除しますか?', diff --git a/web/i18n/ko-KR/tools.ts b/web/i18n/ko-KR/tools.ts index d8e975e61c..ddcefe4bd4 100644 --- a/web/i18n/ko-KR/tools.ts +++ b/web/i18n/ko-KR/tools.ts @@ -203,6 +203,12 @@ const translation = { noHeaders: '사용자 정의 헤더가 구성되어 있지 않습니다.', headersTip: 'MCP 서버 요청과 함께 보낼 추가 HTTP 헤더', maskedHeadersTip: '헤더 값은 보안상 마스킹 처리되어 있습니다. 변경 사항은 실제 값에 업데이트됩니다.', + authentication: '인증', + configurations: '구성', + useDynamicClientRegistration: '동적 클라이언트 등록 사용', + clientSecret: '클라이언트 시크릿', + clientID: '클라이언트 ID', + clientSecretPlaceholder: '클라이언트 시크릿', }, delete: 'MCP 서버 제거', deleteConfirmTitle: '{mcp}를 제거하시겠습니까?', diff --git a/web/i18n/pl-PL/tools.ts b/web/i18n/pl-PL/tools.ts index dfa83d1231..16fe5037df 100644 --- a/web/i18n/pl-PL/tools.ts +++ b/web/i18n/pl-PL/tools.ts @@ -207,6 +207,12 @@ const translation = { headerValue: 'Wartość nagłówka', noHeaders: 'Brak skonfigurowanych nagłówków niestandardowych', maskedHeadersTip: 'Wartości nagłówków są ukryte dla bezpieczeństwa. Zmiany zaktualizują rzeczywiste wartości.', + configurations: 'Konfiguracje', + authentication: 'Uwierzytelnianie', + clientSecretPlaceholder: 'Tajny klucz klienta', + clientSecret: 'Tajny klucz klienta', + useDynamicClientRegistration: 'Użyj dynamicznej rejestracji klienta', + clientID: 'ID klienta', }, delete: 'Usuń serwer MCP', deleteConfirmTitle: 'Usunąć {mcp}?', diff --git a/web/i18n/pt-BR/tools.ts b/web/i18n/pt-BR/tools.ts index 401a81f615..66b040275d 100644 --- a/web/i18n/pt-BR/tools.ts +++ b/web/i18n/pt-BR/tools.ts @@ -203,6 +203,12 @@ const translation = { headerKey: 'Nome do Cabeçalho', noHeaders: 'Nenhum cabeçalho personalizado configurado', headerValuePlaceholder: 'ex: Token de portador 123', + useDynamicClientRegistration: 'Usar Registro Dinâmico de Cliente', + configurations: 'Configurações', + clientSecret: 'Segredo do Cliente', + authentication: 'Autenticação', + clientID: 'ID do Cliente', + clientSecretPlaceholder: 'Segredo do Cliente', }, delete: 'Remover Servidor MCP', deleteConfirmTitle: 'Você gostaria de remover {{mcp}}?', diff --git a/web/i18n/ro-RO/tools.ts b/web/i18n/ro-RO/tools.ts index b732128684..72c3954c97 100644 --- a/web/i18n/ro-RO/tools.ts +++ b/web/i18n/ro-RO/tools.ts @@ -203,6 +203,12 @@ const translation = { maskedHeadersTip: 'Valorile de antet sunt mascate pentru securitate. 
Modificările vor actualiza valorile reale.', headersTip: 'Header-uri HTTP suplimentare de trimis cu cererile către serverul MCP', noHeaders: 'Nu sunt configurate antete personalizate.', + authentication: 'Autentificare', + configurations: 'Configurații', + clientSecretPlaceholder: 'Secretul Clientului', + clientID: 'ID client', + useDynamicClientRegistration: 'Utilizați înregistrarea dinamică a clientului', + clientSecret: 'Secretul Clientului', }, delete: 'Eliminare Server MCP', deleteConfirmTitle: 'Ștergeți {mcp}?', diff --git a/web/i18n/ru-RU/tools.ts b/web/i18n/ru-RU/tools.ts index 36d48affc2..7ee263657d 100644 --- a/web/i18n/ru-RU/tools.ts +++ b/web/i18n/ru-RU/tools.ts @@ -203,6 +203,12 @@ const translation = { noHeaders: 'Нет настроенных пользовательских заголовков', maskedHeadersTip: 'Значения заголовков скрыты для безопасности. Изменения обновят фактические значения.', headersTip: 'Дополнительные HTTP заголовки для отправки с запросами к серверу MCP', + configurations: 'Конфигурации', + clientID: 'Идентификатор клиента', + clientSecretPlaceholder: 'Секрет клиента', + useDynamicClientRegistration: 'Использовать динамическую регистрацию клиентов', + clientSecret: 'Секрет клиента', + authentication: 'Аутентификация', }, delete: 'Удалить MCP сервер', deleteConfirmTitle: 'Вы действительно хотите удалить {mcp}?', diff --git a/web/i18n/sl-SI/tools.ts b/web/i18n/sl-SI/tools.ts index 8eb28c21bf..dccf8b9178 100644 --- a/web/i18n/sl-SI/tools.ts +++ b/web/i18n/sl-SI/tools.ts @@ -203,6 +203,12 @@ const translation = { headerValuePlaceholder: 'npr., Bearer žeton123', noHeaders: 'Nobena prilagojena glava ni konfigurirana', maskedHeadersTip: 'Vrednosti glave so zakrite zaradi varnosti. Spremembe bodo posodobile dejanske vrednosti.', + authentication: 'Avtentikacija', + configurations: 'Konfiguracije', + clientSecret: 'Skrivnost stranke', + useDynamicClientRegistration: 'Uporabi dinamično registracijo odjemalca', + clientID: 'ID stranke', + clientSecretPlaceholder: 'Skrivnost stranke', }, delete: 'Odstrani strežnik MCP', deleteConfirmTitle: 'Odstraniti {mcp}?', diff --git a/web/i18n/th-TH/tools.ts b/web/i18n/th-TH/tools.ts index 71175ff26c..848a3f51b6 100644 --- a/web/i18n/th-TH/tools.ts +++ b/web/i18n/th-TH/tools.ts @@ -203,6 +203,12 @@ const translation = { noHeaders: 'ไม่มีการกำหนดหัวข้อที่กำหนดเอง', headersTip: 'HTTP header เพิ่มเติมที่จะส่งไปกับคำขอ MCP server', maskedHeadersTip: 'ค่าหัวถูกปกปิดเพื่อความปลอดภัย การเปลี่ยนแปลงจะปรับปรุงค่าที่แท้จริง', + clientSecret: 'รหัสลับของลูกค้า', + configurations: 'การตั้งค่า', + authentication: 'การตรวจสอบตัวตน', + clientSecretPlaceholder: 'รหัสลับของลูกค้า', + useDynamicClientRegistration: 'ใช้การลงทะเบียนลูกค้าแบบไดนามิก', + clientID: 'รหัสลูกค้า', }, delete: 'ลบเซิร์ฟเวอร์ MCP', deleteConfirmTitle: 'คุณต้องการลบ {mcp} หรือไม่?', diff --git a/web/i18n/tr-TR/tools.ts b/web/i18n/tr-TR/tools.ts index d309b78689..ccc97fef10 100644 --- a/web/i18n/tr-TR/tools.ts +++ b/web/i18n/tr-TR/tools.ts @@ -203,6 +203,12 @@ const translation = { headersTip: 'MCP sunucu istekleri ile gönderilecek ek HTTP başlıkları', headerValuePlaceholder: 'örneğin, Taşıyıcı jeton123', maskedHeadersTip: 'Başlık değerleri güvenlik amacıyla gizlenmiştir. 
Değişiklikler gerçek değerleri güncelleyecektir.', + clientID: 'Müşteri Kimliği', + configurations: 'Yapılandırmalar', + clientSecretPlaceholder: 'İstemci Sırrı', + clientSecret: 'İstemci Sırrı', + authentication: 'Kimlik Doğrulama', + useDynamicClientRegistration: 'Dinamik İstemci Kaydını Kullan', }, delete: 'MCP Sunucusunu Kaldır', deleteConfirmTitle: '{mcp} kaldırılsın mı?', diff --git a/web/i18n/uk-UA/tools.ts b/web/i18n/uk-UA/tools.ts index 596153974f..40e35a1236 100644 --- a/web/i18n/uk-UA/tools.ts +++ b/web/i18n/uk-UA/tools.ts @@ -203,6 +203,12 @@ const translation = { headerKeyPlaceholder: 'наприклад, Авторизація', maskedHeadersTip: 'Значення заголовків маскуються для безпеки. Зміни оновлять фактичні значення.', headersTip: 'Додаткові HTTP заголовки для відправлення з запитами до сервера MCP', + clientSecret: 'Секрет клієнта', + clientSecretPlaceholder: 'Секрет клієнта', + clientID: 'Ідентифікатор клієнта', + authentication: 'Аутентифікація', + configurations: 'Конфігурації', + useDynamicClientRegistration: 'Використовувати динамічну реєстрацію клієнтів', }, delete: 'Видалити сервер MCP', deleteConfirmTitle: 'Видалити {mcp}?', diff --git a/web/i18n/vi-VN/tools.ts b/web/i18n/vi-VN/tools.ts index 7c0826890e..08041ce400 100644 --- a/web/i18n/vi-VN/tools.ts +++ b/web/i18n/vi-VN/tools.ts @@ -203,6 +203,12 @@ const translation = { headerValue: 'Giá trị tiêu đề', maskedHeadersTip: 'Các giá trị tiêu đề được mã hóa để đảm bảo an ninh. Các thay đổi sẽ cập nhật các giá trị thực tế.', headersTip: 'Các tiêu đề HTTP bổ sung để gửi cùng với các yêu cầu máy chủ MCP', + authentication: 'Xác thực', + clientSecret: 'Bí mật của khách hàng', + clientID: 'ID khách hàng', + configurations: 'Cấu hình', + useDynamicClientRegistration: 'Sử dụng Đăng ký Khách hàng Động', + clientSecretPlaceholder: 'Bí mật của khách hàng', }, delete: 'Xóa Máy chủ MCP', deleteConfirmTitle: 'Xóa {mcp}?', diff --git a/web/i18n/zh-Hant/tools.ts b/web/i18n/zh-Hant/tools.ts index 3c53b87c72..0e8e937419 100644 --- a/web/i18n/zh-Hant/tools.ts +++ b/web/i18n/zh-Hant/tools.ts @@ -203,6 +203,12 @@ const translation = { headersTip: '與 MCP 伺服器請求一同發送的附加 HTTP 標頭', maskedHeadersTip: '標頭值已被遮罩以保障安全。更改將更新實際值。', headers: '標題', + authentication: '身份驗證', + clientID: '客戶編號', + clientSecretPlaceholder: '客戶端密鑰', + configurations: '設定', + useDynamicClientRegistration: '使用動態客戶端註冊', + clientSecret: '客戶端密鑰', }, delete: '刪除 MCP 伺服器', deleteConfirmTitle: '您確定要刪除 {{mcp}} 嗎?', From dc1ae57dc669f4dbce1daa08b37f2e78068d5074 Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Mon, 27 Oct 2025 18:39:52 +0900 Subject: [PATCH 07/15] example for 24421 doc (#27511) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .../rag_pipeline/datasource_content_preview.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/api/controllers/console/datasets/rag_pipeline/datasource_content_preview.py b/api/controllers/console/datasets/rag_pipeline/datasource_content_preview.py index 856e4a1c70..d413def27f 100644 --- a/api/controllers/console/datasets/rag_pipeline/datasource_content_preview.py +++ b/api/controllers/console/datasets/rag_pipeline/datasource_content_preview.py @@ -4,7 +4,7 @@ from flask_restx import ( # type: ignore ) from werkzeug.exceptions import Forbidden -from controllers.console import console_ns +from controllers.console import api, console_ns from controllers.console.datasets.wraps import get_rag_pipeline from controllers.console.wraps import account_initialization_required, setup_required 
from libs.login import current_user, login_required @@ -12,9 +12,17 @@ from models import Account from models.dataset import Pipeline from services.rag_pipeline.rag_pipeline import RagPipelineService +parser = ( + reqparse.RequestParser() + .add_argument("inputs", type=dict, required=True, nullable=False, location="json") + .add_argument("datasource_type", type=str, required=True, location="json") + .add_argument("credential_id", type=str, required=False, location="json") +) + @console_ns.route("/rag/pipelines//workflows/published/datasource/nodes//preview") class DataSourceContentPreviewApi(Resource): + @api.expect(parser) @setup_required @login_required @account_initialization_required @@ -26,12 +34,6 @@ class DataSourceContentPreviewApi(Resource): if not isinstance(current_user, Account): raise Forbidden() - parser = ( - reqparse.RequestParser() - .add_argument("inputs", type=dict, required=True, nullable=False, location="json") - .add_argument("datasource_type", type=str, required=True, location="json") - .add_argument("credential_id", type=str, required=False, location="json") - ) args = parser.parse_args() inputs = args.get("inputs") From d9860b8907f5f2593facdd2b1487911edfb9f840 Mon Sep 17 00:00:00 2001 From: QuantumGhost Date: Mon, 27 Oct 2025 21:15:44 +0800 Subject: [PATCH 08/15] fix(api): Disable SSE events truncation for service api (#27484) Disable SSE events truncation for service api invocations to ensure backward compatibility. Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- .../common/workflow_response_converter.py | 14 +- api/services/variable_truncator.py | 48 +- ...orkflow_response_converter_process_data.py | 324 ------- ..._workflow_response_converter_truncation.py | 810 ++++++++++++++++++ .../services/test_variable_truncator.py | 31 + 5 files changed, 899 insertions(+), 328 deletions(-) delete mode 100644 api/tests/unit_tests/core/app/apps/common/test_workflow_response_converter_process_data.py create mode 100644 api/tests/unit_tests/core/app/apps/common/test_workflow_response_converter_truncation.py diff --git a/api/core/app/apps/common/workflow_response_converter.py b/api/core/app/apps/common/workflow_response_converter.py index 2c9ce5b56d..eebaaaff80 100644 --- a/api/core/app/apps/common/workflow_response_converter.py +++ b/api/core/app/apps/common/workflow_response_converter.py @@ -4,7 +4,7 @@ from dataclasses import dataclass from datetime import datetime from typing import Any, NewType, Union -from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, WorkflowAppGenerateEntity +from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, InvokeFrom, WorkflowAppGenerateEntity from core.app.entities.queue_entities import ( QueueAgentLogEvent, QueueIterationCompletedEvent, @@ -51,7 +51,7 @@ from core.workflow.workflow_entry import WorkflowEntry from core.workflow.workflow_type_encoder import WorkflowRuntimeTypeConverter from libs.datetime_utils import naive_utc_now from models import Account, EndUser -from services.variable_truncator import VariableTruncator +from services.variable_truncator import BaseTruncator, DummyVariableTruncator, VariableTruncator NodeExecutionId = NewType("NodeExecutionId", str) @@ -70,6 +70,8 @@ class _NodeSnapshot: class WorkflowResponseConverter: + _truncator: BaseTruncator + def __init__( self, *, @@ -81,7 +83,13 @@ class WorkflowResponseConverter: self._user = user self._system_variables = system_variables self._workflow_inputs = 
self._prepare_workflow_inputs() - self._truncator = VariableTruncator.default() + + # Disable truncation for SERVICE_API calls to keep backward compatibility. + if application_generate_entity.invoke_from == InvokeFrom.SERVICE_API: + self._truncator = DummyVariableTruncator() + else: + self._truncator = VariableTruncator.default() + self._node_snapshots: dict[NodeExecutionId, _NodeSnapshot] = {} self._workflow_execution_id: str | None = None self._workflow_started_at: datetime | None = None diff --git a/api/services/variable_truncator.py b/api/services/variable_truncator.py index 6f8adb7536..6eb8d0031d 100644 --- a/api/services/variable_truncator.py +++ b/api/services/variable_truncator.py @@ -1,4 +1,5 @@ import dataclasses +from abc import ABC, abstractmethod from collections.abc import Mapping from typing import Any, Generic, TypeAlias, TypeVar, overload @@ -66,7 +67,17 @@ class TruncationResult: truncated: bool -class VariableTruncator: +class BaseTruncator(ABC): + @abstractmethod + def truncate(self, segment: Segment) -> TruncationResult: + pass + + @abstractmethod + def truncate_variable_mapping(self, v: Mapping[str, Any]) -> tuple[Mapping[str, Any], bool]: + pass + + +class VariableTruncator(BaseTruncator): """ Handles variable truncation with structure-preserving strategies. @@ -418,3 +429,38 @@ class VariableTruncator: return _PartResult(val, self.calculate_json_size(val), False) else: raise AssertionError("this statement should be unreachable.") + + +class DummyVariableTruncator(BaseTruncator): + """ + A no-op variable truncator that doesn't truncate any data. + + This is used for Service API calls where truncation should be disabled + to maintain backward compatibility and provide complete data. + """ + + def truncate_variable_mapping(self, v: Mapping[str, Any]) -> tuple[Mapping[str, Any], bool]: + """ + Return original mapping without truncation. + + Args: + v: The variable mapping to process + + Returns: + Tuple of (original_mapping, False) where False indicates no truncation occurred + """ + return v, False + + def truncate(self, segment: Segment) -> TruncationResult: + """ + Return original segment without truncation. + + Args: + segment: The segment to process + + Returns: + The original segment unchanged + """ + # For Service API, we want to preserve the original segment + # without any truncation, so just return it as-is + return TruncationResult(result=segment, truncated=False) diff --git a/api/tests/unit_tests/core/app/apps/common/test_workflow_response_converter_process_data.py b/api/tests/unit_tests/core/app/apps/common/test_workflow_response_converter_process_data.py deleted file mode 100644 index abe09fb8a4..0000000000 --- a/api/tests/unit_tests/core/app/apps/common/test_workflow_response_converter_process_data.py +++ /dev/null @@ -1,324 +0,0 @@ -""" -Unit tests for WorkflowResponseConverter focusing on process_data truncation functionality. 
-""" - -import uuid -from collections.abc import Mapping -from typing import Any -from unittest.mock import Mock - -import pytest - -from core.app.apps.common.workflow_response_converter import WorkflowResponseConverter -from core.app.entities.app_invoke_entities import WorkflowAppGenerateEntity -from core.app.entities.queue_entities import ( - QueueNodeRetryEvent, - QueueNodeStartedEvent, - QueueNodeSucceededEvent, -) -from core.workflow.enums import NodeType -from core.workflow.system_variable import SystemVariable -from libs.datetime_utils import naive_utc_now -from models import Account - - -class TestWorkflowResponseConverterCenarios: - """Test process_data truncation in WorkflowResponseConverter.""" - - def create_mock_generate_entity(self) -> WorkflowAppGenerateEntity: - """Create a mock WorkflowAppGenerateEntity.""" - mock_entity = Mock(spec=WorkflowAppGenerateEntity) - mock_app_config = Mock() - mock_app_config.tenant_id = "test-tenant-id" - mock_entity.app_config = mock_app_config - mock_entity.inputs = {} - return mock_entity - - def create_workflow_response_converter(self) -> WorkflowResponseConverter: - """Create a WorkflowResponseConverter for testing.""" - - mock_entity = self.create_mock_generate_entity() - mock_user = Mock(spec=Account) - mock_user.id = "test-user-id" - mock_user.name = "Test User" - mock_user.email = "test@example.com" - - system_variables = SystemVariable(workflow_id="wf-id", workflow_execution_id="initial-run-id") - return WorkflowResponseConverter( - application_generate_entity=mock_entity, - user=mock_user, - system_variables=system_variables, - ) - - def create_node_started_event(self, *, node_execution_id: str | None = None) -> QueueNodeStartedEvent: - """Create a QueueNodeStartedEvent for testing.""" - return QueueNodeStartedEvent( - node_execution_id=node_execution_id or str(uuid.uuid4()), - node_id="test-node-id", - node_title="Test Node", - node_type=NodeType.CODE, - start_at=naive_utc_now(), - predecessor_node_id=None, - in_iteration_id=None, - in_loop_id=None, - provider_type="built-in", - provider_id="code", - ) - - def create_node_succeeded_event( - self, - *, - node_execution_id: str, - process_data: Mapping[str, Any] | None = None, - ) -> QueueNodeSucceededEvent: - """Create a QueueNodeSucceededEvent for testing.""" - return QueueNodeSucceededEvent( - node_id="test-node-id", - node_type=NodeType.CODE, - node_execution_id=node_execution_id, - start_at=naive_utc_now(), - in_iteration_id=None, - in_loop_id=None, - inputs={}, - process_data=process_data or {}, - outputs={}, - execution_metadata={}, - ) - - def create_node_retry_event( - self, - *, - node_execution_id: str, - process_data: Mapping[str, Any] | None = None, - ) -> QueueNodeRetryEvent: - """Create a QueueNodeRetryEvent for testing.""" - return QueueNodeRetryEvent( - inputs={"data": "inputs"}, - outputs={"data": "outputs"}, - process_data=process_data or {}, - error="oops", - retry_index=1, - node_id="test-node-id", - node_type=NodeType.CODE, - node_title="test code", - provider_type="built-in", - provider_id="code", - node_execution_id=node_execution_id, - start_at=naive_utc_now(), - in_iteration_id=None, - in_loop_id=None, - ) - - def test_workflow_node_finish_response_uses_truncated_process_data(self): - """Test that node finish response uses get_response_process_data().""" - converter = self.create_workflow_response_converter() - - original_data = {"large_field": "x" * 10000, "metadata": "info"} - truncated_data = {"large_field": "[TRUNCATED]", "metadata": "info"} - - 
converter.workflow_start_to_stream_response(task_id="bootstrap", workflow_run_id="run-id", workflow_id="wf-id") - start_event = self.create_node_started_event() - converter.workflow_node_start_to_stream_response( - event=start_event, - task_id="test-task-id", - ) - - event = self.create_node_succeeded_event( - node_execution_id=start_event.node_execution_id, - process_data=original_data, - ) - - def fake_truncate(mapping): - if mapping == dict(original_data): - return truncated_data, True - return mapping, False - - converter._truncator.truncate_variable_mapping = fake_truncate # type: ignore[assignment] - - response = converter.workflow_node_finish_to_stream_response( - event=event, - task_id="test-task-id", - ) - - # Response should use truncated data, not original - assert response is not None - assert response.data.process_data == truncated_data - assert response.data.process_data != original_data - assert response.data.process_data_truncated is True - - def test_workflow_node_finish_response_without_truncation(self): - """Test node finish response when no truncation is applied.""" - converter = self.create_workflow_response_converter() - - original_data = {"small": "data"} - - converter.workflow_start_to_stream_response(task_id="bootstrap", workflow_run_id="run-id", workflow_id="wf-id") - start_event = self.create_node_started_event() - converter.workflow_node_start_to_stream_response( - event=start_event, - task_id="test-task-id", - ) - - event = self.create_node_succeeded_event( - node_execution_id=start_event.node_execution_id, - process_data=original_data, - ) - - def fake_truncate(mapping): - return mapping, False - - converter._truncator.truncate_variable_mapping = fake_truncate # type: ignore[assignment] - - response = converter.workflow_node_finish_to_stream_response( - event=event, - task_id="test-task-id", - ) - - # Response should use original data - assert response is not None - assert response.data.process_data == original_data - assert response.data.process_data_truncated is False - - def test_workflow_node_finish_response_with_none_process_data(self): - """Test node finish response when process_data is None.""" - converter = self.create_workflow_response_converter() - - converter.workflow_start_to_stream_response(task_id="bootstrap", workflow_run_id="run-id", workflow_id="wf-id") - start_event = self.create_node_started_event() - converter.workflow_node_start_to_stream_response( - event=start_event, - task_id="test-task-id", - ) - - event = self.create_node_succeeded_event( - node_execution_id=start_event.node_execution_id, - process_data=None, - ) - - def fake_truncate(mapping): - return mapping, False - - converter._truncator.truncate_variable_mapping = fake_truncate # type: ignore[assignment] - - response = converter.workflow_node_finish_to_stream_response( - event=event, - task_id="test-task-id", - ) - - # Response should normalize missing process_data to an empty mapping - assert response is not None - assert response.data.process_data == {} - assert response.data.process_data_truncated is False - - def test_workflow_node_retry_response_uses_truncated_process_data(self): - """Test that node retry response uses get_response_process_data().""" - converter = self.create_workflow_response_converter() - - original_data = {"large_field": "x" * 10000, "metadata": "info"} - truncated_data = {"large_field": "[TRUNCATED]", "metadata": "info"} - - converter.workflow_start_to_stream_response(task_id="bootstrap", workflow_run_id="run-id", workflow_id="wf-id") - start_event = 
self.create_node_started_event() - converter.workflow_node_start_to_stream_response( - event=start_event, - task_id="test-task-id", - ) - - event = self.create_node_retry_event( - node_execution_id=start_event.node_execution_id, - process_data=original_data, - ) - - def fake_truncate(mapping): - if mapping == dict(original_data): - return truncated_data, True - return mapping, False - - converter._truncator.truncate_variable_mapping = fake_truncate # type: ignore[assignment] - - response = converter.workflow_node_retry_to_stream_response( - event=event, - task_id="test-task-id", - ) - - # Response should use truncated data, not original - assert response is not None - assert response.data.process_data == truncated_data - assert response.data.process_data != original_data - assert response.data.process_data_truncated is True - - def test_workflow_node_retry_response_without_truncation(self): - """Test node retry response when no truncation is applied.""" - converter = self.create_workflow_response_converter() - - original_data = {"small": "data"} - - converter.workflow_start_to_stream_response(task_id="bootstrap", workflow_run_id="run-id", workflow_id="wf-id") - start_event = self.create_node_started_event() - converter.workflow_node_start_to_stream_response( - event=start_event, - task_id="test-task-id", - ) - - event = self.create_node_retry_event( - node_execution_id=start_event.node_execution_id, - process_data=original_data, - ) - - def fake_truncate(mapping): - return mapping, False - - converter._truncator.truncate_variable_mapping = fake_truncate # type: ignore[assignment] - - response = converter.workflow_node_retry_to_stream_response( - event=event, - task_id="test-task-id", - ) - - assert response is not None - assert response.data.process_data == original_data - assert response.data.process_data_truncated is False - - def test_iteration_and_loop_nodes_return_none(self): - """Test that iteration and loop nodes return None (no streaming events).""" - converter = self.create_workflow_response_converter() - - iteration_event = QueueNodeSucceededEvent( - node_id="iteration-node", - node_type=NodeType.ITERATION, - node_execution_id=str(uuid.uuid4()), - start_at=naive_utc_now(), - in_iteration_id=None, - in_loop_id=None, - inputs={}, - process_data={}, - outputs={}, - execution_metadata={}, - ) - - response = converter.workflow_node_finish_to_stream_response( - event=iteration_event, - task_id="test-task-id", - ) - assert response is None - - loop_event = iteration_event.model_copy(update={"node_type": NodeType.LOOP}) - response = converter.workflow_node_finish_to_stream_response( - event=loop_event, - task_id="test-task-id", - ) - assert response is None - - def test_finish_without_start_raises(self): - """Ensure finish responses require a prior workflow start.""" - converter = self.create_workflow_response_converter() - event = self.create_node_succeeded_event( - node_execution_id=str(uuid.uuid4()), - process_data={}, - ) - - with pytest.raises(ValueError): - converter.workflow_node_finish_to_stream_response( - event=event, - task_id="test-task-id", - ) diff --git a/api/tests/unit_tests/core/app/apps/common/test_workflow_response_converter_truncation.py b/api/tests/unit_tests/core/app/apps/common/test_workflow_response_converter_truncation.py new file mode 100644 index 0000000000..964d62be1f --- /dev/null +++ b/api/tests/unit_tests/core/app/apps/common/test_workflow_response_converter_truncation.py @@ -0,0 +1,810 @@ +""" +Unit tests for WorkflowResponseConverter focusing on 
process_data truncation functionality. +""" + +import uuid +from collections.abc import Mapping +from dataclasses import dataclass +from typing import Any +from unittest.mock import Mock + +import pytest + +from core.app.app_config.entities import WorkflowUIBasedAppConfig +from core.app.apps.common.workflow_response_converter import WorkflowResponseConverter +from core.app.entities.app_invoke_entities import InvokeFrom, WorkflowAppGenerateEntity +from core.app.entities.queue_entities import ( + QueueEvent, + QueueIterationStartEvent, + QueueLoopStartEvent, + QueueNodeExceptionEvent, + QueueNodeFailedEvent, + QueueNodeRetryEvent, + QueueNodeStartedEvent, + QueueNodeSucceededEvent, +) +from core.workflow.enums import NodeType +from core.workflow.system_variable import SystemVariable +from libs.datetime_utils import naive_utc_now +from models import Account +from models.model import AppMode + + +class TestWorkflowResponseConverter: + """Test truncation in WorkflowResponseConverter.""" + + def create_mock_generate_entity(self) -> WorkflowAppGenerateEntity: + """Create a mock WorkflowAppGenerateEntity.""" + mock_entity = Mock(spec=WorkflowAppGenerateEntity) + mock_app_config = Mock() + mock_app_config.tenant_id = "test-tenant-id" + mock_entity.invoke_from = InvokeFrom.WEB_APP + mock_entity.app_config = mock_app_config + mock_entity.inputs = {} + return mock_entity + + def create_workflow_response_converter(self) -> WorkflowResponseConverter: + """Create a WorkflowResponseConverter for testing.""" + + mock_entity = self.create_mock_generate_entity() + mock_user = Mock(spec=Account) + mock_user.id = "test-user-id" + mock_user.name = "Test User" + mock_user.email = "test@example.com" + + system_variables = SystemVariable(workflow_id="wf-id", workflow_execution_id="initial-run-id") + return WorkflowResponseConverter( + application_generate_entity=mock_entity, + user=mock_user, + system_variables=system_variables, + ) + + def create_node_started_event(self, *, node_execution_id: str | None = None) -> QueueNodeStartedEvent: + """Create a QueueNodeStartedEvent for testing.""" + return QueueNodeStartedEvent( + node_execution_id=node_execution_id or str(uuid.uuid4()), + node_id="test-node-id", + node_title="Test Node", + node_type=NodeType.CODE, + start_at=naive_utc_now(), + in_iteration_id=None, + in_loop_id=None, + provider_type="built-in", + provider_id="code", + ) + + def create_node_succeeded_event( + self, + *, + node_execution_id: str, + process_data: Mapping[str, Any] | None = None, + ) -> QueueNodeSucceededEvent: + """Create a QueueNodeSucceededEvent for testing.""" + return QueueNodeSucceededEvent( + node_id="test-node-id", + node_type=NodeType.CODE, + node_execution_id=node_execution_id, + start_at=naive_utc_now(), + in_iteration_id=None, + in_loop_id=None, + inputs={}, + process_data=process_data or {}, + outputs={}, + execution_metadata={}, + ) + + def create_node_retry_event( + self, + *, + node_execution_id: str, + process_data: Mapping[str, Any] | None = None, + ) -> QueueNodeRetryEvent: + """Create a QueueNodeRetryEvent for testing.""" + return QueueNodeRetryEvent( + inputs={"data": "inputs"}, + outputs={"data": "outputs"}, + process_data=process_data or {}, + error="oops", + retry_index=1, + node_id="test-node-id", + node_type=NodeType.CODE, + node_title="test code", + provider_type="built-in", + provider_id="code", + node_execution_id=node_execution_id, + start_at=naive_utc_now(), + in_iteration_id=None, + in_loop_id=None, + ) + + def 
test_workflow_node_finish_response_uses_truncated_process_data(self): + """Test that node finish response uses get_response_process_data().""" + converter = self.create_workflow_response_converter() + + original_data = {"large_field": "x" * 10000, "metadata": "info"} + truncated_data = {"large_field": "[TRUNCATED]", "metadata": "info"} + + converter.workflow_start_to_stream_response(task_id="bootstrap", workflow_run_id="run-id", workflow_id="wf-id") + start_event = self.create_node_started_event() + converter.workflow_node_start_to_stream_response( + event=start_event, + task_id="test-task-id", + ) + + event = self.create_node_succeeded_event( + node_execution_id=start_event.node_execution_id, + process_data=original_data, + ) + + def fake_truncate(mapping): + if mapping == dict(original_data): + return truncated_data, True + return mapping, False + + converter._truncator.truncate_variable_mapping = fake_truncate # type: ignore[assignment] + + response = converter.workflow_node_finish_to_stream_response( + event=event, + task_id="test-task-id", + ) + + # Response should use truncated data, not original + assert response is not None + assert response.data.process_data == truncated_data + assert response.data.process_data != original_data + assert response.data.process_data_truncated is True + + def test_workflow_node_finish_response_without_truncation(self): + """Test node finish response when no truncation is applied.""" + converter = self.create_workflow_response_converter() + + original_data = {"small": "data"} + + converter.workflow_start_to_stream_response(task_id="bootstrap", workflow_run_id="run-id", workflow_id="wf-id") + start_event = self.create_node_started_event() + converter.workflow_node_start_to_stream_response( + event=start_event, + task_id="test-task-id", + ) + + event = self.create_node_succeeded_event( + node_execution_id=start_event.node_execution_id, + process_data=original_data, + ) + + def fake_truncate(mapping): + return mapping, False + + converter._truncator.truncate_variable_mapping = fake_truncate # type: ignore[assignment] + + response = converter.workflow_node_finish_to_stream_response( + event=event, + task_id="test-task-id", + ) + + # Response should use original data + assert response is not None + assert response.data.process_data == original_data + assert response.data.process_data_truncated is False + + def test_workflow_node_finish_response_with_none_process_data(self): + """Test node finish response when process_data is None.""" + converter = self.create_workflow_response_converter() + + converter.workflow_start_to_stream_response(task_id="bootstrap", workflow_run_id="run-id", workflow_id="wf-id") + start_event = self.create_node_started_event() + converter.workflow_node_start_to_stream_response( + event=start_event, + task_id="test-task-id", + ) + + event = self.create_node_succeeded_event( + node_execution_id=start_event.node_execution_id, + process_data=None, + ) + + def fake_truncate(mapping): + return mapping, False + + converter._truncator.truncate_variable_mapping = fake_truncate # type: ignore[assignment] + + response = converter.workflow_node_finish_to_stream_response( + event=event, + task_id="test-task-id", + ) + + # Response should normalize missing process_data to an empty mapping + assert response is not None + assert response.data.process_data == {} + assert response.data.process_data_truncated is False + + def test_workflow_node_retry_response_uses_truncated_process_data(self): + """Test that node retry response uses 
get_response_process_data().""" + converter = self.create_workflow_response_converter() + + original_data = {"large_field": "x" * 10000, "metadata": "info"} + truncated_data = {"large_field": "[TRUNCATED]", "metadata": "info"} + + converter.workflow_start_to_stream_response(task_id="bootstrap", workflow_run_id="run-id", workflow_id="wf-id") + start_event = self.create_node_started_event() + converter.workflow_node_start_to_stream_response( + event=start_event, + task_id="test-task-id", + ) + + event = self.create_node_retry_event( + node_execution_id=start_event.node_execution_id, + process_data=original_data, + ) + + def fake_truncate(mapping): + if mapping == dict(original_data): + return truncated_data, True + return mapping, False + + converter._truncator.truncate_variable_mapping = fake_truncate # type: ignore[assignment] + + response = converter.workflow_node_retry_to_stream_response( + event=event, + task_id="test-task-id", + ) + + # Response should use truncated data, not original + assert response is not None + assert response.data.process_data == truncated_data + assert response.data.process_data != original_data + assert response.data.process_data_truncated is True + + def test_workflow_node_retry_response_without_truncation(self): + """Test node retry response when no truncation is applied.""" + converter = self.create_workflow_response_converter() + + original_data = {"small": "data"} + + converter.workflow_start_to_stream_response(task_id="bootstrap", workflow_run_id="run-id", workflow_id="wf-id") + start_event = self.create_node_started_event() + converter.workflow_node_start_to_stream_response( + event=start_event, + task_id="test-task-id", + ) + + event = self.create_node_retry_event( + node_execution_id=start_event.node_execution_id, + process_data=original_data, + ) + + def fake_truncate(mapping): + return mapping, False + + converter._truncator.truncate_variable_mapping = fake_truncate # type: ignore[assignment] + + response = converter.workflow_node_retry_to_stream_response( + event=event, + task_id="test-task-id", + ) + + assert response is not None + assert response.data.process_data == original_data + assert response.data.process_data_truncated is False + + def test_iteration_and_loop_nodes_return_none(self): + """Test that iteration and loop nodes return None (no streaming events).""" + converter = self.create_workflow_response_converter() + + iteration_event = QueueNodeSucceededEvent( + node_id="iteration-node", + node_type=NodeType.ITERATION, + node_execution_id=str(uuid.uuid4()), + start_at=naive_utc_now(), + in_iteration_id=None, + in_loop_id=None, + inputs={}, + process_data={}, + outputs={}, + execution_metadata={}, + ) + + response = converter.workflow_node_finish_to_stream_response( + event=iteration_event, + task_id="test-task-id", + ) + assert response is None + + loop_event = iteration_event.model_copy(update={"node_type": NodeType.LOOP}) + response = converter.workflow_node_finish_to_stream_response( + event=loop_event, + task_id="test-task-id", + ) + assert response is None + + def test_finish_without_start_raises(self): + """Ensure finish responses require a prior workflow start.""" + converter = self.create_workflow_response_converter() + event = self.create_node_succeeded_event( + node_execution_id=str(uuid.uuid4()), + process_data={}, + ) + + with pytest.raises(ValueError): + converter.workflow_node_finish_to_stream_response( + event=event, + task_id="test-task-id", + ) + + +@dataclass +class TestCase: + """Test case data for table-driven tests.""" 
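    # Each case ties an InvokeFrom value to the truncation behaviour the converter is
    # expected to exhibit for it; the parametrized test below iterates over these cases.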
+ + name: str + invoke_from: InvokeFrom + expected_truncation_enabled: bool + description: str + + +class TestWorkflowResponseConverterServiceApiTruncation: + """Test class for Service API truncation functionality in WorkflowResponseConverter.""" + + def create_test_app_generate_entity(self, invoke_from: InvokeFrom) -> WorkflowAppGenerateEntity: + """Create a test WorkflowAppGenerateEntity with specified invoke_from.""" + # Create a minimal WorkflowUIBasedAppConfig for testing + app_config = WorkflowUIBasedAppConfig( + tenant_id="test_tenant", + app_id="test_app", + app_mode=AppMode.WORKFLOW, + workflow_id="test_workflow_id", + ) + + entity = WorkflowAppGenerateEntity( + task_id="test_task_id", + app_id="test_app_id", + app_config=app_config, + tenant_id="test_tenant", + app_mode="workflow", + invoke_from=invoke_from, + inputs={"test_input": "test_value"}, + user_id="test_user_id", + stream=True, + files=[], + workflow_execution_id="test_workflow_exec_id", + ) + return entity + + def create_test_user(self) -> Account: + """Create a test user account.""" + account = Account( + name="Test User", + email="test@example.com", + ) + # Manually set the ID for testing purposes + account.id = "test_user_id" + return account + + def create_test_system_variables(self) -> SystemVariable: + """Create test system variables.""" + return SystemVariable() + + def create_test_converter(self, invoke_from: InvokeFrom) -> WorkflowResponseConverter: + """Create WorkflowResponseConverter with specified invoke_from.""" + entity = self.create_test_app_generate_entity(invoke_from) + user = self.create_test_user() + system_variables = self.create_test_system_variables() + + converter = WorkflowResponseConverter( + application_generate_entity=entity, + user=user, + system_variables=system_variables, + ) + # ensure `workflow_run_id` is set. 
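        # Node-level stream responses require a prior workflow start (the converter raises
        # ValueError otherwise, as test_finish_without_start_raises above exercises), so the
        # helper bootstraps a run before any node events are converted.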
+ converter.workflow_start_to_stream_response( + task_id="test-task-id", + workflow_run_id="test-workflow-run-id", + workflow_id="test-workflow-id", + ) + return converter + + @pytest.mark.parametrize( + "test_case", + [ + TestCase( + name="service_api_truncation_disabled", + invoke_from=InvokeFrom.SERVICE_API, + expected_truncation_enabled=False, + description="Service API calls should have truncation disabled", + ), + TestCase( + name="web_app_truncation_enabled", + invoke_from=InvokeFrom.WEB_APP, + expected_truncation_enabled=True, + description="Web app calls should have truncation enabled", + ), + TestCase( + name="debugger_truncation_enabled", + invoke_from=InvokeFrom.DEBUGGER, + expected_truncation_enabled=True, + description="Debugger calls should have truncation enabled", + ), + TestCase( + name="explore_truncation_enabled", + invoke_from=InvokeFrom.EXPLORE, + expected_truncation_enabled=True, + description="Explore calls should have truncation enabled", + ), + TestCase( + name="published_truncation_enabled", + invoke_from=InvokeFrom.PUBLISHED, + expected_truncation_enabled=True, + description="Published app calls should have truncation enabled", + ), + ], + ids=lambda x: x.name, + ) + def test_truncator_selection_based_on_invoke_from(self, test_case: TestCase): + """Test that the correct truncator is selected based on invoke_from.""" + converter = self.create_test_converter(test_case.invoke_from) + + # Test truncation behavior instead of checking private attribute + + # Create a test event with large data + large_value = {"key": ["x"] * 2000} # Large data that would be truncated + + event = QueueNodeSucceededEvent( + node_execution_id="test_node_exec_id", + node_id="test_node", + node_type=NodeType.LLM, + start_at=naive_utc_now(), + inputs=large_value, + process_data=large_value, + outputs=large_value, + error=None, + execution_metadata=None, + in_iteration_id=None, + in_loop_id=None, + ) + + response = converter.workflow_node_finish_to_stream_response( + event=event, + task_id="test_task", + ) + + # Verify response is not None + assert response is not None + + # Verify truncation behavior matches expectations + if test_case.expected_truncation_enabled: + # Truncation should be enabled for non-service-api calls + assert response.data.inputs_truncated + assert response.data.process_data_truncated + assert response.data.outputs_truncated + else: + # SERVICE_API should not truncate + assert not response.data.inputs_truncated + assert not response.data.process_data_truncated + assert not response.data.outputs_truncated + + def test_service_api_truncator_no_op_mapping(self): + """Test that Service API truncator doesn't truncate variable mappings.""" + converter = self.create_test_converter(InvokeFrom.SERVICE_API) + + # Create a test event with large data + large_value: dict[str, Any] = { + "large_string": "x" * 10000, # Large string + "large_list": list(range(2000)), # Large array + "nested_data": {"deep_nested": {"very_deep": {"value": "x" * 5000}}}, + } + + event = QueueNodeSucceededEvent( + node_execution_id="test_node_exec_id", + node_id="test_node", + node_type=NodeType.LLM, + start_at=naive_utc_now(), + inputs=large_value, + process_data=large_value, + outputs=large_value, + error=None, + execution_metadata=None, + in_iteration_id=None, + in_loop_id=None, + ) + + response = converter.workflow_node_finish_to_stream_response( + event=event, + task_id="test_task", + ) + + # Verify response is not None + data = response.data + assert data.inputs == large_value + assert 
data.process_data == large_value + assert data.outputs == large_value + # Service API should not truncate + assert data.inputs_truncated is False + assert data.process_data_truncated is False + assert data.outputs_truncated is False + + def test_web_app_truncator_works_normally(self): + """Test that web app truncator still works normally.""" + converter = self.create_test_converter(InvokeFrom.WEB_APP) + + # Create a test event with large data + large_value = { + "large_string": "x" * 10000, # Large string + "large_list": list(range(2000)), # Large array + } + + event = QueueNodeSucceededEvent( + node_execution_id="test_node_exec_id", + node_id="test_node", + node_type=NodeType.LLM, + start_at=naive_utc_now(), + inputs=large_value, + process_data=large_value, + outputs=large_value, + error=None, + execution_metadata=None, + in_iteration_id=None, + in_loop_id=None, + ) + + response = converter.workflow_node_finish_to_stream_response( + event=event, + task_id="test_task", + ) + + # Verify response is not None + assert response is not None + + # Web app should truncate + data = response.data + assert data.inputs != large_value + assert data.process_data != large_value + assert data.outputs != large_value + # The exact behavior depends on VariableTruncator implementation + # Just verify that truncation flags are present + assert data.inputs_truncated is True + assert data.process_data_truncated is True + assert data.outputs_truncated is True + + @staticmethod + def _create_event_by_type( + type_: QueueEvent, inputs: Mapping[str, Any], process_data: Mapping[str, Any], outputs: Mapping[str, Any] + ) -> QueueNodeSucceededEvent | QueueNodeFailedEvent | QueueNodeExceptionEvent: + if type_ == QueueEvent.NODE_SUCCEEDED: + return QueueNodeSucceededEvent( + node_execution_id="test_node_exec_id", + node_id="test_node", + node_type=NodeType.LLM, + start_at=naive_utc_now(), + inputs=inputs, + process_data=process_data, + outputs=outputs, + error=None, + execution_metadata=None, + in_iteration_id=None, + in_loop_id=None, + ) + elif type_ == QueueEvent.NODE_FAILED: + return QueueNodeFailedEvent( + node_execution_id="test_node_exec_id", + node_id="test_node", + node_type=NodeType.LLM, + start_at=naive_utc_now(), + inputs=inputs, + process_data=process_data, + outputs=outputs, + error="oops", + execution_metadata=None, + in_iteration_id=None, + in_loop_id=None, + ) + elif type_ == QueueEvent.NODE_EXCEPTION: + return QueueNodeExceptionEvent( + node_execution_id="test_node_exec_id", + node_id="test_node", + node_type=NodeType.LLM, + start_at=naive_utc_now(), + inputs=inputs, + process_data=process_data, + outputs=outputs, + error="oops", + execution_metadata=None, + in_iteration_id=None, + in_loop_id=None, + ) + else: + raise Exception("unknown type.") + + @pytest.mark.parametrize( + "event_type", + [ + QueueEvent.NODE_SUCCEEDED, + QueueEvent.NODE_FAILED, + QueueEvent.NODE_EXCEPTION, + ], + ) + def test_service_api_node_finish_event_no_truncation(self, event_type: QueueEvent): + """Test that Service API doesn't truncate node finish events.""" + converter = self.create_test_converter(InvokeFrom.SERVICE_API) + # Create test event with large data + large_inputs = {"input1": "x" * 5000, "input2": list(range(2000))} + large_process_data = {"process1": "y" * 5000, "process2": {"nested": ["z"] * 2000}} + large_outputs = {"output1": "result" * 1000, "output2": list(range(2000))} + + event = TestWorkflowResponseConverterServiceApiTruncation._create_event_by_type( + event_type, large_inputs, large_process_data, 
large_outputs + ) + + response = converter.workflow_node_finish_to_stream_response( + event=event, + task_id="test_task", + ) + + # Verify response is not None + assert response is not None + + # Verify response contains full data (not truncated) + assert response.data.inputs == large_inputs + assert response.data.process_data == large_process_data + assert response.data.outputs == large_outputs + assert not response.data.inputs_truncated + assert not response.data.process_data_truncated + assert not response.data.outputs_truncated + + def test_service_api_node_retry_event_no_truncation(self): + """Test that Service API doesn't truncate node retry events.""" + converter = self.create_test_converter(InvokeFrom.SERVICE_API) + + # Create test event with large data + large_inputs = {"retry_input": "x" * 5000} + large_process_data = {"retry_process": "y" * 5000} + large_outputs = {"retry_output": "z" * 5000} + + # First, we need to store a snapshot by simulating a start event + start_event = QueueNodeStartedEvent( + node_execution_id="test_node_exec_id", + node_id="test_node", + node_type=NodeType.LLM, + node_title="Test Node", + node_run_index=1, + start_at=naive_utc_now(), + in_iteration_id=None, + in_loop_id=None, + agent_strategy=None, + provider_type="plugin", + provider_id="test/test_plugin", + ) + converter.workflow_node_start_to_stream_response(event=start_event, task_id="test_task") + + # Now create retry event + event = QueueNodeRetryEvent( + node_execution_id="test_node_exec_id", + node_id="test_node", + node_type=NodeType.LLM, + node_title="Test Node", + node_run_index=1, + start_at=naive_utc_now(), + inputs=large_inputs, + process_data=large_process_data, + outputs=large_outputs, + error="Retry error", + execution_metadata=None, + in_iteration_id=None, + in_loop_id=None, + retry_index=1, + provider_type="plugin", + provider_id="test/test_plugin", + ) + + response = converter.workflow_node_retry_to_stream_response( + event=event, + task_id="test_task", + ) + + # Verify response is not None + assert response is not None + + # Verify response contains full data (not truncated) + assert response.data.inputs == large_inputs + assert response.data.process_data == large_process_data + assert response.data.outputs == large_outputs + assert not response.data.inputs_truncated + assert not response.data.process_data_truncated + assert not response.data.outputs_truncated + + def test_service_api_iteration_events_no_truncation(self): + """Test that Service API doesn't truncate iteration events.""" + converter = self.create_test_converter(InvokeFrom.SERVICE_API) + + # Test iteration start event + large_value = {"iteration_input": ["x"] * 2000} + + start_event = QueueIterationStartEvent( + node_execution_id="test_iter_exec_id", + node_id="test_iteration", + node_type=NodeType.ITERATION, + node_title="Test Iteration", + node_run_index=0, + start_at=naive_utc_now(), + inputs=large_value, + metadata={}, + ) + + response = converter.workflow_iteration_start_to_stream_response( + task_id="test_task", + workflow_execution_id="test_workflow_exec_id", + event=start_event, + ) + + assert response is not None + assert response.data.inputs == large_value + assert not response.data.inputs_truncated + + def test_service_api_loop_events_no_truncation(self): + """Test that Service API doesn't truncate loop events.""" + converter = self.create_test_converter(InvokeFrom.SERVICE_API) + + # Test loop start event + large_inputs = {"loop_input": ["x"] * 2000} + + start_event = QueueLoopStartEvent( + 
node_execution_id="test_loop_exec_id", + node_id="test_loop", + node_type=NodeType.LOOP, + node_title="Test Loop", + start_at=naive_utc_now(), + inputs=large_inputs, + metadata={}, + node_run_index=0, + ) + + response = converter.workflow_loop_start_to_stream_response( + task_id="test_task", + workflow_execution_id="test_workflow_exec_id", + event=start_event, + ) + + assert response is not None + assert response.data.inputs == large_inputs + assert not response.data.inputs_truncated + + def test_web_app_node_finish_event_truncation_works(self): + """Test that web app still truncates node finish events.""" + converter = self.create_test_converter(InvokeFrom.WEB_APP) + + # Create test event with large data that should be truncated + large_inputs = {"input1": ["x"] * 2000} + large_process_data = {"process1": ["y"] * 2000} + large_outputs = {"output1": ["z"] * 2000} + + event = QueueNodeSucceededEvent( + node_execution_id="test_node_exec_id", + node_id="test_node", + node_type=NodeType.LLM, + start_at=naive_utc_now(), + inputs=large_inputs, + process_data=large_process_data, + outputs=large_outputs, + error=None, + execution_metadata=None, + in_iteration_id=None, + in_loop_id=None, + ) + + response = converter.workflow_node_finish_to_stream_response( + event=event, + task_id="test_task", + ) + + # Verify response is not None + assert response is not None + + # Verify response contains truncated data + # The exact behavior depends on VariableTruncator implementation + # Just verify truncation flags are set correctly (may or may not be truncated depending on size) + # At minimum, the truncation mechanism should work + assert isinstance(response.data.inputs, dict) + assert response.data.inputs_truncated + assert isinstance(response.data.process_data, dict) + assert response.data.process_data_truncated + assert isinstance(response.data.outputs, dict) + assert response.data.outputs_truncated diff --git a/api/tests/unit_tests/services/test_variable_truncator.py b/api/tests/unit_tests/services/test_variable_truncator.py index 6761f939e3..cf6fb25c1c 100644 --- a/api/tests/unit_tests/services/test_variable_truncator.py +++ b/api/tests/unit_tests/services/test_variable_truncator.py @@ -21,6 +21,7 @@ from core.file.enums import FileTransferMethod, FileType from core.file.models import File from core.variables.segments import ( ArrayFileSegment, + ArrayNumberSegment, ArraySegment, FileSegment, FloatSegment, @@ -30,6 +31,7 @@ from core.variables.segments import ( StringSegment, ) from services.variable_truncator import ( + DummyVariableTruncator, MaxDepthExceededError, TruncationResult, UnknownTypeError, @@ -596,3 +598,32 @@ class TestIntegrationScenarios: truncated_mapping, truncated = truncator.truncate_variable_mapping(mapping) assert truncated is False assert truncated_mapping == mapping + + +def test_dummy_variable_truncator_methods(): + """Test DummyVariableTruncator methods work correctly.""" + truncator = DummyVariableTruncator() + + # Test truncate_variable_mapping + test_data: dict[str, Any] = { + "key1": "value1", + "key2": ["item1", "item2"], + "large_array": list(range(2000)), + } + result, is_truncated = truncator.truncate_variable_mapping(test_data) + + assert result == test_data + assert not is_truncated + + # Test truncate method + segment = StringSegment(value="test string") + result = truncator.truncate(segment) + assert isinstance(result, TruncationResult) + assert result.result == segment + assert result.truncated is False + + segment = ArrayNumberSegment(value=list(range(2000))) + 
result = truncator.truncate(segment) + assert isinstance(result, TruncationResult) + assert result.result == segment + assert result.truncated is False From 29afc0657db4f8eef2c07c83fbe0bbc170acc074 Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Tue, 28 Oct 2025 09:19:54 +0800 Subject: [PATCH 09/15] Fix/27468 in dify 192 the iframe embed cannot pass the user id in system variable (#27524) Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- web/__tests__/embedded-user-id-auth.test.tsx | 132 +++++++++++++++ web/__tests__/embedded-user-id-store.test.tsx | 155 ++++++++++++++++++ web/app/(shareLayout)/components/splash.tsx | 9 +- .../webapp-signin/check-code/page.tsx | 7 +- .../components/mail-and-password-auth.tsx | 7 +- .../base/chat/embedded-chatbot/hooks.tsx | 14 +- web/context/web-app-context.tsx | 36 ++++ 7 files changed, 351 insertions(+), 9 deletions(-) create mode 100644 web/__tests__/embedded-user-id-auth.test.tsx create mode 100644 web/__tests__/embedded-user-id-store.test.tsx diff --git a/web/__tests__/embedded-user-id-auth.test.tsx b/web/__tests__/embedded-user-id-auth.test.tsx new file mode 100644 index 0000000000..5c3c3c943f --- /dev/null +++ b/web/__tests__/embedded-user-id-auth.test.tsx @@ -0,0 +1,132 @@ +import React from 'react' +import { fireEvent, render, screen, waitFor } from '@testing-library/react' + +import MailAndPasswordAuth from '@/app/(shareLayout)/webapp-signin/components/mail-and-password-auth' +import CheckCode from '@/app/(shareLayout)/webapp-signin/check-code/page' + +jest.mock('react-i18next', () => ({ + useTranslation: () => ({ + t: (key: string) => key, + }), +})) + +const replaceMock = jest.fn() +const backMock = jest.fn() + +jest.mock('next/navigation', () => ({ + usePathname: jest.fn(() => '/chatbot/test-app'), + useRouter: jest.fn(() => ({ + replace: replaceMock, + back: backMock, + })), + useSearchParams: jest.fn(), +})) + +const mockStoreState = { + embeddedUserId: 'embedded-user-99', + shareCode: 'test-app', +} + +const useWebAppStoreMock = jest.fn((selector?: (state: typeof mockStoreState) => any) => { + return selector ? selector(mockStoreState) : mockStoreState +}) + +jest.mock('@/context/web-app-context', () => ({ + useWebAppStore: (selector?: (state: typeof mockStoreState) => any) => useWebAppStoreMock(selector), +})) + +const webAppLoginMock = jest.fn() +const webAppEmailLoginWithCodeMock = jest.fn() +const sendWebAppEMailLoginCodeMock = jest.fn() + +jest.mock('@/service/common', () => ({ + webAppLogin: (...args: any[]) => webAppLoginMock(...args), + webAppEmailLoginWithCode: (...args: any[]) => webAppEmailLoginWithCodeMock(...args), + sendWebAppEMailLoginCode: (...args: any[]) => sendWebAppEMailLoginCodeMock(...args), +})) + +const fetchAccessTokenMock = jest.fn() + +jest.mock('@/service/share', () => ({ + fetchAccessToken: (...args: any[]) => fetchAccessTokenMock(...args), +})) + +const setWebAppAccessTokenMock = jest.fn() +const setWebAppPassportMock = jest.fn() + +jest.mock('@/service/webapp-auth', () => ({ + setWebAppAccessToken: (...args: any[]) => setWebAppAccessTokenMock(...args), + setWebAppPassport: (...args: any[]) => setWebAppPassportMock(...args), + webAppLogout: jest.fn(), +})) + +jest.mock('@/app/components/signin/countdown', () => () =>
) + +jest.mock('@remixicon/react', () => ({ + RiMailSendFill: () =>
, + RiArrowLeftLine: () =>
, +})) + +const { useSearchParams } = jest.requireMock('next/navigation') as { + useSearchParams: jest.Mock +} + +beforeEach(() => { + jest.clearAllMocks() +}) + +describe('embedded user id propagation in authentication flows', () => { + it('passes embedded user id when logging in with email and password', async () => { + const params = new URLSearchParams() + params.set('redirect_url', encodeURIComponent('/chatbot/test-app')) + useSearchParams.mockReturnValue(params) + + webAppLoginMock.mockResolvedValue({ result: 'success', data: { access_token: 'login-token' } }) + fetchAccessTokenMock.mockResolvedValue({ access_token: 'passport-token' }) + + render() + + fireEvent.change(screen.getByLabelText('login.email'), { target: { value: 'user@example.com' } }) + fireEvent.change(screen.getByLabelText(/login\.password/), { target: { value: 'strong-password' } }) + fireEvent.click(screen.getByRole('button', { name: 'login.signBtn' })) + + await waitFor(() => { + expect(fetchAccessTokenMock).toHaveBeenCalledWith({ + appCode: 'test-app', + userId: 'embedded-user-99', + }) + }) + expect(setWebAppAccessTokenMock).toHaveBeenCalledWith('login-token') + expect(setWebAppPassportMock).toHaveBeenCalledWith('test-app', 'passport-token') + expect(replaceMock).toHaveBeenCalledWith('/chatbot/test-app') + }) + + it('passes embedded user id when verifying email code', async () => { + const params = new URLSearchParams() + params.set('redirect_url', encodeURIComponent('/chatbot/test-app')) + params.set('email', encodeURIComponent('user@example.com')) + params.set('token', encodeURIComponent('token-abc')) + useSearchParams.mockReturnValue(params) + + webAppEmailLoginWithCodeMock.mockResolvedValue({ result: 'success', data: { access_token: 'code-token' } }) + fetchAccessTokenMock.mockResolvedValue({ access_token: 'passport-token' }) + + render() + + fireEvent.change( + screen.getByPlaceholderText('login.checkCode.verificationCodePlaceholder'), + { target: { value: '123456' } }, + ) + fireEvent.click(screen.getByRole('button', { name: 'login.checkCode.verify' })) + + await waitFor(() => { + expect(fetchAccessTokenMock).toHaveBeenCalledWith({ + appCode: 'test-app', + userId: 'embedded-user-99', + }) + }) + expect(setWebAppAccessTokenMock).toHaveBeenCalledWith('code-token') + expect(setWebAppPassportMock).toHaveBeenCalledWith('test-app', 'passport-token') + expect(replaceMock).toHaveBeenCalledWith('/chatbot/test-app') + }) +}) diff --git a/web/__tests__/embedded-user-id-store.test.tsx b/web/__tests__/embedded-user-id-store.test.tsx new file mode 100644 index 0000000000..24a815222e --- /dev/null +++ b/web/__tests__/embedded-user-id-store.test.tsx @@ -0,0 +1,155 @@ +import React from 'react' +import { render, screen, waitFor } from '@testing-library/react' + +import WebAppStoreProvider, { useWebAppStore } from '@/context/web-app-context' + +jest.mock('next/navigation', () => ({ + usePathname: jest.fn(() => '/chatbot/sample-app'), + useSearchParams: jest.fn(() => { + const params = new URLSearchParams() + return params + }), +})) + +jest.mock('@/service/use-share', () => { + const { AccessMode } = jest.requireActual('@/models/access-control') + return { + useGetWebAppAccessModeByCode: jest.fn(() => ({ + isLoading: false, + data: { accessMode: AccessMode.PUBLIC }, + })), + } +}) + +jest.mock('@/app/components/base/chat/utils', () => ({ + getProcessedSystemVariablesFromUrlParams: jest.fn(), +})) + +const { getProcessedSystemVariablesFromUrlParams: mockGetProcessedSystemVariablesFromUrlParams } + = 
jest.requireMock('@/app/components/base/chat/utils') as { + getProcessedSystemVariablesFromUrlParams: jest.Mock + } + +jest.mock('@/context/global-public-context', () => { + const mockGlobalStoreState = { + isGlobalPending: false, + setIsGlobalPending: jest.fn(), + systemFeatures: {}, + setSystemFeatures: jest.fn(), + } + const useGlobalPublicStore = Object.assign( + (selector?: (state: typeof mockGlobalStoreState) => any) => + selector ? selector(mockGlobalStoreState) : mockGlobalStoreState, + { + setState: (updater: any) => { + if (typeof updater === 'function') + Object.assign(mockGlobalStoreState, updater(mockGlobalStoreState) ?? {}) + + else + Object.assign(mockGlobalStoreState, updater) + }, + __mockState: mockGlobalStoreState, + }, + ) + return { + useGlobalPublicStore, + } +}) + +const { + useGlobalPublicStore: useGlobalPublicStoreMock, +} = jest.requireMock('@/context/global-public-context') as { + useGlobalPublicStore: ((selector?: (state: any) => any) => any) & { + setState: (updater: any) => void + __mockState: { + isGlobalPending: boolean + setIsGlobalPending: jest.Mock + systemFeatures: Record + setSystemFeatures: jest.Mock + } + } +} +const mockGlobalStoreState = useGlobalPublicStoreMock.__mockState + +const TestConsumer = () => { + const embeddedUserId = useWebAppStore(state => state.embeddedUserId) + const embeddedConversationId = useWebAppStore(state => state.embeddedConversationId) + return ( + <> +
<div data-testid="embedded-user-id">{embeddedUserId ?? 'null'}</div>
+      <div data-testid="embedded-conversation-id">{embeddedConversationId ?? 'null'}</div>
+ + ) +} + +const initialWebAppStore = (() => { + const snapshot = useWebAppStore.getState() + return { + shareCode: null as string | null, + appInfo: null, + appParams: null, + webAppAccessMode: snapshot.webAppAccessMode, + appMeta: null, + userCanAccessApp: false, + embeddedUserId: null, + embeddedConversationId: null, + updateShareCode: snapshot.updateShareCode, + updateAppInfo: snapshot.updateAppInfo, + updateAppParams: snapshot.updateAppParams, + updateWebAppAccessMode: snapshot.updateWebAppAccessMode, + updateWebAppMeta: snapshot.updateWebAppMeta, + updateUserCanAccessApp: snapshot.updateUserCanAccessApp, + updateEmbeddedUserId: snapshot.updateEmbeddedUserId, + updateEmbeddedConversationId: snapshot.updateEmbeddedConversationId, + } +})() + +beforeEach(() => { + mockGlobalStoreState.isGlobalPending = false + mockGetProcessedSystemVariablesFromUrlParams.mockReset() + useWebAppStore.setState(initialWebAppStore, true) +}) + +describe('WebAppStoreProvider embedded user id handling', () => { + it('hydrates embedded user and conversation ids from system variables', async () => { + mockGetProcessedSystemVariablesFromUrlParams.mockResolvedValue({ + user_id: 'iframe-user-123', + conversation_id: 'conversation-456', + }) + + render( + + + , + ) + + await waitFor(() => { + expect(screen.getByTestId('embedded-user-id')).toHaveTextContent('iframe-user-123') + expect(screen.getByTestId('embedded-conversation-id')).toHaveTextContent('conversation-456') + }) + expect(useWebAppStore.getState().embeddedUserId).toBe('iframe-user-123') + expect(useWebAppStore.getState().embeddedConversationId).toBe('conversation-456') + }) + + it('clears embedded user id when system variable is absent', async () => { + useWebAppStore.setState(state => ({ + ...state, + embeddedUserId: 'previous-user', + embeddedConversationId: 'existing-conversation', + })) + mockGetProcessedSystemVariablesFromUrlParams.mockResolvedValue({}) + + render( + + + , + ) + + await waitFor(() => { + expect(screen.getByTestId('embedded-user-id')).toHaveTextContent('null') + expect(screen.getByTestId('embedded-conversation-id')).toHaveTextContent('null') + }) + expect(useWebAppStore.getState().embeddedUserId).toBeNull() + expect(useWebAppStore.getState().embeddedConversationId).toBeNull() + }) +}) diff --git a/web/app/(shareLayout)/components/splash.tsx b/web/app/(shareLayout)/components/splash.tsx index 16d291d4b4..c30ad68950 100644 --- a/web/app/(shareLayout)/components/splash.tsx +++ b/web/app/(shareLayout)/components/splash.tsx @@ -15,6 +15,7 @@ const Splash: FC = ({ children }) => { const { t } = useTranslation() const shareCode = useWebAppStore(s => s.shareCode) const webAppAccessMode = useWebAppStore(s => s.webAppAccessMode) + const embeddedUserId = useWebAppStore(s => s.embeddedUserId) const searchParams = useSearchParams() const router = useRouter() const redirectUrl = searchParams.get('redirect_url') @@ -69,7 +70,10 @@ const Splash: FC = ({ children }) => { } else if (userLoggedIn && !appLoggedIn) { try { - const { access_token } = await fetchAccessToken({ appCode: shareCode! }) + const { access_token } = await fetchAccessToken({ + appCode: shareCode!, + userId: embeddedUserId || undefined, + }) setWebAppPassport(shareCode!, access_token) redirectOrFinish() } @@ -85,7 +89,8 @@ const Splash: FC = ({ children }) => { router, message, webAppAccessMode, - tokenFromUrl]) + tokenFromUrl, + embeddedUserId]) if (message) { return
diff --git a/web/app/(shareLayout)/webapp-signin/check-code/page.tsx b/web/app/(shareLayout)/webapp-signin/check-code/page.tsx index 4a1326fedf..69131cdabe 100644 --- a/web/app/(shareLayout)/webapp-signin/check-code/page.tsx +++ b/web/app/(shareLayout)/webapp-signin/check-code/page.tsx @@ -12,6 +12,7 @@ import { sendWebAppEMailLoginCode, webAppEmailLoginWithCode } from '@/service/co import I18NContext from '@/context/i18n' import { setWebAppAccessToken, setWebAppPassport } from '@/service/webapp-auth' import { fetchAccessToken } from '@/service/share' +import { useWebAppStore } from '@/context/web-app-context' export default function CheckCode() { const { t } = useTranslation() @@ -23,6 +24,7 @@ export default function CheckCode() { const [loading, setIsLoading] = useState(false) const { locale } = useContext(I18NContext) const redirectUrl = searchParams.get('redirect_url') + const embeddedUserId = useWebAppStore(s => s.embeddedUserId) const getAppCodeFromRedirectUrl = useCallback(() => { if (!redirectUrl) @@ -63,7 +65,10 @@ export default function CheckCode() { const ret = await webAppEmailLoginWithCode({ email, code, token }) if (ret.result === 'success') { setWebAppAccessToken(ret.data.access_token) - const { access_token } = await fetchAccessToken({ appCode: appCode! }) + const { access_token } = await fetchAccessToken({ + appCode: appCode!, + userId: embeddedUserId || undefined, + }) setWebAppPassport(appCode!, access_token) router.replace(decodeURIComponent(redirectUrl)) } diff --git a/web/app/(shareLayout)/webapp-signin/components/mail-and-password-auth.tsx b/web/app/(shareLayout)/webapp-signin/components/mail-and-password-auth.tsx index ce220b103e..0136445ac9 100644 --- a/web/app/(shareLayout)/webapp-signin/components/mail-and-password-auth.tsx +++ b/web/app/(shareLayout)/webapp-signin/components/mail-and-password-auth.tsx @@ -10,6 +10,7 @@ import { emailRegex } from '@/config' import { webAppLogin } from '@/service/common' import Input from '@/app/components/base/input' import I18NContext from '@/context/i18n' +import { useWebAppStore } from '@/context/web-app-context' import { noop } from 'lodash-es' import { fetchAccessToken } from '@/service/share' import { setWebAppAccessToken, setWebAppPassport } from '@/service/webapp-auth' @@ -30,6 +31,7 @@ export default function MailAndPasswordAuth({ isEmailSetup }: MailAndPasswordAut const [isLoading, setIsLoading] = useState(false) const redirectUrl = searchParams.get('redirect_url') + const embeddedUserId = useWebAppStore(s => s.embeddedUserId) const getAppCodeFromRedirectUrl = useCallback(() => { if (!redirectUrl) @@ -82,7 +84,10 @@ export default function MailAndPasswordAuth({ isEmailSetup }: MailAndPasswordAut if (res.result === 'success') { setWebAppAccessToken(res.data.access_token) - const { access_token } = await fetchAccessToken({ appCode: appCode! 
}) + const { access_token } = await fetchAccessToken({ + appCode: appCode!, + userId: embeddedUserId || undefined, + }) setWebAppPassport(appCode!, access_token) router.replace(decodeURIComponent(redirectUrl)) } diff --git a/web/app/components/base/chat/embedded-chatbot/hooks.tsx b/web/app/components/base/chat/embedded-chatbot/hooks.tsx index cfb221522c..9a9abfbd09 100644 --- a/web/app/components/base/chat/embedded-chatbot/hooks.tsx +++ b/web/app/components/base/chat/embedded-chatbot/hooks.tsx @@ -66,16 +66,20 @@ export const useEmbeddedChatbot = () => { const appInfo = useWebAppStore(s => s.appInfo) const appMeta = useWebAppStore(s => s.appMeta) const appParams = useWebAppStore(s => s.appParams) + const embeddedConversationId = useWebAppStore(s => s.embeddedConversationId) + const embeddedUserId = useWebAppStore(s => s.embeddedUserId) const appId = useMemo(() => appInfo?.app_id, [appInfo]) const [userId, setUserId] = useState() const [conversationId, setConversationId] = useState() + useEffect(() => { - getProcessedSystemVariablesFromUrlParams().then(({ user_id, conversation_id }) => { - setUserId(user_id) - setConversationId(conversation_id) - }) - }, []) + setUserId(embeddedUserId || undefined) + }, [embeddedUserId]) + + useEffect(() => { + setConversationId(embeddedConversationId || undefined) + }, [embeddedConversationId]) useEffect(() => { const setLanguageFromParams = async () => { diff --git a/web/context/web-app-context.tsx b/web/context/web-app-context.tsx index bcbd39b5fc..1b189cd452 100644 --- a/web/context/web-app-context.tsx +++ b/web/context/web-app-context.tsx @@ -9,6 +9,7 @@ import { usePathname, useSearchParams } from 'next/navigation' import type { FC, PropsWithChildren } from 'react' import { useEffect } from 'react' import { create } from 'zustand' +import { getProcessedSystemVariablesFromUrlParams } from '@/app/components/base/chat/utils' import { useGlobalPublicStore } from './global-public-context' type WebAppStore = { @@ -24,6 +25,10 @@ type WebAppStore = { updateWebAppMeta: (appMeta: AppMeta | null) => void userCanAccessApp: boolean updateUserCanAccessApp: (canAccess: boolean) => void + embeddedUserId: string | null + updateEmbeddedUserId: (userId: string | null) => void + embeddedConversationId: string | null + updateEmbeddedConversationId: (conversationId: string | null) => void } export const useWebAppStore = create(set => ({ @@ -39,6 +44,11 @@ export const useWebAppStore = create(set => ({ updateWebAppMeta: (appMeta: AppMeta | null) => set(() => ({ appMeta })), userCanAccessApp: false, updateUserCanAccessApp: (canAccess: boolean) => set(() => ({ userCanAccessApp: canAccess })), + embeddedUserId: null, + updateEmbeddedUserId: (userId: string | null) => set(() => ({ embeddedUserId: userId })), + embeddedConversationId: null, + updateEmbeddedConversationId: (conversationId: string | null) => + set(() => ({ embeddedConversationId: conversationId })), })) const getShareCodeFromRedirectUrl = (redirectUrl: string | null): string | null => { @@ -58,9 +68,12 @@ const WebAppStoreProvider: FC = ({ children }) => { const isGlobalPending = useGlobalPublicStore(s => s.isGlobalPending) const updateWebAppAccessMode = useWebAppStore(state => state.updateWebAppAccessMode) const updateShareCode = useWebAppStore(state => state.updateShareCode) + const updateEmbeddedUserId = useWebAppStore(state => state.updateEmbeddedUserId) + const updateEmbeddedConversationId = useWebAppStore(state => state.updateEmbeddedConversationId) const pathname = usePathname() const searchParams = 
useSearchParams() const redirectUrlParam = searchParams.get('redirect_url') + const searchParamsString = searchParams.toString() // Compute shareCode directly const shareCode = getShareCodeFromRedirectUrl(redirectUrlParam) || getShareCodeFromPathname(pathname) @@ -68,6 +81,29 @@ const WebAppStoreProvider: FC = ({ children }) => { updateShareCode(shareCode) }, [shareCode, updateShareCode]) + useEffect(() => { + let cancelled = false + const syncEmbeddedUserId = async () => { + try { + const { user_id, conversation_id } = await getProcessedSystemVariablesFromUrlParams() + if (!cancelled) { + updateEmbeddedUserId(user_id || null) + updateEmbeddedConversationId(conversation_id || null) + } + } + catch { + if (!cancelled) { + updateEmbeddedUserId(null) + updateEmbeddedConversationId(null) + } + } + } + syncEmbeddedUserId() + return () => { + cancelled = true + } + }, [searchParamsString, updateEmbeddedUserId, updateEmbeddedConversationId]) + const { isLoading, data: accessModeResult } = useGetWebAppAccessModeByCode(shareCode) useEffect(() => { From a7c855cab8d8f81a705da719100cc806780584f1 Mon Sep 17 00:00:00 2001 From: yangzheli <43645580+yangzheli@users.noreply.github.com> Date: Tue, 28 Oct 2025 09:26:12 +0800 Subject: [PATCH 10/15] fix(workflow): resolve note node copy/duplicate errors (#27528) Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- .../workflow/hooks/use-nodes-interactions.ts | 4 +++- web/app/components/workflow/note-node/index.tsx | 11 +++-------- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/web/app/components/workflow/hooks/use-nodes-interactions.ts b/web/app/components/workflow/hooks/use-nodes-interactions.ts index fa61cdeb8c..4de53c431c 100644 --- a/web/app/components/workflow/hooks/use-nodes-interactions.ts +++ b/web/app/components/workflow/hooks/use-nodes-interactions.ts @@ -1445,6 +1445,7 @@ export const useNodesInteractions = () => { // If no nodeId is provided, fall back to the current behavior const bundledNodes = nodes.filter((node) => { if (!node.data._isBundled) return false + if (node.type === CUSTOM_NOTE_NODE) return true const { metaData } = nodesMetaDataMap![node.data.type as BlockEnum] if (metaData.isSingleton) return false return !node.data.isInIteration && !node.data.isInLoop @@ -1457,6 +1458,7 @@ export const useNodesInteractions = () => { const selectedNode = nodes.find((node) => { if (!node.data.selected) return false + if (node.type === CUSTOM_NOTE_NODE) return true const { metaData } = nodesMetaDataMap![node.data.type as BlockEnum] return !metaData.isSingleton }) @@ -1495,7 +1497,7 @@ export const useNodesInteractions = () => { = generateNewNode({ type: nodeToPaste.type, data: { - ...nodesMetaDataMap![nodeType].defaultValue, + ...(nodeToPaste.type !== CUSTOM_NOTE_NODE && nodesMetaDataMap![nodeType].defaultValue), ...nodeToPaste.data, selected: false, _isBundled: false, diff --git a/web/app/components/workflow/note-node/index.tsx b/web/app/components/workflow/note-node/index.tsx index 7f2cde42d6..5a0b2677c1 100644 --- a/web/app/components/workflow/note-node/index.tsx +++ b/web/app/components/workflow/note-node/index.tsx @@ -1,6 +1,5 @@ import { memo, - useCallback, useRef, } from 'react' import { useTranslation } from 'react-i18next' @@ -51,10 +50,6 @@ const NoteNode = ({ } = useNodesInteractions() const { handleNodeDataUpdateWithSyncDraft } = useNodeDataUpdate() - const handleDeleteNode = useCallback(() => { - handleNodeDelete(id) - }, [id, handleNodeDelete]) - useClickAway(() => { 
handleNodeDataUpdateWithSyncDraft({ id, data: { selected: false } }) }, ref) @@ -102,9 +97,9 @@ const NoteNode = ({ handleNodesCopy(id)} + onDuplicate={() => handleNodesDuplicate(id)} + onDelete={() => handleNodeDelete(id)} showAuthor={data.showAuthor} onShowAuthorChange={handleShowAuthorChange} /> From f01907aac2f04b3d3a5ba23984dee77403263e4f Mon Sep 17 00:00:00 2001 From: quicksand Date: Tue, 28 Oct 2025 09:46:33 +0800 Subject: [PATCH 11/15] fix: knowledge sync from website error (#27534) --- api/services/dataset_service.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index c97d419545..1e040abe3e 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -1417,7 +1417,7 @@ class DocumentService: assert isinstance(current_user, Account) assert current_user.current_tenant_id is not None assert knowledge_config.data_source - assert knowledge_config.data_source.info_list.file_info_list + assert knowledge_config.data_source.info_list features = FeatureService.get_features(current_user.current_tenant_id) @@ -1426,6 +1426,8 @@ class DocumentService: count = 0 if knowledge_config.data_source: if knowledge_config.data_source.info_list.data_source_type == "upload_file": + if not knowledge_config.data_source.info_list.file_info_list: + raise ValueError("File source info is required") upload_file_list = knowledge_config.data_source.info_list.file_info_list.file_ids count = len(upload_file_list) elif knowledge_config.data_source.info_list.data_source_type == "notion_import": @@ -1531,6 +1533,8 @@ class DocumentService: document_ids = [] duplicate_document_ids = [] if knowledge_config.data_source.info_list.data_source_type == "upload_file": + if not knowledge_config.data_source.info_list.file_info_list: + raise ValueError("File source info is required") upload_file_list = knowledge_config.data_source.info_list.file_info_list.file_ids for file_id in upload_file_list: file = ( From 341b3ae7c9209e35c2da008be5647f658c501a2e Mon Sep 17 00:00:00 2001 From: yalei <269870927@qq.com> Date: Tue, 28 Oct 2025 09:59:16 +0800 Subject: [PATCH 12/15] Sync log detail drawer with conversation_id query parameter, so that we can share a specific conversation (#27518) Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- web/app/components/app/log/list.tsx | 120 ++++++++++++++++++++++++---- 1 file changed, 103 insertions(+), 17 deletions(-) diff --git a/web/app/components/app/log/list.tsx b/web/app/components/app/log/list.tsx index 8b3370b678..d295784083 100644 --- a/web/app/components/app/log/list.tsx +++ b/web/app/components/app/log/list.tsx @@ -14,6 +14,7 @@ import timezone from 'dayjs/plugin/timezone' import { createContext, useContext } from 'use-context-selector' import { useShallow } from 'zustand/react/shallow' import { useTranslation } from 'react-i18next' +import { usePathname, useRouter, useSearchParams } from 'next/navigation' import type { ChatItemInTree } from '../../base/chat/types' import Indicator from '../../header/indicator' import VarPanel from './var-panel' @@ -42,6 +43,10 @@ import cn from '@/utils/classnames' import { noop } from 'lodash-es' import PromptLogModal from '../../base/prompt-log-modal' +type AppStoreState = ReturnType +type ConversationListItem = ChatConversationGeneralDetail | CompletionConversationGeneralDetail +type ConversationSelection = 
ConversationListItem | { id: string; isPlaceholder?: true } + dayjs.extend(utc) dayjs.extend(timezone) @@ -201,7 +206,7 @@ function DetailPanel({ detail, onFeedback }: IDetailPanel) { const { formatTime } = useTimestamp() const { onClose, appDetail } = useContext(DrawerContext) const { notify } = useContext(ToastContext) - const { currentLogItem, setCurrentLogItem, showMessageLogModal, setShowMessageLogModal, showPromptLogModal, setShowPromptLogModal, currentLogModalActiveTab } = useAppStore(useShallow(state => ({ + const { currentLogItem, setCurrentLogItem, showMessageLogModal, setShowMessageLogModal, showPromptLogModal, setShowPromptLogModal, currentLogModalActiveTab } = useAppStore(useShallow((state: AppStoreState) => ({ currentLogItem: state.currentLogItem, setCurrentLogItem: state.setCurrentLogItem, showMessageLogModal: state.showMessageLogModal, @@ -893,20 +898,113 @@ const ChatConversationDetailComp: FC<{ appId?: string; conversationId?: string } const ConversationList: FC = ({ logs, appDetail, onRefresh }) => { const { t } = useTranslation() const { formatTime } = useTimestamp() + const router = useRouter() + const pathname = usePathname() + const searchParams = useSearchParams() + const conversationIdInUrl = searchParams.get('conversation_id') ?? undefined const media = useBreakpoints() const isMobile = media === MediaType.mobile const [showDrawer, setShowDrawer] = useState(false) // Whether to display the chat details drawer - const [currentConversation, setCurrentConversation] = useState() // Currently selected conversation + const [currentConversation, setCurrentConversation] = useState() // Currently selected conversation + const closingConversationIdRef = useRef(null) + const pendingConversationIdRef = useRef(null) + const pendingConversationCacheRef = useRef(undefined) const isChatMode = appDetail.mode !== 'completion' // Whether the app is a chat app const isChatflow = appDetail.mode === 'advanced-chat' // Whether the app is a chatflow app - const { setShowPromptLogModal, setShowAgentLogModal, setShowMessageLogModal } = useAppStore(useShallow(state => ({ + const { setShowPromptLogModal, setShowAgentLogModal, setShowMessageLogModal } = useAppStore(useShallow((state: AppStoreState) => ({ setShowPromptLogModal: state.setShowPromptLogModal, setShowAgentLogModal: state.setShowAgentLogModal, setShowMessageLogModal: state.setShowMessageLogModal, }))) + const activeConversationId = conversationIdInUrl ?? pendingConversationIdRef.current ?? currentConversation?.id + + const buildUrlWithConversation = useCallback((conversationId?: string) => { + const params = new URLSearchParams(searchParams.toString()) + if (conversationId) + params.set('conversation_id', conversationId) + else + params.delete('conversation_id') + + const queryString = params.toString() + return queryString ? 
`${pathname}?${queryString}` : pathname + }, [pathname, searchParams]) + + const handleRowClick = useCallback((log: ConversationListItem) => { + if (conversationIdInUrl === log.id) { + if (!showDrawer) + setShowDrawer(true) + + if (!currentConversation || currentConversation.id !== log.id) + setCurrentConversation(log) + return + } + + pendingConversationIdRef.current = log.id + pendingConversationCacheRef.current = log + if (!showDrawer) + setShowDrawer(true) + + if (currentConversation?.id !== log.id) + setCurrentConversation(undefined) + + router.push(buildUrlWithConversation(log.id), { scroll: false }) + }, [buildUrlWithConversation, conversationIdInUrl, currentConversation, router, showDrawer]) + + const currentConversationId = currentConversation?.id + + useEffect(() => { + if (!conversationIdInUrl) { + if (pendingConversationIdRef.current) + return + + if (showDrawer || currentConversationId) { + setShowDrawer(false) + setCurrentConversation(undefined) + } + closingConversationIdRef.current = null + pendingConversationCacheRef.current = undefined + return + } + + if (closingConversationIdRef.current === conversationIdInUrl) + return + + if (pendingConversationIdRef.current === conversationIdInUrl) + pendingConversationIdRef.current = null + + const matchedConversation = logs?.data?.find((item: ConversationListItem) => item.id === conversationIdInUrl) + const nextConversation: ConversationSelection = matchedConversation + ?? pendingConversationCacheRef.current + ?? { id: conversationIdInUrl, isPlaceholder: true } + + if (!showDrawer) + setShowDrawer(true) + + if (!currentConversation || currentConversation.id !== conversationIdInUrl || (!('created_at' in currentConversation) && matchedConversation)) + setCurrentConversation(nextConversation) + + if (pendingConversationCacheRef.current?.id === conversationIdInUrl || matchedConversation) + pendingConversationCacheRef.current = undefined + }, [conversationIdInUrl, currentConversation, isChatMode, logs?.data, showDrawer]) + + const onCloseDrawer = useCallback(() => { + onRefresh() + setShowDrawer(false) + setCurrentConversation(undefined) + setShowPromptLogModal(false) + setShowAgentLogModal(false) + setShowMessageLogModal(false) + pendingConversationIdRef.current = null + pendingConversationCacheRef.current = undefined + closingConversationIdRef.current = conversationIdInUrl ?? null + + if (conversationIdInUrl) + router.replace(buildUrlWithConversation(), { scroll: false }) + }, [buildUrlWithConversation, conversationIdInUrl, onRefresh, router, setShowAgentLogModal, setShowMessageLogModal, setShowPromptLogModal]) + // Annotated data needs to be highlighted const renderTdValue = (value: string | number | null, isEmptyStyle: boolean, isHighlight = false, annotation?: LogAnnotation) => { return ( @@ -925,15 +1023,6 @@ const ConversationList: FC = ({ logs, appDetail, onRefresh }) ) } - const onCloseDrawer = () => { - onRefresh() - setShowDrawer(false) - setCurrentConversation(undefined) - setShowPromptLogModal(false) - setShowAgentLogModal(false) - setShowMessageLogModal(false) - } - if (!logs) return @@ -960,11 +1049,8 @@ const ConversationList: FC = ({ logs, appDetail, onRefresh }) const rightValue = get(log, isChatMode ? 'message_count' : 'message.answer') return { - setShowDrawer(true) - setCurrentConversation(log) - }}> + className={cn('cursor-pointer border-b border-divider-subtle hover:bg-background-default-hover', activeConversationId !== log.id ? 
'' : 'bg-background-default-hover')} + onClick={() => handleRowClick(log)}> {!log.read_at && (
From 543c5236e7735028013865cc66a1dae93b44697d Mon Sep 17 00:00:00 2001 From: heyszt <270985384@qq.com> Date: Tue, 28 Oct 2025 09:59:30 +0800 Subject: [PATCH 13/15] refactor:Decouple Domain Models from Direct Database Access (#27316) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .../console/app/workflow_statistic.py | 222 ++++++---------- api/core/memory/token_buffer_memory.py | 24 +- api/core/ops/ops_trace_manager.py | 129 +++++----- .../api_workflow_run_repository.py | 154 ++++++++++- .../sqlalchemy_api_workflow_run_repository.py | 241 +++++++++++++++++- api/repositories/types.py | 21 ++ api/services/rag_pipeline/rag_pipeline.py | 68 ++--- 7 files changed, 595 insertions(+), 264 deletions(-) create mode 100644 api/repositories/types.py diff --git a/api/controllers/console/app/workflow_statistic.py b/api/controllers/console/app/workflow_statistic.py index bbea04640a..c246b3ffd5 100644 --- a/api/controllers/console/app/workflow_statistic.py +++ b/api/controllers/console/app/workflow_statistic.py @@ -1,10 +1,9 @@ from datetime import datetime -from decimal import Decimal import pytz -import sqlalchemy as sa from flask import jsonify from flask_restx import Resource, reqparse +from sqlalchemy.orm import sessionmaker from controllers.console import api, console_ns from controllers.console.app.wraps import get_app_model @@ -14,10 +13,16 @@ from libs.helper import DatetimeString from libs.login import current_account_with_tenant, login_required from models.enums import WorkflowRunTriggeredFrom from models.model import AppMode +from repositories.factory import DifyAPIRepositoryFactory @console_ns.route("/apps//workflow/statistics/daily-conversations") class WorkflowDailyRunsStatistic(Resource): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + session_maker = sessionmaker(bind=db.engine, expire_on_commit=False) + self._workflow_run_repo = DifyAPIRepositoryFactory.create_api_workflow_run_repository(session_maker) + @api.doc("get_workflow_daily_runs_statistic") @api.doc(description="Get workflow daily runs statistics") @api.doc(params={"app_id": "Application ID"}) @@ -37,57 +42,44 @@ class WorkflowDailyRunsStatistic(Resource): ) args = parser.parse_args() - sql_query = """SELECT - DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - COUNT(id) AS runs -FROM - workflow_runs -WHERE - app_id = :app_id - AND triggered_from = :triggered_from""" - arg_dict = { - "tz": account.timezone, - "app_id": app_model.id, - "triggered_from": WorkflowRunTriggeredFrom.APP_RUN, - } assert account.timezone is not None timezone = pytz.timezone(account.timezone) utc_timezone = pytz.utc + start_date = None + end_date = None + if args["start"]: start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M") start_datetime = start_datetime.replace(second=0) - start_datetime_timezone = timezone.localize(start_datetime) - start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - - sql_query += " AND created_at >= :start" - arg_dict["start"] = start_datetime_utc + start_date = start_datetime_timezone.astimezone(utc_timezone) if args["end"]: end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M") end_datetime = end_datetime.replace(second=0) - end_datetime_timezone = timezone.localize(end_datetime) - end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) + end_date = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " AND created_at < :end" - arg_dict["end"] = end_datetime_utc - - 
sql_query += " GROUP BY date ORDER BY date" - - response_data = [] - - with db.engine.begin() as conn: - rs = conn.execute(sa.text(sql_query), arg_dict) - for i in rs: - response_data.append({"date": str(i.date), "runs": i.runs}) + response_data = self._workflow_run_repo.get_daily_runs_statistics( + tenant_id=app_model.tenant_id, + app_id=app_model.id, + triggered_from=WorkflowRunTriggeredFrom.APP_RUN, + start_date=start_date, + end_date=end_date, + timezone=account.timezone, + ) return jsonify({"data": response_data}) @console_ns.route("/apps//workflow/statistics/daily-terminals") class WorkflowDailyTerminalsStatistic(Resource): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + session_maker = sessionmaker(bind=db.engine, expire_on_commit=False) + self._workflow_run_repo = DifyAPIRepositoryFactory.create_api_workflow_run_repository(session_maker) + @api.doc("get_workflow_daily_terminals_statistic") @api.doc(description="Get workflow daily terminals statistics") @api.doc(params={"app_id": "Application ID"}) @@ -107,57 +99,44 @@ class WorkflowDailyTerminalsStatistic(Resource): ) args = parser.parse_args() - sql_query = """SELECT - DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - COUNT(DISTINCT workflow_runs.created_by) AS terminal_count -FROM - workflow_runs -WHERE - app_id = :app_id - AND triggered_from = :triggered_from""" - arg_dict = { - "tz": account.timezone, - "app_id": app_model.id, - "triggered_from": WorkflowRunTriggeredFrom.APP_RUN, - } assert account.timezone is not None timezone = pytz.timezone(account.timezone) utc_timezone = pytz.utc + start_date = None + end_date = None + if args["start"]: start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M") start_datetime = start_datetime.replace(second=0) - start_datetime_timezone = timezone.localize(start_datetime) - start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - - sql_query += " AND created_at >= :start" - arg_dict["start"] = start_datetime_utc + start_date = start_datetime_timezone.astimezone(utc_timezone) if args["end"]: end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M") end_datetime = end_datetime.replace(second=0) - end_datetime_timezone = timezone.localize(end_datetime) - end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) + end_date = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " AND created_at < :end" - arg_dict["end"] = end_datetime_utc - - sql_query += " GROUP BY date ORDER BY date" - - response_data = [] - - with db.engine.begin() as conn: - rs = conn.execute(sa.text(sql_query), arg_dict) - for i in rs: - response_data.append({"date": str(i.date), "terminal_count": i.terminal_count}) + response_data = self._workflow_run_repo.get_daily_terminals_statistics( + tenant_id=app_model.tenant_id, + app_id=app_model.id, + triggered_from=WorkflowRunTriggeredFrom.APP_RUN, + start_date=start_date, + end_date=end_date, + timezone=account.timezone, + ) return jsonify({"data": response_data}) @console_ns.route("/apps//workflow/statistics/token-costs") class WorkflowDailyTokenCostStatistic(Resource): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + session_maker = sessionmaker(bind=db.engine, expire_on_commit=False) + self._workflow_run_repo = DifyAPIRepositoryFactory.create_api_workflow_run_repository(session_maker) + @api.doc("get_workflow_daily_token_cost_statistic") @api.doc(description="Get workflow daily token cost statistics") @api.doc(params={"app_id": 
"Application ID"}) @@ -177,62 +156,44 @@ class WorkflowDailyTokenCostStatistic(Resource): ) args = parser.parse_args() - sql_query = """SELECT - DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - SUM(workflow_runs.total_tokens) AS token_count -FROM - workflow_runs -WHERE - app_id = :app_id - AND triggered_from = :triggered_from""" - arg_dict = { - "tz": account.timezone, - "app_id": app_model.id, - "triggered_from": WorkflowRunTriggeredFrom.APP_RUN, - } assert account.timezone is not None timezone = pytz.timezone(account.timezone) utc_timezone = pytz.utc + start_date = None + end_date = None + if args["start"]: start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M") start_datetime = start_datetime.replace(second=0) - start_datetime_timezone = timezone.localize(start_datetime) - start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - - sql_query += " AND created_at >= :start" - arg_dict["start"] = start_datetime_utc + start_date = start_datetime_timezone.astimezone(utc_timezone) if args["end"]: end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M") end_datetime = end_datetime.replace(second=0) - end_datetime_timezone = timezone.localize(end_datetime) - end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) + end_date = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " AND created_at < :end" - arg_dict["end"] = end_datetime_utc - - sql_query += " GROUP BY date ORDER BY date" - - response_data = [] - - with db.engine.begin() as conn: - rs = conn.execute(sa.text(sql_query), arg_dict) - for i in rs: - response_data.append( - { - "date": str(i.date), - "token_count": i.token_count, - } - ) + response_data = self._workflow_run_repo.get_daily_token_cost_statistics( + tenant_id=app_model.tenant_id, + app_id=app_model.id, + triggered_from=WorkflowRunTriggeredFrom.APP_RUN, + start_date=start_date, + end_date=end_date, + timezone=account.timezone, + ) return jsonify({"data": response_data}) @console_ns.route("/apps//workflow/statistics/average-app-interactions") class WorkflowAverageAppInteractionStatistic(Resource): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + session_maker = sessionmaker(bind=db.engine, expire_on_commit=False) + self._workflow_run_repo = DifyAPIRepositoryFactory.create_api_workflow_run_repository(session_maker) + @api.doc("get_workflow_average_app_interaction_statistic") @api.doc(description="Get workflow average app interaction statistics") @api.doc(params={"app_id": "Application ID"}) @@ -252,67 +213,32 @@ class WorkflowAverageAppInteractionStatistic(Resource): ) args = parser.parse_args() - sql_query = """SELECT - AVG(sub.interactions) AS interactions, - sub.date -FROM - ( - SELECT - DATE(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - c.created_by, - COUNT(c.id) AS interactions - FROM - workflow_runs c - WHERE - c.app_id = :app_id - AND c.triggered_from = :triggered_from - {{start}} - {{end}} - GROUP BY - date, c.created_by - ) sub -GROUP BY - sub.date""" - arg_dict = { - "tz": account.timezone, - "app_id": app_model.id, - "triggered_from": WorkflowRunTriggeredFrom.APP_RUN, - } assert account.timezone is not None timezone = pytz.timezone(account.timezone) utc_timezone = pytz.utc + start_date = None + end_date = None + if args["start"]: start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M") start_datetime = start_datetime.replace(second=0) - start_datetime_timezone = timezone.localize(start_datetime) - 
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - - sql_query = sql_query.replace("{{start}}", " AND c.created_at >= :start") - arg_dict["start"] = start_datetime_utc - else: - sql_query = sql_query.replace("{{start}}", "") + start_date = start_datetime_timezone.astimezone(utc_timezone) if args["end"]: end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M") end_datetime = end_datetime.replace(second=0) - end_datetime_timezone = timezone.localize(end_datetime) - end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) + end_date = end_datetime_timezone.astimezone(utc_timezone) - sql_query = sql_query.replace("{{end}}", " AND c.created_at < :end") - arg_dict["end"] = end_datetime_utc - else: - sql_query = sql_query.replace("{{end}}", "") - - response_data = [] - - with db.engine.begin() as conn: - rs = conn.execute(sa.text(sql_query), arg_dict) - for i in rs: - response_data.append( - {"date": str(i.date), "interactions": float(i.interactions.quantize(Decimal("0.01")))} - ) + response_data = self._workflow_run_repo.get_average_app_interaction_statistics( + tenant_id=app_model.tenant_id, + app_id=app_model.id, + triggered_from=WorkflowRunTriggeredFrom.APP_RUN, + start_date=start_date, + end_date=end_date, + timezone=account.timezone, + ) return jsonify({"data": response_data}) diff --git a/api/core/memory/token_buffer_memory.py b/api/core/memory/token_buffer_memory.py index 35af742f2a..3ebbb60f85 100644 --- a/api/core/memory/token_buffer_memory.py +++ b/api/core/memory/token_buffer_memory.py @@ -1,6 +1,7 @@ from collections.abc import Sequence from sqlalchemy import select +from sqlalchemy.orm import sessionmaker from core.app.app_config.features.file_upload.manager import FileUploadConfigManager from core.file import file_manager @@ -18,7 +19,9 @@ from core.prompt.utils.extract_thread_messages import extract_thread_messages from extensions.ext_database import db from factories import file_factory from models.model import AppMode, Conversation, Message, MessageFile -from models.workflow import Workflow, WorkflowRun +from models.workflow import Workflow +from repositories.api_workflow_run_repository import APIWorkflowRunRepository +from repositories.factory import DifyAPIRepositoryFactory class TokenBufferMemory: @@ -29,6 +32,14 @@ class TokenBufferMemory: ): self.conversation = conversation self.model_instance = model_instance + self._workflow_run_repo: APIWorkflowRunRepository | None = None + + @property + def workflow_run_repo(self) -> APIWorkflowRunRepository: + if self._workflow_run_repo is None: + session_maker = sessionmaker(bind=db.engine, expire_on_commit=False) + self._workflow_run_repo = DifyAPIRepositoryFactory.create_api_workflow_run_repository(session_maker) + return self._workflow_run_repo def _build_prompt_message_with_files( self, @@ -50,7 +61,16 @@ class TokenBufferMemory: if self.conversation.mode in {AppMode.AGENT_CHAT, AppMode.COMPLETION, AppMode.CHAT}: file_extra_config = FileUploadConfigManager.convert(self.conversation.model_config) elif self.conversation.mode in {AppMode.ADVANCED_CHAT, AppMode.WORKFLOW}: - workflow_run = db.session.scalar(select(WorkflowRun).where(WorkflowRun.id == message.workflow_run_id)) + app = self.conversation.app + if not app: + raise ValueError("App not found for conversation") + + if not message.workflow_run_id: + raise ValueError("Workflow run ID not found") + + workflow_run = self.workflow_run_repo.get_workflow_run_by_id( + tenant_id=app.tenant_id, app_id=app.id, run_id=message.workflow_run_id + ) if 
not workflow_run: raise ValueError(f"Workflow run not found: {message.workflow_run_id}") workflow = db.session.scalar(select(Workflow).where(Workflow.id == workflow_run.workflow_id)) diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index 7db9b076d2..de0d4560e3 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -12,7 +12,7 @@ from uuid import UUID, uuid4 from cachetools import LRUCache from flask import current_app from sqlalchemy import select -from sqlalchemy.orm import Session +from sqlalchemy.orm import Session, sessionmaker from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from core.ops.entities.config_entity import ( @@ -34,7 +34,8 @@ from core.ops.utils import get_message_data from extensions.ext_database import db from extensions.ext_storage import storage from models.model import App, AppModelConfig, Conversation, Message, MessageFile, TraceAppConfig -from models.workflow import WorkflowAppLog, WorkflowRun +from models.workflow import WorkflowAppLog +from repositories.factory import DifyAPIRepositoryFactory from tasks.ops_trace_task import process_trace_tasks if TYPE_CHECKING: @@ -419,6 +420,18 @@ class OpsTraceManager: class TraceTask: + _workflow_run_repo = None + _repo_lock = threading.Lock() + + @classmethod + def _get_workflow_run_repo(cls): + if cls._workflow_run_repo is None: + with cls._repo_lock: + if cls._workflow_run_repo is None: + session_maker = sessionmaker(bind=db.engine, expire_on_commit=False) + cls._workflow_run_repo = DifyAPIRepositoryFactory.create_api_workflow_run_repository(session_maker) + return cls._workflow_run_repo + def __init__( self, trace_type: Any, @@ -486,27 +499,27 @@ class TraceTask: if not workflow_run_id: return {} + workflow_run_repo = self._get_workflow_run_repo() + workflow_run = workflow_run_repo.get_workflow_run_by_id_without_tenant(run_id=workflow_run_id) + if not workflow_run: + raise ValueError("Workflow run not found") + + workflow_id = workflow_run.workflow_id + tenant_id = workflow_run.tenant_id + workflow_run_id = workflow_run.id + workflow_run_elapsed_time = workflow_run.elapsed_time + workflow_run_status = workflow_run.status + workflow_run_inputs = workflow_run.inputs_dict + workflow_run_outputs = workflow_run.outputs_dict + workflow_run_version = workflow_run.version + error = workflow_run.error or "" + + total_tokens = workflow_run.total_tokens + + file_list = workflow_run_inputs.get("sys.file") or [] + query = workflow_run_inputs.get("query") or workflow_run_inputs.get("sys.query") or "" + with Session(db.engine) as session: - workflow_run_stmt = select(WorkflowRun).where(WorkflowRun.id == workflow_run_id) - workflow_run = session.scalars(workflow_run_stmt).first() - if not workflow_run: - raise ValueError("Workflow run not found") - - workflow_id = workflow_run.workflow_id - tenant_id = workflow_run.tenant_id - workflow_run_id = workflow_run.id - workflow_run_elapsed_time = workflow_run.elapsed_time - workflow_run_status = workflow_run.status - workflow_run_inputs = workflow_run.inputs_dict - workflow_run_outputs = workflow_run.outputs_dict - workflow_run_version = workflow_run.version - error = workflow_run.error or "" - - total_tokens = workflow_run.total_tokens - - file_list = workflow_run_inputs.get("sys.file") or [] - query = workflow_run_inputs.get("query") or workflow_run_inputs.get("sys.query") or "" - # get workflow_app_log_id workflow_app_log_data_stmt = select(WorkflowAppLog.id).where( WorkflowAppLog.tenant_id == 
tenant_id, @@ -523,43 +536,43 @@ class TraceTask: ) message_id = session.scalar(message_data_stmt) - metadata = { - "workflow_id": workflow_id, - "conversation_id": conversation_id, - "workflow_run_id": workflow_run_id, - "tenant_id": tenant_id, - "elapsed_time": workflow_run_elapsed_time, - "status": workflow_run_status, - "version": workflow_run_version, - "total_tokens": total_tokens, - "file_list": file_list, - "triggered_from": workflow_run.triggered_from, - "user_id": user_id, - "app_id": workflow_run.app_id, - } + metadata = { + "workflow_id": workflow_id, + "conversation_id": conversation_id, + "workflow_run_id": workflow_run_id, + "tenant_id": tenant_id, + "elapsed_time": workflow_run_elapsed_time, + "status": workflow_run_status, + "version": workflow_run_version, + "total_tokens": total_tokens, + "file_list": file_list, + "triggered_from": workflow_run.triggered_from, + "user_id": user_id, + "app_id": workflow_run.app_id, + } - workflow_trace_info = WorkflowTraceInfo( - trace_id=self.trace_id, - workflow_data=workflow_run.to_dict(), - conversation_id=conversation_id, - workflow_id=workflow_id, - tenant_id=tenant_id, - workflow_run_id=workflow_run_id, - workflow_run_elapsed_time=workflow_run_elapsed_time, - workflow_run_status=workflow_run_status, - workflow_run_inputs=workflow_run_inputs, - workflow_run_outputs=workflow_run_outputs, - workflow_run_version=workflow_run_version, - error=error, - total_tokens=total_tokens, - file_list=file_list, - query=query, - metadata=metadata, - workflow_app_log_id=workflow_app_log_id, - message_id=message_id, - start_time=workflow_run.created_at, - end_time=workflow_run.finished_at, - ) + workflow_trace_info = WorkflowTraceInfo( + trace_id=self.trace_id, + workflow_data=workflow_run.to_dict(), + conversation_id=conversation_id, + workflow_id=workflow_id, + tenant_id=tenant_id, + workflow_run_id=workflow_run_id, + workflow_run_elapsed_time=workflow_run_elapsed_time, + workflow_run_status=workflow_run_status, + workflow_run_inputs=workflow_run_inputs, + workflow_run_outputs=workflow_run_outputs, + workflow_run_version=workflow_run_version, + error=error, + total_tokens=total_tokens, + file_list=file_list, + query=query, + metadata=metadata, + workflow_app_log_id=workflow_app_log_id, + message_id=message_id, + start_time=workflow_run.created_at, + end_time=workflow_run.finished_at, + ) return workflow_trace_info def message_trace(self, message_id: str | None): diff --git a/api/repositories/api_workflow_run_repository.py b/api/repositories/api_workflow_run_repository.py index 72de9fed31..eb6d599224 100644 --- a/api/repositories/api_workflow_run_repository.py +++ b/api/repositories/api_workflow_run_repository.py @@ -28,7 +28,7 @@ Example: runs = repo.get_paginated_workflow_runs( tenant_id="tenant-123", app_id="app-456", - triggered_from="debugging", + triggered_from=WorkflowRunTriggeredFrom.DEBUGGING, limit=20 ) ``` @@ -40,7 +40,14 @@ from typing import Protocol from core.workflow.repositories.workflow_execution_repository import WorkflowExecutionRepository from libs.infinite_scroll_pagination import InfiniteScrollPagination +from models.enums import WorkflowRunTriggeredFrom from models.workflow import WorkflowRun +from repositories.types import ( + AverageInteractionStats, + DailyRunsStats, + DailyTerminalsStats, + DailyTokenCostStats, +) class APIWorkflowRunRepository(WorkflowExecutionRepository, Protocol): @@ -56,7 +63,7 @@ class APIWorkflowRunRepository(WorkflowExecutionRepository, Protocol): self, tenant_id: str, app_id: str, - triggered_from: 
str, + triggered_from: WorkflowRunTriggeredFrom | Sequence[WorkflowRunTriggeredFrom], limit: int = 20, last_id: str | None = None, status: str | None = None, @@ -71,7 +78,7 @@ class APIWorkflowRunRepository(WorkflowExecutionRepository, Protocol): Args: tenant_id: Tenant identifier for multi-tenant isolation app_id: Application identifier - triggered_from: Filter by trigger source (e.g., "debugging", "app-run") + triggered_from: Filter by trigger source(s) (e.g., "debugging", "app-run", or list of values) limit: Maximum number of records to return (default: 20) last_id: Cursor for pagination - ID of the last record from previous page status: Optional filter by status (e.g., "running", "succeeded", "failed") @@ -109,6 +116,31 @@ class APIWorkflowRunRepository(WorkflowExecutionRepository, Protocol): """ ... + def get_workflow_run_by_id_without_tenant( + self, + run_id: str, + ) -> WorkflowRun | None: + """ + Get a specific workflow run by ID without tenant/app context. + + Retrieves a single workflow run using only the run ID, without + requiring tenant_id or app_id. This method is intended for internal + system operations like tracing and monitoring where the tenant context + is not available upfront. + + Args: + run_id: Workflow run identifier + + Returns: + WorkflowRun object if found, None otherwise + + Note: + This method bypasses tenant isolation checks and should only be used + in trusted system contexts like ops trace collection. For user-facing + operations, use get_workflow_run_by_id() with proper tenant isolation. + """ + ... + def get_workflow_runs_count( self, tenant_id: str, @@ -218,3 +250,119 @@ class APIWorkflowRunRepository(WorkflowExecutionRepository, Protocol): and ensure proper data retention policies are followed. """ ... + + def get_daily_runs_statistics( + self, + tenant_id: str, + app_id: str, + triggered_from: str, + start_date: datetime | None = None, + end_date: datetime | None = None, + timezone: str = "UTC", + ) -> list[DailyRunsStats]: + """ + Get daily runs statistics. + + Retrieves daily workflow runs count grouped by date for a specific app + and trigger source. Used for workflow statistics dashboard. + + Args: + tenant_id: Tenant identifier for multi-tenant isolation + app_id: Application identifier + triggered_from: Filter by trigger source (e.g., "app-run") + start_date: Optional start date filter + end_date: Optional end date filter + timezone: Timezone for date grouping (default: "UTC") + + Returns: + List of dictionaries containing date and runs count: + [{"date": "2024-01-01", "runs": 10}, ...] + """ + ... + + def get_daily_terminals_statistics( + self, + tenant_id: str, + app_id: str, + triggered_from: str, + start_date: datetime | None = None, + end_date: datetime | None = None, + timezone: str = "UTC", + ) -> list[DailyTerminalsStats]: + """ + Get daily terminals statistics. + + Retrieves daily unique terminal count grouped by date for a specific app + and trigger source. Used for workflow statistics dashboard. + + Args: + tenant_id: Tenant identifier for multi-tenant isolation + app_id: Application identifier + triggered_from: Filter by trigger source (e.g., "app-run") + start_date: Optional start date filter + end_date: Optional end date filter + timezone: Timezone for date grouping (default: "UTC") + + Returns: + List of dictionaries containing date and terminal count: + [{"date": "2024-01-01", "terminal_count": 5}, ...] + """ + ... 
+ + def get_daily_token_cost_statistics( + self, + tenant_id: str, + app_id: str, + triggered_from: str, + start_date: datetime | None = None, + end_date: datetime | None = None, + timezone: str = "UTC", + ) -> list[DailyTokenCostStats]: + """ + Get daily token cost statistics. + + Retrieves daily total token count grouped by date for a specific app + and trigger source. Used for workflow statistics dashboard. + + Args: + tenant_id: Tenant identifier for multi-tenant isolation + app_id: Application identifier + triggered_from: Filter by trigger source (e.g., "app-run") + start_date: Optional start date filter + end_date: Optional end date filter + timezone: Timezone for date grouping (default: "UTC") + + Returns: + List of dictionaries containing date and token count: + [{"date": "2024-01-01", "token_count": 1000}, ...] + """ + ... + + def get_average_app_interaction_statistics( + self, + tenant_id: str, + app_id: str, + triggered_from: str, + start_date: datetime | None = None, + end_date: datetime | None = None, + timezone: str = "UTC", + ) -> list[AverageInteractionStats]: + """ + Get average app interaction statistics. + + Retrieves daily average interactions per user grouped by date for a specific app + and trigger source. Used for workflow statistics dashboard. + + Args: + tenant_id: Tenant identifier for multi-tenant isolation + app_id: Application identifier + triggered_from: Filter by trigger source (e.g., "app-run") + start_date: Optional start date filter + end_date: Optional end date filter + timezone: Timezone for date grouping (default: "UTC") + + Returns: + List of dictionaries containing date and average interactions: + [{"date": "2024-01-01", "interactions": 2.5}, ...] + """ + ... diff --git a/api/repositories/sqlalchemy_api_workflow_run_repository.py b/api/repositories/sqlalchemy_api_workflow_run_repository.py index 68affb59f3..f08eab0b01 100644 --- a/api/repositories/sqlalchemy_api_workflow_run_repository.py +++ b/api/repositories/sqlalchemy_api_workflow_run_repository.py @@ -22,16 +22,25 @@ Implementation Notes: import logging from collections.abc import Sequence from datetime import datetime -from typing import cast +from decimal import Decimal +from typing import Any, cast +import sqlalchemy as sa from sqlalchemy import delete, func, select from sqlalchemy.engine import CursorResult from sqlalchemy.orm import Session, sessionmaker from libs.infinite_scroll_pagination import InfiniteScrollPagination from libs.time_parser import get_time_threshold +from models.enums import WorkflowRunTriggeredFrom from models.workflow import WorkflowRun from repositories.api_workflow_run_repository import APIWorkflowRunRepository +from repositories.types import ( + AverageInteractionStats, + DailyRunsStats, + DailyTerminalsStats, + DailyTokenCostStats, +) logger = logging.getLogger(__name__) @@ -61,7 +70,7 @@ class DifyAPISQLAlchemyWorkflowRunRepository(APIWorkflowRunRepository): self, tenant_id: str, app_id: str, - triggered_from: str, + triggered_from: WorkflowRunTriggeredFrom | Sequence[WorkflowRunTriggeredFrom], limit: int = 20, last_id: str | None = None, status: str | None = None, @@ -78,9 +87,14 @@ class DifyAPISQLAlchemyWorkflowRunRepository(APIWorkflowRunRepository): base_stmt = select(WorkflowRun).where( WorkflowRun.tenant_id == tenant_id, WorkflowRun.app_id == app_id, - WorkflowRun.triggered_from == triggered_from, ) + # Handle triggered_from values + if isinstance(triggered_from, WorkflowRunTriggeredFrom): + triggered_from = [triggered_from] + if triggered_from: + base_stmt 
= base_stmt.where(WorkflowRun.triggered_from.in_(triggered_from)) + # Add optional status filter if status: base_stmt = base_stmt.where(WorkflowRun.status == status) @@ -126,6 +140,17 @@ class DifyAPISQLAlchemyWorkflowRunRepository(APIWorkflowRunRepository): ) return session.scalar(stmt) + def get_workflow_run_by_id_without_tenant( + self, + run_id: str, + ) -> WorkflowRun | None: + """ + Get a specific workflow run by ID without tenant/app context. + """ + with self._session_maker() as session: + stmt = select(WorkflowRun).where(WorkflowRun.id == run_id) + return session.scalar(stmt) + def get_workflow_runs_count( self, tenant_id: str, @@ -275,3 +300,213 @@ class DifyAPISQLAlchemyWorkflowRunRepository(APIWorkflowRunRepository): logger.info("Total deleted %s workflow runs for app %s", total_deleted, app_id) return total_deleted + + def get_daily_runs_statistics( + self, + tenant_id: str, + app_id: str, + triggered_from: str, + start_date: datetime | None = None, + end_date: datetime | None = None, + timezone: str = "UTC", + ) -> list[DailyRunsStats]: + """ + Get daily runs statistics using raw SQL for optimal performance. + """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + COUNT(id) AS runs +FROM + workflow_runs +WHERE + tenant_id = :tenant_id + AND app_id = :app_id + AND triggered_from = :triggered_from""" + + arg_dict: dict[str, Any] = { + "tz": timezone, + "tenant_id": tenant_id, + "app_id": app_id, + "triggered_from": triggered_from, + } + + if start_date: + sql_query += " AND created_at >= :start_date" + arg_dict["start_date"] = start_date + + if end_date: + sql_query += " AND created_at < :end_date" + arg_dict["end_date"] = end_date + + sql_query += " GROUP BY date ORDER BY date" + + response_data = [] + with self._session_maker() as session: + rs = session.execute(sa.text(sql_query), arg_dict) + for row in rs: + response_data.append({"date": str(row.date), "runs": row.runs}) + + return cast(list[DailyRunsStats], response_data) + + def get_daily_terminals_statistics( + self, + tenant_id: str, + app_id: str, + triggered_from: str, + start_date: datetime | None = None, + end_date: datetime | None = None, + timezone: str = "UTC", + ) -> list[DailyTerminalsStats]: + """ + Get daily terminals statistics using raw SQL for optimal performance. 
+ """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + COUNT(DISTINCT created_by) AS terminal_count +FROM + workflow_runs +WHERE + tenant_id = :tenant_id + AND app_id = :app_id + AND triggered_from = :triggered_from""" + + arg_dict: dict[str, Any] = { + "tz": timezone, + "tenant_id": tenant_id, + "app_id": app_id, + "triggered_from": triggered_from, + } + + if start_date: + sql_query += " AND created_at >= :start_date" + arg_dict["start_date"] = start_date + + if end_date: + sql_query += " AND created_at < :end_date" + arg_dict["end_date"] = end_date + + sql_query += " GROUP BY date ORDER BY date" + + response_data = [] + with self._session_maker() as session: + rs = session.execute(sa.text(sql_query), arg_dict) + for row in rs: + response_data.append({"date": str(row.date), "terminal_count": row.terminal_count}) + + return cast(list[DailyTerminalsStats], response_data) + + def get_daily_token_cost_statistics( + self, + tenant_id: str, + app_id: str, + triggered_from: str, + start_date: datetime | None = None, + end_date: datetime | None = None, + timezone: str = "UTC", + ) -> list[DailyTokenCostStats]: + """ + Get daily token cost statistics using raw SQL for optimal performance. + """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + SUM(total_tokens) AS token_count +FROM + workflow_runs +WHERE + tenant_id = :tenant_id + AND app_id = :app_id + AND triggered_from = :triggered_from""" + + arg_dict: dict[str, Any] = { + "tz": timezone, + "tenant_id": tenant_id, + "app_id": app_id, + "triggered_from": triggered_from, + } + + if start_date: + sql_query += " AND created_at >= :start_date" + arg_dict["start_date"] = start_date + + if end_date: + sql_query += " AND created_at < :end_date" + arg_dict["end_date"] = end_date + + sql_query += " GROUP BY date ORDER BY date" + + response_data = [] + with self._session_maker() as session: + rs = session.execute(sa.text(sql_query), arg_dict) + for row in rs: + response_data.append( + { + "date": str(row.date), + "token_count": row.token_count, + } + ) + + return cast(list[DailyTokenCostStats], response_data) + + def get_average_app_interaction_statistics( + self, + tenant_id: str, + app_id: str, + triggered_from: str, + start_date: datetime | None = None, + end_date: datetime | None = None, + timezone: str = "UTC", + ) -> list[AverageInteractionStats]: + """ + Get average app interaction statistics using raw SQL for optimal performance. 
+ """ + sql_query = """SELECT + AVG(sub.interactions) AS interactions, + sub.date +FROM + ( + SELECT + DATE(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + c.created_by, + COUNT(c.id) AS interactions + FROM + workflow_runs c + WHERE + c.tenant_id = :tenant_id + AND c.app_id = :app_id + AND c.triggered_from = :triggered_from + {{start}} + {{end}} + GROUP BY + date, c.created_by + ) sub +GROUP BY + sub.date""" + + arg_dict: dict[str, Any] = { + "tz": timezone, + "tenant_id": tenant_id, + "app_id": app_id, + "triggered_from": triggered_from, + } + + if start_date: + sql_query = sql_query.replace("{{start}}", " AND c.created_at >= :start_date") + arg_dict["start_date"] = start_date + else: + sql_query = sql_query.replace("{{start}}", "") + + if end_date: + sql_query = sql_query.replace("{{end}}", " AND c.created_at < :end_date") + arg_dict["end_date"] = end_date + else: + sql_query = sql_query.replace("{{end}}", "") + + response_data = [] + with self._session_maker() as session: + rs = session.execute(sa.text(sql_query), arg_dict) + for row in rs: + response_data.append( + {"date": str(row.date), "interactions": float(row.interactions.quantize(Decimal("0.01")))} + ) + + return cast(list[AverageInteractionStats], response_data) diff --git a/api/repositories/types.py b/api/repositories/types.py new file mode 100644 index 0000000000..3b3ef7f635 --- /dev/null +++ b/api/repositories/types.py @@ -0,0 +1,21 @@ +from typing import TypedDict + + +class DailyRunsStats(TypedDict): + date: str + runs: int + + +class DailyTerminalsStats(TypedDict): + date: str + terminal_count: int + + +class DailyTokenCostStats(TypedDict): + date: str + token_count: int + + +class AverageInteractionStats(TypedDict): + date: str + interactions: float diff --git a/api/services/rag_pipeline/rag_pipeline.py b/api/services/rag_pipeline/rag_pipeline.py index f6dddd75a3..50dec458a9 100644 --- a/api/services/rag_pipeline/rag_pipeline.py +++ b/api/services/rag_pipeline/rag_pipeline.py @@ -9,7 +9,7 @@ from typing import Any, Union, cast from uuid import uuid4 from flask_login import current_user -from sqlalchemy import func, or_, select +from sqlalchemy import func, select from sqlalchemy.orm import Session, sessionmaker import contexts @@ -94,6 +94,7 @@ class RagPipelineService: self._node_execution_service_repo = DifyAPIRepositoryFactory.create_api_workflow_node_execution_repository( session_maker ) + self._workflow_run_repo = DifyAPIRepositoryFactory.create_api_workflow_run_repository(session_maker) @classmethod def get_pipeline_templates(cls, type: str = "built-in", language: str = "en-US") -> dict: @@ -1015,48 +1016,21 @@ class RagPipelineService: :param args: request args """ limit = int(args.get("limit", 20)) + last_id = args.get("last_id") - base_query = db.session.query(WorkflowRun).where( - WorkflowRun.tenant_id == pipeline.tenant_id, - WorkflowRun.app_id == pipeline.id, - or_( - WorkflowRun.triggered_from == WorkflowRunTriggeredFrom.RAG_PIPELINE_RUN.value, - WorkflowRun.triggered_from == WorkflowRunTriggeredFrom.RAG_PIPELINE_DEBUGGING.value, - ), + triggered_from_values = [ + WorkflowRunTriggeredFrom.RAG_PIPELINE_RUN, + WorkflowRunTriggeredFrom.RAG_PIPELINE_DEBUGGING, + ] + + return self._workflow_run_repo.get_paginated_workflow_runs( + tenant_id=pipeline.tenant_id, + app_id=pipeline.id, + triggered_from=triggered_from_values, + limit=limit, + last_id=last_id, ) - if args.get("last_id"): - last_workflow_run = base_query.where( - WorkflowRun.id == args.get("last_id"), - ).first() - - if 
not last_workflow_run: - raise ValueError("Last workflow run not exists") - - workflow_runs = ( - base_query.where( - WorkflowRun.created_at < last_workflow_run.created_at, WorkflowRun.id != last_workflow_run.id - ) - .order_by(WorkflowRun.created_at.desc()) - .limit(limit) - .all() - ) - else: - workflow_runs = base_query.order_by(WorkflowRun.created_at.desc()).limit(limit).all() - - has_more = False - if len(workflow_runs) == limit: - current_page_first_workflow_run = workflow_runs[-1] - rest_count = base_query.where( - WorkflowRun.created_at < current_page_first_workflow_run.created_at, - WorkflowRun.id != current_page_first_workflow_run.id, - ).count() - - if rest_count > 0: - has_more = True - - return InfiniteScrollPagination(data=workflow_runs, limit=limit, has_more=has_more) - def get_rag_pipeline_workflow_run(self, pipeline: Pipeline, run_id: str) -> WorkflowRun | None: """ Get workflow run detail @@ -1064,18 +1038,12 @@ class RagPipelineService: :param app_model: app model :param run_id: workflow run id """ - workflow_run = ( - db.session.query(WorkflowRun) - .where( - WorkflowRun.tenant_id == pipeline.tenant_id, - WorkflowRun.app_id == pipeline.id, - WorkflowRun.id == run_id, - ) - .first() + return self._workflow_run_repo.get_workflow_run_by_id( + tenant_id=pipeline.tenant_id, + app_id=pipeline.id, + run_id=run_id, ) - return workflow_run - def get_rag_pipeline_workflow_run_node_executions( self, pipeline: Pipeline, From ff32dff1636e40d6c79dd9b25a5b90ffc1e3de96 Mon Sep 17 00:00:00 2001 From: Eric Guo Date: Tue, 28 Oct 2025 10:04:24 +0800 Subject: [PATCH 14/15] Enabled cross-subdomain console sessions by making the cookie domain configurable and aligning the frontend so it reads the shared CSRF cookie. (#27190) --- api/.env.example | 3 ++ api/configs/feature/__init__.py | 5 ++++ api/libs/external_api.py | 13 ++------- api/libs/token.py | 33 ++++++++++++++++++++- api/tests/unit_tests/libs/test_token.py | 39 ++++++++++++++++++++++++- docker/.env.example | 5 ++++ docker/docker-compose-template.yaml | 1 + docker/docker-compose.yaml | 3 ++ web/.env.example | 3 ++ web/config/index.ts | 2 ++ 10 files changed, 94 insertions(+), 13 deletions(-) diff --git a/api/.env.example b/api/.env.example index 4df6adf348..c59d3ea16f 100644 --- a/api/.env.example +++ b/api/.env.example @@ -156,6 +156,9 @@ SUPABASE_URL=your-server-url # CORS configuration WEB_API_CORS_ALLOW_ORIGINS=http://localhost:3000,* CONSOLE_CORS_ALLOW_ORIGINS=http://localhost:3000,* +# Set COOKIE_DOMAIN when the console frontend and API are on different subdomains. +# Provide the registrable domain (e.g. example.com); leading dots are optional. +COOKIE_DOMAIN= # Vector database configuration # Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`, `matrixone`. 
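A short usage sketch for the daily statistics queries added to the workflow-run repository in the previous patch. The wiring of `repo` is an assumption here (mirroring how RagPipelineService builds self._workflow_run_repo via DifyAPIRepositoryFactory.create_api_workflow_run_repository); the tenant/app IDs, trigger source, and date range are purely illustrative.

    from datetime import datetime, timedelta, timezone as dt_timezone

    from repositories.api_workflow_run_repository import APIWorkflowRunRepository
    from repositories.types import AverageInteractionStats, DailyTokenCostStats


    def collect_weekly_stats(repo: APIWorkflowRunRepository, tenant_id: str, app_id: str) -> None:
        # Illustrative caller only; `repo` is assumed to come from the repository factory.
        end = datetime.now(dt_timezone.utc)
        start = end - timedelta(days=7)

        token_costs: list[DailyTokenCostStats] = repo.get_daily_token_cost_statistics(
            tenant_id=tenant_id,
            app_id=app_id,
            triggered_from="app-run",  # trigger source, as in the interface docstrings
            start_date=start,
            end_date=end,
            timezone="UTC",            # dates are grouped per this timezone
        )
        interactions: list[AverageInteractionStats] = repo.get_average_app_interaction_statistics(
            tenant_id=tenant_id,
            app_id=app_id,
            triggered_from="app-run",
        )
        for row in token_costs:
            print(row["date"], row["token_count"])   # e.g. 2024-01-01 1000
        for row in interactions:
            print(row["date"], row["interactions"])  # e.g. 2024-01-01 2.5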
diff --git a/api/configs/feature/__init__.py b/api/configs/feature/__init__.py index a02f8a4d49..b2a2f8d0fd 100644 --- a/api/configs/feature/__init__.py +++ b/api/configs/feature/__init__.py @@ -337,6 +337,11 @@ class HttpConfig(BaseSettings): HTTP-related configurations for the application """ + COOKIE_DOMAIN: str = Field( + description="Explicit cookie domain for console/service cookies when sharing across subdomains", + default="", + ) + API_COMPRESSION_ENABLED: bool = Field( description="Enable or disable gzip compression for HTTP responses", default=False, diff --git a/api/libs/external_api.py b/api/libs/external_api.py index 1a4fde960c..61a90ee4a9 100644 --- a/api/libs/external_api.py +++ b/api/libs/external_api.py @@ -9,9 +9,8 @@ from werkzeug.exceptions import HTTPException from werkzeug.http import HTTP_STATUS_CODES from configs import dify_config -from constants import COOKIE_NAME_ACCESS_TOKEN, COOKIE_NAME_CSRF_TOKEN, COOKIE_NAME_REFRESH_TOKEN from core.errors.error import AppInvokeQuotaExceededError -from libs.token import is_secure +from libs.token import build_force_logout_cookie_headers def http_status_message(code): @@ -73,15 +72,7 @@ def register_external_error_handlers(api: Api): error_code = getattr(e, "error_code", None) if error_code == "unauthorized_and_force_logout": # Add Set-Cookie headers to clear auth cookies - - secure = is_secure() - # response is not accessible, so we need to do it ugly - common_part = "Path=/; Expires=Thu, 01 Jan 1970 00:00:00 GMT; HttpOnly" - headers["Set-Cookie"] = [ - f'{COOKIE_NAME_ACCESS_TOKEN}=""; {common_part}{"; Secure" if secure else ""}; SameSite=Lax', - f'{COOKIE_NAME_CSRF_TOKEN}=""; {common_part}{"; Secure" if secure else ""}; SameSite=Lax', - f'{COOKIE_NAME_REFRESH_TOKEN}=""; {common_part}{"; Secure" if secure else ""}; SameSite=Lax', - ] + headers["Set-Cookie"] = build_force_logout_cookie_headers() return data, status_code, headers _ = handle_http_exception diff --git a/api/libs/token.py b/api/libs/token.py index b53663c89a..098ff958da 100644 --- a/api/libs/token.py +++ b/api/libs/token.py @@ -30,8 +30,22 @@ def is_secure() -> bool: return dify_config.CONSOLE_WEB_URL.startswith("https") and dify_config.CONSOLE_API_URL.startswith("https") +def _cookie_domain() -> str | None: + """ + Returns the normalized cookie domain. + + Leading dots are stripped from the configured domain. Historically, a leading dot + indicated that a cookie should be sent to all subdomains, but modern browsers treat + 'example.com' and '.example.com' identically. This normalization ensures consistent + behavior and avoids confusion. 
+ """ + domain = dify_config.COOKIE_DOMAIN.strip() + domain = domain.removeprefix(".") + return domain or None + + def _real_cookie_name(cookie_name: str) -> str: - if is_secure(): + if is_secure() and _cookie_domain() is None: return "__Host-" + cookie_name else: return cookie_name @@ -91,6 +105,7 @@ def set_access_token_to_cookie(request: Request, response: Response, token: str, _real_cookie_name(COOKIE_NAME_ACCESS_TOKEN), value=token, httponly=True, + domain=_cookie_domain(), secure=is_secure(), samesite=samesite, max_age=int(dify_config.ACCESS_TOKEN_EXPIRE_MINUTES * 60), @@ -103,6 +118,7 @@ def set_refresh_token_to_cookie(request: Request, response: Response, token: str _real_cookie_name(COOKIE_NAME_REFRESH_TOKEN), value=token, httponly=True, + domain=_cookie_domain(), secure=is_secure(), samesite="Lax", max_age=int(60 * 60 * 24 * dify_config.REFRESH_TOKEN_EXPIRE_DAYS), @@ -115,6 +131,7 @@ def set_csrf_token_to_cookie(request: Request, response: Response, token: str): _real_cookie_name(COOKIE_NAME_CSRF_TOKEN), value=token, httponly=False, + domain=_cookie_domain(), secure=is_secure(), samesite="Lax", max_age=int(60 * dify_config.ACCESS_TOKEN_EXPIRE_MINUTES), @@ -133,6 +150,7 @@ def _clear_cookie( "", expires=0, path="/", + domain=_cookie_domain(), secure=is_secure(), httponly=http_only, samesite=samesite, @@ -155,6 +173,19 @@ def clear_csrf_token_from_cookie(response: Response): _clear_cookie(response, COOKIE_NAME_CSRF_TOKEN, http_only=False) +def build_force_logout_cookie_headers() -> list[str]: + """ + Generate Set-Cookie header values that clear all auth-related cookies. + This mirrors the behavior of the standard cookie clearing helpers while + allowing callers that do not have a Response instance to reuse the logic. + """ + response = Response() + clear_access_token_from_cookie(response) + clear_csrf_token_from_cookie(response) + clear_refresh_token_from_cookie(response) + return response.headers.getlist("Set-Cookie") + + def check_csrf_token(request: Request, user_id: str): # some apis are sent by beacon, so we need to bypass csrf token check # since these APIs are post, they are already protected by SameSite: Lax, so csrf is not required. 
diff --git a/api/tests/unit_tests/libs/test_token.py b/api/tests/unit_tests/libs/test_token.py index a611d3eb0e..6a65b5faa0 100644 --- a/api/tests/unit_tests/libs/test_token.py +++ b/api/tests/unit_tests/libs/test_token.py @@ -1,5 +1,10 @@ +from unittest.mock import MagicMock + +from werkzeug.wrappers import Response + from constants import COOKIE_NAME_ACCESS_TOKEN, COOKIE_NAME_WEBAPP_ACCESS_TOKEN -from libs.token import extract_access_token, extract_webapp_access_token +from libs import token +from libs.token import extract_access_token, extract_webapp_access_token, set_csrf_token_to_cookie class MockRequest: @@ -23,3 +28,35 @@ def test_extract_access_token(): for request, expected_console, expected_webapp in test_cases: assert extract_access_token(request) == expected_console # pyright: ignore[reportArgumentType] assert extract_webapp_access_token(request) == expected_webapp # pyright: ignore[reportArgumentType] + + +def test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch): + monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) + monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) + monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", "", raising=False) + + assert token._real_cookie_name("csrf_token") == "__Host-csrf_token" + + +def test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch): + monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) + monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) + monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", ".example.com", raising=False) + + assert token._real_cookie_name("csrf_token") == "csrf_token" + + +def test_set_csrf_cookie_includes_domain_when_configured(monkeypatch): + monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) + monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) + monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", ".example.com", raising=False) + + response = Response() + request = MagicMock() + + set_csrf_token_to_cookie(request, response, "abc123") + + cookies = response.headers.getlist("Set-Cookie") + assert any("csrf_token=abc123" in c for c in cookies) + assert any("Domain=example.com" in c for c in cookies) + assert all("__Host-" not in c for c in cookies) diff --git a/docker/.env.example b/docker/.env.example index e47bea2ff9..672d3d9836 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -348,6 +348,11 @@ WEB_API_CORS_ALLOW_ORIGINS=* # Specifies the allowed origins for cross-origin requests to the console API, # e.g. https://cloud.dify.ai or * for all origins. CONSOLE_CORS_ALLOW_ORIGINS=* +# Set COOKIE_DOMAIN when the console frontend and API are on different subdomains. +# Provide the registrable domain (e.g. example.com); leading dots are optional. +COOKIE_DOMAIN= +# The frontend reads NEXT_PUBLIC_COOKIE_DOMAIN to align cookie handling with the API. 
+NEXT_PUBLIC_COOKIE_DOMAIN= # ------------------------------ # File Storage Configuration diff --git a/docker/docker-compose-template.yaml b/docker/docker-compose-template.yaml index 886335a96b..fd63f5f00c 100644 --- a/docker/docker-compose-template.yaml +++ b/docker/docker-compose-template.yaml @@ -81,6 +81,7 @@ services: environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} APP_API_URL: ${APP_API_URL:-} + NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-} SENTRY_DSN: ${WEB_SENTRY_DSN:-} NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0} TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 606d5ec58f..1b4012b446 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -99,6 +99,8 @@ x-shared-env: &shared-api-worker-env CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1} WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*} CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*} + COOKIE_DOMAIN: ${COOKIE_DOMAIN:-} + NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-} STORAGE_TYPE: ${STORAGE_TYPE:-opendal} OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs} OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage} @@ -691,6 +693,7 @@ services: environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} APP_API_URL: ${APP_API_URL:-} + NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-} SENTRY_DSN: ${WEB_SENTRY_DSN:-} NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0} TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} diff --git a/web/.env.example b/web/.env.example index 4c5c8641e0..5bfcc9dac0 100644 --- a/web/.env.example +++ b/web/.env.example @@ -34,6 +34,9 @@ NEXT_PUBLIC_CSP_WHITELIST= # Default is not allow to embed into iframe to prevent Clickjacking: https://owasp.org/www-community/attacks/Clickjacking NEXT_PUBLIC_ALLOW_EMBED= +# Shared cookie domain when console UI and API use different subdomains (e.g. example.com) +NEXT_PUBLIC_COOKIE_DOMAIN= + # Allow rendering unsafe URLs which have "data:" scheme. NEXT_PUBLIC_ALLOW_UNSAFE_DATA_SCHEME=false diff --git a/web/config/index.ts b/web/config/index.ts index 158d9976fc..4e98182c0e 100644 --- a/web/config/index.ts +++ b/web/config/index.ts @@ -144,7 +144,9 @@ export const getMaxToken = (modelId: string) => { export const LOCALE_COOKIE_NAME = 'locale' +const COOKIE_DOMAIN = (process.env.NEXT_PUBLIC_COOKIE_DOMAIN || '').trim() export const CSRF_COOKIE_NAME = () => { + if (COOKIE_DOMAIN) return 'csrf_token' const isSecure = API_PREFIX.startsWith('https://') return isSecure ? 
'__Host-csrf_token' : 'csrf_token' } From 0e62a66cc2f6ad4038f065de9ec7f743085f1f8f Mon Sep 17 00:00:00 2001 From: Wu Tianwei <30284043+WTW0313@users.noreply.github.com> Date: Tue, 28 Oct 2025 10:22:16 +0800 Subject: [PATCH 15/15] feat: Introduce RAG tool recommendations and refactor related components for improved plugin management (#27259) Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../hooks/use-refresh-plugin-list.tsx | 5 +- .../plugin-auth/hooks/use-credential.ts | 9 +- .../components/plugins/plugin-auth/types.ts | 3 + .../workflow/block-selector/all-tools.tsx | 4 +- .../market-place-plugin/list.tsx | 2 +- .../index.tsx} | 41 +++---- .../rag-tool-recommendations/list.tsx | 102 ++++++++++++++++++ .../uninstalled-item.tsx | 63 +++++++++++ .../workflow/block-selector/tools.tsx | 4 +- .../workflow/hooks/use-checklist.ts | 25 +++-- .../hooks/use-fetch-workflow-inspect-vars.ts | 24 +++-- .../hooks/use-inspect-vars-crud-common.ts | 27 +++-- .../workflow/hooks/use-nodes-meta-data.ts | 23 ++-- .../workflow/hooks/use-tool-icon.ts | 38 ++++--- .../workflow/hooks/use-workflow-search.tsx | 23 ++-- .../workflow/hooks/use-workflow-variables.ts | 44 ++++---- .../components/workflow/hooks/use-workflow.ts | 53 +-------- web/app/components/workflow/index.tsx | 30 +++--- .../_base/components/workflow-panel/index.tsx | 7 +- .../nodes/_base/hooks/use-one-step-run.ts | 27 +++-- .../condition-list/condition-item.tsx | 35 +++--- .../workflow/nodes/iteration/use-config.ts | 24 +++-- .../workflow/nodes/loop/use-config.ts | 35 ++++-- .../extract-parameter/import-from-tool.tsx | 18 ++-- .../workflow/nodes/tool/use-config.ts | 50 +++++---- .../workflow/store/workflow/tool-slice.ts | 19 ---- web/app/components/workflow/types.ts | 11 +- web/i18n/en-US/pipeline.ts | 2 +- web/i18n/ja-JP/pipeline.ts | 2 +- web/i18n/zh-Hans/pipeline.ts | 2 +- web/service/use-plugins.ts | 14 ++- web/service/use-tools.ts | 12 +++ 32 files changed, 490 insertions(+), 288 deletions(-) rename web/app/components/workflow/block-selector/{rag-tool-suggestions.tsx => rag-tool-recommendations/index.tsx} (69%) create mode 100644 web/app/components/workflow/block-selector/rag-tool-recommendations/list.tsx create mode 100644 web/app/components/workflow/block-selector/rag-tool-recommendations/uninstalled-item.tsx diff --git a/web/app/components/plugins/install-plugin/hooks/use-refresh-plugin-list.tsx b/web/app/components/plugins/install-plugin/hooks/use-refresh-plugin-list.tsx index 024444cd6a..7c3ab29c49 100644 --- a/web/app/components/plugins/install-plugin/hooks/use-refresh-plugin-list.tsx +++ b/web/app/components/plugins/install-plugin/hooks/use-refresh-plugin-list.tsx @@ -2,7 +2,7 @@ import { useModelList } from '@/app/components/header/account-setting/model-prov import { ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations' import { useProviderContext } from '@/context/provider-context' import { useInvalidateInstalledPluginList } from '@/service/use-plugins' -import { useInvalidateAllBuiltInTools, useInvalidateAllToolProviders } from '@/service/use-tools' +import { useInvalidateAllBuiltInTools, useInvalidateAllToolProviders, useInvalidateRAGRecommendedPlugins } from '@/service/use-tools' import { useInvalidateStrategyProviders } from '@/service/use-strategy' import type { Plugin, PluginDeclaration, PluginManifestInMarket } from '../../types' import { PluginType } from '../../types' @@ -23,6 +23,8 @@ const useRefreshPluginList = () => { const invalidateDataSourceListAuth = 
useInvalidDataSourceListAuth() const invalidateStrategyProviders = useInvalidateStrategyProviders() + + const invalidateRAGRecommendedPlugins = useInvalidateRAGRecommendedPlugins() return { refreshPluginList: (manifest?: PluginManifestInMarket | Plugin | PluginDeclaration | null, refreshAllType?: boolean) => { // installed list @@ -32,6 +34,7 @@ const useRefreshPluginList = () => { if ((manifest && PluginType.tool.includes(manifest.category)) || refreshAllType) { invalidateAllToolProviders() invalidateAllBuiltInTools() + invalidateRAGRecommendedPlugins() // TODO: update suggested tools. It's a function in hook useMarketplacePlugins,handleUpdatePlugins } diff --git a/web/app/components/plugins/plugin-auth/hooks/use-credential.ts b/web/app/components/plugins/plugin-auth/hooks/use-credential.ts index 5a7a497ad9..9c342f2ced 100644 --- a/web/app/components/plugins/plugin-auth/hooks/use-credential.ts +++ b/web/app/components/plugins/plugin-auth/hooks/use-credential.ts @@ -15,6 +15,7 @@ import { import { useGetApi } from './use-get-api' import type { PluginPayload } from '../types' import type { CredentialTypeEnum } from '../types' +import { useInvalidToolsByType } from '@/service/use-tools' export const useGetPluginCredentialInfoHook = (pluginPayload: PluginPayload, enable?: boolean) => { const apiMap = useGetApi(pluginPayload) @@ -29,8 +30,14 @@ export const useDeletePluginCredentialHook = (pluginPayload: PluginPayload) => { export const useInvalidPluginCredentialInfoHook = (pluginPayload: PluginPayload) => { const apiMap = useGetApi(pluginPayload) + const invalidPluginCredentialInfo = useInvalidPluginCredentialInfo(apiMap.getCredentialInfo) + const providerType = pluginPayload.providerType + const invalidToolsByType = useInvalidToolsByType(providerType) - return useInvalidPluginCredentialInfo(apiMap.getCredentialInfo) + return () => { + invalidPluginCredentialInfo() + invalidToolsByType() + } } export const useSetPluginDefaultCredentialHook = (pluginPayload: PluginPayload) => { diff --git a/web/app/components/plugins/plugin-auth/types.ts b/web/app/components/plugins/plugin-auth/types.ts index 6366c80de3..fb23269b4b 100644 --- a/web/app/components/plugins/plugin-auth/types.ts +++ b/web/app/components/plugins/plugin-auth/types.ts @@ -1,3 +1,5 @@ +import type { CollectionType } from '../../tools/types' + export type { AddApiKeyButtonProps } from './authorize/add-api-key-button' export type { AddOAuthButtonProps } from './authorize/add-oauth-button' @@ -10,6 +12,7 @@ export enum AuthCategory { export type PluginPayload = { category: AuthCategory provider: string + providerType: CollectionType | string } export enum CredentialTypeEnum { diff --git a/web/app/components/workflow/block-selector/all-tools.tsx b/web/app/components/workflow/block-selector/all-tools.tsx index 6a2e07a411..7db8b9acf5 100644 --- a/web/app/components/workflow/block-selector/all-tools.tsx +++ b/web/app/components/workflow/block-selector/all-tools.tsx @@ -25,7 +25,7 @@ import PluginList, { type ListProps } from '@/app/components/workflow/block-sele import { PluginType } from '../../plugins/types' import { useMarketplacePlugins } from '../../plugins/marketplace/hooks' import { useGlobalPublicStore } from '@/context/global-public-context' -import RAGToolSuggestions from './rag-tool-suggestions' +import RAGToolRecommendations from './rag-tool-recommendations' type AllToolsProps = { className?: string @@ -148,7 +148,7 @@ const AllTools = ({ onScroll={pluginRef.current?.handleScroll} > {isShowRAGRecommendations && ( - > } -const 
RAGToolSuggestions: React.FC = ({ +const RAGToolRecommendations = ({ viewType, onSelect, onTagsChange, -}) => { +}: RAGToolRecommendationsProps) => { const { t } = useTranslation() const { data: ragRecommendedPlugins, + isLoading: isLoadingRAGRecommendedPlugins, isFetching: isFetchingRAGRecommendedPlugins, } = useRAGRecommendedPlugins() const recommendedPlugins = useMemo(() => { if (ragRecommendedPlugins) - return [...ragRecommendedPlugins.installed_recommended_plugins] + return ragRecommendedPlugins.installed_recommended_plugins + return [] + }, [ragRecommendedPlugins]) + + const unInstalledPlugins = useMemo(() => { + if (ragRecommendedPlugins) + return (ragRecommendedPlugins.uninstalled_recommended_plugins).map(getFormattedPlugin) return [] }, [ragRecommendedPlugins]) @@ -48,15 +55,16 @@ const RAGToolSuggestions: React.FC = ({
{t('pipeline.ragToolSuggestions.title')}
- {isFetchingRAGRecommendedPlugins && ( + {/* Show the loading state only on the initial load */} + {isLoadingRAGRecommendedPlugins && (
)} - {!isFetchingRAGRecommendedPlugins && recommendedPlugins.length === 0 && ( + {!isFetchingRAGRecommendedPlugins && recommendedPlugins.length === 0 && unInstalledPlugins.length === 0 && (

= ({ />

)} - {!isFetchingRAGRecommendedPlugins && recommendedPlugins.length > 0 && ( + {(recommendedPlugins.length > 0 || unInstalledPlugins.length > 0) && ( <> -
= ({ ) } -export default React.memo(RAGToolSuggestions) +export default React.memo(RAGToolRecommendations) diff --git a/web/app/components/workflow/block-selector/rag-tool-recommendations/list.tsx b/web/app/components/workflow/block-selector/rag-tool-recommendations/list.tsx new file mode 100644 index 0000000000..19378caf48 --- /dev/null +++ b/web/app/components/workflow/block-selector/rag-tool-recommendations/list.tsx @@ -0,0 +1,102 @@ +import { + useMemo, + useRef, +} from 'react' +import type { BlockEnum, ToolWithProvider } from '../../types' +import type { ToolDefaultValue } from '../types' +import { ViewType } from '../view-type-select' +import { useGetLanguage } from '@/context/i18n' +import { groupItems } from '../index-bar' +import cn from '@/utils/classnames' +import ToolListTreeView from '../tool/tool-list-tree-view/list' +import ToolListFlatView from '../tool/tool-list-flat-view/list' +import UninstalledItem from './uninstalled-item' +import type { Plugin } from '@/app/components/plugins/types' + +type ListProps = { + onSelect: (type: BlockEnum, tool?: ToolDefaultValue) => void + tools: ToolWithProvider[] + viewType: ViewType + unInstalledPlugins: Plugin[] + className?: string +} + +const List = ({ + onSelect, + tools, + viewType, + unInstalledPlugins, + className, +}: ListProps) => { + const language = useGetLanguage() + const isFlatView = viewType === ViewType.flat + + const { letters, groups: withLetterAndGroupViewToolsData } = groupItems(tools, tool => tool.label[language][0]) + const treeViewToolsData = useMemo(() => { + const result: Record = {} + Object.keys(withLetterAndGroupViewToolsData).forEach((letter) => { + Object.keys(withLetterAndGroupViewToolsData[letter]).forEach((groupName) => { + if (!result[groupName]) + result[groupName] = [] + result[groupName].push(...withLetterAndGroupViewToolsData[letter][groupName]) + }) + }) + return result + }, [withLetterAndGroupViewToolsData]) + + const listViewToolData = useMemo(() => { + const result: ToolWithProvider[] = [] + letters.forEach((letter) => { + Object.keys(withLetterAndGroupViewToolsData[letter]).forEach((groupName) => { + result.push(...withLetterAndGroupViewToolsData[letter][groupName].map((item) => { + return { + ...item, + letter, + } + })) + }) + }) + + return result + }, [withLetterAndGroupViewToolsData, letters]) + + const toolRefs = useRef({}) + + return ( +
+ {!!tools.length && ( + isFlatView ? ( + + ) : ( + + ) + )} + { + unInstalledPlugins.map((item) => { + return ( + + ) + }) + } +
+ ) +} + +export default List diff --git a/web/app/components/workflow/block-selector/rag-tool-recommendations/uninstalled-item.tsx b/web/app/components/workflow/block-selector/rag-tool-recommendations/uninstalled-item.tsx new file mode 100644 index 0000000000..98395ec25a --- /dev/null +++ b/web/app/components/workflow/block-selector/rag-tool-recommendations/uninstalled-item.tsx @@ -0,0 +1,63 @@ +'use client' +import React from 'react' +import { useContext } from 'use-context-selector' +import { useTranslation } from 'react-i18next' +import type { Plugin } from '@/app/components/plugins/types' +import InstallFromMarketplace from '@/app/components/plugins/install-plugin/install-from-marketplace' +import I18n from '@/context/i18n' +import { useBoolean } from 'ahooks' +import { BlockEnum } from '../../types' +import BlockIcon from '../../block-icon' + +type UninstalledItemProps = { + payload: Plugin +} + +const UninstalledItem = ({ + payload, +}: UninstalledItemProps) => { + const { t } = useTranslation() + const { locale } = useContext(I18n) + + const getLocalizedText = (obj: Record | undefined) => + obj?.[locale] || obj?.['en-US'] || obj?.en_US || '' + const [isShowInstallModal, { + setTrue: showInstallModal, + setFalse: hideInstallModal, + }] = useBoolean(false) + + return ( +
+ +
+
+ + {getLocalizedText(payload.label)} + + + {payload.org} + +
+
+ {t('plugin.installAction')} +
+ {isShowInstallModal && ( + + )} +
+
+ ) +} +export default React.memo(UninstalledItem) diff --git a/web/app/components/workflow/block-selector/tools.tsx b/web/app/components/workflow/block-selector/tools.tsx index feb34d2651..71ed4092a3 100644 --- a/web/app/components/workflow/block-selector/tools.tsx +++ b/web/app/components/workflow/block-selector/tools.tsx @@ -30,7 +30,7 @@ type ToolsProps = { canChooseMCPTool?: boolean isShowRAGRecommendations?: boolean } -const Blocks = ({ +const Tools = ({ onSelect, canNotSelectMultiple, onSelectMultiple, @@ -146,4 +146,4 @@ const Blocks = ({ ) } -export default memo(Blocks) +export default memo(Tools) diff --git a/web/app/components/workflow/hooks/use-checklist.ts b/web/app/components/workflow/hooks/use-checklist.ts index 32945a8927..d29827f273 100644 --- a/web/app/components/workflow/hooks/use-checklist.ts +++ b/web/app/components/workflow/hooks/use-checklist.ts @@ -45,14 +45,19 @@ import { getNodeUsedVars, isSpecialVar } from '../nodes/_base/components/variabl import { useModelList } from '@/app/components/header/account-setting/model-provider-page/hooks' import { ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations' import type { KnowledgeBaseNodeType } from '../nodes/knowledge-base/types' +import { + useAllBuiltInTools, + useAllCustomTools, + useAllWorkflowTools, +} from '@/service/use-tools' export const useChecklist = (nodes: Node[], edges: Edge[]) => { const { t } = useTranslation() const language = useGetLanguage() const { nodesMap: nodesExtraData } = useNodesMetaData() - const buildInTools = useStore(s => s.buildInTools) - const customTools = useStore(s => s.customTools) - const workflowTools = useStore(s => s.workflowTools) + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() const dataSourceList = useStore(s => s.dataSourceList) const { data: strategyProviders } = useStrategyProviders() const datasetsDetail = useDatasetsDetailStore(s => s.datasetsDetail) @@ -104,7 +109,7 @@ export const useChecklist = (nodes: Node[], edges: Edge[]) => { let usedVars: ValueSelector[] = [] if (node.data.type === BlockEnum.Tool) - moreDataForCheckValid = getToolCheckParams(node.data as ToolNodeType, buildInTools, customTools, workflowTools, language) + moreDataForCheckValid = getToolCheckParams(node.data as ToolNodeType, buildInTools || [], customTools || [], workflowTools || [], language) if (node.data.type === BlockEnum.DataSource) moreDataForCheckValid = getDataSourceCheckParams(node.data as DataSourceNodeType, dataSourceList || [], language) @@ -194,6 +199,9 @@ export const useChecklistBeforePublish = () => { const { getNodesAvailableVarList } = useGetNodesAvailableVarList() const { data: embeddingModelList } = useModelList(ModelTypeEnum.textEmbedding) const { data: rerankModelList } = useModelList(ModelTypeEnum.rerank) + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() const getCheckData = useCallback((data: CommonNodeType<{}>, datasets: DataSet[]) => { let checkData = data @@ -221,7 +229,7 @@ export const useChecklistBeforePublish = () => { } as CommonNodeType } return checkData - }, []) + }, [embeddingModelList, rerankModelList]) const handleCheckBeforePublish = useCallback(async () => { const { @@ -230,9 +238,6 @@ export const useChecklistBeforePublish = () => { } = store.getState() const { dataSourceList, - buildInTools, - 
customTools, - workflowTools, } = workflowStore.getState() const nodes = getNodes() const filteredNodes = nodes.filter(node => node.type === CUSTOM_NODE) @@ -275,7 +280,7 @@ export const useChecklistBeforePublish = () => { let moreDataForCheckValid let usedVars: ValueSelector[] = [] if (node.data.type === BlockEnum.Tool) - moreDataForCheckValid = getToolCheckParams(node.data as ToolNodeType, buildInTools, customTools, workflowTools, language) + moreDataForCheckValid = getToolCheckParams(node.data as ToolNodeType, buildInTools || [], customTools || [], workflowTools || [], language) if (node.data.type === BlockEnum.DataSource) moreDataForCheckValid = getDataSourceCheckParams(node.data as DataSourceNodeType, dataSourceList || [], language) @@ -340,7 +345,7 @@ export const useChecklistBeforePublish = () => { } return true - }, [store, notify, t, language, nodesExtraData, strategyProviders, updateDatasetsDetail, getCheckData, getStartNodes, workflowStore]) + }, [store, notify, t, language, nodesExtraData, strategyProviders, updateDatasetsDetail, getCheckData, getStartNodes, workflowStore, buildInTools, customTools, workflowTools]) return { handleCheckBeforePublish, diff --git a/web/app/components/workflow/hooks/use-fetch-workflow-inspect-vars.ts b/web/app/components/workflow/hooks/use-fetch-workflow-inspect-vars.ts index 1527fb82e2..60f839b93d 100644 --- a/web/app/components/workflow/hooks/use-fetch-workflow-inspect-vars.ts +++ b/web/app/components/workflow/hooks/use-fetch-workflow-inspect-vars.ts @@ -11,6 +11,12 @@ import useMatchSchemaType, { getMatchedSchemaType } from '../nodes/_base/compone import { toNodeOutputVars } from '../nodes/_base/components/variable/utils' import type { SchemaTypeDefinition } from '@/service/use-common' import { useCallback } from 'react' +import { + useAllBuiltInTools, + useAllCustomTools, + useAllMCPTools, + useAllWorkflowTools, +} from '@/service/use-tools' type Params = { flowType: FlowType @@ -27,17 +33,17 @@ export const useSetWorkflowVarsWithValue = ({ const invalidateSysVarValues = useInvalidateSysVarValues(flowType, flowId) const { handleCancelAllNodeSuccessStatus } = useNodesInteractionsWithoutSync() const { schemaTypeDefinitions } = useMatchSchemaType() - const buildInTools = useStore(s => s.buildInTools) - const customTools = useStore(s => s.customTools) - const workflowTools = useStore(s => s.workflowTools) - const mcpTools = useStore(s => s.mcpTools) + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() + const { data: mcpTools } = useAllMCPTools() const dataSourceList = useStore(s => s.dataSourceList) const allPluginInfoList = { - buildInTools, - customTools, - workflowTools, - mcpTools, - dataSourceList: dataSourceList ?? 
[], + buildInTools: buildInTools || [], + customTools: customTools || [], + workflowTools: workflowTools || [], + mcpTools: mcpTools || [], + dataSourceList: dataSourceList || [], } const setInspectVarsToStore = (inspectVars: VarInInspect[], passedInAllPluginInfoList?: Record, passedInSchemaTypeDefinitions?: SchemaTypeDefinition[]) => { diff --git a/web/app/components/workflow/hooks/use-inspect-vars-crud-common.ts b/web/app/components/workflow/hooks/use-inspect-vars-crud-common.ts index f35f0c7dab..6b7acd0a85 100644 --- a/web/app/components/workflow/hooks/use-inspect-vars-crud-common.ts +++ b/web/app/components/workflow/hooks/use-inspect-vars-crud-common.ts @@ -18,6 +18,12 @@ import type { FlowType } from '@/types/common' import useFLow from '@/service/use-flow' import { useStoreApi } from 'reactflow' import type { SchemaTypeDefinition } from '@/service/use-common' +import { + useAllBuiltInTools, + useAllCustomTools, + useAllMCPTools, + useAllWorkflowTools, +} from '@/service/use-tools' type Params = { flowId: string @@ -51,6 +57,11 @@ export const useInspectVarsCrudCommon = ({ const { mutateAsync: doEditInspectorVar } = useEditInspectorVar(flowId) const { handleCancelNodeSuccessStatus } = useNodesInteractionsWithoutSync() const { handleEdgeCancelRunningStatus } = useEdgesInteractionsWithoutSync() + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() + const { data: mcpTools } = useAllMCPTools() + const getNodeInspectVars = useCallback((nodeId: string) => { const { nodesWithInspectVars } = workflowStore.getState() const node = nodesWithInspectVars.find(node => node.nodeId === nodeId) @@ -98,10 +109,6 @@ export const useInspectVarsCrudCommon = ({ const fetchInspectVarValue = useCallback(async (selector: ValueSelector, schemaTypeDefinitions: SchemaTypeDefinition[]) => { const { setNodeInspectVars, - buildInTools, - customTools, - workflowTools, - mcpTools, dataSourceList, } = workflowStore.getState() const nodeId = selector[0] @@ -119,11 +126,11 @@ export const useInspectVarsCrudCommon = ({ const nodeArr = getNodes() const currentNode = nodeArr.find(node => node.id === nodeId) const allPluginInfoList = { - buildInTools, - customTools, - workflowTools, - mcpTools, - dataSourceList: dataSourceList ?? 
[], + buildInTools: buildInTools || [], + customTools: customTools || [], + workflowTools: workflowTools || [], + mcpTools: mcpTools || [], + dataSourceList: dataSourceList || [], } const currentNodeOutputVars = toNodeOutputVars([currentNode], false, () => true, [], [], [], allPluginInfoList, schemaTypeDefinitions) const vars = await fetchNodeInspectVars(flowType, flowId, nodeId) @@ -135,7 +142,7 @@ export const useInspectVarsCrudCommon = ({ } }) setNodeInspectVars(nodeId, varsWithSchemaType) - }, [workflowStore, flowType, flowId, invalidateSysVarValues, invalidateConversationVarValues]) + }, [workflowStore, flowType, flowId, invalidateSysVarValues, invalidateConversationVarValues, buildInTools, customTools, workflowTools, mcpTools]) // after last run would call this const appendNodeInspectVars = useCallback((nodeId: string, payload: VarInInspect[], allNodes: Node[]) => { diff --git a/web/app/components/workflow/hooks/use-nodes-meta-data.ts b/web/app/components/workflow/hooks/use-nodes-meta-data.ts index cfeb41de34..fd63f23590 100644 --- a/web/app/components/workflow/hooks/use-nodes-meta-data.ts +++ b/web/app/components/workflow/hooks/use-nodes-meta-data.ts @@ -7,6 +7,11 @@ import { CollectionType } from '@/app/components/tools/types' import { useStore } from '@/app/components/workflow/store' import { canFindTool } from '@/utils' import { useGetLanguage } from '@/context/i18n' +import { + useAllBuiltInTools, + useAllCustomTools, + useAllWorkflowTools, +} from '@/service/use-tools' export const useNodesMetaData = () => { const availableNodesMetaData = useHooksStore(s => s.availableNodesMetaData) @@ -21,9 +26,9 @@ export const useNodesMetaData = () => { export const useNodeMetaData = (node: Node) => { const language = useGetLanguage() - const buildInTools = useStore(s => s.buildInTools) - const customTools = useStore(s => s.customTools) - const workflowTools = useStore(s => s.workflowTools) + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() const dataSourceList = useStore(s => s.dataSourceList) const availableNodesMetaData = useNodesMetaData() const { data } = node @@ -34,10 +39,10 @@ export const useNodeMetaData = (node: Node) => { if (data.type === BlockEnum.Tool) { if (data.provider_type === CollectionType.builtIn) - return buildInTools.find(toolWithProvider => canFindTool(toolWithProvider.id, data.provider_id))?.author + return buildInTools?.find(toolWithProvider => canFindTool(toolWithProvider.id, data.provider_id))?.author if (data.provider_type === CollectionType.workflow) - return workflowTools.find(toolWithProvider => toolWithProvider.id === data.provider_id)?.author - return customTools.find(toolWithProvider => toolWithProvider.id === data.provider_id)?.author + return workflowTools?.find(toolWithProvider => toolWithProvider.id === data.provider_id)?.author + return customTools?.find(toolWithProvider => toolWithProvider.id === data.provider_id)?.author } return nodeMetaData?.metaData.author }, [data, buildInTools, customTools, workflowTools, nodeMetaData, dataSourceList]) @@ -47,10 +52,10 @@ export const useNodeMetaData = (node: Node) => { return dataSourceList?.find(dataSource => dataSource.plugin_id === data.plugin_id)?.description[language] if (data.type === BlockEnum.Tool) { if (data.provider_type === CollectionType.builtIn) - return buildInTools.find(toolWithProvider => canFindTool(toolWithProvider.id, data.provider_id))?.description[language] + return 
buildInTools?.find(toolWithProvider => canFindTool(toolWithProvider.id, data.provider_id))?.description[language] if (data.provider_type === CollectionType.workflow) - return workflowTools.find(toolWithProvider => toolWithProvider.id === data.provider_id)?.description[language] - return customTools.find(toolWithProvider => toolWithProvider.id === data.provider_id)?.description[language] + return workflowTools?.find(toolWithProvider => toolWithProvider.id === data.provider_id)?.description[language] + return customTools?.find(toolWithProvider => toolWithProvider.id === data.provider_id)?.description[language] } return nodeMetaData?.metaData.description }, [data, buildInTools, customTools, workflowTools, nodeMetaData, dataSourceList, language]) diff --git a/web/app/components/workflow/hooks/use-tool-icon.ts b/web/app/components/workflow/hooks/use-tool-icon.ts index 734a7da390..32d65365db 100644 --- a/web/app/components/workflow/hooks/use-tool-icon.ts +++ b/web/app/components/workflow/hooks/use-tool-icon.ts @@ -14,12 +14,18 @@ import { } from '../store' import { CollectionType } from '@/app/components/tools/types' import { canFindTool } from '@/utils' +import { + useAllBuiltInTools, + useAllCustomTools, + useAllMCPTools, + useAllWorkflowTools, +} from '@/service/use-tools' export const useToolIcon = (data?: Node['data']) => { - const buildInTools = useStore(s => s.buildInTools) - const customTools = useStore(s => s.customTools) - const workflowTools = useStore(s => s.workflowTools) - const mcpTools = useStore(s => s.mcpTools) + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() + const { data: mcpTools } = useAllMCPTools() const dataSourceList = useStore(s => s.dataSourceList) // const a = useStore(s => s.data) const toolIcon = useMemo(() => { @@ -27,15 +33,15 @@ export const useToolIcon = (data?: Node['data']) => { return '' if (data.type === BlockEnum.Tool) { // eslint-disable-next-line sonarjs/no-dead-store - let targetTools = buildInTools + let targetTools = buildInTools || [] if (data.provider_type === CollectionType.builtIn) - targetTools = buildInTools + targetTools = buildInTools || [] else if (data.provider_type === CollectionType.custom) - targetTools = customTools + targetTools = customTools || [] else if (data.provider_type === CollectionType.mcp) - targetTools = mcpTools + targetTools = mcpTools || [] else - targetTools = workflowTools + targetTools = workflowTools || [] return targetTools.find(toolWithProvider => canFindTool(toolWithProvider.id, data.provider_id))?.icon } if (data.type === BlockEnum.DataSource) @@ -46,24 +52,24 @@ export const useToolIcon = (data?: Node['data']) => { } export const useGetToolIcon = () => { + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() const workflowStore = useWorkflowStore() const getToolIcon = useCallback((data: Node['data']) => { const { - buildInTools, - customTools, - workflowTools, dataSourceList, } = workflowStore.getState() if (data.type === BlockEnum.Tool) { // eslint-disable-next-line sonarjs/no-dead-store - let targetTools = buildInTools + let targetTools = buildInTools || [] if (data.provider_type === CollectionType.builtIn) - targetTools = buildInTools + targetTools = buildInTools || [] else if (data.provider_type === CollectionType.custom) - targetTools = customTools + targetTools = customTools || [] else - 
targetTools = workflowTools + targetTools = workflowTools || [] return targetTools.find(toolWithProvider => canFindTool(toolWithProvider.id, data.provider_id))?.icon } diff --git a/web/app/components/workflow/hooks/use-workflow-search.tsx b/web/app/components/workflow/hooks/use-workflow-search.tsx index 095ae4577a..68ad9873f9 100644 --- a/web/app/components/workflow/hooks/use-workflow-search.tsx +++ b/web/app/components/workflow/hooks/use-workflow-search.tsx @@ -8,11 +8,16 @@ import { workflowNodesAction } from '@/app/components/goto-anything/actions/work import BlockIcon from '@/app/components/workflow/block-icon' import { setupNodeSelectionListener } from '../utils/node-navigation' import { BlockEnum } from '../types' -import { useStore } from '../store' import type { Emoji } from '@/app/components/tools/types' import { CollectionType } from '@/app/components/tools/types' import { canFindTool } from '@/utils' import type { LLMNodeType } from '../nodes/llm/types' +import { + useAllBuiltInTools, + useAllCustomTools, + useAllMCPTools, + useAllWorkflowTools, +} from '@/service/use-tools' /** * Hook to register workflow nodes search functionality @@ -22,23 +27,23 @@ export const useWorkflowSearch = () => { const { handleNodeSelect } = useNodesInteractions() // Filter and process nodes for search - const buildInTools = useStore(s => s.buildInTools) - const customTools = useStore(s => s.customTools) - const workflowTools = useStore(s => s.workflowTools) - const mcpTools = useStore(s => s.mcpTools) + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() + const { data: mcpTools } = useAllMCPTools() // Extract tool icon logic - clean separation of concerns const getToolIcon = useCallback((nodeData: CommonNodeType): string | Emoji | undefined => { if (nodeData?.type !== BlockEnum.Tool) return undefined const toolCollections: Record = { - [CollectionType.builtIn]: buildInTools, - [CollectionType.custom]: customTools, - [CollectionType.mcp]: mcpTools, + [CollectionType.builtIn]: buildInTools || [], + [CollectionType.custom]: customTools || [], + [CollectionType.mcp]: mcpTools || [], } const targetTools = (nodeData.provider_type && toolCollections[nodeData.provider_type]) || workflowTools - return targetTools.find((tool: any) => canFindTool(tool.id, nodeData.provider_id))?.icon + return targetTools?.find((tool: any) => canFindTool(tool.id, nodeData.provider_id))?.icon }, [buildInTools, customTools, workflowTools, mcpTools]) // Extract model info logic - clean extraction diff --git a/web/app/components/workflow/hooks/use-workflow-variables.ts b/web/app/components/workflow/hooks/use-workflow-variables.ts index 8422a7fd0d..871937365a 100644 --- a/web/app/components/workflow/hooks/use-workflow-variables.ts +++ b/web/app/components/workflow/hooks/use-workflow-variables.ts @@ -10,20 +10,25 @@ import type { } from '@/app/components/workflow/types' import { useIsChatMode } from './use-workflow' import { useStoreApi } from 'reactflow' -import { useStore } from '@/app/components/workflow/store' import type { Type } from '../nodes/llm/types' import useMatchSchemaType from '../nodes/_base/components/variable/use-match-schema-type' +import { + useAllBuiltInTools, + useAllCustomTools, + useAllMCPTools, + useAllWorkflowTools, +} from '@/service/use-tools' export const useWorkflowVariables = () => { const { t } = useTranslation() const workflowStore = useWorkflowStore() const { schemaTypeDefinitions } = 
useMatchSchemaType() - const buildInTools = useStore(s => s.buildInTools) - const customTools = useStore(s => s.customTools) - const workflowTools = useStore(s => s.workflowTools) - const mcpTools = useStore(s => s.mcpTools) - const dataSourceList = useStore(s => s.dataSourceList) + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() + const { data: mcpTools } = useAllMCPTools() + const getNodeAvailableVars = useCallback(({ parentNode, beforeNodes, @@ -43,6 +48,7 @@ export const useWorkflowVariables = () => { conversationVariables, environmentVariables, ragPipelineVariables, + dataSourceList, } = workflowStore.getState() return toNodeAvailableVars({ parentNode, @@ -54,15 +60,15 @@ export const useWorkflowVariables = () => { ragVariables: ragPipelineVariables, filterVar, allPluginInfoList: { - buildInTools, - customTools, - workflowTools, - mcpTools, - dataSourceList: dataSourceList ?? [], + buildInTools: buildInTools || [], + customTools: customTools || [], + workflowTools: workflowTools || [], + mcpTools: mcpTools || [], + dataSourceList: dataSourceList || [], }, schemaTypeDefinitions, }) - }, [t, workflowStore, schemaTypeDefinitions, buildInTools]) + }, [t, workflowStore, schemaTypeDefinitions, buildInTools, customTools, workflowTools, mcpTools]) const getCurrentVariableType = useCallback(({ parentNode, @@ -87,10 +93,6 @@ export const useWorkflowVariables = () => { conversationVariables, environmentVariables, ragPipelineVariables, - buildInTools, - customTools, - workflowTools, - mcpTools, dataSourceList, } = workflowStore.getState() return getVarType({ @@ -105,16 +107,16 @@ export const useWorkflowVariables = () => { conversationVariables, ragVariables: ragPipelineVariables, allPluginInfoList: { - buildInTools, - customTools, - workflowTools, - mcpTools, + buildInTools: buildInTools || [], + customTools: customTools || [], + workflowTools: workflowTools || [], + mcpTools: mcpTools || [], dataSourceList: dataSourceList ?? [], }, schemaTypeDefinitions, preferSchemaType, }) - }, [workflowStore, getVarType, schemaTypeDefinitions]) + }, [workflowStore, getVarType, schemaTypeDefinitions, buildInTools, customTools, workflowTools, mcpTools]) return { getNodeAvailableVars, diff --git a/web/app/components/workflow/hooks/use-workflow.ts b/web/app/components/workflow/hooks/use-workflow.ts index 3f9f8106cf..66c499dc59 100644 --- a/web/app/components/workflow/hooks/use-workflow.ts +++ b/web/app/components/workflow/hooks/use-workflow.ts @@ -32,15 +32,9 @@ import { CUSTOM_NOTE_NODE } from '../note-node/constants' import { findUsedVarNodes, getNodeOutputVars, updateNodeVars } from '../nodes/_base/components/variable/utils' import { useAvailableBlocks } from './use-available-blocks' import { useStore as useAppStore } from '@/app/components/app/store' -import { - fetchAllBuiltInTools, - fetchAllCustomTools, - fetchAllMCPTools, - fetchAllWorkflowTools, -} from '@/service/tools' + import { CUSTOM_ITERATION_START_NODE } from '@/app/components/workflow/nodes/iteration-start/constants' import { CUSTOM_LOOP_START_NODE } from '@/app/components/workflow/nodes/loop-start/constants' -import { basePath } from '@/utils/var' import { useNodesMetaData } from '.' 
export const useIsChatMode = () => { @@ -416,51 +410,6 @@ export const useWorkflow = () => { } } -export const useFetchToolsData = () => { - const workflowStore = useWorkflowStore() - - const handleFetchAllTools = useCallback(async (type: string) => { - if (type === 'builtin') { - const buildInTools = await fetchAllBuiltInTools() - - if (basePath) { - buildInTools.forEach((item) => { - if (typeof item.icon == 'string' && !item.icon.includes(basePath)) - item.icon = `${basePath}${item.icon}` - }) - } - workflowStore.setState({ - buildInTools: buildInTools || [], - }) - } - if (type === 'custom') { - const customTools = await fetchAllCustomTools() - - workflowStore.setState({ - customTools: customTools || [], - }) - } - if (type === 'workflow') { - const workflowTools = await fetchAllWorkflowTools() - - workflowStore.setState({ - workflowTools: workflowTools || [], - }) - } - if (type === 'mcp') { - const mcpTools = await fetchAllMCPTools() - - workflowStore.setState({ - mcpTools: mcpTools || [], - }) - } - }, [workflowStore]) - - return { - handleFetchAllTools, - } -} - export const useWorkflowReadOnly = () => { const workflowStore = useWorkflowStore() const workflowRunningData = useStore(s => s.workflowRunningData) diff --git a/web/app/components/workflow/index.tsx b/web/app/components/workflow/index.tsx index b289cafefd..86c6bf153e 100644 --- a/web/app/components/workflow/index.tsx +++ b/web/app/components/workflow/index.tsx @@ -37,7 +37,6 @@ import { } from './types' import { useEdgesInteractions, - useFetchToolsData, useNodesInteractions, useNodesReadOnly, useNodesSyncDraft, @@ -92,6 +91,12 @@ import useMatchSchemaType from './nodes/_base/components/variable/use-match-sche import type { VarInInspect } from '@/types/workflow' import { fetchAllInspectVars } from '@/service/workflow' import cn from '@/utils/classnames' +import { + useAllBuiltInTools, + useAllCustomTools, + useAllMCPTools, + useAllWorkflowTools, +} from '@/service/use-tools' const Confirm = dynamic(() => import('@/app/components/base/confirm'), { ssr: false, @@ -242,13 +247,6 @@ export const Workflow: FC = memo(({ }) } }) - const { handleFetchAllTools } = useFetchToolsData() - useEffect(() => { - handleFetchAllTools('builtin') - handleFetchAllTools('custom') - handleFetchAllTools('workflow') - handleFetchAllTools('mcp') - }, [handleFetchAllTools]) const { handleNodeDragStart, @@ -299,10 +297,10 @@ export const Workflow: FC = memo(({ const { schemaTypeDefinitions } = useMatchSchemaType() const { fetchInspectVars } = useSetWorkflowVarsWithValue() - const buildInTools = useStore(s => s.buildInTools) - const customTools = useStore(s => s.customTools) - const workflowTools = useStore(s => s.workflowTools) - const mcpTools = useStore(s => s.mcpTools) + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() + const { data: mcpTools } = useAllMCPTools() const dataSourceList = useStore(s => s.dataSourceList) // buildInTools, customTools, workflowTools, mcpTools, dataSourceList const configsMap = useHooksStore(s => s.configsMap) @@ -323,10 +321,10 @@ export const Workflow: FC = memo(({ passInVars: true, vars, passedInAllPluginInfoList: { - buildInTools, - customTools, - workflowTools, - mcpTools, + buildInTools: buildInTools || [], + customTools: customTools || [], + workflowTools: workflowTools || [], + mcpTools: mcpTools || [], dataSourceList: dataSourceList ?? 
[], }, passedInSchemaTypeDefinitions: schemaTypeDefinitions, diff --git a/web/app/components/workflow/nodes/_base/components/workflow-panel/index.tsx b/web/app/components/workflow/nodes/_base/components/workflow-panel/index.tsx index 03b142ba43..29aebd4fd5 100644 --- a/web/app/components/workflow/nodes/_base/components/workflow-panel/index.tsx +++ b/web/app/components/workflow/nodes/_base/components/workflow-panel/index.tsx @@ -75,6 +75,7 @@ import { DataSourceClassification } from '@/app/components/workflow/nodes/data-s import { useModalContext } from '@/context/modal-context' import DataSourceBeforeRunForm from '@/app/components/workflow/nodes/data-source/before-run-form' import useInspectVarsCrud from '@/app/components/workflow/hooks/use-inspect-vars-crud' +import { useAllBuiltInTools } from '@/service/use-tools' const getCustomRunForm = (params: CustomRunFormProps): React.JSX.Element => { const nodeType = params.payload.type @@ -259,9 +260,9 @@ const BasePanel: FC = ({ return {} })() - const buildInTools = useStore(s => s.buildInTools) + const { data: buildInTools } = useAllBuiltInTools() const currCollection = useMemo(() => { - return buildInTools.find(item => canFindTool(item.id, data.provider_id)) + return buildInTools?.find(item => canFindTool(item.id, data.provider_id)) }, [buildInTools, data.provider_id]) const showPluginAuth = useMemo(() => { return data.type === BlockEnum.Tool && currCollection?.allow_delete @@ -450,6 +451,7 @@ const BasePanel: FC = ({ className='px-4 pb-2' pluginPayload={{ provider: currCollection?.name || '', + providerType: currCollection?.type || '', category: AuthCategory.tool, }} > @@ -461,6 +463,7 @@ const BasePanel: FC = ({ = { [BlockEnum.LLM]: checkLLMValid, @@ -133,21 +140,23 @@ const useOneStepRun = ({ const availableNodesIncludeParent = getBeforeNodesInSameBranchIncludeParent(id) const workflowStore = useWorkflowStore() const { schemaTypeDefinitions } = useMatchSchemaType() + + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() + const { data: mcpTools } = useAllMCPTools() + const getVar = (valueSelector: ValueSelector): Var | undefined => { const isSystem = valueSelector[0] === 'sys' const { - buildInTools, - customTools, - workflowTools, - mcpTools, dataSourceList, } = workflowStore.getState() const allPluginInfoList = { - buildInTools, - customTools, - workflowTools, - mcpTools, - dataSourceList: dataSourceList ?? 
[], + buildInTools: buildInTools || [], + customTools: customTools || [], + workflowTools: workflowTools || [], + mcpTools: mcpTools || [], + dataSourceList: dataSourceList || [], } const allOutputVars = toNodeOutputVars(availableNodes, isChatMode, undefined, undefined, conversationVariables, [], allPluginInfoList, schemaTypeDefinitions) diff --git a/web/app/components/workflow/nodes/if-else/components/condition-list/condition-item.tsx b/web/app/components/workflow/nodes/if-else/components/condition-list/condition-item.tsx index 9bcd4b9671..65dac6f5be 100644 --- a/web/app/components/workflow/nodes/if-else/components/condition-list/condition-item.tsx +++ b/web/app/components/workflow/nodes/if-else/components/condition-list/condition-item.tsx @@ -42,6 +42,12 @@ import BoolValue from '@/app/components/workflow/panel/chat-variable-panel/compo import { getVarType } from '@/app/components/workflow/nodes/_base/components/variable/utils' import { useIsChatMode } from '@/app/components/workflow/hooks/use-workflow' import useMatchSchemaType from '../../../_base/components/variable/use-match-schema-type' +import { + useAllBuiltInTools, + useAllCustomTools, + useAllMCPTools, + useAllWorkflowTools, +} from '@/service/use-tools' const optionNameI18NPrefix = 'workflow.nodes.ifElse.optionName' type ConditionItemProps = { @@ -91,15 +97,12 @@ const ConditionItem = ({ const [isHovered, setIsHovered] = useState(false) const [open, setOpen] = useState(false) + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() + const { data: mcpTools } = useAllMCPTools() + const workflowStore = useWorkflowStore() - const { - setControlPromptEditorRerenderKey, - buildInTools, - customTools, - mcpTools, - workflowTools, - dataSourceList, - } = workflowStore.getState() const doUpdateCondition = useCallback((newCondition: Condition) => { if (isSubVariableKey) @@ -213,6 +216,8 @@ const ConditionItem = ({ const handleVarChange = useCallback((valueSelector: ValueSelector, _varItem: Var) => { const { conversationVariables, + setControlPromptEditorRerenderKey, + dataSourceList, } = workflowStore.getState() const resolvedVarType = getVarType({ valueSelector, @@ -220,11 +225,11 @@ const ConditionItem = ({ availableNodes, isChatMode, allPluginInfoList: { - buildInTools, - customTools, - mcpTools, - workflowTools, - dataSourceList: dataSourceList ?? 
[], + buildInTools: buildInTools || [], + customTools: customTools || [], + mcpTools: mcpTools || [], + workflowTools: workflowTools || [], + dataSourceList: dataSourceList || [], }, schemaTypeDefinitions, }) @@ -241,12 +246,12 @@ const ConditionItem = ({ }) doUpdateCondition(newCondition) setOpen(false) - }, [condition, doUpdateCondition, availableNodes, isChatMode, setControlPromptEditorRerenderKey, schemaTypeDefinitions]) + }, [condition, doUpdateCondition, availableNodes, isChatMode, schemaTypeDefinitions, buildInTools, customTools, mcpTools, workflowTools]) const showBooleanInput = useMemo(() => { if(condition.varType === VarType.boolean) return true - // eslint-disable-next-line sonarjs/prefer-single-boolean-return + if(condition.varType === VarType.arrayBoolean && [ComparisonOperator.contains, ComparisonOperator.notContains].includes(condition.comparison_operator!)) return true return false diff --git a/web/app/components/workflow/nodes/iteration/use-config.ts b/web/app/components/workflow/nodes/iteration/use-config.ts index 9fd31d0484..2e47bb3740 100644 --- a/web/app/components/workflow/nodes/iteration/use-config.ts +++ b/web/app/components/workflow/nodes/iteration/use-config.ts @@ -15,6 +15,12 @@ import type { Item } from '@/app/components/base/select' import useInspectVarsCrud from '../../hooks/use-inspect-vars-crud' import { isEqual } from 'lodash-es' import { useStore } from '../../store' +import { + useAllBuiltInTools, + useAllCustomTools, + useAllMCPTools, + useAllWorkflowTools, +} from '@/service/use-tools' const useConfig = (id: string, payload: IterationNodeType) => { const { @@ -40,17 +46,17 @@ const useConfig = (id: string, payload: IterationNodeType) => { // output const { getIterationNodeChildren } = useWorkflow() const iterationChildrenNodes = getIterationNodeChildren(id) - const buildInTools = useStore(s => s.buildInTools) - const customTools = useStore(s => s.customTools) - const workflowTools = useStore(s => s.workflowTools) - const mcpTools = useStore(s => s.mcpTools) + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() + const { data: mcpTools } = useAllMCPTools() const dataSourceList = useStore(s => s.dataSourceList) const allPluginInfoList = { - buildInTools, - customTools, - workflowTools, - mcpTools, - dataSourceList: dataSourceList ?? 
[], + buildInTools: buildInTools || [], + customTools: customTools || [], + workflowTools: workflowTools || [], + mcpTools: mcpTools || [], + dataSourceList: dataSourceList || [], } const childrenNodeVars = toNodeOutputVars(iterationChildrenNodes, isChatMode, undefined, [], [], [], allPluginInfoList) diff --git a/web/app/components/workflow/nodes/loop/use-config.ts b/web/app/components/workflow/nodes/loop/use-config.ts index fcf437eb96..e8504fb5e9 100644 --- a/web/app/components/workflow/nodes/loop/use-config.ts +++ b/web/app/components/workflow/nodes/loop/use-config.ts @@ -15,9 +15,24 @@ import useNodeCrud from '../_base/hooks/use-node-crud' import { toNodeOutputVars } from '../_base/components/variable/utils' import { getOperators } from './utils' import { LogicalOperator } from './types' -import type { HandleAddCondition, HandleAddSubVariableCondition, HandleRemoveCondition, HandleToggleConditionLogicalOperator, HandleToggleSubVariableConditionLogicalOperator, HandleUpdateCondition, HandleUpdateSubVariableCondition, LoopNodeType } from './types' +import type { + HandleAddCondition, + HandleAddSubVariableCondition, + HandleRemoveCondition, + HandleToggleConditionLogicalOperator, + HandleToggleSubVariableConditionLogicalOperator, + HandleUpdateCondition, + HandleUpdateSubVariableCondition, + LoopNodeType, +} from './types' import useIsVarFileAttribute from './use-is-var-file-attribute' import { useStore } from '@/app/components/workflow/store' +import { + useAllBuiltInTools, + useAllCustomTools, + useAllMCPTools, + useAllWorkflowTools, +} from '@/service/use-tools' const useConfig = (id: string, payload: LoopNodeType) => { const { nodesReadOnly: readOnly } = useNodesReadOnly() @@ -38,17 +53,17 @@ const useConfig = (id: string, payload: LoopNodeType) => { // output const { getLoopNodeChildren } = useWorkflow() const loopChildrenNodes = [{ id, data: payload } as any, ...getLoopNodeChildren(id)] - const buildInTools = useStore(s => s.buildInTools) - const customTools = useStore(s => s.customTools) - const workflowTools = useStore(s => s.workflowTools) - const mcpTools = useStore(s => s.mcpTools) + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() + const { data: mcpTools } = useAllMCPTools() const dataSourceList = useStore(s => s.dataSourceList) const allPluginInfoList = { - buildInTools, - customTools, - workflowTools, - mcpTools, - dataSourceList: dataSourceList ?? 
[], + buildInTools: buildInTools || [], + customTools: customTools || [], + workflowTools: workflowTools || [], + mcpTools: mcpTools || [], + dataSourceList: dataSourceList || [], } const childrenNodeVars = toNodeOutputVars(loopChildrenNodes, isChatMode, undefined, [], conversationVariables, [], allPluginInfoList) diff --git a/web/app/components/workflow/nodes/parameter-extractor/components/extract-parameter/import-from-tool.tsx b/web/app/components/workflow/nodes/parameter-extractor/components/extract-parameter/import-from-tool.tsx index d93d08a0ac..9392f28736 100644 --- a/web/app/components/workflow/nodes/parameter-extractor/components/extract-parameter/import-from-tool.tsx +++ b/web/app/components/workflow/nodes/parameter-extractor/components/extract-parameter/import-from-tool.tsx @@ -8,7 +8,6 @@ import { useTranslation } from 'react-i18next' import BlockSelector from '../../../../block-selector' import type { Param, ParamType } from '../../types' import cn from '@/utils/classnames' -import { useStore } from '@/app/components/workflow/store' import type { DataSourceDefaultValue, ToolDefaultValue, @@ -18,6 +17,11 @@ import { CollectionType } from '@/app/components/tools/types' import type { BlockEnum } from '@/app/components/workflow/types' import { useLanguage } from '@/app/components/header/account-setting/model-provider-page/hooks' import { canFindTool } from '@/utils' +import { + useAllBuiltInTools, + useAllCustomTools, + useAllWorkflowTools, +} from '@/service/use-tools' const i18nPrefix = 'workflow.nodes.parameterExtractor' @@ -42,9 +46,9 @@ const ImportFromTool: FC = ({ const { t } = useTranslation() const language = useLanguage() - const buildInTools = useStore(s => s.buildInTools) - const customTools = useStore(s => s.customTools) - const workflowTools = useStore(s => s.workflowTools) + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() const handleSelectTool = useCallback((_type: BlockEnum, toolInfo?: ToolDefaultValue | DataSourceDefaultValue) => { if (!toolInfo || 'datasource_name' in toolInfo) @@ -54,11 +58,11 @@ const ImportFromTool: FC = ({ const currentTools = (() => { switch (provider_type) { case CollectionType.builtIn: - return buildInTools + return buildInTools || [] case CollectionType.custom: - return customTools + return customTools || [] case CollectionType.workflow: - return workflowTools + return workflowTools || [] default: return [] } diff --git a/web/app/components/workflow/nodes/tool/use-config.ts b/web/app/components/workflow/nodes/tool/use-config.ts index 5b8827936c..fe3fe543e9 100644 --- a/web/app/components/workflow/nodes/tool/use-config.ts +++ b/web/app/components/workflow/nodes/tool/use-config.ts @@ -2,7 +2,7 @@ import { useCallback, useEffect, useMemo, useState } from 'react' import { useTranslation } from 'react-i18next' import { produce } from 'immer' import { useBoolean } from 'ahooks' -import { useStore, useWorkflowStore } from '../../store' +import { useWorkflowStore } from '../../store' import type { ToolNodeType, ToolVarInputs } from './types' import { useLanguage } from '@/app/components/header/account-setting/model-provider-page/hooks' import useNodeCrud from '@/app/components/workflow/nodes/_base/hooks/use-node-crud' @@ -15,15 +15,20 @@ import { import Toast from '@/app/components/base/toast' import type { InputVar } from '@/app/components/workflow/types' import { - useFetchToolsData, useNodesReadOnly, } from 
'@/app/components/workflow/hooks' import { canFindTool } from '@/utils' +import { + useAllBuiltInTools, + useAllCustomTools, + useAllMCPTools, + useAllWorkflowTools, + useInvalidToolsByType, +} from '@/service/use-tools' const useConfig = (id: string, payload: ToolNodeType) => { const workflowStore = useWorkflowStore() const { nodesReadOnly: readOnly } = useNodesReadOnly() - const { handleFetchAllTools } = useFetchToolsData() const { t } = useTranslation() const language = useLanguage() @@ -43,21 +48,21 @@ const useConfig = (id: string, payload: ToolNodeType) => { tool_parameters, } = inputs const isBuiltIn = provider_type === CollectionType.builtIn - const buildInTools = useStore(s => s.buildInTools) - const customTools = useStore(s => s.customTools) - const workflowTools = useStore(s => s.workflowTools) - const mcpTools = useStore(s => s.mcpTools) + const { data: buildInTools } = useAllBuiltInTools() + const { data: customTools } = useAllCustomTools() + const { data: workflowTools } = useAllWorkflowTools() + const { data: mcpTools } = useAllMCPTools() const currentTools = useMemo(() => { switch (provider_type) { case CollectionType.builtIn: - return buildInTools + return buildInTools || [] case CollectionType.custom: - return customTools + return customTools || [] case CollectionType.workflow: - return workflowTools + return workflowTools || [] case CollectionType.mcp: - return mcpTools + return mcpTools || [] default: return [] } @@ -75,6 +80,7 @@ const useConfig = (id: string, payload: ToolNodeType) => { { setTrue: showSetAuthModal, setFalse: hideSetAuthModal }, ] = useBoolean(false) + const invalidToolsByType = useInvalidToolsByType(provider_type) const handleSaveAuth = useCallback( async (value: any) => { await updateBuiltInToolCredential(currCollection?.name as string, value) @@ -83,14 +89,14 @@ const useConfig = (id: string, payload: ToolNodeType) => { type: 'success', message: t('common.api.actionSuccess'), }) - handleFetchAllTools(provider_type) + invalidToolsByType() hideSetAuthModal() }, [ currCollection?.name, hideSetAuthModal, t, - handleFetchAllTools, + invalidToolsByType, provider_type, ], ) @@ -241,17 +247,15 @@ const useConfig = (id: string, payload: ToolNodeType) => { name: outputKey, type: output.type === 'array' - ? `Array[${ - output.items?.type - ? output.items.type.slice(0, 1).toLocaleUpperCase() - + output.items.type.slice(1) - : 'Unknown' + ? `Array[${output.items?.type + ? output.items.type.slice(0, 1).toLocaleUpperCase() + + output.items.type.slice(1) + : 'Unknown' }]` - : `${ - output.type - ? output.type.slice(0, 1).toLocaleUpperCase() - + output.type.slice(1) - : 'Unknown' + : `${output.type + ? 
output.type.slice(0, 1).toLocaleUpperCase() + + output.type.slice(1) + : 'Unknown' }`, description: output.description, }) diff --git a/web/app/components/workflow/store/workflow/tool-slice.ts b/web/app/components/workflow/store/workflow/tool-slice.ts index d6d89abcf0..c5180022fc 100644 --- a/web/app/components/workflow/store/workflow/tool-slice.ts +++ b/web/app/components/workflow/store/workflow/tool-slice.ts @@ -1,30 +1,11 @@ import type { StateCreator } from 'zustand' -import type { - ToolWithProvider, -} from '@/app/components/workflow/types' export type ToolSliceShape = { - buildInTools: ToolWithProvider[] - setBuildInTools: (tools: ToolWithProvider[]) => void - customTools: ToolWithProvider[] - setCustomTools: (tools: ToolWithProvider[]) => void - workflowTools: ToolWithProvider[] - setWorkflowTools: (tools: ToolWithProvider[]) => void - mcpTools: ToolWithProvider[] - setMcpTools: (tools: ToolWithProvider[]) => void toolPublished: boolean setToolPublished: (toolPublished: boolean) => void } export const createToolSlice: StateCreator = set => ({ - buildInTools: [], - setBuildInTools: buildInTools => set(() => ({ buildInTools })), - customTools: [], - setCustomTools: customTools => set(() => ({ customTools })), - workflowTools: [], - setWorkflowTools: workflowTools => set(() => ({ workflowTools })), - mcpTools: [], - setMcpTools: mcpTools => set(() => ({ mcpTools })), toolPublished: false, setToolPublished: toolPublished => set(() => ({ toolPublished })), }) diff --git a/web/app/components/workflow/types.ts b/web/app/components/workflow/types.ts index f6a706a982..324443cfd1 100644 --- a/web/app/components/workflow/types.ts +++ b/web/app/components/workflow/types.ts @@ -19,7 +19,7 @@ import type { } from '@/app/components/workflow/nodes/_base/components/error-handle/types' import type { WorkflowRetryConfig } from '@/app/components/workflow/nodes/_base/components/retry/types' import type { StructuredOutput } from '@/app/components/workflow/nodes/llm/types' -import type { PluginMeta } from '../plugins/types' +import type { Plugin, PluginMeta } from '@/app/components/plugins/types' import type { BlockClassificationEnum } from '@/app/components/workflow/block-selector/types' import type { SchemaTypeDefinition } from '@/service/use-common' @@ -451,16 +451,9 @@ export type ToolWithProvider = Collection & { meta: PluginMeta } -export type UninstalledRecommendedPlugin = { - plugin_id: string - name: string - icon: string - plugin_unique_identifier: string -} - export type RAGRecommendedPlugins = { installed_recommended_plugins: ToolWithProvider[] - uninstalled_recommended_plugins: UninstalledRecommendedPlugin[] + uninstalled_recommended_plugins: Plugin[] } export enum SupportUploadFileTypes { diff --git a/web/i18n/en-US/pipeline.ts b/web/i18n/en-US/pipeline.ts index 4b29bdbb00..8e5fd8a3e0 100644 --- a/web/i18n/en-US/pipeline.ts +++ b/web/i18n/en-US/pipeline.ts @@ -33,7 +33,7 @@ const translation = { }, ragToolSuggestions: { title: 'Suggestions for RAG', - noRecommendationPluginsInstalled: 'No recommended plugins installed, find more in Marketplace', + noRecommendationPlugins: 'No recommended plugins, find more in Marketplace', }, } diff --git a/web/i18n/ja-JP/pipeline.ts b/web/i18n/ja-JP/pipeline.ts index 64700acc09..9ec1b68273 100644 --- a/web/i18n/ja-JP/pipeline.ts +++ b/web/i18n/ja-JP/pipeline.ts @@ -33,7 +33,7 @@ const translation = { }, ragToolSuggestions: { title: 'RAGのための提案', - noRecommendationPluginsInstalled: '推奨プラグインがインストールされていません。マーケットプレイスで詳細をご確認ください', + noRecommendationPlugins: 
'推奨プラグインがありません。マーケットプレイスで詳細をご確認ください', }, } diff --git a/web/i18n/zh-Hans/pipeline.ts b/web/i18n/zh-Hans/pipeline.ts index 3c3a7a6506..1ae087fcfd 100644 --- a/web/i18n/zh-Hans/pipeline.ts +++ b/web/i18n/zh-Hans/pipeline.ts @@ -33,7 +33,7 @@ const translation = { }, ragToolSuggestions: { title: 'RAG 工具推荐', - noRecommendationPluginsInstalled: '暂无已安装的推荐插件,更多插件请在 Marketplace 中查找', + noRecommendationPlugins: '暂无推荐插件,更多插件请在 Marketplace 中查找', }, } diff --git a/web/service/use-plugins.ts b/web/service/use-plugins.ts index f59e500792..1dec97cdfa 100644 --- a/web/service/use-plugins.ts +++ b/web/service/use-plugins.ts @@ -1,4 +1,4 @@ -import { useCallback, useEffect } from 'react' +import { useCallback, useEffect, useState } from 'react' import type { FormOption, ModelProvider, @@ -39,7 +39,7 @@ import { useQuery, useQueryClient, } from '@tanstack/react-query' -import { useInvalidateAllBuiltInTools, useInvalidateRAGRecommendedPlugins } from './use-tools' +import { useInvalidateAllBuiltInTools } from './use-tools' import useReferenceSetting from '@/app/components/plugins/plugin-page/use-reference-setting' import { uninstallPlugin } from '@/service/plugins' import useRefreshPluginList from '@/app/components/plugins/install-plugin/hooks/use-refresh-plugin-list' @@ -135,14 +135,12 @@ export const useInstalledLatestVersion = (pluginIds: string[]) => { export const useInvalidateInstalledPluginList = () => { const queryClient = useQueryClient() const invalidateAllBuiltInTools = useInvalidateAllBuiltInTools() - const invalidateRAGRecommendedPlugins = useInvalidateRAGRecommendedPlugins() return () => { queryClient.invalidateQueries( { queryKey: useInstalledPluginListKey, }) invalidateAllBuiltInTools() - invalidateRAGRecommendedPlugins() } } @@ -489,6 +487,7 @@ export const useFetchPluginsInMarketPlaceByInfo = (infos: Record[]) const usePluginTaskListKey = [NAME_SPACE, 'pluginTaskList'] export const usePluginTaskList = (category?: PluginType) => { + const [initialized, setInitialized] = useState(false) const { canManagement, } = useReferenceSetting() @@ -512,7 +511,8 @@ export const usePluginTaskList = (category?: PluginType) => { useEffect(() => { // After first fetch, refresh plugin list each time all tasks are done - if (!isRefetching) { + // Skip initialization period, because the query cache is not updated yet + if (initialized && !isRefetching) { const lastData = cloneDeep(data) const taskDone = lastData?.tasks.every(task => task.status === TaskStatus.success || task.status === TaskStatus.failed) const taskAllFailed = lastData?.tasks.every(task => task.status === TaskStatus.failed) @@ -523,6 +523,10 @@ export const usePluginTaskList = (category?: PluginType) => { } }, [isRefetching]) + useEffect(() => { + setInitialized(true) + }, []) + const handleRefetch = useCallback(() => { refetch() }, [refetch]) diff --git a/web/service/use-tools.ts b/web/service/use-tools.ts index a881441cd5..306cb903df 100644 --- a/web/service/use-tools.ts +++ b/web/service/use-tools.ts @@ -4,9 +4,11 @@ import type { MCPServerDetail, Tool, } from '@/app/components/tools/types' +import { CollectionType } from '@/app/components/tools/types' import type { RAGRecommendedPlugins, ToolWithProvider } from '@/app/components/workflow/types' import type { AppIconType } from '@/types/app' import { useInvalid } from './use-base' +import type { QueryKey } from '@tanstack/react-query' import { useMutation, useQuery, @@ -76,6 +78,16 @@ export const useInvalidateAllMCPTools = () => { return useInvalid(useAllMCPToolsKey) } +const 
useInvalidToolsKeyMap: Record<string, QueryKey> = {
+  [CollectionType.builtIn]: useAllBuiltInToolsKey,
+  [CollectionType.custom]: useAllCustomToolsKey,
+  [CollectionType.workflow]: useAllWorkflowToolsKey,
+  [CollectionType.mcp]: useAllMCPToolsKey,
+}
+export const useInvalidToolsByType = (type: CollectionType | string) => {
+  return useInvalid(useInvalidToolsKeyMap[type])
+}
+
 export const useCreateMCP = () => {
   return useMutation({
     mutationKey: [NAME_SPACE, 'create-mcp'],