diff --git a/web/app/components/app/configuration/debug/chat-user-input.spec.tsx b/web/app/components/app/configuration/debug/chat-user-input.spec.tsx
new file mode 100644
index 0000000000..e6678ebf29
--- /dev/null
+++ b/web/app/components/app/configuration/debug/chat-user-input.spec.tsx
@@ -0,0 +1,710 @@
+import type { Inputs, ModelConfig } from '@/models/debug'
+import type { PromptVariable } from '@/types/app'
+import { fireEvent, render, screen } from '@testing-library/react'
+import ChatUserInput from './chat-user-input'
+
+const mockSetInputs = vi.fn()
+const mockUseContext = vi.fn()
+
+vi.mock('react-i18next', () => ({
+ useTranslation: () => ({
+ t: (key: string) => key,
+ }),
+}))
+
+vi.mock('use-context-selector', () => ({
+ useContext: () => mockUseContext(),
+ createContext: vi.fn(() => ({})),
+}))
+
+vi.mock('@/app/components/base/input', () => ({
+ default: ({ value, onChange, placeholder, autoFocus, maxLength, readOnly, type }: {
+ value: string
+ onChange: (e: { target: { value: string } }) => void
+ placeholder?: string
+ autoFocus?: boolean
+ maxLength?: number
+ readOnly?: boolean
+ type?: string
+ }) => (
+    <input
+      data-testid={`input-${placeholder}`}
+      value={value}
+      onChange={onChange}
+      placeholder={placeholder}
+      data-autofocus={autoFocus ? 'true' : undefined}
+      maxLength={maxLength}
+      readOnly={readOnly}
+      type={type}
+    />
+  ),
+}))
+
+vi.mock('@/app/components/base/select', () => ({
+ default: ({ defaultValue, onSelect, items, disabled, className }: {
+ defaultValue: string
+ onSelect: (item: { value: string }) => void
+ items: { name: string, value: string }[]
+ allowSearch?: boolean
+ disabled?: boolean
+ className?: string
+ }) => (
+    <select
+      data-testid="select-input"
+      defaultValue={defaultValue}
+      onChange={e => onSelect({ value: e.target.value })}
+      disabled={disabled}
+      className={className}
+    >
+      {items?.map(item => (
+        <option key={item.value} value={item.value}>{item.name}</option>
+      ))}
+    </select>
+  ),
+}))
+
+vi.mock('@/app/components/base/textarea', () => ({
+ default: ({ value, onChange, placeholder, readOnly, className }: {
+ value: string
+ onChange: (e: { target: { value: string } }) => void
+ placeholder?: string
+ readOnly?: boolean
+ className?: string
+ }) => (
+    <textarea
+      data-testid={`textarea-${placeholder}`}
+      value={value}
+      onChange={onChange}
+      placeholder={placeholder}
+      readOnly={readOnly}
+      className={className}
+    />
+  ),
+}))
+
+vi.mock('@/app/components/workflow/nodes/_base/components/before-run-form/bool-input', () => ({
+ default: ({ name, value, required, onChange, readonly }: {
+ name: string
+ value: boolean
+ required?: boolean
+ onChange: (value: boolean) => void
+ readonly?: boolean
+ }) => (
+    <label data-testid={`bool-input-${name}`}>
+      <input
+        type="checkbox"
+        checked={value}
+        onChange={e => onChange(e.target.checked)}
+        disabled={readonly}
+        data-required={required}
+      />
+      {name}
+    </label>
+  ),
+}))
+
+// Extended type to match runtime behavior (includes 'paragraph', 'checkbox', 'default')
+type ExtendedPromptVariable = {
+ key: string
+ name: string
+ type: 'string' | 'number' | 'select' | 'paragraph' | 'checkbox'
+ required: boolean
+ options?: string[]
+ max_length?: number
+ default?: string | null
+}
+
+const createPromptVariable = (overrides: Partial<ExtendedPromptVariable> = {}): ExtendedPromptVariable => ({
+ key: 'test-key',
+ name: 'Test Name',
+ type: 'string',
+ required: false,
+ ...overrides,
+})
+
+const createModelConfig = (promptVariables: ExtendedPromptVariable[] = []): ModelConfig => ({
+ provider: 'openai',
+ model_id: 'gpt-4',
+ mode: 'chat',
+ configs: {
+ prompt_template: '',
+ prompt_variables: promptVariables as PromptVariable[],
+ },
+} as ModelConfig)
+
+const createContextValue = (overrides: Partial<{
+ modelConfig: ModelConfig
+ setInputs: (inputs: Inputs) => void
+ readonly: boolean
+}> = {}) => ({
+ modelConfig: createModelConfig(),
+ setInputs: mockSetInputs,
+ readonly: false,
+ ...overrides,
+})
+
+describe('ChatUserInput', () => {
+ beforeEach(() => {
+ vi.clearAllMocks()
+ mockUseContext.mockReturnValue(createContextValue())
+ })
+
+ describe('Rendering', () => {
+ it('should return null when no prompt variables exist', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([]),
+ }))
+
+      const { container } = render(<ChatUserInput inputs={{}} />)
+ expect(container.firstChild).toBeNull()
+ })
+
+ it('should return null when prompt variables have empty keys', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: '', name: 'Test' }),
+ createPromptVariable({ key: ' ', name: 'Test2' }),
+ ]),
+ }))
+
+      const { container } = render(<ChatUserInput inputs={{}} />)
+ expect(container.firstChild).toBeNull()
+ })
+
+ it('should return null when prompt variables have empty names', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'key1', name: '' }),
+ createPromptVariable({ key: 'key2', name: ' ' }),
+ ]),
+ }))
+
+      const { container } = render(<ChatUserInput inputs={{}} />)
+ expect(container.firstChild).toBeNull()
+ })
+
+ it('should render string input type', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ expect(screen.getByTestId('input-Name')).toBeInTheDocument()
+ })
+
+ it('should render paragraph input type', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'description', name: 'Description', type: 'paragraph' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ expect(screen.getByTestId('textarea-Description')).toBeInTheDocument()
+ })
+
+ it('should render select input type', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'choice', name: 'Choice', type: 'select', options: ['A', 'B', 'C'] }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ expect(screen.getByTestId('select-input')).toBeInTheDocument()
+ expect(screen.getByText('A')).toBeInTheDocument()
+ expect(screen.getByText('B')).toBeInTheDocument()
+ expect(screen.getByText('C')).toBeInTheDocument()
+ })
+
+ it('should render number input type', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'count', name: 'Count', type: 'number' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ const input = screen.getByTestId('input-Count')
+ expect(input).toBeInTheDocument()
+ expect(input).toHaveAttribute('type', 'number')
+ })
+
+ it('should render checkbox input type', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'enabled', name: 'Enabled', type: 'checkbox' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ expect(screen.getByTestId('bool-input-Enabled')).toBeInTheDocument()
+ })
+
+ it('should render multiple input types', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string' }),
+ createPromptVariable({ key: 'desc', name: 'Description', type: 'paragraph' }),
+ createPromptVariable({ key: 'choice', name: 'Choice', type: 'select', options: ['X', 'Y'] }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ expect(screen.getByTestId('input-Name')).toBeInTheDocument()
+ expect(screen.getByTestId('textarea-Description')).toBeInTheDocument()
+ expect(screen.getByTestId('select-input')).toBeInTheDocument()
+ })
+
+ it('should show optional label for non-required fields', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string', required: false }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ expect(screen.getByText('panel.optional')).toBeInTheDocument()
+ })
+
+ it('should not show optional label for required fields', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string', required: true }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ expect(screen.queryByText('panel.optional')).not.toBeInTheDocument()
+ })
+
+ it('should use key as label when name is not provided', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'myKey', name: '', type: 'string' }),
+ ]),
+ }))
+
+ // This should actually return null because name is empty
+      const { container } = render(<ChatUserInput inputs={{}} />)
+ expect(container.firstChild).toBeNull()
+ })
+ })
+
+ describe('Input Values', () => {
+ it('should display existing input values for string type', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{ name: 'John' }} />)
+ expect(screen.getByTestId('input-Name')).toHaveValue('John')
+ })
+
+ it('should display existing input values for paragraph type', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'desc', name: 'Description', type: 'paragraph' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{ desc: 'Long text here' }} />)
+ expect(screen.getByTestId('textarea-Description')).toHaveValue('Long text here')
+ })
+
+ it('should display existing input values for number type', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'count', name: 'Count', type: 'number' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{ count: 42 }} />)
+ // Number type input still uses string value internally
+ expect(screen.getByTestId('input-Count')).toHaveValue(42)
+ })
+
+ it('should display checkbox as checked when value is truthy', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'enabled', name: 'Enabled', type: 'checkbox' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{ enabled: true }} />)
+ const checkbox = screen.getByTestId('bool-input-Enabled').querySelector('input')
+ expect(checkbox).toBeChecked()
+ })
+
+ it('should display checkbox as unchecked when value is falsy', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'enabled', name: 'Enabled', type: 'checkbox' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{ enabled: false }} />)
+ const checkbox = screen.getByTestId('bool-input-Enabled').querySelector('input')
+ expect(checkbox).not.toBeChecked()
+ })
+
+ it('should handle empty string values', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{ name: '' }} />)
+ expect(screen.getByTestId('input-Name')).toHaveValue('')
+ })
+
+ it('should handle undefined values', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ expect(screen.getByTestId('input-Name')).toHaveValue('')
+ })
+ })
+
+ describe('User Interactions', () => {
+ it('should call setInputs when string input changes', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ fireEvent.change(screen.getByTestId('input-Name'), { target: { value: 'New Value' } })
+
+ expect(mockSetInputs).toHaveBeenCalledWith({ name: 'New Value' })
+ })
+
+ it('should call setInputs when paragraph input changes', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'desc', name: 'Description', type: 'paragraph' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ fireEvent.change(screen.getByTestId('textarea-Description'), { target: { value: 'New Description' } })
+
+ expect(mockSetInputs).toHaveBeenCalledWith({ desc: 'New Description' })
+ })
+
+ it('should call setInputs when select input changes', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'choice', name: 'Choice', type: 'select', options: ['A', 'B', 'C'] }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ fireEvent.change(screen.getByTestId('select-input'), { target: { value: 'B' } })
+
+ expect(mockSetInputs).toHaveBeenCalledWith({ choice: 'B' })
+ })
+
+ it('should call setInputs when number input changes', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'count', name: 'Count', type: 'number' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ fireEvent.change(screen.getByTestId('input-Count'), { target: { value: '100' } })
+
+ expect(mockSetInputs).toHaveBeenCalledWith({ count: '100' })
+ })
+
+ it('should call setInputs when checkbox changes', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'enabled', name: 'Enabled', type: 'checkbox' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ const checkbox = screen.getByTestId('bool-input-Enabled').querySelector('input')!
+ fireEvent.click(checkbox)
+
+ expect(mockSetInputs).toHaveBeenCalledWith({ enabled: true })
+ })
+
+ it('should not call setInputs for unknown keys', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+
+ // The component filters by promptVariableObj, so unknown keys won't trigger updates
+ // This is tested indirectly - only valid keys should trigger setInputs
+ fireEvent.change(screen.getByTestId('input-Name'), { target: { value: 'Valid' } })
+
+ expect(mockSetInputs).toHaveBeenCalledTimes(1)
+ expect(mockSetInputs).toHaveBeenCalledWith({ name: 'Valid' })
+ })
+ })
+
+ describe('Readonly Mode', () => {
+ it('should set string input as readonly when readonly is true', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string' }),
+ ]),
+ readonly: true,
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ expect(screen.getByTestId('input-Name')).toHaveAttribute('readonly')
+ })
+
+ it('should set paragraph input as readonly when readonly is true', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'desc', name: 'Description', type: 'paragraph' }),
+ ]),
+ readonly: true,
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ expect(screen.getByTestId('textarea-Description')).toHaveAttribute('readonly')
+ })
+
+ it('should disable select when readonly is true', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'choice', name: 'Choice', type: 'select', options: ['A', 'B'] }),
+ ]),
+ readonly: true,
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ expect(screen.getByTestId('select-input')).toBeDisabled()
+ })
+
+ it('should disable checkbox when readonly is true', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'enabled', name: 'Enabled', type: 'checkbox' }),
+ ]),
+ readonly: true,
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ const checkbox = screen.getByTestId('bool-input-Enabled').querySelector('input')
+ expect(checkbox).toBeDisabled()
+ })
+ })
+
+ describe('Default Values', () => {
+ it('should initialize inputs with default values when field is empty', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string', default: 'Default Name' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+
+ expect(mockSetInputs).toHaveBeenCalledWith({ name: 'Default Name' })
+ })
+
+ it('should not override existing values with defaults', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string', default: 'Default' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{ name: 'Existing Value' }} />)
+
+ // setInputs should not be called since there's already a value
+ expect(mockSetInputs).not.toHaveBeenCalled()
+ })
+
+ it('should handle multiple default values', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string', default: 'Default Name' }),
+ createPromptVariable({ key: 'count', name: 'Count', type: 'number', default: '10' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+
+ expect(mockSetInputs).toHaveBeenCalledWith({
+ name: 'Default Name',
+ count: '10',
+ })
+ })
+
+ it('should not set default when default is empty string', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string', default: '' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+
+ expect(mockSetInputs).not.toHaveBeenCalled()
+ })
+
+ it('should not set default when default is undefined', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+
+ expect(mockSetInputs).not.toHaveBeenCalled()
+ })
+
+ it('should not set default when default is null', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string', default: null as unknown as string }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+
+ expect(mockSetInputs).not.toHaveBeenCalled()
+ })
+ })
+
+ describe('AutoFocus', () => {
+ it('should set autoFocus on first string input', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'first', name: 'First', type: 'string' }),
+ createPromptVariable({ key: 'second', name: 'Second', type: 'string' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ expect(screen.getByTestId('input-First')).toHaveAttribute('data-autofocus', 'true')
+ expect(screen.getByTestId('input-Second')).not.toHaveAttribute('data-autofocus')
+ })
+
+ it('should set autoFocus on first number input when it is the first field', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'count', name: 'Count', type: 'number' }),
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ expect(screen.getByTestId('input-Count')).toHaveAttribute('data-autofocus', 'true')
+ })
+ })
+
+ describe('MaxLength', () => {
+ it('should pass maxLength to string input', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string', max_length: 50 }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ expect(screen.getByTestId('input-Name')).toHaveAttribute('maxLength', '50')
+ })
+
+ it('should pass maxLength to number input', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'count', name: 'Count', type: 'number', max_length: 10 }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ expect(screen.getByTestId('input-Count')).toHaveAttribute('maxLength', '10')
+ })
+ })
+
+ describe('Edge Cases', () => {
+ it('should handle select with empty options', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'choice', name: 'Choice', type: 'select', options: [] }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ const select = screen.getByTestId('select-input')
+ expect(select).toBeInTheDocument()
+ expect(select.children).toHaveLength(0)
+ })
+
+ it('should handle select with undefined options', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'choice', name: 'Choice', type: 'select' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ const select = screen.getByTestId('select-input')
+ expect(select).toBeInTheDocument()
+ })
+
+ it('should preserve other input values when updating one field', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'name', name: 'Name', type: 'string' }),
+ createPromptVariable({ key: 'desc', name: 'Description', type: 'paragraph' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{ name: 'Existing', desc: 'Also Existing' }} />)
+ fireEvent.change(screen.getByTestId('input-Name'), { target: { value: 'Updated' } })
+
+ expect(mockSetInputs).toHaveBeenCalledWith({
+ name: 'Updated',
+ desc: 'Also Existing',
+ })
+ })
+
+ it('should convert non-string values to string for display', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'value', name: 'Value', type: 'string' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{ value: 123 }} />)
+ expect(screen.getByTestId('input-Value')).toHaveValue('123')
+ })
+
+ it('should not hide label for checkbox type', () => {
+ mockUseContext.mockReturnValue(createContextValue({
+ modelConfig: createModelConfig([
+ createPromptVariable({ key: 'enabled', name: 'Is Enabled', type: 'checkbox' }),
+ ]),
+ }))
+
+      render(<ChatUserInput inputs={{}} />)
+ // For checkbox, the label is rendered inside BoolInput, not in the header
+ expect(screen.queryByText('Is Enabled')).toBeInTheDocument()
+ })
+ })
+})
diff --git a/web/app/components/app/configuration/debug/debug-with-multiple-model/chat-item.spec.tsx b/web/app/components/app/configuration/debug/debug-with-multiple-model/chat-item.spec.tsx
new file mode 100644
index 0000000000..d621bb3941
--- /dev/null
+++ b/web/app/components/app/configuration/debug/debug-with-multiple-model/chat-item.spec.tsx
@@ -0,0 +1,641 @@
+import type { ModelAndParameter } from '../types'
+import type { ChatConfig, ChatItem as ChatItemType, OnSend } from '@/app/components/base/chat/types'
+import { render, screen } from '@testing-library/react'
+import { TransferMethod } from '@/app/components/base/chat/types'
+import { ModelFeatureEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
+import { APP_CHAT_WITH_MULTIPLE_MODEL, APP_CHAT_WITH_MULTIPLE_MODEL_RESTART } from '../types'
+import ChatItem from './chat-item'
+
+const mockUseAppContext = vi.fn()
+const mockUseDebugConfigurationContext = vi.fn()
+const mockUseProviderContext = vi.fn()
+const mockUseFeatures = vi.fn()
+const mockUseConfigFromDebugContext = vi.fn()
+const mockUseFormattingChangedSubscription = vi.fn()
+const mockUseChat = vi.fn()
+const mockUseEventEmitterContextContext = vi.fn()
+const mockFetchConversationMessages = vi.fn()
+const mockFetchSuggestedQuestions = vi.fn()
+const mockStopChatMessageResponding = vi.fn()
+
+let capturedChatProps: {
+ config: ChatConfig
+ chatList: ChatItemType[]
+ isResponding: boolean
+ onSend: OnSend
+ suggestedQuestions: string[]
+  allToolIcons: Record<string, unknown>
+} | null = null
+
+let eventSubscriptionCallback: ((v: { type: string, payload?: Record<string, unknown> }) => void) | null = null
+
+vi.mock('@/context/app-context', () => ({
+ useAppContext: () => mockUseAppContext(),
+}))
+
+vi.mock('@/context/debug-configuration', () => ({
+ useDebugConfigurationContext: () => mockUseDebugConfigurationContext(),
+}))
+
+vi.mock('@/context/provider-context', () => ({
+ useProviderContext: () => mockUseProviderContext(),
+}))
+
+vi.mock('@/app/components/base/features/hooks', () => ({
+  useFeatures: (selector: (state: Record<string, unknown>) => unknown) => mockUseFeatures(selector),
+}))
+
+vi.mock('../hooks', () => ({
+ useConfigFromDebugContext: () => mockUseConfigFromDebugContext(),
+ useFormattingChangedSubscription: (chatList: ChatItemType[]) => mockUseFormattingChangedSubscription(chatList),
+}))
+
+vi.mock('@/app/components/base/chat/chat/hooks', () => ({
+ useChat: () => mockUseChat(),
+}))
+
+vi.mock('@/context/event-emitter', () => ({
+ useEventEmitterContextContext: () => mockUseEventEmitterContextContext(),
+}))
+
+vi.mock('@/service/debug', () => ({
+ fetchConversationMessages: (...args: unknown[]) => mockFetchConversationMessages(...args),
+ fetchSuggestedQuestions: (...args: unknown[]) => mockFetchSuggestedQuestions(...args),
+ stopChatMessageResponding: (...args: unknown[]) => mockStopChatMessageResponding(...args),
+}))
+
+vi.mock('@/app/components/base/chat/utils', () => ({
+ getLastAnswer: (chatList: ChatItemType[]) => chatList.find(item => item.isAnswer),
+}))
+
+vi.mock('@/utils', () => ({
+ canFindTool: (collectionId: string, providerId: string) => collectionId === providerId,
+}))
+
+vi.mock('@/app/components/base/chat/chat', () => ({
+ default: (props: typeof capturedChatProps) => {
+ capturedChatProps = props
+ return (
+      <div data-testid="chat-component">
+        <span data-testid="chat-list-length">{props?.chatList?.length || 0}</span>
+        <span data-testid="is-responding">{props?.isResponding ? 'yes' : 'no'}</span>
+      </div>
+ )
+ },
+}))
+
+vi.mock('@/app/components/base/avatar', () => ({
+  default: ({ name }: { name: string }) => <span data-testid="avatar">{name}</span>,
+}))
+
+const createModelAndParameter = (overrides: Partial<ModelAndParameter> = {}): ModelAndParameter => ({
+ id: 'model-1',
+ model: 'gpt-3.5-turbo',
+ provider: 'openai',
+ parameters: { temperature: 0.7 },
+ ...overrides,
+})
+
+const createDefaultMocks = () => {
+ mockUseAppContext.mockReturnValue({
+ userProfile: { avatar_url: 'http://avatar.url', name: 'Test User' },
+ })
+
+ mockUseDebugConfigurationContext.mockReturnValue({
+ modelConfig: {
+ configs: { prompt_variables: [] },
+ agentConfig: { tools: [] },
+ },
+ appId: 'app-123',
+ inputs: { key: 'value' },
+ collectionList: [],
+ })
+
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: [
+ {
+ provider: 'openai',
+ models: [
+ {
+ model: 'gpt-3.5-turbo',
+ features: [ModelFeatureEnum.vision],
+ model_properties: { mode: 'chat' },
+ },
+ ],
+ },
+ ],
+ })
+
+  mockUseFeatures.mockImplementation((selector: (state: Record<string, unknown>) => unknown) => {
+ const state = {
+ features: {
+ moreLikeThis: { enabled: false },
+ opening: { enabled: true, opening_statement: 'Hello!', suggested_questions: ['Q1'] },
+ moderation: { enabled: false },
+ speech2text: { enabled: true },
+ text2speech: { enabled: false },
+ file: { enabled: true },
+ suggested: { enabled: true },
+ citation: { enabled: false },
+ annotationReply: { enabled: false },
+ },
+ }
+ return selector(state)
+ })
+
+ mockUseConfigFromDebugContext.mockReturnValue({
+ base_config: 'test',
+ })
+
+ mockUseChat.mockReturnValue({
+ chatList: [{ id: 'msg-1', content: 'Hello', isAnswer: true }],
+ isResponding: false,
+ handleSend: vi.fn(),
+ suggestedQuestions: ['Question 1', 'Question 2'],
+ handleRestart: vi.fn(),
+ })
+
+ mockUseEventEmitterContextContext.mockReturnValue({
+ eventEmitter: {
+      useSubscription: (callback: (v: { type: string, payload?: Record<string, unknown> }) => void) => {
+ eventSubscriptionCallback = callback
+ },
+ },
+ })
+}
+
+const renderComponent = (props: Partial<{ modelAndParameter: ModelAndParameter }> = {}) => {
+ const defaultProps = {
+ modelAndParameter: createModelAndParameter(),
+ ...props,
+ }
+  return render(<ChatItem {...defaultProps} />)
+}
+
+describe('ChatItem', () => {
+ beforeEach(() => {
+ vi.clearAllMocks()
+ capturedChatProps = null
+ eventSubscriptionCallback = null
+ createDefaultMocks()
+ })
+
+ describe('rendering', () => {
+ it('should render Chat component when chatList is not empty', () => {
+ renderComponent()
+
+ expect(screen.getByTestId('chat-component')).toBeInTheDocument()
+ expect(screen.getByTestId('chat-list-length')).toHaveTextContent('1')
+ })
+
+ it('should not render when chatList is empty', () => {
+ mockUseChat.mockReturnValue({
+ chatList: [],
+ isResponding: false,
+ handleSend: vi.fn(),
+ suggestedQuestions: [],
+ handleRestart: vi.fn(),
+ })
+
+ renderComponent()
+
+ expect(screen.queryByTestId('chat-component')).not.toBeInTheDocument()
+ })
+
+ it('should pass correct config to Chat', () => {
+ renderComponent()
+
+ expect(capturedChatProps?.config).toMatchObject({
+ base_config: 'test',
+ opening_statement: 'Hello!',
+ suggested_questions: ['Q1'],
+ })
+ })
+
+ it('should pass suggestedQuestions to Chat', () => {
+ renderComponent()
+
+ expect(capturedChatProps?.suggestedQuestions).toEqual(['Question 1', 'Question 2'])
+ })
+
+ it('should pass isResponding to Chat', () => {
+ mockUseChat.mockReturnValue({
+ chatList: [{ id: 'msg-1' }],
+ isResponding: true,
+ handleSend: vi.fn(),
+ suggestedQuestions: [],
+ handleRestart: vi.fn(),
+ })
+
+ renderComponent()
+
+ expect(screen.getByTestId('is-responding')).toHaveTextContent('yes')
+ })
+ })
+
+ describe('config composition', () => {
+ it('should include opening statement when enabled', () => {
+ renderComponent()
+
+ expect(capturedChatProps?.config.opening_statement).toBe('Hello!')
+ })
+
+ it('should use empty opening statement when disabled', () => {
+      mockUseFeatures.mockImplementation((selector: (state: Record<string, unknown>) => unknown) => {
+ const state = {
+ features: {
+ moreLikeThis: { enabled: false },
+ opening: { enabled: false, opening_statement: 'Should not appear' },
+ moderation: { enabled: false },
+ speech2text: { enabled: false },
+ text2speech: { enabled: false },
+ file: { enabled: false },
+ suggested: { enabled: false },
+ citation: { enabled: false },
+ annotationReply: { enabled: false },
+ },
+ }
+ return selector(state)
+ })
+
+ renderComponent()
+
+ expect(capturedChatProps?.config.opening_statement).toBe('')
+ expect(capturedChatProps?.config.suggested_questions).toEqual([])
+ })
+ })
+
+ describe('inputsForm transformation', () => {
+ it('should filter out API type variables', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({
+ modelConfig: {
+ configs: {
+ prompt_variables: [
+ { key: 'var1', name: 'Var 1', type: 'string' },
+ { key: 'var2', name: 'Var 2', type: 'api' },
+ { key: 'var3', name: 'Var 3', type: 'number' },
+ ],
+ },
+ agentConfig: { tools: [] },
+ },
+ appId: 'app-123',
+ inputs: {},
+ collectionList: [],
+ })
+
+ renderComponent()
+
+ // The component transforms prompt_variables into inputsForm
+ // We can verify this through the useChat call
+ expect(mockUseChat).toHaveBeenCalled()
+ })
+ })
+
+ describe('event subscription', () => {
+ it('should handle APP_CHAT_WITH_MULTIPLE_MODEL event', () => {
+ const handleSend = vi.fn()
+ mockUseChat.mockReturnValue({
+ chatList: [{ id: 'msg-1' }],
+ isResponding: false,
+ handleSend,
+ suggestedQuestions: [],
+ handleRestart: vi.fn(),
+ })
+
+ renderComponent()
+
+ // Trigger the event
+ eventSubscriptionCallback?.({
+ type: APP_CHAT_WITH_MULTIPLE_MODEL,
+ payload: { message: 'Hello', files: [{ id: 'file-1' }] },
+ })
+
+ expect(handleSend).toHaveBeenCalledWith(
+ 'apps/app-123/chat-messages',
+ expect.objectContaining({
+ query: 'Hello',
+ inputs: { key: 'value' },
+ }),
+ expect.any(Object),
+ )
+ })
+
+ it('should handle APP_CHAT_WITH_MULTIPLE_MODEL_RESTART event', () => {
+ const handleRestart = vi.fn()
+ mockUseChat.mockReturnValue({
+ chatList: [{ id: 'msg-1' }],
+ isResponding: false,
+ handleSend: vi.fn(),
+ suggestedQuestions: [],
+ handleRestart,
+ })
+
+ renderComponent()
+
+ eventSubscriptionCallback?.({
+ type: APP_CHAT_WITH_MULTIPLE_MODEL_RESTART,
+ })
+
+ expect(handleRestart).toHaveBeenCalled()
+ })
+
+ it('should ignore unrelated events', () => {
+ const handleSend = vi.fn()
+ const handleRestart = vi.fn()
+ mockUseChat.mockReturnValue({
+ chatList: [{ id: 'msg-1' }],
+ isResponding: false,
+ handleSend,
+ suggestedQuestions: [],
+ handleRestart,
+ })
+
+ renderComponent()
+
+ eventSubscriptionCallback?.({
+ type: 'SOME_OTHER_EVENT',
+ payload: {},
+ })
+
+ expect(handleSend).not.toHaveBeenCalled()
+ expect(handleRestart).not.toHaveBeenCalled()
+ })
+ })
+
+ describe('doSend function', () => {
+ it('should include files when vision is supported and file upload enabled', () => {
+ const handleSend = vi.fn()
+ mockUseChat.mockReturnValue({
+ chatList: [{ id: 'msg-1' }],
+ isResponding: false,
+ handleSend,
+ suggestedQuestions: [],
+ handleRestart: vi.fn(),
+ })
+
+ renderComponent()
+
+ eventSubscriptionCallback?.({
+ type: APP_CHAT_WITH_MULTIPLE_MODEL,
+ payload: { message: 'Hello', files: [{ id: 'file-1' }] },
+ })
+
+ expect(handleSend).toHaveBeenCalledWith(
+ expect.any(String),
+ expect.objectContaining({
+ files: [{ id: 'file-1' }],
+ }),
+ expect.any(Object),
+ )
+ })
+
+ it('should not include files when vision is not supported', () => {
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: [
+ {
+ provider: 'openai',
+ models: [
+ {
+ model: 'gpt-3.5-turbo',
+ features: [], // No vision support
+ model_properties: { mode: 'chat' },
+ },
+ ],
+ },
+ ],
+ })
+
+ const handleSend = vi.fn()
+ mockUseChat.mockReturnValue({
+ chatList: [{ id: 'msg-1' }],
+ isResponding: false,
+ handleSend,
+ suggestedQuestions: [],
+ handleRestart: vi.fn(),
+ })
+
+ renderComponent()
+
+ eventSubscriptionCallback?.({
+ type: APP_CHAT_WITH_MULTIPLE_MODEL,
+ payload: { message: 'Hello', files: [{ id: 'file-1' }] },
+ })
+
+ expect(handleSend).toHaveBeenCalledWith(
+ expect.any(String),
+ expect.not.objectContaining({
+ files: expect.anything(),
+ }),
+ expect.any(Object),
+ )
+ })
+
+ it('should include model configuration in request', () => {
+ const handleSend = vi.fn()
+ mockUseChat.mockReturnValue({
+ chatList: [{ id: 'msg-1' }],
+ isResponding: false,
+ handleSend,
+ suggestedQuestions: [],
+ handleRestart: vi.fn(),
+ })
+
+ const modelAndParameter = createModelAndParameter({
+ provider: 'openai',
+ model: 'gpt-3.5-turbo',
+ parameters: { temperature: 0.5 },
+ })
+
+ renderComponent({ modelAndParameter })
+
+ eventSubscriptionCallback?.({
+ type: APP_CHAT_WITH_MULTIPLE_MODEL,
+ payload: { message: 'Hello', files: [] },
+ })
+
+ expect(handleSend).toHaveBeenCalledWith(
+ expect.any(String),
+ expect.objectContaining({
+ model_config: expect.objectContaining({
+ model: expect.objectContaining({
+ provider: 'openai',
+ name: 'gpt-3.5-turbo',
+ completion_params: { temperature: 0.5 },
+ }),
+ }),
+ }),
+ expect.any(Object),
+ )
+ })
+
+ it('should use parent_message_id from last answer', () => {
+ const handleSend = vi.fn()
+ mockUseChat.mockReturnValue({
+ chatList: [
+ { id: 'msg-1', content: 'Hi', isAnswer: false },
+ { id: 'msg-2', content: 'Hello', isAnswer: true },
+ ],
+ isResponding: false,
+ handleSend,
+ suggestedQuestions: [],
+ handleRestart: vi.fn(),
+ })
+
+ renderComponent()
+
+ eventSubscriptionCallback?.({
+ type: APP_CHAT_WITH_MULTIPLE_MODEL,
+ payload: { message: 'Hello', files: [] },
+ })
+
+ expect(handleSend).toHaveBeenCalledWith(
+ expect.any(String),
+ expect.objectContaining({
+ parent_message_id: 'msg-2',
+ }),
+ expect.any(Object),
+ )
+ })
+ })
+
+ describe('allToolIcons', () => {
+ it('should compute tool icons from collectionList', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({
+ modelConfig: {
+ configs: { prompt_variables: [] },
+ agentConfig: {
+ tools: [
+ { tool_name: 'tool1', provider_id: 'collection1' },
+ { tool_name: 'tool2', provider_id: 'collection2' },
+ ],
+ },
+ },
+ appId: 'app-123',
+ inputs: {},
+ collectionList: [
+ { id: 'collection1', icon: 'icon1' },
+ { id: 'collection2', icon: 'icon2' },
+ ],
+ })
+
+ renderComponent()
+
+ expect(capturedChatProps?.allToolIcons).toEqual({
+ tool1: 'icon1',
+ tool2: 'icon2',
+ })
+ })
+
+ it('should handle tools without matching collection', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({
+ modelConfig: {
+ configs: { prompt_variables: [] },
+ agentConfig: {
+ tools: [
+ { tool_name: 'tool1', provider_id: 'nonexistent' },
+ ],
+ },
+ },
+ appId: 'app-123',
+ inputs: {},
+ collectionList: [],
+ })
+
+ renderComponent()
+
+ expect(capturedChatProps?.allToolIcons).toEqual({
+ tool1: undefined,
+ })
+ })
+
+ it('should handle empty tools array', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({
+ modelConfig: {
+ configs: { prompt_variables: [] },
+ agentConfig: { tools: [] },
+ },
+ appId: 'app-123',
+ inputs: {},
+ collectionList: [],
+ })
+
+ renderComponent()
+
+ expect(capturedChatProps?.allToolIcons).toEqual({})
+ })
+ })
+
+ describe('useFormattingChangedSubscription', () => {
+ it('should call useFormattingChangedSubscription with chatList', () => {
+ const chatList = [{ id: 'msg-1', content: 'Hello' }]
+ mockUseChat.mockReturnValue({
+ chatList,
+ isResponding: false,
+ handleSend: vi.fn(),
+ suggestedQuestions: [],
+ handleRestart: vi.fn(),
+ })
+
+ renderComponent()
+
+ expect(mockUseFormattingChangedSubscription).toHaveBeenCalledWith(chatList)
+ })
+ })
+
+ describe('edge cases', () => {
+ it('should handle missing provider in textGenerationModelList', () => {
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: [],
+ })
+
+ const handleSend = vi.fn()
+ mockUseChat.mockReturnValue({
+ chatList: [{ id: 'msg-1' }],
+ isResponding: false,
+ handleSend,
+ suggestedQuestions: [],
+ handleRestart: vi.fn(),
+ })
+
+ renderComponent()
+
+ eventSubscriptionCallback?.({
+ type: APP_CHAT_WITH_MULTIPLE_MODEL,
+ payload: { message: 'Hello', files: [] },
+ })
+
+ // Should still call handleSend without crashing
+ expect(handleSend).toHaveBeenCalled()
+ })
+
+ it('should handle null eventEmitter', () => {
+ mockUseEventEmitterContextContext.mockReturnValue({
+ eventEmitter: null,
+ })
+
+ expect(() => renderComponent()).not.toThrow()
+ })
+
+ it('should handle undefined tools in agentConfig', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({
+ modelConfig: {
+ configs: { prompt_variables: [] },
+ agentConfig: { tools: undefined },
+ },
+ appId: 'app-123',
+ inputs: {},
+ collectionList: [],
+ })
+
+ // This may throw since the code does agentConfig.tools?.forEach
+ // But the optional chaining should handle it
+ expect(() => renderComponent()).not.toThrow()
+ })
+ })
+})
diff --git a/web/app/components/app/configuration/debug/debug-with-multiple-model/context.spec.tsx b/web/app/components/app/configuration/debug/debug-with-multiple-model/context.spec.tsx
new file mode 100644
index 0000000000..e26fcec607
--- /dev/null
+++ b/web/app/components/app/configuration/debug/debug-with-multiple-model/context.spec.tsx
@@ -0,0 +1,224 @@
+import type { ModelAndParameter } from '../types'
+import type { DebugWithMultipleModelContextType } from './context'
+import { render, screen } from '@testing-library/react'
+import {
+ DebugWithMultipleModelContextProvider,
+ useDebugWithMultipleModelContext,
+} from './context'
+
+const createModelAndParameter = (overrides: Partial<ModelAndParameter> = {}): ModelAndParameter => ({
+ id: 'model-1',
+ model: 'gpt-3.5-turbo',
+ provider: 'openai',
+ parameters: {},
+ ...overrides,
+})
+
+const TestConsumer = () => {
+  const context = useDebugWithMultipleModelContext()
+  return (
+    <div>
+      <div data-testid="configs-count">{context.multipleModelConfigs.length}</div>
+      <div data-testid="has-check-can-send">{context.checkCanSend ? 'yes' : 'no'}</div>
+      <button data-testid="call-on-change" onClick={() => context.onMultipleModelConfigsChange(true, [])} />
+      <button data-testid="call-on-debug-change" onClick={() => context.onDebugWithMultipleModelChange(createModelAndParameter())} />
+    </div>
+  )
+}
+
+describe('DebugWithMultipleModelContext', () => {
+ describe('useDebugWithMultipleModelContext', () => {
+ it('should return default values when used outside provider', () => {
+      render(<TestConsumer />)
+
+ expect(screen.getByTestId('configs-count')).toHaveTextContent('0')
+ expect(screen.getByTestId('has-check-can-send')).toHaveTextContent('no')
+ })
+
+ it('should return default noop functions that do not throw', () => {
+      render(<TestConsumer />)
+
+ // These should not throw when called
+ expect(() => {
+ screen.getByTestId('call-on-change').click()
+ }).not.toThrow()
+
+ expect(() => {
+ screen.getByTestId('call-on-debug-change').click()
+ }).not.toThrow()
+ })
+ })
+
+ describe('DebugWithMultipleModelContextProvider', () => {
+ it('should provide multipleModelConfigs to children', () => {
+ const multipleModelConfigs = [
+ createModelAndParameter({ id: 'model-1' }),
+ createModelAndParameter({ id: 'model-2' }),
+ ]
+
+      render(
+        <DebugWithMultipleModelContextProvider multipleModelConfigs={multipleModelConfigs}>
+          <TestConsumer />
+        </DebugWithMultipleModelContextProvider>,
+      )
+
+ expect(screen.getByTestId('configs-count')).toHaveTextContent('2')
+ })
+
+ it('should provide checkCanSend function to children', () => {
+ const checkCanSend = vi.fn(() => true)
+
+      render(
+        <DebugWithMultipleModelContextProvider
+          multipleModelConfigs={[]}
+          checkCanSend={checkCanSend}
+        >
+          <TestConsumer />
+        </DebugWithMultipleModelContextProvider>,
+      )
+
+ expect(screen.getByTestId('has-check-can-send')).toHaveTextContent('yes')
+ })
+
+ it('should call onMultipleModelConfigsChange when invoked from context', () => {
+ const onMultipleModelConfigsChange = vi.fn()
+
+      render(
+        <DebugWithMultipleModelContextProvider
+          multipleModelConfigs={[]}
+          onMultipleModelConfigsChange={onMultipleModelConfigsChange}
+        >
+          <TestConsumer />
+        </DebugWithMultipleModelContextProvider>,
+      )
+
+ screen.getByTestId('call-on-change').click()
+
+ expect(onMultipleModelConfigsChange).toHaveBeenCalledWith(true, [])
+ })
+
+ it('should call onDebugWithMultipleModelChange when invoked from context', () => {
+ const onDebugWithMultipleModelChange = vi.fn()
+
+      render(
+        <DebugWithMultipleModelContextProvider
+          multipleModelConfigs={[]}
+          onDebugWithMultipleModelChange={onDebugWithMultipleModelChange}
+        >
+          <TestConsumer />
+        </DebugWithMultipleModelContextProvider>,
+      )
+
+ screen.getByTestId('call-on-debug-change').click()
+
+ expect(onDebugWithMultipleModelChange).toHaveBeenCalledWith(
+ expect.objectContaining({ id: 'model-1' }),
+ )
+ })
+
+ it('should handle undefined checkCanSend', () => {
+      render(
+        <DebugWithMultipleModelContextProvider multipleModelConfigs={[]}>
+          <TestConsumer />
+        </DebugWithMultipleModelContextProvider>,
+      )
+
+ expect(screen.getByTestId('has-check-can-send')).toHaveTextContent('no')
+ })
+
+ it('should render children correctly', () => {
+      render(
+        <DebugWithMultipleModelContextProvider multipleModelConfigs={[]}>
+          <div data-testid="child-element">Child Content</div>
+        </DebugWithMultipleModelContextProvider>,
+      )
+
+ expect(screen.getByTestId('child-element')).toHaveTextContent('Child Content')
+ })
+
+ it('should update context when props change', () => {
+      const { rerender } = render(
+        <DebugWithMultipleModelContextProvider
+          multipleModelConfigs={[createModelAndParameter({ id: 'model-1' })]}
+        >
+          <TestConsumer />
+        </DebugWithMultipleModelContextProvider>,
+      )
+
+ expect(screen.getByTestId('configs-count')).toHaveTextContent('1')
+
+      rerender(
+        <DebugWithMultipleModelContextProvider
+          multipleModelConfigs={[
+            createModelAndParameter({ id: 'model-1' }),
+            createModelAndParameter({ id: 'model-2' }),
+          ]}
+        >
+          <TestConsumer />
+        </DebugWithMultipleModelContextProvider>,
+      )
+
+ expect(screen.getByTestId('configs-count')).toHaveTextContent('2')
+ })
+
+ it('should pass all context values correctly', () => {
+ const contextValues: DebugWithMultipleModelContextType = {
+ multipleModelConfigs: [createModelAndParameter()],
+ onMultipleModelConfigsChange: vi.fn(),
+ onDebugWithMultipleModelChange: vi.fn(),
+ checkCanSend: () => true,
+ }
+
+      const FullTestConsumer = () => {
+        const context = useDebugWithMultipleModelContext()
+        return (
+          <div>
+            <div data-testid="configs">{JSON.stringify(context.multipleModelConfigs)}</div>
+            <div data-testid="has-on-change">{typeof context.onMultipleModelConfigsChange}</div>
+            <div data-testid="has-on-debug-change">{typeof context.onDebugWithMultipleModelChange}</div>
+            <div data-testid="has-check">{typeof context.checkCanSend}</div>
+          </div>
+        )
+      }
+
+      render(
+        <DebugWithMultipleModelContextProvider {...contextValues}>
+          <FullTestConsumer />
+        </DebugWithMultipleModelContextProvider>,
+      )
+
+ expect(screen.getByTestId('configs')).toHaveTextContent('model-1')
+ expect(screen.getByTestId('has-on-change')).toHaveTextContent('function')
+ expect(screen.getByTestId('has-on-debug-change')).toHaveTextContent('function')
+ expect(screen.getByTestId('has-check')).toHaveTextContent('function')
+ })
+ })
+})
diff --git a/web/app/components/app/configuration/debug/debug-with-multiple-model/debug-item.spec.tsx b/web/app/components/app/configuration/debug/debug-with-multiple-model/debug-item.spec.tsx
new file mode 100644
index 0000000000..efc477fb47
--- /dev/null
+++ b/web/app/components/app/configuration/debug/debug-with-multiple-model/debug-item.spec.tsx
@@ -0,0 +1,552 @@
+import type { CSSProperties } from 'react'
+import type { ModelAndParameter } from '../types'
+import type { Item } from '@/app/components/base/dropdown'
+import { fireEvent, render, screen } from '@testing-library/react'
+import { ModelStatusEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
+import { AppModeEnum } from '@/types/app'
+import DebugItem from './debug-item'
+
+const mockUseDebugConfigurationContext = vi.fn()
+const mockUseDebugWithMultipleModelContext = vi.fn()
+const mockUseProviderContext = vi.fn()
+
+let capturedDropdownProps: {
+ onSelect: (item: Item) => void
+ items: Item[]
+ secondItems?: Item[]
+} | null = null
+
+let capturedModelParameterTriggerProps: {
+ modelAndParameter: ModelAndParameter
+} | null = null
+
+vi.mock('@/context/debug-configuration', () => ({
+ useDebugConfigurationContext: () => mockUseDebugConfigurationContext(),
+}))
+
+vi.mock('./context', () => ({
+ useDebugWithMultipleModelContext: () => mockUseDebugWithMultipleModelContext(),
+}))
+
+vi.mock('@/context/provider-context', () => ({
+ useProviderContext: () => mockUseProviderContext(),
+}))
+
+vi.mock('./chat-item', () => ({
+  default: ({ modelAndParameter }: { modelAndParameter: ModelAndParameter }) => (
+    <div data-testid="chat-item" data-model-id={modelAndParameter.id}>ChatItem</div>
+  ),
+}))
+
+vi.mock('./text-generation-item', () => ({
+  default: ({ modelAndParameter }: { modelAndParameter: ModelAndParameter }) => (
+    <div data-testid="text-generation-item" data-model-id={modelAndParameter.id}>TextGenerationItem</div>
+  ),
+}))
+
+vi.mock('./model-parameter-trigger', () => ({
+  default: (props: { modelAndParameter: ModelAndParameter }) => {
+    capturedModelParameterTriggerProps = props
+    return <div data-testid="model-parameter-trigger">ModelParameterTrigger</div>
+  },
+}))
+
+vi.mock('@/app/components/base/dropdown', () => ({
+  default: (props: { onSelect: (item: Item) => void, items: Item[], secondItems?: Item[] }) => {
+    capturedDropdownProps = props
+    return (
+      <div data-testid="dropdown">
+        {props.items.map(item => (
+          <button key={item.value} data-testid={`dropdown-item-${item.value}`} onClick={() => props.onSelect(item)}>{item.text}</button>
+        ))}
+        {props.secondItems?.map(item => (
+          <button key={item.value} data-testid={`dropdown-second-item-${item.value}`} onClick={() => props.onSelect(item)}>{item.text}</button>
+        ))}
+      </div>
+    )
+  },
+}))
+
+const createModelAndParameter = (overrides: Partial<ModelAndParameter> = {}): ModelAndParameter => ({
+ id: 'model-1',
+ model: 'gpt-3.5-turbo',
+ provider: 'openai',
+ parameters: {},
+ ...overrides,
+})
+
+const createTextGenerationModelList = (models: Array<{ provider: string, model: string, status?: ModelStatusEnum }> = []) => {
+  const providers: Record<string, { provider: string, models: Array<{ model: string, status: ModelStatusEnum }> }> = {}
+
+ models.forEach(({ provider, model, status = ModelStatusEnum.active }) => {
+ if (!providers[provider]) {
+ providers[provider] = { provider, models: [] }
+ }
+ providers[provider].models.push({ model, status })
+ })
+
+ return Object.values(providers)
+}
+
+type DebugItemProps = {
+ modelAndParameter: ModelAndParameter
+ className?: string
+ style?: CSSProperties
+}
+
+const renderComponent = (props: Partial = {}) => {
+ const defaultProps: DebugItemProps = {
+ modelAndParameter: createModelAndParameter(),
+ ...props,
+ }
+  return render(<DebugItem {...defaultProps} />)
+}
+
+describe('DebugItem', () => {
+ beforeEach(() => {
+ vi.clearAllMocks()
+ capturedDropdownProps = null
+ capturedModelParameterTriggerProps = null
+
+ mockUseDebugConfigurationContext.mockReturnValue({
+ mode: AppModeEnum.CHAT,
+ })
+
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [createModelAndParameter()],
+ onMultipleModelConfigsChange: vi.fn(),
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: createTextGenerationModelList([
+ { provider: 'openai', model: 'gpt-3.5-turbo' },
+ ]),
+ })
+ })
+
+ describe('rendering', () => {
+ it('should render with basic props', () => {
+ renderComponent()
+
+ expect(screen.getByTestId('model-parameter-trigger')).toBeInTheDocument()
+ expect(screen.getByTestId('dropdown')).toBeInTheDocument()
+ })
+
+ it('should display correct index number', () => {
+ const modelConfigs = [
+ createModelAndParameter({ id: 'model-1' }),
+ createModelAndParameter({ id: 'model-2' }),
+ ]
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: modelConfigs,
+ onMultipleModelConfigsChange: vi.fn(),
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ const { container } = renderComponent({ modelAndParameter: createModelAndParameter({ id: 'model-2' }) })
+
+ // The index is displayed as "#2" in the component
+ const indexElement = container.querySelector('.font-medium.italic')
+ expect(indexElement?.textContent?.trim()).toContain('2')
+ })
+
+ it('should apply className and style props', () => {
+ const { container } = renderComponent({
+ className: 'custom-class',
+ style: { backgroundColor: 'red' },
+ })
+
+ const wrapper = container.firstChild as HTMLElement
+ expect(wrapper).toHaveClass('custom-class')
+ expect(wrapper.style.backgroundColor).toBe('red')
+ })
+
+ it('should pass modelAndParameter to ModelParameterTrigger', () => {
+ const modelAndParameter = createModelAndParameter({ id: 'test-model' })
+ renderComponent({ modelAndParameter })
+
+ expect(capturedModelParameterTriggerProps?.modelAndParameter).toEqual(modelAndParameter)
+ })
+ })
+
+ describe('ChatItem rendering', () => {
+ it('should render ChatItem in CHAT mode with active model', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({ mode: AppModeEnum.CHAT })
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: createTextGenerationModelList([
+ { provider: 'openai', model: 'gpt-3.5-turbo', status: ModelStatusEnum.active },
+ ]),
+ })
+
+ renderComponent()
+
+ expect(screen.getByTestId('chat-item')).toBeInTheDocument()
+ expect(screen.queryByTestId('text-generation-item')).not.toBeInTheDocument()
+ })
+
+ it('should render ChatItem in AGENT_CHAT mode with active model', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({ mode: AppModeEnum.AGENT_CHAT })
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: createTextGenerationModelList([
+ { provider: 'openai', model: 'gpt-3.5-turbo', status: ModelStatusEnum.active },
+ ]),
+ })
+
+ renderComponent()
+
+ expect(screen.getByTestId('chat-item')).toBeInTheDocument()
+ })
+
+ it('should not render ChatItem when model is not active', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({ mode: AppModeEnum.CHAT })
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: createTextGenerationModelList([
+ { provider: 'openai', model: 'gpt-3.5-turbo', status: ModelStatusEnum.disabled },
+ ]),
+ })
+
+ renderComponent()
+
+ expect(screen.queryByTestId('chat-item')).not.toBeInTheDocument()
+ })
+
+ it('should not render ChatItem when provider not found', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({ mode: AppModeEnum.CHAT })
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: createTextGenerationModelList([
+ { provider: 'anthropic', model: 'claude-3', status: ModelStatusEnum.active },
+ ]),
+ })
+
+ renderComponent()
+
+ expect(screen.queryByTestId('chat-item')).not.toBeInTheDocument()
+ })
+
+ it('should not render ChatItem when model not found', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({ mode: AppModeEnum.CHAT })
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: createTextGenerationModelList([
+ { provider: 'openai', model: 'gpt-4', status: ModelStatusEnum.active },
+ ]),
+ })
+
+ renderComponent()
+
+ expect(screen.queryByTestId('chat-item')).not.toBeInTheDocument()
+ })
+ })
+
+ describe('TextGenerationItem rendering', () => {
+ it('should render TextGenerationItem in COMPLETION mode with active model', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({ mode: AppModeEnum.COMPLETION })
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: createTextGenerationModelList([
+ { provider: 'openai', model: 'gpt-3.5-turbo', status: ModelStatusEnum.active },
+ ]),
+ })
+
+ renderComponent()
+
+ expect(screen.getByTestId('text-generation-item')).toBeInTheDocument()
+ expect(screen.queryByTestId('chat-item')).not.toBeInTheDocument()
+ })
+
+ it('should not render TextGenerationItem when provider is not found', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({ mode: AppModeEnum.COMPLETION })
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: createTextGenerationModelList([
+ { provider: 'anthropic', model: 'claude-3', status: ModelStatusEnum.active },
+ ]),
+ })
+
+ renderComponent()
+
+ expect(screen.queryByTestId('text-generation-item')).not.toBeInTheDocument()
+ })
+ })
+
+ describe('dropdown menu', () => {
+ it('should show duplicate option when less than 4 models', () => {
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [createModelAndParameter()],
+ onMultipleModelConfigsChange: vi.fn(),
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ renderComponent()
+
+ expect(capturedDropdownProps?.items).toContainEqual(
+ expect.objectContaining({ value: 'duplicate' }),
+ )
+ })
+
+ it('should hide duplicate option when 4 or more models', () => {
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [
+ createModelAndParameter({ id: '1' }),
+ createModelAndParameter({ id: '2' }),
+ createModelAndParameter({ id: '3' }),
+ createModelAndParameter({ id: '4' }),
+ ],
+ onMultipleModelConfigsChange: vi.fn(),
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ renderComponent()
+
+ expect(capturedDropdownProps?.items).not.toContainEqual(
+ expect.objectContaining({ value: 'duplicate' }),
+ )
+ })
+
+ it('should show debug-as-single-model option when provider and model are set', () => {
+ renderComponent({
+ modelAndParameter: createModelAndParameter({
+ provider: 'openai',
+ model: 'gpt-3.5-turbo',
+ }),
+ })
+
+ expect(capturedDropdownProps?.items).toContainEqual(
+ expect.objectContaining({ value: 'debug-as-single-model' }),
+ )
+ })
+
+ it('should hide debug-as-single-model option when provider is missing', () => {
+ renderComponent({
+ modelAndParameter: createModelAndParameter({
+ provider: '',
+ model: 'gpt-3.5-turbo',
+ }),
+ })
+
+ expect(capturedDropdownProps?.items).not.toContainEqual(
+ expect.objectContaining({ value: 'debug-as-single-model' }),
+ )
+ })
+
+ it('should hide debug-as-single-model option when model is missing', () => {
+ renderComponent({
+ modelAndParameter: createModelAndParameter({
+ provider: 'openai',
+ model: '',
+ }),
+ })
+
+ expect(capturedDropdownProps?.items).not.toContainEqual(
+ expect.objectContaining({ value: 'debug-as-single-model' }),
+ )
+ })
+
+ it('should show remove option in secondItems when more than 2 models', () => {
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [
+ createModelAndParameter({ id: '1' }),
+ createModelAndParameter({ id: '2' }),
+ createModelAndParameter({ id: '3' }),
+ ],
+ onMultipleModelConfigsChange: vi.fn(),
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ renderComponent()
+
+ expect(capturedDropdownProps?.secondItems).toContainEqual(
+ expect.objectContaining({ value: 'remove' }),
+ )
+ })
+
+ it('should not show remove option when 2 or fewer models', () => {
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [
+ createModelAndParameter({ id: '1' }),
+ createModelAndParameter({ id: '2' }),
+ ],
+ onMultipleModelConfigsChange: vi.fn(),
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ renderComponent()
+
+ expect(capturedDropdownProps?.secondItems).toBeUndefined()
+ })
+ })
+
+ describe('dropdown actions', () => {
+ it('should duplicate model when duplicate is selected', () => {
+ const onMultipleModelConfigsChange = vi.fn()
+ const originalModel = createModelAndParameter({ id: 'original' })
+
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [originalModel],
+ onMultipleModelConfigsChange,
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ renderComponent({ modelAndParameter: originalModel })
+
+ fireEvent.click(screen.getByTestId('dropdown-item-duplicate'))
+
+ expect(onMultipleModelConfigsChange).toHaveBeenCalledWith(
+ true,
+ expect.arrayContaining([
+ originalModel,
+ expect.objectContaining({
+ model: originalModel.model,
+ provider: originalModel.provider,
+ parameters: originalModel.parameters,
+ }),
+ ]),
+ )
+ })
+
+ it('should not duplicate when already at 4 models', () => {
+ const onMultipleModelConfigsChange = vi.fn()
+ const models = [
+ createModelAndParameter({ id: '1' }),
+ createModelAndParameter({ id: '2' }),
+ createModelAndParameter({ id: '3' }),
+ createModelAndParameter({ id: '4' }),
+ ]
+
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: models,
+ onMultipleModelConfigsChange,
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ renderComponent({ modelAndParameter: models[0] })
+
+ // Since duplicate is not shown when >= 4 models, we need to manually call handleSelect
+ capturedDropdownProps?.onSelect({ value: 'duplicate', text: 'Duplicate' })
+
+ expect(onMultipleModelConfigsChange).not.toHaveBeenCalled()
+ })
+
+ it('should call onDebugWithMultipleModelChange when debug-as-single-model is selected', () => {
+ const onDebugWithMultipleModelChange = vi.fn()
+ const modelAndParameter = createModelAndParameter()
+
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [modelAndParameter],
+ onMultipleModelConfigsChange: vi.fn(),
+ onDebugWithMultipleModelChange,
+ })
+
+ renderComponent({ modelAndParameter })
+
+ fireEvent.click(screen.getByTestId('dropdown-item-debug-as-single-model'))
+
+ expect(onDebugWithMultipleModelChange).toHaveBeenCalledWith(modelAndParameter)
+ })
+
+ it('should remove model when remove is selected', () => {
+ const onMultipleModelConfigsChange = vi.fn()
+ const models = [
+ createModelAndParameter({ id: '1' }),
+ createModelAndParameter({ id: '2' }),
+ createModelAndParameter({ id: '3' }),
+ ]
+
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: models,
+ onMultipleModelConfigsChange,
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ renderComponent({ modelAndParameter: models[1] })
+
+ fireEvent.click(screen.getByTestId('dropdown-second-item-remove'))
+
+ expect(onMultipleModelConfigsChange).toHaveBeenCalledWith(
+ true,
+ [models[0], models[2]],
+ )
+ })
+
+ it('should insert duplicated model at correct position', () => {
+ const onMultipleModelConfigsChange = vi.fn()
+ const models = [
+ createModelAndParameter({ id: '1' }),
+ createModelAndParameter({ id: '2' }),
+ createModelAndParameter({ id: '3' }),
+ ]
+
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: models,
+ onMultipleModelConfigsChange,
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ // Duplicate the second model
+ renderComponent({ modelAndParameter: models[1] })
+
+ fireEvent.click(screen.getByTestId('dropdown-item-duplicate'))
+
+ expect(onMultipleModelConfigsChange).toHaveBeenCalledWith(
+ true,
+ expect.arrayContaining([
+ models[0],
+ models[1],
+ expect.objectContaining({ model: models[1].model }),
+ models[2],
+ ]),
+ )
+ })
+ })
+
+ describe('edge cases', () => {
+ it('should handle model not found in multipleModelConfigs', () => {
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [],
+ onMultipleModelConfigsChange: vi.fn(),
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ const { container } = renderComponent()
+
+ // Should show index 0 (not found returns -1, but display shows index + 1)
+ const indexElement = container.querySelector('.font-medium.italic')
+ expect(indexElement?.textContent?.trim()).toContain('0')
+ })
+
+ it('should handle empty textGenerationModelList', () => {
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: [],
+ })
+
+ renderComponent()
+
+ expect(screen.queryByTestId('chat-item')).not.toBeInTheDocument()
+ expect(screen.queryByTestId('text-generation-item')).not.toBeInTheDocument()
+ })
+
+ it('should handle model with quotaExceeded status', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({ mode: AppModeEnum.CHAT })
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: createTextGenerationModelList([
+ { provider: 'anthropic', model: 'not-matching', status: ModelStatusEnum.quotaExceeded },
+ ]),
+ })
+
+ renderComponent()
+
+ // When provider/model doesn't match, ChatItem won't render
+ expect(screen.queryByTestId('chat-item')).not.toBeInTheDocument()
+ })
+ })
+})
diff --git a/web/app/components/app/configuration/debug/debug-with-multiple-model/model-parameter-trigger.spec.tsx b/web/app/components/app/configuration/debug/debug-with-multiple-model/model-parameter-trigger.spec.tsx
new file mode 100644
index 0000000000..5ef1dcadbb
--- /dev/null
+++ b/web/app/components/app/configuration/debug/debug-with-multiple-model/model-parameter-trigger.spec.tsx
@@ -0,0 +1,405 @@
+import type { ReactNode } from 'react'
+import type { ModelAndParameter } from '../types'
+import type { FormValue } from '@/app/components/header/account-setting/model-provider-page/declarations'
+import { render, screen } from '@testing-library/react'
+import { ModelStatusEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
+import ModelParameterTrigger from './model-parameter-trigger'
+
+const mockUseDebugConfigurationContext = vi.fn()
+const mockUseDebugWithMultipleModelContext = vi.fn()
+const mockUseLanguage = vi.fn()
+
+type RenderTriggerProps = {
+ open: boolean
+ currentProvider: { provider: string } | null
+ currentModel: { model: string, status: ModelStatusEnum } | null
+}
+
+let capturedModalProps: {
+ isAdvancedMode: boolean
+ provider: string
+ modelId: string
+ completionParams: FormValue
+ onCompletionParamsChange: (params: FormValue) => void
+ setModel: (model: { modelId: string, provider: string }) => void
+ debugWithMultipleModel: boolean
+ onDebugWithMultipleModelChange: () => void
+ renderTrigger: (props: RenderTriggerProps) => ReactNode
+} | null = null
+
+vi.mock('@/context/debug-configuration', () => ({
+ useDebugConfigurationContext: () => mockUseDebugConfigurationContext(),
+}))
+
+vi.mock('./context', () => ({
+ useDebugWithMultipleModelContext: () => mockUseDebugWithMultipleModelContext(),
+}))
+
+vi.mock('@/app/components/header/account-setting/model-provider-page/hooks', () => ({
+ useLanguage: () => mockUseLanguage(),
+}))
+
+vi.mock('@/app/components/header/account-setting/model-provider-page/model-parameter-modal', () => ({
+ default: (props: typeof capturedModalProps) => {
+ capturedModalProps = props
+ // Render the trigger that the component passes
+ const triggerContent = props?.renderTrigger({
+ open: false,
+ currentProvider: null,
+ currentModel: null,
+ })
+ return (
+      <div data-testid='model-parameter-modal'>
+        {triggerContent}
+      </div>
+ )
+ },
+}))
+
+vi.mock('@/app/components/header/account-setting/model-provider-page/model-icon', () => ({
+ default: ({ provider, modelName }: { provider: { provider: string }, modelName?: string }) => (
+    <span data-testid='model-icon' data-provider={provider?.provider} data-model={modelName}>
+      ModelIcon
+    </span>
+ ),
+}))
+
+vi.mock('@/app/components/header/account-setting/model-provider-page/model-name', () => ({
+ default: ({ modelItem }: { modelItem: { model: string } }) => (
+    <div data-testid='model-name'>{modelItem?.model}</div>
+ ),
+}))
+
+vi.mock('@/app/components/base/tooltip', () => ({
+ default: ({ children, popupContent }: { children: ReactNode, popupContent: string }) => (
+    <div data-tooltip={popupContent}>{children}</div>
+ ),
+}))
+
+const createModelAndParameter = (overrides: Partial<ModelAndParameter> = {}): ModelAndParameter => ({
+ id: 'model-1',
+ model: 'gpt-3.5-turbo',
+ provider: 'openai',
+ parameters: { temperature: 0.7 },
+ ...overrides,
+})
+
+const renderComponent = (props: Partial<{ modelAndParameter: ModelAndParameter }> = {}) => {
+ const defaultProps = {
+ modelAndParameter: createModelAndParameter(),
+ ...props,
+ }
+  return render(<ModelParameterTrigger {...defaultProps} />)
+}
+
+describe('ModelParameterTrigger', () => {
+ beforeEach(() => {
+ vi.clearAllMocks()
+ capturedModalProps = null
+
+ mockUseDebugConfigurationContext.mockReturnValue({
+ isAdvancedMode: false,
+ })
+
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [createModelAndParameter()],
+ onMultipleModelConfigsChange: vi.fn(),
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ mockUseLanguage.mockReturnValue('en_US')
+ })
+
+ describe('rendering', () => {
+ it('should render ModelParameterModal', () => {
+ renderComponent()
+
+ expect(screen.getByTestId('model-parameter-modal')).toBeInTheDocument()
+ })
+
+ it('should pass correct props to ModelParameterModal', () => {
+ const modelAndParameter = createModelAndParameter({
+ provider: 'anthropic',
+ model: 'claude-3',
+ parameters: { max_tokens: 1000 },
+ })
+
+ renderComponent({ modelAndParameter })
+
+ expect(capturedModalProps?.provider).toBe('anthropic')
+ expect(capturedModalProps?.modelId).toBe('claude-3')
+ expect(capturedModalProps?.completionParams).toEqual({ max_tokens: 1000 })
+ expect(capturedModalProps?.debugWithMultipleModel).toBe(true)
+ })
+
+ it('should pass isAdvancedMode from context', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({
+ isAdvancedMode: true,
+ })
+
+ renderComponent()
+
+ expect(capturedModalProps?.isAdvancedMode).toBe(true)
+ })
+ })
+
+ describe('handleSelectModel', () => {
+ it('should call onMultipleModelConfigsChange with updated model', () => {
+ const onMultipleModelConfigsChange = vi.fn()
+ const modelAndParameter = createModelAndParameter({ id: 'model-1' })
+
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [modelAndParameter],
+ onMultipleModelConfigsChange,
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ renderComponent({ modelAndParameter })
+
+ // Directly call the setModel callback
+ capturedModalProps?.setModel({ modelId: 'gpt-4', provider: 'openai' })
+
+ expect(onMultipleModelConfigsChange).toHaveBeenCalledWith(true, [
+ expect.objectContaining({
+ id: 'model-1',
+ model: 'gpt-4',
+ provider: 'openai',
+ }),
+ ])
+ })
+
+ it('should update correct model in array', () => {
+ const onMultipleModelConfigsChange = vi.fn()
+ const models = [
+ createModelAndParameter({ id: 'model-1' }),
+ createModelAndParameter({ id: 'model-2' }),
+ createModelAndParameter({ id: 'model-3' }),
+ ]
+
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: models,
+ onMultipleModelConfigsChange,
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ renderComponent({ modelAndParameter: models[1] })
+
+ capturedModalProps?.setModel({ modelId: 'gpt-4', provider: 'openai' })
+
+ expect(onMultipleModelConfigsChange).toHaveBeenCalledWith(true, [
+ models[0],
+ expect.objectContaining({
+ id: 'model-2',
+ model: 'gpt-4',
+ provider: 'openai',
+ }),
+ models[2],
+ ])
+ })
+ })
+
+ describe('handleParamsChange', () => {
+ it('should call onMultipleModelConfigsChange with updated parameters', () => {
+ const onMultipleModelConfigsChange = vi.fn()
+ const modelAndParameter = createModelAndParameter({ id: 'model-1' })
+
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [modelAndParameter],
+ onMultipleModelConfigsChange,
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ renderComponent({ modelAndParameter })
+
+ capturedModalProps?.onCompletionParamsChange({ temperature: 0.8 })
+
+ expect(onMultipleModelConfigsChange).toHaveBeenCalledWith(true, [
+ expect.objectContaining({
+ id: 'model-1',
+ parameters: { temperature: 0.8 },
+ }),
+ ])
+ })
+
+ it('should preserve other model properties when changing params', () => {
+ const onMultipleModelConfigsChange = vi.fn()
+ const modelAndParameter = createModelAndParameter({
+ id: 'model-1',
+ model: 'gpt-3.5-turbo',
+ provider: 'openai',
+ parameters: { temperature: 0.7 },
+ })
+
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [modelAndParameter],
+ onMultipleModelConfigsChange,
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ renderComponent({ modelAndParameter })
+
+ capturedModalProps?.onCompletionParamsChange({ temperature: 0.8 })
+
+ expect(onMultipleModelConfigsChange).toHaveBeenCalledWith(true, [
+ expect.objectContaining({
+ id: 'model-1',
+ model: 'gpt-3.5-turbo',
+ provider: 'openai',
+ parameters: { temperature: 0.8 },
+ }),
+ ])
+ })
+ })
+
+ describe('onDebugWithMultipleModelChange', () => {
+ it('should call context onDebugWithMultipleModelChange with modelAndParameter', () => {
+ const onDebugWithMultipleModelChange = vi.fn()
+ const modelAndParameter = createModelAndParameter()
+
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [modelAndParameter],
+ onMultipleModelConfigsChange: vi.fn(),
+ onDebugWithMultipleModelChange,
+ })
+
+ renderComponent({ modelAndParameter })
+
+ capturedModalProps?.onDebugWithMultipleModelChange()
+
+ expect(onDebugWithMultipleModelChange).toHaveBeenCalledWith(modelAndParameter)
+ })
+ })
+
+ describe('index calculation', () => {
+ it('should find correct index in multipleModelConfigs', () => {
+ const models = [
+ createModelAndParameter({ id: 'model-1' }),
+ createModelAndParameter({ id: 'model-2' }),
+ createModelAndParameter({ id: 'model-3' }),
+ ]
+
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: models,
+ onMultipleModelConfigsChange: vi.fn(),
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ renderComponent({ modelAndParameter: models[2] })
+
+ // The component uses the index to update the correct model
+ // We verify this through the handleSelectModel behavior
+ expect(capturedModalProps).not.toBeNull()
+ })
+
+ it('should handle model not found in configs', () => {
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [createModelAndParameter({ id: 'other' })],
+ onMultipleModelConfigsChange: vi.fn(),
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ // Should not throw even if model is not found
+ expect(() => renderComponent()).not.toThrow()
+ })
+ })
+
+ describe('trigger rendering', () => {
+ it('should render trigger content from renderTrigger', () => {
+ renderComponent()
+
+ // The trigger is rendered via renderTrigger callback
+ expect(screen.getByTestId('model-parameter-modal')).toBeInTheDocument()
+ })
+
+ it('should render "Select Model" text when no provider/model', () => {
+ renderComponent()
+
+ // When currentProvider and currentModel are null, shows "Select Model"
+ expect(screen.getByText('common.modelProvider.selectModel')).toBeInTheDocument()
+ })
+ })
+
+ describe('language context', () => {
+ it('should use language from useLanguage hook', () => {
+ mockUseLanguage.mockReturnValue('zh_Hans')
+
+ renderComponent()
+
+ // The language is used for MODEL_STATUS_TEXT tooltip
+ // We verify the hook is called
+ expect(mockUseLanguage).toHaveBeenCalled()
+ })
+ })
+
+ describe('edge cases', () => {
+ it('should handle empty multipleModelConfigs', () => {
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [],
+ onMultipleModelConfigsChange: vi.fn(),
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ expect(() => renderComponent()).not.toThrow()
+ })
+
+ it('should handle undefined parameters', () => {
+ const modelAndParameter = createModelAndParameter({
+ parameters: undefined as unknown as FormValue,
+ })
+
+ expect(() => renderComponent({ modelAndParameter })).not.toThrow()
+ expect(capturedModalProps?.completionParams).toBeUndefined()
+ })
+
+ it('should handle model selection for model not in list', () => {
+ const onMultipleModelConfigsChange = vi.fn()
+ const modelAndParameter = createModelAndParameter({ id: 'not-in-list' })
+
+ mockUseDebugWithMultipleModelContext.mockReturnValue({
+ multipleModelConfigs: [createModelAndParameter({ id: 'different-model' })],
+ onMultipleModelConfigsChange,
+ onDebugWithMultipleModelChange: vi.fn(),
+ })
+
+ renderComponent({ modelAndParameter })
+
+ capturedModalProps?.setModel({ modelId: 'gpt-4', provider: 'openai' })
+
+ // index will be -1, so newModelConfigs[-1] will be undefined
+ // This tests the edge case behavior
+ expect(onMultipleModelConfigsChange).toHaveBeenCalled()
+ })
+ })
+
+ describe('renderTrigger with different states', () => {
+ it('should pass correct props to renderTrigger', () => {
+ renderComponent()
+
+ expect(capturedModalProps?.renderTrigger).toBeDefined()
+ expect(typeof capturedModalProps?.renderTrigger).toBe('function')
+ })
+
+ it('should render trigger with provider info when available', () => {
+ // Mock the modal to render trigger with provider
+ vi.doMock('@/app/components/header/account-setting/model-provider-page/model-parameter-modal', () => ({
+ default: (props: typeof capturedModalProps) => {
+ capturedModalProps = props
+ const triggerContent = props?.renderTrigger({
+ open: false,
+ currentProvider: { provider: 'openai' },
+ currentModel: { model: 'gpt-3.5-turbo', status: ModelStatusEnum.active },
+ })
+ return (
+          <div data-testid='model-parameter-modal'>
+            {triggerContent}
+          </div>
+ )
+ },
+ }))
+
+ renderComponent()
+
+ expect(screen.getByTestId('model-parameter-modal')).toBeInTheDocument()
+ })
+ })
+})
diff --git a/web/app/components/app/configuration/debug/debug-with-multiple-model/text-generation-item.spec.tsx b/web/app/components/app/configuration/debug/debug-with-multiple-model/text-generation-item.spec.tsx
new file mode 100644
index 0000000000..1876a10a0c
--- /dev/null
+++ b/web/app/components/app/configuration/debug/debug-with-multiple-model/text-generation-item.spec.tsx
@@ -0,0 +1,721 @@
+import type { ModelAndParameter } from '../types'
+import { render, screen } from '@testing-library/react'
+import { TransferMethod } from '@/app/components/base/chat/types'
+import { APP_CHAT_WITH_MULTIPLE_MODEL } from '../types'
+import TextGenerationItem from './text-generation-item'
+
+const mockUseDebugConfigurationContext = vi.fn()
+const mockUseProviderContext = vi.fn()
+const mockUseFeatures = vi.fn()
+const mockUseTextGeneration = vi.fn()
+const mockUseEventEmitterContextContext = vi.fn()
+const mockPromptVariablesToUserInputsForm = vi.fn()
+
+let capturedTextGenerationProps: {
+ content: string
+ isLoading: boolean
+ isResponding: boolean
+ messageId: string | null
+ className?: string
+} | null = null
+
+let eventSubscriptionCallback: ((v: { type: string, payload?: Record<string, any> }) => void) | null = null
+
+vi.mock('@/context/debug-configuration', () => ({
+ useDebugConfigurationContext: () => mockUseDebugConfigurationContext(),
+}))
+
+vi.mock('@/context/provider-context', () => ({
+ useProviderContext: () => mockUseProviderContext(),
+}))
+
+vi.mock('@/app/components/base/features/hooks', () => ({
+  useFeatures: (selector: (state: Record<string, any>) => unknown) => mockUseFeatures(selector),
+}))
+
+vi.mock('@/app/components/base/text-generation/hooks', () => ({
+ useTextGeneration: () => mockUseTextGeneration(),
+}))
+
+vi.mock('@/context/event-emitter', () => ({
+ useEventEmitterContextContext: () => mockUseEventEmitterContextContext(),
+}))
+
+vi.mock('@/utils/model-config', () => ({
+ promptVariablesToUserInputsForm: (...args: unknown[]) => mockPromptVariablesToUserInputsForm(...args),
+}))
+
+vi.mock('@/app/components/app/text-generate/item', () => ({
+ default: (props: typeof capturedTextGenerationProps) => {
+ capturedTextGenerationProps = props
+ return (
+      <div data-testid='text-generation'>
+        <div data-testid='content'>{props?.content}</div>
+        <div data-testid='is-loading'>{props?.isLoading ? 'yes' : 'no'}</div>
+        <div data-testid='is-responding'>{props?.isResponding ? 'yes' : 'no'}</div>
+        <div data-testid='message-id'>{props?.messageId || 'null'}</div>
+      </div>
+ )
+ },
+}))
+
+const createModelAndParameter = (overrides: Partial<ModelAndParameter> = {}): ModelAndParameter => ({
+ id: 'model-1',
+ model: 'gpt-3.5-turbo',
+ provider: 'openai',
+ parameters: { temperature: 0.7 },
+ ...overrides,
+})
+
+const createDefaultMocks = () => {
+ mockUseDebugConfigurationContext.mockReturnValue({
+ isAdvancedMode: false,
+ modelConfig: {
+ configs: {
+ prompt_template: 'Hello {{name}}',
+ prompt_variables: [
+ { key: 'name', name: 'Name', type: 'string', is_context_var: false },
+ ],
+ },
+ system_parameters: {},
+ },
+ appId: 'app-123',
+ inputs: { name: 'World' },
+ promptMode: 'simple',
+ speechToTextConfig: { enabled: true },
+ introduction: 'Welcome!',
+ suggestedQuestionsAfterAnswerConfig: { enabled: false },
+ citationConfig: { enabled: true },
+ externalDataToolsConfig: [],
+ chatPromptConfig: {},
+ completionPromptConfig: {},
+ dataSets: [{ id: 'ds-1' }],
+ datasetConfigs: { retrieval_model: 'single' },
+ })
+
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: [
+ {
+ provider: 'openai',
+ models: [
+ {
+ model: 'gpt-3.5-turbo',
+ model_properties: { mode: 'chat' },
+ },
+ ],
+ },
+ ],
+ })
+
+  mockUseFeatures.mockImplementation((selector: (state: Record<string, any>) => unknown) => {
+ const state = {
+ features: {
+ moreLikeThis: { enabled: false },
+ moderation: { enabled: false },
+ text2speech: { enabled: false },
+ file: { enabled: true },
+ },
+ }
+ return selector(state)
+ })
+
+ mockUseTextGeneration.mockReturnValue({
+ completion: 'Generated text',
+ handleSend: vi.fn(),
+ isResponding: false,
+ messageId: 'msg-123',
+ })
+
+ mockUseEventEmitterContextContext.mockReturnValue({
+ eventEmitter: {
+      useSubscription: (callback: (v: { type: string, payload?: Record<string, any> }) => void) => {
+ eventSubscriptionCallback = callback
+ },
+ },
+ })
+
+ mockPromptVariablesToUserInputsForm.mockReturnValue([
+ { variable: 'name', label: 'Name', type: 'text-input', required: true },
+ ])
+}
+
+const renderComponent = (props: Partial<{ modelAndParameter: ModelAndParameter }> = {}) => {
+ const defaultProps = {
+ modelAndParameter: createModelAndParameter(),
+ ...props,
+ }
+  return render(<TextGenerationItem {...defaultProps} />)
+}
+
+describe('TextGenerationItem', () => {
+ beforeEach(() => {
+ vi.clearAllMocks()
+ capturedTextGenerationProps = null
+ eventSubscriptionCallback = null
+ createDefaultMocks()
+ })
+
+ describe('rendering', () => {
+ it('should render TextGeneration component', () => {
+ renderComponent()
+
+ expect(screen.getByTestId('text-generation')).toBeInTheDocument()
+ })
+
+ it('should pass completion content to TextGeneration', () => {
+ mockUseTextGeneration.mockReturnValue({
+ completion: 'Hello World',
+ handleSend: vi.fn(),
+ isResponding: false,
+ messageId: 'msg-1',
+ })
+
+ renderComponent()
+
+ expect(screen.getByTestId('content')).toHaveTextContent('Hello World')
+ })
+
+ it('should show loading when no completion and responding', () => {
+ mockUseTextGeneration.mockReturnValue({
+ completion: '',
+ handleSend: vi.fn(),
+ isResponding: true,
+ messageId: null,
+ })
+
+ renderComponent()
+
+ expect(screen.getByTestId('is-loading')).toHaveTextContent('yes')
+ })
+
+ it('should not show loading when completion exists', () => {
+ mockUseTextGeneration.mockReturnValue({
+ completion: 'Some text',
+ handleSend: vi.fn(),
+ isResponding: true,
+ messageId: 'msg-1',
+ })
+
+ renderComponent()
+
+ expect(screen.getByTestId('is-loading')).toHaveTextContent('no')
+ })
+
+ it('should pass isResponding to TextGeneration', () => {
+ mockUseTextGeneration.mockReturnValue({
+ completion: 'Text',
+ handleSend: vi.fn(),
+ isResponding: true,
+ messageId: 'msg-1',
+ })
+
+ renderComponent()
+
+ expect(screen.getByTestId('is-responding')).toHaveTextContent('yes')
+ })
+
+ it('should pass messageId to TextGeneration', () => {
+ mockUseTextGeneration.mockReturnValue({
+ completion: 'Text',
+ handleSend: vi.fn(),
+ isResponding: false,
+ messageId: 'msg-456',
+ })
+
+ renderComponent()
+
+ expect(screen.getByTestId('message-id')).toHaveTextContent('msg-456')
+ })
+ })
+
+ describe('config composition', () => {
+ it('should use prompt_template in non-advanced mode', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({
+ isAdvancedMode: false,
+ modelConfig: {
+ configs: {
+ prompt_template: 'My Template',
+ prompt_variables: [],
+ },
+ system_parameters: {},
+ },
+ appId: 'app-123',
+ inputs: {},
+ promptMode: 'simple',
+ speechToTextConfig: {},
+ introduction: '',
+ suggestedQuestionsAfterAnswerConfig: {},
+ citationConfig: {},
+ externalDataToolsConfig: [],
+ chatPromptConfig: {},
+ completionPromptConfig: {},
+ dataSets: [],
+ datasetConfigs: {},
+ })
+
+ renderComponent()
+
+ // Config is built internally - we verify through the component rendering
+ expect(capturedTextGenerationProps).not.toBeNull()
+ })
+
+ it('should use empty pre_prompt in advanced mode', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({
+ isAdvancedMode: true,
+ modelConfig: {
+ configs: {
+ prompt_template: 'Should not be used',
+ prompt_variables: [],
+ },
+ system_parameters: {},
+ },
+ appId: 'app-123',
+ inputs: {},
+ promptMode: 'advanced',
+ speechToTextConfig: {},
+ introduction: '',
+ suggestedQuestionsAfterAnswerConfig: {},
+ citationConfig: {},
+ externalDataToolsConfig: [],
+ chatPromptConfig: { custom: true },
+ completionPromptConfig: { custom: true },
+ dataSets: [],
+ datasetConfigs: {},
+ })
+
+ renderComponent()
+
+ expect(capturedTextGenerationProps).not.toBeNull()
+ })
+
+ it('should find context variable from prompt_variables', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({
+ isAdvancedMode: false,
+ modelConfig: {
+ configs: {
+ prompt_template: '',
+ prompt_variables: [
+ { key: 'context', name: 'Context', type: 'string', is_context_var: true },
+ { key: 'query', name: 'Query', type: 'string', is_context_var: false },
+ ],
+ },
+ system_parameters: {},
+ },
+ appId: 'app-123',
+ inputs: {},
+ promptMode: 'simple',
+ speechToTextConfig: {},
+ introduction: '',
+ suggestedQuestionsAfterAnswerConfig: {},
+ citationConfig: {},
+ externalDataToolsConfig: [],
+ chatPromptConfig: {},
+ completionPromptConfig: {},
+ dataSets: [],
+ datasetConfigs: {},
+ })
+
+ renderComponent()
+
+ expect(capturedTextGenerationProps).not.toBeNull()
+ })
+ })
+
+ describe('dataset configuration', () => {
+ it('should transform dataSets to postDatasets format', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({
+ isAdvancedMode: false,
+ modelConfig: {
+ configs: { prompt_template: '', prompt_variables: [] },
+ system_parameters: {},
+ },
+ appId: 'app-123',
+ inputs: {},
+ promptMode: 'simple',
+ speechToTextConfig: {},
+ introduction: '',
+ suggestedQuestionsAfterAnswerConfig: {},
+ citationConfig: {},
+ externalDataToolsConfig: [],
+ chatPromptConfig: {},
+ completionPromptConfig: {},
+ dataSets: [{ id: 'ds-1' }, { id: 'ds-2' }],
+ datasetConfigs: { retrieval_model: 'multiple' },
+ })
+
+ renderComponent()
+
+ // postDatasets is used in config.dataset_configs.datasets
+ expect(capturedTextGenerationProps).not.toBeNull()
+ })
+ })
+
+ describe('event subscription', () => {
+ it('should handle APP_CHAT_WITH_MULTIPLE_MODEL event', () => {
+ const handleSend = vi.fn()
+ mockUseTextGeneration.mockReturnValue({
+ completion: '',
+ handleSend,
+ isResponding: false,
+ messageId: null,
+ })
+
+ renderComponent()
+
+ eventSubscriptionCallback?.({
+ type: APP_CHAT_WITH_MULTIPLE_MODEL,
+ payload: { message: 'Generate text', files: [] },
+ })
+
+ expect(handleSend).toHaveBeenCalledWith(
+ 'apps/app-123/completion-messages',
+ expect.objectContaining({
+ inputs: { name: 'World' },
+ }),
+ )
+ })
+
+ it('should ignore other event types', () => {
+ const handleSend = vi.fn()
+ mockUseTextGeneration.mockReturnValue({
+ completion: '',
+ handleSend,
+ isResponding: false,
+ messageId: null,
+ })
+
+ renderComponent()
+
+ eventSubscriptionCallback?.({
+ type: 'OTHER_EVENT',
+ payload: {},
+ })
+
+ expect(handleSend).not.toHaveBeenCalled()
+ })
+ })
+
+ describe('doSend function', () => {
+ it('should include model configuration', () => {
+ const handleSend = vi.fn()
+ mockUseTextGeneration.mockReturnValue({
+ completion: '',
+ handleSend,
+ isResponding: false,
+ messageId: null,
+ })
+
+ const modelAndParameter = createModelAndParameter({
+ provider: 'anthropic',
+ model: 'claude-3',
+ parameters: { max_tokens: 2000 },
+ })
+
+ renderComponent({ modelAndParameter })
+
+ eventSubscriptionCallback?.({
+ type: APP_CHAT_WITH_MULTIPLE_MODEL,
+ payload: { message: 'Test', files: [] },
+ })
+
+ expect(handleSend).toHaveBeenCalledWith(
+ expect.any(String),
+ expect.objectContaining({
+ model_config: expect.objectContaining({
+ model: expect.objectContaining({
+ provider: 'anthropic',
+ name: 'claude-3',
+ completion_params: { max_tokens: 2000 },
+ }),
+ }),
+ }),
+ )
+ })
+
+ it('should include files with local_file transfer method handled', () => {
+ const handleSend = vi.fn()
+ mockUseTextGeneration.mockReturnValue({
+ completion: '',
+ handleSend,
+ isResponding: false,
+ messageId: null,
+ })
+
+ renderComponent()
+
+ const files = [
+ { id: 'f1', transfer_method: TransferMethod.local_file, url: 'blob:123' },
+ { id: 'f2', transfer_method: TransferMethod.remote_url, url: 'https://example.com/file' },
+ ]
+
+ eventSubscriptionCallback?.({
+ type: APP_CHAT_WITH_MULTIPLE_MODEL,
+ payload: { message: 'Test', files },
+ })
+
+ expect(handleSend).toHaveBeenCalledWith(
+ expect.any(String),
+ expect.objectContaining({
+ files: [
+ expect.objectContaining({ id: 'f1', transfer_method: TransferMethod.local_file, url: '' }),
+ expect.objectContaining({ id: 'f2', transfer_method: TransferMethod.remote_url, url: 'https://example.com/file' }),
+ ],
+ }),
+ )
+ })
+
+ it('should not include files when file upload is disabled', () => {
+ const handleSend = vi.fn()
+ mockUseTextGeneration.mockReturnValue({
+ completion: '',
+ handleSend,
+ isResponding: false,
+ messageId: null,
+ })
+
+      mockUseFeatures.mockImplementation((selector: (state: Record<string, any>) => unknown) => {
+ const state = {
+ features: {
+ moreLikeThis: { enabled: false },
+ moderation: { enabled: false },
+ text2speech: { enabled: false },
+ file: { enabled: false },
+ },
+ }
+ return selector(state)
+ })
+
+ renderComponent()
+
+ eventSubscriptionCallback?.({
+ type: APP_CHAT_WITH_MULTIPLE_MODEL,
+ payload: { message: 'Test', files: [{ id: 'f1' }] },
+ })
+
+ expect(handleSend).toHaveBeenCalledWith(
+ expect.any(String),
+ expect.not.objectContaining({
+ files: expect.anything(),
+ }),
+ )
+ })
+
+ it('should not include files when files array is empty', () => {
+ const handleSend = vi.fn()
+ mockUseTextGeneration.mockReturnValue({
+ completion: '',
+ handleSend,
+ isResponding: false,
+ messageId: null,
+ })
+
+ renderComponent()
+
+ eventSubscriptionCallback?.({
+ type: APP_CHAT_WITH_MULTIPLE_MODEL,
+ payload: { message: 'Test', files: [] },
+ })
+
+ expect(handleSend).toHaveBeenCalledWith(
+ expect.any(String),
+ expect.not.objectContaining({
+ files: expect.anything(),
+ }),
+ )
+ })
+
+ it('should not include files when files is undefined', () => {
+ const handleSend = vi.fn()
+ mockUseTextGeneration.mockReturnValue({
+ completion: '',
+ handleSend,
+ isResponding: false,
+ messageId: null,
+ })
+
+ renderComponent()
+
+ eventSubscriptionCallback?.({
+ type: APP_CHAT_WITH_MULTIPLE_MODEL,
+ payload: { message: 'Test' },
+ })
+
+ expect(handleSend).toHaveBeenCalledWith(
+ expect.any(String),
+ expect.not.objectContaining({
+ files: expect.anything(),
+ }),
+ )
+ })
+ })
+
+ describe('model resolution', () => {
+ it('should find current provider and model', () => {
+ const handleSend = vi.fn()
+ mockUseTextGeneration.mockReturnValue({
+ completion: '',
+ handleSend,
+ isResponding: false,
+ messageId: null,
+ })
+
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: [
+ {
+ provider: 'openai',
+ models: [
+ { model: 'gpt-3.5-turbo', model_properties: { mode: 'chat' } },
+ { model: 'gpt-4', model_properties: { mode: 'chat' } },
+ ],
+ },
+ ],
+ })
+
+ const modelAndParameter = createModelAndParameter({
+ provider: 'openai',
+ model: 'gpt-4',
+ })
+
+ renderComponent({ modelAndParameter })
+
+ eventSubscriptionCallback?.({
+ type: APP_CHAT_WITH_MULTIPLE_MODEL,
+ payload: { message: 'Test', files: [] },
+ })
+
+ expect(handleSend).toHaveBeenCalledWith(
+ expect.any(String),
+ expect.objectContaining({
+ model_config: expect.objectContaining({
+ model: expect.objectContaining({
+ mode: 'chat',
+ }),
+ }),
+ }),
+ )
+ })
+
+ it('should handle provider not found', () => {
+ const handleSend = vi.fn()
+ mockUseTextGeneration.mockReturnValue({
+ completion: '',
+ handleSend,
+ isResponding: false,
+ messageId: null,
+ })
+
+ mockUseProviderContext.mockReturnValue({
+ textGenerationModelList: [],
+ })
+
+ renderComponent()
+
+ eventSubscriptionCallback?.({
+ type: APP_CHAT_WITH_MULTIPLE_MODEL,
+ payload: { message: 'Test', files: [] },
+ })
+
+ // Should still call handleSend without crashing
+ expect(handleSend).toHaveBeenCalled()
+ })
+ })
+
+ describe('edge cases', () => {
+ it('should handle null eventEmitter', () => {
+ mockUseEventEmitterContextContext.mockReturnValue({
+ eventEmitter: null,
+ })
+
+ expect(() => renderComponent()).not.toThrow()
+ })
+
+ it('should handle empty prompt_variables', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({
+ isAdvancedMode: false,
+ modelConfig: {
+ configs: { prompt_template: '', prompt_variables: [] },
+ system_parameters: {},
+ },
+ appId: 'app-123',
+ inputs: {},
+ promptMode: 'simple',
+ speechToTextConfig: {},
+ introduction: '',
+ suggestedQuestionsAfterAnswerConfig: {},
+ citationConfig: {},
+ externalDataToolsConfig: [],
+ chatPromptConfig: {},
+ completionPromptConfig: {},
+ dataSets: [],
+ datasetConfigs: {},
+ })
+
+ expect(() => renderComponent()).not.toThrow()
+ })
+
+ it('should handle no context variable found', () => {
+ mockUseDebugConfigurationContext.mockReturnValue({
+ isAdvancedMode: false,
+ modelConfig: {
+ configs: {
+ prompt_template: '',
+ prompt_variables: [
+ { key: 'var1', name: 'Var1', type: 'string', is_context_var: false },
+ ],
+ },
+ system_parameters: {},
+ },
+ appId: 'app-123',
+ inputs: {},
+ promptMode: 'simple',
+ speechToTextConfig: {},
+ introduction: '',
+ suggestedQuestionsAfterAnswerConfig: {},
+ citationConfig: {},
+ externalDataToolsConfig: [],
+ chatPromptConfig: {},
+ completionPromptConfig: {},
+ dataSets: [],
+ datasetConfigs: {},
+ })
+
+ renderComponent()
+
+ // Should use empty string for dataset_query_variable
+ expect(capturedTextGenerationProps).not.toBeNull()
+ })
+ })
+
+ describe('promptVariablesToUserInputsForm', () => {
+ it('should call promptVariablesToUserInputsForm with prompt_variables', () => {
+ const promptVariables = [
+ { key: 'name', name: 'Name', type: 'string' },
+ { key: 'age', name: 'Age', type: 'number' },
+ ]
+
+ mockUseDebugConfigurationContext.mockReturnValue({
+ isAdvancedMode: false,
+ modelConfig: {
+ configs: { prompt_template: '', prompt_variables: promptVariables },
+ system_parameters: {},
+ },
+ appId: 'app-123',
+ inputs: {},
+ promptMode: 'simple',
+ speechToTextConfig: {},
+ introduction: '',
+ suggestedQuestionsAfterAnswerConfig: {},
+ citationConfig: {},
+ externalDataToolsConfig: [],
+ chatPromptConfig: {},
+ completionPromptConfig: {},
+ dataSets: [],
+ datasetConfigs: {},
+ })
+
+ renderComponent()
+
+ expect(mockPromptVariablesToUserInputsForm).toHaveBeenCalledWith(promptVariables)
+ })
+ })
+})
diff --git a/web/app/components/datasets/create/website/watercrawl/index.spec.tsx b/web/app/components/datasets/create/website/watercrawl/index.spec.tsx
index 646c59eb75..c3caab895a 100644
--- a/web/app/components/datasets/create/website/watercrawl/index.spec.tsx
+++ b/web/app/components/datasets/create/website/watercrawl/index.spec.tsx
@@ -73,6 +73,12 @@ const createDefaultProps = (overrides: Partial<Parameters<typeof WaterCrawl>[0]>
describe('WaterCrawl', () => {
beforeEach(() => {
vi.clearAllMocks()
+ vi.useFakeTimers({ shouldAdvanceTime: true })
+ })
+
+ afterEach(() => {
+ vi.runOnlyPendingTimers()
+ vi.useRealTimers()
})
// Tests for initial component rendering