feat(web): add reset button

This commit is contained in:
JzoNg 2026-04-29 13:32:46 +08:00
parent c56f1a8216
commit d8173b1cda
7 changed files with 231 additions and 59 deletions

View File

@ -128,7 +128,7 @@ const renderWithQueryClient = (ui: ReactNode) => {
describe('Evaluation', () => {
beforeEach(() => {
useEvaluationStore.setState({ resources: {} })
useEvaluationStore.setState({ resources: {}, initialResources: {} })
vi.clearAllMocks()
mockUseEvaluationConfig.mockReturnValue({
data: null,
@ -251,6 +251,37 @@ describe('Evaluation', () => {
})
})
// Covers the Reset button lifecycle: disabled while pristine, enabled once the
// user makes an unsaved metric change, and restoring the hydrated config on click.
it('should reset unsaved non-pipeline config changes to the hydrated config', () => {
// Hydrate with an empty-metrics config so the store snapshot is the clean baseline.
mockUseEvaluationConfig.mockReturnValue({
data: {
evaluation_model: 'gpt-4o-mini',
evaluation_model_provider: 'openai',
default_metrics: [],
customized_metrics: null,
judgment_config: null,
},
})
renderWithQueryClient(<Evaluation resourceType="apps" resourceId="app-reset" />)
const resetButton = screen.getByRole('button', { name: 'common.operation.reset' })
// Pristine config: nothing to reset yet.
expect(resetButton).toBeDisabled()
// Add one metric via the picker to dirty the config.
fireEvent.click(screen.getByRole('button', { name: 'evaluation.metrics.add' }))
fireEvent.change(screen.getByPlaceholderText('evaluation.metrics.searchNodeOrMetrics'), {
target: { value: 'faith' },
})
fireEvent.click(screen.getByTestId('evaluation-metric-node-faithfulness-node-faithfulness'))
expect(useEvaluationStore.getState().resources['apps:app-reset']!.metrics).toHaveLength(1)
// Dirty state enables Reset; clicking it restores the hydrated (empty) metrics.
expect(resetButton).toBeEnabled()
fireEvent.click(resetButton)
expect(useEvaluationStore.getState().resources['apps:app-reset']!.metrics).toHaveLength(0)
// Store matches the baseline again, so Reset goes back to disabled.
expect(resetButton).toBeDisabled()
})
it('should hide the value row for empty operators', () => {
const resourceType = 'apps'
const resourceId = 'app-2'

View File

@ -10,7 +10,7 @@ import { buildEvaluationConfigPayload, buildEvaluationRunRequest } from '../stor
describe('evaluation store', () => {
beforeEach(() => {
useEvaluationStore.setState({ resources: {} })
useEvaluationStore.setState({ resources: {}, initialResources: {} })
})
it('should configure a custom metric mapping to a valid state', () => {

View File

@ -1,13 +1,9 @@
'use client'
import type { BatchTestTab, EvaluationResourceProps } from '../../types'
import { Button } from '@langgenius/dify-ui/button'
import { cn } from '@langgenius/dify-ui/cn'
import { toast } from '@langgenius/dify-ui/toast'
import { useTranslation } from 'react-i18next'
import { useSaveEvaluationConfigMutation } from '@/service/use-evaluation'
import { isEvaluationRunnable, useEvaluationResource, useEvaluationStore } from '../../store'
import { buildEvaluationConfigPayload } from '../../store-utils'
import { TAB_CLASS_NAME } from '../../utils'
import HistoryTab from './history-tab'
import InputFieldsTab from './input-fields-tab'
@ -19,63 +15,21 @@ const BatchTestPanel = ({
resourceId,
}: EvaluationResourceProps) => {
const { t } = useTranslation('evaluation')
const { t: tCommon } = useTranslation('common')
const tabLabels: Record<BatchTestTab, string> = {
'input-fields': t('batch.tabs.input-fields'),
'history': t('batch.tabs.history'),
}
const resource = useEvaluationResource(resourceType, resourceId)
const setBatchTab = useEvaluationStore(state => state.setBatchTab)
const saveConfigMutation = useSaveEvaluationConfigMutation()
const isRunnable = isEvaluationRunnable(resource)
const isPanelReady = !!resource.judgeModelId && resource.metrics.length > 0
const handleSave = () => {
if (!isRunnable) {
toast.warning(t('batch.validation'))
return
}
const body = buildEvaluationConfigPayload(resource, resourceType)
if (!body) {
toast.warning(t('batch.validation'))
return
}
saveConfigMutation.mutate({
params: {
targetType: resourceType,
targetId: resourceId,
},
body,
}, {
onSuccess: () => {
toast.success(tCommon('api.saved'))
},
onError: () => {
toast.error(t('config.saveFailed'))
},
})
}
return (
<div className="flex h-full min-h-0 flex-col bg-background-default">
<div className="px-6 py-4">
<div className="flex items-start justify-between gap-3">
<div className="min-w-0">
<div className="system-xl-semibold text-text-primary">{t('batch.title')}</div>
<div className="mt-1 system-sm-regular text-text-tertiary">{t('batch.description')}</div>
</div>
<Button
className="shrink-0"
variant="primary"
disabled={!isRunnable}
loading={saveConfigMutation.isPending}
onClick={handleSave}
>
{tCommon('operation.save')}
</Button>
<div className="min-w-0">
<div className="system-xl-semibold text-text-primary">{t('batch.title')}</div>
<div className="mt-1 system-sm-regular text-text-tertiary">{t('batch.description')}</div>
</div>
<div className="mt-4 rounded-xl border border-divider-subtle bg-components-card-bg p-3">
<div className="flex items-start gap-3">

View File

@ -0,0 +1,80 @@
'use client'
import type { EvaluationResourceProps } from '../types'
import { Button } from '@langgenius/dify-ui/button'
import { toast } from '@langgenius/dify-ui/toast'
import { useTranslation } from 'react-i18next'
import { useSaveEvaluationConfigMutation } from '@/service/use-evaluation'
import {
isEvaluationRunnable,
useEvaluationResource,
useEvaluationStore,
useIsEvaluationConfigDirty,
} from '../store'
import { buildEvaluationConfigPayload } from '../store-utils'
/**
 * Reset / Save action pair for an evaluation resource's configuration.
 *
 * Reset restores the last hydrated/saved snapshot (enabled only while the
 * config is dirty); Save validates, persists the config, then re-baselines
 * the dirty-tracking snapshot on success.
 */
const EvaluationConfigActions = ({
  resourceType,
  resourceId,
}: EvaluationResourceProps) => {
  const { t } = useTranslation('evaluation')
  const { t: tCommon } = useTranslation('common')
  const resource = useEvaluationResource(resourceType, resourceId)
  const isDirty = useIsEvaluationConfigDirty(resourceType, resourceId)
  const resetResourceConfig = useEvaluationStore(state => state.resetResourceConfig)
  const markResourceConfigSaved = useEvaluationStore(state => state.markResourceConfigSaved)
  const saveConfigMutation = useSaveEvaluationConfigMutation()
  const runnable = isEvaluationRunnable(resource)

  const handleSave = () => {
    // Either the runnable check or payload building may reject the config;
    // both paths surface the same validation warning.
    const payload = runnable ? buildEvaluationConfigPayload(resource, resourceType) : null
    if (!payload) {
      toast.warning(t('batch.validation'))
      return
    }
    saveConfigMutation.mutate(
      {
        params: { targetType: resourceType, targetId: resourceId },
        body: payload,
      },
      {
        onSuccess: () => {
          // Re-baseline the snapshot so the dirty flag clears after saving.
          markResourceConfigSaved(resourceType, resourceId)
          toast.success(tCommon('api.saved'))
        },
        onError: () => {
          toast.error(t('config.saveFailed'))
        },
      },
    )
  }

  return (
    <div className="flex shrink-0 items-center gap-2">
      <Button
        variant="secondary"
        disabled={!isDirty || saveConfigMutation.isPending}
        onClick={() => resetResourceConfig(resourceType, resourceId)}
      >
        {tCommon('operation.reset')}
      </Button>
      <Button
        variant="primary"
        disabled={!runnable}
        loading={saveConfigMutation.isPending}
        onClick={handleSave}
      >
        {tCommon('operation.save')}
      </Button>
    </div>
  )
}
export default EvaluationConfigActions

View File

@ -5,6 +5,7 @@ import { useTranslation } from 'react-i18next'
import { useDocLink } from '@/context/i18n'
import BatchTestPanel from '../batch-test-panel'
import ConditionsSection from '../conditions-section'
import EvaluationConfigActions from '../config-actions'
import JudgeModelSelector from '../judge-model-selector'
import MetricSection from '../metric-section'
import SectionHeader, { InlineSectionHeader } from '../section-header'
@ -38,6 +39,7 @@ const NonPipelineEvaluation = ({
</>
)}
descriptionClassName="max-w-[700px]"
action={<EvaluationConfigActions resourceType={resourceType} resourceId={resourceId} />}
/>
<section className="max-w-[700px] py-4">
<InlineSectionHeader title={t('judgeModel.title')} tooltip={t('judgeModel.description')} />

View File

@ -6,6 +6,7 @@ import { useTranslation } from 'react-i18next'
import { useDocLink } from '@/context/i18n'
import { useEvaluationStore } from '../../store'
import HistoryTab from '../batch-test-panel/history-tab'
import EvaluationConfigActions from '../config-actions'
import JudgeModelSelector from '../judge-model-selector'
import PipelineBatchActions from '../pipeline/pipeline-batch-actions'
import PipelineMetricsSection from '../pipeline/pipeline-metrics-section'
@ -45,6 +46,7 @@ const PipelineEvaluation = ({
</a>
</>
)}
action={<EvaluationConfigActions resourceType={resourceType} resourceId={resourceId} />}
/>
</div>

View File

@ -4,6 +4,7 @@ import type {
EvaluationResourceType,
} from './types'
import type { EvaluationConfig, NodeInfo } from '@/types/evaluation'
import { isEqual } from 'es-toolkit/predicate'
import { create } from 'zustand'
import { getEvaluationMockConfig } from './mock'
import {
@ -28,8 +29,11 @@ import { buildConditionMetricOptions } from './utils'
type EvaluationStore = {
resources: Record<string, EvaluationResourceState>
initialResources: Record<string, EvaluationResourceState>
ensureResource: (resourceType: EvaluationResourceType, resourceId: string) => void
hydrateResource: (resourceType: EvaluationResourceType, resourceId: string, config: EvaluationConfig) => void
resetResourceConfig: (resourceType: EvaluationResourceType, resourceId: string) => void
markResourceConfigSaved: (resourceType: EvaluationResourceType, resourceId: string) => void
setJudgeModel: (resourceType: EvaluationResourceType, resourceId: string, judgeModelId: string) => void
addBuiltinMetric: (resourceType: EvaluationResourceType, resourceId: string, optionId: string, nodeInfoList?: NodeInfo[]) => void
updateMetricThreshold: (resourceType: EvaluationResourceType, resourceId: string, metricId: string, threshold: number) => void
@ -88,8 +92,68 @@ type EvaluationStore = {
const initialResourceCache: Record<string, EvaluationResourceState> = {}
// Produces a deep copy of an evaluation resource state: every nested metric,
// mapping, output, condition and batch record is duplicated so that mutating
// the clone can never leak back into the source object (or vice versa).
const cloneEvaluationResourceState = (resource: EvaluationResourceState): EvaluationResourceState => {
  const metrics = resource.metrics.map((metric) => {
    const customConfig = metric.customConfig
      ? {
        ...metric.customConfig,
        mappings: metric.customConfig.mappings.map(mapping => ({ ...mapping })),
        outputs: metric.customConfig.outputs.map(output => ({ ...output })),
      }
      : undefined
    return {
      ...metric,
      nodeInfoList: metric.nodeInfoList?.map(nodeInfo => ({ ...nodeInfo })),
      customConfig,
    }
  })
  return {
    ...resource,
    metrics,
    judgmentConfig: {
      ...resource.judgmentConfig,
      conditions: resource.judgmentConfig.conditions.map(condition => ({ ...condition })),
    },
    batchRecords: resource.batchRecords.map(record => ({ ...record })),
  }
}
const preserveBatchState = (
configState: EvaluationResourceState,
currentResource: EvaluationResourceState | undefined,
resourceType: EvaluationResourceType,
): EvaluationResourceState => {
const initialState = buildInitialState(resourceType)
return {
...cloneEvaluationResourceState(configState),
activeBatchTab: currentResource?.activeBatchTab ?? initialState.activeBatchTab,
uploadedFileId: currentResource?.uploadedFileId ?? initialState.uploadedFileId,
uploadedFileName: currentResource?.uploadedFileName ?? initialState.uploadedFileName,
selectedRunId: currentResource?.selectedRunId ?? initialState.selectedRunId,
batchRecords: currentResource?.batchRecords.map(record => ({ ...record })) ?? initialState.batchRecords,
}
}
const createConfigSnapshot = (
resourceType: EvaluationResourceType,
resource: EvaluationResourceState,
): EvaluationResourceState => {
const initialState = buildInitialState(resourceType)
return {
...cloneEvaluationResourceState(resource),
activeBatchTab: initialState.activeBatchTab,
uploadedFileId: initialState.uploadedFileId,
uploadedFileName: initialState.uploadedFileName,
selectedRunId: initialState.selectedRunId,
batchRecords: initialState.batchRecords,
}
}
// Projects a resource state down to the fields persisted by "save" — the
// only fields that participate in the dirty-state comparison.
const pickConfigComparableState = (resource: EvaluationResourceState) => {
  const { judgeModelId, metrics, judgmentConfig } = resource
  return { judgeModelId, metrics, judgmentConfig }
}
export const useEvaluationStore = create<EvaluationStore>((set, get) => ({
resources: {},
initialResources: {},
ensureResource: (resourceType, resourceId) => {
const resourceKey = buildResourceKey(resourceType, resourceId)
if (get().resources[resourceKey])
@ -103,17 +167,42 @@ export const useEvaluationStore = create<EvaluationStore>((set, get) => ({
}))
},
hydrateResource: (resourceType, resourceId, config) => {
const resourceKey = buildResourceKey(resourceType, resourceId)
const configState = buildStateFromEvaluationConfig(resourceType, config)
set(state => ({
resources: {
...state.resources,
[buildResourceKey(resourceType, resourceId)]: {
...buildStateFromEvaluationConfig(resourceType, config),
activeBatchTab: state.resources[buildResourceKey(resourceType, resourceId)]?.activeBatchTab ?? 'input-fields',
uploadedFileId: state.resources[buildResourceKey(resourceType, resourceId)]?.uploadedFileId ?? null,
uploadedFileName: state.resources[buildResourceKey(resourceType, resourceId)]?.uploadedFileName ?? null,
selectedRunId: state.resources[buildResourceKey(resourceType, resourceId)]?.selectedRunId ?? null,
batchRecords: state.resources[buildResourceKey(resourceType, resourceId)]?.batchRecords ?? [],
},
[resourceKey]: preserveBatchState(configState, state.resources[resourceKey], resourceType),
},
initialResources: {
...state.initialResources,
[resourceKey]: createConfigSnapshot(resourceType, configState),
},
}))
},
resetResourceConfig: (resourceType, resourceId) => {
  // Discard unsaved config edits by restoring the last hydrated/saved
  // snapshot, while keeping the user's current batch-test UI state intact.
  const resourceKey = buildResourceKey(resourceType, resourceId)
  set((state) => {
    const baseline = state.initialResources[resourceKey] ?? buildInitialState(resourceType)
    return {
      resources: {
        ...state.resources,
        [resourceKey]: preserveBatchState(baseline, state.resources[resourceKey], resourceType),
      },
    }
  })
},
markResourceConfigSaved: (resourceType, resourceId) => {
  // After a successful save, re-baseline the snapshot so the current config
  // is considered clean (the dirty flag goes false until the next edit).
  const resourceKey = buildResourceKey(resourceType, resourceId)
  const current = get().resources[resourceKey] ?? buildInitialState(resourceType)
  set(state => ({
    initialResources: {
      ...state.initialResources,
      [resourceKey]: createConfigSnapshot(resourceType, current),
    },
  }))
},
@ -436,6 +525,20 @@ export const useEvaluationResource = (resourceType: EvaluationResourceType, reso
return useEvaluationStore(state => state.resources[resourceKey] ?? (initialResourceCache[resourceKey] ??= buildInitialState(resourceType)))
}
export const useIsEvaluationConfigDirty = (resourceType: EvaluationResourceType, resourceId: string) => {
const resourceKey = buildResourceKey(resourceType, resourceId)
return useEvaluationStore((state) => {
const resource = state.resources[resourceKey] ?? (initialResourceCache[resourceKey] ??= buildInitialState(resourceType))
const initialResource = state.initialResources[resourceKey] ?? buildInitialState(resourceType)
return !isEqual(
pickConfigComparableState(resource),
pickConfigComparableState(initialResource),
)
})
}
export const getAllowedOperators = (
metrics: EvaluationResourceState['metrics'],
variableSelector: [string, string] | null,