From 5d3e8a31d0e88fbe6f6d3c232694065c29ccecfc Mon Sep 17 00:00:00 2001 From: Novice Date: Fri, 10 Oct 2025 10:54:32 +0800 Subject: [PATCH 01/49] fix: restore array flattening behavior in iteration node (#26695) --- .../nodes/iteration/iteration_node.py | 38 +++++++++++++++++-- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/api/core/workflow/nodes/iteration/iteration_node.py b/api/core/workflow/nodes/iteration/iteration_node.py index a05a6b1b96..965e22b74c 100644 --- a/api/core/workflow/nodes/iteration/iteration_node.py +++ b/api/core/workflow/nodes/iteration/iteration_node.py @@ -342,10 +342,13 @@ class IterationNode(Node): iterator_list_value: Sequence[object], iter_run_map: dict[str, float], ) -> Generator[NodeEventBase, None, None]: + # Flatten the list of lists if all outputs are lists + flattened_outputs = self._flatten_outputs_if_needed(outputs) + yield IterationSucceededEvent( start_at=started_at, inputs=inputs, - outputs={"output": outputs}, + outputs={"output": flattened_outputs}, steps=len(iterator_list_value), metadata={ WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens, @@ -357,13 +360,39 @@ class IterationNode(Node): yield StreamCompletedEvent( node_run_result=NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, - outputs={"output": outputs}, + outputs={"output": flattened_outputs}, metadata={ WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens, }, ) ) + def _flatten_outputs_if_needed(self, outputs: list[object]) -> list[object]: + """ + Flatten the outputs list if all elements are lists. + This maintains backward compatibility with version 1.8.1 behavior. + """ + if not outputs: + return outputs + + # Check if all non-None outputs are lists + non_none_outputs = [output for output in outputs if output is not None] + if not non_none_outputs: + return outputs + + if all(isinstance(output, list) for output in non_none_outputs): + # Flatten the list of lists + flattened: list[Any] = [] + for output in outputs: + if isinstance(output, list): + flattened.extend(output) + elif output is not None: + # This shouldn't happen based on our check, but handle it gracefully + flattened.append(output) + return flattened + + return outputs + def _handle_iteration_failure( self, started_at: datetime, @@ -373,10 +402,13 @@ class IterationNode(Node): iter_run_map: dict[str, float], error: IterationNodeError, ) -> Generator[NodeEventBase, None, None]: + # Flatten the list of lists if all outputs are lists (even in failure case) + flattened_outputs = self._flatten_outputs_if_needed(outputs) + yield IterationFailedEvent( start_at=started_at, inputs=inputs, - outputs={"output": outputs}, + outputs={"output": flattened_outputs}, steps=len(iterator_list_value), metadata={ WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens, From 54db4c176a0a12b35d4b1f63e26ea3acb09bb69a Mon Sep 17 00:00:00 2001 From: yihong Date: Fri, 10 Oct 2025 12:59:28 +0800 Subject: [PATCH 02/49] fix: drop useless logic (#26678) Signed-off-by: yihong0618 --- api/core/app/apps/base_app_runner.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/api/core/app/apps/base_app_runner.py b/api/core/app/apps/base_app_runner.py index e7db3bc41b..61ac040c05 100644 --- a/api/core/app/apps/base_app_runner.py +++ b/api/core/app/apps/base_app_runner.py @@ -61,9 +61,6 @@ class AppRunner: if model_context_tokens is None: return -1 - if max_tokens is None: - max_tokens = 0 - prompt_tokens = 
model_instance.get_llm_num_tokens(prompt_messages) if prompt_tokens + max_tokens > model_context_tokens: From cf1778e696ceb9b79eacfb467e4ce03e95d5e817 Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Fri, 10 Oct 2025 13:17:33 +0800 Subject: [PATCH 03/49] fix: issue w/ timepicker (#26696) Co-authored-by: lyzno1 Co-authored-by: lyzno1 <92089059+lyzno1@users.noreply.github.com> --- .../time-picker/index.spec.tsx | 95 +++++++++++++ .../time-picker/index.tsx | 131 +++++++++++++---- .../base/date-and-time-picker/types.ts | 2 +- .../date-and-time-picker/utils/dayjs.spec.ts | 67 +++++++++ .../base/date-and-time-picker/utils/dayjs.ts | 134 ++++++++++++++++-- 5 files changed, 388 insertions(+), 41 deletions(-) create mode 100644 web/app/components/base/date-and-time-picker/time-picker/index.spec.tsx create mode 100644 web/app/components/base/date-and-time-picker/utils/dayjs.spec.ts diff --git a/web/app/components/base/date-and-time-picker/time-picker/index.spec.tsx b/web/app/components/base/date-and-time-picker/time-picker/index.spec.tsx new file mode 100644 index 0000000000..40bc2928c8 --- /dev/null +++ b/web/app/components/base/date-and-time-picker/time-picker/index.spec.tsx @@ -0,0 +1,95 @@ +import React from 'react' +import { fireEvent, render, screen } from '@testing-library/react' +import TimePicker from './index' +import dayjs from '../utils/dayjs' +import { isDayjsObject } from '../utils/dayjs' + +jest.mock('react-i18next', () => ({ + useTranslation: () => ({ + t: (key: string) => { + if (key === 'time.defaultPlaceholder') return 'Pick a time...' + if (key === 'time.operation.now') return 'Now' + if (key === 'time.operation.ok') return 'OK' + if (key === 'common.operation.clear') return 'Clear' + return key + }, + }), +})) + +jest.mock('@/app/components/base/portal-to-follow-elem', () => ({ + PortalToFollowElem: ({ children }: { children: React.ReactNode }) =>
<div>{children}</div>,
+  PortalToFollowElemTrigger: ({ children, onClick }: { children: React.ReactNode, onClick: (e: React.MouseEvent) => void }) => (
+    <div onClick={onClick}>{children}</div>
+  ),
+  PortalToFollowElemContent: ({ children }: { children: React.ReactNode }) => (
+    <div>{children}</div>
+  ),
+}))
+
+jest.mock('./options', () => () => <div />)
+jest.mock('./header', () => () => <div />
) + +describe('TimePicker', () => { + const baseProps = { + onChange: jest.fn(), + onClear: jest.fn(), + } + + beforeEach(() => { + jest.clearAllMocks() + }) + + test('renders formatted value for string input (Issue #26692 regression)', () => { + render( + , + ) + + expect(screen.getByDisplayValue('06:45 PM')).toBeInTheDocument() + }) + + test('confirms cleared value when confirming without selection', () => { + render( + , + ) + + const input = screen.getByRole('textbox') + fireEvent.click(input) + + const clearButton = screen.getByRole('button', { name: /clear/i }) + fireEvent.click(clearButton) + + const confirmButton = screen.getByRole('button', { name: 'OK' }) + fireEvent.click(confirmButton) + + expect(baseProps.onChange).toHaveBeenCalledTimes(1) + expect(baseProps.onChange).toHaveBeenCalledWith(undefined) + expect(baseProps.onClear).not.toHaveBeenCalled() + }) + + test('selecting current time emits timezone-aware value', () => { + const onChange = jest.fn() + render( + , + ) + + const nowButton = screen.getByRole('button', { name: 'Now' }) + fireEvent.click(nowButton) + + expect(onChange).toHaveBeenCalledTimes(1) + const emitted = onChange.mock.calls[0][0] + expect(isDayjsObject(emitted)).toBe(true) + expect(emitted?.utcOffset()).toBe(dayjs().tz('America/New_York').utcOffset()) + }) +}) diff --git a/web/app/components/base/date-and-time-picker/time-picker/index.tsx b/web/app/components/base/date-and-time-picker/time-picker/index.tsx index 1fb2cfed11..f23fcf8f4e 100644 --- a/web/app/components/base/date-and-time-picker/time-picker/index.tsx +++ b/web/app/components/base/date-and-time-picker/time-picker/index.tsx @@ -1,6 +1,13 @@ import React, { useCallback, useEffect, useRef, useState } from 'react' -import type { Period, TimePickerProps } from '../types' -import dayjs, { cloneTime, getDateWithTimezone, getHourIn12Hour } from '../utils/dayjs' +import type { Dayjs } from 'dayjs' +import { Period } from '../types' +import type { TimePickerProps } from '../types' +import dayjs, { + getDateWithTimezone, + getHourIn12Hour, + isDayjsObject, + toDayjs, +} from '../utils/dayjs' import { PortalToFollowElem, PortalToFollowElemContent, @@ -13,6 +20,11 @@ import { useTranslation } from 'react-i18next' import { RiCloseCircleFill, RiTimeLine } from '@remixicon/react' import cn from '@/utils/classnames' +const to24Hour = (hour12: string, period: Period) => { + const normalized = Number.parseInt(hour12, 10) % 12 + return period === Period.PM ? normalized + 12 : normalized +} + const TimePicker = ({ value, timezone, @@ -28,7 +40,11 @@ const TimePicker = ({ const [isOpen, setIsOpen] = useState(false) const containerRef = useRef(null) const isInitial = useRef(true) - const [selectedTime, setSelectedTime] = useState(() => value ? 
getDateWithTimezone({ timezone, date: value }) : undefined) + + // Initialize selectedTime + const [selectedTime, setSelectedTime] = useState(() => { + return toDayjs(value, { timezone }) + }) useEffect(() => { const handleClickOutside = (event: MouseEvent) => { @@ -39,20 +55,47 @@ const TimePicker = ({ return () => document.removeEventListener('mousedown', handleClickOutside) }, []) + // Track previous values to avoid unnecessary updates + const prevValueRef = useRef(value) + const prevTimezoneRef = useRef(timezone) + useEffect(() => { if (isInitial.current) { isInitial.current = false + // Save initial values on first render + prevValueRef.current = value + prevTimezoneRef.current = timezone return } - if (value) { - const newValue = getDateWithTimezone({ date: value, timezone }) - setSelectedTime(newValue) - onChange(newValue) + + // Only update when timezone changes but value doesn't + const valueChanged = prevValueRef.current !== value + const timezoneChanged = prevTimezoneRef.current !== timezone + + // Update reference values + prevValueRef.current = value + prevTimezoneRef.current = timezone + + // Skip if neither timezone changed nor value changed + if (!timezoneChanged && !valueChanged) return + + if (value !== undefined && value !== null) { + const dayjsValue = toDayjs(value, { timezone }) + if (!dayjsValue) return + + setSelectedTime(dayjsValue) + + if (timezoneChanged && !valueChanged) + onChange(dayjsValue) + return } - else { - setSelectedTime(prev => prev ? getDateWithTimezone({ date: prev, timezone }) : undefined) - } - }, [timezone]) + + setSelectedTime((prev) => { + if (!isDayjsObject(prev)) + return undefined + return timezone ? getDateWithTimezone({ date: prev, timezone }) : prev + }) + }, [timezone, value, onChange]) const handleClickTrigger = (e: React.MouseEvent) => { e.stopPropagation() @@ -61,8 +104,16 @@ const TimePicker = ({ return } setIsOpen(true) - if (value) - setSelectedTime(value) + + if (value) { + const dayjsValue = toDayjs(value, { timezone }) + const needsUpdate = dayjsValue && ( + !selectedTime + || !isDayjsObject(selectedTime) + || !dayjsValue.isSame(selectedTime, 'minute') + ) + if (needsUpdate) setSelectedTime(dayjsValue) + } } const handleClear = (e: React.MouseEvent) => { @@ -73,42 +124,68 @@ const TimePicker = ({ } const handleTimeSelect = (hour: string, minute: string, period: Period) => { - const newTime = cloneTime(dayjs(), dayjs(`1/1/2000 ${hour}:${minute} ${period}`)) + const periodAdjustedHour = to24Hour(hour, period) + const nextMinute = Number.parseInt(minute, 10) setSelectedTime((prev) => { - return prev ? cloneTime(prev, newTime) : newTime + const reference = isDayjsObject(prev) + ? prev + : (timezone ? getDateWithTimezone({ timezone }) : dayjs()).startOf('minute') + return reference + .set('hour', periodAdjustedHour) + .set('minute', nextMinute) + .set('second', 0) + .set('millisecond', 0) }) } + const getSafeTimeObject = useCallback(() => { + if (isDayjsObject(selectedTime)) + return selectedTime + return (timezone ? 
getDateWithTimezone({ timezone }) : dayjs()).startOf('day') + }, [selectedTime, timezone]) + const handleSelectHour = useCallback((hour: string) => { - const time = selectedTime || dayjs().startOf('day') + const time = getSafeTimeObject() handleTimeSelect(hour, time.minute().toString().padStart(2, '0'), time.format('A') as Period) - }, [selectedTime]) + }, [getSafeTimeObject]) const handleSelectMinute = useCallback((minute: string) => { - const time = selectedTime || dayjs().startOf('day') + const time = getSafeTimeObject() handleTimeSelect(getHourIn12Hour(time).toString().padStart(2, '0'), minute, time.format('A') as Period) - }, [selectedTime]) + }, [getSafeTimeObject]) const handleSelectPeriod = useCallback((period: Period) => { - const time = selectedTime || dayjs().startOf('day') + const time = getSafeTimeObject() handleTimeSelect(getHourIn12Hour(time).toString().padStart(2, '0'), time.minute().toString().padStart(2, '0'), period) - }, [selectedTime]) + }, [getSafeTimeObject]) const handleSelectCurrentTime = useCallback(() => { const newDate = getDateWithTimezone({ timezone }) setSelectedTime(newDate) onChange(newDate) setIsOpen(false) - }, [onChange, timezone]) + }, [timezone, onChange]) const handleConfirm = useCallback(() => { - onChange(selectedTime) + const valueToEmit = isDayjsObject(selectedTime) ? selectedTime : undefined + onChange(valueToEmit) setIsOpen(false) - }, [onChange, selectedTime]) + }, [selectedTime, onChange]) const timeFormat = 'hh:mm A' - const displayValue = value?.format(timeFormat) || '' - const placeholderDate = isOpen && selectedTime ? selectedTime.format(timeFormat) : (placeholder || t('time.defaultPlaceholder')) + + const formatTimeValue = useCallback((timeValue: string | Dayjs | undefined): string => { + if (!timeValue) return '' + + const dayjsValue = toDayjs(timeValue, { timezone }) + return dayjsValue?.format(timeFormat) || '' + }, [timezone]) + + const displayValue = formatTimeValue(value) + + const placeholderDate = isOpen && isDayjsObject(selectedTime) + ? selectedTime.format(timeFormat) + : (placeholder || t('time.defaultPlaceholder')) const inputElem = (
diff --git a/web/app/components/base/date-and-time-picker/types.ts b/web/app/components/base/date-and-time-picker/types.ts index 4ac01c142a..b51c2ebb01 100644 --- a/web/app/components/base/date-and-time-picker/types.ts +++ b/web/app/components/base/date-and-time-picker/types.ts @@ -54,7 +54,7 @@ export type TriggerParams = { onClick: (e: React.MouseEvent) => void } export type TimePickerProps = { - value: Dayjs | undefined + value: Dayjs | string | undefined timezone?: string placeholder?: string onChange: (date: Dayjs | undefined) => void diff --git a/web/app/components/base/date-and-time-picker/utils/dayjs.spec.ts b/web/app/components/base/date-and-time-picker/utils/dayjs.spec.ts new file mode 100644 index 0000000000..549ab01029 --- /dev/null +++ b/web/app/components/base/date-and-time-picker/utils/dayjs.spec.ts @@ -0,0 +1,67 @@ +import dayjs from './dayjs' +import { + getDateWithTimezone, + isDayjsObject, + toDayjs, +} from './dayjs' + +describe('dayjs utilities', () => { + const timezone = 'UTC' + + test('toDayjs parses time-only strings with timezone support', () => { + const result = toDayjs('18:45', { timezone }) + expect(result).toBeDefined() + expect(result?.format('HH:mm')).toBe('18:45') + expect(result?.utcOffset()).toBe(getDateWithTimezone({ timezone }).utcOffset()) + }) + + test('toDayjs parses 12-hour time strings', () => { + const tz = 'America/New_York' + const result = toDayjs('07:15 PM', { timezone: tz }) + expect(result).toBeDefined() + expect(result?.format('HH:mm')).toBe('19:15') + expect(result?.utcOffset()).toBe(getDateWithTimezone({ timezone: tz }).utcOffset()) + }) + + test('isDayjsObject detects dayjs instances', () => { + const date = dayjs() + expect(isDayjsObject(date)).toBe(true) + expect(isDayjsObject(getDateWithTimezone({ timezone }))).toBe(true) + expect(isDayjsObject('2024-01-01')).toBe(false) + expect(isDayjsObject({})).toBe(false) + }) + + test('toDayjs parses datetime strings in target timezone', () => { + const value = '2024-05-01 12:00:00' + const tz = 'America/New_York' + + const result = toDayjs(value, { timezone: tz }) + + expect(result).toBeDefined() + expect(result?.hour()).toBe(12) + expect(result?.format('YYYY-MM-DD HH:mm')).toBe('2024-05-01 12:00') + }) + + test('toDayjs parses ISO datetime strings in target timezone', () => { + const value = '2024-05-01T14:30:00' + const tz = 'Europe/London' + + const result = toDayjs(value, { timezone: tz }) + + expect(result).toBeDefined() + expect(result?.hour()).toBe(14) + expect(result?.minute()).toBe(30) + }) + + test('toDayjs handles dates without time component', () => { + const value = '2024-05-01' + const tz = 'America/Los_Angeles' + + const result = toDayjs(value, { timezone: tz }) + + expect(result).toBeDefined() + expect(result?.format('YYYY-MM-DD')).toBe('2024-05-01') + expect(result?.hour()).toBe(0) + expect(result?.minute()).toBe(0) + }) +}) diff --git a/web/app/components/base/date-and-time-picker/utils/dayjs.ts b/web/app/components/base/date-and-time-picker/utils/dayjs.ts index fef35bf6ca..808b50247a 100644 --- a/web/app/components/base/date-and-time-picker/utils/dayjs.ts +++ b/web/app/components/base/date-and-time-picker/utils/dayjs.ts @@ -10,6 +10,25 @@ dayjs.extend(timezone) export default dayjs const monthMaps: Record = {} +const DEFAULT_OFFSET_STR = 'UTC+0' +const TIME_ONLY_REGEX = /^(\d{1,2}):(\d{2})(?::(\d{2})(?:\.(\d{1,3}))?)?$/ +const TIME_ONLY_12H_REGEX = /^(\d{1,2}):(\d{2})(?::(\d{2}))?\s?(AM|PM)$/i + +const COMMON_PARSE_FORMATS = [ + 'YYYY-MM-DD', + 'YYYY/MM/DD', + 'DD-MM-YYYY', 
+ 'DD/MM/YYYY', + 'MM-DD-YYYY', + 'MM/DD/YYYY', + 'YYYY-MM-DDTHH:mm:ss.SSSZ', + 'YYYY-MM-DDTHH:mm:ssZ', + 'YYYY-MM-DD HH:mm:ss', + 'YYYY-MM-DDTHH:mm', + 'YYYY-MM-DDTHH:mmZ', + 'YYYY-MM-DDTHH:mm:ss', + 'YYYY-MM-DDTHH:mm:ss.SSS', +] export const cloneTime = (targetDate: Dayjs, sourceDate: Dayjs) => { return targetDate.clone() @@ -76,21 +95,116 @@ export const getHourIn12Hour = (date: Dayjs) => { return hour === 0 ? 12 : hour >= 12 ? hour - 12 : hour } -export const getDateWithTimezone = (props: { date?: Dayjs, timezone?: string }) => { - return props.date ? dayjs.tz(props.date, props.timezone) : dayjs().tz(props.timezone) +export const getDateWithTimezone = ({ date, timezone }: { date?: Dayjs, timezone?: string }) => { + if (!timezone) + return (date ?? dayjs()).clone() + return date ? dayjs.tz(date, timezone) : dayjs().tz(timezone) } -// Asia/Shanghai -> UTC+8 -const DEFAULT_OFFSET_STR = 'UTC+0' export const convertTimezoneToOffsetStr = (timezone?: string) => { if (!timezone) return DEFAULT_OFFSET_STR const tzItem = tz.find(item => item.value === timezone) - if(!tzItem) + if (!tzItem) return DEFAULT_OFFSET_STR return `UTC${tzItem.name.charAt(0)}${tzItem.name.charAt(2)}` } +export const isDayjsObject = (value: unknown): value is Dayjs => dayjs.isDayjs(value) + +export type ToDayjsOptions = { + timezone?: string + format?: string + formats?: string[] +} + +const warnParseFailure = (value: string) => { + if (process.env.NODE_ENV !== 'production') + console.warn('[TimePicker] Failed to parse time value', value) +} + +const normalizeMillisecond = (value: string | undefined) => { + if (!value) return 0 + if (value.length === 3) return Number(value) + if (value.length > 3) return Number(value.slice(0, 3)) + return Number(value.padEnd(3, '0')) +} + +const applyTimezone = (date: Dayjs, timezone?: string) => { + return timezone ? getDateWithTimezone({ date, timezone }) : date +} + +export const toDayjs = (value: string | Dayjs | undefined, options: ToDayjsOptions = {}): Dayjs | undefined => { + if (!value) + return undefined + + const { timezone: tzName, format, formats } = options + + if (isDayjsObject(value)) + return applyTimezone(value, tzName) + + if (typeof value !== 'string') + return undefined + + const trimmed = value.trim() + + if (format) { + const parsedWithFormat = tzName + ? dayjs.tz(trimmed, format, tzName, true) + : dayjs(trimmed, format, true) + if (parsedWithFormat.isValid()) + return parsedWithFormat + } + + const timeMatch = TIME_ONLY_REGEX.exec(trimmed) + if (timeMatch) { + const base = applyTimezone(dayjs(), tzName).startOf('day') + const rawHour = Number(timeMatch[1]) + const minute = Number(timeMatch[2]) + const second = timeMatch[3] ? Number(timeMatch[3]) : 0 + const millisecond = normalizeMillisecond(timeMatch[4]) + + return base + .set('hour', rawHour) + .set('minute', minute) + .set('second', second) + .set('millisecond', millisecond) + } + + const timeMatch12h = TIME_ONLY_12H_REGEX.exec(trimmed) + if (timeMatch12h) { + const base = applyTimezone(dayjs(), tzName).startOf('day') + let hour = Number(timeMatch12h[1]) % 12 + const isPM = timeMatch12h[4]?.toUpperCase() === 'PM' + if (isPM) + hour += 12 + const minute = Number(timeMatch12h[2]) + const second = timeMatch12h[3] ? Number(timeMatch12h[3]) : 0 + + return base + .set('hour', hour) + .set('minute', minute) + .set('second', second) + .set('millisecond', 0) + } + + const candidateFormats = formats ?? COMMON_PARSE_FORMATS + for (const fmt of candidateFormats) { + const parsed = tzName + ? 
dayjs.tz(trimmed, fmt, tzName, true) + : dayjs(trimmed, fmt, true) + if (parsed.isValid()) + return parsed + } + + const fallbackParsed = tzName ? dayjs.tz(trimmed, tzName) : dayjs(trimmed) + if (fallbackParsed.isValid()) + return fallbackParsed + + warnParseFailure(value) + return undefined +} + // Parse date with multiple format support export const parseDateWithFormat = (dateString: string, format?: string): Dayjs | null => { if (!dateString) return null @@ -103,15 +217,7 @@ export const parseDateWithFormat = (dateString: string, format?: string): Dayjs // Try common date formats const formats = [ - 'YYYY-MM-DD', // Standard format - 'YYYY/MM/DD', // Slash format - 'DD-MM-YYYY', // European format - 'DD/MM/YYYY', // European slash format - 'MM-DD-YYYY', // US format - 'MM/DD/YYYY', // US slash format - 'YYYY-MM-DDTHH:mm:ss.SSSZ', // ISO format - 'YYYY-MM-DDTHH:mm:ssZ', // ISO format (no milliseconds) - 'YYYY-MM-DD HH:mm:ss', // Standard datetime format + ...COMMON_PARSE_FORMATS, ] for (const fmt of formats) { From 3a5aa4587c45530e678fd3f7bc1de6774ed68e38 Mon Sep 17 00:00:00 2001 From: Coding On Star <447357187@qq.com> Date: Fri, 10 Oct 2025 15:34:56 +0800 Subject: [PATCH 04/49] feat(billing): add tax information tooltips in pricing footer (#26705) Co-authored-by: CodingOnStar --- web/app/components/billing/pricing/footer.tsx | 12 +++++++++++- web/app/components/billing/pricing/index.tsx | 11 ++++++++--- web/i18n/en-US/billing.ts | 2 ++ web/i18n/ja-JP/billing.ts | 2 ++ web/i18n/zh-Hans/billing.ts | 2 ++ 5 files changed, 25 insertions(+), 4 deletions(-) diff --git a/web/app/components/billing/pricing/footer.tsx b/web/app/components/billing/pricing/footer.tsx index 4e3cdfee3d..fd713eb3da 100644 --- a/web/app/components/billing/pricing/footer.tsx +++ b/web/app/components/billing/pricing/footer.tsx @@ -2,19 +2,29 @@ import React from 'react' import Link from 'next/link' import { useTranslation } from 'react-i18next' import { RiArrowRightUpLine } from '@remixicon/react' +import { type Category, CategoryEnum } from '.' +import cn from '@/utils/classnames' type FooterProps = { pricingPageURL: string + currentCategory: Category } const Footer = ({ pricingPageURL, + currentCategory, }: FooterProps) => { const { t } = useTranslation() return (
-
+
+ {currentCategory === CategoryEnum.CLOUD && ( +
+ {t('billing.plansCommon.taxTip')} + {t('billing.plansCommon.taxTipSecond')} +
+ )} void @@ -25,7 +30,7 @@ const Pricing: FC = ({ const { plan } = useProviderContext() const { isCurrentWorkspaceManager } = useAppContext() const [planRange, setPlanRange] = React.useState(PlanRange.monthly) - const [currentCategory, setCurrentCategory] = useState('cloud') + const [currentCategory, setCurrentCategory] = useState(CategoryEnum.CLOUD) const canPay = isCurrentWorkspaceManager useKeyPress(['esc'], onCancel) @@ -57,7 +62,7 @@ const Pricing: FC = ({ planRange={planRange} canPay={canPay} /> -
+
diff --git a/web/i18n/en-US/billing.ts b/web/i18n/en-US/billing.ts index 72cf9a3fca..9169631281 100644 --- a/web/i18n/en-US/billing.ts +++ b/web/i18n/en-US/billing.ts @@ -37,6 +37,8 @@ const translation = { save: 'Save ', free: 'Free', annualBilling: 'Bill Annually Save {{percent}}%', + taxTip: 'All subscription prices (monthly/annual) exclude applicable taxes (e.g., VAT, sales tax).', + taxTipSecond: 'If your region has no applicable tax requirements, no tax will appear in your checkout, and you won’t be charged any additional fees for the entire subscription term.', comparePlanAndFeatures: 'Compare plans & features', priceTip: 'per workspace/', currentPlan: 'Current Plan', diff --git a/web/i18n/ja-JP/billing.ts b/web/i18n/ja-JP/billing.ts index 426687da6c..6dbff60d5a 100644 --- a/web/i18n/ja-JP/billing.ts +++ b/web/i18n/ja-JP/billing.ts @@ -36,6 +36,8 @@ const translation = { save: '節約 ', free: '無料', annualBilling: '年次請求', + taxTip: 'すべてのサブスクリプション料金(月額/年額)は、適用される税金(例:消費税、付加価値税)を含みません。', + taxTipSecond: 'お客様の地域に適用税がない場合、チェックアウト時に税金は表示されず、サブスクリプション期間中に追加料金が請求されることもありません。', comparePlanAndFeatures: 'プランと機能を比較する', priceTip: 'ワークスペース/', currentPlan: '現在のプラン', diff --git a/web/i18n/zh-Hans/billing.ts b/web/i18n/zh-Hans/billing.ts index 96ba7970c8..00a7dd909a 100644 --- a/web/i18n/zh-Hans/billing.ts +++ b/web/i18n/zh-Hans/billing.ts @@ -36,6 +36,8 @@ const translation = { save: '节省', free: '免费', annualBilling: '按年计费节省 {{percent}}%', + taxTip: '所有订阅价格(按月/按年)均不含适用税费(如增值税、销售税)。', + taxTipSecond: '如果您所在地区无适用税费要求,结账时将不会显示税费,且在整个订阅周期内您都无需支付任何额外费用。', comparePlanAndFeatures: '对比套餐 & 功能特性', priceTip: '每个团队空间/', currentPlan: '当前计划', From 294e01a8c120b4ac4038b8e01890746097a04e91 Mon Sep 17 00:00:00 2001 From: fenglin <790872612@qq.com> Date: Fri, 10 Oct 2025 15:52:09 +0800 Subject: [PATCH 05/49] Fix/tool provider tag internationalization (#26710) Co-authored-by: qiaofenglin --- web/app/components/plugins/hooks.ts | 107 ++++++++++++--------- web/app/components/tools/provider-list.tsx | 4 +- 2 files changed, 64 insertions(+), 47 deletions(-) diff --git a/web/app/components/plugins/hooks.ts b/web/app/components/plugins/hooks.ts index 0af7c1a170..f22b2c4d69 100644 --- a/web/app/components/plugins/hooks.ts +++ b/web/app/components/plugins/hooks.ts @@ -1,3 +1,4 @@ +import { useMemo } from 'react' import { useTranslation } from 'react-i18next' import type { TFunction } from 'i18next' import { @@ -14,23 +15,29 @@ export const useTags = (translateFromOut?: TFunction) => { const { t: translation } = useTranslation() const t = translateFromOut || translation - const tags = tagKeys.map((tag) => { - return { - name: tag, - label: t(`pluginTags.tags.${tag}`), + const tags = useMemo(() => { + return tagKeys.map((tag) => { + return { + name: tag, + label: t(`pluginTags.tags.${tag}`), + } + }) + }, [t]) + + const tagsMap = useMemo(() => { + return tags.reduce((acc, tag) => { + acc[tag.name] = tag + return acc + }, {} as Record) + }, [tags]) + + const getTagLabel = useMemo(() => { + return (name: string) => { + if (!tagsMap[name]) + return name + return tagsMap[name].label } - }) - - const tagsMap = tags.reduce((acc, tag) => { - acc[tag.name] = tag - return acc - }, {} as Record) - - const getTagLabel = (name: string) => { - if (!tagsMap[name]) - return name - return tagsMap[name].label - } + }, [tagsMap]) return { tags, @@ -48,23 +55,27 @@ export const useCategories = (translateFromOut?: TFunction) => { const { t: translation } = useTranslation() const t = translateFromOut || translation - const categories = 
categoryKeys.map((category) => { - if (category === 'agent-strategy') { - return { - name: 'agent-strategy', - label: t('plugin.category.agents'), + const categories = useMemo(() => { + return categoryKeys.map((category) => { + if (category === 'agent-strategy') { + return { + name: 'agent-strategy', + label: t('plugin.category.agents'), + } } - } - return { - name: category, - label: t(`plugin.category.${category}s`), - } - }) + return { + name: category, + label: t(`plugin.category.${category}s`), + } + }) + }, [t]) - const categoriesMap = categories.reduce((acc, category) => { - acc[category.name] = category - return acc - }, {} as Record) + const categoriesMap = useMemo(() => { + return categories.reduce((acc, category) => { + acc[category.name] = category + return acc + }, {} as Record) + }, [categories]) return { categories, @@ -76,23 +87,27 @@ export const useSingleCategories = (translateFromOut?: TFunction) => { const { t: translation } = useTranslation() const t = translateFromOut || translation - const categories = categoryKeys.map((category) => { - if (category === 'agent-strategy') { - return { - name: 'agent-strategy', - label: t('plugin.categorySingle.agent'), + const categories = useMemo(() => { + return categoryKeys.map((category) => { + if (category === 'agent-strategy') { + return { + name: 'agent-strategy', + label: t('plugin.categorySingle.agent'), + } } - } - return { - name: category, - label: t(`plugin.categorySingle.${category}`), - } - }) + return { + name: category, + label: t(`plugin.categorySingle.${category}`), + } + }) + }, [t]) - const categoriesMap = categories.reduce((acc, category) => { - acc[category.name] = category - return acc - }, {} as Record) + const categoriesMap = useMemo(() => { + return categories.reduce((acc, category) => { + acc[category.name] = category + return acc + }, {} as Record) + }, [categories]) return { categories, diff --git a/web/app/components/tools/provider-list.tsx b/web/app/components/tools/provider-list.tsx index 08a4aa0b5d..1679b4469b 100644 --- a/web/app/components/tools/provider-list.tsx +++ b/web/app/components/tools/provider-list.tsx @@ -21,6 +21,7 @@ import { useCheckInstalled, useInvalidateInstalledPluginList } from '@/service/u import { useGlobalPublicStore } from '@/context/global-public-context' import { ToolTypeEnum } from '../workflow/block-selector/types' import { useMarketplace } from './marketplace/hooks' +import { useTags } from '@/app/components/plugins/hooks' const getToolType = (type: string) => { switch (type) { @@ -40,6 +41,7 @@ const ProviderList = () => { // const searchParams = useSearchParams() // searchParams.get('category') === 'workflow' const { t } = useTranslation() + const { getTagLabel } = useTags() const { enable_marketplace } = useGlobalPublicStore(s => s.systemFeatures) const containerRef = useRef(null) @@ -180,7 +182,7 @@ const ProviderList = () => { } as any} footer={ getTagLabel(label)) || []} /> } /> From 298d8c2d881a3407152a1bd82cb83c55de77493f Mon Sep 17 00:00:00 2001 From: Jyong <76649700+JohnJyong@users.noreply.github.com> Date: Fri, 10 Oct 2025 15:54:33 +0800 Subject: [PATCH 06/49] Update deploy-dev.yml (#26712) --- .github/workflows/deploy-dev.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/deploy-dev.yml b/.github/workflows/deploy-dev.yml index de732c3134..cd1c86e668 100644 --- a/.github/workflows/deploy-dev.yml +++ b/.github/workflows/deploy-dev.yml @@ -18,7 +18,7 @@ jobs: - name: Deploy to server uses: appleboy/ssh-action@v0.1.8 with: - 
host: ${{ secrets.RAG_SSH_HOST }} + host: ${{ secrets.SSH_HOST }} username: ${{ secrets.SSH_USER }} key: ${{ secrets.SSH_PRIVATE_KEY }} script: | From 3068526797385de0d5a8808597a577722d0f7287 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 10 Oct 2025 15:55:24 +0800 Subject: [PATCH 07/49] chore: translate i18n files and update type definitions (#26709) Co-authored-by: iamjoel <2120155+iamjoel@users.noreply.github.com> --- web/i18n/de-DE/billing.ts | 2 ++ web/i18n/es-ES/billing.ts | 2 ++ web/i18n/fa-IR/billing.ts | 2 ++ web/i18n/fr-FR/billing.ts | 2 ++ web/i18n/hi-IN/billing.ts | 2 ++ web/i18n/id-ID/billing.ts | 2 ++ web/i18n/it-IT/billing.ts | 2 ++ web/i18n/ko-KR/billing.ts | 2 ++ web/i18n/pl-PL/billing.ts | 2 ++ web/i18n/pt-BR/billing.ts | 2 ++ web/i18n/ro-RO/billing.ts | 2 ++ web/i18n/ru-RU/billing.ts | 2 ++ web/i18n/sl-SI/billing.ts | 2 ++ web/i18n/th-TH/billing.ts | 2 ++ web/i18n/tr-TR/billing.ts | 2 ++ web/i18n/uk-UA/billing.ts | 2 ++ web/i18n/vi-VN/billing.ts | 2 ++ web/i18n/zh-Hant/billing.ts | 2 ++ 18 files changed, 36 insertions(+) diff --git a/web/i18n/de-DE/billing.ts b/web/i18n/de-DE/billing.ts index 98d4488fab..fc45f3889c 100644 --- a/web/i18n/de-DE/billing.ts +++ b/web/i18n/de-DE/billing.ts @@ -94,6 +94,8 @@ const translation = { teamMember_one: '{{count,number}} Teammitglied', documentsRequestQuotaTooltip: 'Gibt die Gesamtzahl der Aktionen an, die ein Arbeitsbereich pro Minute innerhalb der Wissensbasis ausführen kann, einschließlich der Erstellung, Löschung, Aktualisierung von Datensätzen, des Hochladens von Dokumenten, von Änderungen, der Archivierung und von Abfragen in der Wissensbasis. Diese Kennzahl wird verwendet, um die Leistung von Anfragen an die Wissensbasis zu bewerten. Wenn ein Sandbox-Nutzer beispielsweise in einer Minute 10 aufeinanderfolgende Testdurchläufe durchführt, wird sein Arbeitsbereich für die nächste Minute vorübergehend daran gehindert, die folgenden Aktionen auszuführen: Erstellung, Löschung, Aktualisierung von Datensätzen sowie das Hochladen oder Ändern von Dokumenten.', startBuilding: 'Beginnen Sie mit der Entwicklung', + taxTipSecond: 'Wenn in Ihrer Region keine relevanten Steuervorschriften gelten, wird an der Kasse keine Steuer angezeigt und Ihnen werden während der gesamten Abonnementlaufzeit keine zusätzlichen Gebühren berechnet.', + taxTip: 'Alle Abonnementspreise (monatlich/jährlich) verstehen sich zuzüglich der geltenden Steuern (z. B. MwSt., Umsatzsteuer).', }, plans: { sandbox: { diff --git a/web/i18n/es-ES/billing.ts b/web/i18n/es-ES/billing.ts index c5d4ef95b9..a8180e2d07 100644 --- a/web/i18n/es-ES/billing.ts +++ b/web/i18n/es-ES/billing.ts @@ -94,6 +94,8 @@ const translation = { apiRateLimitTooltip: 'El límite de tasa de la API se aplica a todas las solicitudes realizadas a través de la API de Dify, incluidos la generación de texto, las conversaciones de chat, las ejecuciones de flujo de trabajo y el procesamiento de documentos.', documentsRequestQuotaTooltip: 'Especifica el número total de acciones que un espacio de trabajo puede realizar por minuto dentro de la base de conocimientos, incluyendo la creación, eliminación, actualización de conjuntos de datos, carga de documentos, modificaciones, archivo y consultas a la base de conocimientos. Esta métrica se utiliza para evaluar el rendimiento de las solicitudes a la base de conocimientos. 
Por ejemplo, si un usuario de Sandbox realiza 10 pruebas consecutivas en un minuto, su espacio de trabajo será temporalmente restringido de realizar las siguientes acciones durante el siguiente minuto: creación de conjuntos de datos, eliminación, actualizaciones y carga o modificaciones de documentos.', startBuilding: 'Empezar a construir', + taxTip: 'Todos los precios de suscripción (mensuales/anuales) excluyen los impuestos aplicables (por ejemplo, IVA, impuesto sobre ventas).', + taxTipSecond: 'Si su región no tiene requisitos fiscales aplicables, no se mostrará ningún impuesto en su pago y no se le cobrará ninguna tarifa adicional durante todo el período de suscripción.', }, plans: { sandbox: { diff --git a/web/i18n/fa-IR/billing.ts b/web/i18n/fa-IR/billing.ts index 5634692dc2..3749036f3c 100644 --- a/web/i18n/fa-IR/billing.ts +++ b/web/i18n/fa-IR/billing.ts @@ -94,6 +94,8 @@ const translation = { apiRateLimitTooltip: 'محدودیت نرخ API برای همه درخواست‌های انجام شده از طریق API Dify اعمال می‌شود، از جمله تولید متن، محاوره‌های چت، اجرای گردش‌های کار و پردازش اسناد.', documentsRequestQuotaTooltip: 'تعیین می‌کند که تعداد کلی اقداماتی که یک فضای کاری می‌تواند در هر دقیقه در داخل پایگاه دانش انجام دهد، شامل ایجاد مجموعه داده، حذف، به‌روزرسانی، بارگذاری مستندات، تغییرات، بایگانی و پرسش از پایگاه دانش است. این معیار برای ارزیابی عملکرد درخواست‌های پایگاه دانش استفاده می‌شود. به عنوان مثال، اگر یک کاربر Sandbox در طی یک دقیقه 10 آزمایش متوالی انجام دهد، فضای کاری او به طور موقت از انجام اقدامات زیر در دقیقه بعدی محدود خواهد شد: ایجاد مجموعه داده، حذف، به‌روزرسانی و بارگذاری یا تغییر مستندات.', startBuilding: 'شروع به ساخت کنید', + taxTip: 'تمام قیمت‌های اشتراک (ماهانه/سالانه) شامل مالیات‌های مربوطه (مثلاً مالیات بر ارزش افزوده، مالیات فروش) نمی‌شوند.', + taxTipSecond: 'اگر منطقه شما هیچ الزامات مالیاتی قابل اجرا نداشته باشد، هیچ مالیاتی در هنگام پرداخت نشان داده نمی‌شود و برای کل مدت اشتراک هیچ هزینه اضافی از شما دریافت نخواهد شد.', }, plans: { sandbox: { diff --git a/web/i18n/fr-FR/billing.ts b/web/i18n/fr-FR/billing.ts index 117d1c6654..a41eed7e23 100644 --- a/web/i18n/fr-FR/billing.ts +++ b/web/i18n/fr-FR/billing.ts @@ -94,6 +94,8 @@ const translation = { documents: '{{count,number}} Documents de connaissance', documentsRequestQuotaTooltip: 'Spécifie le nombre total d\'actions qu\'un espace de travail peut effectuer par minute dans la base de connaissances, y compris la création, la suppression, les mises à jour de jeux de données, le téléchargement de documents, les modifications, l\'archivage et les requêtes de la base de connaissances. Ce paramètre est utilisé pour évaluer les performances des requêtes de la base de connaissances. 
Par exemple, si un utilisateur de Sandbox effectue 10 tests de validité consécutifs en une minute, son espace de travail sera temporairement restreint dans l\'exécution des actions suivantes pendant la minute suivante : création, suppression, mises à jour de jeux de données, et téléchargements ou modifications de documents.', startBuilding: 'Commencez à construire', + taxTip: 'Tous les prix des abonnements (mensuels/annuels) s\'entendent hors taxes applicables (par exemple, TVA, taxe de vente).', + taxTipSecond: 'Si votre région n\'a pas de exigences fiscales applicables, aucune taxe n\'apparaîtra lors de votre paiement et vous ne serez pas facturé de frais supplémentaires pendant toute la durée de l\'abonnement.', }, plans: { sandbox: { diff --git a/web/i18n/hi-IN/billing.ts b/web/i18n/hi-IN/billing.ts index 749ab804ab..fbc6dffc7c 100644 --- a/web/i18n/hi-IN/billing.ts +++ b/web/i18n/hi-IN/billing.ts @@ -102,6 +102,8 @@ const translation = { teamMember_one: '{{count,number}} टीम सदस्य', documentsRequestQuotaTooltip: 'यह ज्ञान आधार में एक कार्यक्षेत्र द्वारा प्रति मिनट किए जा सकने वाले कुल कार्यों की संख्या को निर्दिष्ट करता है, जिसमें डेटासेट बनाना, हटाना, अपडेट करना, दस्तावेज़ अपलोड करना, संशोधन करना, संग्रहित करना और ज्ञान आधार अनुरोध शामिल हैं। इस मीट्रिक का उपयोग ज्ञान आधार अनुरोधों के प्रदर्शन का मूल्यांकन करने के लिए किया जाता है। उदाहरण के लिए, यदि एक सैंडबॉक्स उपयोगकर्ता एक मिनट के भीतर 10 लगातार हिट परीक्षण करता है, तो उनके कार्यक्षेत्र को अगले मिनट के लिए निम्नलिखित कार्यों को करने से अस्थायी रूप से प्रतिबंधित किया जाएगा: डेटासेट बनाना, हटाना, अपडेट करना और दस्तावेज़ अपलोड या संशोधन करना।', startBuilding: 'बनाना शुरू करें', + taxTip: 'सभी सदस्यता मूल्य (मासिक/वार्षिक) लागू करों (जैसे, VAT, बिक्री कर) को शामिल नहीं करते हैं।', + taxTipSecond: 'यदि आपके क्षेत्र में कोई लागू कर आवश्यकताएँ नहीं हैं, तो आपकी चेकआउट में कोई कर नहीं दिखाई देगा, और पूरे सदस्यता अवधि के लिए आपसे कोई अतिरिक्त शुल्क नहीं लिया जाएगा।', }, plans: { sandbox: { diff --git a/web/i18n/id-ID/billing.ts b/web/i18n/id-ID/billing.ts index 11419c3b16..c6c718d15b 100644 --- a/web/i18n/id-ID/billing.ts +++ b/web/i18n/id-ID/billing.ts @@ -87,6 +87,8 @@ const translation = { modelProviders: 'Mendukung OpenAI/Anthropic/Llama2/Azure OpenAI/Hugging Face/Replite', member: 'Anggota', startBuilding: 'Mulai Membangun', + taxTip: 'Semua harga langganan (bulanan/tahunan) belum termasuk pajak yang berlaku (misalnya, PPN, pajak penjualan).', + taxTipSecond: 'Jika wilayah Anda tidak memiliki persyaratan pajak yang berlaku, tidak akan ada pajak yang muncul saat checkout, dan Anda tidak akan dikenakan biaya tambahan apa pun selama masa langganan.', }, plans: { sandbox: { diff --git a/web/i18n/it-IT/billing.ts b/web/i18n/it-IT/billing.ts index f89502ee5b..ef6b1943e3 100644 --- a/web/i18n/it-IT/billing.ts +++ b/web/i18n/it-IT/billing.ts @@ -102,6 +102,8 @@ const translation = { annualBilling: 'Fatturazione annuale', documentsRequestQuotaTooltip: 'Specifica il numero totale di azioni che un\'area di lavoro può eseguire al minuto all\'interno della base di conoscenza, compresi la creazione, l\'eliminazione, gli aggiornamenti dei dataset, il caricamento di documenti, le modifiche, l\'archiviazione e le query sulla base di conoscenza. Questa metrica viene utilizzata per valutare le prestazioni delle richieste alla base di conoscenza. 
Ad esempio, se un utente di Sandbox esegue 10 test consecutivi in un minuto, la sua area di lavoro sarà temporaneamente limitata dall\'eseguire le seguenti azioni per il minuto successivo: creazione, eliminazione, aggiornamenti dei dataset e caricamento o modifica di documenti.', startBuilding: 'Inizia a costruire', + taxTip: 'Tutti i prezzi degli abbonamenti (mensili/annuali) non includono le tasse applicabili (ad esempio, IVA, imposta sulle vendite).', + taxTipSecond: 'Se nella tua regione non ci sono requisiti fiscali applicabili, nessuna tassa apparirà al momento del pagamento e non ti verranno addebitate spese aggiuntive per l\'intera durata dell\'abbonamento.', }, plans: { sandbox: { diff --git a/web/i18n/ko-KR/billing.ts b/web/i18n/ko-KR/billing.ts index ff0dd189e4..c5f081d41b 100644 --- a/web/i18n/ko-KR/billing.ts +++ b/web/i18n/ko-KR/billing.ts @@ -103,6 +103,8 @@ const translation = { documentsRequestQuotaTooltip: '지식 기반 내에서 작업 공간이 분당 수행할 수 있는 총 작업 수를 지정합니다. 여기에는 데이터 세트 생성, 삭제, 업데이트, 문서 업로드, 수정, 보관 및 지식 기반 쿼리가 포함됩니다. 이 지표는 지식 기반 요청의 성능을 평가하는 데 사용됩니다. 예를 들어, 샌드박스 사용자가 1 분 이내에 10 회의 연속 히트 테스트를 수행하면, 해당 작업 공간은 다음 1 분 동안 데이터 세트 생성, 삭제, 업데이트 및 문서 업로드 또는 수정과 같은 작업을 수행하는 것이 일시적으로 제한됩니다.', startBuilding: '구축 시작', + taxTip: '모든 구독 요금(월간/연간)에는 해당 세금(예: 부가가치세, 판매세)이 포함되어 있지 않습니다.', + taxTipSecond: '귀하의 지역에 적용 가능한 세금 요구 사항이 없는 경우, 결제 시 세금이 표시되지 않으며 전체 구독 기간 동안 추가 요금이 부과되지 않습니다.', }, plans: { sandbox: { diff --git a/web/i18n/pl-PL/billing.ts b/web/i18n/pl-PL/billing.ts index 3bf0867877..cf0859468b 100644 --- a/web/i18n/pl-PL/billing.ts +++ b/web/i18n/pl-PL/billing.ts @@ -101,6 +101,8 @@ const translation = { documentsRequestQuota: '{{count,number}}/min Limit wiedzy na żądanie', documentsRequestQuotaTooltip: 'Określa całkowitą liczbę działań, jakie przestrzeń robocza może wykonać na minutę w ramach bazy wiedzy, w tym tworzenie zbiorów danych, usuwanie, aktualizacje, przesyłanie dokumentów, modyfikacje, archiwizowanie i zapytania do bazy wiedzy. Ta metryka jest używana do oceny wydajności zapytań do bazy wiedzy. Na przykład, jeśli użytkownik Sandbox wykona 10 kolejnych testów w ciągu jednej minuty, jego przestrzeń robocza zostanie tymczasowo ograniczona w wykonywaniu następujących działań przez następną minutę: tworzenie zbiorów danych, usuwanie, aktualizacje oraz przesyłanie lub modyfikacje dokumentów.', startBuilding: 'Zacznij budować', + taxTip: 'Wszystkie ceny subskrypcji (miesięczne/roczne) nie obejmują obowiązujących podatków (np. VAT, podatek od sprzedaży).', + taxTipSecond: 'Jeśli w Twoim regionie nie ma obowiązujących przepisów podatkowych, podatek nie pojawi się podczas realizacji zamówienia i nie zostaną naliczone żadne dodatkowe opłaty przez cały okres subskrypcji.', }, plans: { sandbox: { diff --git a/web/i18n/pt-BR/billing.ts b/web/i18n/pt-BR/billing.ts index 91ccaa7794..e4ca0a064a 100644 --- a/web/i18n/pt-BR/billing.ts +++ b/web/i18n/pt-BR/billing.ts @@ -94,6 +94,8 @@ const translation = { apiRateLimitTooltip: 'O limite da taxa da API se aplica a todas as solicitações feitas através da API Dify, incluindo geração de texto, conversas de chat, execuções de fluxo de trabalho e processamento de documentos.', documentsRequestQuotaTooltip: 'Especifica o número total de ações que um espaço de trabalho pode realizar por minuto dentro da base de conhecimento, incluindo criação, exclusão, atualizações de conjuntos de dados, uploads de documentos, modificações, arquivamento e consultas à base de conhecimento. 
Esse métrica é utilizada para avaliar o desempenho das solicitações à base de conhecimento. Por exemplo, se um usuário do Sandbox realizar 10 testes de impacto consecutivos dentro de um minuto, seu espaço de trabalho ficará temporariamente restrito de realizar as seguintes ações no minuto seguinte: criação, exclusão, atualizações de conjuntos de dados e uploads ou modificações de documentos.', startBuilding: 'Comece a construir', + taxTip: 'Todos os preços de assinatura (mensal/anual) não incluem os impostos aplicáveis (por exemplo, IVA, imposto sobre vendas).', + taxTipSecond: 'Se a sua região não tiver requisitos fiscais aplicáveis, nenhum imposto aparecerá no seu checkout e você não será cobrado por taxas adicionais durante todo o período da assinatura.', }, plans: { sandbox: { diff --git a/web/i18n/ro-RO/billing.ts b/web/i18n/ro-RO/billing.ts index 550ff3e677..3f5577dc32 100644 --- a/web/i18n/ro-RO/billing.ts +++ b/web/i18n/ro-RO/billing.ts @@ -94,6 +94,8 @@ const translation = { documentsRequestQuotaTooltip: 'Specificați numărul total de acțiuni pe care un spațiu de lucru le poate efectua pe minut în cadrul bazei de cunoștințe, inclusiv crearea, ștergerea, actualizările setului de date, încărcările de documente, modificările, arhivarea și interogările bazei de cunoștințe. Acest metric este utilizat pentru a evalua performanța cererilor din baza de cunoștințe. De exemplu, dacă un utilizator Sandbox efectuează 10 teste consecutive de hituri într-un minut, spațiul său de lucru va fi restricționat temporar de la efectuarea următoarelor acțiuni pentru minutul următor: crearea setului de date, ștergerea, actualizările și încărcările sau modificările documentelor.', apiRateLimitTooltip: 'Limita de rată API se aplică tuturor cererilor efectuate prin API-ul Dify, inclusiv generarea de texte, conversațiile de chat, execuțiile fluxului de lucru și procesarea documentelor.', startBuilding: 'Începeți să construiți', + taxTip: 'Toate prețurile abonamentelor (lunare/anuale) nu includ taxele aplicabile (de exemplu, TVA, taxa pe vânzări).', + taxTipSecond: 'Dacă regiunea dumneavoastră nu are cerințe fiscale aplicabile, niciun impozit nu va apărea la finalizarea comenzii și nu vi se vor percepe taxe suplimentare pe întreaga durată a abonamentului.', }, plans: { sandbox: { diff --git a/web/i18n/ru-RU/billing.ts b/web/i18n/ru-RU/billing.ts index 27f5c71685..7017f90cc2 100644 --- a/web/i18n/ru-RU/billing.ts +++ b/web/i18n/ru-RU/billing.ts @@ -94,6 +94,8 @@ const translation = { priceTip: 'по рабочему месту/', documentsTooltip: 'Квота на количество документов, импортируемых из источника знаний.', startBuilding: 'Начать строительство', + taxTip: 'Все цены на подписку (ежемесячную/годовую) не включают применимые налоги (например, НДС, налог с продаж).', + taxTipSecond: 'Если в вашем регионе нет применимых налоговых требований, налоги не будут отображаться при оформлении заказа, и с вас не будут взиматься дополнительные сборы за весь срок подписки.', }, plans: { sandbox: { diff --git a/web/i18n/sl-SI/billing.ts b/web/i18n/sl-SI/billing.ts index 4481100dd8..fb9d9ec435 100644 --- a/web/i18n/sl-SI/billing.ts +++ b/web/i18n/sl-SI/billing.ts @@ -94,6 +94,8 @@ const translation = { getStarted: 'Začnite', documentsRequestQuotaTooltip: 'Določa skupno število dejanj, ki jih lahko delovno mesto opravi na minuto znotraj znanja baze, vključno s kreiranjem, brisanjem, posodobitvami, nalaganjem dokumentov, spremembami, arhiviranjem in poizvedbami po znanju bazi. 
Ta meritev se uporablja za ocenjevanje uspešnosti poizvedb v bazi znanja. Na primer, če uporabnik Sandbox izvede 10 zaporednih testov udarca v eni minuti, bo njegovo delovno mesto začasno omejeno pri izvajanju naslednjih dejanj v naslednji minuti: kreiranje podatkovnih nizov, brisanje, posodobitve in nalaganje ali spremembe dokumentov.', startBuilding: 'Začnite graditi', + taxTip: 'Vse cene naročnin (mesečne/letne) ne vključujejo veljavnih davkov (npr. DDV, davek na promet).', + taxTipSecond: 'Če vaša regija nima veljavnih davčnih zahtev, se v vaši košarici ne bo prikazal noben davek in za celotno obdobje naročnine vam ne bodo zaračunani nobeni dodatni stroški.', }, plans: { sandbox: { diff --git a/web/i18n/th-TH/billing.ts b/web/i18n/th-TH/billing.ts index 55a01449eb..461e4a8240 100644 --- a/web/i18n/th-TH/billing.ts +++ b/web/i18n/th-TH/billing.ts @@ -94,6 +94,8 @@ const translation = { annualBilling: 'การเรียกเก็บเงินประจำปี', documentsRequestQuotaTooltip: 'ระบุจำนวนรวมของการกระทำที่เวิร์กสเปซสามารถดำเนินการต่อหนึ่งนาทีภายในฐานความรู้ รวมถึงการสร้างชุดข้อมูล การลบ การอัปเดต การอัปโหลดเอกสาร การปรับเปลี่ยน การเก็บถาวร และการสอบถามฐานความรู้ เมตริกนี้ถูกใช้ในการประเมินประสิทธิภาพของคำขอฐานความรู้ ตัวอย่างเช่น หากผู้ใช้ Sandbox ทำการทดสอบการตี 10 ครั้งต่อเนื่องภายในหนึ่งนาที เวิร์กสเปซของพวกเขาจะถูกจำกัดชั่วคราวในการดำเนินการต่อไปนี้ในนาทีถัดไป: การสร้างชุดข้อมูล การลบ การอัปเดต หรือการอัปโหลดหรือปรับเปลี่ยนเอกสาร.', startBuilding: 'เริ่มสร้าง', + taxTip: 'ราคาการสมัครสมาชิกทั้งหมด (รายเดือน/รายปี) ไม่รวมภาษีที่ใช้บังคับ (เช่น ภาษีมูลค่าเพิ่ม, ภาษีการขาย)', + taxTipSecond: 'หากภูมิภาคของคุณไม่มีข้อกำหนดเกี่ยวกับภาษีที่ใช้ได้ จะไม่มีการคิดภาษีในขั้นตอนการชำระเงินของคุณ และคุณจะไม่ถูกเรียกเก็บค่าธรรมเนียมเพิ่มเติมใด ๆ ตลอดระยะเวลาสมาชิกทั้งหมด', }, plans: { sandbox: { diff --git a/web/i18n/tr-TR/billing.ts b/web/i18n/tr-TR/billing.ts index 62d6e0a07e..6d01d9dd32 100644 --- a/web/i18n/tr-TR/billing.ts +++ b/web/i18n/tr-TR/billing.ts @@ -94,6 +94,8 @@ const translation = { teamWorkspace: '{{count,number}} Takım Çalışma Alanı', documentsRequestQuotaTooltip: 'Bir çalışma alanının bilgi tabanında, veri seti oluşturma, silme, güncellemeler, belge yüklemeleri, değişiklikler, arşivleme ve bilgi tabanı sorguları dahil olmak üzere, dakikada gerçekleştirebileceği toplam işlem sayısını belirtir. Bu ölçüt, bilgi tabanı taleplerinin performansını değerlendirmek için kullanılır. Örneğin, bir Sandbox kullanıcısı bir dakika içinde ardışık 10 vurma testi gerçekleştirirse, çalışma alanı bir sonraki dakika için aşağıdaki işlemleri gerçekleştirmesi geçici olarak kısıtlanacaktır: veri seti oluşturma, silme, güncellemeler ve belge yüklemeleri veya değişiklikler.', startBuilding: 'İnşa Etmeye Başlayın', + taxTip: 'Tüm abonelik fiyatları (aylık/yıllık) geçerli vergiler (ör. 
KDV, satış vergisi) hariçtir.', + taxTipSecond: 'Bölgenizde geçerli vergi gereksinimleri yoksa, ödeme sayfanızda herhangi bir vergi görünmeyecek ve tüm abonelik süresi boyunca ek bir ücret tahsil edilmeyecektir.', }, plans: { sandbox: { diff --git a/web/i18n/uk-UA/billing.ts b/web/i18n/uk-UA/billing.ts index 10dafedb24..03b743e4fe 100644 --- a/web/i18n/uk-UA/billing.ts +++ b/web/i18n/uk-UA/billing.ts @@ -94,6 +94,8 @@ const translation = { apiRateLimitTooltip: 'Обмеження частоти запитів застосовується до всіх запитів, зроблених через API Dify, включаючи генерацію тексту, чат-розмови, виконання робочих процесів та обробку документів.', documentsRequestQuotaTooltip: 'Вказує загальну кількість дій, які робоча область може виконувати за хвилину в межах бази знань, включаючи створення, видалення, оновлення наборів даних, завантаження документів, модифікації, архівування та запити до бази знань. Цей показник використовується для оцінки ефективності запитів до бази знань. Наприклад, якщо користувач Sandbox виконує 10 послідовних тестів за один хвилину, його робочій області буде тимчасово заборонено виконувати наступні дії протягом наступної хвилини: створення наборів даних, видалення, оновлення, а також завантаження чи модифікацію документів.', startBuilding: 'Почніть будувати', + taxTip: 'Всі ціни на підписку (щомісячна/щорічна) не включають відповідні податки (наприклад, ПДВ, податок з продажу).', + taxTipSecond: 'Якщо для вашого регіону немає відповідних податкових вимог, податок не відображатиметься на вашому чек-ауті, і з вас не стягуватимуть додаткові збори протягом усього терміну підписки.', }, plans: { sandbox: { diff --git a/web/i18n/vi-VN/billing.ts b/web/i18n/vi-VN/billing.ts index 68e662425f..0166185e45 100644 --- a/web/i18n/vi-VN/billing.ts +++ b/web/i18n/vi-VN/billing.ts @@ -94,6 +94,8 @@ const translation = { freeTrialTipSuffix: 'Không cần thẻ tín dụng', documentsRequestQuotaTooltip: 'Chỉ định tổng số hành động mà một không gian làm việc có thể thực hiện mỗi phút trong cơ sở tri thức, bao gồm tạo mới tập dữ liệu, xóa, cập nhật, tải tài liệu lên, thay đổi, lưu trữ và truy vấn cơ sở tri thức. Chỉ số này được sử dụng để đánh giá hiệu suất của các yêu cầu cơ sở tri thức. 
Ví dụ, nếu một người dùng Sandbox thực hiện 10 lần kiểm tra liên tiếp trong một phút, không gian làm việc của họ sẽ bị hạn chế tạm thời không thực hiện các hành động sau trong phút tiếp theo: tạo mới tập dữ liệu, xóa, cập nhật và tải tài liệu lên hoặc thay đổi.', startBuilding: 'Bắt đầu xây dựng', + taxTipSecond: 'Nếu khu vực của bạn không có yêu cầu thuế áp dụng, sẽ không có thuế xuất hiện trong quá trình thanh toán của bạn và bạn sẽ không bị tính bất kỳ khoản phí bổ sung nào trong suốt thời gian đăng ký.', + taxTip: 'Tất cả giá đăng ký (hàng tháng/hàng năm) chưa bao gồm các loại thuế áp dụng (ví dụ: VAT, thuế bán hàng).', }, plans: { sandbox: { diff --git a/web/i18n/zh-Hant/billing.ts b/web/i18n/zh-Hant/billing.ts index f99b1ef2cf..1b0b1f5e1f 100644 --- a/web/i18n/zh-Hant/billing.ts +++ b/web/i18n/zh-Hant/billing.ts @@ -94,6 +94,8 @@ const translation = { documentsTooltip: '從知識數據來源導入的文件數量配額。', documentsRequestQuotaTooltip: '指定工作區在知識基礎中每分鐘可以執行的總操作次數,包括數據集的創建、刪除、更新、文檔上傳、修改、歸檔和知識基礎查詢。這個指標用於評估知識基礎請求的性能。例如,如果一個沙箱用戶在一分鐘內連續執行 10 次命中測試,他們的工作區將在接下來的一分鐘內暫時禁止執行以下操作:數據集的創建、刪除、更新以及文檔上傳或修改。', startBuilding: '開始建造', + taxTip: '所有訂閱價格(月費/年費)不包含適用的稅費(例如增值稅、銷售稅)。', + taxTipSecond: '如果您的地區沒有適用的稅務要求,結帳時將不會顯示任何稅款,且在整個訂閱期間您也不會被收取任何額外費用。', }, plans: { sandbox: { From aa51662d98ddfdee55691eb1a4062ce2c367f4ed Mon Sep 17 00:00:00 2001 From: Guangdong Liu Date: Fri, 10 Oct 2025 15:59:14 +0800 Subject: [PATCH 08/49] refactor(api): add new endpoints for workspace management and update routing (#26465) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .../console/workspace/workspace.py | 22 ++++++++----------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/api/controllers/console/workspace/workspace.py b/api/controllers/console/workspace/workspace.py index 6bec70b5da..13a61052ae 100644 --- a/api/controllers/console/workspace/workspace.py +++ b/api/controllers/console/workspace/workspace.py @@ -14,7 +14,7 @@ from controllers.common.errors import ( TooManyFilesError, UnsupportedFileTypeError, ) -from controllers.console import api +from controllers.console import console_ns from controllers.console.admin import admin_required from controllers.console.error import AccountNotLinkTenantError from controllers.console.wraps import ( @@ -65,6 +65,7 @@ tenants_fields = { workspace_fields = {"id": fields.String, "name": fields.String, "status": fields.String, "created_at": TimestampField} +@console_ns.route("/workspaces") class TenantListApi(Resource): @setup_required @login_required @@ -93,6 +94,7 @@ class TenantListApi(Resource): return {"workspaces": marshal(tenant_dicts, tenants_fields)}, 200 +@console_ns.route("/all-workspaces") class WorkspaceListApi(Resource): @setup_required @admin_required @@ -118,6 +120,8 @@ class WorkspaceListApi(Resource): }, 200 +@console_ns.route("/workspaces/current") +@console_ns.route("/info") # Deprecated class TenantApi(Resource): @setup_required @login_required @@ -143,11 +147,10 @@ class TenantApi(Resource): else: raise Unauthorized("workspace is archived") - if not tenant: - raise ValueError("No tenant available") return WorkspaceService.get_tenant_info(tenant), 200 +@console_ns.route("/workspaces/switch") class SwitchWorkspaceApi(Resource): @setup_required @login_required @@ -172,6 +175,7 @@ class SwitchWorkspaceApi(Resource): return {"result": "success", "new_tenant": marshal(WorkspaceService.get_tenant_info(new_tenant), tenant_fields)} +@console_ns.route("/workspaces/custom-config") class CustomConfigWorkspaceApi(Resource): 
@setup_required @login_required @@ -202,6 +206,7 @@ class CustomConfigWorkspaceApi(Resource): return {"result": "success", "tenant": marshal(WorkspaceService.get_tenant_info(tenant), tenant_fields)} +@console_ns.route("/workspaces/custom-config/webapp-logo/upload") class WebappLogoWorkspaceApi(Resource): @setup_required @login_required @@ -242,6 +247,7 @@ class WebappLogoWorkspaceApi(Resource): return {"id": upload_file.id}, 201 +@console_ns.route("/workspaces/info") class WorkspaceInfoApi(Resource): @setup_required @login_required @@ -261,13 +267,3 @@ class WorkspaceInfoApi(Resource): db.session.commit() return {"result": "success", "tenant": marshal(WorkspaceService.get_tenant_info(tenant), tenant_fields)} - - -api.add_resource(TenantListApi, "/workspaces") # GET for getting all tenants -api.add_resource(WorkspaceListApi, "/all-workspaces") # GET for getting all tenants -api.add_resource(TenantApi, "/workspaces/current", endpoint="workspaces_current") # GET for getting current tenant info -api.add_resource(TenantApi, "/info", endpoint="info") # Deprecated -api.add_resource(SwitchWorkspaceApi, "/workspaces/switch") # POST for switching tenant -api.add_resource(CustomConfigWorkspaceApi, "/workspaces/custom-config") -api.add_resource(WebappLogoWorkspaceApi, "/workspaces/custom-config/webapp-logo/upload") -api.add_resource(WorkspaceInfoApi, "/workspaces/info") # POST for changing workspace info From 2b6882bd978255852cf0af2588199fe3645bafe8 Mon Sep 17 00:00:00 2001 From: znn Date: Fri, 10 Oct 2025 13:31:33 +0530 Subject: [PATCH 09/49] fix chunks 2 (#26623) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/core/rag/splitter/fixed_text_splitter.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/api/core/rag/splitter/fixed_text_splitter.py b/api/core/rag/splitter/fixed_text_splitter.py index 8356861242..801d2a2a52 100644 --- a/api/core/rag/splitter/fixed_text_splitter.py +++ b/api/core/rag/splitter/fixed_text_splitter.py @@ -2,6 +2,7 @@ from __future__ import annotations +import re from typing import Any from core.model_manager import ModelInstance @@ -52,7 +53,7 @@ class FixedRecursiveCharacterTextSplitter(EnhanceRecursiveCharacterTextSplitter) """Create a new TextSplitter.""" super().__init__(**kwargs) self._fixed_separator = fixed_separator - self._separators = separators or ["\n\n", "\n", " ", ""] + self._separators = separators or ["\n\n", "\n", "。", ". 
", " ", ""] def split_text(self, text: str) -> list[str]: """Split incoming text and return chunks.""" @@ -90,16 +91,19 @@ class FixedRecursiveCharacterTextSplitter(EnhanceRecursiveCharacterTextSplitter) # Now that we have the separator, split the text if separator: if separator == " ": - splits = text.split() + splits = re.split(r" +", text) else: splits = text.split(separator) splits = [item + separator if i < len(splits) else item for i, item in enumerate(splits)] else: splits = list(text) - splits = [s for s in splits if (s not in {"", "\n"})] + if separator == "\n": + splits = [s for s in splits if s != ""] + else: + splits = [s for s in splits if (s not in {"", "\n"})] _good_splits = [] _good_splits_lengths = [] # cache the lengths of the splits - _separator = "" if self._keep_separator else separator + _separator = separator if self._keep_separator else "" s_lens = self._length_function(splits) if separator != "": for s, s_len in zip(splits, s_lens): From 8a2b2082992a49597cb4d9b9832b22d468c0092f Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Fri, 10 Oct 2025 17:12:12 +0900 Subject: [PATCH 10/49] Refactor account models to use SQLAlchemy 2.0 dataclass mapping (#26415) Co-authored-by: google-labs-jules[bot] <161369871+google-labs-jules[bot]@users.noreply.github.com> Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- api/models/account.py | 159 +++++++++++------- api/services/account_service.py | 24 +-- .../app/test_chat_message_permissions.py | 14 +- .../app/test_model_config_permissions.py | 9 +- .../services/test_account_service.py | 3 +- .../services/test_workflow_service.py | 83 ++++----- .../test_delete_segment_from_index_task.py | 21 +-- .../test_disable_segments_from_index_task.py | 54 +++--- .../tasks/test_mail_invite_member_task.py | 17 +- api/tests/unit_tests/libs/test_helper.py | 4 +- .../test_sqlalchemy_repository.py | 5 +- .../test_workflow_draft_variable_service.py | 3 +- 12 files changed, 219 insertions(+), 177 deletions(-) diff --git a/api/models/account.py b/api/models/account.py index 8c1f990aa2..86cd9e41b5 100644 --- a/api/models/account.py +++ b/api/models/account.py @@ -1,15 +1,16 @@ import enum import json +from dataclasses import field from datetime import datetime from typing import Any, Optional import sqlalchemy as sa from flask_login import UserMixin # type: ignore[import-untyped] from sqlalchemy import DateTime, String, func, select -from sqlalchemy.orm import Mapped, Session, mapped_column, reconstructor +from sqlalchemy.orm import Mapped, Session, mapped_column from typing_extensions import deprecated -from models.base import Base +from models.base import TypeBase from .engine import db from .types import StringUUID @@ -83,31 +84,37 @@ class AccountStatus(enum.StrEnum): CLOSED = "closed" -class Account(UserMixin, Base): +class Account(UserMixin, TypeBase): __tablename__ = "accounts" __table_args__ = (sa.PrimaryKeyConstraint("id", name="account_pkey"), sa.Index("account_email_idx", "email")) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) name: Mapped[str] = mapped_column(String(255)) email: Mapped[str] = mapped_column(String(255)) - password: Mapped[str | None] = mapped_column(String(255)) - 
password_salt: Mapped[str | None] = mapped_column(String(255)) - avatar: Mapped[str | None] = mapped_column(String(255), nullable=True) - interface_language: Mapped[str | None] = mapped_column(String(255)) - interface_theme: Mapped[str | None] = mapped_column(String(255), nullable=True) - timezone: Mapped[str | None] = mapped_column(String(255)) - last_login_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True) - last_login_ip: Mapped[str | None] = mapped_column(String(255), nullable=True) - last_active_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp(), nullable=False) - status: Mapped[str] = mapped_column(String(16), server_default=sa.text("'active'::character varying")) - initialized_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True) - created_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp(), nullable=False) - updated_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp(), nullable=False) + password: Mapped[str | None] = mapped_column(String(255), default=None) + password_salt: Mapped[str | None] = mapped_column(String(255), default=None) + avatar: Mapped[str | None] = mapped_column(String(255), nullable=True, default=None) + interface_language: Mapped[str | None] = mapped_column(String(255), default=None) + interface_theme: Mapped[str | None] = mapped_column(String(255), nullable=True, default=None) + timezone: Mapped[str | None] = mapped_column(String(255), default=None) + last_login_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=None) + last_login_ip: Mapped[str | None] = mapped_column(String(255), nullable=True, default=None) + last_active_at: Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) + status: Mapped[str] = mapped_column( + String(16), server_default=sa.text("'active'::character varying"), default="active" + ) + initialized_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=None) + created_at: Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) - @reconstructor - def init_on_load(self): - self.role: TenantAccountRole | None = None - self._current_tenant: Tenant | None = None + role: TenantAccountRole | None = field(default=None, init=False) + _current_tenant: "Tenant | None" = field(default=None, init=False) @property def is_password_set(self): @@ -226,18 +233,24 @@ class TenantStatus(enum.StrEnum): ARCHIVE = "archive" -class Tenant(Base): +class Tenant(TypeBase): __tablename__ = "tenants" __table_args__ = (sa.PrimaryKeyConstraint("id", name="tenant_pkey"),) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) name: Mapped[str] = mapped_column(String(255)) - encrypt_public_key: Mapped[str | None] = mapped_column(sa.Text) - plan: Mapped[str] = mapped_column(String(255), server_default=sa.text("'basic'::character varying")) - status: Mapped[str] = mapped_column(String(255), server_default=sa.text("'normal'::character varying")) - custom_config: Mapped[str | None] = mapped_column(sa.Text) - created_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp(), 
nullable=False) - updated_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp()) + encrypt_public_key: Mapped[str | None] = mapped_column(sa.Text, default=None) + plan: Mapped[str] = mapped_column( + String(255), server_default=sa.text("'basic'::character varying"), default="basic" + ) + status: Mapped[str] = mapped_column( + String(255), server_default=sa.text("'normal'::character varying"), default="normal" + ) + custom_config: Mapped[str | None] = mapped_column(sa.Text, default=None) + created_at: Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) + updated_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp(), init=False) def get_accounts(self) -> list[Account]: return list( @@ -257,7 +270,7 @@ class Tenant(Base): self.custom_config = json.dumps(value) -class TenantAccountJoin(Base): +class TenantAccountJoin(TypeBase): __tablename__ = "tenant_account_joins" __table_args__ = ( sa.PrimaryKeyConstraint("id", name="tenant_account_join_pkey"), @@ -266,17 +279,21 @@ class TenantAccountJoin(Base): sa.UniqueConstraint("tenant_id", "account_id", name="unique_tenant_account_join"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) tenant_id: Mapped[str] = mapped_column(StringUUID) account_id: Mapped[str] = mapped_column(StringUUID) - current: Mapped[bool] = mapped_column(sa.Boolean, server_default=sa.text("false")) - role: Mapped[str] = mapped_column(String(16), server_default="normal") - invited_by: Mapped[str | None] = mapped_column(StringUUID) - created_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp()) - updated_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp()) + current: Mapped[bool] = mapped_column(sa.Boolean, server_default=sa.text("false"), default=False) + role: Mapped[str] = mapped_column(String(16), server_default="normal", default="normal") + invited_by: Mapped[str | None] = mapped_column(StringUUID, nullable=True, default=None) + created_at: Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) -class AccountIntegrate(Base): +class AccountIntegrate(TypeBase): __tablename__ = "account_integrates" __table_args__ = ( sa.PrimaryKeyConstraint("id", name="account_integrate_pkey"), @@ -284,16 +301,20 @@ class AccountIntegrate(Base): sa.UniqueConstraint("provider", "open_id", name="unique_provider_open_id"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) account_id: Mapped[str] = mapped_column(StringUUID) provider: Mapped[str] = mapped_column(String(16)) open_id: Mapped[str] = mapped_column(String(255)) encrypted_token: Mapped[str] = mapped_column(String(255)) - created_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp()) - updated_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp()) + created_at: Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) + updated_at: 
Mapped[datetime] = mapped_column( + DateTime, server_default=func.current_timestamp(), nullable=False, init=False + ) -class InvitationCode(Base): +class InvitationCode(TypeBase): __tablename__ = "invitation_codes" __table_args__ = ( sa.PrimaryKeyConstraint("id", name="invitation_code_pkey"), @@ -301,18 +322,22 @@ class InvitationCode(Base): sa.Index("invitation_codes_code_idx", "code", "status"), ) - id: Mapped[int] = mapped_column(sa.Integer) + id: Mapped[int] = mapped_column(sa.Integer, init=False) batch: Mapped[str] = mapped_column(String(255)) code: Mapped[str] = mapped_column(String(32)) - status: Mapped[str] = mapped_column(String(16), server_default=sa.text("'unused'::character varying")) - used_at: Mapped[datetime | None] = mapped_column(DateTime) - used_by_tenant_id: Mapped[str | None] = mapped_column(StringUUID) - used_by_account_id: Mapped[str | None] = mapped_column(StringUUID) - deprecated_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True) - created_at: Mapped[datetime] = mapped_column(DateTime, server_default=sa.text("CURRENT_TIMESTAMP(0)")) + status: Mapped[str] = mapped_column( + String(16), server_default=sa.text("'unused'::character varying"), default="unused" + ) + used_at: Mapped[datetime | None] = mapped_column(DateTime, default=None) + used_by_tenant_id: Mapped[str | None] = mapped_column(StringUUID, default=None) + used_by_account_id: Mapped[str | None] = mapped_column(StringUUID, default=None) + deprecated_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=None) + created_at: Mapped[datetime] = mapped_column( + DateTime, server_default=sa.text("CURRENT_TIMESTAMP(0)"), nullable=False, init=False + ) -class TenantPluginPermission(Base): +class TenantPluginPermission(TypeBase): class InstallPermission(enum.StrEnum): EVERYONE = "everyone" ADMINS = "admins" @@ -329,13 +354,17 @@ class TenantPluginPermission(Base): sa.UniqueConstraint("tenant_id", name="unique_tenant_plugin"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) - install_permission: Mapped[InstallPermission] = mapped_column(String(16), nullable=False, server_default="everyone") - debug_permission: Mapped[DebugPermission] = mapped_column(String(16), nullable=False, server_default="noone") + install_permission: Mapped[InstallPermission] = mapped_column( + String(16), nullable=False, server_default="everyone", default=InstallPermission.EVERYONE + ) + debug_permission: Mapped[DebugPermission] = mapped_column( + String(16), nullable=False, server_default="noone", default=DebugPermission.NOBODY + ) -class TenantPluginAutoUpgradeStrategy(Base): +class TenantPluginAutoUpgradeStrategy(TypeBase): class StrategySetting(enum.StrEnum): DISABLED = "disabled" FIX_ONLY = "fix_only" @@ -352,12 +381,20 @@ class TenantPluginAutoUpgradeStrategy(Base): sa.UniqueConstraint("tenant_id", name="unique_tenant_plugin_auto_upgrade_strategy"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) - strategy_setting: Mapped[StrategySetting] = mapped_column(String(16), nullable=False, server_default="fix_only") - upgrade_time_of_day: Mapped[int] = mapped_column(sa.Integer, 
nullable=False, default=0) # seconds of the day - upgrade_mode: Mapped[UpgradeMode] = mapped_column(String(16), nullable=False, server_default="exclude") - exclude_plugins: Mapped[list[str]] = mapped_column(sa.ARRAY(String(255)), nullable=False) # plugin_id (author/name) - include_plugins: Mapped[list[str]] = mapped_column(sa.ARRAY(String(255)), nullable=False) # plugin_id (author/name) - created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) + strategy_setting: Mapped[StrategySetting] = mapped_column( + String(16), nullable=False, server_default="fix_only", default=StrategySetting.FIX_ONLY + ) + upgrade_mode: Mapped[UpgradeMode] = mapped_column( + String(16), nullable=False, server_default="exclude", default=UpgradeMode.EXCLUDE + ) + exclude_plugins: Mapped[list[str]] = mapped_column(sa.ARRAY(String(255)), nullable=False, default_factory=list) + include_plugins: Mapped[list[str]] = mapped_column(sa.ARRAY(String(255)), nullable=False, default_factory=list) + upgrade_time_of_day: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=0) + created_at: Mapped[datetime] = mapped_column( + DateTime, nullable=False, server_default=func.current_timestamp(), init=False + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime, nullable=False, server_default=func.current_timestamp(), init=False + ) diff --git a/api/services/account_service.py b/api/services/account_service.py index 0e699d16da..77b8744020 100644 --- a/api/services/account_service.py +++ b/api/services/account_service.py @@ -246,10 +246,8 @@ class AccountService: ) ) - account = Account() - account.email = email - account.name = name - + password_to_set = None + salt_to_set = None if password: valid_password(password) @@ -261,14 +259,18 @@ class AccountService: password_hashed = hash_password(password, salt) base64_password_hashed = base64.b64encode(password_hashed).decode() - account.password = base64_password_hashed - account.password_salt = base64_salt + password_to_set = base64_password_hashed + salt_to_set = base64_salt - account.interface_language = interface_language - account.interface_theme = interface_theme - - # Set timezone based on language - account.timezone = language_timezone_mapping.get(interface_language, "UTC") + account = Account( + name=name, + email=email, + password=password_to_set, + password_salt=salt_to_set, + interface_language=interface_language, + interface_theme=interface_theme, + timezone=language_timezone_mapping.get(interface_language, "UTC"), + ) db.session.add(account) db.session.commit() diff --git a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py index da1524ff2e..4d1c1227bd 100644 --- a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py @@ -33,17 +33,19 @@ class TestChatMessageApiPermissions: @pytest.fixture def mock_account(self, monkeypatch: pytest.MonkeyPatch): """Create a mock Account for testing.""" - account = Account() - account.id = str(uuid.uuid4()) - account.name = "Test User" - account.email = "test@example.com" + + account = Account( + name="Test User", + email="test@example.com", + ) account.last_active_at = naive_utc_now() account.created_at = 
naive_utc_now() account.updated_at = naive_utc_now() + account.id = str(uuid.uuid4()) - tenant = Tenant() + # Create mock tenant + tenant = Tenant(name="Test Tenant") tenant.id = str(uuid.uuid4()) - tenant.name = "Test Tenant" mock_session_instance = mock.Mock() diff --git a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py index c0fd56ef63..e158f26f3a 100644 --- a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py @@ -32,17 +32,16 @@ class TestModelConfigResourcePermissions: @pytest.fixture def mock_account(self, monkeypatch: pytest.MonkeyPatch): """Create a mock Account for testing.""" - account = Account() + + account = Account(name="Test User", email="test@example.com") account.id = str(uuid.uuid4()) - account.name = "Test User" - account.email = "test@example.com" account.last_active_at = naive_utc_now() account.created_at = naive_utc_now() account.updated_at = naive_utc_now() - tenant = Tenant() + # Create mock tenant + tenant = Tenant(name="Test Tenant") tenant.id = str(uuid.uuid4()) - tenant.name = "Test Tenant" mock_session_instance = mock.Mock() diff --git a/api/tests/test_containers_integration_tests/services/test_account_service.py b/api/tests/test_containers_integration_tests/services/test_account_service.py index c98406d845..0a2fb955ae 100644 --- a/api/tests/test_containers_integration_tests/services/test_account_service.py +++ b/api/tests/test_containers_integration_tests/services/test_account_service.py @@ -16,6 +16,7 @@ from services.errors.account import ( AccountPasswordError, AccountRegisterError, CurrentPasswordIncorrectError, + TenantNotFoundError, ) from services.errors.workspace import WorkSpaceNotAllowedCreateError, WorkspacesLimitExceededError @@ -1414,7 +1415,7 @@ class TestTenantService: ) # Try to get current tenant (should fail) - with pytest.raises(AttributeError): + with pytest.raises((AttributeError, TenantNotFoundError)): TenantService.get_current_tenant_by_account(account) def test_switch_tenant_success(self, db_session_with_containers, mock_external_service_dependencies): diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_service.py b/api/tests/test_containers_integration_tests/services/test_workflow_service.py index 60150667ed..0dd3909ba7 100644 --- a/api/tests/test_containers_integration_tests/services/test_workflow_service.py +++ b/api/tests/test_containers_integration_tests/services/test_workflow_service.py @@ -44,27 +44,26 @@ class TestWorkflowService: Account: Created test account instance """ fake = fake or Faker() - account = Account() - account.id = fake.uuid4() - account.email = fake.email() - account.name = fake.name() - account.avatar_url = fake.url() - account.tenant_id = fake.uuid4() - account.status = "active" - account.type = "normal" - account.role = "owner" - account.interface_language = "en-US" # Set interface language for Site creation + account = Account( + email=fake.email(), + name=fake.name(), + avatar=fake.url(), + status="active", + interface_language="en-US", # Set interface language for Site creation + ) account.created_at = fake.date_time_this_year() + account.id = fake.uuid4() account.updated_at = account.created_at # Create a tenant for the account from models.account import Tenant - tenant = Tenant() - tenant.id = account.tenant_id - tenant.name = 
f"Test Tenant {fake.company()}" - tenant.plan = "basic" - tenant.status = "active" + tenant = Tenant( + name=f"Test Tenant {fake.company()}", + plan="basic", + status="active", + ) + tenant.id = account.current_tenant_id tenant.created_at = fake.date_time_this_year() tenant.updated_at = tenant.created_at @@ -91,20 +90,21 @@ class TestWorkflowService: App: Created test app instance """ fake = fake or Faker() - app = App() - app.id = fake.uuid4() - app.tenant_id = fake.uuid4() - app.name = fake.company() - app.description = fake.text() - app.mode = AppMode.WORKFLOW - app.icon_type = "emoji" - app.icon = "🤖" - app.icon_background = "#FFEAD5" - app.enable_site = True - app.enable_api = True - app.created_by = fake.uuid4() + app = App( + id=fake.uuid4(), + tenant_id=fake.uuid4(), + name=fake.company(), + description=fake.text(), + mode=AppMode.WORKFLOW, + icon_type="emoji", + icon="🤖", + icon_background="#FFEAD5", + enable_site=True, + enable_api=True, + created_by=fake.uuid4(), + workflow_id=None, # Will be set when workflow is created + ) app.updated_by = app.created_by - app.workflow_id = None # Will be set when workflow is created from extensions.ext_database import db @@ -126,19 +126,20 @@ class TestWorkflowService: Workflow: Created test workflow instance """ fake = fake or Faker() - workflow = Workflow() - workflow.id = fake.uuid4() - workflow.tenant_id = app.tenant_id - workflow.app_id = app.id - workflow.type = WorkflowType.WORKFLOW.value - workflow.version = Workflow.VERSION_DRAFT - workflow.graph = json.dumps({"nodes": [], "edges": []}) - workflow.features = json.dumps({"features": []}) - # unique_hash is a computed property based on graph and features - workflow.created_by = account.id - workflow.updated_by = account.id - workflow.environment_variables = [] - workflow.conversation_variables = [] + workflow = Workflow( + id=fake.uuid4(), + tenant_id=app.tenant_id, + app_id=app.id, + type=WorkflowType.WORKFLOW.value, + version=Workflow.VERSION_DRAFT, + graph=json.dumps({"nodes": [], "edges": []}), + features=json.dumps({"features": []}), + # unique_hash is a computed property based on graph and features + created_by=account.id, + updated_by=account.id, + environment_variables=[], + conversation_variables=[], + ) from extensions.ext_database import db diff --git a/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py index 7af4f238be..94e9b76965 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py @@ -48,11 +48,8 @@ class TestDeleteSegmentFromIndexTask: Tenant: Created test tenant instance """ fake = fake or Faker() - tenant = Tenant() + tenant = Tenant(name=f"Test Tenant {fake.company()}", plan="basic", status="active") tenant.id = fake.uuid4() - tenant.name = f"Test Tenant {fake.company()}" - tenant.plan = "basic" - tenant.status = "active" tenant.created_at = fake.date_time_this_year() tenant.updated_at = tenant.created_at @@ -73,16 +70,14 @@ class TestDeleteSegmentFromIndexTask: Account: Created test account instance """ fake = fake or Faker() - account = Account() + account = Account( + name=fake.name(), + email=fake.email(), + avatar=fake.url(), + status="active", + interface_language="en-US", + ) account.id = fake.uuid4() - account.email = fake.email() - account.name = fake.name() - account.avatar_url = fake.url() 
- account.tenant_id = tenant.id - account.status = "active" - account.type = "normal" - account.role = "owner" - account.interface_language = "en-US" account.created_at = fake.date_time_this_year() account.updated_at = account.created_at diff --git a/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py index 5fdb8c617c..0b36e0914a 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py @@ -43,27 +43,30 @@ class TestDisableSegmentsFromIndexTask: Account: Created test account instance """ fake = fake or Faker() - account = Account() + account = Account( + email=fake.email(), + name=fake.name(), + avatar=fake.url(), + status="active", + interface_language="en-US", + ) account.id = fake.uuid4() - account.email = fake.email() - account.name = fake.name() - account.avatar_url = fake.url() + # monkey-patch attributes for test setup account.tenant_id = fake.uuid4() - account.status = "active" account.type = "normal" account.role = "owner" - account.interface_language = "en-US" account.created_at = fake.date_time_this_year() account.updated_at = account.created_at # Create a tenant for the account from models.account import Tenant - tenant = Tenant() + tenant = Tenant( + name=f"Test Tenant {fake.company()}", + plan="basic", + status="active", + ) tenant.id = account.tenant_id - tenant.name = f"Test Tenant {fake.company()}" - tenant.plan = "basic" - tenant.status = "active" tenant.created_at = fake.date_time_this_year() tenant.updated_at = tenant.created_at @@ -91,20 +94,21 @@ class TestDisableSegmentsFromIndexTask: Dataset: Created test dataset instance """ fake = fake or Faker() - dataset = Dataset() - dataset.id = fake.uuid4() - dataset.tenant_id = account.tenant_id - dataset.name = f"Test Dataset {fake.word()}" - dataset.description = fake.text(max_nb_chars=200) - dataset.provider = "vendor" - dataset.permission = "only_me" - dataset.data_source_type = "upload_file" - dataset.indexing_technique = "high_quality" - dataset.created_by = account.id - dataset.updated_by = account.id - dataset.embedding_model = "text-embedding-ada-002" - dataset.embedding_model_provider = "openai" - dataset.built_in_field_enabled = False + dataset = Dataset( + id=fake.uuid4(), + tenant_id=account.tenant_id, + name=f"Test Dataset {fake.word()}", + description=fake.text(max_nb_chars=200), + provider="vendor", + permission="only_me", + data_source_type="upload_file", + indexing_technique="high_quality", + created_by=account.id, + updated_by=account.id, + embedding_model="text-embedding-ada-002", + embedding_model_provider="openai", + built_in_field_enabled=False, + ) from extensions.ext_database import db @@ -128,6 +132,7 @@ class TestDisableSegmentsFromIndexTask: """ fake = fake or Faker() document = DatasetDocument() + document.id = fake.uuid4() document.tenant_id = dataset.tenant_id document.dataset_id = dataset.id @@ -153,7 +158,6 @@ class TestDisableSegmentsFromIndexTask: document.archived = False document.doc_form = "text_model" # Use text_model form for testing document.doc_language = "en" - from extensions.ext_database import db db.session.add(document) diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py index 
8fef87b317..ead7757c13 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py @@ -96,9 +96,9 @@ class TestMailInviteMemberTask: password=fake.password(), interface_language="en-US", status=AccountStatus.ACTIVE.value, - created_at=datetime.now(UTC), - updated_at=datetime.now(UTC), ) + account.created_at = datetime.now(UTC) + account.updated_at = datetime.now(UTC) db_session_with_containers.add(account) db_session_with_containers.commit() db_session_with_containers.refresh(account) @@ -106,9 +106,9 @@ class TestMailInviteMemberTask: # Create tenant tenant = Tenant( name=fake.company(), - created_at=datetime.now(UTC), - updated_at=datetime.now(UTC), ) + tenant.created_at = datetime.now(UTC) + tenant.updated_at = datetime.now(UTC) db_session_with_containers.add(tenant) db_session_with_containers.commit() db_session_with_containers.refresh(tenant) @@ -118,8 +118,8 @@ class TestMailInviteMemberTask: tenant_id=tenant.id, account_id=account.id, role=TenantAccountRole.OWNER.value, - created_at=datetime.now(UTC), ) + tenant_join.created_at = datetime.now(UTC) db_session_with_containers.add(tenant_join) db_session_with_containers.commit() @@ -164,9 +164,10 @@ class TestMailInviteMemberTask: password="", interface_language="en-US", status=AccountStatus.PENDING.value, - created_at=datetime.now(UTC), - updated_at=datetime.now(UTC), ) + + account.created_at = datetime.now(UTC) + account.updated_at = datetime.now(UTC) db_session_with_containers.add(account) db_session_with_containers.commit() db_session_with_containers.refresh(account) @@ -176,8 +177,8 @@ class TestMailInviteMemberTask: tenant_id=tenant.id, account_id=account.id, role=TenantAccountRole.NORMAL.value, - created_at=datetime.now(UTC), ) + tenant_join.created_at = datetime.now(UTC) db_session_with_containers.add(tenant_join) db_session_with_containers.commit() diff --git a/api/tests/unit_tests/libs/test_helper.py b/api/tests/unit_tests/libs/test_helper.py index b7701055f5..85789bfa7e 100644 --- a/api/tests/unit_tests/libs/test_helper.py +++ b/api/tests/unit_tests/libs/test_helper.py @@ -11,7 +11,7 @@ class TestExtractTenantId: def test_extract_tenant_id_from_account_with_tenant(self): """Test extracting tenant_id from Account with current_tenant_id.""" # Create a mock Account object - account = Account() + account = Account(name="test", email="test@example.com") # Mock the current_tenant_id property account._current_tenant = type("MockTenant", (), {"id": "account-tenant-123"})() @@ -21,7 +21,7 @@ class TestExtractTenantId: def test_extract_tenant_id_from_account_without_tenant(self): """Test extracting tenant_id from Account without current_tenant_id.""" # Create a mock Account object - account = Account() + account = Account(name="test", email="test@example.com") account._current_tenant = None tenant_id = extract_tenant_id(account) diff --git a/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py b/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py index fadd1ee88f..28b339fe85 100644 --- a/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py +++ b/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py @@ -59,12 +59,11 @@ def session(): @pytest.fixture def mock_user(): """Create a user instance for testing.""" - user = Account() + user = Account(name="test", 
email="test@example.com") user.id = "test-user-id" - tenant = Tenant() + tenant = Tenant(name="Test Workspace") tenant.id = "test-tenant" - tenant.name = "Test Workspace" user._current_tenant = MagicMock() user._current_tenant.id = "test-tenant" diff --git a/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py b/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py index 7e324ca4db..66361f26e0 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py @@ -47,7 +47,8 @@ class TestDraftVariableSaver: def test__should_variable_be_visible(self): mock_session = MagicMock(spec=Session) - mock_user = Account(id=str(uuid.uuid4())) + mock_user = Account(name="test", email="test@example.com") + mock_user.id = str(uuid.uuid4()) test_app_id = self._get_test_app_id() saver = DraftVariableSaver( session=mock_session, From c1e8584b9760a42cd2436f763b97cb4fb5546365 Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Fri, 10 Oct 2025 17:23:39 +0900 Subject: [PATCH 11/49] feat: Refactor api.add_resource to @console_ns.route decorator (#26386) Co-authored-by: google-labs-jules[bot] <161369871+google-labs-jules[bot]@users.noreply.github.com> Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- .../console/explore/installed_app.py | 8 +- api/controllers/console/explore/parameter.py | 10 +- .../console/explore/recommended_app.py | 8 +- .../console/explore/saved_message.py | 18 +-- api/controllers/console/workspace/account.py | 46 ++++---- .../workspace/load_balancing_config.py | 20 ++-- api/controllers/console/workspace/members.py | 21 ++-- .../console/workspace/model_providers.py | 27 ++--- api/controllers/console/workspace/models.py | 44 +++----- api/controllers/console/workspace/plugin.py | 61 +++++----- .../console/workspace/tool_providers.py | 104 +++++++----------- .../console/workspace/workspace.py | 4 +- 12 files changed, 141 insertions(+), 230 deletions(-) diff --git a/api/controllers/console/explore/installed_app.py b/api/controllers/console/explore/installed_app.py index bdc3fb0dbd..c86c243c9b 100644 --- a/api/controllers/console/explore/installed_app.py +++ b/api/controllers/console/explore/installed_app.py @@ -6,7 +6,7 @@ from flask_restx import Resource, inputs, marshal_with, reqparse from sqlalchemy import and_, select from werkzeug.exceptions import BadRequest, Forbidden, NotFound -from controllers.console import api +from controllers.console import console_ns from controllers.console.explore.wraps import InstalledAppResource from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check from extensions.ext_database import db @@ -22,6 +22,7 @@ from services.feature_service import FeatureService logger = logging.getLogger(__name__) +@console_ns.route("/installed-apps") class InstalledAppsListApi(Resource): @login_required @account_initialization_required @@ -154,6 +155,7 @@ class InstalledAppsListApi(Resource): return {"message": "App installed successfully"} +@console_ns.route("/installed-apps/") class InstalledAppApi(InstalledAppResource): """ update and delete an installed app @@ -185,7 +187,3 @@ class InstalledAppApi(InstalledAppResource): db.session.commit() return {"result": 
"success", "message": "App info updated successfully"} - - -api.add_resource(InstalledAppsListApi, "/installed-apps") -api.add_resource(InstalledAppApi, "/installed-apps/") diff --git a/api/controllers/console/explore/parameter.py b/api/controllers/console/explore/parameter.py index 7742ea24a9..9c6b2aedfb 100644 --- a/api/controllers/console/explore/parameter.py +++ b/api/controllers/console/explore/parameter.py @@ -1,7 +1,7 @@ from flask_restx import marshal_with from controllers.common import fields -from controllers.console import api +from controllers.console import console_ns from controllers.console.app.error import AppUnavailableError from controllers.console.explore.wraps import InstalledAppResource from core.app.app_config.common.parameters_mapping import get_parameters_from_feature_dict @@ -9,6 +9,7 @@ from models.model import AppMode, InstalledApp from services.app_service import AppService +@console_ns.route("/installed-apps//parameters", endpoint="installed_app_parameters") class AppParameterApi(InstalledAppResource): """Resource for app variables.""" @@ -39,6 +40,7 @@ class AppParameterApi(InstalledAppResource): return get_parameters_from_feature_dict(features_dict=features_dict, user_input_form=user_input_form) +@console_ns.route("/installed-apps//meta", endpoint="installed_app_meta") class ExploreAppMetaApi(InstalledAppResource): def get(self, installed_app: InstalledApp): """Get app meta""" @@ -46,9 +48,3 @@ class ExploreAppMetaApi(InstalledAppResource): if not app_model: raise ValueError("App not found") return AppService().get_app_meta(app_model) - - -api.add_resource( - AppParameterApi, "/installed-apps//parameters", endpoint="installed_app_parameters" -) -api.add_resource(ExploreAppMetaApi, "/installed-apps//meta", endpoint="installed_app_meta") diff --git a/api/controllers/console/explore/recommended_app.py b/api/controllers/console/explore/recommended_app.py index 974222ddf7..6d627a929a 100644 --- a/api/controllers/console/explore/recommended_app.py +++ b/api/controllers/console/explore/recommended_app.py @@ -1,7 +1,7 @@ from flask_restx import Resource, fields, marshal_with, reqparse from constants.languages import languages -from controllers.console import api +from controllers.console import console_ns from controllers.console.wraps import account_initialization_required from libs.helper import AppIconUrlField from libs.login import current_user, login_required @@ -35,6 +35,7 @@ recommended_app_list_fields = { } +@console_ns.route("/explore/apps") class RecommendedAppListApi(Resource): @login_required @account_initialization_required @@ -56,13 +57,10 @@ class RecommendedAppListApi(Resource): return RecommendedAppService.get_recommended_apps_and_categories(language_prefix) +@console_ns.route("/explore/apps/") class RecommendedAppApi(Resource): @login_required @account_initialization_required def get(self, app_id): app_id = str(app_id) return RecommendedAppService.get_recommend_app_detail(app_id) - - -api.add_resource(RecommendedAppListApi, "/explore/apps") -api.add_resource(RecommendedAppApi, "/explore/apps/") diff --git a/api/controllers/console/explore/saved_message.py b/api/controllers/console/explore/saved_message.py index 6f05f898f9..79e4a4339e 100644 --- a/api/controllers/console/explore/saved_message.py +++ b/api/controllers/console/explore/saved_message.py @@ -2,7 +2,7 @@ from flask_restx import fields, marshal_with, reqparse from flask_restx.inputs import int_range from werkzeug.exceptions import NotFound -from controllers.console import api +from 
controllers.console import console_ns from controllers.console.explore.error import NotCompletionAppError from controllers.console.explore.wraps import InstalledAppResource from fields.conversation_fields import message_file_fields @@ -25,6 +25,7 @@ message_fields = { } +@console_ns.route("/installed-apps//saved-messages", endpoint="installed_app_saved_messages") class SavedMessageListApi(InstalledAppResource): saved_message_infinite_scroll_pagination_fields = { "limit": fields.Integer, @@ -66,6 +67,9 @@ class SavedMessageListApi(InstalledAppResource): return {"result": "success"} +@console_ns.route( + "/installed-apps//saved-messages/", endpoint="installed_app_saved_message" +) class SavedMessageApi(InstalledAppResource): def delete(self, installed_app, message_id): app_model = installed_app.app @@ -80,15 +84,3 @@ class SavedMessageApi(InstalledAppResource): SavedMessageService.delete(app_model, current_user, message_id) return {"result": "success"}, 204 - - -api.add_resource( - SavedMessageListApi, - "/installed-apps//saved-messages", - endpoint="installed_app_saved_messages", -) -api.add_resource( - SavedMessageApi, - "/installed-apps//saved-messages/", - endpoint="installed_app_saved_message", -) diff --git a/api/controllers/console/workspace/account.py b/api/controllers/console/workspace/account.py index 7a41a8a5cc..e2b0e3f84d 100644 --- a/api/controllers/console/workspace/account.py +++ b/api/controllers/console/workspace/account.py @@ -9,7 +9,7 @@ from sqlalchemy.orm import Session from configs import dify_config from constants.languages import supported_language -from controllers.console import api +from controllers.console import console_ns from controllers.console.auth.error import ( EmailAlreadyInUseError, EmailChangeLimitError, @@ -45,6 +45,7 @@ from services.billing_service import BillingService from services.errors.account import CurrentPasswordIncorrectError as ServiceCurrentPasswordIncorrectError +@console_ns.route("/account/init") class AccountInitApi(Resource): @setup_required @login_required @@ -97,6 +98,7 @@ class AccountInitApi(Resource): return {"result": "success"} +@console_ns.route("/account/profile") class AccountProfileApi(Resource): @setup_required @login_required @@ -109,6 +111,7 @@ class AccountProfileApi(Resource): return current_user +@console_ns.route("/account/name") class AccountNameApi(Resource): @setup_required @login_required @@ -130,6 +133,7 @@ class AccountNameApi(Resource): return updated_account +@console_ns.route("/account/avatar") class AccountAvatarApi(Resource): @setup_required @login_required @@ -147,6 +151,7 @@ class AccountAvatarApi(Resource): return updated_account +@console_ns.route("/account/interface-language") class AccountInterfaceLanguageApi(Resource): @setup_required @login_required @@ -164,6 +169,7 @@ class AccountInterfaceLanguageApi(Resource): return updated_account +@console_ns.route("/account/interface-theme") class AccountInterfaceThemeApi(Resource): @setup_required @login_required @@ -181,6 +187,7 @@ class AccountInterfaceThemeApi(Resource): return updated_account +@console_ns.route("/account/timezone") class AccountTimezoneApi(Resource): @setup_required @login_required @@ -202,6 +209,7 @@ class AccountTimezoneApi(Resource): return updated_account +@console_ns.route("/account/password") class AccountPasswordApi(Resource): @setup_required @login_required @@ -227,6 +235,7 @@ class AccountPasswordApi(Resource): return {"result": "success"} +@console_ns.route("/account/integrates") class AccountIntegrateApi(Resource): 
integrate_fields = { "provider": fields.String, @@ -283,6 +292,7 @@ class AccountIntegrateApi(Resource): return {"data": integrate_data} +@console_ns.route("/account/delete/verify") class AccountDeleteVerifyApi(Resource): @setup_required @login_required @@ -298,6 +308,7 @@ class AccountDeleteVerifyApi(Resource): return {"result": "success", "data": token} +@console_ns.route("/account/delete") class AccountDeleteApi(Resource): @setup_required @login_required @@ -320,6 +331,7 @@ class AccountDeleteApi(Resource): return {"result": "success"} +@console_ns.route("/account/delete/feedback") class AccountDeleteUpdateFeedbackApi(Resource): @setup_required def post(self): @@ -333,6 +345,7 @@ class AccountDeleteUpdateFeedbackApi(Resource): return {"result": "success"} +@console_ns.route("/account/education/verify") class EducationVerifyApi(Resource): verify_fields = { "token": fields.String, @@ -352,6 +365,7 @@ class EducationVerifyApi(Resource): return BillingService.EducationIdentity.verify(account.id, account.email) +@console_ns.route("/account/education") class EducationApi(Resource): status_fields = { "result": fields.Boolean, @@ -396,6 +410,7 @@ class EducationApi(Resource): return res +@console_ns.route("/account/education/autocomplete") class EducationAutoCompleteApi(Resource): data_fields = { "data": fields.List(fields.String), @@ -419,6 +434,7 @@ class EducationAutoCompleteApi(Resource): return BillingService.EducationIdentity.autocomplete(args["keywords"], args["page"], args["limit"]) +@console_ns.route("/account/change-email") class ChangeEmailSendEmailApi(Resource): @enable_change_email @setup_required @@ -467,6 +483,7 @@ class ChangeEmailSendEmailApi(Resource): return {"result": "success", "data": token} +@console_ns.route("/account/change-email/validity") class ChangeEmailCheckApi(Resource): @enable_change_email @setup_required @@ -508,6 +525,7 @@ class ChangeEmailCheckApi(Resource): return {"is_valid": True, "email": token_data.get("email"), "token": new_token} +@console_ns.route("/account/change-email/reset") class ChangeEmailResetApi(Resource): @enable_change_email @setup_required @@ -547,6 +565,7 @@ class ChangeEmailResetApi(Resource): return updated_account +@console_ns.route("/account/change-email/check-email-unique") class CheckEmailUnique(Resource): @setup_required def post(self): @@ -558,28 +577,3 @@ class CheckEmailUnique(Resource): if not AccountService.check_email_unique(args["email"]): raise EmailAlreadyInUseError() return {"result": "success"} - - -# Register API resources -api.add_resource(AccountInitApi, "/account/init") -api.add_resource(AccountProfileApi, "/account/profile") -api.add_resource(AccountNameApi, "/account/name") -api.add_resource(AccountAvatarApi, "/account/avatar") -api.add_resource(AccountInterfaceLanguageApi, "/account/interface-language") -api.add_resource(AccountInterfaceThemeApi, "/account/interface-theme") -api.add_resource(AccountTimezoneApi, "/account/timezone") -api.add_resource(AccountPasswordApi, "/account/password") -api.add_resource(AccountIntegrateApi, "/account/integrates") -api.add_resource(AccountDeleteVerifyApi, "/account/delete/verify") -api.add_resource(AccountDeleteApi, "/account/delete") -api.add_resource(AccountDeleteUpdateFeedbackApi, "/account/delete/feedback") -api.add_resource(EducationVerifyApi, "/account/education/verify") -api.add_resource(EducationApi, "/account/education") -api.add_resource(EducationAutoCompleteApi, "/account/education/autocomplete") -# Change email -api.add_resource(ChangeEmailSendEmailApi, 
"/account/change-email") -api.add_resource(ChangeEmailCheckApi, "/account/change-email/validity") -api.add_resource(ChangeEmailResetApi, "/account/change-email/reset") -api.add_resource(CheckEmailUnique, "/account/change-email/check-email-unique") -# api.add_resource(AccountEmailApi, '/account/email') -# api.add_resource(AccountEmailVerifyApi, '/account/email-verify') diff --git a/api/controllers/console/workspace/load_balancing_config.py b/api/controllers/console/workspace/load_balancing_config.py index 7c1bc7c075..99a1c1f032 100644 --- a/api/controllers/console/workspace/load_balancing_config.py +++ b/api/controllers/console/workspace/load_balancing_config.py @@ -1,7 +1,7 @@ from flask_restx import Resource, reqparse from werkzeug.exceptions import Forbidden -from controllers.console import api +from controllers.console import console_ns from controllers.console.wraps import account_initialization_required, setup_required from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.errors.validate import CredentialsValidateFailedError @@ -10,6 +10,9 @@ from models.account import Account, TenantAccountRole from services.model_load_balancing_service import ModelLoadBalancingService +@console_ns.route( + "/workspaces/current/model-providers//models/load-balancing-configs/credentials-validate" +) class LoadBalancingCredentialsValidateApi(Resource): @setup_required @login_required @@ -61,6 +64,9 @@ class LoadBalancingCredentialsValidateApi(Resource): return response +@console_ns.route( + "/workspaces/current/model-providers//models/load-balancing-configs//credentials-validate" +) class LoadBalancingConfigCredentialsValidateApi(Resource): @setup_required @login_required @@ -111,15 +117,3 @@ class LoadBalancingConfigCredentialsValidateApi(Resource): response["error"] = error return response - - -# Load Balancing Config -api.add_resource( - LoadBalancingCredentialsValidateApi, - "/workspaces/current/model-providers//models/load-balancing-configs/credentials-validate", -) - -api.add_resource( - LoadBalancingConfigCredentialsValidateApi, - "/workspaces/current/model-providers//models/load-balancing-configs//credentials-validate", -) diff --git a/api/controllers/console/workspace/members.py b/api/controllers/console/workspace/members.py index 77f0c9a735..8b89853bd9 100644 --- a/api/controllers/console/workspace/members.py +++ b/api/controllers/console/workspace/members.py @@ -6,7 +6,7 @@ from flask_restx import Resource, marshal_with, reqparse import services from configs import dify_config -from controllers.console import api +from controllers.console import console_ns from controllers.console.auth.error import ( CannotTransferOwnerToSelfError, EmailCodeError, @@ -33,6 +33,7 @@ from services.errors.account import AccountAlreadyInTenantError from services.feature_service import FeatureService +@console_ns.route("/workspaces/current/members") class MemberListApi(Resource): """List all members of current tenant.""" @@ -49,6 +50,7 @@ class MemberListApi(Resource): return {"result": "success", "accounts": members}, 200 +@console_ns.route("/workspaces/current/members/invite-email") class MemberInviteEmailApi(Resource): """Invite a new member by email.""" @@ -111,6 +113,7 @@ class MemberInviteEmailApi(Resource): }, 201 +@console_ns.route("/workspaces/current/members/") class MemberCancelInviteApi(Resource): """Cancel an invitation by member id.""" @@ -143,6 +146,7 @@ class MemberCancelInviteApi(Resource): }, 200 +@console_ns.route("/workspaces/current/members//update-role") 
class MemberUpdateRoleApi(Resource): """Update member role.""" @@ -177,6 +181,7 @@ class MemberUpdateRoleApi(Resource): return {"result": "success"} +@console_ns.route("/workspaces/current/dataset-operators") class DatasetOperatorMemberListApi(Resource): """List all members of current tenant.""" @@ -193,6 +198,7 @@ class DatasetOperatorMemberListApi(Resource): return {"result": "success", "accounts": members}, 200 +@console_ns.route("/workspaces/current/members/send-owner-transfer-confirm-email") class SendOwnerTransferEmailApi(Resource): """Send owner transfer email.""" @@ -233,6 +239,7 @@ class SendOwnerTransferEmailApi(Resource): return {"result": "success", "data": token} +@console_ns.route("/workspaces/current/members/owner-transfer-check") class OwnerTransferCheckApi(Resource): @setup_required @login_required @@ -278,6 +285,7 @@ class OwnerTransferCheckApi(Resource): return {"is_valid": True, "email": token_data.get("email"), "token": new_token} +@console_ns.route("/workspaces/current/members//owner-transfer") class OwnerTransfer(Resource): @setup_required @login_required @@ -339,14 +347,3 @@ class OwnerTransfer(Resource): raise ValueError(str(e)) return {"result": "success"} - - -api.add_resource(MemberListApi, "/workspaces/current/members") -api.add_resource(MemberInviteEmailApi, "/workspaces/current/members/invite-email") -api.add_resource(MemberCancelInviteApi, "/workspaces/current/members/") -api.add_resource(MemberUpdateRoleApi, "/workspaces/current/members//update-role") -api.add_resource(DatasetOperatorMemberListApi, "/workspaces/current/dataset-operators") -# owner transfer -api.add_resource(SendOwnerTransferEmailApi, "/workspaces/current/members/send-owner-transfer-confirm-email") -api.add_resource(OwnerTransferCheckApi, "/workspaces/current/members/owner-transfer-check") -api.add_resource(OwnerTransfer, "/workspaces/current/members//owner-transfer") diff --git a/api/controllers/console/workspace/model_providers.py b/api/controllers/console/workspace/model_providers.py index 0c9db660aa..7012580362 100644 --- a/api/controllers/console/workspace/model_providers.py +++ b/api/controllers/console/workspace/model_providers.py @@ -5,7 +5,7 @@ from flask_login import current_user from flask_restx import Resource, reqparse from werkzeug.exceptions import Forbidden -from controllers.console import api +from controllers.console import console_ns from controllers.console.wraps import account_initialization_required, setup_required from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.errors.validate import CredentialsValidateFailedError @@ -17,6 +17,7 @@ from services.billing_service import BillingService from services.model_provider_service import ModelProviderService +@console_ns.route("/workspaces/current/model-providers") class ModelProviderListApi(Resource): @setup_required @login_required @@ -45,6 +46,7 @@ class ModelProviderListApi(Resource): return jsonable_encoder({"data": provider_list}) +@console_ns.route("/workspaces/current/model-providers//credentials") class ModelProviderCredentialApi(Resource): @setup_required @login_required @@ -151,6 +153,7 @@ class ModelProviderCredentialApi(Resource): return {"result": "success"}, 204 +@console_ns.route("/workspaces/current/model-providers//credentials/switch") class ModelProviderCredentialSwitchApi(Resource): @setup_required @login_required @@ -175,6 +178,7 @@ class ModelProviderCredentialSwitchApi(Resource): return {"result": "success"} 
+@console_ns.route("/workspaces/current/model-providers//credentials/validate") class ModelProviderValidateApi(Resource): @setup_required @login_required @@ -211,6 +215,7 @@ class ModelProviderValidateApi(Resource): return response +@console_ns.route("/workspaces//model-providers///") class ModelProviderIconApi(Resource): """ Get model provider icon @@ -229,6 +234,7 @@ class ModelProviderIconApi(Resource): return send_file(io.BytesIO(icon), mimetype=mimetype) +@console_ns.route("/workspaces/current/model-providers//preferred-provider-type") class PreferredProviderTypeUpdateApi(Resource): @setup_required @login_required @@ -262,6 +268,7 @@ class PreferredProviderTypeUpdateApi(Resource): return {"result": "success"} +@console_ns.route("/workspaces/current/model-providers//checkout-url") class ModelProviderPaymentCheckoutUrlApi(Resource): @setup_required @login_required @@ -281,21 +288,3 @@ class ModelProviderPaymentCheckoutUrlApi(Resource): prefilled_email=current_user.email, ) return data - - -api.add_resource(ModelProviderListApi, "/workspaces/current/model-providers") - -api.add_resource(ModelProviderCredentialApi, "/workspaces/current/model-providers//credentials") -api.add_resource( - ModelProviderCredentialSwitchApi, "/workspaces/current/model-providers//credentials/switch" -) -api.add_resource(ModelProviderValidateApi, "/workspaces/current/model-providers//credentials/validate") - -api.add_resource( - PreferredProviderTypeUpdateApi, "/workspaces/current/model-providers//preferred-provider-type" -) -api.add_resource(ModelProviderPaymentCheckoutUrlApi, "/workspaces/current/model-providers//checkout-url") -api.add_resource( - ModelProviderIconApi, - "/workspaces//model-providers///", -) diff --git a/api/controllers/console/workspace/models.py b/api/controllers/console/workspace/models.py index f174fcc5d3..d38bb16ea7 100644 --- a/api/controllers/console/workspace/models.py +++ b/api/controllers/console/workspace/models.py @@ -4,7 +4,7 @@ from flask_login import current_user from flask_restx import Resource, reqparse from werkzeug.exceptions import Forbidden -from controllers.console import api +from controllers.console import console_ns from controllers.console.wraps import account_initialization_required, setup_required from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.errors.validate import CredentialsValidateFailedError @@ -17,6 +17,7 @@ from services.model_provider_service import ModelProviderService logger = logging.getLogger(__name__) +@console_ns.route("/workspaces/current/default-model") class DefaultModelApi(Resource): @setup_required @login_required @@ -85,6 +86,7 @@ class DefaultModelApi(Resource): return {"result": "success"} +@console_ns.route("/workspaces/current/model-providers//models") class ModelProviderModelApi(Resource): @setup_required @login_required @@ -187,6 +189,7 @@ class ModelProviderModelApi(Resource): return {"result": "success"}, 204 +@console_ns.route("/workspaces/current/model-providers//models/credentials") class ModelProviderModelCredentialApi(Resource): @setup_required @login_required @@ -364,6 +367,7 @@ class ModelProviderModelCredentialApi(Resource): return {"result": "success"}, 204 +@console_ns.route("/workspaces/current/model-providers//models/credentials/switch") class ModelProviderModelCredentialSwitchApi(Resource): @setup_required @login_required @@ -395,6 +399,9 @@ class ModelProviderModelCredentialSwitchApi(Resource): return {"result": "success"} +@console_ns.route( + 
"/workspaces/current/model-providers//models/enable", endpoint="model-provider-model-enable" +) class ModelProviderModelEnableApi(Resource): @setup_required @login_required @@ -422,6 +429,9 @@ class ModelProviderModelEnableApi(Resource): return {"result": "success"} +@console_ns.route( + "/workspaces/current/model-providers//models/disable", endpoint="model-provider-model-disable" +) class ModelProviderModelDisableApi(Resource): @setup_required @login_required @@ -449,6 +459,7 @@ class ModelProviderModelDisableApi(Resource): return {"result": "success"} +@console_ns.route("/workspaces/current/model-providers//models/credentials/validate") class ModelProviderModelValidateApi(Resource): @setup_required @login_required @@ -494,6 +505,7 @@ class ModelProviderModelValidateApi(Resource): return response +@console_ns.route("/workspaces/current/model-providers//models/parameter-rules") class ModelProviderModelParameterRuleApi(Resource): @setup_required @login_required @@ -513,6 +525,7 @@ class ModelProviderModelParameterRuleApi(Resource): return jsonable_encoder({"data": parameter_rules}) +@console_ns.route("/workspaces/current/models/model-types/") class ModelProviderAvailableModelApi(Resource): @setup_required @login_required @@ -524,32 +537,3 @@ class ModelProviderAvailableModelApi(Resource): models = model_provider_service.get_models_by_model_type(tenant_id=tenant_id, model_type=model_type) return jsonable_encoder({"data": models}) - - -api.add_resource(ModelProviderModelApi, "/workspaces/current/model-providers//models") -api.add_resource( - ModelProviderModelEnableApi, - "/workspaces/current/model-providers//models/enable", - endpoint="model-provider-model-enable", -) -api.add_resource( - ModelProviderModelDisableApi, - "/workspaces/current/model-providers//models/disable", - endpoint="model-provider-model-disable", -) -api.add_resource( - ModelProviderModelCredentialApi, "/workspaces/current/model-providers//models/credentials" -) -api.add_resource( - ModelProviderModelCredentialSwitchApi, - "/workspaces/current/model-providers//models/credentials/switch", -) -api.add_resource( - ModelProviderModelValidateApi, "/workspaces/current/model-providers//models/credentials/validate" -) - -api.add_resource( - ModelProviderModelParameterRuleApi, "/workspaces/current/model-providers//models/parameter-rules" -) -api.add_resource(ModelProviderAvailableModelApi, "/workspaces/current/models/model-types/") -api.add_resource(DefaultModelApi, "/workspaces/current/default-model") diff --git a/api/controllers/console/workspace/plugin.py b/api/controllers/console/workspace/plugin.py index fd5421fa64..7c70fb8aa0 100644 --- a/api/controllers/console/workspace/plugin.py +++ b/api/controllers/console/workspace/plugin.py @@ -6,7 +6,7 @@ from flask_restx import Resource, reqparse from werkzeug.exceptions import Forbidden from configs import dify_config -from controllers.console import api +from controllers.console import console_ns from controllers.console.workspace import plugin_permission_required from controllers.console.wraps import account_initialization_required, setup_required from core.model_runtime.utils.encoders import jsonable_encoder @@ -19,6 +19,7 @@ from services.plugin.plugin_permission_service import PluginPermissionService from services.plugin.plugin_service import PluginService +@console_ns.route("/workspaces/current/plugin/debugging-key") class PluginDebuggingKeyApi(Resource): @setup_required @login_required @@ -37,6 +38,7 @@ class PluginDebuggingKeyApi(Resource): raise ValueError(e) 
+@console_ns.route("/workspaces/current/plugin/list") class PluginListApi(Resource): @setup_required @login_required @@ -55,6 +57,7 @@ class PluginListApi(Resource): return jsonable_encoder({"plugins": plugins_with_total.list, "total": plugins_with_total.total}) +@console_ns.route("/workspaces/current/plugin/list/latest-versions") class PluginListLatestVersionsApi(Resource): @setup_required @login_required @@ -72,6 +75,7 @@ class PluginListLatestVersionsApi(Resource): return jsonable_encoder({"versions": versions}) +@console_ns.route("/workspaces/current/plugin/list/installations/ids") class PluginListInstallationsFromIdsApi(Resource): @setup_required @login_required @@ -91,6 +95,7 @@ class PluginListInstallationsFromIdsApi(Resource): return jsonable_encoder({"plugins": plugins}) +@console_ns.route("/workspaces/current/plugin/icon") class PluginIconApi(Resource): @setup_required def get(self): @@ -108,6 +113,7 @@ class PluginIconApi(Resource): return send_file(io.BytesIO(icon_bytes), mimetype=mimetype, max_age=icon_cache_max_age) +@console_ns.route("/workspaces/current/plugin/upload/pkg") class PluginUploadFromPkgApi(Resource): @setup_required @login_required @@ -131,6 +137,7 @@ class PluginUploadFromPkgApi(Resource): return jsonable_encoder(response) +@console_ns.route("/workspaces/current/plugin/upload/github") class PluginUploadFromGithubApi(Resource): @setup_required @login_required @@ -153,6 +160,7 @@ class PluginUploadFromGithubApi(Resource): return jsonable_encoder(response) +@console_ns.route("/workspaces/current/plugin/upload/bundle") class PluginUploadFromBundleApi(Resource): @setup_required @login_required @@ -176,6 +184,7 @@ class PluginUploadFromBundleApi(Resource): return jsonable_encoder(response) +@console_ns.route("/workspaces/current/plugin/install/pkg") class PluginInstallFromPkgApi(Resource): @setup_required @login_required @@ -201,6 +210,7 @@ class PluginInstallFromPkgApi(Resource): return jsonable_encoder(response) +@console_ns.route("/workspaces/current/plugin/install/github") class PluginInstallFromGithubApi(Resource): @setup_required @login_required @@ -230,6 +240,7 @@ class PluginInstallFromGithubApi(Resource): return jsonable_encoder(response) +@console_ns.route("/workspaces/current/plugin/install/marketplace") class PluginInstallFromMarketplaceApi(Resource): @setup_required @login_required @@ -255,6 +266,7 @@ class PluginInstallFromMarketplaceApi(Resource): return jsonable_encoder(response) +@console_ns.route("/workspaces/current/plugin/marketplace/pkg") class PluginFetchMarketplacePkgApi(Resource): @setup_required @login_required @@ -280,6 +292,7 @@ class PluginFetchMarketplacePkgApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/fetch-manifest") class PluginFetchManifestApi(Resource): @setup_required @login_required @@ -304,6 +317,7 @@ class PluginFetchManifestApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/tasks") class PluginFetchInstallTasksApi(Resource): @setup_required @login_required @@ -325,6 +339,7 @@ class PluginFetchInstallTasksApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/tasks/") class PluginFetchInstallTaskApi(Resource): @setup_required @login_required @@ -339,6 +354,7 @@ class PluginFetchInstallTaskApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/tasks//delete") class PluginDeleteInstallTaskApi(Resource): @setup_required @login_required @@ -353,6 +369,7 @@ class PluginDeleteInstallTaskApi(Resource): raise 
ValueError(e) +@console_ns.route("/workspaces/current/plugin/tasks/delete_all") class PluginDeleteAllInstallTaskItemsApi(Resource): @setup_required @login_required @@ -367,6 +384,7 @@ class PluginDeleteAllInstallTaskItemsApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/tasks//delete/") class PluginDeleteInstallTaskItemApi(Resource): @setup_required @login_required @@ -381,6 +399,7 @@ class PluginDeleteInstallTaskItemApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/upgrade/marketplace") class PluginUpgradeFromMarketplaceApi(Resource): @setup_required @login_required @@ -404,6 +423,7 @@ class PluginUpgradeFromMarketplaceApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/upgrade/github") class PluginUpgradeFromGithubApi(Resource): @setup_required @login_required @@ -435,6 +455,7 @@ class PluginUpgradeFromGithubApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/uninstall") class PluginUninstallApi(Resource): @setup_required @login_required @@ -453,6 +474,7 @@ class PluginUninstallApi(Resource): raise ValueError(e) +@console_ns.route("/workspaces/current/plugin/permission/change") class PluginChangePermissionApi(Resource): @setup_required @login_required @@ -475,6 +497,7 @@ class PluginChangePermissionApi(Resource): return {"success": PluginPermissionService.change_permission(tenant_id, install_permission, debug_permission)} +@console_ns.route("/workspaces/current/plugin/permission/fetch") class PluginFetchPermissionApi(Resource): @setup_required @login_required @@ -499,6 +522,7 @@ class PluginFetchPermissionApi(Resource): ) +@console_ns.route("/workspaces/current/plugin/parameters/dynamic-options") class PluginFetchDynamicSelectOptionsApi(Resource): @setup_required @login_required @@ -535,6 +559,7 @@ class PluginFetchDynamicSelectOptionsApi(Resource): return jsonable_encoder({"options": options}) +@console_ns.route("/workspaces/current/plugin/preferences/change") class PluginChangePreferencesApi(Resource): @setup_required @login_required @@ -590,6 +615,7 @@ class PluginChangePreferencesApi(Resource): return jsonable_encoder({"success": True}) +@console_ns.route("/workspaces/current/plugin/preferences/fetch") class PluginFetchPreferencesApi(Resource): @setup_required @login_required @@ -628,6 +654,7 @@ class PluginFetchPreferencesApi(Resource): return jsonable_encoder({"permission": permission_dict, "auto_upgrade": auto_upgrade_dict}) +@console_ns.route("/workspaces/current/plugin/preferences/autoupgrade/exclude") class PluginAutoUpgradeExcludePluginApi(Resource): @setup_required @login_required @@ -641,35 +668,3 @@ class PluginAutoUpgradeExcludePluginApi(Resource): args = req.parse_args() return jsonable_encoder({"success": PluginAutoUpgradeService.exclude_plugin(tenant_id, args["plugin_id"])}) - - -api.add_resource(PluginDebuggingKeyApi, "/workspaces/current/plugin/debugging-key") -api.add_resource(PluginListApi, "/workspaces/current/plugin/list") -api.add_resource(PluginListLatestVersionsApi, "/workspaces/current/plugin/list/latest-versions") -api.add_resource(PluginListInstallationsFromIdsApi, "/workspaces/current/plugin/list/installations/ids") -api.add_resource(PluginIconApi, "/workspaces/current/plugin/icon") -api.add_resource(PluginUploadFromPkgApi, "/workspaces/current/plugin/upload/pkg") -api.add_resource(PluginUploadFromGithubApi, "/workspaces/current/plugin/upload/github") -api.add_resource(PluginUploadFromBundleApi, 
"/workspaces/current/plugin/upload/bundle") -api.add_resource(PluginInstallFromPkgApi, "/workspaces/current/plugin/install/pkg") -api.add_resource(PluginInstallFromGithubApi, "/workspaces/current/plugin/install/github") -api.add_resource(PluginUpgradeFromMarketplaceApi, "/workspaces/current/plugin/upgrade/marketplace") -api.add_resource(PluginUpgradeFromGithubApi, "/workspaces/current/plugin/upgrade/github") -api.add_resource(PluginInstallFromMarketplaceApi, "/workspaces/current/plugin/install/marketplace") -api.add_resource(PluginFetchManifestApi, "/workspaces/current/plugin/fetch-manifest") -api.add_resource(PluginFetchInstallTasksApi, "/workspaces/current/plugin/tasks") -api.add_resource(PluginFetchInstallTaskApi, "/workspaces/current/plugin/tasks/") -api.add_resource(PluginDeleteInstallTaskApi, "/workspaces/current/plugin/tasks//delete") -api.add_resource(PluginDeleteAllInstallTaskItemsApi, "/workspaces/current/plugin/tasks/delete_all") -api.add_resource(PluginDeleteInstallTaskItemApi, "/workspaces/current/plugin/tasks//delete/") -api.add_resource(PluginUninstallApi, "/workspaces/current/plugin/uninstall") -api.add_resource(PluginFetchMarketplacePkgApi, "/workspaces/current/plugin/marketplace/pkg") - -api.add_resource(PluginChangePermissionApi, "/workspaces/current/plugin/permission/change") -api.add_resource(PluginFetchPermissionApi, "/workspaces/current/plugin/permission/fetch") - -api.add_resource(PluginFetchDynamicSelectOptionsApi, "/workspaces/current/plugin/parameters/dynamic-options") - -api.add_resource(PluginFetchPreferencesApi, "/workspaces/current/plugin/preferences/fetch") -api.add_resource(PluginChangePreferencesApi, "/workspaces/current/plugin/preferences/change") -api.add_resource(PluginAutoUpgradeExcludePluginApi, "/workspaces/current/plugin/preferences/autoupgrade/exclude") diff --git a/api/controllers/console/workspace/tool_providers.py b/api/controllers/console/workspace/tool_providers.py index 8693d99e23..9285577f72 100644 --- a/api/controllers/console/workspace/tool_providers.py +++ b/api/controllers/console/workspace/tool_providers.py @@ -10,7 +10,7 @@ from flask_restx import ( from werkzeug.exceptions import Forbidden from configs import dify_config -from controllers.console import api +from controllers.console import console_ns from controllers.console.wraps import ( account_initialization_required, enterprise_license_required, @@ -47,6 +47,7 @@ def is_valid_url(url: str) -> bool: return False +@console_ns.route("/workspaces/current/tool-providers") class ToolProviderListApi(Resource): @setup_required @login_required @@ -71,6 +72,7 @@ class ToolProviderListApi(Resource): return ToolCommonService.list_tool_providers(user_id, tenant_id, args.get("type", None)) +@console_ns.route("/workspaces/current/tool-provider/builtin//tools") class ToolBuiltinProviderListToolsApi(Resource): @setup_required @login_required @@ -88,6 +90,7 @@ class ToolBuiltinProviderListToolsApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/builtin//info") class ToolBuiltinProviderInfoApi(Resource): @setup_required @login_required @@ -100,6 +103,7 @@ class ToolBuiltinProviderInfoApi(Resource): return jsonable_encoder(BuiltinToolManageService.get_builtin_tool_provider_info(tenant_id, provider)) +@console_ns.route("/workspaces/current/tool-provider/builtin//delete") class ToolBuiltinProviderDeleteApi(Resource): @setup_required @login_required @@ -121,6 +125,7 @@ class ToolBuiltinProviderDeleteApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/builtin//add") 
class ToolBuiltinProviderAddApi(Resource): @setup_required @login_required @@ -150,6 +155,7 @@ class ToolBuiltinProviderAddApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/builtin//update") class ToolBuiltinProviderUpdateApi(Resource): @setup_required @login_required @@ -181,6 +187,7 @@ class ToolBuiltinProviderUpdateApi(Resource): return result +@console_ns.route("/workspaces/current/tool-provider/builtin//credentials") class ToolBuiltinProviderGetCredentialsApi(Resource): @setup_required @login_required @@ -196,6 +203,7 @@ class ToolBuiltinProviderGetCredentialsApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/builtin//icon") class ToolBuiltinProviderIconApi(Resource): @setup_required def get(self, provider): @@ -204,6 +212,7 @@ class ToolBuiltinProviderIconApi(Resource): return send_file(io.BytesIO(icon_bytes), mimetype=mimetype, max_age=icon_cache_max_age) +@console_ns.route("/workspaces/current/tool-provider/api/add") class ToolApiProviderAddApi(Resource): @setup_required @login_required @@ -243,6 +252,7 @@ class ToolApiProviderAddApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/api/remote") class ToolApiProviderGetRemoteSchemaApi(Resource): @setup_required @login_required @@ -266,6 +276,7 @@ class ToolApiProviderGetRemoteSchemaApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/api/tools") class ToolApiProviderListToolsApi(Resource): @setup_required @login_required @@ -291,6 +302,7 @@ class ToolApiProviderListToolsApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/api/update") class ToolApiProviderUpdateApi(Resource): @setup_required @login_required @@ -332,6 +344,7 @@ class ToolApiProviderUpdateApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/api/delete") class ToolApiProviderDeleteApi(Resource): @setup_required @login_required @@ -358,6 +371,7 @@ class ToolApiProviderDeleteApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/api/get") class ToolApiProviderGetApi(Resource): @setup_required @login_required @@ -381,6 +395,7 @@ class ToolApiProviderGetApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/builtin//credential/schema/") class ToolBuiltinProviderCredentialsSchemaApi(Resource): @setup_required @login_required @@ -396,6 +411,7 @@ class ToolBuiltinProviderCredentialsSchemaApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/api/schema") class ToolApiProviderSchemaApi(Resource): @setup_required @login_required @@ -412,6 +428,7 @@ class ToolApiProviderSchemaApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/api/test/pre") class ToolApiProviderPreviousTestApi(Resource): @setup_required @login_required @@ -439,6 +456,7 @@ class ToolApiProviderPreviousTestApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/workflow/create") class ToolWorkflowProviderCreateApi(Resource): @setup_required @login_required @@ -478,6 +496,7 @@ class ToolWorkflowProviderCreateApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/workflow/update") class ToolWorkflowProviderUpdateApi(Resource): @setup_required @login_required @@ -520,6 +539,7 @@ class ToolWorkflowProviderUpdateApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/workflow/delete") class ToolWorkflowProviderDeleteApi(Resource): @setup_required @login_required @@ -545,6 +565,7 @@ class ToolWorkflowProviderDeleteApi(Resource): ) 
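One detail worth noting before the remaining hunks: wherever the old api.add_resource calls passed an explicit endpoint= (the model enable/disable routes above, and the TenantApi pair later in this series), the migration carries that keyword through @console_ns.route unchanged. A hedged sketch of why, modeled on the /workspaces/current plus deprecated /info pair (toy wiring, assumed names):

    from flask import Flask
    from flask_restx import Api, Namespace, Resource

    app = Flask(__name__)
    api = Api(app)
    ns = Namespace("console", path="/console/api")

    # One Resource class served under two URLs. flask-restx derives the
    # default endpoint name from the class name, so registering the same
    # class twice without endpoint= makes Flask reject the second route as
    # an endpoint collision; distinct names keep both URLs dispatchable.
    @ns.route("/workspaces/current", endpoint="workspaces_current")
    @ns.route("/info", endpoint="info")  # deprecated alias kept for compatibility
    class TenantApi(Resource):
        def get(self):
            return {"name": "example-workspace"}

    api.add_namespace(ns)

With distinct endpoint names both routes coexist in Flask's endpoint map and dispatch to the same handler, mirroring the workspace.py hunk near the end of this patch.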
+@console_ns.route("/workspaces/current/tool-provider/workflow/get") class ToolWorkflowProviderGetApi(Resource): @setup_required @login_required @@ -579,6 +600,7 @@ class ToolWorkflowProviderGetApi(Resource): return jsonable_encoder(tool) +@console_ns.route("/workspaces/current/tool-provider/workflow/tools") class ToolWorkflowProviderListToolApi(Resource): @setup_required @login_required @@ -603,6 +625,7 @@ class ToolWorkflowProviderListToolApi(Resource): ) +@console_ns.route("/workspaces/current/tools/builtin") class ToolBuiltinListApi(Resource): @setup_required @login_required @@ -624,6 +647,7 @@ class ToolBuiltinListApi(Resource): ) +@console_ns.route("/workspaces/current/tools/api") class ToolApiListApi(Resource): @setup_required @login_required @@ -642,6 +666,7 @@ class ToolApiListApi(Resource): ) +@console_ns.route("/workspaces/current/tools/workflow") class ToolWorkflowListApi(Resource): @setup_required @login_required @@ -663,6 +688,7 @@ class ToolWorkflowListApi(Resource): ) +@console_ns.route("/workspaces/current/tool-labels") class ToolLabelsApi(Resource): @setup_required @login_required @@ -672,6 +698,7 @@ class ToolLabelsApi(Resource): return jsonable_encoder(ToolLabelsService.list_tool_labels()) +@console_ns.route("/oauth/plugin//tool/authorization-url") class ToolPluginOAuthApi(Resource): @setup_required @login_required @@ -716,6 +743,7 @@ class ToolPluginOAuthApi(Resource): return response +@console_ns.route("/oauth/plugin//tool/callback") class ToolOAuthCallback(Resource): @setup_required def get(self, provider): @@ -766,6 +794,7 @@ class ToolOAuthCallback(Resource): return redirect(f"{dify_config.CONSOLE_WEB_URL}/oauth-callback") +@console_ns.route("/workspaces/current/tool-provider/builtin//default-credential") class ToolBuiltinProviderSetDefaultApi(Resource): @setup_required @login_required @@ -779,6 +808,7 @@ class ToolBuiltinProviderSetDefaultApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/builtin//oauth/custom-client") class ToolOAuthCustomClient(Resource): @setup_required @login_required @@ -822,6 +852,7 @@ class ToolOAuthCustomClient(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/builtin//oauth/client-schema") class ToolBuiltinProviderGetOauthClientSchemaApi(Resource): @setup_required @login_required @@ -834,6 +865,7 @@ class ToolBuiltinProviderGetOauthClientSchemaApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/builtin//credential/info") class ToolBuiltinProviderGetCredentialInfoApi(Resource): @setup_required @login_required @@ -849,6 +881,7 @@ class ToolBuiltinProviderGetCredentialInfoApi(Resource): ) +@console_ns.route("/workspaces/current/tool-provider/mcp") class ToolProviderMCPApi(Resource): @setup_required @login_required @@ -933,6 +966,7 @@ class ToolProviderMCPApi(Resource): return {"result": "success"} +@console_ns.route("/workspaces/current/tool-provider/mcp/auth") class ToolMCPAuthApi(Resource): @setup_required @login_required @@ -978,6 +1012,7 @@ class ToolMCPAuthApi(Resource): raise ValueError(f"Failed to connect to MCP server: {e}") from e +@console_ns.route("/workspaces/current/tool-provider/mcp/tools/") class ToolMCPDetailApi(Resource): @setup_required @login_required @@ -988,6 +1023,7 @@ class ToolMCPDetailApi(Resource): return jsonable_encoder(ToolTransformService.mcp_provider_to_user_provider(provider, for_list=True)) +@console_ns.route("/workspaces/current/tools/mcp") class ToolMCPListAllApi(Resource): @setup_required @login_required @@ -1001,6 +1037,7 @@ class 
ToolMCPListAllApi(Resource): return [tool.to_dict() for tool in tools] +@console_ns.route("/workspaces/current/tool-provider/mcp/update/") class ToolMCPUpdateApi(Resource): @setup_required @login_required @@ -1014,6 +1051,7 @@ class ToolMCPUpdateApi(Resource): return jsonable_encoder(tools) +@console_ns.route("/mcp/oauth/callback") class ToolMCPCallbackApi(Resource): def get(self): parser = reqparse.RequestParser() @@ -1024,67 +1062,3 @@ class ToolMCPCallbackApi(Resource): authorization_code = args["code"] handle_callback(state_key, authorization_code) return redirect(f"{dify_config.CONSOLE_WEB_URL}/oauth-callback") - - -# tool provider -api.add_resource(ToolProviderListApi, "/workspaces/current/tool-providers") - -# tool oauth -api.add_resource(ToolPluginOAuthApi, "/oauth/plugin//tool/authorization-url") -api.add_resource(ToolOAuthCallback, "/oauth/plugin//tool/callback") -api.add_resource(ToolOAuthCustomClient, "/workspaces/current/tool-provider/builtin//oauth/custom-client") - -# builtin tool provider -api.add_resource(ToolBuiltinProviderListToolsApi, "/workspaces/current/tool-provider/builtin//tools") -api.add_resource(ToolBuiltinProviderInfoApi, "/workspaces/current/tool-provider/builtin//info") -api.add_resource(ToolBuiltinProviderAddApi, "/workspaces/current/tool-provider/builtin//add") -api.add_resource(ToolBuiltinProviderDeleteApi, "/workspaces/current/tool-provider/builtin//delete") -api.add_resource(ToolBuiltinProviderUpdateApi, "/workspaces/current/tool-provider/builtin//update") -api.add_resource( - ToolBuiltinProviderSetDefaultApi, "/workspaces/current/tool-provider/builtin//default-credential" -) -api.add_resource( - ToolBuiltinProviderGetCredentialInfoApi, "/workspaces/current/tool-provider/builtin//credential/info" -) -api.add_resource( - ToolBuiltinProviderGetCredentialsApi, "/workspaces/current/tool-provider/builtin//credentials" -) -api.add_resource( - ToolBuiltinProviderCredentialsSchemaApi, - "/workspaces/current/tool-provider/builtin//credential/schema/", -) -api.add_resource( - ToolBuiltinProviderGetOauthClientSchemaApi, - "/workspaces/current/tool-provider/builtin//oauth/client-schema", -) -api.add_resource(ToolBuiltinProviderIconApi, "/workspaces/current/tool-provider/builtin//icon") - -# api tool provider -api.add_resource(ToolApiProviderAddApi, "/workspaces/current/tool-provider/api/add") -api.add_resource(ToolApiProviderGetRemoteSchemaApi, "/workspaces/current/tool-provider/api/remote") -api.add_resource(ToolApiProviderListToolsApi, "/workspaces/current/tool-provider/api/tools") -api.add_resource(ToolApiProviderUpdateApi, "/workspaces/current/tool-provider/api/update") -api.add_resource(ToolApiProviderDeleteApi, "/workspaces/current/tool-provider/api/delete") -api.add_resource(ToolApiProviderGetApi, "/workspaces/current/tool-provider/api/get") -api.add_resource(ToolApiProviderSchemaApi, "/workspaces/current/tool-provider/api/schema") -api.add_resource(ToolApiProviderPreviousTestApi, "/workspaces/current/tool-provider/api/test/pre") - -# workflow tool provider -api.add_resource(ToolWorkflowProviderCreateApi, "/workspaces/current/tool-provider/workflow/create") -api.add_resource(ToolWorkflowProviderUpdateApi, "/workspaces/current/tool-provider/workflow/update") -api.add_resource(ToolWorkflowProviderDeleteApi, "/workspaces/current/tool-provider/workflow/delete") -api.add_resource(ToolWorkflowProviderGetApi, "/workspaces/current/tool-provider/workflow/get") -api.add_resource(ToolWorkflowProviderListToolApi, "/workspaces/current/tool-provider/workflow/tools") - -# 
mcp tool provider -api.add_resource(ToolMCPDetailApi, "/workspaces/current/tool-provider/mcp/tools/") -api.add_resource(ToolProviderMCPApi, "/workspaces/current/tool-provider/mcp") -api.add_resource(ToolMCPUpdateApi, "/workspaces/current/tool-provider/mcp/update/") -api.add_resource(ToolMCPAuthApi, "/workspaces/current/tool-provider/mcp/auth") -api.add_resource(ToolMCPCallbackApi, "/mcp/oauth/callback") - -api.add_resource(ToolBuiltinListApi, "/workspaces/current/tools/builtin") -api.add_resource(ToolApiListApi, "/workspaces/current/tools/api") -api.add_resource(ToolMCPListAllApi, "/workspaces/current/tools/mcp") -api.add_resource(ToolWorkflowListApi, "/workspaces/current/tools/workflow") -api.add_resource(ToolLabelsApi, "/workspaces/current/tool-labels") diff --git a/api/controllers/console/workspace/workspace.py b/api/controllers/console/workspace/workspace.py index 13a61052ae..bc748ac3d2 100644 --- a/api/controllers/console/workspace/workspace.py +++ b/api/controllers/console/workspace/workspace.py @@ -120,8 +120,8 @@ class WorkspaceListApi(Resource): }, 200 -@console_ns.route("/workspaces/current") -@console_ns.route("/info") # Deprecated +@console_ns.route("/workspaces/current", endpoint="workspaces_current") +@console_ns.route("/info", endpoint="info") # Deprecated class TenantApi(Resource): @setup_required @login_required From aead192743a43392a62c9c082694d6c37ce35267 Mon Sep 17 00:00:00 2001 From: Xiyuan Chen <52963600+GareArc@users.noreply.github.com> Date: Fri, 10 Oct 2025 01:24:36 -0700 Subject: [PATCH 12/49] Fix/token exp when exchange main (#26708) --- api/controllers/web/passport.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/api/controllers/web/passport.py b/api/controllers/web/passport.py index 6f7105a724..7190f06426 100644 --- a/api/controllers/web/passport.py +++ b/api/controllers/web/passport.py @@ -126,6 +126,8 @@ def exchange_token_for_existing_web_user(app_code: str, enterprise_user_decoded: end_user_id = enterprise_user_decoded.get("end_user_id") session_id = enterprise_user_decoded.get("session_id") user_auth_type = enterprise_user_decoded.get("auth_type") + exchanged_token_expires_unix = enterprise_user_decoded.get("exp") + if not user_auth_type: raise Unauthorized("Missing auth_type in the token.") @@ -169,8 +171,11 @@ def exchange_token_for_existing_web_user(app_code: str, enterprise_user_decoded: ) db.session.add(end_user) db.session.commit() - exp_dt = datetime.now(UTC) + timedelta(minutes=dify_config.ACCESS_TOKEN_EXPIRE_MINUTES) - exp = int(exp_dt.timestamp()) + + exp = int((datetime.now(UTC) + timedelta(minutes=dify_config.ACCESS_TOKEN_EXPIRE_MINUTES)).timestamp()) + if exchanged_token_expires_unix: + exp = int(exchanged_token_expires_unix) + payload = { "iss": site.id, "sub": "Web API Passport", From ab2eacb6c1a18d329213ecbe9ff780c39646594a Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Fri, 10 Oct 2025 17:30:13 +0900 Subject: [PATCH 13/49] use model_validate (#26182) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- api/controllers/console/app/model_config.py | 4 +- .../console/datasets/data_source.py | 18 +++++---- api/controllers/console/datasets/datasets.py | 36 +++++++++-------- .../console/datasets/datasets_document.py | 40 
++++++++++--------- .../console/datasets/datasets_segments.py | 4 +- api/controllers/console/datasets/metadata.py | 4 +- .../datasets/rag_pipeline/rag_pipeline.py | 2 +- api/controllers/inner_api/plugin/wraps.py | 2 +- .../service_api/dataset/dataset.py | 2 +- .../service_api/dataset/document.py | 8 ++-- .../service_api/dataset/metadata.py | 4 +- .../rag_pipeline/rag_pipeline_workflow.py | 2 +- .../service_api/dataset/segment.py | 2 +- .../easy_ui_based_app/agent/manager.py | 2 +- api/core/app/apps/pipeline/pipeline_runner.py | 2 +- .../datasource/entities/common_entities.py | 7 ++-- api/core/entities/provider_configuration.py | 8 ++-- .../helper/code_executor/code_executor.py | 2 +- api/core/helper/marketplace.py | 4 +- api/core/indexing_runner.py | 38 ++++++++++-------- api/core/mcp/session/client_session.py | 2 +- .../model_runtime/entities/common_entities.py | 7 ++-- .../entities/provider_entities.py | 7 ++-- .../model_providers/model_provider_factory.py | 12 +++--- api/core/moderation/api/api.py | 4 +- api/core/plugin/entities/request.py | 10 ++--- api/core/plugin/impl/base.py | 28 ++++++------- api/core/plugin/impl/datasource.py | 6 ++- api/core/plugin/impl/model.py | 18 ++++----- api/core/rag/datasource/retrieval_service.py | 2 +- .../rag/extractor/entity/extract_setting.py | 6 --- .../processor/paragraph_index_processor.py | 4 +- .../processor/parent_child_index_processor.py | 8 ++-- .../processor/qa_index_processor.py | 6 +-- api/core/tools/builtin_tool/provider.py | 2 +- api/core/tools/entities/common_entities.py | 7 ++-- api/core/tools/mcp_tool/provider.py | 2 +- api/core/tools/tool_manager.py | 2 +- .../command_channels/redis_channel.py | 4 +- api/core/workflow/nodes/end/end_node.py | 2 +- .../nodes/iteration/iteration_start_node.py | 2 +- api/core/workflow/nodes/list_operator/node.py | 2 +- api/core/workflow/nodes/loop/loop_end_node.py | 2 +- .../workflow/nodes/loop/loop_start_node.py | 2 +- api/core/workflow/nodes/start/start_node.py | 2 +- .../variable_aggregator_node.py | 2 +- ...rameters_cache_when_sync_draft_workflow.py | 2 +- ...oin_when_app_published_workflow_updated.py | 2 +- api/models/dataset.py | 4 +- api/models/tools.py | 11 +++-- api/services/app_dsl_service.py | 12 +++--- api/services/enterprise/enterprise_service.py | 4 +- .../entities/model_provider_entities.py | 23 ++++++----- api/services/hit_testing_service.py | 2 +- api/services/ops_service.py | 2 +- api/services/plugin/plugin_migration.py | 2 +- api/services/rag_pipeline/rag_pipeline.py | 2 +- .../rag_pipeline/rag_pipeline_dsl_service.py | 20 +++++----- .../rag_pipeline_transform_service.py | 4 +- api/services/tools/tools_transform_service.py | 3 +- api/tasks/ops_trace_task.py | 2 +- .../priority_rag_pipeline_run_task.py | 4 +- .../rag_pipeline/rag_pipeline_run_task.py | 4 +- .../tools/api_tool/test_api_tool.py | 2 +- .../rag/datasource/vdb/milvus/test_milvus.py | 4 +- .../core/workflow/nodes/test_list_operator.py | 2 +- .../nodes/test_question_classifier_node.py | 4 +- .../core/workflow/test_system_variable.py | 38 +++++++++--------- .../services/test_metadata_bug_complete.py | 4 +- .../services/test_metadata_nullable_bug.py | 2 +- 70 files changed, 260 insertions(+), 241 deletions(-) diff --git a/api/controllers/console/app/model_config.py b/api/controllers/console/app/model_config.py index 11df511840..e71b774d3e 100644 --- a/api/controllers/console/app/model_config.py +++ b/api/controllers/console/app/model_config.py @@ -90,7 +90,7 @@ class ModelConfigResource(Resource): if not isinstance(tool, dict) or 
len(tool.keys()) <= 3: continue - agent_tool_entity = AgentToolEntity(**tool) + agent_tool_entity = AgentToolEntity.model_validate(tool) # get tool try: tool_runtime = ToolManager.get_agent_tool_runtime( @@ -124,7 +124,7 @@ class ModelConfigResource(Resource): # encrypt agent tool parameters if it's secret-input agent_mode = new_app_model_config.agent_mode_dict for tool in agent_mode.get("tools") or []: - agent_tool_entity = AgentToolEntity(**tool) + agent_tool_entity = AgentToolEntity.model_validate(tool) # get tool key = f"{agent_tool_entity.provider_id}.{agent_tool_entity.provider_type}.{agent_tool_entity.tool_name}" diff --git a/api/controllers/console/datasets/data_source.py b/api/controllers/console/datasets/data_source.py index 370e0c0d14..b0f18c11d4 100644 --- a/api/controllers/console/datasets/data_source.py +++ b/api/controllers/console/datasets/data_source.py @@ -15,7 +15,7 @@ from core.datasource.entities.datasource_entities import DatasourceProviderType, from core.datasource.online_document.online_document_plugin import OnlineDocumentDatasourcePlugin from core.indexing_runner import IndexingRunner from core.rag.extractor.entity.datasource_type import DatasourceType -from core.rag.extractor.entity.extract_setting import ExtractSetting +from core.rag.extractor.entity.extract_setting import ExtractSetting, NotionInfo from core.rag.extractor.notion_extractor import NotionExtractor from extensions.ext_database import db from fields.data_source_fields import integrate_list_fields, integrate_notion_info_list_fields @@ -257,13 +257,15 @@ class DataSourceNotionApi(Resource): for page in notion_info["pages"]: extract_setting = ExtractSetting( datasource_type=DatasourceType.NOTION.value, - notion_info={ - "credential_id": credential_id, - "notion_workspace_id": workspace_id, - "notion_obj_id": page["page_id"], - "notion_page_type": page["type"], - "tenant_id": current_user.current_tenant_id, - }, + notion_info=NotionInfo.model_validate( + { + "credential_id": credential_id, + "notion_workspace_id": workspace_id, + "notion_obj_id": page["page_id"], + "notion_page_type": page["type"], + "tenant_id": current_user.current_tenant_id, + } + ), document_model=args["doc_form"], ) extract_settings.append(extract_setting) diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index ac088b790e..284f88ff1e 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -24,7 +24,7 @@ from core.model_runtime.entities.model_entities import ModelType from core.provider_manager import ProviderManager from core.rag.datasource.vdb.vector_type import VectorType from core.rag.extractor.entity.datasource_type import DatasourceType -from core.rag.extractor.entity.extract_setting import ExtractSetting +from core.rag.extractor.entity.extract_setting import ExtractSetting, NotionInfo, WebsiteInfo from core.rag.retrieval.retrieval_methods import RetrievalMethod from extensions.ext_database import db from fields.app_fields import related_app_list @@ -513,13 +513,15 @@ class DatasetIndexingEstimateApi(Resource): for page in notion_info["pages"]: extract_setting = ExtractSetting( datasource_type=DatasourceType.NOTION.value, - notion_info={ - "credential_id": credential_id, - "notion_workspace_id": workspace_id, - "notion_obj_id": page["page_id"], - "notion_page_type": page["type"], - "tenant_id": current_user.current_tenant_id, - }, + notion_info=NotionInfo.model_validate( + { + "credential_id": credential_id, + 
"notion_workspace_id": workspace_id, + "notion_obj_id": page["page_id"], + "notion_page_type": page["type"], + "tenant_id": current_user.current_tenant_id, + } + ), document_model=args["doc_form"], ) extract_settings.append(extract_setting) @@ -528,14 +530,16 @@ class DatasetIndexingEstimateApi(Resource): for url in website_info_list["urls"]: extract_setting = ExtractSetting( datasource_type=DatasourceType.WEBSITE.value, - website_info={ - "provider": website_info_list["provider"], - "job_id": website_info_list["job_id"], - "url": url, - "tenant_id": current_user.current_tenant_id, - "mode": "crawl", - "only_main_content": website_info_list["only_main_content"], - }, + website_info=WebsiteInfo.model_validate( + { + "provider": website_info_list["provider"], + "job_id": website_info_list["job_id"], + "url": url, + "tenant_id": current_user.current_tenant_id, + "mode": "crawl", + "only_main_content": website_info_list["only_main_content"], + } + ), document_model=args["doc_form"], ) extract_settings.append(extract_setting) diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py index c5fa2061bf..a90730e997 100644 --- a/api/controllers/console/datasets/datasets_document.py +++ b/api/controllers/console/datasets/datasets_document.py @@ -44,7 +44,7 @@ from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.errors.invoke import InvokeAuthorizationError from core.plugin.impl.exc import PluginDaemonClientSideError from core.rag.extractor.entity.datasource_type import DatasourceType -from core.rag.extractor.entity.extract_setting import ExtractSetting +from core.rag.extractor.entity.extract_setting import ExtractSetting, NotionInfo, WebsiteInfo from extensions.ext_database import db from fields.document_fields import ( dataset_and_document_fields, @@ -305,7 +305,7 @@ class DatasetDocumentListApi(Resource): "doc_language", type=str, default="English", required=False, nullable=False, location="json" ) args = parser.parse_args() - knowledge_config = KnowledgeConfig(**args) + knowledge_config = KnowledgeConfig.model_validate(args) if not dataset.indexing_technique and not knowledge_config.indexing_technique: raise ValueError("indexing_technique is required.") @@ -395,7 +395,7 @@ class DatasetInitApi(Resource): parser.add_argument("embedding_model_provider", type=str, required=False, nullable=True, location="json") args = parser.parse_args() - knowledge_config = KnowledgeConfig(**args) + knowledge_config = KnowledgeConfig.model_validate(args) if knowledge_config.indexing_technique == "high_quality": if knowledge_config.embedding_model is None or knowledge_config.embedding_model_provider is None: raise ValueError("embedding model and embedding model provider are required for high quality indexing.") @@ -547,13 +547,15 @@ class DocumentBatchIndexingEstimateApi(DocumentResource): continue extract_setting = ExtractSetting( datasource_type=DatasourceType.NOTION.value, - notion_info={ - "credential_id": data_source_info["credential_id"], - "notion_workspace_id": data_source_info["notion_workspace_id"], - "notion_obj_id": data_source_info["notion_page_id"], - "notion_page_type": data_source_info["type"], - "tenant_id": current_user.current_tenant_id, - }, + notion_info=NotionInfo.model_validate( + { + "credential_id": data_source_info["credential_id"], + "notion_workspace_id": data_source_info["notion_workspace_id"], + "notion_obj_id": data_source_info["notion_page_id"], + "notion_page_type": 
data_source_info["type"], + "tenant_id": current_user.current_tenant_id, + } + ), document_model=document.doc_form, ) extract_settings.append(extract_setting) @@ -562,14 +564,16 @@ class DocumentBatchIndexingEstimateApi(DocumentResource): continue extract_setting = ExtractSetting( datasource_type=DatasourceType.WEBSITE.value, - website_info={ - "provider": data_source_info["provider"], - "job_id": data_source_info["job_id"], - "url": data_source_info["url"], - "tenant_id": current_user.current_tenant_id, - "mode": data_source_info["mode"], - "only_main_content": data_source_info["only_main_content"], - }, + website_info=WebsiteInfo.model_validate( + { + "provider": data_source_info["provider"], + "job_id": data_source_info["job_id"], + "url": data_source_info["url"], + "tenant_id": current_user.current_tenant_id, + "mode": data_source_info["mode"], + "only_main_content": data_source_info["only_main_content"], + } + ), document_model=document.doc_form, ) extract_settings.append(extract_setting) diff --git a/api/controllers/console/datasets/datasets_segments.py b/api/controllers/console/datasets/datasets_segments.py index 9f2805e2c6..d6bd02483d 100644 --- a/api/controllers/console/datasets/datasets_segments.py +++ b/api/controllers/console/datasets/datasets_segments.py @@ -309,7 +309,7 @@ class DatasetDocumentSegmentUpdateApi(Resource): ) args = parser.parse_args() SegmentService.segment_create_args_validate(args, document) - segment = SegmentService.update_segment(SegmentUpdateArgs(**args), segment, document, dataset) + segment = SegmentService.update_segment(SegmentUpdateArgs.model_validate(args), segment, document, dataset) return {"data": marshal(segment, segment_fields), "doc_form": document.doc_form}, 200 @setup_required @@ -564,7 +564,7 @@ class ChildChunkAddApi(Resource): args = parser.parse_args() try: chunks_data = args["chunks"] - chunks = [ChildChunkUpdateArgs(**chunk) for chunk in chunks_data] + chunks = [ChildChunkUpdateArgs.model_validate(chunk) for chunk in chunks_data] child_chunks = SegmentService.update_child_chunks(chunks, segment, document, dataset) except ChildChunkIndexingServiceError as e: raise ChildChunkIndexingError(str(e)) diff --git a/api/controllers/console/datasets/metadata.py b/api/controllers/console/datasets/metadata.py index dc3cd3fce9..8438458617 100644 --- a/api/controllers/console/datasets/metadata.py +++ b/api/controllers/console/datasets/metadata.py @@ -28,7 +28,7 @@ class DatasetMetadataCreateApi(Resource): parser.add_argument("type", type=str, required=True, nullable=False, location="json") parser.add_argument("name", type=str, required=True, nullable=False, location="json") args = parser.parse_args() - metadata_args = MetadataArgs(**args) + metadata_args = MetadataArgs.model_validate(args) dataset_id_str = str(dataset_id) dataset = DatasetService.get_dataset(dataset_id_str) @@ -137,7 +137,7 @@ class DocumentMetadataEditApi(Resource): parser = reqparse.RequestParser() parser.add_argument("operation_data", type=list, required=True, nullable=False, location="json") args = parser.parse_args() - metadata_args = MetadataOperationData(**args) + metadata_args = MetadataOperationData.model_validate(args) MetadataService.update_documents_metadata(dataset, metadata_args) diff --git a/api/controllers/console/datasets/rag_pipeline/rag_pipeline.py b/api/controllers/console/datasets/rag_pipeline/rag_pipeline.py index 3af590afc8..e021f95283 100644 --- a/api/controllers/console/datasets/rag_pipeline/rag_pipeline.py +++ 
b/api/controllers/console/datasets/rag_pipeline/rag_pipeline.py @@ -88,7 +88,7 @@ class CustomizedPipelineTemplateApi(Resource): nullable=True, ) args = parser.parse_args() - pipeline_template_info = PipelineTemplateInfoEntity(**args) + pipeline_template_info = PipelineTemplateInfoEntity.model_validate(args) RagPipelineService.update_customized_pipeline_template(template_id, pipeline_template_info) return 200 diff --git a/api/controllers/inner_api/plugin/wraps.py b/api/controllers/inner_api/plugin/wraps.py index b683aa3160..a36d6b0745 100644 --- a/api/controllers/inner_api/plugin/wraps.py +++ b/api/controllers/inner_api/plugin/wraps.py @@ -128,7 +128,7 @@ def plugin_data(view: Callable[P, R] | None = None, *, payload_type: type[BaseMo raise ValueError("invalid json") try: - payload = payload_type(**data) + payload = payload_type.model_validate(data) except Exception as e: raise ValueError(f"invalid payload: {str(e)}") diff --git a/api/controllers/service_api/dataset/dataset.py b/api/controllers/service_api/dataset/dataset.py index 961b96db91..92bbb76f0f 100644 --- a/api/controllers/service_api/dataset/dataset.py +++ b/api/controllers/service_api/dataset/dataset.py @@ -280,7 +280,7 @@ class DatasetListApi(DatasetApiResource): external_knowledge_id=args["external_knowledge_id"], embedding_model_provider=args["embedding_model_provider"], embedding_model_name=args["embedding_model"], - retrieval_model=RetrievalModel(**args["retrieval_model"]) + retrieval_model=RetrievalModel.model_validate(args["retrieval_model"]) if args["retrieval_model"] is not None else None, ) diff --git a/api/controllers/service_api/dataset/document.py b/api/controllers/service_api/dataset/document.py index c1122acd7b..961a338bc5 100644 --- a/api/controllers/service_api/dataset/document.py +++ b/api/controllers/service_api/dataset/document.py @@ -136,7 +136,7 @@ class DocumentAddByTextApi(DatasetApiResource): "info_list": {"data_source_type": "upload_file", "file_info_list": {"file_ids": [upload_file.id]}}, } args["data_source"] = data_source - knowledge_config = KnowledgeConfig(**args) + knowledge_config = KnowledgeConfig.model_validate(args) # validate args DocumentService.document_create_args_validate(knowledge_config) @@ -221,7 +221,7 @@ class DocumentUpdateByTextApi(DatasetApiResource): args["data_source"] = data_source # validate args args["original_document_id"] = str(document_id) - knowledge_config = KnowledgeConfig(**args) + knowledge_config = KnowledgeConfig.model_validate(args) DocumentService.document_create_args_validate(knowledge_config) try: @@ -328,7 +328,7 @@ class DocumentAddByFileApi(DatasetApiResource): } args["data_source"] = data_source # validate args - knowledge_config = KnowledgeConfig(**args) + knowledge_config = KnowledgeConfig.model_validate(args) DocumentService.document_create_args_validate(knowledge_config) dataset_process_rule = dataset.latest_process_rule if "process_rule" not in args else None @@ -426,7 +426,7 @@ class DocumentUpdateByFileApi(DatasetApiResource): # validate args args["original_document_id"] = str(document_id) - knowledge_config = KnowledgeConfig(**args) + knowledge_config = KnowledgeConfig.model_validate(args) DocumentService.document_create_args_validate(knowledge_config) try: diff --git a/api/controllers/service_api/dataset/metadata.py b/api/controllers/service_api/dataset/metadata.py index e01659dc68..51420fdd5f 100644 --- a/api/controllers/service_api/dataset/metadata.py +++ b/api/controllers/service_api/dataset/metadata.py @@ -51,7 +51,7 @@ class 
DatasetMetadataCreateServiceApi(DatasetApiResource): def post(self, tenant_id, dataset_id): """Create metadata for a dataset.""" args = metadata_create_parser.parse_args() - metadata_args = MetadataArgs(**args) + metadata_args = MetadataArgs.model_validate(args) dataset_id_str = str(dataset_id) dataset = DatasetService.get_dataset(dataset_id_str) @@ -200,7 +200,7 @@ class DocumentMetadataEditServiceApi(DatasetApiResource): DatasetService.check_dataset_permission(dataset, current_user) args = document_metadata_parser.parse_args() - metadata_args = MetadataOperationData(**args) + metadata_args = MetadataOperationData.model_validate(args) MetadataService.update_documents_metadata(dataset, metadata_args) diff --git a/api/controllers/service_api/dataset/rag_pipeline/rag_pipeline_workflow.py b/api/controllers/service_api/dataset/rag_pipeline/rag_pipeline_workflow.py index f05325d711..13ef8abc2d 100644 --- a/api/controllers/service_api/dataset/rag_pipeline/rag_pipeline_workflow.py +++ b/api/controllers/service_api/dataset/rag_pipeline/rag_pipeline_workflow.py @@ -98,7 +98,7 @@ class DatasourceNodeRunApi(DatasetApiResource): parser.add_argument("is_published", type=bool, required=True, location="json") args: ParseResult = parser.parse_args() - datasource_node_run_api_entity: DatasourceNodeRunApiEntity = DatasourceNodeRunApiEntity(**args) + datasource_node_run_api_entity = DatasourceNodeRunApiEntity.model_validate(args) assert isinstance(current_user, Account) rag_pipeline_service: RagPipelineService = RagPipelineService() pipeline: Pipeline = rag_pipeline_service.get_pipeline(tenant_id=tenant_id, dataset_id=dataset_id) diff --git a/api/controllers/service_api/dataset/segment.py b/api/controllers/service_api/dataset/segment.py index a22155b07a..d674c7467d 100644 --- a/api/controllers/service_api/dataset/segment.py +++ b/api/controllers/service_api/dataset/segment.py @@ -252,7 +252,7 @@ class DatasetSegmentApi(DatasetApiResource): args = segment_update_parser.parse_args() updated_segment = SegmentService.update_segment( - SegmentUpdateArgs(**args["segment"]), segment, document, dataset + SegmentUpdateArgs.model_validate(args["segment"]), segment, document, dataset ) return {"data": marshal(updated_segment, segment_fields), "doc_form": document.doc_form}, 200 diff --git a/api/core/app/app_config/easy_ui_based_app/agent/manager.py b/api/core/app/app_config/easy_ui_based_app/agent/manager.py index eab26e5af9..c1f336fdde 100644 --- a/api/core/app/app_config/easy_ui_based_app/agent/manager.py +++ b/api/core/app/app_config/easy_ui_based_app/agent/manager.py @@ -40,7 +40,7 @@ class AgentConfigManager: "credential_id": tool.get("credential_id", None), } - agent_tools.append(AgentToolEntity(**agent_tool_properties)) + agent_tools.append(AgentToolEntity.model_validate(agent_tool_properties)) if "strategy" in config["agent_mode"] and config["agent_mode"]["strategy"] not in { "react_router", diff --git a/api/core/app/apps/pipeline/pipeline_runner.py b/api/core/app/apps/pipeline/pipeline_runner.py index 145f629c4d..866c46d963 100644 --- a/api/core/app/apps/pipeline/pipeline_runner.py +++ b/api/core/app/apps/pipeline/pipeline_runner.py @@ -116,7 +116,7 @@ class PipelineRunner(WorkflowBasedAppRunner): rag_pipeline_variables = [] if workflow.rag_pipeline_variables: for v in workflow.rag_pipeline_variables: - rag_pipeline_variable = RAGPipelineVariable(**v) + rag_pipeline_variable = RAGPipelineVariable.model_validate(v) if ( rag_pipeline_variable.belong_to_node_id in (self.application_generate_entity.start_node_id, 
"shared") diff --git a/api/core/datasource/entities/common_entities.py b/api/core/datasource/entities/common_entities.py index ac36d83ae3..3c64632dbb 100644 --- a/api/core/datasource/entities/common_entities.py +++ b/api/core/datasource/entities/common_entities.py @@ -1,4 +1,4 @@ -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, model_validator class I18nObject(BaseModel): @@ -11,11 +11,12 @@ class I18nObject(BaseModel): pt_BR: str | None = Field(default=None) ja_JP: str | None = Field(default=None) - def __init__(self, **data): - super().__init__(**data) + @model_validator(mode="after") + def _(self): self.zh_Hans = self.zh_Hans or self.en_US self.pt_BR = self.pt_BR or self.en_US self.ja_JP = self.ja_JP or self.en_US + return self def to_dict(self) -> dict: return {"zh_Hans": self.zh_Hans, "en_US": self.en_US, "pt_BR": self.pt_BR, "ja_JP": self.ja_JP} diff --git a/api/core/entities/provider_configuration.py b/api/core/entities/provider_configuration.py index 111de89178..2857729a81 100644 --- a/api/core/entities/provider_configuration.py +++ b/api/core/entities/provider_configuration.py @@ -5,7 +5,7 @@ from collections import defaultdict from collections.abc import Iterator, Sequence from json import JSONDecodeError -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict, Field, model_validator from sqlalchemy import func, select from sqlalchemy.orm import Session @@ -73,9 +73,8 @@ class ProviderConfiguration(BaseModel): # pydantic configs model_config = ConfigDict(protected_namespaces=()) - def __init__(self, **data): - super().__init__(**data) - + @model_validator(mode="after") + def _(self): if self.provider.provider not in original_provider_configurate_methods: original_provider_configurate_methods[self.provider.provider] = [] for configurate_method in self.provider.configurate_methods: @@ -90,6 +89,7 @@ class ProviderConfiguration(BaseModel): and ConfigurateMethod.PREDEFINED_MODEL not in self.provider.configurate_methods ): self.provider.configurate_methods.append(ConfigurateMethod.PREDEFINED_MODEL) + return self def get_current_credentials(self, model_type: ModelType, model: str) -> dict | None: """ diff --git a/api/core/helper/code_executor/code_executor.py b/api/core/helper/code_executor/code_executor.py index 0c1d03dc13..f92278f9e2 100644 --- a/api/core/helper/code_executor/code_executor.py +++ b/api/core/helper/code_executor/code_executor.py @@ -131,7 +131,7 @@ class CodeExecutor: if (code := response_data.get("code")) != 0: raise CodeExecutionError(f"Got error code: {code}. 
Got error msg: {response_data.get('message')}") - response_code = CodeExecutionResponse(**response_data) + response_code = CodeExecutionResponse.model_validate(response_data) if response_code.data.error: raise CodeExecutionError(response_code.data.error) diff --git a/api/core/helper/marketplace.py b/api/core/helper/marketplace.py index 10f304c087..bddb864a95 100644 --- a/api/core/helper/marketplace.py +++ b/api/core/helper/marketplace.py @@ -26,7 +26,7 @@ def batch_fetch_plugin_manifests(plugin_ids: list[str]) -> Sequence[MarketplaceP response = httpx.post(url, json={"plugin_ids": plugin_ids}, headers={"X-Dify-Version": dify_config.project.version}) response.raise_for_status() - return [MarketplacePluginDeclaration(**plugin) for plugin in response.json()["data"]["plugins"]] + return [MarketplacePluginDeclaration.model_validate(plugin) for plugin in response.json()["data"]["plugins"]] def batch_fetch_plugin_manifests_ignore_deserialization_error( @@ -41,7 +41,7 @@ def batch_fetch_plugin_manifests_ignore_deserialization_error( result: list[MarketplacePluginDeclaration] = [] for plugin in response.json()["data"]["plugins"]: try: - result.append(MarketplacePluginDeclaration(**plugin)) + result.append(MarketplacePluginDeclaration.model_validate(plugin)) except Exception: pass diff --git a/api/core/indexing_runner.py b/api/core/indexing_runner.py index ee37024260..3682fdb667 100644 --- a/api/core/indexing_runner.py +++ b/api/core/indexing_runner.py @@ -20,7 +20,7 @@ from core.rag.cleaner.clean_processor import CleanProcessor from core.rag.datasource.keyword.keyword_factory import Keyword from core.rag.docstore.dataset_docstore import DatasetDocumentStore from core.rag.extractor.entity.datasource_type import DatasourceType -from core.rag.extractor.entity.extract_setting import ExtractSetting +from core.rag.extractor.entity.extract_setting import ExtractSetting, NotionInfo, WebsiteInfo from core.rag.index_processor.constant.index_type import IndexType from core.rag.index_processor.index_processor_base import BaseIndexProcessor from core.rag.index_processor.index_processor_factory import IndexProcessorFactory @@ -357,14 +357,16 @@ class IndexingRunner: raise ValueError("no notion import info found") extract_setting = ExtractSetting( datasource_type=DatasourceType.NOTION.value, - notion_info={ - "credential_id": data_source_info["credential_id"], - "notion_workspace_id": data_source_info["notion_workspace_id"], - "notion_obj_id": data_source_info["notion_page_id"], - "notion_page_type": data_source_info["type"], - "document": dataset_document, - "tenant_id": dataset_document.tenant_id, - }, + notion_info=NotionInfo.model_validate( + { + "credential_id": data_source_info["credential_id"], + "notion_workspace_id": data_source_info["notion_workspace_id"], + "notion_obj_id": data_source_info["notion_page_id"], + "notion_page_type": data_source_info["type"], + "document": dataset_document, + "tenant_id": dataset_document.tenant_id, + } + ), document_model=dataset_document.doc_form, ) text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule["mode"]) @@ -378,14 +380,16 @@ class IndexingRunner: raise ValueError("no website import info found") extract_setting = ExtractSetting( datasource_type=DatasourceType.WEBSITE.value, - website_info={ - "provider": data_source_info["provider"], - "job_id": data_source_info["job_id"], - "tenant_id": dataset_document.tenant_id, - "url": data_source_info["url"], - "mode": data_source_info["mode"], - "only_main_content": 
data_source_info["only_main_content"], - }, + website_info=WebsiteInfo.model_validate( + { + "provider": data_source_info["provider"], + "job_id": data_source_info["job_id"], + "tenant_id": dataset_document.tenant_id, + "url": data_source_info["url"], + "mode": data_source_info["mode"], + "only_main_content": data_source_info["only_main_content"], + } + ), document_model=dataset_document.doc_form, ) text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule["mode"]) diff --git a/api/core/mcp/session/client_session.py b/api/core/mcp/session/client_session.py index 5817416ba4..fa1d309134 100644 --- a/api/core/mcp/session/client_session.py +++ b/api/core/mcp/session/client_session.py @@ -294,7 +294,7 @@ class ClientSession( method="completion/complete", params=types.CompleteRequestParams( ref=ref, - argument=types.CompletionArgument(**argument), + argument=types.CompletionArgument.model_validate(argument), ), ) ), diff --git a/api/core/model_runtime/entities/common_entities.py b/api/core/model_runtime/entities/common_entities.py index c7353de5af..b673efae22 100644 --- a/api/core/model_runtime/entities/common_entities.py +++ b/api/core/model_runtime/entities/common_entities.py @@ -1,4 +1,4 @@ -from pydantic import BaseModel +from pydantic import BaseModel, model_validator class I18nObject(BaseModel): @@ -9,7 +9,8 @@ class I18nObject(BaseModel): zh_Hans: str | None = None en_US: str - def __init__(self, **data): - super().__init__(**data) + @model_validator(mode="after") + def _(self): if not self.zh_Hans: self.zh_Hans = self.en_US + return self diff --git a/api/core/model_runtime/entities/provider_entities.py b/api/core/model_runtime/entities/provider_entities.py index 2ccc9e0eae..831fb9d4db 100644 --- a/api/core/model_runtime/entities/provider_entities.py +++ b/api/core/model_runtime/entities/provider_entities.py @@ -1,7 +1,7 @@ from collections.abc import Sequence from enum import Enum, StrEnum, auto -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import AIModelEntity, ModelType @@ -46,10 +46,11 @@ class FormOption(BaseModel): value: str show_on: list[FormShowOnObject] = [] - def __init__(self, **data): - super().__init__(**data) + @model_validator(mode="after") + def _(self): if not self.label: self.label = I18nObject(en_US=self.value) + return self class CredentialFormSchema(BaseModel): diff --git a/api/core/model_runtime/model_providers/model_provider_factory.py b/api/core/model_runtime/model_providers/model_provider_factory.py index e070c17abd..e1afc41bee 100644 --- a/api/core/model_runtime/model_providers/model_provider_factory.py +++ b/api/core/model_runtime/model_providers/model_provider_factory.py @@ -269,17 +269,17 @@ class ModelProviderFactory: } if model_type == ModelType.LLM: - return LargeLanguageModel(**init_params) # type: ignore + return LargeLanguageModel.model_validate(init_params) elif model_type == ModelType.TEXT_EMBEDDING: - return TextEmbeddingModel(**init_params) # type: ignore + return TextEmbeddingModel.model_validate(init_params) elif model_type == ModelType.RERANK: - return RerankModel(**init_params) # type: ignore + return RerankModel.model_validate(init_params) elif model_type == ModelType.SPEECH2TEXT: - return Speech2TextModel(**init_params) # type: ignore + return Speech2TextModel.model_validate(init_params) elif 
model_type == ModelType.MODERATION: - return ModerationModel(**init_params) # type: ignore + return ModerationModel.model_validate(init_params) elif model_type == ModelType.TTS: - return TTSModel(**init_params) # type: ignore + return TTSModel.model_validate(init_params) def get_provider_icon(self, provider: str, icon_type: str, lang: str) -> tuple[bytes, str]: """ diff --git a/api/core/moderation/api/api.py b/api/core/moderation/api/api.py index 573f4ec2a7..2d72b17a04 100644 --- a/api/core/moderation/api/api.py +++ b/api/core/moderation/api/api.py @@ -51,7 +51,7 @@ class ApiModeration(Moderation): params = ModerationInputParams(app_id=self.app_id, inputs=inputs, query=query) result = self._get_config_by_requestor(APIBasedExtensionPoint.APP_MODERATION_INPUT, params.model_dump()) - return ModerationInputsResult(**result) + return ModerationInputsResult.model_validate(result) return ModerationInputsResult( flagged=flagged, action=ModerationAction.DIRECT_OUTPUT, preset_response=preset_response @@ -67,7 +67,7 @@ class ApiModeration(Moderation): params = ModerationOutputParams(app_id=self.app_id, text=text) result = self._get_config_by_requestor(APIBasedExtensionPoint.APP_MODERATION_OUTPUT, params.model_dump()) - return ModerationOutputsResult(**result) + return ModerationOutputsResult.model_validate(result) return ModerationOutputsResult( flagged=flagged, action=ModerationAction.DIRECT_OUTPUT, preset_response=preset_response diff --git a/api/core/plugin/entities/request.py b/api/core/plugin/entities/request.py index 10f37f75f8..7b789d8ac9 100644 --- a/api/core/plugin/entities/request.py +++ b/api/core/plugin/entities/request.py @@ -84,15 +84,15 @@ class RequestInvokeLLM(BaseRequestInvokeModel): for i in range(len(v)): if v[i]["role"] == PromptMessageRole.USER.value: - v[i] = UserPromptMessage(**v[i]) + v[i] = UserPromptMessage.model_validate(v[i]) elif v[i]["role"] == PromptMessageRole.ASSISTANT.value: - v[i] = AssistantPromptMessage(**v[i]) + v[i] = AssistantPromptMessage.model_validate(v[i]) elif v[i]["role"] == PromptMessageRole.SYSTEM.value: - v[i] = SystemPromptMessage(**v[i]) + v[i] = SystemPromptMessage.model_validate(v[i]) elif v[i]["role"] == PromptMessageRole.TOOL.value: - v[i] = ToolPromptMessage(**v[i]) + v[i] = ToolPromptMessage.model_validate(v[i]) else: - v[i] = PromptMessage(**v[i]) + v[i] = PromptMessage.model_validate(v[i]) return v diff --git a/api/core/plugin/impl/base.py b/api/core/plugin/impl/base.py index 8e3df4da2c..62a5cc535a 100644 --- a/api/core/plugin/impl/base.py +++ b/api/core/plugin/impl/base.py @@ -94,7 +94,7 @@ class BasePluginClient: self, method: str, path: str, - type: type[T], + type_: type[T], headers: dict | None = None, data: bytes | dict | None = None, params: dict | None = None, @@ -104,13 +104,13 @@ class BasePluginClient: Make a stream request to the plugin daemon inner API and yield the response as a model. """ for line in self._stream_request(method, path, params, headers, data, files): - yield type(**json.loads(line)) # type: ignore + yield type_(**json.loads(line)) # type: ignore def _request_with_model( self, method: str, path: str, - type: type[T], + type_: type[T], headers: dict | None = None, data: bytes | None = None, params: dict | None = None, @@ -120,13 +120,13 @@ class BasePluginClient: Make a request to the plugin daemon inner API and return the response as a model. 
""" response = self._request(method, path, headers, data, params, files) - return type(**response.json()) # type: ignore + return type_(**response.json()) # type: ignore def _request_with_plugin_daemon_response( self, method: str, path: str, - type: type[T], + type_: type[T], headers: dict | None = None, data: bytes | dict | None = None, params: dict | None = None, @@ -140,22 +140,22 @@ class BasePluginClient: response = self._request(method, path, headers, data, params, files) response.raise_for_status() except HTTPError as e: - msg = f"Failed to request plugin daemon, status: {e.response.status_code}, url: {path}" - logger.exception(msg) + logger.exception("Failed to request plugin daemon, status: %s, url: %s", e.response.status_code, path) raise e except Exception as e: msg = f"Failed to request plugin daemon, url: {path}" - logger.exception(msg) + logger.exception("Failed to request plugin daemon, url: %s", path) raise ValueError(msg) from e try: json_response = response.json() if transformer: json_response = transformer(json_response) - rep = PluginDaemonBasicResponse[type](**json_response) # type: ignore + # https://stackoverflow.com/questions/59634937/variable-foo-class-is-not-valid-as-type-but-why + rep = PluginDaemonBasicResponse[type_].model_validate(json_response) # type: ignore except Exception: msg = ( - f"Failed to parse response from plugin daemon to PluginDaemonBasicResponse [{str(type.__name__)}]," + f"Failed to parse response from plugin daemon to PluginDaemonBasicResponse [{str(type_.__name__)}]," f" url: {path}" ) logger.exception(msg) @@ -163,7 +163,7 @@ class BasePluginClient: if rep.code != 0: try: - error = PluginDaemonError(**json.loads(rep.message)) + error = PluginDaemonError.model_validate(json.loads(rep.message)) except Exception: raise ValueError(f"{rep.message}, code: {rep.code}") @@ -178,7 +178,7 @@ class BasePluginClient: self, method: str, path: str, - type: type[T], + type_: type[T], headers: dict | None = None, data: bytes | dict | None = None, params: dict | None = None, @@ -189,7 +189,7 @@ class BasePluginClient: """ for line in self._stream_request(method, path, params, headers, data, files): try: - rep = PluginDaemonBasicResponse[type].model_validate_json(line) # type: ignore + rep = PluginDaemonBasicResponse[type_].model_validate_json(line) # type: ignore except (ValueError, TypeError): # TODO modify this when line_data has code and message try: @@ -204,7 +204,7 @@ class BasePluginClient: if rep.code != 0: if rep.code == -500: try: - error = PluginDaemonError(**json.loads(rep.message)) + error = PluginDaemonError.model_validate(json.loads(rep.message)) except Exception: raise PluginDaemonInnerError(code=rep.code, message=rep.message) diff --git a/api/core/plugin/impl/datasource.py b/api/core/plugin/impl/datasource.py index 84087f8104..ce1ef71494 100644 --- a/api/core/plugin/impl/datasource.py +++ b/api/core/plugin/impl/datasource.py @@ -46,7 +46,9 @@ class PluginDatasourceManager(BasePluginClient): params={"page": 1, "page_size": 256}, transformer=transformer, ) - local_file_datasource_provider = PluginDatasourceProviderEntity(**self._get_local_file_datasource_provider()) + local_file_datasource_provider = PluginDatasourceProviderEntity.model_validate( + self._get_local_file_datasource_provider() + ) for provider in response: ToolTransformService.repack_provider(tenant_id=tenant_id, provider=provider) @@ -104,7 +106,7 @@ class PluginDatasourceManager(BasePluginClient): Fetch datasource provider for the given tenant and plugin. 
""" if provider_id == "langgenius/file/file": - return PluginDatasourceProviderEntity(**self._get_local_file_datasource_provider()) + return PluginDatasourceProviderEntity.model_validate(self._get_local_file_datasource_provider()) tool_provider_id = DatasourceProviderID(provider_id) diff --git a/api/core/plugin/impl/model.py b/api/core/plugin/impl/model.py index 153da142f4..5dfc3c212e 100644 --- a/api/core/plugin/impl/model.py +++ b/api/core/plugin/impl/model.py @@ -162,7 +162,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/llm/invoke", - type=LLMResultChunk, + type_=LLMResultChunk, data=jsonable_encoder( { "user_id": user_id, @@ -208,7 +208,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/llm/num_tokens", - type=PluginLLMNumTokensResponse, + type_=PluginLLMNumTokensResponse, data=jsonable_encoder( { "user_id": user_id, @@ -250,7 +250,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/text_embedding/invoke", - type=TextEmbeddingResult, + type_=TextEmbeddingResult, data=jsonable_encoder( { "user_id": user_id, @@ -291,7 +291,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/text_embedding/num_tokens", - type=PluginTextEmbeddingNumTokensResponse, + type_=PluginTextEmbeddingNumTokensResponse, data=jsonable_encoder( { "user_id": user_id, @@ -334,7 +334,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/rerank/invoke", - type=RerankResult, + type_=RerankResult, data=jsonable_encoder( { "user_id": user_id, @@ -378,7 +378,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/tts/invoke", - type=PluginStringResultResponse, + type_=PluginStringResultResponse, data=jsonable_encoder( { "user_id": user_id, @@ -422,7 +422,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/tts/model/voices", - type=PluginVoicesResponse, + type_=PluginVoicesResponse, data=jsonable_encoder( { "user_id": user_id, @@ -466,7 +466,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/speech2text/invoke", - type=PluginStringResultResponse, + type_=PluginStringResultResponse, data=jsonable_encoder( { "user_id": user_id, @@ -506,7 +506,7 @@ class PluginModelClient(BasePluginClient): response = self._request_with_plugin_daemon_response_stream( method="POST", path=f"plugin/{tenant_id}/dispatch/moderation/invoke", - type=PluginBasicBooleanResponse, + type_=PluginBasicBooleanResponse, data=jsonable_encoder( { "user_id": user_id, diff --git a/api/core/rag/datasource/retrieval_service.py b/api/core/rag/datasource/retrieval_service.py index 63a1d911ca..38358ccd6d 100644 --- a/api/core/rag/datasource/retrieval_service.py +++ b/api/core/rag/datasource/retrieval_service.py @@ -134,7 +134,7 @@ class RetrievalService: if not dataset: return [] metadata_condition = ( - 
MetadataCondition(**metadata_filtering_conditions) if metadata_filtering_conditions else None + MetadataCondition.model_validate(metadata_filtering_conditions) if metadata_filtering_conditions else None ) all_documents = ExternalDatasetService.fetch_external_knowledge_retrieval( dataset.tenant_id, diff --git a/api/core/rag/extractor/entity/extract_setting.py b/api/core/rag/extractor/entity/extract_setting.py index b9bf9d0d8c..c3bfbce98f 100644 --- a/api/core/rag/extractor/entity/extract_setting.py +++ b/api/core/rag/extractor/entity/extract_setting.py @@ -17,9 +17,6 @@ class NotionInfo(BaseModel): tenant_id: str model_config = ConfigDict(arbitrary_types_allowed=True) - def __init__(self, **data): - super().__init__(**data) - class WebsiteInfo(BaseModel): """ @@ -47,6 +44,3 @@ class ExtractSetting(BaseModel): website_info: WebsiteInfo | None = None document_model: str | None = None model_config = ConfigDict(arbitrary_types_allowed=True) - - def __init__(self, **data): - super().__init__(**data) diff --git a/api/core/rag/index_processor/processor/paragraph_index_processor.py b/api/core/rag/index_processor/processor/paragraph_index_processor.py index 755aa88d08..4fcffbcc77 100644 --- a/api/core/rag/index_processor/processor/paragraph_index_processor.py +++ b/api/core/rag/index_processor/processor/paragraph_index_processor.py @@ -38,11 +38,11 @@ class ParagraphIndexProcessor(BaseIndexProcessor): raise ValueError("No process rule found.") if process_rule.get("mode") == "automatic": automatic_rule = DatasetProcessRule.AUTOMATIC_RULES - rules = Rule(**automatic_rule) + rules = Rule.model_validate(automatic_rule) else: if not process_rule.get("rules"): raise ValueError("No rules found in process rule.") - rules = Rule(**process_rule.get("rules")) + rules = Rule.model_validate(process_rule.get("rules")) # Split the text documents into nodes. if not rules.segmentation: raise ValueError("No segmentation found in rules.") diff --git a/api/core/rag/index_processor/processor/parent_child_index_processor.py b/api/core/rag/index_processor/processor/parent_child_index_processor.py index e0ccd8b567..7bdde286f5 100644 --- a/api/core/rag/index_processor/processor/parent_child_index_processor.py +++ b/api/core/rag/index_processor/processor/parent_child_index_processor.py @@ -40,7 +40,7 @@ class ParentChildIndexProcessor(BaseIndexProcessor): raise ValueError("No process rule found.") if not process_rule.get("rules"): raise ValueError("No rules found in process rule.") - rules = Rule(**process_rule.get("rules")) + rules = Rule.model_validate(process_rule.get("rules")) all_documents: list[Document] = [] if rules.parent_mode == ParentMode.PARAGRAPH: # Split the text documents into nodes. 
@@ -110,7 +110,7 @@ class ParentChildIndexProcessor(BaseIndexProcessor): child_documents = document.children if child_documents: formatted_child_documents = [ - Document(**child_document.model_dump()) for child_document in child_documents + Document.model_validate(child_document.model_dump()) for child_document in child_documents ] vector.create(formatted_child_documents) @@ -224,7 +224,7 @@ class ParentChildIndexProcessor(BaseIndexProcessor): return child_nodes def index(self, dataset: Dataset, document: DatasetDocument, chunks: Any): - parent_childs = ParentChildStructureChunk(**chunks) + parent_childs = ParentChildStructureChunk.model_validate(chunks) documents = [] for parent_child in parent_childs.parent_child_chunks: metadata = { @@ -274,7 +274,7 @@ class ParentChildIndexProcessor(BaseIndexProcessor): vector.create(all_child_documents) def format_preview(self, chunks: Any) -> Mapping[str, Any]: - parent_childs = ParentChildStructureChunk(**chunks) + parent_childs = ParentChildStructureChunk.model_validate(chunks) preview = [] for parent_child in parent_childs.parent_child_chunks: preview.append({"content": parent_child.parent_content, "child_chunks": parent_child.child_contents}) diff --git a/api/core/rag/index_processor/processor/qa_index_processor.py b/api/core/rag/index_processor/processor/qa_index_processor.py index 2054031643..9c8f70dba8 100644 --- a/api/core/rag/index_processor/processor/qa_index_processor.py +++ b/api/core/rag/index_processor/processor/qa_index_processor.py @@ -47,7 +47,7 @@ class QAIndexProcessor(BaseIndexProcessor): raise ValueError("No process rule found.") if not process_rule.get("rules"): raise ValueError("No rules found in process rule.") - rules = Rule(**process_rule.get("rules")) + rules = Rule.model_validate(process_rule.get("rules")) splitter = self._get_splitter( processing_rule_mode=process_rule.get("mode"), max_tokens=rules.segmentation.max_tokens if rules.segmentation else 0, @@ -168,7 +168,7 @@ class QAIndexProcessor(BaseIndexProcessor): return docs def index(self, dataset: Dataset, document: DatasetDocument, chunks: Any): - qa_chunks = QAStructureChunk(**chunks) + qa_chunks = QAStructureChunk.model_validate(chunks) documents = [] for qa_chunk in qa_chunks.qa_chunks: metadata = { @@ -191,7 +191,7 @@ class QAIndexProcessor(BaseIndexProcessor): raise ValueError("Indexing technique must be high quality.") def format_preview(self, chunks: Any) -> Mapping[str, Any]: - qa_chunks = QAStructureChunk(**chunks) + qa_chunks = QAStructureChunk.model_validate(chunks) preview = [] for qa_chunk in qa_chunks.qa_chunks: preview.append({"question": qa_chunk.question, "answer": qa_chunk.answer}) diff --git a/api/core/tools/builtin_tool/provider.py b/api/core/tools/builtin_tool/provider.py index 45fd16d684..29d34e722a 100644 --- a/api/core/tools/builtin_tool/provider.py +++ b/api/core/tools/builtin_tool/provider.py @@ -90,7 +90,7 @@ class BuiltinToolProviderController(ToolProviderController): tools.append( assistant_tool_class( provider=provider, - entity=ToolEntity(**tool), + entity=ToolEntity.model_validate(tool), runtime=ToolRuntime(tenant_id=""), ) ) diff --git a/api/core/tools/entities/common_entities.py b/api/core/tools/entities/common_entities.py index 2c6d9c1964..21d310bbb9 100644 --- a/api/core/tools/entities/common_entities.py +++ b/api/core/tools/entities/common_entities.py @@ -1,4 +1,4 @@ -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, model_validator class I18nObject(BaseModel): @@ -11,11 +11,12 @@ class 
I18nObject(BaseModel): pt_BR: str | None = Field(default=None) ja_JP: str | None = Field(default=None) - def __init__(self, **data): - super().__init__(**data) + @model_validator(mode="after") + def _populate_missing_locales(self): self.zh_Hans = self.zh_Hans or self.en_US self.pt_BR = self.pt_BR or self.en_US self.ja_JP = self.ja_JP or self.en_US + return self def to_dict(self): return {"zh_Hans": self.zh_Hans, "en_US": self.en_US, "pt_BR": self.pt_BR, "ja_JP": self.ja_JP} diff --git a/api/core/tools/mcp_tool/provider.py b/api/core/tools/mcp_tool/provider.py index 5b04f0edbe..f269b8db9b 100644 --- a/api/core/tools/mcp_tool/provider.py +++ b/api/core/tools/mcp_tool/provider.py @@ -54,7 +54,7 @@ class MCPToolProviderController(ToolProviderController): """ tools = [] tools_data = json.loads(db_provider.tools) - remote_mcp_tools = [RemoteMCPTool(**tool) for tool in tools_data] + remote_mcp_tools = [RemoteMCPTool.model_validate(tool) for tool in tools_data] user = db_provider.load_user() tools = [ ToolEntity( diff --git a/api/core/tools/tool_manager.py b/api/core/tools/tool_manager.py index 9e5f5a7c23..af68971ca7 100644 --- a/api/core/tools/tool_manager.py +++ b/api/core/tools/tool_manager.py @@ -1008,7 +1008,7 @@ class ToolManager: config = tool_configurations.get(parameter.name, {}) if not (config and isinstance(config, dict) and config.get("value") is not None): continue - tool_input = ToolNodeData.ToolInput(**tool_configurations.get(parameter.name, {})) + tool_input = ToolNodeData.ToolInput.model_validate(tool_configurations.get(parameter.name, {})) if tool_input.type == "variable": variable = variable_pool.get(tool_input.value) if variable is None: diff --git a/api/core/workflow/graph_engine/command_channels/redis_channel.py b/api/core/workflow/graph_engine/command_channels/redis_channel.py index 056e17bf5d..c841459170 100644 --- a/api/core/workflow/graph_engine/command_channels/redis_channel.py +++ b/api/core/workflow/graph_engine/command_channels/redis_channel.py @@ -105,10 +105,10 @@ class RedisChannel: command_type = CommandType(command_type_value) if command_type == CommandType.ABORT: - return AbortCommand(**data) + return AbortCommand.model_validate(data) else: # For other command types, use base class - return GraphEngineCommand(**data) + return GraphEngineCommand.model_validate(data) except (ValueError, TypeError): return None diff --git a/api/core/workflow/nodes/end/end_node.py b/api/core/workflow/nodes/end/end_node.py index 2bdfe4efce..7ec74084d0 100644 --- a/api/core/workflow/nodes/end/end_node.py +++ b/api/core/workflow/nodes/end/end_node.py @@ -16,7 +16,7 @@ class EndNode(Node): _node_data: EndNodeData def init_node_data(self, data: Mapping[str, Any]): - self._node_data = EndNodeData(**data) + self._node_data = EndNodeData.model_validate(data) def _get_error_strategy(self) -> ErrorStrategy | None: return self._node_data.error_strategy diff --git a/api/core/workflow/nodes/iteration/iteration_start_node.py b/api/core/workflow/nodes/iteration/iteration_start_node.py index 80f39ccebc..90b7f4539b 100644 --- a/api/core/workflow/nodes/iteration/iteration_start_node.py +++ b/api/core/workflow/nodes/iteration/iteration_start_node.py @@ -18,7 +18,7 @@ class IterationStartNode(Node): _node_data: IterationStartNodeData def init_node_data(self, data: Mapping[str, Any]): - self._node_data = IterationStartNodeData(**data) + self._node_data = IterationStartNodeData.model_validate(data) def _get_error_strategy(self) -> ErrorStrategy | None: return self._node_data.error_strategy diff --git 
a/api/core/workflow/nodes/list_operator/node.py b/api/core/workflow/nodes/list_operator/node.py index 3243b22d44..180eb2ad90 100644 --- a/api/core/workflow/nodes/list_operator/node.py +++ b/api/core/workflow/nodes/list_operator/node.py @@ -41,7 +41,7 @@ class ListOperatorNode(Node): _node_data: ListOperatorNodeData def init_node_data(self, data: Mapping[str, Any]): - self._node_data = ListOperatorNodeData(**data) + self._node_data = ListOperatorNodeData.model_validate(data) def _get_error_strategy(self) -> ErrorStrategy | None: return self._node_data.error_strategy diff --git a/api/core/workflow/nodes/loop/loop_end_node.py b/api/core/workflow/nodes/loop/loop_end_node.py index 38aef06d24..e5bce1230c 100644 --- a/api/core/workflow/nodes/loop/loop_end_node.py +++ b/api/core/workflow/nodes/loop/loop_end_node.py @@ -18,7 +18,7 @@ class LoopEndNode(Node): _node_data: LoopEndNodeData def init_node_data(self, data: Mapping[str, Any]): - self._node_data = LoopEndNodeData(**data) + self._node_data = LoopEndNodeData.model_validate(data) def _get_error_strategy(self) -> ErrorStrategy | None: return self._node_data.error_strategy diff --git a/api/core/workflow/nodes/loop/loop_start_node.py b/api/core/workflow/nodes/loop/loop_start_node.py index e777a8cbe9..e065dc90a0 100644 --- a/api/core/workflow/nodes/loop/loop_start_node.py +++ b/api/core/workflow/nodes/loop/loop_start_node.py @@ -18,7 +18,7 @@ class LoopStartNode(Node): _node_data: LoopStartNodeData def init_node_data(self, data: Mapping[str, Any]): - self._node_data = LoopStartNodeData(**data) + self._node_data = LoopStartNodeData.model_validate(data) def _get_error_strategy(self) -> ErrorStrategy | None: return self._node_data.error_strategy diff --git a/api/core/workflow/nodes/start/start_node.py b/api/core/workflow/nodes/start/start_node.py index 2f33c54128..3b134be1a1 100644 --- a/api/core/workflow/nodes/start/start_node.py +++ b/api/core/workflow/nodes/start/start_node.py @@ -16,7 +16,7 @@ class StartNode(Node): _node_data: StartNodeData def init_node_data(self, data: Mapping[str, Any]): - self._node_data = StartNodeData(**data) + self._node_data = StartNodeData.model_validate(data) def _get_error_strategy(self) -> ErrorStrategy | None: return self._node_data.error_strategy diff --git a/api/core/workflow/nodes/variable_aggregator/variable_aggregator_node.py b/api/core/workflow/nodes/variable_aggregator/variable_aggregator_node.py index be00d55937..0ac0d3d858 100644 --- a/api/core/workflow/nodes/variable_aggregator/variable_aggregator_node.py +++ b/api/core/workflow/nodes/variable_aggregator/variable_aggregator_node.py @@ -15,7 +15,7 @@ class VariableAggregatorNode(Node): _node_data: VariableAssignerNodeData def init_node_data(self, data: Mapping[str, Any]): - self._node_data = VariableAssignerNodeData(**data) + self._node_data = VariableAssignerNodeData.model_validate(data) def _get_error_strategy(self) -> ErrorStrategy | None: return self._node_data.error_strategy diff --git a/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py b/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py index 6c9fc0bf1d..21b73b76b5 100644 --- a/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py +++ b/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py @@ -14,7 +14,7 @@ def handle(sender, **kwargs): for node_data in synced_draft_workflow.graph_dict.get("nodes", []): if node_data.get("data", {}).get("type") == NodeType.TOOL.value: try: - 
tool_entity = ToolEntity(**node_data["data"]) + tool_entity = ToolEntity.model_validate(node_data["data"]) tool_runtime = ToolManager.get_tool_runtime( provider_type=tool_entity.provider_type, provider_id=tool_entity.provider_id, diff --git a/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py b/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py index 898ec1f153..7605d4082c 100644 --- a/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py +++ b/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py @@ -61,7 +61,7 @@ def get_dataset_ids_from_workflow(published_workflow: Workflow) -> set[str]: for node in knowledge_retrieval_nodes: try: - node_data = KnowledgeRetrievalNodeData(**node.get("data", {})) + node_data = KnowledgeRetrievalNodeData.model_validate(node.get("data", {})) dataset_ids.update(dataset_id for dataset_id in node_data.dataset_ids) except Exception: continue diff --git a/api/models/dataset.py b/api/models/dataset.py index 25ebe14738..6263c04365 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -754,7 +754,7 @@ class DocumentSegment(Base): if process_rule and process_rule.mode == "hierarchical": rules_dict = process_rule.rules_dict if rules_dict: - rules = Rule(**rules_dict) + rules = Rule.model_validate(rules_dict) if rules.parent_mode and rules.parent_mode != ParentMode.FULL_DOC: child_chunks = ( db.session.query(ChildChunk) @@ -772,7 +772,7 @@ class DocumentSegment(Base): if process_rule and process_rule.mode == "hierarchical": rules_dict = process_rule.rules_dict if rules_dict: - rules = Rule(**rules_dict) + rules = Rule.model_validate(rules_dict) if rules.parent_mode: child_chunks = ( db.session.query(ChildChunk) diff --git a/api/models/tools.py b/api/models/tools.py index 7211d7aa3a..d581d588a4 100644 --- a/api/models/tools.py +++ b/api/models/tools.py @@ -152,7 +152,7 @@ class ApiToolProvider(Base): def tools(self) -> list["ApiToolBundle"]: from core.tools.entities.tool_bundle import ApiToolBundle - return [ApiToolBundle(**tool) for tool in json.loads(self.tools_str)] + return [ApiToolBundle.model_validate(tool) for tool in json.loads(self.tools_str)] @property def credentials(self) -> dict[str, Any]: @@ -242,7 +242,10 @@ class WorkflowToolProvider(Base): def parameter_configurations(self) -> list["WorkflowToolParameterConfiguration"]: from core.tools.entities.tool_entities import WorkflowToolParameterConfiguration - return [WorkflowToolParameterConfiguration(**config) for config in json.loads(self.parameter_configuration)] + return [ + WorkflowToolParameterConfiguration.model_validate(config) + for config in json.loads(self.parameter_configuration) + ] @property def app(self) -> App | None: @@ -312,7 +315,7 @@ class MCPToolProvider(Base): def mcp_tools(self) -> list["MCPTool"]: from core.mcp.types import Tool as MCPTool - return [MCPTool(**tool) for tool in json.loads(self.tools)] + return [MCPTool.model_validate(tool) for tool in json.loads(self.tools)] @property def provider_icon(self) -> Mapping[str, str] | str: @@ -552,4 +555,4 @@ class DeprecatedPublishedAppTool(Base): def description_i18n(self) -> "I18nObject": from core.tools.entities.common_entities import I18nObject - return I18nObject(**json.loads(self.description)) + return I18nObject.model_validate(json.loads(self.description)) diff --git a/api/services/app_dsl_service.py b/api/services/app_dsl_service.py index 8701fe4f4e..129e3b0492 100644 --- 
a/api/services/app_dsl_service.py +++ b/api/services/app_dsl_service.py @@ -659,31 +659,31 @@ class AppDslService: typ = node.get("data", {}).get("type") match typ: case NodeType.TOOL.value: - tool_entity = ToolNodeData(**node["data"]) + tool_entity = ToolNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_tool_dependency(tool_entity.provider_id), ) case NodeType.LLM.value: - llm_entity = LLMNodeData(**node["data"]) + llm_entity = LLMNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency(llm_entity.model.provider), ) case NodeType.QUESTION_CLASSIFIER.value: - question_classifier_entity = QuestionClassifierNodeData(**node["data"]) + question_classifier_entity = QuestionClassifierNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency( question_classifier_entity.model.provider ), ) case NodeType.PARAMETER_EXTRACTOR.value: - parameter_extractor_entity = ParameterExtractorNodeData(**node["data"]) + parameter_extractor_entity = ParameterExtractorNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency( parameter_extractor_entity.model.provider ), ) case NodeType.KNOWLEDGE_RETRIEVAL.value: - knowledge_retrieval_entity = KnowledgeRetrievalNodeData(**node["data"]) + knowledge_retrieval_entity = KnowledgeRetrievalNodeData.model_validate(node["data"]) if knowledge_retrieval_entity.retrieval_mode == "multiple": if knowledge_retrieval_entity.multiple_retrieval_config: if ( @@ -773,7 +773,7 @@ class AppDslService: """ Returns the leaked dependencies in current workspace """ - dependencies = [PluginDependency(**dep) for dep in dsl_dependencies] + dependencies = [PluginDependency.model_validate(dep) for dep in dsl_dependencies] if not dependencies: return [] diff --git a/api/services/enterprise/enterprise_service.py b/api/services/enterprise/enterprise_service.py index f8612456d6..4fbf33fd6f 100644 --- a/api/services/enterprise/enterprise_service.py +++ b/api/services/enterprise/enterprise_service.py @@ -70,7 +70,7 @@ class EnterpriseService: data = EnterpriseRequest.send_request("GET", "/webapp/access-mode/id", params=params) if not data: raise ValueError("No data found.") - return WebAppSettings(**data) + return WebAppSettings.model_validate(data) @classmethod def batch_get_app_access_mode_by_id(cls, app_ids: list[str]) -> dict[str, WebAppSettings]: @@ -100,7 +100,7 @@ class EnterpriseService: data = EnterpriseRequest.send_request("GET", "/webapp/access-mode/code", params=params) if not data: raise ValueError("No data found.") - return WebAppSettings(**data) + return WebAppSettings.model_validate(data) @classmethod def update_app_access_mode(cls, app_id: str, access_mode: str): diff --git a/api/services/entities/model_provider_entities.py b/api/services/entities/model_provider_entities.py index 49d48f044c..0f5151919f 100644 --- a/api/services/entities/model_provider_entities.py +++ b/api/services/entities/model_provider_entities.py @@ -1,6 +1,7 @@ +from collections.abc import Sequence from enum import Enum -from pydantic import BaseModel, ConfigDict +from pydantic import BaseModel, ConfigDict, model_validator from configs import dify_config from core.entities.model_entities import ( @@ -71,7 +72,7 @@ class ProviderResponse(BaseModel): icon_large: I18nObject | None = None background: str | None = None help: ProviderHelpEntity | None = None - supported_model_types: 
list[ModelType] + supported_model_types: Sequence[ModelType] configurate_methods: list[ConfigurateMethod] provider_credential_schema: ProviderCredentialSchema | None = None model_credential_schema: ModelCredentialSchema | None = None @@ -82,9 +83,8 @@ class ProviderResponse(BaseModel): # pydantic configs model_config = ConfigDict(protected_namespaces=()) - def __init__(self, **data): - super().__init__(**data) - + @model_validator(mode="after") + def _(self): url_prefix = ( dify_config.CONSOLE_API_URL + f"/console/api/workspaces/{self.tenant_id}/model-providers/{self.provider}" ) @@ -97,6 +97,7 @@ class ProviderResponse(BaseModel): self.icon_large = I18nObject( en_US=f"{url_prefix}/icon_large/en_US", zh_Hans=f"{url_prefix}/icon_large/zh_Hans" ) + return self class ProviderWithModelsResponse(BaseModel): @@ -112,9 +113,8 @@ class ProviderWithModelsResponse(BaseModel): status: CustomConfigurationStatus models: list[ProviderModelWithStatusEntity] - def __init__(self, **data): - super().__init__(**data) - + @model_validator(mode="after") + def _(self): url_prefix = ( dify_config.CONSOLE_API_URL + f"/console/api/workspaces/{self.tenant_id}/model-providers/{self.provider}" ) @@ -127,6 +127,7 @@ class ProviderWithModelsResponse(BaseModel): self.icon_large = I18nObject( en_US=f"{url_prefix}/icon_large/en_US", zh_Hans=f"{url_prefix}/icon_large/zh_Hans" ) + return self class SimpleProviderEntityResponse(SimpleProviderEntity): @@ -136,9 +137,8 @@ class SimpleProviderEntityResponse(SimpleProviderEntity): tenant_id: str - def __init__(self, **data): - super().__init__(**data) - + @model_validator(mode="after") + def _(self): url_prefix = ( dify_config.CONSOLE_API_URL + f"/console/api/workspaces/{self.tenant_id}/model-providers/{self.provider}" ) @@ -151,6 +151,7 @@ class SimpleProviderEntityResponse(SimpleProviderEntity): self.icon_large = I18nObject( en_US=f"{url_prefix}/icon_large/en_US", zh_Hans=f"{url_prefix}/icon_large/zh_Hans" ) + return self class DefaultModelResponse(BaseModel): diff --git a/api/services/hit_testing_service.py b/api/services/hit_testing_service.py index 00ec3babf3..6174ce8b3b 100644 --- a/api/services/hit_testing_service.py +++ b/api/services/hit_testing_service.py @@ -46,7 +46,7 @@ class HitTestingService: from core.app.app_config.entities import MetadataFilteringCondition - metadata_filtering_conditions = MetadataFilteringCondition(**metadata_filtering_conditions) + metadata_filtering_conditions = MetadataFilteringCondition.model_validate(metadata_filtering_conditions) metadata_filter_document_ids, metadata_condition = dataset_retrieval.get_metadata_filter_condition( dataset_ids=[dataset.id], diff --git a/api/services/ops_service.py b/api/services/ops_service.py index c214640653..b4b23b8360 100644 --- a/api/services/ops_service.py +++ b/api/services/ops_service.py @@ -123,7 +123,7 @@ class OpsService: config_class: type[BaseTracingConfig] = provider_config["config_class"] other_keys: list[str] = provider_config["other_keys"] - default_config_instance: BaseTracingConfig = config_class(**tracing_config) + default_config_instance = config_class.model_validate(tracing_config) for key in other_keys: if key in tracing_config and tracing_config[key] == "": tracing_config[key] = getattr(default_config_instance, key, None) diff --git a/api/services/plugin/plugin_migration.py b/api/services/plugin/plugin_migration.py index 99946d8fa9..76bb9a57f9 100644 --- a/api/services/plugin/plugin_migration.py +++ b/api/services/plugin/plugin_migration.py @@ -269,7 +269,7 @@ class PluginMigration: 
for tool in agent_config["tools"]: if isinstance(tool, dict): try: - tool_entity = AgentToolEntity(**tool) + tool_entity = AgentToolEntity.model_validate(tool) if ( tool_entity.provider_type == ToolProviderType.BUILT_IN.value and tool_entity.provider_id not in excluded_providers diff --git a/api/services/rag_pipeline/rag_pipeline.py b/api/services/rag_pipeline/rag_pipeline.py index fdaaa73bcc..3ced0fd9ec 100644 --- a/api/services/rag_pipeline/rag_pipeline.py +++ b/api/services/rag_pipeline/rag_pipeline.py @@ -358,7 +358,7 @@ class RagPipelineService: for node in nodes: if node.get("data", {}).get("type") == "knowledge-index": knowledge_configuration = node.get("data", {}) - knowledge_configuration = KnowledgeConfiguration(**knowledge_configuration) + knowledge_configuration = KnowledgeConfiguration.model_validate(knowledge_configuration) # update dataset dataset = pipeline.retrieve_dataset(session=session) diff --git a/api/services/rag_pipeline/rag_pipeline_dsl_service.py b/api/services/rag_pipeline/rag_pipeline_dsl_service.py index f74de1bcab..9dede31ab4 100644 --- a/api/services/rag_pipeline/rag_pipeline_dsl_service.py +++ b/api/services/rag_pipeline/rag_pipeline_dsl_service.py @@ -288,7 +288,7 @@ class RagPipelineDslService: dataset_id = None for node in nodes: if node.get("data", {}).get("type") == "knowledge-index": - knowledge_configuration = KnowledgeConfiguration(**node.get("data", {})) + knowledge_configuration = KnowledgeConfiguration.model_validate(node.get("data", {})) if ( dataset and pipeline.is_published @@ -426,7 +426,7 @@ class RagPipelineDslService: dataset_id = None for node in nodes: if node.get("data", {}).get("type") == "knowledge-index": - knowledge_configuration = KnowledgeConfiguration(**node.get("data", {})) + knowledge_configuration = KnowledgeConfiguration.model_validate(node.get("data", {})) if not dataset: dataset = Dataset( tenant_id=account.current_tenant_id, @@ -734,35 +734,35 @@ class RagPipelineDslService: typ = node.get("data", {}).get("type") match typ: case NodeType.TOOL.value: - tool_entity = ToolNodeData(**node["data"]) + tool_entity = ToolNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_tool_dependency(tool_entity.provider_id), ) case NodeType.DATASOURCE.value: - datasource_entity = DatasourceNodeData(**node["data"]) + datasource_entity = DatasourceNodeData.model_validate(node["data"]) if datasource_entity.provider_type != "local_file": dependencies.append(datasource_entity.plugin_id) case NodeType.LLM.value: - llm_entity = LLMNodeData(**node["data"]) + llm_entity = LLMNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency(llm_entity.model.provider), ) case NodeType.QUESTION_CLASSIFIER.value: - question_classifier_entity = QuestionClassifierNodeData(**node["data"]) + question_classifier_entity = QuestionClassifierNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency( question_classifier_entity.model.provider ), ) case NodeType.PARAMETER_EXTRACTOR.value: - parameter_extractor_entity = ParameterExtractorNodeData(**node["data"]) + parameter_extractor_entity = ParameterExtractorNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency( parameter_extractor_entity.model.provider ), ) case NodeType.KNOWLEDGE_INDEX.value: - knowledge_index_entity = KnowledgeConfiguration(**node["data"]) + knowledge_index_entity = 
KnowledgeConfiguration.model_validate(node["data"]) if knowledge_index_entity.indexing_technique == "high_quality": if knowledge_index_entity.embedding_model_provider: dependencies.append( @@ -783,7 +783,7 @@ class RagPipelineDslService: ), ) case NodeType.KNOWLEDGE_RETRIEVAL.value: - knowledge_retrieval_entity = KnowledgeRetrievalNodeData(**node["data"]) + knowledge_retrieval_entity = KnowledgeRetrievalNodeData.model_validate(node["data"]) if knowledge_retrieval_entity.retrieval_mode == "multiple": if knowledge_retrieval_entity.multiple_retrieval_config: if ( @@ -873,7 +873,7 @@ class RagPipelineDslService: """ Returns the leaked dependencies in current workspace """ - dependencies = [PluginDependency(**dep) for dep in dsl_dependencies] + dependencies = [PluginDependency.model_validate(dep) for dep in dsl_dependencies] if not dependencies: return [] diff --git a/api/services/rag_pipeline/rag_pipeline_transform_service.py b/api/services/rag_pipeline/rag_pipeline_transform_service.py index 3d5a85b57f..b4425d85a6 100644 --- a/api/services/rag_pipeline/rag_pipeline_transform_service.py +++ b/api/services/rag_pipeline/rag_pipeline_transform_service.py @@ -156,13 +156,13 @@ class RagPipelineTransformService: self, dataset: Dataset, doc_form: str, indexing_technique: str | None, retrieval_model: dict, node: dict ): knowledge_configuration_dict = node.get("data", {}) - knowledge_configuration = KnowledgeConfiguration(**knowledge_configuration_dict) + knowledge_configuration = KnowledgeConfiguration.model_validate(knowledge_configuration_dict) if indexing_technique == "high_quality": knowledge_configuration.embedding_model = dataset.embedding_model knowledge_configuration.embedding_model_provider = dataset.embedding_model_provider if retrieval_model: - retrieval_setting = RetrievalSetting(**retrieval_model) + retrieval_setting = RetrievalSetting.model_validate(retrieval_model) if indexing_technique == "economy": retrieval_setting.search_method = "keyword_search" knowledge_configuration.retrieval_model = retrieval_setting diff --git a/api/services/tools/tools_transform_service.py b/api/services/tools/tools_transform_service.py index 6b36ed0eb7..7ae1b97b30 100644 --- a/api/services/tools/tools_transform_service.py +++ b/api/services/tools/tools_transform_service.py @@ -242,7 +242,7 @@ class ToolTransformService: is_team_authorization=db_provider.authed, server_url=db_provider.masked_server_url, tools=ToolTransformService.mcp_tool_to_user_tool( - db_provider, [MCPTool(**tool) for tool in json.loads(db_provider.tools)] + db_provider, [MCPTool.model_validate(tool) for tool in json.loads(db_provider.tools)] ), updated_at=int(db_provider.updated_at.timestamp()), label=I18nObject(en_US=db_provider.name, zh_Hans=db_provider.name), @@ -387,6 +387,7 @@ class ToolTransformService: labels=labels or [], ) else: + assert tool.operation_id return ToolApiEntity( author=tool.author, name=tool.operation_id or "", diff --git a/api/tasks/ops_trace_task.py b/api/tasks/ops_trace_task.py index 7b254ac3b5..72e3b42ca7 100644 --- a/api/tasks/ops_trace_task.py +++ b/api/tasks/ops_trace_task.py @@ -36,7 +36,7 @@ def process_trace_tasks(file_info): if trace_info.get("workflow_data"): trace_info["workflow_data"] = WorkflowRun.from_dict(data=trace_info["workflow_data"]) if trace_info.get("documents"): - trace_info["documents"] = [Document(**doc) for doc in trace_info["documents"]] + trace_info["documents"] = [Document.model_validate(doc) for doc in trace_info["documents"]] try: if trace_instance: diff --git 
a/api/tasks/rag_pipeline/priority_rag_pipeline_run_task.py b/api/tasks/rag_pipeline/priority_rag_pipeline_run_task.py index a2c99554f1..4171656131 100644 --- a/api/tasks/rag_pipeline/priority_rag_pipeline_run_task.py +++ b/api/tasks/rag_pipeline/priority_rag_pipeline_run_task.py @@ -79,7 +79,7 @@ def run_single_rag_pipeline_task(rag_pipeline_invoke_entity: Mapping[str, Any], # Create Flask application context for this thread with flask_app.app_context(): try: - rag_pipeline_invoke_entity_model = RagPipelineInvokeEntity(**rag_pipeline_invoke_entity) + rag_pipeline_invoke_entity_model = RagPipelineInvokeEntity.model_validate(rag_pipeline_invoke_entity) user_id = rag_pipeline_invoke_entity_model.user_id tenant_id = rag_pipeline_invoke_entity_model.tenant_id pipeline_id = rag_pipeline_invoke_entity_model.pipeline_id @@ -112,7 +112,7 @@ def run_single_rag_pipeline_task(rag_pipeline_invoke_entity: Mapping[str, Any], workflow_execution_id = str(uuid.uuid4()) # Create application generate entity from dict - entity = RagPipelineGenerateEntity(**application_generate_entity) + entity = RagPipelineGenerateEntity.model_validate(application_generate_entity) # Create workflow repositories session_factory = sessionmaker(bind=db.engine, expire_on_commit=False) diff --git a/api/tasks/rag_pipeline/rag_pipeline_run_task.py b/api/tasks/rag_pipeline/rag_pipeline_run_task.py index 4e00f072bf..90ebe80daf 100644 --- a/api/tasks/rag_pipeline/rag_pipeline_run_task.py +++ b/api/tasks/rag_pipeline/rag_pipeline_run_task.py @@ -100,7 +100,7 @@ def run_single_rag_pipeline_task(rag_pipeline_invoke_entity: Mapping[str, Any], # Create Flask application context for this thread with flask_app.app_context(): try: - rag_pipeline_invoke_entity_model = RagPipelineInvokeEntity(**rag_pipeline_invoke_entity) + rag_pipeline_invoke_entity_model = RagPipelineInvokeEntity.model_validate(rag_pipeline_invoke_entity) user_id = rag_pipeline_invoke_entity_model.user_id tenant_id = rag_pipeline_invoke_entity_model.tenant_id pipeline_id = rag_pipeline_invoke_entity_model.pipeline_id @@ -133,7 +133,7 @@ def run_single_rag_pipeline_task(rag_pipeline_invoke_entity: Mapping[str, Any], workflow_execution_id = str(uuid.uuid4()) # Create application generate entity from dict - entity = RagPipelineGenerateEntity(**application_generate_entity) + entity = RagPipelineGenerateEntity.model_validate(application_generate_entity) # Create workflow repositories session_factory = sessionmaker(bind=db.engine, expire_on_commit=False) diff --git a/api/tests/integration_tests/tools/api_tool/test_api_tool.py b/api/tests/integration_tests/tools/api_tool/test_api_tool.py index 7c1a200c8f..e637530265 100644 --- a/api/tests/integration_tests/tools/api_tool/test_api_tool.py +++ b/api/tests/integration_tests/tools/api_tool/test_api_tool.py @@ -36,7 +36,7 @@ def test_api_tool(setup_http_mock): entity=ToolEntity( identity=ToolIdentity(provider="", author="", name="", label=I18nObject(en_US="test tool")), ), - api_bundle=ApiToolBundle(**tool_bundle), + api_bundle=ApiToolBundle.model_validate(tool_bundle), runtime=ToolRuntime(tenant_id="", credentials={"auth_type": "none"}), provider_id="test_tool", ) diff --git a/api/tests/unit_tests/core/rag/datasource/vdb/milvus/test_milvus.py b/api/tests/unit_tests/core/rag/datasource/vdb/milvus/test_milvus.py index 48cc8a7e1c..fb2ddfe162 100644 --- a/api/tests/unit_tests/core/rag/datasource/vdb/milvus/test_milvus.py +++ b/api/tests/unit_tests/core/rag/datasource/vdb/milvus/test_milvus.py @@ -11,8 +11,8 @@ def test_default_value(): config 
= valid_config.copy() del config[key] with pytest.raises(ValidationError) as e: - MilvusConfig(**config) + MilvusConfig.model_validate(config) assert e.value.errors()[0]["msg"] == f"Value error, config MILVUS_{key.upper()} is required" - config = MilvusConfig(**valid_config) + config = MilvusConfig.model_validate(valid_config) assert config.database == "default" diff --git a/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py b/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py index b942614232..55fe62ca43 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py @@ -35,7 +35,7 @@ def list_operator_node(): "extract_by": ExtractConfig(enabled=False, serial="1"), "title": "Test Title", } - node_data = ListOperatorNodeData(**config) + node_data = ListOperatorNodeData.model_validate(config) node_config = { "id": "test_node_id", "data": node_data.model_dump(), diff --git a/api/tests/unit_tests/core/workflow/nodes/test_question_classifier_node.py b/api/tests/unit_tests/core/workflow/nodes/test_question_classifier_node.py index f990280c5f..47ef289ef3 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_question_classifier_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_question_classifier_node.py @@ -17,7 +17,7 @@ def test_init_question_classifier_node_data(): "vision": {"enabled": True, "configs": {"variable_selector": ["image"], "detail": "low"}}, } - node_data = QuestionClassifierNodeData(**data) + node_data = QuestionClassifierNodeData.model_validate(data) assert node_data.query_variable_selector == ["id", "name"] assert node_data.model.provider == "openai" @@ -49,7 +49,7 @@ def test_init_question_classifier_node_data_without_vision_config(): }, } - node_data = QuestionClassifierNodeData(**data) + node_data = QuestionClassifierNodeData.model_validate(data) assert node_data.query_variable_selector == ["id", "name"] assert node_data.model.provider == "openai" diff --git a/api/tests/unit_tests/core/workflow/test_system_variable.py b/api/tests/unit_tests/core/workflow/test_system_variable.py index 11d788ed79..3ae5edb383 100644 --- a/api/tests/unit_tests/core/workflow/test_system_variable.py +++ b/api/tests/unit_tests/core/workflow/test_system_variable.py @@ -46,7 +46,7 @@ class TestSystemVariableSerialization: def test_basic_deserialization(self): """Test successful deserialization from JSON structure with all fields correctly mapped.""" # Test with complete data - system_var = SystemVariable(**COMPLETE_VALID_DATA) + system_var = SystemVariable.model_validate(COMPLETE_VALID_DATA) # Verify all fields are correctly mapped assert system_var.user_id == COMPLETE_VALID_DATA["user_id"] @@ -59,7 +59,7 @@ class TestSystemVariableSerialization: assert system_var.files == [] # Test with minimal data (only required fields) - minimal_var = SystemVariable(**VALID_BASE_DATA) + minimal_var = SystemVariable.model_validate(VALID_BASE_DATA) assert minimal_var.user_id == VALID_BASE_DATA["user_id"] assert minimal_var.app_id == VALID_BASE_DATA["app_id"] assert minimal_var.workflow_id == VALID_BASE_DATA["workflow_id"] @@ -75,12 +75,12 @@ class TestSystemVariableSerialization: # Test workflow_run_id only (preferred alias) data_run_id = {**VALID_BASE_DATA, "workflow_run_id": workflow_id} - system_var1 = SystemVariable(**data_run_id) + system_var1 = SystemVariable.model_validate(data_run_id) assert system_var1.workflow_execution_id == workflow_id # Test workflow_execution_id only (direct field 
name) data_execution_id = {**VALID_BASE_DATA, "workflow_execution_id": workflow_id} - system_var2 = SystemVariable(**data_execution_id) + system_var2 = SystemVariable.model_validate(data_execution_id) assert system_var2.workflow_execution_id == workflow_id # Test both present - workflow_run_id should take precedence @@ -89,17 +89,17 @@ class TestSystemVariableSerialization: "workflow_execution_id": "should-be-ignored", "workflow_run_id": workflow_id, } - system_var3 = SystemVariable(**data_both) + system_var3 = SystemVariable.model_validate(data_both) assert system_var3.workflow_execution_id == workflow_id # Test neither present - should be None - system_var4 = SystemVariable(**VALID_BASE_DATA) + system_var4 = SystemVariable.model_validate(VALID_BASE_DATA) assert system_var4.workflow_execution_id is None def test_serialization_round_trip(self): """Test that serialize → deserialize produces the same result with alias handling.""" # Create original SystemVariable - original = SystemVariable(**COMPLETE_VALID_DATA) + original = SystemVariable.model_validate(COMPLETE_VALID_DATA) # Serialize to dict serialized = original.model_dump(mode="json") @@ -110,7 +110,7 @@ class TestSystemVariableSerialization: assert serialized["workflow_run_id"] == COMPLETE_VALID_DATA["workflow_run_id"] # Deserialize back - deserialized = SystemVariable(**serialized) + deserialized = SystemVariable.model_validate(serialized) # Verify all fields match after round-trip assert deserialized.user_id == original.user_id @@ -125,7 +125,7 @@ class TestSystemVariableSerialization: def test_json_round_trip(self): """Test JSON serialization/deserialization consistency with proper structure.""" # Create original SystemVariable - original = SystemVariable(**COMPLETE_VALID_DATA) + original = SystemVariable.model_validate(COMPLETE_VALID_DATA) # Serialize to JSON string json_str = original.model_dump_json() @@ -137,7 +137,7 @@ class TestSystemVariableSerialization: assert json_data["workflow_run_id"] == COMPLETE_VALID_DATA["workflow_run_id"] # Deserialize from JSON data - deserialized = SystemVariable(**json_data) + deserialized = SystemVariable.model_validate(json_data) # Verify key fields match after JSON round-trip assert deserialized.workflow_execution_id == original.workflow_execution_id @@ -149,13 +149,13 @@ class TestSystemVariableSerialization: """Test deserialization with File objects in the files field - SystemVariable specific logic.""" # Test with empty files list data_empty = {**VALID_BASE_DATA, "files": []} - system_var_empty = SystemVariable(**data_empty) + system_var_empty = SystemVariable.model_validate(data_empty) assert system_var_empty.files == [] # Test with single File object test_file = create_test_file() data_single = {**VALID_BASE_DATA, "files": [test_file]} - system_var_single = SystemVariable(**data_single) + system_var_single = SystemVariable.model_validate(data_single) assert len(system_var_single.files) == 1 assert system_var_single.files[0].filename == "test.txt" assert system_var_single.files[0].tenant_id == "test-tenant-id" @@ -179,14 +179,14 @@ class TestSystemVariableSerialization: ) data_multiple = {**VALID_BASE_DATA, "files": [file1, file2]} - system_var_multiple = SystemVariable(**data_multiple) + system_var_multiple = SystemVariable.model_validate(data_multiple) assert len(system_var_multiple.files) == 2 assert system_var_multiple.files[0].filename == "doc1.txt" assert system_var_multiple.files[1].filename == "image.jpg" # Verify files field serialization/deserialization serialized = 
system_var_multiple.model_dump(mode="json") - deserialized = SystemVariable(**serialized) + deserialized = SystemVariable.model_validate(serialized) assert len(deserialized.files) == 2 assert deserialized.files[0].filename == "doc1.txt" assert deserialized.files[1].filename == "image.jpg" @@ -197,7 +197,7 @@ class TestSystemVariableSerialization: # Create with workflow_run_id (alias) data_with_alias = {**VALID_BASE_DATA, "workflow_run_id": workflow_id} - system_var = SystemVariable(**data_with_alias) + system_var = SystemVariable.model_validate(data_with_alias) # Serialize and verify alias is used serialized = system_var.model_dump() @@ -205,7 +205,7 @@ class TestSystemVariableSerialization: assert "workflow_execution_id" not in serialized # Deserialize and verify field mapping - deserialized = SystemVariable(**serialized) + deserialized = SystemVariable.model_validate(serialized) assert deserialized.workflow_execution_id == workflow_id # Test JSON serialization path @@ -213,7 +213,7 @@ class TestSystemVariableSerialization: assert json_serialized["workflow_run_id"] == workflow_id assert "workflow_execution_id" not in json_serialized - json_deserialized = SystemVariable(**json_serialized) + json_deserialized = SystemVariable.model_validate(json_serialized) assert json_deserialized.workflow_execution_id == workflow_id def test_model_validator_serialization_logic(self): @@ -222,7 +222,7 @@ class TestSystemVariableSerialization: # Test direct instantiation with workflow_execution_id (should work) data1 = {**VALID_BASE_DATA, "workflow_execution_id": workflow_id} - system_var1 = SystemVariable(**data1) + system_var1 = SystemVariable.model_validate(data1) assert system_var1.workflow_execution_id == workflow_id # Test serialization of the above (should use alias) @@ -236,7 +236,7 @@ class TestSystemVariableSerialization: "workflow_execution_id": "should-be-removed", "workflow_run_id": workflow_id, } - system_var2 = SystemVariable(**data2) + system_var2 = SystemVariable.model_validate(data2) assert system_var2.workflow_execution_id == workflow_id # Verify serialization consistency diff --git a/api/tests/unit_tests/services/test_metadata_bug_complete.py b/api/tests/unit_tests/services/test_metadata_bug_complete.py index 0ff1edc950..31fe9b2868 100644 --- a/api/tests/unit_tests/services/test_metadata_bug_complete.py +++ b/api/tests/unit_tests/services/test_metadata_bug_complete.py @@ -118,7 +118,7 @@ class TestMetadataBugCompleteValidation: # But would crash when trying to create MetadataArgs with pytest.raises((ValueError, TypeError)): - MetadataArgs(**args) + MetadataArgs.model_validate(args) def test_7_end_to_end_validation_layers(self): """Test all validation layers work together correctly.""" @@ -131,7 +131,7 @@ class TestMetadataBugCompleteValidation: valid_data = {"type": "string", "name": "test_metadata"} # Should create valid Pydantic object - metadata_args = MetadataArgs(**valid_data) + metadata_args = MetadataArgs.model_validate(valid_data) assert metadata_args.type == "string" assert metadata_args.name == "test_metadata" diff --git a/api/tests/unit_tests/services/test_metadata_nullable_bug.py b/api/tests/unit_tests/services/test_metadata_nullable_bug.py index d151100cf3..c8cd7025c2 100644 --- a/api/tests/unit_tests/services/test_metadata_nullable_bug.py +++ b/api/tests/unit_tests/services/test_metadata_nullable_bug.py @@ -76,7 +76,7 @@ class TestMetadataNullableBug: # Step 2: Try to create MetadataArgs with None values # This should fail at Pydantic validation level with 
pytest.raises((ValueError, TypeError)): - metadata_args = MetadataArgs(**args) + metadata_args = MetadataArgs.model_validate(args) # Step 3: If we bypass Pydantic (simulating the bug scenario) # Move this outside the request context to avoid Flask-Login issues From 94a07706ec8e2114cedf2258e8f851afbe79fd3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=AF=97=E6=B5=93?= <844670992@qq.com> Date: Fri, 10 Oct 2025 16:32:09 +0800 Subject: [PATCH 14/49] fix: restore None guards for _environment_variables/_conversation_variables getters (#25633) --- api/models/workflow.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/api/models/workflow.py b/api/models/workflow.py index e61005953e..877f571f25 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -360,7 +360,9 @@ class Workflow(Base): @property def environment_variables(self) -> Sequence[StringVariable | IntegerVariable | FloatVariable | SecretVariable]: - # _environment_variables is guaranteed to be non-None due to server_default="{}" + # TODO: find some way to init `self._environment_variables` when instance created. + if self._environment_variables is None: + self._environment_variables = "{}" # Use workflow.tenant_id to avoid relying on request user in background threads tenant_id = self.tenant_id @@ -444,7 +446,9 @@ class Workflow(Base): @property def conversation_variables(self) -> Sequence[Variable]: - # _conversation_variables is guaranteed to be non-None due to server_default="{}" + # TODO: find some way to init `self._conversation_variables` when instance created. + if self._conversation_variables is None: + self._conversation_variables = "{}" variables_dict: dict[str, Any] = json.loads(self._conversation_variables) results = [variable_factory.build_conversation_variable_from_mapping(v) for v in variables_dict.values()]
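Why the restored guard matters: SQLAlchemy's server_default="{}" is applied by the database, so it only takes effect once a row has round-tripped through a flush; a Workflow constructed in memory still carries None in the backing attribute, and json.loads(None) raises TypeError. A minimal, self-contained sketch of the failure mode and the guard — hypothetical standalone classes, not the actual ORM model:

    import json

    class BrokenWorkflow:
        # Mirrors a freshly constructed ORM object: the server-side default
        # has not been applied yet, so the column attribute is still None.
        _conversation_variables = None

        @property
        def conversation_variables(self):
            # json.loads(None) raises TypeError ("the JSON object must be
            # str, bytes or bytearray, not NoneType")
            return json.loads(self._conversation_variables)

    class GuardedWorkflow(BrokenWorkflow):
        @property
        def conversation_variables(self):
            # The None guard restored by the patch above
            if self._conversation_variables is None:
                self._conversation_variables = "{}"
            return json.loads(self._conversation_variables)

    print(GuardedWorkflow().conversation_variables)  # {} instead of a TypeError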
From a90b60c36fe257bd0ced58c98dcf8a960234cc64 Mon Sep 17 00:00:00 2001 From: znn Date: Fri, 10 Oct 2025 14:30:03 +0530 Subject: [PATCH 15/49] removing horus eye and adding mcp icon (#25323) Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> Co-authored-by: crazywoola <427733928@qq.com> --- .../plugins/card/base/card-icon.tsx | 3 +++ web/app/components/tools/mcp/modal.tsx | 5 ++++- web/utils/mcp.ts | 22 +++++++++++++++++++ 3 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 web/utils/mcp.ts diff --git a/web/app/components/plugins/card/base/card-icon.tsx b/web/app/components/plugins/card/base/card-icon.tsx index 7f7468ece2..b4c052c13c 100644 --- a/web/app/components/plugins/card/base/card-icon.tsx +++ b/web/app/components/plugins/card/base/card-icon.tsx @@ -1,6 +1,8 @@ import { RiCheckLine, RiCloseLine } from '@remixicon/react' +import { Mcp } from '@/app/components/base/icons/src/vender/other' import AppIcon from '@/app/components/base/app-icon' import cn from '@/utils/classnames' +import { shouldUseMcpIcon } from '@/utils/mcp' const iconSizeMap = { xs: 'w-4 h-4 text-base', @@ -35,6 +37,7 @@ const Icon = ({ icon={src.content} background={src.background} className='rounded-md' + innerIcon={shouldUseMcpIcon(src) ? <Mcp /> : undefined} />
)
diff --git a/web/app/components/tools/mcp/modal.tsx b/web/app/components/tools/mcp/modal.tsx index 1a12b3b3e9..1d888c57e8 100644 --- a/web/app/components/tools/mcp/modal.tsx +++ b/web/app/components/tools/mcp/modal.tsx @@ -3,6 +3,7 @@ import React, { useRef, useState } from 'react' import { useTranslation } from 'react-i18next' import { getDomain } from 'tldts' import { RiCloseLine, RiEditLine } from '@remixicon/react' +import { Mcp } from '@/app/components/base/icons/src/vender/other' import AppIconPicker from '@/app/components/base/app-icon-picker' import type { AppIconSelection } from '@/app/components/base/app-icon-picker' import AppIcon from '@/app/components/base/app-icon' @@ -17,6 +18,7 @@ import Toast from '@/app/components/base/toast' import { uploadRemoteFileInfo } from '@/service/common' import cn from '@/utils/classnames' import { useHover } from 'ahooks' +import { shouldUseMcpIconForAppIcon } from '@/utils/mcp' export type DuplicateAppModalProps = { data?: ToolWithProvider @@ -35,7 +37,7 @@ export type DuplicateAppModalProps = { onHide: () => void } -const DEFAULT_ICON = { type: 'emoji', icon: '🧿', background: '#EFF1F5' } +const DEFAULT_ICON = { type: 'emoji', icon: '🔗', background: '#6366F1' } const extractFileId = (url: string) => { const match = url.match(/files\/(.+?)\/file-preview/) return match ? match[1] : null @@ -208,6 +210,7 @@ const MCPModal = ({ icon={appIcon.type === 'emoji' ? appIcon.icon : appIcon.fileId} background={appIcon.type === 'emoji' ? appIcon.background : undefined} imageUrl={appIcon.type === 'image' ? appIcon.url : undefined} + innerIcon={shouldUseMcpIconForAppIcon(appIcon.type, appIcon.type === 'emoji' ? appIcon.icon : '') ? <Mcp /> : undefined} size='xxl' className='relative cursor-pointer rounded-2xl' coverElement={ diff --git a/web/utils/mcp.ts b/web/utils/mcp.ts new file mode 100644 index 0000000000..dcbb63ee8a --- /dev/null +++ b/web/utils/mcp.ts @@ -0,0 +1,22 @@ +/** + * MCP (Model Context Protocol) utility functions + */ + +/** + * Determines if the MCP icon should be used based on the icon source + * @param src - The icon source, can be a string URL or an object with content and background + * @returns true if the MCP icon should be used (when it's an emoji object with 🔗 content) + */ +export const shouldUseMcpIcon = (src: any): boolean => { + return typeof src === 'object' && src?.content === '🔗' +} + +/** + * Checks if an app icon should use the MCP icon + * @param iconType - The type of icon ('emoji' | 'image') + * @param icon - The icon content (emoji or file ID) + * @returns true if the MCP icon should be used + */ +export const shouldUseMcpIconForAppIcon = (iconType: string, icon: string): boolean => { + return iconType === 'emoji' && icon === '🔗' +} From 65b832c46c5227ab89ceaba86fade66b71998dd4 Mon Sep 17 00:00:00 2001 From: znn Date: Fri, 10 Oct 2025 14:37:25 +0530 Subject: [PATCH 16/49] pan and zoom during workflow execution (#24254) --- web/app/components/workflow/index.tsx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/web/app/components/workflow/index.tsx b/web/app/components/workflow/index.tsx index 75c4d51390..b289cafefd 100644 --- a/web/app/components/workflow/index.tsx +++ b/web/app/components/workflow/index.tsx @@ -412,10 +412,10 @@ export const Workflow: FC = memo(({ nodesFocusable={!nodesReadOnly} edgesFocusable={!nodesReadOnly} panOnScroll={false} - panOnDrag={controlMode === ControlMode.Hand && !workflowReadOnly} - zoomOnPinch={!workflowReadOnly} - zoomOnScroll={!workflowReadOnly} -
zoomOnDoubleClick={!workflowReadOnly} + panOnDrag={controlMode === ControlMode.Hand} + zoomOnPinch={true} + zoomOnScroll={true} + zoomOnDoubleClick={true} isValidConnection={isValidConnection} selectionKeyCode={null} selectionMode={SelectionMode.Partial} From d0dd81cf84289fb4df6893deba86a8ff8f73423a Mon Sep 17 00:00:00 2001 From: Bowen Liang Date: Fri, 10 Oct 2025 18:10:23 +0800 Subject: [PATCH 17/49] chore: bump ruff to 0.14 (#26063) --- api/.ruff.toml | 1 - .../dataset_multi_retriever_tool.py | 2 +- api/pyproject.toml | 2 +- api/uv.lock | 42 +++++++++---------- 4 files changed, 23 insertions(+), 24 deletions(-) diff --git a/api/.ruff.toml b/api/.ruff.toml index 643bc063a1..5a29e1d8fa 100644 --- a/api/.ruff.toml +++ b/api/.ruff.toml @@ -81,7 +81,6 @@ ignore = [ "SIM113", # enumerate-for-loop "SIM117", # multiple-with-statements "SIM210", # if-expr-with-true-false - "UP038", # deprecated and not recommended by Ruff, https://docs.astral.sh/ruff/rules/non-pep604-isinstance/ ] [lint.per-file-ignores] diff --git a/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py b/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py index 75c0c6738e..cce5ec6b1b 100644 --- a/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py +++ b/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py @@ -126,7 +126,7 @@ class DatasetMultiRetrieverTool(DatasetRetrieverBaseTool): data_source_type=document.data_source_type, segment_id=segment.id, retriever_from=self.retriever_from, - score=document_score_list.get(segment.index_node_id, None), + score=document_score_list.get(segment.index_node_id), doc_metadata=document.doc_metadata, ) diff --git a/api/pyproject.toml b/api/pyproject.toml index 1f51d60098..e2a50a43f6 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -110,7 +110,7 @@ dev = [ "lxml-stubs~=0.5.1", "ty~=0.0.1a19", "basedpyright~=1.31.0", - "ruff~=0.12.3", + "ruff~=0.14.0", "pytest~=8.3.2", "pytest-benchmark~=4.0.0", "pytest-cov~=4.1.0", diff --git a/api/uv.lock b/api/uv.lock index 21d1f17bad..43db17b06f 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1571,7 +1571,7 @@ dev = [ { name = "pytest-cov", specifier = "~=4.1.0" }, { name = "pytest-env", specifier = "~=1.1.3" }, { name = "pytest-mock", specifier = "~=3.14.0" }, - { name = "ruff", specifier = "~=0.12.3" }, + { name = "ruff", specifier = "~=0.14.0" }, { name = "scipy-stubs", specifier = ">=1.15.3.0" }, { name = "sseclient-py", specifier = ">=1.8.0" }, { name = "testcontainers", specifier = "~=4.10.0" }, @@ -5461,28 +5461,28 @@ wheels = [ [[package]] name = "ruff" -version = "0.12.12" +version = "0.14.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a8/f0/e0965dd709b8cabe6356811c0ee8c096806bb57d20b5019eb4e48a117410/ruff-0.12.12.tar.gz", hash = "sha256:b86cd3415dbe31b3b46a71c598f4c4b2f550346d1ccf6326b347cc0c8fd063d6", size = 5359915, upload-time = "2025-09-04T16:50:18.273Z" } +sdist = { url = "https://files.pythonhosted.org/packages/41/b9/9bd84453ed6dd04688de9b3f3a4146a1698e8faae2ceeccce4e14c67ae17/ruff-0.14.0.tar.gz", hash = "sha256:62ec8969b7510f77945df916de15da55311fade8d6050995ff7f680afe582c57", size = 5452071, upload-time = "2025-10-07T18:21:55.763Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/09/79/8d3d687224d88367b51c7974cec1040c4b015772bfbeffac95face14c04a/ruff-0.12.12-py3-none-linux_armv6l.whl", hash = "sha256:de1c4b916d98ab289818e55ce481e2cacfaad7710b01d1f990c497edf217dafc", size = 12116602, 
upload-time = "2025-09-04T16:49:18.892Z" }, - { url = "https://files.pythonhosted.org/packages/c3/c3/6e599657fe192462f94861a09aae935b869aea8a1da07f47d6eae471397c/ruff-0.12.12-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:7acd6045e87fac75a0b0cdedacf9ab3e1ad9d929d149785903cff9bb69ad9727", size = 12868393, upload-time = "2025-09-04T16:49:23.043Z" }, - { url = "https://files.pythonhosted.org/packages/e8/d2/9e3e40d399abc95336b1843f52fc0daaceb672d0e3c9290a28ff1a96f79d/ruff-0.12.12-py3-none-macosx_11_0_arm64.whl", hash = "sha256:abf4073688d7d6da16611f2f126be86523a8ec4343d15d276c614bda8ec44edb", size = 12036967, upload-time = "2025-09-04T16:49:26.04Z" }, - { url = "https://files.pythonhosted.org/packages/e9/03/6816b2ed08836be272e87107d905f0908be5b4a40c14bfc91043e76631b8/ruff-0.12.12-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:968e77094b1d7a576992ac078557d1439df678a34c6fe02fd979f973af167577", size = 12276038, upload-time = "2025-09-04T16:49:29.056Z" }, - { url = "https://files.pythonhosted.org/packages/9f/d5/707b92a61310edf358a389477eabd8af68f375c0ef858194be97ca5b6069/ruff-0.12.12-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42a67d16e5b1ffc6d21c5f67851e0e769517fb57a8ebad1d0781b30888aa704e", size = 11901110, upload-time = "2025-09-04T16:49:32.07Z" }, - { url = "https://files.pythonhosted.org/packages/9d/3d/f8b1038f4b9822e26ec3d5b49cf2bc313e3c1564cceb4c1a42820bf74853/ruff-0.12.12-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b216ec0a0674e4b1214dcc998a5088e54eaf39417327b19ffefba1c4a1e4971e", size = 13668352, upload-time = "2025-09-04T16:49:35.148Z" }, - { url = "https://files.pythonhosted.org/packages/98/0e/91421368ae6c4f3765dd41a150f760c5f725516028a6be30e58255e3c668/ruff-0.12.12-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:59f909c0fdd8f1dcdbfed0b9569b8bf428cf144bec87d9de298dcd4723f5bee8", size = 14638365, upload-time = "2025-09-04T16:49:38.892Z" }, - { url = "https://files.pythonhosted.org/packages/74/5d/88f3f06a142f58ecc8ecb0c2fe0b82343e2a2b04dcd098809f717cf74b6c/ruff-0.12.12-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ac93d87047e765336f0c18eacad51dad0c1c33c9df7484c40f98e1d773876f5", size = 14060812, upload-time = "2025-09-04T16:49:42.732Z" }, - { url = "https://files.pythonhosted.org/packages/13/fc/8962e7ddd2e81863d5c92400820f650b86f97ff919c59836fbc4c1a6d84c/ruff-0.12.12-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:01543c137fd3650d322922e8b14cc133b8ea734617c4891c5a9fccf4bfc9aa92", size = 13050208, upload-time = "2025-09-04T16:49:46.434Z" }, - { url = "https://files.pythonhosted.org/packages/53/06/8deb52d48a9a624fd37390555d9589e719eac568c020b27e96eed671f25f/ruff-0.12.12-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2afc2fa864197634e549d87fb1e7b6feb01df0a80fd510d6489e1ce8c0b1cc45", size = 13311444, upload-time = "2025-09-04T16:49:49.931Z" }, - { url = "https://files.pythonhosted.org/packages/2a/81/de5a29af7eb8f341f8140867ffb93f82e4fde7256dadee79016ac87c2716/ruff-0.12.12-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:0c0945246f5ad776cb8925e36af2438e66188d2b57d9cf2eed2c382c58b371e5", size = 13279474, upload-time = "2025-09-04T16:49:53.465Z" }, - { url = "https://files.pythonhosted.org/packages/7f/14/d9577fdeaf791737ada1b4f5c6b59c21c3326f3f683229096cccd7674e0c/ruff-0.12.12-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:a0fbafe8c58e37aae28b84a80ba1817f2ea552e9450156018a478bf1fa80f4e4", size = 
12070204, upload-time = "2025-09-04T16:49:56.882Z" }, - { url = "https://files.pythonhosted.org/packages/77/04/a910078284b47fad54506dc0af13839c418ff704e341c176f64e1127e461/ruff-0.12.12-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:b9c456fb2fc8e1282affa932c9e40f5ec31ec9cbb66751a316bd131273b57c23", size = 11880347, upload-time = "2025-09-04T16:49:59.729Z" }, - { url = "https://files.pythonhosted.org/packages/df/58/30185fcb0e89f05e7ea82e5817b47798f7fa7179863f9d9ba6fd4fe1b098/ruff-0.12.12-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5f12856123b0ad0147d90b3961f5c90e7427f9acd4b40050705499c98983f489", size = 12891844, upload-time = "2025-09-04T16:50:02.591Z" }, - { url = "https://files.pythonhosted.org/packages/21/9c/28a8dacce4855e6703dcb8cdf6c1705d0b23dd01d60150786cd55aa93b16/ruff-0.12.12-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:26a1b5a2bf7dd2c47e3b46d077cd9c0fc3b93e6c6cc9ed750bd312ae9dc302ee", size = 13360687, upload-time = "2025-09-04T16:50:05.8Z" }, - { url = "https://files.pythonhosted.org/packages/c8/fa/05b6428a008e60f79546c943e54068316f32ec8ab5c4f73e4563934fbdc7/ruff-0.12.12-py3-none-win32.whl", hash = "sha256:173be2bfc142af07a01e3a759aba6f7791aa47acf3604f610b1c36db888df7b1", size = 12052870, upload-time = "2025-09-04T16:50:09.121Z" }, - { url = "https://files.pythonhosted.org/packages/85/60/d1e335417804df452589271818749d061b22772b87efda88354cf35cdb7a/ruff-0.12.12-py3-none-win_amd64.whl", hash = "sha256:e99620bf01884e5f38611934c09dd194eb665b0109104acae3ba6102b600fd0d", size = 13178016, upload-time = "2025-09-04T16:50:12.559Z" }, - { url = "https://files.pythonhosted.org/packages/28/7e/61c42657f6e4614a4258f1c3b0c5b93adc4d1f8575f5229d1906b483099b/ruff-0.12.12-py3-none-win_arm64.whl", hash = "sha256:2a8199cab4ce4d72d158319b63370abf60991495fb733db96cd923a34c52d093", size = 12256762, upload-time = "2025-09-04T16:50:15.737Z" }, + { url = "https://files.pythonhosted.org/packages/3a/4e/79d463a5f80654e93fa653ebfb98e0becc3f0e7cf6219c9ddedf1e197072/ruff-0.14.0-py3-none-linux_armv6l.whl", hash = "sha256:58e15bffa7054299becf4bab8a1187062c6f8cafbe9f6e39e0d5aface455d6b3", size = 12494532, upload-time = "2025-10-07T18:21:00.373Z" }, + { url = "https://files.pythonhosted.org/packages/ee/40/e2392f445ed8e02aa6105d49db4bfff01957379064c30f4811c3bf38aece/ruff-0.14.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:838d1b065f4df676b7c9957992f2304e41ead7a50a568185efd404297d5701e8", size = 13160768, upload-time = "2025-10-07T18:21:04.73Z" }, + { url = "https://files.pythonhosted.org/packages/75/da/2a656ea7c6b9bd14c7209918268dd40e1e6cea65f4bb9880eaaa43b055cd/ruff-0.14.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:703799d059ba50f745605b04638fa7e9682cc3da084b2092feee63500ff3d9b8", size = 12363376, upload-time = "2025-10-07T18:21:07.833Z" }, + { url = "https://files.pythonhosted.org/packages/42/e2/1ffef5a1875add82416ff388fcb7ea8b22a53be67a638487937aea81af27/ruff-0.14.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ba9a8925e90f861502f7d974cc60e18ca29c72bb0ee8bfeabb6ade35a3abde7", size = 12608055, upload-time = "2025-10-07T18:21:10.72Z" }, + { url = "https://files.pythonhosted.org/packages/4a/32/986725199d7cee510d9f1dfdf95bf1efc5fa9dd714d0d85c1fb1f6be3bc3/ruff-0.14.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e41f785498bd200ffc276eb9e1570c019c1d907b07cfb081092c8ad51975bbe7", size = 12318544, upload-time = "2025-10-07T18:21:13.741Z" }, + { url = 
"https://files.pythonhosted.org/packages/9a/ed/4969cefd53315164c94eaf4da7cfba1f267dc275b0abdd593d11c90829a3/ruff-0.14.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30a58c087aef4584c193aebf2700f0fbcfc1e77b89c7385e3139956fa90434e2", size = 14001280, upload-time = "2025-10-07T18:21:16.411Z" }, + { url = "https://files.pythonhosted.org/packages/ab/ad/96c1fc9f8854c37681c9613d825925c7f24ca1acfc62a4eb3896b50bacd2/ruff-0.14.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:f8d07350bc7af0a5ce8812b7d5c1a7293cf02476752f23fdfc500d24b79b783c", size = 15027286, upload-time = "2025-10-07T18:21:19.577Z" }, + { url = "https://files.pythonhosted.org/packages/b3/00/1426978f97df4fe331074baf69615f579dc4e7c37bb4c6f57c2aad80c87f/ruff-0.14.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eec3bbbf3a7d5482b5c1f42d5fc972774d71d107d447919fca620b0be3e3b75e", size = 14451506, upload-time = "2025-10-07T18:21:22.779Z" }, + { url = "https://files.pythonhosted.org/packages/58/d5/9c1cea6e493c0cf0647674cca26b579ea9d2a213b74b5c195fbeb9678e15/ruff-0.14.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16b68e183a0e28e5c176d51004aaa40559e8f90065a10a559176713fcf435206", size = 13437384, upload-time = "2025-10-07T18:21:25.758Z" }, + { url = "https://files.pythonhosted.org/packages/29/b4/4cd6a4331e999fc05d9d77729c95503f99eae3ba1160469f2b64866964e3/ruff-0.14.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb732d17db2e945cfcbbc52af0143eda1da36ca8ae25083dd4f66f1542fdf82e", size = 13447976, upload-time = "2025-10-07T18:21:28.83Z" }, + { url = "https://files.pythonhosted.org/packages/3b/c0/ac42f546d07e4f49f62332576cb845d45c67cf5610d1851254e341d563b6/ruff-0.14.0-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:c958f66ab884b7873e72df38dcabee03d556a8f2ee1b8538ee1c2bbd619883dd", size = 13682850, upload-time = "2025-10-07T18:21:31.842Z" }, + { url = "https://files.pythonhosted.org/packages/5f/c4/4b0c9bcadd45b4c29fe1af9c5d1dc0ca87b4021665dfbe1c4688d407aa20/ruff-0.14.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:7eb0499a2e01f6e0c285afc5bac43ab380cbfc17cd43a2e1dd10ec97d6f2c42d", size = 12449825, upload-time = "2025-10-07T18:21:35.074Z" }, + { url = "https://files.pythonhosted.org/packages/4b/a8/e2e76288e6c16540fa820d148d83e55f15e994d852485f221b9524514730/ruff-0.14.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:4c63b2d99fafa05efca0ab198fd48fa6030d57e4423df3f18e03aa62518c565f", size = 12272599, upload-time = "2025-10-07T18:21:38.08Z" }, + { url = "https://files.pythonhosted.org/packages/18/14/e2815d8eff847391af632b22422b8207704222ff575dec8d044f9ab779b2/ruff-0.14.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:668fce701b7a222f3f5327f86909db2bbe99c30877c8001ff934c5413812ac02", size = 13193828, upload-time = "2025-10-07T18:21:41.216Z" }, + { url = "https://files.pythonhosted.org/packages/44/c6/61ccc2987cf0aecc588ff8f3212dea64840770e60d78f5606cd7dc34de32/ruff-0.14.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a86bf575e05cb68dcb34e4c7dfe1064d44d3f0c04bbc0491949092192b515296", size = 13628617, upload-time = "2025-10-07T18:21:44.04Z" }, + { url = "https://files.pythonhosted.org/packages/73/e6/03b882225a1b0627e75339b420883dc3c90707a8917d2284abef7a58d317/ruff-0.14.0-py3-none-win32.whl", hash = "sha256:7450a243d7125d1c032cb4b93d9625dea46c8c42b4f06c6b709baac168e10543", size = 12367872, upload-time = "2025-10-07T18:21:46.67Z" }, + { url = 
"https://files.pythonhosted.org/packages/41/77/56cf9cf01ea0bfcc662de72540812e5ba8e9563f33ef3d37ab2174892c47/ruff-0.14.0-py3-none-win_amd64.whl", hash = "sha256:ea95da28cd874c4d9c922b39381cbd69cb7e7b49c21b8152b014bd4f52acddc2", size = 13464628, upload-time = "2025-10-07T18:21:50.318Z" }, + { url = "https://files.pythonhosted.org/packages/c6/2a/65880dfd0e13f7f13a775998f34703674a4554906167dce02daf7865b954/ruff-0.14.0-py3-none-win_arm64.whl", hash = "sha256:f42c9495f5c13ff841b1da4cb3c2a42075409592825dada7c5885c2c844ac730", size = 12565142, upload-time = "2025-10-07T18:21:53.577Z" }, ] [[package]] From 78f09801b56032f98c007f79b05a61d71274737c Mon Sep 17 00:00:00 2001 From: Arno Ren Date: Fri, 10 Oct 2025 23:37:10 +0800 Subject: [PATCH 18/49] fix: #26668 restore manual tool parameter values (#26733) Co-authored-by: renzeyu1 Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/core/workflow/nodes/agent/agent_node.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/api/core/workflow/nodes/agent/agent_node.py b/api/core/workflow/nodes/agent/agent_node.py index a01686a4b8..972823b4d9 100644 --- a/api/core/workflow/nodes/agent/agent_node.py +++ b/api/core/workflow/nodes/agent/agent_node.py @@ -252,7 +252,10 @@ class AgentNode(Node): if all(isinstance(v, dict) for _, v in parameters.items()): params = {} for key, param in parameters.items(): - if param.get("auto", ParamsAutoGenerated.OPEN.value) == ParamsAutoGenerated.CLOSE.value: + if param.get("auto", ParamsAutoGenerated.OPEN.value) in ( + ParamsAutoGenerated.CLOSE.value, + 0, + ): value_param = param.get("value", {}) params[key] = value_param.get("value", "") if value_param is not None else None else: From fbc745764a4895c3d5e5866a627808a3de6eac6d Mon Sep 17 00:00:00 2001 From: GuanMu Date: Fri, 10 Oct 2025 23:37:40 +0800 Subject: [PATCH 19/49] chore: update packageManager version in package.json to pnpm@10.18.2 (#26731) --- web/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/package.json b/web/package.json index 62cccf0610..2a8972ee80 100644 --- a/web/package.json +++ b/web/package.json @@ -2,7 +2,7 @@ "name": "dify-web", "version": "1.9.1", "private": true, - "packageManager": "pnpm@10.17.1", + "packageManager": "pnpm@10.18.2", "engines": { "node": ">=v22.11.0" }, From 6157c67cfec80936bf20f798126051962e2daa6a Mon Sep 17 00:00:00 2001 From: heyszt <270985384@qq.com> Date: Fri, 10 Oct 2025 23:38:45 +0800 Subject: [PATCH 20/49] fix: sync aliyun icon SVG files (#26719) --- .../assets/public/tracing/aliyun-icon-big.svg | 2 +- .../assets/public/tracing/aliyun-icon.svg | 2 +- .../icons/src/public/tracing/AliyunIcon.json | 243 +++++++++--------- .../src/public/tracing/AliyunIconBig.json | 145 ++++++----- 4 files changed, 205 insertions(+), 187 deletions(-) diff --git a/web/app/components/base/icons/assets/public/tracing/aliyun-icon-big.svg b/web/app/components/base/icons/assets/public/tracing/aliyun-icon-big.svg index 210a1cd00b..d82b9bc1e4 100644 --- a/web/app/components/base/icons/assets/public/tracing/aliyun-icon-big.svg +++ b/web/app/components/base/icons/assets/public/tracing/aliyun-icon-big.svg @@ -1 +1 @@ - + \ No newline at end of file diff --git a/web/app/components/base/icons/assets/public/tracing/aliyun-icon.svg b/web/app/components/base/icons/assets/public/tracing/aliyun-icon.svg index 6f7645301c..cee8858471 100644 --- a/web/app/components/base/icons/assets/public/tracing/aliyun-icon.svg +++ 
b/web/app/components/base/icons/assets/public/tracing/aliyun-icon.svg @@ -1 +1 @@ - + \ No newline at end of file diff --git a/web/app/components/base/icons/src/public/tracing/AliyunIcon.json b/web/app/components/base/icons/src/public/tracing/AliyunIcon.json index 5cbb52c237..154aeff8c6 100644 --- a/web/app/components/base/icons/src/public/tracing/AliyunIcon.json +++ b/web/app/components/base/icons/src/public/tracing/AliyunIcon.json @@ -1,118 +1,129 @@ { - "icon": { - "type": "element", - "isRootNode": true, - "name": "svg", - "attributes": { - "xmlns": "http://www.w3.org/2000/svg", - "xmlns:xlink": "http://www.w3.org/1999/xlink", - "fill": "none", - "version": "1.1", - "width": "65", - "height": "16", - "viewBox": "0 0 65 16" - }, - "children": [ - { - "type": "element", - "name": "defs", - "children": [ - { - "type": "element", - "name": "clipPath", - "attributes": { - "id": "master_svg0_42_34281" - }, - "children": [ - { - "type": "element", - "name": "rect", - "attributes": { - "x": "0", - "y": "0", - "width": "19", - "height": "16", - "rx": "0" - } - } - ] - } - ] - }, - { - "type": "element", - "name": "g", - "children": [ - { - "type": "element", - "name": "g", - "attributes": { - "clip-path": "url(#master_svg0_42_34281)" - }, - "children": [ - { - "type": "element", - "name": "g", - "children": [ - { - "type": "element", - "name": "g", - "children": [ - { - "type": "element", - "name": "path", - "attributes": { - "d": "M4.06862,14.6667C3.79213,14.6667,3.45463,14.5688,3.05614,14.373C2.97908,14.3351,2.92692,14.3105,2.89968,14.2992C2.33193,14.0628,1.82911,13.7294,1.39123,13.2989C0.463742,12.3871,0,11.2874,0,10C0,8.71258,0.463742,7.61293,1.39123,6.70107C2.16172,5.94358,3.06404,5.50073,4.09819,5.37252C4.23172,3.98276,4.81755,2.77756,5.85569,1.75693C7.04708,0.585642,8.4857,0,10.1716,0C11.5256,0,12.743,0.396982,13.8239,1.19095C14.8847,1.97019,15.61,2.97855,16,4.21604L14.7045,4.61063C14.4016,3.64918,13.8374,2.86532,13.0121,2.25905C12.1719,1.64191,11.2251,1.33333,10.1716,1.33333C8.8602,1.33333,7.74124,1.7888,6.81467,2.69974C5.88811,3.61067,5.42483,4.71076,5.42483,6L5.42483,6.66667L4.74673,6.66667C3.81172,6.66667,3.01288,6.99242,2.35021,7.64393C1.68754,8.2954,1.35621,9.08076,1.35621,10C1.35621,10.9192,1.68754,11.7046,2.35021,12.3561C2.66354,12.6641,3.02298,12.9026,3.42852,13.0714C3.48193,13.0937,3.55988,13.13,3.66237,13.1803C3.87004,13.2823,4.00545,13.3333,4.06862,13.3333L4.06862,14.6667Z", - "fill-rule": "evenodd", - "fill": "#FF6A00", - "fill-opacity": "1" - } - } - ] - }, - { - "type": "element", - "name": "g", - "children": [ - { - "type": "element", - "name": "path", - "attributes": { - "d": 
"M13.458613505859375,7.779393492279053C12.975613505859375,7.717463492279053,12.484813505859375,7.686503492279053,11.993983505859376,7.686503492279053C11.152583505859376,7.686503492279053,10.303403505859375,7.779393492279053,9.493183505859374,7.941943492279053C8.682953505859375,8.104503492279052,7.903893505859375,8.359943492279053,7.155983505859375,8.654083492279053C6.657383505859375,8.870823492279053,6.158783505859375,9.128843492279053,5.660181505859375,9.428153492279053C5.332974751859375,9.621673492279053,5.239486705859375,10.070633492279054,5.434253505859375,10.395743492279053L7.413073505859375,13.298533492279052C7.639003505859375,13.623603492279052,8.090863505859375,13.716463492279052,8.418073505859375,13.523003492279052C8.547913505859375,13.435263492279052,8.763453505859374,13.326893492279053,9.064693505859374,13.197863492279053C9.516553505859374,13.004333492279052,9.976203505859374,12.872733492279053,10.459223505859375,12.779863492279052C10.942243505859375,12.679263492279052,11.433053505859375,12.617333492279052,11.955023505859375,12.617333492279052L13.380683505859375,7.810353492279052L13.458613505859375,7.779393492279053ZM15.273813505859374,8.135463492279053L15.016753505859375,5.333333492279053L13.458613505859375,7.787133492279053C13.817013505859375,7.818093492279052,14.144213505859375,7.880023492279053,14.494743505859375,7.949683492279053C14.494743505859375,7.944523492279053,14.754433505859375,8.006453492279054,15.273813505859374,8.135463492279053ZM12.064083505859376,12.648273492279053L11.378523505859375,14.970463492279054L12.515943505859376,16.00003349227905L14.074083505859376,15.643933492279054L14.525943505859376,13.027603492279052C14.198743505859374,12.934663492279054,13.879283505859375,12.834063492279054,13.552083505859375,12.772133492279053C13.069083505859375,12.717933492279052,12.578283505859375,12.648273492279053,12.064083505859376,12.648273492279053ZM18.327743505859374,9.428153492279053C17.829143505859374,9.128843492279053,17.330543505859374,8.870823492279053,16.831943505859375,8.654083492279053C16.348943505859374,8.460573492279053,15.826943505859376,8.267053492279054,15.305013505859375,8.135463492279053L15.305013505859375,8.267053492279054L14.463613505859374,13.043063492279053C14.596083505859376,13.105003492279053,14.759683505859375,13.135933492279053,14.884283505859376,13.205603492279053C15.185523505859376,13.334623492279052,15.401043505859375,13.443003492279052,15.530943505859375,13.530733492279053C15.858143505859376,13.724263492279054,16.341143505859375,13.623603492279052,16.535943505859375,13.306263492279053L18.514743505859375,10.403483492279053C18.779643505859376,10.039673492279054,18.686143505859377,9.621673492279053,18.327743505859374,9.428153492279053Z", - "fill": "#FF6A00", - "fill-opacity": "1" - } - } - ] - } - ] - } - ] - }, - { - "type": "element", - "name": "g", - "children": [ - { - "type": "element", - "name": "g", - "children": [ - { - "type": "element", - "name": "path", - "attributes": { - "d": 
"M25.044,2.668L34.676,2.668L34.676,4.04L25.044,4.04L25.044,2.668ZM29.958,7.82Q29.258,9.066,28.355,10.41Q27.451999999999998,11.754,26.92,12.3L32.506,11.782Q31.442,10.158,30.84,9.346L32.058,8.562000000000001Q32.786,9.5,33.843,11.012Q34.9,12.524,35.516,13.546L34.214,14.526Q33.891999999999996,13.966,33.346000000000004,13.098Q32.016,13.182,29.734,13.378Q27.451999999999998,13.574,25.87,13.742L25.31,13.812L24.834,13.882L24.414,12.468Q24.708,12.37,24.862000000000002,12.265Q25.016,12.16,25.121,12.069Q25.226,11.978,25.268,11.936Q25.912,11.32,26.724,10.165Q27.536,9.01,28.208,7.82L23.854,7.82L23.854,6.434L35.866,6.434L35.866,7.82L29.958,7.82ZM42.656,7.414L42.656,8.576L41.354,8.576L41.354,1.814L42.656,1.87L42.656,7.036Q43.314,5.846,43.888000000000005,4.369Q44.462,2.892,44.714,1.6600000000000001L46.086,1.981999Q45.96,2.612,45.722,3.41L49.6,3.41L49.6,4.74L45.274,4.74Q44.616,6.56,43.706,8.128L42.656,7.414ZM38.596000000000004,2.346L39.884,2.402L39.884,8.212L38.596000000000004,8.212L38.596000000000004,2.346ZM46.184,4.964Q46.688,5.356,47.5,6.175Q48.312,6.994,48.788,7.582L47.751999999999995,8.59Q47.346000000000004,8.072,46.576,7.274Q45.806,6.476,45.204,5.902L46.184,4.964ZM48.41,9.01L48.41,12.706L49.894,12.706L49.894,13.966L37.391999999999996,13.966L37.391999999999996,12.706L38.848,12.706L38.848,9.01L48.41,9.01ZM41.676,10.256L40.164,10.256L40.164,12.706L41.676,12.706L41.676,10.256ZM42.908,12.706L44.364000000000004,12.706L44.364000000000004,10.256L42.908,10.256L42.908,12.706ZM45.582,12.706L47.108000000000004,12.706L47.108000000000004,10.256L45.582,10.256L45.582,12.706ZM54.906,7.456L55.116,8.394L54.178,8.814L54.178,12.818Q54.178,13.434,54.031,13.735Q53.884,14.036,53.534,14.162Q53.184,14.288,52.456,14.358L51.867999999999995,14.414L51.476,13.084L52.162,13.028Q52.512,13,52.652,12.958Q52.792,12.916,52.841,12.797Q52.89,12.678,52.89,12.384L52.89,9.36Q51.980000000000004,9.724,51.322,9.948L51.013999999999996,8.576Q51.798,8.324,52.89,7.876L52.89,5.524L51.42,5.524L51.42,4.166L52.89,4.166L52.89,1.7579989999999999L54.178,1.814L54.178,4.166L55.214,4.166L55.214,5.524L54.178,5.524L54.178,7.316L54.808,7.022L54.906,7.456ZM56.894,4.5440000000000005L56.894,6.098L55.564,6.098L55.564,3.256L58.686,3.256Q58.42,2.346,58.266,1.9260000000000002L59.624,1.7579989999999999Q59.848,2.276,60.142,3.256L63.25,3.256L63.25,6.098L61.962,6.098L61.962,4.5440000000000005L56.894,4.5440000000000005ZM59.008,6.322Q58.392,6.938,57.685,7.512Q56.978,8.086,55.956,8.841999999999999L55.242,7.764Q56.824,6.728,58.126,5.37L59.008,6.322ZM60.422,5.37Q61.024,5.776,62.095,6.581Q63.166,7.386,63.656,7.806L62.942,8.982Q62.368,8.45,61.332,7.652Q60.296,6.854,59.666,6.434L60.422,5.37ZM62.592,10.256L60.044,10.256L60.044,12.566L63.572,12.566L63.572,13.826L55.144,13.826L55.144,12.566L58.63,12.566L58.63,10.256L56.054,10.256L56.054,8.982L62.592,8.982L62.592,10.256Z", - "fill": "#FF6A00", - "fill-opacity": "1" - } - } - ] - } - ] - } - ] - } - ] - }, - "name": "AliyunIcon" + "icon": { + "type": "element", + "isRootNode": true, + "name": "svg", + "attributes": { + "xmlns": "http://www.w3.org/2000/svg", + "xmlns:xlink": "http://www.w3.org/1999/xlink", + "fill": "none", + "version": "1.1", + "width": "65", + "height": "16", + "viewBox": "0 0 65 16" + }, + "children": [ + { + "type": "element", + "name": "defs", + "attributes": {}, + "children": [ + { + "type": "element", + "name": "clipPath", + "attributes": { + "id": "master_svg0_42_34281" + }, + "children": [ + { + "type": "element", + "name": "rect", + "attributes": { + "x": "0", + "y": "0", + "width": "19", + "height": "16", + 
"rx": "0" + }, + "children": [] + } + ] + } + ] + }, + { + "type": "element", + "name": "g", + "attributes": {}, + "children": [ + { + "type": "element", + "name": "g", + "attributes": { + "clip-path": "url(#master_svg0_42_34281)" + }, + "children": [ + { + "type": "element", + "name": "g", + "attributes": {}, + "children": [ + { + "type": "element", + "name": "g", + "attributes": {}, + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": "M4.06862,14.6667C3.79213,14.6667,3.45463,14.5688,3.05614,14.373C2.97908,14.3351,2.92692,14.3105,2.89968,14.2992C2.33193,14.0628,1.82911,13.7294,1.39123,13.2989C0.463742,12.3871,0,11.2874,0,10C0,8.71258,0.463742,7.61293,1.39123,6.70107C2.16172,5.94358,3.06404,5.50073,4.09819,5.37252C4.23172,3.98276,4.81755,2.77756,5.85569,1.75693C7.04708,0.585642,8.4857,0,10.1716,0C11.5256,0,12.743,0.396982,13.8239,1.19095C14.8847,1.97019,15.61,2.97855,16,4.21604L14.7045,4.61063C14.4016,3.64918,13.8374,2.86532,13.0121,2.25905C12.1719,1.64191,11.2251,1.33333,10.1716,1.33333C8.8602,1.33333,7.74124,1.7888,6.81467,2.69974C5.88811,3.61067,5.42483,4.71076,5.42483,6L5.42483,6.66667L4.74673,6.66667C3.81172,6.66667,3.01288,6.99242,2.35021,7.64393C1.68754,8.2954,1.35621,9.08076,1.35621,10C1.35621,10.9192,1.68754,11.7046,2.35021,12.3561C2.66354,12.6641,3.02298,12.9026,3.42852,13.0714C3.48193,13.0937,3.55988,13.13,3.66237,13.1803C3.87004,13.2823,4.00545,13.3333,4.06862,13.3333L4.06862,14.6667Z", + "fill-rule": "evenodd", + "fill": "#FF6A00", + "fill-opacity": "1" + }, + "children": [] + } + ] + }, + { + "type": "element", + "name": "g", + "attributes": {}, + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": "M13.458613505859375,7.779393492279053C12.975613505859375,7.717463492279053,12.484813505859375,7.686503492279053,11.993983505859376,7.686503492279053C11.152583505859376,7.686503492279053,10.303403505859375,7.779393492279053,9.493183505859374,7.941943492279053C8.682953505859375,8.104503492279052,7.903893505859375,8.359943492279053,7.155983505859375,8.654083492279053C6.657383505859375,8.870823492279053,6.158783505859375,9.128843492279053,5.660181505859375,9.428153492279053C5.332974751859375,9.621673492279053,5.239486705859375,10.070633492279054,5.434253505859375,10.395743492279053L7.413073505859375,13.298533492279052C7.639003505859375,13.623603492279052,8.090863505859375,13.716463492279052,8.418073505859375,13.523003492279052C8.547913505859375,13.435263492279052,8.763453505859374,13.326893492279053,9.064693505859374,13.197863492279053C9.516553505859374,13.004333492279052,9.976203505859374,12.872733492279053,10.459223505859375,12.779863492279052C10.942243505859375,12.679263492279052,11.433053505859375,12.617333492279052,11.955023505859375,12.617333492279052L13.380683505859375,7.810353492279052L13.458613505859375,7.779393492279053ZM15.273813505859374,8.135463492279053L15.016753505859375,5.333333492279053L13.458613505859375,7.787133492279053C13.817013505859375,7.818093492279052,14.144213505859375,7.880023492279053,14.494743505859375,7.949683492279053C14.494743505859375,7.944523492279053,14.754433505859375,8.006453492279054,15.273813505859374,8.135463492279053ZM12.064083505859376,12.648273492279053L11.378523505859375,14.970463492279054L12.515943505859376,16.00003349227905L14.074083505859376,15.643933492279054L14.525943505859376,13.027603492279052C14.198743505859374,12.934663492279054,13.879283505859375,12.834063492279054,13.552083505859375,12.772133492279053C13.069083505859375,12.717933492279052,12.578283505859375,12.6482734922
79053,12.064083505859376,12.648273492279053ZM18.327743505859374,9.428153492279053C17.829143505859374,9.128843492279053,17.330543505859374,8.870823492279053,16.831943505859375,8.654083492279053C16.348943505859374,8.460573492279053,15.826943505859376,8.267053492279054,15.305013505859375,8.135463492279053L15.305013505859375,8.267053492279054L14.463613505859374,13.043063492279053C14.596083505859376,13.105003492279053,14.759683505859375,13.135933492279053,14.884283505859376,13.205603492279053C15.185523505859376,13.334623492279052,15.401043505859375,13.443003492279052,15.530943505859375,13.530733492279053C15.858143505859376,13.724263492279054,16.341143505859375,13.623603492279052,16.535943505859375,13.306263492279053L18.514743505859375,10.403483492279053C18.779643505859376,10.039673492279054,18.686143505859377,9.621673492279053,18.327743505859374,9.428153492279053Z", + "fill": "#FF6A00", + "fill-opacity": "1" + }, + "children": [] + } + ] + } + ] + } + ] + }, + { + "type": "element", + "name": "g", + "attributes": {}, + "children": [ + { + "type": "element", + "name": "g", + "attributes": {}, + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": "M25.044,2.668L34.676,2.668L34.676,4.04L25.044,4.04L25.044,2.668ZM29.958,7.82Q29.258,9.066,28.355,10.41Q27.451999999999998,11.754,26.92,12.3L32.506,11.782Q31.442,10.158,30.84,9.346L32.058,8.562000000000001Q32.786,9.5,33.843,11.012Q34.9,12.524,35.516,13.546L34.214,14.526Q33.891999999999996,13.966,33.346000000000004,13.098Q32.016,13.182,29.734,13.378Q27.451999999999998,13.574,25.87,13.742L25.31,13.812L24.834,13.882L24.414,12.468Q24.708,12.37,24.862000000000002,12.265Q25.016,12.16,25.121,12.069Q25.226,11.978,25.268,11.936Q25.912,11.32,26.724,10.165Q27.536,9.01,28.208,7.82L23.854,7.82L23.854,6.434L35.866,6.434L35.866,7.82L29.958,7.82ZM42.656,7.414L42.656,8.576L41.354,8.576L41.354,1.814L42.656,1.87L42.656,7.036Q43.314,5.846,43.888000000000005,4.369Q44.462,2.892,44.714,1.6600000000000001L46.086,1.981999Q45.96,2.612,45.722,3.41L49.6,3.41L49.6,4.74L45.274,4.74Q44.616,6.56,43.706,8.128L42.656,7.414ZM38.596000000000004,2.346L39.884,2.402L39.884,8.212L38.596000000000004,8.212L38.596000000000004,2.346ZM46.184,4.964Q46.688,5.356,47.5,6.175Q48.312,6.994,48.788,7.582L47.751999999999995,8.59Q47.346000000000004,8.072,46.576,7.274Q45.806,6.476,45.204,5.902L46.184,4.964ZM48.41,9.01L48.41,12.706L49.894,12.706L49.894,13.966L37.391999999999996,13.966L37.391999999999996,12.706L38.848,12.706L38.848,9.01L48.41,9.01ZM41.676,10.256L40.164,10.256L40.164,12.706L41.676,12.706L41.676,10.256ZM42.908,12.706L44.364000000000004,12.706L44.364000000000004,10.256L42.908,10.256L42.908,12.706ZM45.582,12.706L47.108000000000004,12.706L47.108000000000004,10.256L45.582,10.256L45.582,12.706ZM54.906,7.456L55.116,8.394L54.178,8.814L54.178,12.818Q54.178,13.434,54.031,13.735Q53.884,14.036,53.534,14.162Q53.184,14.288,52.456,14.358L51.867999999999995,14.414L51.476,13.084L52.162,13.028Q52.512,13,52.652,12.958Q52.792,12.916,52.841,12.797Q52.89,12.678,52.89,12.384L52.89,9.36Q51.980000000000004,9.724,51.322,9.948L51.013999999999996,8.576Q51.798,8.324,52.89,7.876L52.89,5.524L51.42,5.524L51.42,4.166L52.89,4.166L52.89,1.7579989999999999L54.178,1.814L54.178,4.166L55.214,4.166L55.214,5.524L54.178,5.524L54.178,7.316L54.808,7.022L54.906,7.456ZM56.894,4.5440000000000005L56.894,6.098L55.564,6.098L55.564,3.256L58.686,3.256Q58.42,2.346,58.266,1.9260000000000002L59.624,1.7579989999999999Q59.848,2.276,60.142,3.256L63.25,3.256L63.25,6.098L61.962,6.098L61.962,4.5440000000000005L56.894,4.5440000
000000005ZM59.008,6.322Q58.392,6.938,57.685,7.512Q56.978,8.086,55.956,8.841999999999999L55.242,7.764Q56.824,6.728,58.126,5.37L59.008,6.322ZM60.422,5.37Q61.024,5.776,62.095,6.581Q63.166,7.386,63.656,7.806L62.942,8.982Q62.368,8.45,61.332,7.652Q60.296,6.854,59.666,6.434L60.422,5.37ZM62.592,10.256L60.044,10.256L60.044,12.566L63.572,12.566L63.572,13.826L55.144,13.826L55.144,12.566L58.63,12.566L58.63,10.256L56.054,10.256L56.054,8.982L62.592,8.982L62.592,10.256Z", + "fill": "#FF6A00", + "fill-opacity": "1" + }, + "children": [] + } + ] + } + ] + } + ] + } + ] + }, + "name": "AliyunIcon" } diff --git a/web/app/components/base/icons/src/public/tracing/AliyunIconBig.json b/web/app/components/base/icons/src/public/tracing/AliyunIconBig.json index ea60744daf..7ed5166461 100644 --- a/web/app/components/base/icons/src/public/tracing/AliyunIconBig.json +++ b/web/app/components/base/icons/src/public/tracing/AliyunIconBig.json @@ -1,71 +1,78 @@ { - "icon": { - "type": "element", - "isRootNode": true, - "name": "svg", - "attributes": { - "xmlns": "http://www.w3.org/2000/svg", - "xmlns:xlink": "http://www.w3.org/1999/xlink", - "fill": "none", - "version": "1.1", - "width": "96", - "height": "24", - "viewBox": "0 0 96 24" - }, - "children": [ - { - "type": "element", - "name": "g", - "children": [ - { - "type": "element", - "name": "g", - "children": [ - { - "type": "element", - "name": "path", - "attributes": { - "d": "M6.10294,22C5.68819,22,5.18195,21.8532,4.58421,21.5595C4.46861,21.5027,4.39038,21.4658,4.34951,21.4488C3.49789,21.0943,2.74367,20.5941,2.08684,19.9484C0.695613,18.5806,0,16.9311,0,15C0,13.0689,0.695612,11.4194,2.08684,10.0516C3.24259,8.91537,4.59607,8.2511,6.14728,8.05878C6.34758,5.97414,7.22633,4.16634,8.78354,2.63539C10.5706,0.878463,12.7286,0,15.2573,0C17.2884,0,19.1146,0.595472,20.7358,1.78642C22.327,2.95528,23.4151,4.46783,24,6.32406L22.0568,6.91594C21.6024,5.47377,20.7561,4.29798,19.5181,3.38858C18.2579,2.46286,16.8377,2,15.2573,2C13.2903,2,11.6119,2.6832,10.222,4.04961C8.83217,5.41601,8.13725,7.06614,8.13725,9L8.13725,10L7.12009,10C5.71758,10,4.51932,10.4886,3.52532,11.4659C2.53132,12.4431,2.03431,13.6211,2.03431,15C2.03431,16.3789,2.53132,17.5569,3.52532,18.5341C3.99531,18.9962,4.53447,19.3538,5.14278,19.6071C5.2229,19.6405,5.33983,19.695,5.49356,19.7705C5.80505,19.9235,6.00818,20,6.10294,20L6.10294,22Z", - "fill-rule": "evenodd", - "fill": "#FF6A00", - "fill-opacity": "1" - } - } - ] - }, - { - "type": "element", - "name": "g", - "children": [ - { - "type": "element", - "name": "path", - "attributes": { - "d": 
"M20.18796103515625,11.66909C19.46346103515625,11.5762,18.72726103515625,11.52975,17.991011035156248,11.52975C16.728921035156247,11.52975,15.45515103515625,11.66909,14.23981103515625,11.91292C13.02447103515625,12.156749999999999,11.85588103515625,12.539909999999999,10.73402103515625,12.98113C9.98612103515625,13.306239999999999,9.23822103515625,13.69327,8.49031803515625,14.14223C7.99950790415625,14.43251,7.85927603515625,15.10595,8.15142503515625,15.59361L11.11966103515625,19.9478C11.45855103515625,20.4354,12.13634103515625,20.5747,12.627151035156249,20.2845C12.821921035156251,20.152900000000002,13.14523103515625,19.990299999999998,13.59708103515625,19.796799999999998C14.27487103515625,19.506500000000003,14.964341035156249,19.3091,15.68887103515625,19.169800000000002C16.413401035156248,19.018900000000002,17.14962103515625,18.926000000000002,17.93258103515625,18.926000000000002L20.071061035156248,11.715530000000001L20.18796103515625,11.66909ZM22.91076103515625,12.20319L22.525161035156252,8L20.18796103515625,11.6807C20.72556103515625,11.72714,21.21636103515625,11.82003,21.74216103515625,11.92453C21.74216103515625,11.91679,22.13166103515625,12.00968,22.91076103515625,12.20319ZM18.09616103515625,18.9724L17.06782103515625,22.4557L18.773961035156248,24L21.11116103515625,23.465899999999998L21.788961035156248,19.5414C21.298161035156248,19.402,20.81896103515625,19.2511,20.32816103515625,19.1582C19.60366103515625,19.076900000000002,18.86746103515625,18.9724,18.09616103515625,18.9724ZM27.49166103515625,14.14223C26.74376103515625,13.69327,25.99586103515625,13.306239999999999,25.24796103515625,12.98113C24.52346103515625,12.69086,23.74046103515625,12.40058,22.95756103515625,12.20319L22.95756103515625,12.40058L21.69546103515625,19.5646C21.89416103515625,19.6575,22.139561035156248,19.7039,22.32646103515625,19.8084C22.77836103515625,20.0019,23.101661035156248,20.1645,23.29646103515625,20.2961C23.78726103515625,20.586399999999998,24.51176103515625,20.4354,24.80396103515625,19.959400000000002L27.77216103515625,15.605229999999999C28.16946103515625,15.05951,28.02926103515625,14.43251,27.49166103515625,14.14223Z", - "fill": "#FF6A00", - "fill-opacity": "1" - } - } - ] - }, - { - "type": "element", - "name": "g", - "children": [ - { - "type": "element", - "name": "path", - "attributes": { - "d": 
"M35.785,3.8624638671875L50.233000000000004,3.8624638671875L50.233000000000004,5.9204638671875L35.785,5.9204638671875L35.785,3.8624638671875ZM43.156,11.5904638671875Q42.106,13.4594638671875,40.7515,15.4754638671875Q39.397,17.4914638671875,38.599000000000004,18.3104638671875L46.978,17.5334638671875Q45.382,15.0974638671875,44.479,13.8794638671875L46.306,12.7034638671875Q47.397999999999996,14.1104638671875,48.9835,16.3784638671875Q50.569,18.6464638671875,51.492999999999995,20.1794638671875L49.54,21.6494638671875Q49.057,20.8094638671875,48.238,19.5074638671875Q46.243,19.6334638671875,42.82,19.9274638671875Q39.397,20.2214638671875,37.024,20.4734638671875L36.184,20.5784638671875L35.47,20.6834638671875L34.84,18.5624638671875Q35.281,18.4154638671875,35.512,18.2579638671875Q35.743,18.1004638671875,35.9005,17.963963867187502Q36.058,17.8274638671875,36.121,17.7644638671875Q37.087,16.840463867187502,38.305,15.1079638671875Q39.522999999999996,13.3754638671875,40.531,11.5904638671875L34,11.5904638671875L34,9.5114638671875L52.018,9.5114638671875L52.018,11.5904638671875L43.156,11.5904638671875ZM62.203,10.9814638671875L62.203,12.7244638671875L60.25,12.7244638671875L60.25,2.5814638671875L62.203,2.6654638671875L62.203,10.4144638671875Q63.19,8.6294638671875,64.051,6.4139638671875Q64.912,4.1984638671875,65.28999999999999,2.3504638671875L67.348,2.8334628671875Q67.15899999999999,3.7784638671875,66.80199999999999,4.9754638671875L72.619,4.9754638671875L72.619,6.9704638671875L66.13,6.9704638671875Q65.143,9.7004638671875,63.778,12.0524638671875L62.203,10.9814638671875ZM56.113,3.3794638671875L58.045,3.4634638671875L58.045,12.1784638671875L56.113,12.1784638671875L56.113,3.3794638671875ZM67.495,7.3064638671875Q68.251,7.8944638671875,69.469,9.1229638671875Q70.687,10.3514638671875,71.40100000000001,11.2334638671875L69.84700000000001,12.7454638671875Q69.238,11.9684638671875,68.083,10.7714638671875Q66.928,9.5744638671875,66.025,8.7134638671875L67.495,7.3064638671875ZM70.834,13.3754638671875L70.834,18.9194638671875L73.06,18.9194638671875L73.06,20.8094638671875L54.307,20.8094638671875L54.307,18.9194638671875L56.491,18.9194638671875L56.491,13.3754638671875L70.834,13.3754638671875ZM60.733000000000004,15.2444638671875L58.465,15.2444638671875L58.465,18.9194638671875L60.733000000000004,18.9194638671875L60.733000000000004,15.2444638671875ZM62.581,18.9194638671875L64.765,18.9194638671875L64.765,15.2444638671875L62.581,15.2444638671875L62.581,18.9194638671875ZM66.592,18.9194638671875L68.881,18.9194638671875L68.881,15.2444638671875L66.592,15.2444638671875L66.592,18.9194638671875ZM80.578,11.0444638671875L80.893,12.4514638671875L79.48599999999999,13.0814638671875L79.48599999999999,19.0874638671875Q79.48599999999999,20.0114638671875,79.2655,20.4629638671875Q79.045,20.9144638671875,78.52000000000001,21.1034638671875Q77.995,21.2924638671875,76.90299999999999,21.3974638671875L76.021,21.4814638671875L75.43299999999999,19.4864638671875L76.462,19.4024638671875Q76.987,19.3604638671875,77.197,19.2974638671875Q77.407,19.2344638671875,77.4805,19.0559638671875Q77.554,18.8774638671875,77.554,18.4364638671875L77.554,13.9004638671875Q76.189,14.4464638671875,75.202,14.7824638671875L74.74000000000001,12.7244638671875Q75.916,12.3464638671875,77.554,11.6744638671875L77.554,8.1464638671875L75.34899999999999,8.1464638671875L75.34899999999999,6.1094638671875L77.554,6.1094638671875L77.554,2.4974628671875L79.48599999999999,2.5814638671875L79.48599999999999,6.1094638671875L81.03999999999999,6.1094638671875L81.03999999999999,8.1464638671875L79.48599999999999,8.1
464638671875L79.48599999999999,10.8344638671875L80.431,10.3934638671875L80.578,11.0444638671875ZM83.56,6.6764638671875L83.56,9.0074638671875L81.565,9.0074638671875L81.565,4.7444638671875L86.24799999999999,4.7444638671875Q85.84899999999999,3.3794638671875,85.618,2.7494638671875L87.655,2.4974628671875Q87.991,3.2744638671875,88.432,4.7444638671875L93.094,4.7444638671875L93.094,9.0074638671875L91.162,9.0074638671875L91.162,6.6764638671875L83.56,6.6764638671875ZM86.731,9.3434638671875Q85.807,10.2674638671875,84.7465,11.1284638671875Q83.686,11.9894638671875,82.15299999999999,13.1234638671875L81.082,11.5064638671875Q83.455,9.9524638671875,85.408,7.9154638671875L86.731,9.3434638671875ZM88.852,7.9154638671875Q89.755,8.5244638671875,91.3615,9.731963867187499Q92.968,10.9394638671875,93.703,11.5694638671875L92.632,13.3334638671875Q91.771,12.5354638671875,90.217,11.3384638671875Q88.663,10.1414638671875,87.718,9.5114638671875L88.852,7.9154638671875ZM92.107,15.2444638671875L88.285,15.2444638671875L88.285,18.7094638671875L93.577,18.7094638671875L93.577,20.5994638671875L80.935,20.5994638671875L80.935,18.7094638671875L86.164,18.7094638671875L86.164,15.2444638671875L82.3,15.2444638671875L82.3,13.3334638671875L92.107,13.3334638671875L92.107,15.2444638671875Z", - "fill": "#FF6A00", - "fill-opacity": "1" - } - } - ] - } - ] - } - ] - }, - "name": "AliyunBigIcon" + "icon": { + "type": "element", + "isRootNode": true, + "name": "svg", + "attributes": { + "xmlns": "http://www.w3.org/2000/svg", + "xmlns:xlink": "http://www.w3.org/1999/xlink", + "fill": "none", + "version": "1.1", + "width": "96", + "height": "24", + "viewBox": "0 0 96 24" + }, + "children": [ + { + "type": "element", + "name": "g", + "attributes": {}, + "children": [ + { + "type": "element", + "name": "g", + "attributes": {}, + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": "M6.10294,22C5.68819,22,5.18195,21.8532,4.58421,21.5595C4.46861,21.5027,4.39038,21.4658,4.34951,21.4488C3.49789,21.0943,2.74367,20.5941,2.08684,19.9484C0.695613,18.5806,0,16.9311,0,15C0,13.0689,0.695612,11.4194,2.08684,10.0516C3.24259,8.91537,4.59607,8.2511,6.14728,8.05878C6.34758,5.97414,7.22633,4.16634,8.78354,2.63539C10.5706,0.878463,12.7286,0,15.2573,0C17.2884,0,19.1146,0.595472,20.7358,1.78642C22.327,2.95528,23.4151,4.46783,24,6.32406L22.0568,6.91594C21.6024,5.47377,20.7561,4.29798,19.5181,3.38858C18.2579,2.46286,16.8377,2,15.2573,2C13.2903,2,11.6119,2.6832,10.222,4.04961C8.83217,5.41601,8.13725,7.06614,8.13725,9L8.13725,10L7.12009,10C5.71758,10,4.51932,10.4886,3.52532,11.4659C2.53132,12.4431,2.03431,13.6211,2.03431,15C2.03431,16.3789,2.53132,17.5569,3.52532,18.5341C3.99531,18.9962,4.53447,19.3538,5.14278,19.6071C5.2229,19.6405,5.33983,19.695,5.49356,19.7705C5.80505,19.9235,6.00818,20,6.10294,20L6.10294,22Z", + "fill-rule": "evenodd", + "fill": "#FF6A00", + "fill-opacity": "1" + }, + "children": [] + } + ] + }, + { + "type": "element", + "name": "g", + "attributes": {}, + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": 
"M20.18796103515625,11.66909C19.46346103515625,11.5762,18.72726103515625,11.52975,17.991011035156248,11.52975C16.728921035156247,11.52975,15.45515103515625,11.66909,14.23981103515625,11.91292C13.02447103515625,12.156749999999999,11.85588103515625,12.539909999999999,10.73402103515625,12.98113C9.98612103515625,13.306239999999999,9.23822103515625,13.69327,8.49031803515625,14.14223C7.99950790415625,14.43251,7.85927603515625,15.10595,8.15142503515625,15.59361L11.11966103515625,19.9478C11.45855103515625,20.4354,12.13634103515625,20.5747,12.627151035156249,20.2845C12.821921035156251,20.152900000000002,13.14523103515625,19.990299999999998,13.59708103515625,19.796799999999998C14.27487103515625,19.506500000000003,14.964341035156249,19.3091,15.68887103515625,19.169800000000002C16.413401035156248,19.018900000000002,17.14962103515625,18.926000000000002,17.93258103515625,18.926000000000002L20.071061035156248,11.715530000000001L20.18796103515625,11.66909ZM22.91076103515625,12.20319L22.525161035156252,8L20.18796103515625,11.6807C20.72556103515625,11.72714,21.21636103515625,11.82003,21.74216103515625,11.92453C21.74216103515625,11.91679,22.13166103515625,12.00968,22.91076103515625,12.20319ZM18.09616103515625,18.9724L17.06782103515625,22.4557L18.773961035156248,24L21.11116103515625,23.465899999999998L21.788961035156248,19.5414C21.298161035156248,19.402,20.81896103515625,19.2511,20.32816103515625,19.1582C19.60366103515625,19.076900000000002,18.86746103515625,18.9724,18.09616103515625,18.9724ZM27.49166103515625,14.14223C26.74376103515625,13.69327,25.99586103515625,13.306239999999999,25.24796103515625,12.98113C24.52346103515625,12.69086,23.74046103515625,12.40058,22.95756103515625,12.20319L22.95756103515625,12.40058L21.69546103515625,19.5646C21.89416103515625,19.6575,22.139561035156248,19.7039,22.32646103515625,19.8084C22.77836103515625,20.0019,23.101661035156248,20.1645,23.29646103515625,20.2961C23.78726103515625,20.586399999999998,24.51176103515625,20.4354,24.80396103515625,19.959400000000002L27.77216103515625,15.605229999999999C28.16946103515625,15.05951,28.02926103515625,14.43251,27.49166103515625,14.14223Z", + "fill": "#FF6A00", + "fill-opacity": "1" + }, + "children": [] + } + ] + }, + { + "type": "element", + "name": "g", + "attributes": {}, + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": 
"M35.785,3.8624638671875L50.233000000000004,3.8624638671875L50.233000000000004,5.9204638671875L35.785,5.9204638671875L35.785,3.8624638671875ZM43.156,11.5904638671875Q42.106,13.4594638671875,40.7515,15.4754638671875Q39.397,17.4914638671875,38.599000000000004,18.3104638671875L46.978,17.5334638671875Q45.382,15.0974638671875,44.479,13.8794638671875L46.306,12.7034638671875Q47.397999999999996,14.1104638671875,48.9835,16.3784638671875Q50.569,18.6464638671875,51.492999999999995,20.1794638671875L49.54,21.6494638671875Q49.057,20.8094638671875,48.238,19.5074638671875Q46.243,19.6334638671875,42.82,19.9274638671875Q39.397,20.2214638671875,37.024,20.4734638671875L36.184,20.5784638671875L35.47,20.6834638671875L34.84,18.5624638671875Q35.281,18.4154638671875,35.512,18.2579638671875Q35.743,18.1004638671875,35.9005,17.963963867187502Q36.058,17.8274638671875,36.121,17.7644638671875Q37.087,16.840463867187502,38.305,15.1079638671875Q39.522999999999996,13.3754638671875,40.531,11.5904638671875L34,11.5904638671875L34,9.5114638671875L52.018,9.5114638671875L52.018,11.5904638671875L43.156,11.5904638671875ZM62.203,10.9814638671875L62.203,12.7244638671875L60.25,12.7244638671875L60.25,2.5814638671875L62.203,2.6654638671875L62.203,10.4144638671875Q63.19,8.6294638671875,64.051,6.4139638671875Q64.912,4.1984638671875,65.28999999999999,2.3504638671875L67.348,2.8334628671875Q67.15899999999999,3.7784638671875,66.80199999999999,4.9754638671875L72.619,4.9754638671875L72.619,6.9704638671875L66.13,6.9704638671875Q65.143,9.7004638671875,63.778,12.0524638671875L62.203,10.9814638671875ZM56.113,3.3794638671875L58.045,3.4634638671875L58.045,12.1784638671875L56.113,12.1784638671875L56.113,3.3794638671875ZM67.495,7.3064638671875Q68.251,7.8944638671875,69.469,9.1229638671875Q70.687,10.3514638671875,71.40100000000001,11.2334638671875L69.84700000000001,12.7454638671875Q69.238,11.9684638671875,68.083,10.7714638671875Q66.928,9.5744638671875,66.025,8.7134638671875L67.495,7.3064638671875ZM70.834,13.3754638671875L70.834,18.9194638671875L73.06,18.9194638671875L73.06,20.8094638671875L54.307,20.8094638671875L54.307,18.9194638671875L56.491,18.9194638671875L56.491,13.3754638671875L70.834,13.3754638671875ZM60.733000000000004,15.2444638671875L58.465,15.2444638671875L58.465,18.9194638671875L60.733000000000004,18.9194638671875L60.733000000000004,15.2444638671875ZM62.581,18.9194638671875L64.765,18.9194638671875L64.765,15.2444638671875L62.581,15.2444638671875L62.581,18.9194638671875ZM66.592,18.9194638671875L68.881,18.9194638671875L68.881,15.2444638671875L66.592,15.2444638671875L66.592,18.9194638671875ZM80.578,11.0444638671875L80.893,12.4514638671875L79.48599999999999,13.0814638671875L79.48599999999999,19.0874638671875Q79.48599999999999,20.0114638671875,79.2655,20.4629638671875Q79.045,20.9144638671875,78.52000000000001,21.1034638671875Q77.995,21.2924638671875,76.90299999999999,21.3974638671875L76.021,21.4814638671875L75.43299999999999,19.4864638671875L76.462,19.4024638671875Q76.987,19.3604638671875,77.197,19.2974638671875Q77.407,19.2344638671875,77.4805,19.0559638671875Q77.554,18.8774638671875,77.554,18.4364638671875L77.554,13.9004638671875Q76.189,14.4464638671875,75.202,14.7824638671875L74.74000000000001,12.7244638671875Q75.916,12.3464638671875,77.554,11.6744638671875L77.554,8.1464638671875L75.34899999999999,8.1464638671875L75.34899999999999,6.1094638671875L77.554,6.1094638671875L77.554,2.4974628671875L79.48599999999999,2.5814638671875L79.48599999999999,6.1094638671875L81.03999999999999,6.1094638671875L81.03999999999999,8.1464638671875L79.48599999999999,8.1
464638671875L79.48599999999999,10.8344638671875L80.431,10.3934638671875L80.578,11.0444638671875ZM83.56,6.6764638671875L83.56,9.0074638671875L81.565,9.0074638671875L81.565,4.7444638671875L86.24799999999999,4.7444638671875Q85.84899999999999,3.3794638671875,85.618,2.7494638671875L87.655,2.4974628671875Q87.991,3.2744638671875,88.432,4.7444638671875L93.094,4.7444638671875L93.094,9.0074638671875L91.162,9.0074638671875L91.162,6.6764638671875L83.56,6.6764638671875ZM86.731,9.3434638671875Q85.807,10.2674638671875,84.7465,11.1284638671875Q83.686,11.9894638671875,82.15299999999999,13.1234638671875L81.082,11.5064638671875Q83.455,9.9524638671875,85.408,7.9154638671875L86.731,9.3434638671875ZM88.852,7.9154638671875Q89.755,8.5244638671875,91.3615,9.731963867187499Q92.968,10.9394638671875,93.703,11.5694638671875L92.632,13.3334638671875Q91.771,12.5354638671875,90.217,11.3384638671875Q88.663,10.1414638671875,87.718,9.5114638671875L88.852,7.9154638671875ZM92.107,15.2444638671875L88.285,15.2444638671875L88.285,18.7094638671875L93.577,18.7094638671875L93.577,20.5994638671875L80.935,20.5994638671875L80.935,18.7094638671875L86.164,18.7094638671875L86.164,15.2444638671875L82.3,15.2444638671875L82.3,13.3334638671875L92.107,13.3334638671875L92.107,15.2444638671875Z", + "fill": "#FF6A00", + "fill-opacity": "1" + }, + "children": [] + } + ] + } + ] + } + ] + }, + "name": "AliyunIconBig" } From 3fb5a7bff1653b1860d27d6f965c81152a8f8d2d Mon Sep 17 00:00:00 2001 From: GuanMu Date: Fri, 10 Oct 2025 23:39:13 +0800 Subject: [PATCH 21/49] fix: add z-index class to PortalToFollowElemContent for proper layering in dataset extra info component (#26729) --- web/app/components/datasets/extra-info/service-api/index.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/app/components/datasets/extra-info/service-api/index.tsx b/web/app/components/datasets/extra-info/service-api/index.tsx index b1843682ee..af7ce946ad 100644 --- a/web/app/components/datasets/extra-info/service-api/index.tsx +++ b/web/app/components/datasets/extra-info/service-api/index.tsx @@ -52,7 +52,7 @@ const ServiceApi = ({ />
- + Date: Fri, 10 Oct 2025 23:39:25 +0800 Subject: [PATCH 22/49] =?UTF-8?q?fix:=20Set=20ApiTool=E2=80=99s=20do=5Fhtt?= =?UTF-8?q?p=5Frequest=20to=20do=20not=20retry.=20(#26721)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- api/core/tools/custom_tool/tool.py | 1 + 1 file changed, 1 insertion(+) diff --git a/api/core/tools/custom_tool/tool.py b/api/core/tools/custom_tool/tool.py index 34d0f5c622..f18f638f2d 100644 --- a/api/core/tools/custom_tool/tool.py +++ b/api/core/tools/custom_tool/tool.py @@ -290,6 +290,7 @@ class ApiTool(Tool): method_lc ]( # https://discuss.python.org/t/type-inference-for-function-return-types/42926 url, + max_retries=0, params=params, headers=headers, cookies=cookies, From 3922ad876fb3cfb42448a718ac8bb02dec1d3800 Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Sat, 11 Oct 2025 00:40:54 +0900 Subject: [PATCH 23/49] part of add type to orm (#26262) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .github/workflows/autofix.yml | 2 + api/models/dataset.py | 128 +++++++++++++++++----------------- api/models/oauth.py | 51 +++++++------- api/models/task.py | 6 +- 4 files changed, 94 insertions(+), 93 deletions(-) diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml index ef69e08da9..0cae2ef552 100644 --- a/.github/workflows/autofix.yml +++ b/.github/workflows/autofix.yml @@ -30,6 +30,8 @@ jobs: run: | uvx --from ast-grep-cli sg --pattern 'db.session.query($WHATEVER).filter($HERE)' --rewrite 'db.session.query($WHATEVER).where($HERE)' -l py --update-all uvx --from ast-grep-cli sg --pattern 'session.query($WHATEVER).filter($HERE)' --rewrite 'session.query($WHATEVER).where($HERE)' -l py --update-all + uvx --from ast-grep-cli sg -p '$A = db.Column($$$B)' -r '$A = mapped_column($$$B)' -l py --update-all + uvx --from ast-grep-cli sg -p '$A : $T = db.Column($$$B)' -r '$A : $T = mapped_column($$$B)' -l py --update-all # Convert Optional[T] to T | None (ignoring quoted types) cat > /tmp/optional-rule.yml << 'EOF' id: convert-optional-to-union diff --git a/api/models/dataset.py b/api/models/dataset.py index 6263c04365..1e1d267921 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -61,18 +61,18 @@ class Dataset(Base): created_by = mapped_column(StringUUID, nullable=False) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) updated_by = mapped_column(StringUUID, nullable=True) - updated_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) embedding_model = mapped_column(db.String(255), nullable=True) embedding_model_provider = mapped_column(db.String(255), nullable=True) - keyword_number = db.Column(db.Integer, nullable=True, server_default=db.text("10")) + keyword_number = mapped_column(sa.Integer, nullable=True, server_default=db.text("10")) collection_binding_id = mapped_column(StringUUID, nullable=True) retrieval_model = mapped_column(JSONB, nullable=True) - built_in_field_enabled = mapped_column(db.Boolean, nullable=False, server_default=db.text("false")) - icon_info = db.Column(JSONB, nullable=True) - runtime_mode = db.Column(db.String(255), nullable=True, server_default=db.text("'general'::character varying")) - pipeline_id = db.Column(StringUUID, nullable=True) - chunk_structure = db.Column(db.String(255), nullable=True) - enable_api = db.Column(db.Boolean, 
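On the one-line tool.py change above (PATCH 22): custom API tools can call non-idempotent endpoints, so a transport failure should surface immediately rather than re-issue the request. A rough sketch of the idea; request_with_retries is an illustrative stand-in, not Dify's actual helper.

import httpx


def request_with_retries(method: str, url: str, max_retries: int = 3, **kwargs) -> httpx.Response:
    last_exc: Exception | None = None
    for _attempt in range(max_retries + 1):
        try:
            return httpx.request(method, url, **kwargs)
        except httpx.TransportError as exc:  # only transient transport errors are retried
            last_exc = exc
    assert last_exc is not None
    raise last_exc


# With max_retries=0 the request runs at most once, which is the safe choice
# for user-defined (possibly side-effecting) API tools:
# response = request_with_retries("post", "https://example.com/hook", max_retries=0, json={})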
nullable=False, server_default=db.text("true")) + built_in_field_enabled = mapped_column(sa.Boolean, nullable=False, server_default=db.text("false")) + icon_info = mapped_column(JSONB, nullable=True) + runtime_mode = mapped_column(db.String(255), nullable=True, server_default=db.text("'general'::character varying")) + pipeline_id = mapped_column(StringUUID, nullable=True) + chunk_structure = mapped_column(db.String(255), nullable=True) + enable_api = mapped_column(sa.Boolean, nullable=False, server_default=db.text("true")) @property def total_documents(self): @@ -1226,21 +1226,21 @@ class PipelineBuiltInTemplate(Base): # type: ignore[name-defined] __tablename__ = "pipeline_built_in_templates" __table_args__ = (db.PrimaryKeyConstraint("id", name="pipeline_built_in_template_pkey"),) - id = db.Column(StringUUID, server_default=db.text("uuidv7()")) - name = db.Column(db.String(255), nullable=False) - description = db.Column(db.Text, nullable=False) - chunk_structure = db.Column(db.String(255), nullable=False) - icon = db.Column(db.JSON, nullable=False) - yaml_content = db.Column(db.Text, nullable=False) - copyright = db.Column(db.String(255), nullable=False) - privacy_policy = db.Column(db.String(255), nullable=False) - position = db.Column(db.Integer, nullable=False) - install_count = db.Column(db.Integer, nullable=False, default=0) - language = db.Column(db.String(255), nullable=False) - created_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - created_by = db.Column(StringUUID, nullable=False) - updated_by = db.Column(StringUUID, nullable=True) + id = mapped_column(StringUUID, server_default=db.text("uuidv7()")) + name = mapped_column(db.String(255), nullable=False) + description = mapped_column(sa.Text, nullable=False) + chunk_structure = mapped_column(db.String(255), nullable=False) + icon = mapped_column(sa.JSON, nullable=False) + yaml_content = mapped_column(sa.Text, nullable=False) + copyright = mapped_column(db.String(255), nullable=False) + privacy_policy = mapped_column(db.String(255), nullable=False) + position = mapped_column(sa.Integer, nullable=False) + install_count = mapped_column(sa.Integer, nullable=False, default=0) + language = mapped_column(db.String(255), nullable=False) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + created_by = mapped_column(StringUUID, nullable=False) + updated_by = mapped_column(StringUUID, nullable=True) @property def created_user_name(self): @@ -1257,20 +1257,20 @@ class PipelineCustomizedTemplate(Base): # type: ignore[name-defined] db.Index("pipeline_customized_template_tenant_idx", "tenant_id"), ) - id = db.Column(StringUUID, server_default=db.text("uuidv7()")) - tenant_id = db.Column(StringUUID, nullable=False) - name = db.Column(db.String(255), nullable=False) - description = db.Column(db.Text, nullable=False) - chunk_structure = db.Column(db.String(255), nullable=False) - icon = db.Column(db.JSON, nullable=False) - position = db.Column(db.Integer, nullable=False) - yaml_content = db.Column(db.Text, nullable=False) - install_count = db.Column(db.Integer, nullable=False, default=0) - language = db.Column(db.String(255), nullable=False) - created_by = db.Column(StringUUID, nullable=False) - updated_by = db.Column(StringUUID, nullable=True) - created_at = 
db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + id = mapped_column(StringUUID, server_default=db.text("uuidv7()")) + tenant_id = mapped_column(StringUUID, nullable=False) + name = mapped_column(db.String(255), nullable=False) + description = mapped_column(sa.Text, nullable=False) + chunk_structure = mapped_column(db.String(255), nullable=False) + icon = mapped_column(sa.JSON, nullable=False) + position = mapped_column(sa.Integer, nullable=False) + yaml_content = mapped_column(sa.Text, nullable=False) + install_count = mapped_column(sa.Integer, nullable=False, default=0) + language = mapped_column(db.String(255), nullable=False) + created_by = mapped_column(StringUUID, nullable=False) + updated_by = mapped_column(StringUUID, nullable=True) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) @property def created_user_name(self): @@ -1284,17 +1284,17 @@ class Pipeline(Base): # type: ignore[name-defined] __tablename__ = "pipelines" __table_args__ = (db.PrimaryKeyConstraint("id", name="pipeline_pkey"),) - id = db.Column(StringUUID, server_default=db.text("uuidv7()")) - tenant_id: Mapped[str] = db.Column(StringUUID, nullable=False) - name = db.Column(db.String(255), nullable=False) - description = db.Column(db.Text, nullable=False, server_default=db.text("''::character varying")) - workflow_id = db.Column(StringUUID, nullable=True) - is_public = db.Column(db.Boolean, nullable=False, server_default=db.text("false")) - is_published = db.Column(db.Boolean, nullable=False, server_default=db.text("false")) - created_by = db.Column(StringUUID, nullable=True) - created_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - updated_by = db.Column(StringUUID, nullable=True) - updated_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + id = mapped_column(StringUUID, server_default=db.text("uuidv7()")) + tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + name = mapped_column(db.String(255), nullable=False) + description = mapped_column(sa.Text, nullable=False, server_default=db.text("''::character varying")) + workflow_id = mapped_column(StringUUID, nullable=True) + is_public = mapped_column(sa.Boolean, nullable=False, server_default=db.text("false")) + is_published = mapped_column(sa.Boolean, nullable=False, server_default=db.text("false")) + created_by = mapped_column(StringUUID, nullable=True) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + updated_by = mapped_column(StringUUID, nullable=True) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) def retrieve_dataset(self, session: Session): return session.query(Dataset).where(Dataset.pipeline_id == self.id).first() @@ -1307,25 +1307,25 @@ class DocumentPipelineExecutionLog(Base): db.Index("document_pipeline_execution_logs_document_id_idx", "document_id"), ) - id = db.Column(StringUUID, server_default=db.text("uuidv7()")) - pipeline_id = db.Column(StringUUID, nullable=False) - document_id = db.Column(StringUUID, nullable=False) - datasource_type = db.Column(db.String(255), nullable=False) - datasource_info = db.Column(db.Text, nullable=False) - datasource_node_id = db.Column(db.String(255), 
nullable=False) - input_data = db.Column(db.JSON, nullable=False) - created_by = db.Column(StringUUID, nullable=True) - created_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + id = mapped_column(StringUUID, server_default=db.text("uuidv7()")) + pipeline_id = mapped_column(StringUUID, nullable=False) + document_id = mapped_column(StringUUID, nullable=False) + datasource_type = mapped_column(db.String(255), nullable=False) + datasource_info = mapped_column(sa.Text, nullable=False) + datasource_node_id = mapped_column(db.String(255), nullable=False) + input_data = mapped_column(sa.JSON, nullable=False) + created_by = mapped_column(StringUUID, nullable=True) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) class PipelineRecommendedPlugin(Base): __tablename__ = "pipeline_recommended_plugins" __table_args__ = (db.PrimaryKeyConstraint("id", name="pipeline_recommended_plugin_pkey"),) - id = db.Column(StringUUID, server_default=db.text("uuidv7()")) - plugin_id = db.Column(db.Text, nullable=False) - provider_name = db.Column(db.Text, nullable=False) - position = db.Column(db.Integer, nullable=False, default=0) - active = db.Column(db.Boolean, nullable=False, default=True) - created_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + id = mapped_column(StringUUID, server_default=db.text("uuidv7()")) + plugin_id = mapped_column(sa.Text, nullable=False) + provider_name = mapped_column(sa.Text, nullable=False) + position = mapped_column(sa.Integer, nullable=False, default=0) + active = mapped_column(sa.Boolean, nullable=False, default=True) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) diff --git a/api/models/oauth.py b/api/models/oauth.py index 1d5d37e3e1..ef23780dc8 100644 --- a/api/models/oauth.py +++ b/api/models/oauth.py @@ -1,7 +1,8 @@ from datetime import datetime +import sqlalchemy as sa from sqlalchemy.dialects.postgresql import JSONB -from sqlalchemy.orm import Mapped +from sqlalchemy.orm import Mapped, mapped_column from .base import Base from .engine import db @@ -15,10 +16,10 @@ class DatasourceOauthParamConfig(Base): # type: ignore[name-defined] db.UniqueConstraint("plugin_id", "provider", name="datasource_oauth_config_datasource_id_provider_idx"), ) - id = db.Column(StringUUID, server_default=db.text("uuidv7()")) - plugin_id: Mapped[str] = db.Column(db.String(255), nullable=False) - provider: Mapped[str] = db.Column(db.String(255), nullable=False) - system_credentials: Mapped[dict] = db.Column(JSONB, nullable=False) + id = mapped_column(StringUUID, server_default=db.text("uuidv7()")) + plugin_id: Mapped[str] = mapped_column(db.String(255), nullable=False) + provider: Mapped[str] = mapped_column(db.String(255), nullable=False) + system_credentials: Mapped[dict] = mapped_column(JSONB, nullable=False) class DatasourceProvider(Base): @@ -28,19 +29,19 @@ class DatasourceProvider(Base): db.UniqueConstraint("tenant_id", "plugin_id", "provider", "name", name="datasource_provider_unique_name"), db.Index("datasource_provider_auth_type_provider_idx", "tenant_id", "plugin_id", "provider"), ) - id = db.Column(StringUUID, server_default=db.text("uuidv7()")) - tenant_id = db.Column(StringUUID, nullable=False) - name: Mapped[str] = 
db.Column(db.String(255), nullable=False) - provider: Mapped[str] = db.Column(db.String(255), nullable=False) - plugin_id: Mapped[str] = db.Column(db.String(255), nullable=False) - auth_type: Mapped[str] = db.Column(db.String(255), nullable=False) - encrypted_credentials: Mapped[dict] = db.Column(JSONB, nullable=False) - avatar_url: Mapped[str] = db.Column(db.Text, nullable=True, default="default") - is_default: Mapped[bool] = db.Column(db.Boolean, nullable=False, server_default=db.text("false")) - expires_at: Mapped[int] = db.Column(db.Integer, nullable=False, server_default="-1") + id = mapped_column(StringUUID, server_default=db.text("uuidv7()")) + tenant_id = mapped_column(StringUUID, nullable=False) + name: Mapped[str] = mapped_column(db.String(255), nullable=False) + provider: Mapped[str] = mapped_column(db.String(255), nullable=False) + plugin_id: Mapped[str] = mapped_column(db.String(255), nullable=False) + auth_type: Mapped[str] = mapped_column(db.String(255), nullable=False) + encrypted_credentials: Mapped[dict] = mapped_column(JSONB, nullable=False) + avatar_url: Mapped[str] = mapped_column(sa.Text, nullable=True, default="default") + is_default: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=db.text("false")) + expires_at: Mapped[int] = mapped_column(sa.Integer, nullable=False, server_default="-1") - created_at: Mapped[datetime] = db.Column(db.DateTime, nullable=False, default=datetime.now) - updated_at: Mapped[datetime] = db.Column(db.DateTime, nullable=False, default=datetime.now) + created_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, default=datetime.now) + updated_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, default=datetime.now) class DatasourceOauthTenantParamConfig(Base): @@ -50,12 +51,12 @@ class DatasourceOauthTenantParamConfig(Base): db.UniqueConstraint("tenant_id", "plugin_id", "provider", name="datasource_oauth_tenant_config_unique"), ) - id = db.Column(StringUUID, server_default=db.text("uuidv7()")) - tenant_id = db.Column(StringUUID, nullable=False) - provider: Mapped[str] = db.Column(db.String(255), nullable=False) - plugin_id: Mapped[str] = db.Column(db.String(255), nullable=False) - client_params: Mapped[dict] = db.Column(JSONB, nullable=False, default={}) - enabled: Mapped[bool] = db.Column(db.Boolean, nullable=False, default=False) + id = mapped_column(StringUUID, server_default=db.text("uuidv7()")) + tenant_id = mapped_column(StringUUID, nullable=False) + provider: Mapped[str] = mapped_column(db.String(255), nullable=False) + plugin_id: Mapped[str] = mapped_column(db.String(255), nullable=False) + client_params: Mapped[dict] = mapped_column(JSONB, nullable=False, default={}) + enabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, default=False) - created_at: Mapped[datetime] = db.Column(db.DateTime, nullable=False, default=datetime.now) - updated_at: Mapped[datetime] = db.Column(db.DateTime, nullable=False, default=datetime.now) + created_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, default=datetime.now) + updated_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, default=datetime.now) diff --git a/api/models/task.py b/api/models/task.py index 3da1674536..4e49254dbd 100644 --- a/api/models/task.py +++ b/api/models/task.py @@ -8,8 +8,6 @@ from sqlalchemy.orm import Mapped, mapped_column from libs.datetime_utils import naive_utc_now from models.base import Base -from .engine import db - class CeleryTask(Base): """Task result/status.""" @@ -19,7 
+17,7 @@ class CeleryTask(Base): id = mapped_column(sa.Integer, sa.Sequence("task_id_sequence"), primary_key=True, autoincrement=True) task_id = mapped_column(String(155), unique=True) status = mapped_column(String(50), default=states.PENDING) - result = mapped_column(db.PickleType, nullable=True) + result = mapped_column(sa.PickleType, nullable=True) date_done = mapped_column( DateTime, default=lambda: naive_utc_now(), @@ -44,5 +42,5 @@ class CeleryTaskSet(Base): sa.Integer, sa.Sequence("taskset_id_sequence"), autoincrement=True, primary_key=True ) taskset_id = mapped_column(String(155), unique=True) - result = mapped_column(db.PickleType, nullable=True) + result = mapped_column(sa.PickleType, nullable=True) date_done: Mapped[datetime | None] = mapped_column(DateTime, default=lambda: naive_utc_now(), nullable=True) From bb6a331490a81eb849e210ecfc453f1d1e3d65ba Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Sat, 11 Oct 2025 00:41:16 +0900 Subject: [PATCH 24/49] change all to httpx (#26119) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- .../api_based_extension_requestor.py | 32 +++---- api/core/plugin/impl/base.py | 95 ++++++++++++++----- .../vdb/elasticsearch/elasticsearch_vector.py | 4 +- .../tidb_on_qdrant/tidb_on_qdrant_vector.py | 12 +-- .../vdb/tidb_on_qdrant/tidb_service.py | 22 ++--- .../vdb/weaviate/weaviate_vector.py | 5 +- .../rag/extractor/firecrawl/firecrawl_app.py | 10 +- api/core/rag/extractor/notion_extractor.py | 14 +-- api/core/rag/extractor/watercrawl/client.py | 73 +++++++++----- api/core/rag/extractor/word_extractor.py | 14 ++- api/core/tools/utils/parser.py | 21 ++-- api/extensions/ext_otel.py | 2 - api/pyproject.toml | 4 +- api/pyrightconfig.json | 3 +- api/services/enterprise/base.py | 31 +++++- .../remote/remote_retrieval.py | 6 +- .../recommend_app/remote/remote_retrieval.py | 6 +- .../vdb/__mock/baiduvectordb.py | 4 +- .../vdb/__mock/tcvectordb.py | 5 +- .../rag/extractor/firecrawl/test_firecrawl.py | 2 +- .../rag/extractor/test_notion_extractor.py | 4 +- api/uv.lock | 34 ------- .../stress-test/setup/import_workflow_app.py | 2 +- 23 files changed, 232 insertions(+), 173 deletions(-) diff --git a/api/core/extension/api_based_extension_requestor.py b/api/core/extension/api_based_extension_requestor.py index fab9ae44e9..f9e6099049 100644 --- a/api/core/extension/api_based_extension_requestor.py +++ b/api/core/extension/api_based_extension_requestor.py @@ -1,13 +1,13 @@ from typing import cast -import requests +import httpx from configs import dify_config from models.api_based_extension import APIBasedExtensionPoint class APIBasedExtensionRequestor: - timeout: tuple[int, int] = (5, 60) + timeout: httpx.Timeout = httpx.Timeout(60.0, connect=5.0) """timeout for request connect and read""" def __init__(self, api_endpoint: str, api_key: str): @@ -27,25 +27,23 @@ class APIBasedExtensionRequestor: url = self.api_endpoint try: - # proxy support for security - proxies = None + mounts: dict[str, httpx.BaseTransport] | None = None if dify_config.SSRF_PROXY_HTTP_URL and dify_config.SSRF_PROXY_HTTPS_URL: - proxies = { - "http": dify_config.SSRF_PROXY_HTTP_URL, - "https": dify_config.SSRF_PROXY_HTTPS_URL, + mounts = { + "http://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTP_URL), + "https://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTPS_URL), } - response = requests.request( - method="POST", - url=url, - json={"point": point.value, "params": 
params}, - headers=headers, - timeout=self.timeout, - proxies=proxies, - ) - except requests.Timeout: + with httpx.Client(mounts=mounts, timeout=self.timeout) as client: + response = client.request( + method="POST", + url=url, + json={"point": point.value, "params": params}, + headers=headers, + ) + except httpx.TimeoutException: raise ValueError("request timeout") - except requests.ConnectionError: + except httpx.RequestError: raise ValueError("request connection error") if response.status_code != 200: diff --git a/api/core/plugin/impl/base.py b/api/core/plugin/impl/base.py index 62a5cc535a..c791b35161 100644 --- a/api/core/plugin/impl/base.py +++ b/api/core/plugin/impl/base.py @@ -2,11 +2,10 @@ import inspect import json import logging from collections.abc import Callable, Generator -from typing import TypeVar +from typing import Any, TypeVar -import requests +import httpx from pydantic import BaseModel -from requests.exceptions import HTTPError from yarl import URL from configs import dify_config @@ -47,29 +46,56 @@ class BasePluginClient: data: bytes | dict | str | None = None, params: dict | None = None, files: dict | None = None, - stream: bool = False, - ) -> requests.Response: + ) -> httpx.Response: """ Make a request to the plugin daemon inner API. """ - url = plugin_daemon_inner_api_baseurl / path - headers = headers or {} - headers["X-Api-Key"] = dify_config.PLUGIN_DAEMON_KEY - headers["Accept-Encoding"] = "gzip, deflate, br" + url, headers, prepared_data, params, files = self._prepare_request(path, headers, data, params, files) - if headers.get("Content-Type") == "application/json" and isinstance(data, dict): - data = json.dumps(data) + request_kwargs: dict[str, Any] = { + "method": method, + "url": url, + "headers": headers, + "params": params, + "files": files, + } + if isinstance(prepared_data, dict): + request_kwargs["data"] = prepared_data + elif prepared_data is not None: + request_kwargs["content"] = prepared_data try: - response = requests.request( - method=method, url=str(url), headers=headers, data=data, params=params, stream=stream, files=files - ) - except requests.ConnectionError: + response = httpx.request(**request_kwargs) + except httpx.RequestError: logger.exception("Request to Plugin Daemon Service failed") raise PluginDaemonInnerError(code=-500, message="Request to Plugin Daemon Service failed") return response + def _prepare_request( + self, + path: str, + headers: dict | None, + data: bytes | dict | str | None, + params: dict | None, + files: dict | None, + ) -> tuple[str, dict, bytes | dict | str | None, dict | None, dict | None]: + url = plugin_daemon_inner_api_baseurl / path + prepared_headers = dict(headers or {}) + prepared_headers["X-Api-Key"] = dify_config.PLUGIN_DAEMON_KEY + prepared_headers.setdefault("Accept-Encoding", "gzip, deflate, br") + + prepared_data: bytes | dict | str | None = ( + data if isinstance(data, (bytes, str, dict)) or data is None else None + ) + if isinstance(data, dict): + if prepared_headers.get("Content-Type") == "application/json": + prepared_data = json.dumps(data) + else: + prepared_data = data + + return str(url), prepared_headers, prepared_data, params, files + def _stream_request( self, method: str, @@ -78,17 +104,38 @@ class BasePluginClient: headers: dict | None = None, data: bytes | dict | None = None, files: dict | None = None, - ) -> Generator[bytes, None, None]: + ) -> Generator[str, None, None]: """ Make a stream request to the plugin daemon inner API """ - response = self._request(method, path, headers, data, 
params, files, stream=True) - for line in response.iter_lines(chunk_size=1024 * 8): - line = line.decode("utf-8").strip() - if line.startswith("data:"): - line = line[5:].strip() - if line: - yield line + url, headers, prepared_data, params, files = self._prepare_request(path, headers, data, params, files) + + stream_kwargs: dict[str, Any] = { + "method": method, + "url": url, + "headers": headers, + "params": params, + "files": files, + } + if isinstance(prepared_data, dict): + stream_kwargs["data"] = prepared_data + elif prepared_data is not None: + stream_kwargs["content"] = prepared_data + + try: + with httpx.stream(**stream_kwargs) as response: + for raw_line in response.iter_lines(): + if raw_line is None: + continue + line = raw_line.decode("utf-8") if isinstance(raw_line, bytes) else raw_line + line = line.strip() + if line.startswith("data:"): + line = line[5:].strip() + if line: + yield line + except httpx.RequestError: + logger.exception("Stream request to Plugin Daemon Service failed") + raise PluginDaemonInnerError(code=-500, message="Request to Plugin Daemon Service failed") def _stream_request_with_model( self, @@ -139,7 +186,7 @@ class BasePluginClient: try: response = self._request(method, path, headers, data, params, files) response.raise_for_status() - except HTTPError as e: + except httpx.HTTPStatusError as e: logger.exception("Failed to request plugin daemon, status: %s, url: %s", e.response.status_code, path) raise e except Exception as e: diff --git a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py index 2c147fa7ca..ecb7a3916e 100644 --- a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py +++ b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py @@ -4,7 +4,7 @@ import math from typing import Any, cast from urllib.parse import urlparse -import requests +from elasticsearch import ConnectionError as ElasticsearchConnectionError from elasticsearch import Elasticsearch from flask import current_app from packaging.version import parse as parse_version @@ -138,7 +138,7 @@ class ElasticSearchVector(BaseVector): if not client.ping(): raise ConnectionError("Failed to connect to Elasticsearch") - except requests.ConnectionError as e: + except ElasticsearchConnectionError as e: raise ConnectionError(f"Vector database connection error: {str(e)}") except Exception as e: raise ConnectionError(f"Elasticsearch client initialization failed: {str(e)}") diff --git a/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_on_qdrant_vector.py b/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_on_qdrant_vector.py index f90a311df4..1ac10209d3 100644 --- a/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_on_qdrant_vector.py +++ b/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_on_qdrant_vector.py @@ -5,9 +5,10 @@ from collections.abc import Generator, Iterable, Sequence from itertools import islice from typing import TYPE_CHECKING, Any, Union +import httpx import qdrant_client -import requests from flask import current_app +from httpx import DigestAuth from pydantic import BaseModel from qdrant_client.http import models as rest from qdrant_client.http.models import ( @@ -19,7 +20,6 @@ from qdrant_client.http.models import ( TokenizerType, ) from qdrant_client.local.qdrant_local import QdrantLocal -from requests.auth import HTTPDigestAuth from sqlalchemy import select from configs import dify_config @@ -504,10 +504,10 @@ class TidbOnQdrantVectorFactory(AbstractVectorFactory): } 
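# Context for the requests -> httpx swap in this hunk: httpx.DigestAuth is the
# drop-in replacement for requests' HTTPDigestAuth, re-sending the request once
# with a Digest Authorization header after the server's 401 challenge. A minimal
# sketch, assuming a placeholder endpoint and key pair rather than TiDB Cloud's
# real API:
_sketch_auth = httpx.DigestAuth("public_key", "private_key")
_sketch_resp = httpx.get("https://api.example.com/clusters/123", auth=_sketch_auth)
_sketch_resp.raise_for_status()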
cluster_data = {"displayName": display_name, "region": region_object, "labels": labels} - response = requests.post( + response = httpx.post( f"{tidb_config.api_url}/clusters", json=cluster_data, - auth=HTTPDigestAuth(tidb_config.public_key, tidb_config.private_key), + auth=DigestAuth(tidb_config.public_key, tidb_config.private_key), ) if response.status_code == 200: @@ -527,10 +527,10 @@ class TidbOnQdrantVectorFactory(AbstractVectorFactory): body = {"password": new_password} - response = requests.put( + response = httpx.put( f"{tidb_config.api_url}/clusters/{cluster_id}/password", json=body, - auth=HTTPDigestAuth(tidb_config.public_key, tidb_config.private_key), + auth=DigestAuth(tidb_config.public_key, tidb_config.private_key), ) if response.status_code == 200: diff --git a/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_service.py b/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_service.py index e1d4422144..754c149241 100644 --- a/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_service.py +++ b/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_service.py @@ -2,8 +2,8 @@ import time import uuid from collections.abc import Sequence -import requests -from requests.auth import HTTPDigestAuth +import httpx +from httpx import DigestAuth from configs import dify_config from extensions.ext_database import db @@ -49,7 +49,7 @@ class TidbService: "rootPassword": password, } - response = requests.post(f"{api_url}/clusters", json=cluster_data, auth=HTTPDigestAuth(public_key, private_key)) + response = httpx.post(f"{api_url}/clusters", json=cluster_data, auth=DigestAuth(public_key, private_key)) if response.status_code == 200: response_data = response.json() @@ -83,7 +83,7 @@ class TidbService: :return: The response from the API. """ - response = requests.delete(f"{api_url}/clusters/{cluster_id}", auth=HTTPDigestAuth(public_key, private_key)) + response = httpx.delete(f"{api_url}/clusters/{cluster_id}", auth=DigestAuth(public_key, private_key)) if response.status_code == 200: return response.json() @@ -102,7 +102,7 @@ class TidbService: :return: The response from the API. 
""" - response = requests.get(f"{api_url}/clusters/{cluster_id}", auth=HTTPDigestAuth(public_key, private_key)) + response = httpx.get(f"{api_url}/clusters/{cluster_id}", auth=DigestAuth(public_key, private_key)) if response.status_code == 200: return response.json() @@ -127,10 +127,10 @@ class TidbService: body = {"password": new_password, "builtinRole": "role_admin", "customRoles": []} - response = requests.patch( + response = httpx.patch( f"{api_url}/clusters/{cluster_id}/sqlUsers/{account}", json=body, - auth=HTTPDigestAuth(public_key, private_key), + auth=DigestAuth(public_key, private_key), ) if response.status_code == 200: @@ -161,9 +161,7 @@ class TidbService: tidb_serverless_list_map = {item.cluster_id: item for item in tidb_serverless_list} cluster_ids = [item.cluster_id for item in tidb_serverless_list] params = {"clusterIds": cluster_ids, "view": "BASIC"} - response = requests.get( - f"{api_url}/clusters:batchGet", params=params, auth=HTTPDigestAuth(public_key, private_key) - ) + response = httpx.get(f"{api_url}/clusters:batchGet", params=params, auth=DigestAuth(public_key, private_key)) if response.status_code == 200: response_data = response.json() @@ -224,8 +222,8 @@ class TidbService: clusters.append(cluster_data) request_body = {"requests": clusters} - response = requests.post( - f"{api_url}/clusters:batchCreate", json=request_body, auth=HTTPDigestAuth(public_key, private_key) + response = httpx.post( + f"{api_url}/clusters:batchCreate", json=request_body, auth=DigestAuth(public_key, private_key) ) if response.status_code == 200: diff --git a/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py b/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py index 3ec08b93ed..d84ae6010d 100644 --- a/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py +++ b/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py @@ -2,7 +2,6 @@ import datetime import json from typing import Any -import requests import weaviate # type: ignore from pydantic import BaseModel, model_validator @@ -45,8 +44,8 @@ class WeaviateVector(BaseVector): client = weaviate.Client( url=config.endpoint, auth_client_secret=auth_config, timeout_config=(5, 60), startup_period=None ) - except requests.ConnectionError: - raise ConnectionError("Vector database connection error") + except Exception as exc: + raise ConnectionError("Vector database connection error") from exc client.batch.configure( # `batch_size` takes an `int` value to enable auto-batching diff --git a/api/core/rag/extractor/firecrawl/firecrawl_app.py b/api/core/rag/extractor/firecrawl/firecrawl_app.py index e1ba6ef243..c20ecd2b89 100644 --- a/api/core/rag/extractor/firecrawl/firecrawl_app.py +++ b/api/core/rag/extractor/firecrawl/firecrawl_app.py @@ -2,7 +2,7 @@ import json import time from typing import Any, cast -import requests +import httpx from extensions.ext_storage import storage @@ -104,18 +104,18 @@ class FirecrawlApp: def _prepare_headers(self) -> dict[str, Any]: return {"Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}"} - def _post_request(self, url, data, headers, retries=3, backoff_factor=0.5) -> requests.Response: + def _post_request(self, url, data, headers, retries=3, backoff_factor=0.5) -> httpx.Response: for attempt in range(retries): - response = requests.post(url, headers=headers, json=data) + response = httpx.post(url, headers=headers, json=data) if response.status_code == 502: time.sleep(backoff_factor * (2**attempt)) else: return response return response - def _get_request(self, url, headers, 
retries=3, backoff_factor=0.5) -> requests.Response: + def _get_request(self, url, headers, retries=3, backoff_factor=0.5) -> httpx.Response: for attempt in range(retries): - response = requests.get(url, headers=headers) + response = httpx.get(url, headers=headers) if response.status_code == 502: time.sleep(backoff_factor * (2**attempt)) else: diff --git a/api/core/rag/extractor/notion_extractor.py b/api/core/rag/extractor/notion_extractor.py index bddf41af43..e87ab38349 100644 --- a/api/core/rag/extractor/notion_extractor.py +++ b/api/core/rag/extractor/notion_extractor.py @@ -3,7 +3,7 @@ import logging import operator from typing import Any, cast -import requests +import httpx from configs import dify_config from core.rag.extractor.extractor_base import BaseExtractor @@ -92,7 +92,7 @@ class NotionExtractor(BaseExtractor): if next_cursor: current_query["start_cursor"] = next_cursor - res = requests.post( + res = httpx.post( DATABASE_URL_TMPL.format(database_id=database_id), headers={ "Authorization": "Bearer " + self._notion_access_token, @@ -160,7 +160,7 @@ class NotionExtractor(BaseExtractor): while True: query_dict: dict[str, Any] = {} if not start_cursor else {"start_cursor": start_cursor} try: - res = requests.request( + res = httpx.request( "GET", block_url, headers={ @@ -173,7 +173,7 @@ class NotionExtractor(BaseExtractor): if res.status_code != 200: raise ValueError(f"Error fetching Notion block data: {res.text}") data = res.json() - except requests.RequestException as e: + except httpx.HTTPError as e: raise ValueError("Error fetching Notion block data") from e if "results" not in data or not isinstance(data["results"], list): raise ValueError("Error fetching Notion block data") @@ -222,7 +222,7 @@ class NotionExtractor(BaseExtractor): while True: query_dict: dict[str, Any] = {} if not start_cursor else {"start_cursor": start_cursor} - res = requests.request( + res = httpx.request( "GET", block_url, headers={ @@ -282,7 +282,7 @@ class NotionExtractor(BaseExtractor): while not done: query_dict: dict[str, Any] = {} if not start_cursor else {"start_cursor": start_cursor} - res = requests.request( + res = httpx.request( "GET", block_url, headers={ @@ -354,7 +354,7 @@ class NotionExtractor(BaseExtractor): query_dict: dict[str, Any] = {} - res = requests.request( + res = httpx.request( "GET", retrieve_page_url, headers={ diff --git a/api/core/rag/extractor/watercrawl/client.py b/api/core/rag/extractor/watercrawl/client.py index 6d596e07d8..7cf6c4d289 100644 --- a/api/core/rag/extractor/watercrawl/client.py +++ b/api/core/rag/extractor/watercrawl/client.py @@ -3,8 +3,8 @@ from collections.abc import Generator from typing import Union from urllib.parse import urljoin -import requests -from requests import Response +import httpx +from httpx import Response from core.rag.extractor.watercrawl.exceptions import ( WaterCrawlAuthenticationError, @@ -20,28 +20,45 @@ class BaseAPIClient: self.session = self.init_session() def init_session(self): - session = requests.Session() - session.headers.update({"X-API-Key": self.api_key}) - session.headers.update({"Content-Type": "application/json"}) - session.headers.update({"Accept": "application/json"}) - session.headers.update({"User-Agent": "WaterCrawl-Plugin"}) - session.headers.update({"Accept-Language": "en-US"}) - return session + headers = { + "X-API-Key": self.api_key, + "Content-Type": "application/json", + "Accept": "application/json", + "User-Agent": "WaterCrawl-Plugin", + "Accept-Language": "en-US", + } + return httpx.Client(headers=headers, 
timeout=None) + + def _request( + self, + method: str, + endpoint: str, + query_params: dict | None = None, + data: dict | None = None, + **kwargs, + ) -> Response: + stream = kwargs.pop("stream", False) + url = urljoin(self.base_url, endpoint) + if stream: + request = self.session.build_request(method, url, params=query_params, json=data) + return self.session.send(request, stream=True, **kwargs) + + return self.session.request(method, url, params=query_params, json=data, **kwargs) def _get(self, endpoint: str, query_params: dict | None = None, **kwargs): - return self.session.get(urljoin(self.base_url, endpoint), params=query_params, **kwargs) + return self._request("GET", endpoint, query_params=query_params, **kwargs) def _post(self, endpoint: str, query_params: dict | None = None, data: dict | None = None, **kwargs): - return self.session.post(urljoin(self.base_url, endpoint), params=query_params, json=data, **kwargs) + return self._request("POST", endpoint, query_params=query_params, data=data, **kwargs) def _put(self, endpoint: str, query_params: dict | None = None, data: dict | None = None, **kwargs): - return self.session.put(urljoin(self.base_url, endpoint), params=query_params, json=data, **kwargs) + return self._request("PUT", endpoint, query_params=query_params, data=data, **kwargs) def _delete(self, endpoint: str, query_params: dict | None = None, **kwargs): - return self.session.delete(urljoin(self.base_url, endpoint), params=query_params, **kwargs) + return self._request("DELETE", endpoint, query_params=query_params, **kwargs) def _patch(self, endpoint: str, query_params: dict | None = None, data: dict | None = None, **kwargs): - return self.session.patch(urljoin(self.base_url, endpoint), params=query_params, json=data, **kwargs) + return self._request("PATCH", endpoint, query_params=query_params, data=data, **kwargs) class WaterCrawlAPIClient(BaseAPIClient): @@ -49,14 +66,17 @@ class WaterCrawlAPIClient(BaseAPIClient): super().__init__(api_key, base_url) def process_eventstream(self, response: Response, download: bool = False) -> Generator: - for line in response.iter_lines(): - line = line.decode("utf-8") - if line.startswith("data:"): - line = line[5:].strip() - data = json.loads(line) - if data["type"] == "result" and download: - data["data"] = self.download_result(data["data"]) - yield data + try: + for raw_line in response.iter_lines(): + line = raw_line.decode("utf-8") if isinstance(raw_line, bytes) else raw_line + if line.startswith("data:"): + line = line[5:].strip() + data = json.loads(line) + if data["type"] == "result" and download: + data["data"] = self.download_result(data["data"]) + yield data + finally: + response.close() def process_response(self, response: Response) -> dict | bytes | list | None | Generator: if response.status_code == 401: @@ -170,7 +190,10 @@ class WaterCrawlAPIClient(BaseAPIClient): return event_data["data"] def download_result(self, result_object: dict): - response = requests.get(result_object["result"]) - response.raise_for_status() - result_object["result"] = response.json() + response = httpx.get(result_object["result"], timeout=None) + try: + response.raise_for_status() + result_object["result"] = response.json() + finally: + response.close() return result_object diff --git a/api/core/rag/extractor/word_extractor.py b/api/core/rag/extractor/word_extractor.py index f25f92cf81..1a9704688a 100644 --- a/api/core/rag/extractor/word_extractor.py +++ b/api/core/rag/extractor/word_extractor.py @@ -9,7 +9,7 @@ import uuid from urllib.parse 
import urlparse from xml.etree import ElementTree -import requests +import httpx from docx import Document as DocxDocument from configs import dify_config @@ -43,15 +43,19 @@ class WordExtractor(BaseExtractor): # If the file is a web path, download it to a temporary file, and use that if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path): - r = requests.get(self.file_path) + response = httpx.get(self.file_path, timeout=None) - if r.status_code != 200: - raise ValueError(f"Check the url of your file; returned status code {r.status_code}") + if response.status_code != 200: + response.close() + raise ValueError(f"Check the url of your file; returned status code {response.status_code}") self.web_path = self.file_path # TODO: use a better way to handle the file self.temp_file = tempfile.NamedTemporaryFile() # noqa SIM115 - self.temp_file.write(r.content) + try: + self.temp_file.write(response.content) + finally: + response.close() self.file_path = self.temp_file.name elif not os.path.isfile(self.file_path): raise ValueError(f"File path {self.file_path} is not a valid file or url") diff --git a/api/core/tools/utils/parser.py b/api/core/tools/utils/parser.py index fcb1d325af..35fd7895b9 100644 --- a/api/core/tools/utils/parser.py +++ b/api/core/tools/utils/parser.py @@ -4,8 +4,8 @@ from json import loads as json_loads from json.decoder import JSONDecodeError from typing import Any +import httpx from flask import request -from requests import get from yaml import YAMLError, safe_load from core.tools.entities.common_entities import I18nObject @@ -334,15 +334,20 @@ class ApiBasedToolSchemaParser: raise ToolNotSupportedError("Only openapi is supported now.") # get openapi yaml - response = get(api_url, headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "}, timeout=5) - - if response.status_code != 200: - raise ToolProviderNotFoundError("cannot get openapi yaml from url.") - - return ApiBasedToolSchemaParser.parse_openapi_yaml_to_tool_bundle( - response.text, extra_info=extra_info, warning=warning + response = httpx.get( + api_url, headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "}, timeout=5 ) + try: + if response.status_code != 200: + raise ToolProviderNotFoundError("cannot get openapi yaml from url.") + + return ApiBasedToolSchemaParser.parse_openapi_yaml_to_tool_bundle( + response.text, extra_info=extra_info, warning=warning + ) + finally: + response.close() + @staticmethod def auto_parse_to_tool_bundle( content: str, extra_info: dict | None = None, warning: dict | None = None diff --git a/api/extensions/ext_otel.py b/api/extensions/ext_otel.py index 19c6e68c6b..cb6e4849a9 100644 --- a/api/extensions/ext_otel.py +++ b/api/extensions/ext_otel.py @@ -138,7 +138,6 @@ def init_app(app: DifyApp): from opentelemetry.instrumentation.flask import FlaskInstrumentor from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor from opentelemetry.instrumentation.redis import RedisInstrumentor - from opentelemetry.instrumentation.requests import RequestsInstrumentor from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor from opentelemetry.metrics import get_meter, get_meter_provider, set_meter_provider from opentelemetry.propagate import set_global_textmap @@ -238,7 +237,6 @@ def init_app(app: DifyApp): instrument_exception_logging() init_sqlalchemy_instrumentor(app) RedisInstrumentor().instrument() - RequestsInstrumentor().instrument() HTTPXClientInstrumentor().instrument() atexit.register(shutdown_tracer) diff 
--git a/api/pyproject.toml b/api/pyproject.toml index e2a50a43f6..22eedf7b8b 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -48,7 +48,7 @@ dependencies = [ "opentelemetry-instrumentation-flask==0.48b0", "opentelemetry-instrumentation-httpx==0.48b0", "opentelemetry-instrumentation-redis==0.48b0", - "opentelemetry-instrumentation-requests==0.48b0", + "opentelemetry-instrumentation-httpx==0.48b0", "opentelemetry-instrumentation-sqlalchemy==0.48b0", "opentelemetry-propagator-b3==1.27.0", # opentelemetry-proto1.28.0 depends on protobuf (>=5.0,<6.0), @@ -145,8 +145,6 @@ dev = [ "types-pywin32~=310.0.0", "types-pyyaml~=6.0.12", "types-regex~=2024.11.6", - "types-requests~=2.32.0", - "types-requests-oauthlib~=2.0.0", "types-shapely~=2.0.0", "types-simplejson>=3.20.0", "types-six>=1.17.0", diff --git a/api/pyrightconfig.json b/api/pyrightconfig.json index 67571316a9..bf4ec2314e 100644 --- a/api/pyrightconfig.json +++ b/api/pyrightconfig.json @@ -15,7 +15,8 @@ "opentelemetry.instrumentation.httpx", "opentelemetry.instrumentation.requests", "opentelemetry.instrumentation.sqlalchemy", - "opentelemetry.instrumentation.redis" + "opentelemetry.instrumentation.redis", + "opentelemetry.instrumentation.httpx" ], "reportUnknownMemberType": "hint", "reportUnknownParameterType": "hint", diff --git a/api/services/enterprise/base.py b/api/services/enterprise/base.py index edb76408e8..bdc960aa2d 100644 --- a/api/services/enterprise/base.py +++ b/api/services/enterprise/base.py @@ -1,10 +1,12 @@ import os +from collections.abc import Mapping +from typing import Any -import requests +import httpx class BaseRequest: - proxies = { + proxies: Mapping[str, str] | None = { "http": "", "https": "", } @@ -13,10 +15,31 @@ class BaseRequest: secret_key_header = "" @classmethod - def send_request(cls, method, endpoint, json=None, params=None): + def _build_mounts(cls) -> dict[str, httpx.BaseTransport] | None: + if not cls.proxies: + return None + + mounts: dict[str, httpx.BaseTransport] = {} + for scheme, value in cls.proxies.items(): + if not value: + continue + key = f"{scheme}://" if not scheme.endswith("://") else scheme + mounts[key] = httpx.HTTPTransport(proxy=value) + return mounts or None + + @classmethod + def send_request( + cls, + method: str, + endpoint: str, + json: Any | None = None, + params: Mapping[str, Any] | None = None, + ) -> Any: headers = {"Content-Type": "application/json", cls.secret_key_header: cls.secret_key} url = f"{cls.base_url}{endpoint}" - response = requests.request(method, url, json=json, params=params, headers=headers, proxies=cls.proxies) + mounts = cls._build_mounts() + with httpx.Client(mounts=mounts) as client: + response = client.request(method, url, json=json, params=params, headers=headers) return response.json() diff --git a/api/services/rag_pipeline/pipeline_template/remote/remote_retrieval.py b/api/services/rag_pipeline/pipeline_template/remote/remote_retrieval.py index 8f96842337..571ca6c7a6 100644 --- a/api/services/rag_pipeline/pipeline_template/remote/remote_retrieval.py +++ b/api/services/rag_pipeline/pipeline_template/remote/remote_retrieval.py @@ -1,6 +1,6 @@ import logging -import requests +import httpx from configs import dify_config from services.rag_pipeline.pipeline_template.database.database_retrieval import DatabasePipelineTemplateRetrieval @@ -43,7 +43,7 @@ class RemotePipelineTemplateRetrieval(PipelineTemplateRetrievalBase): """ domain = dify_config.HOSTED_FETCH_PIPELINE_TEMPLATES_REMOTE_DOMAIN url = f"{domain}/pipeline-templates/{template_id}" - 
response = requests.get(url, timeout=(3, 10)) + response = httpx.get(url, timeout=httpx.Timeout(10.0, connect=3.0)) if response.status_code != 200: return None data: dict = response.json() @@ -58,7 +58,7 @@ class RemotePipelineTemplateRetrieval(PipelineTemplateRetrievalBase): """ domain = dify_config.HOSTED_FETCH_PIPELINE_TEMPLATES_REMOTE_DOMAIN url = f"{domain}/pipeline-templates?language={language}" - response = requests.get(url, timeout=(3, 10)) + response = httpx.get(url, timeout=httpx.Timeout(10.0, connect=3.0)) if response.status_code != 200: raise ValueError(f"fetch pipeline templates failed, status code: {response.status_code}") diff --git a/api/services/recommend_app/remote/remote_retrieval.py b/api/services/recommend_app/remote/remote_retrieval.py index 2d57769f63..b217c9026a 100644 --- a/api/services/recommend_app/remote/remote_retrieval.py +++ b/api/services/recommend_app/remote/remote_retrieval.py @@ -1,6 +1,6 @@ import logging -import requests +import httpx from configs import dify_config from services.recommend_app.buildin.buildin_retrieval import BuildInRecommendAppRetrieval @@ -43,7 +43,7 @@ class RemoteRecommendAppRetrieval(RecommendAppRetrievalBase): """ domain = dify_config.HOSTED_FETCH_APP_TEMPLATES_REMOTE_DOMAIN url = f"{domain}/apps/{app_id}" - response = requests.get(url, timeout=(3, 10)) + response = httpx.get(url, timeout=httpx.Timeout(10.0, connect=3.0)) if response.status_code != 200: return None data: dict = response.json() @@ -58,7 +58,7 @@ class RemoteRecommendAppRetrieval(RecommendAppRetrievalBase): """ domain = dify_config.HOSTED_FETCH_APP_TEMPLATES_REMOTE_DOMAIN url = f"{domain}/apps?language={language}" - response = requests.get(url, timeout=(3, 10)) + response = httpx.get(url, timeout=httpx.Timeout(10.0, connect=3.0)) if response.status_code != 200: raise ValueError(f"fetch recommended apps failed, status code: {response.status_code}") diff --git a/api/tests/integration_tests/vdb/__mock/baiduvectordb.py b/api/tests/integration_tests/vdb/__mock/baiduvectordb.py index 6d2aff5197..8a43d03a43 100644 --- a/api/tests/integration_tests/vdb/__mock/baiduvectordb.py +++ b/api/tests/integration_tests/vdb/__mock/baiduvectordb.py @@ -1,5 +1,6 @@ import os from collections import UserDict +from typing import Any from unittest.mock import MagicMock import pytest @@ -9,7 +10,6 @@ from pymochow.model.database import Database # type: ignore from pymochow.model.enum import IndexState, IndexType, MetricType, ReadConsistency, TableState # type: ignore from pymochow.model.schema import HNSWParams, VectorIndex # type: ignore from pymochow.model.table import Table # type: ignore -from requests.adapters import HTTPAdapter class AttrDict(UserDict): @@ -21,7 +21,7 @@ class MockBaiduVectorDBClass: def mock_vector_db_client( self, config=None, - adapter: HTTPAdapter | None = None, + adapter: Any | None = None, ): self.conn = MagicMock() self._config = MagicMock() diff --git a/api/tests/integration_tests/vdb/__mock/tcvectordb.py b/api/tests/integration_tests/vdb/__mock/tcvectordb.py index e0b908cece..5130fcfe17 100644 --- a/api/tests/integration_tests/vdb/__mock/tcvectordb.py +++ b/api/tests/integration_tests/vdb/__mock/tcvectordb.py @@ -1,9 +1,8 @@ import os -from typing import Union +from typing import Any, Union import pytest from _pytest.monkeypatch import MonkeyPatch -from requests.adapters import HTTPAdapter from tcvectordb import RPCVectorDBClient # type: ignore from tcvectordb.model import enum from tcvectordb.model.collection import FilterIndexConfig @@ -23,7 +22,7 @@ class 
MockTcvectordbClass: key="", read_consistency: ReadConsistency = ReadConsistency.EVENTUAL_CONSISTENCY, timeout=10, - adapter: HTTPAdapter | None = None, + adapter: Any | None = None, pool_size: int = 2, proxies: dict | None = None, password: str | None = None, diff --git a/api/tests/unit_tests/core/rag/extractor/firecrawl/test_firecrawl.py b/api/tests/unit_tests/core/rag/extractor/firecrawl/test_firecrawl.py index 6689e13b96..e5ead6ff66 100644 --- a/api/tests/unit_tests/core/rag/extractor/firecrawl/test_firecrawl.py +++ b/api/tests/unit_tests/core/rag/extractor/firecrawl/test_firecrawl.py @@ -18,7 +18,7 @@ def test_firecrawl_web_extractor_crawl_mode(mocker): mocked_firecrawl = { "id": "test", } - mocker.patch("requests.post", return_value=_mock_response(mocked_firecrawl)) + mocker.patch("httpx.post", return_value=_mock_response(mocked_firecrawl)) job_id = firecrawl_app.crawl_url(url, params) assert job_id is not None diff --git a/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py index eea584a2f8..f1e1820acc 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py @@ -69,7 +69,7 @@ def test_notion_page(mocker): ], "next_cursor": None, } - mocker.patch("requests.request", return_value=_mock_response(mocked_notion_page)) + mocker.patch("httpx.request", return_value=_mock_response(mocked_notion_page)) page_docs = extractor._load_data_as_documents(page_id, "page") assert len(page_docs) == 1 @@ -84,7 +84,7 @@ def test_notion_database(mocker): "results": [_generate_page(i) for i in page_title_list], "next_cursor": None, } - mocker.patch("requests.post", return_value=_mock_response(mocked_notion_database)) + mocker.patch("httpx.post", return_value=_mock_response(mocked_notion_database)) database_docs = extractor._load_data_as_documents(database_id, "database") assert len(database_docs) == 1 content = _remove_multiple_new_lines(database_docs[0].page_content) diff --git a/api/uv.lock b/api/uv.lock index 43db17b06f..af368199b7 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1325,7 +1325,6 @@ dependencies = [ { name = "opentelemetry-instrumentation-flask" }, { name = "opentelemetry-instrumentation-httpx" }, { name = "opentelemetry-instrumentation-redis" }, - { name = "opentelemetry-instrumentation-requests" }, { name = "opentelemetry-instrumentation-sqlalchemy" }, { name = "opentelemetry-propagator-b3" }, { name = "opentelemetry-proto" }, @@ -1418,8 +1417,6 @@ dev = [ { name = "types-pyyaml" }, { name = "types-redis" }, { name = "types-regex" }, - { name = "types-requests" }, - { name = "types-requests-oauthlib" }, { name = "types-setuptools" }, { name = "types-shapely" }, { name = "types-simplejson" }, @@ -1516,7 +1513,6 @@ requires-dist = [ { name = "opentelemetry-instrumentation-flask", specifier = "==0.48b0" }, { name = "opentelemetry-instrumentation-httpx", specifier = "==0.48b0" }, { name = "opentelemetry-instrumentation-redis", specifier = "==0.48b0" }, - { name = "opentelemetry-instrumentation-requests", specifier = "==0.48b0" }, { name = "opentelemetry-instrumentation-sqlalchemy", specifier = "==0.48b0" }, { name = "opentelemetry-propagator-b3", specifier = "==1.27.0" }, { name = "opentelemetry-proto", specifier = "==1.27.0" }, @@ -1609,8 +1605,6 @@ dev = [ { name = "types-pyyaml", specifier = "~=6.0.12" }, { name = "types-redis", specifier = ">=4.6.0.20241004" }, { name = "types-regex", specifier = "~=2024.11.6" }, - { 
name = "types-requests", specifier = "~=2.32.0" }, - { name = "types-requests-oauthlib", specifier = "~=2.0.0" }, { name = "types-setuptools", specifier = ">=80.9.0" }, { name = "types-shapely", specifier = "~=2.0.0" }, { name = "types-simplejson", specifier = ">=3.20.0" }, @@ -3910,21 +3904,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/94/40/892f30d400091106309cc047fd3f6d76a828fedd984a953fd5386b78a2fb/opentelemetry_instrumentation_redis-0.48b0-py3-none-any.whl", hash = "sha256:48c7f2e25cbb30bde749dc0d8b9c74c404c851f554af832956b9630b27f5bcb7", size = 11610, upload-time = "2024-08-28T21:27:18.759Z" }, ] -[[package]] -name = "opentelemetry-instrumentation-requests" -version = "0.48b0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "opentelemetry-api" }, - { name = "opentelemetry-instrumentation" }, - { name = "opentelemetry-semantic-conventions" }, - { name = "opentelemetry-util-http" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/52/ac/5eb78efde21ff21d0ad5dc8c6cc6a0f8ae482ce8a46293c2f45a628b6166/opentelemetry_instrumentation_requests-0.48b0.tar.gz", hash = "sha256:67ab9bd877a0352ee0db4616c8b4ae59736ddd700c598ed907482d44f4c9a2b3", size = 14120, upload-time = "2024-08-28T21:28:16.933Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/43/df/0df9226d1b14f29d23c07e6194b9fd5ad50e7d987b7fd13df7dcf718aeb1/opentelemetry_instrumentation_requests-0.48b0-py3-none-any.whl", hash = "sha256:d4f01852121d0bd4c22f14f429654a735611d4f7bf3cf93f244bdf1489b2233d", size = 12366, upload-time = "2024-08-28T21:27:20.771Z" }, -] - [[package]] name = "opentelemetry-instrumentation-sqlalchemy" version = "0.48b0" @@ -6440,19 +6419,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/2b/6f/ec0012be842b1d888d46884ac5558fd62aeae1f0ec4f7a581433d890d4b5/types_requests-2.32.4.20250809-py3-none-any.whl", hash = "sha256:f73d1832fb519ece02c85b1f09d5f0dd3108938e7d47e7f94bbfa18a6782b163", size = 20644, upload-time = "2025-08-09T03:17:09.716Z" }, ] -[[package]] -name = "types-requests-oauthlib" -version = "2.0.0.20250809" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "types-oauthlib" }, - { name = "types-requests" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ed/40/5eca857a2dbda0fedd69b7fd3f51cb0b6ece8d448327d29f0ae54612ec98/types_requests_oauthlib-2.0.0.20250809.tar.gz", hash = "sha256:f3b9b31e0394fe2c362f0d44bc9ef6d5c150a298d01089513cd54a51daec37a2", size = 11008, upload-time = "2025-08-09T03:17:50.705Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f3/38/8777f0ab409a7249777f230f6aefe0e9ba98355dc8b05fb31391fa30f312/types_requests_oauthlib-2.0.0.20250809-py3-none-any.whl", hash = "sha256:0d1af4907faf9f4a1b0f0afbc7ec488f1dd5561a2b5b6dad70f78091a1acfb76", size = 14319, upload-time = "2025-08-09T03:17:49.786Z" }, -] - [[package]] name = "types-s3transfer" version = "0.13.1" diff --git a/scripts/stress-test/setup/import_workflow_app.py b/scripts/stress-test/setup/import_workflow_app.py index 86d0239e35..41a76bd29b 100755 --- a/scripts/stress-test/setup/import_workflow_app.py +++ b/scripts/stress-test/setup/import_workflow_app.py @@ -8,7 +8,7 @@ sys.path.append(str(Path(__file__).parent.parent)) import json import httpx -from common import Logger, config_helper +from common import Logger, config_helper # type: ignore[import] def import_workflow_app() -> None: From 1bd621f81990be01f5af3844297514277e8269aa Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Sat, 11 Oct 2025 
10:08:29 +0900 Subject: [PATCH 25/49] remove .value (#26633) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- .../middleware/vdb/opensearch_config.py | 19 ++++--- api/controllers/console/app/app.py | 2 +- api/controllers/console/app/app_import.py | 6 +- api/controllers/console/app/conversation.py | 2 +- api/controllers/console/app/statistic.py | 16 +++--- .../console/app/workflow_statistic.py | 8 +-- api/controllers/console/auth/activate.py | 2 +- api/controllers/console/auth/oauth.py | 6 +- .../console/datasets/data_source.py | 2 +- api/controllers/console/datasets/datasets.py | 22 ++++---- .../console/datasets/datasets_document.py | 8 +-- .../rag_pipeline/rag_pipeline_import.py | 6 +- api/controllers/inner_api/plugin/wraps.py | 6 +- api/controllers/service_api/wraps.py | 4 +- .../easy_ui_based_app/dataset/manager.py | 6 +- .../model_config/converter.py | 2 +- .../prompt_template/manager.py | 4 +- .../app/apps/agent_chat/app_config_manager.py | 2 +- api/core/app/apps/agent_chat/app_runner.py | 4 +- api/core/app/apps/pipeline/pipeline_runner.py | 4 +- api/core/app/apps/workflow_app_runner.py | 8 +-- api/core/datasource/entities/api_entities.py | 2 +- .../entities/datasource_entities.py | 16 +++--- api/core/entities/provider_configuration.py | 6 +- api/core/indexing_runner.py | 6 +- .../output_parser/structured_output.py | 12 ++-- .../arize_phoenix_trace.py | 12 ++-- api/core/ops/langfuse_trace/langfuse_trace.py | 18 +++--- .../ops/langsmith_trace/langsmith_trace.py | 14 ++--- api/core/ops/opik_trace/opik_trace.py | 18 +++--- api/core/ops/weave_trace/weave_trace.py | 14 ++--- api/core/plugin/backwards_invocation/node.py | 2 +- api/core/plugin/entities/request.py | 8 +-- api/core/provider_manager.py | 16 +++--- .../data_post_processor.py | 4 +- api/core/rag/datasource/retrieval_service.py | 12 ++-- .../vdb/clickzetta/clickzetta_vector.py | 56 ++++++++----------- .../elasticsearch/elasticsearch_ja_vector.py | 6 +- .../vdb/elasticsearch/elasticsearch_vector.py | 30 +++++----- .../vdb/huawei/huawei_cloud_vector.py | 28 +++++----- .../datasource/vdb/lindorm/lindorm_vector.py | 28 +++++----- .../datasource/vdb/milvus/milvus_vector.py | 40 ++++++------- .../vdb/opensearch/opensearch_vector.py | 37 ++++++------ .../datasource/vdb/qdrant/qdrant_vector.py | 24 ++++---- .../vdb/tablestore/tablestore_vector.py | 50 ++++++++--------- .../tidb_on_qdrant/tidb_on_qdrant_vector.py | 24 ++++---- .../datasource/vdb/tidb_vector/tidb_vector.py | 6 +- .../vdb/vikingdb/vikingdb_vector.py | 30 +++++----- .../vdb/weaviate/weaviate_vector.py | 10 ++-- api/core/rag/entities/event.py | 6 +- api/core/rag/extractor/extract_processor.py | 10 ++-- api/core/rag/rerank/rerank_factory.py | 4 +- api/core/rag/retrieval/dataset_retrieval.py | 4 +- api/core/rag/retrieval/retrieval_methods.py | 4 +- api/core/tools/builtin_tool/provider.py | 10 ++-- api/core/tools/entities/api_entities.py | 6 +- api/core/tools/entities/tool_entities.py | 2 +- .../dataset_multi_retriever_tool.py | 2 +- .../dataset_retriever_tool.py | 2 +- api/core/tools/utils/parser.py | 6 +- api/core/workflow/nodes/agent/agent_node.py | 10 ++-- .../nodes/datasource/datasource_node.py | 6 +- api/core/workflow/nodes/http_request/node.py | 2 +- .../nodes/iteration/iteration_node.py | 2 +- .../knowledge_index/knowledge_index_node.py | 4 +- .../knowledge_retrieval_node.py | 2 +- api/core/workflow/nodes/llm/llm_utils.py | 4 +- 
api/core/workflow/nodes/llm/node.py | 2 +- api/core/workflow/nodes/tool/tool_node.py | 2 +- api/core/workflow/workflow_entry.py | 2 +- ...rameters_cache_when_sync_draft_workflow.py | 2 +- ...oin_when_app_published_workflow_updated.py | 2 +- .../update_provider_when_message_created.py | 2 +- .../clickzetta_volume/file_lifecycle.py | 4 +- api/factories/file_factory.py | 6 +- api/models/api_based_extension.py | 2 +- api/models/dataset.py | 2 +- api/models/model.py | 18 +++--- api/models/provider.py | 2 +- api/models/workflow.py | 4 +- api/services/account_service.py | 22 ++++---- api/services/app_dsl_service.py | 18 +++--- api/services/datasource_provider_service.py | 4 +- api/services/hit_testing_service.py | 2 +- api/services/plugin/plugin_migration.py | 4 +- api/services/rag_pipeline/rag_pipeline.py | 2 +- .../rag_pipeline/rag_pipeline_dsl_service.py | 26 ++++----- .../rag_pipeline_transform_service.py | 4 +- .../tools/api_tools_manage_service.py | 4 +- api/services/tools/tools_transform_service.py | 6 +- api/services/vector_service.py | 2 +- api/services/webapp_auth_service.py | 4 +- api/services/workflow/workflow_converter.py | 22 ++++---- .../workflow_draft_variable_service.py | 2 +- api/services/workflow_run_service.py | 2 +- api/services/workflow_service.py | 2 +- .../app/test_chat_message_permissions.py | 2 +- .../app/test_model_config_permissions.py | 2 +- .../test_workflow_draft_variable_service.py | 2 +- .../vdb/__mock/huaweicloudvectordb.py | 18 +++--- .../integration_tests/vdb/__mock/vikingdb.py | 44 +++++++-------- .../vdb/opensearch/test_opensearch.py | 4 +- .../services/test_account_service.py | 18 +++--- .../services/test_file_service.py | 6 +- .../services/test_metadata_service.py | 2 +- .../test_model_load_balancing_service.py | 2 +- .../services/test_model_provider_service.py | 2 +- .../services/test_tag_service.py | 2 +- .../services/test_web_conversation_service.py | 2 +- .../services/test_webapp_auth_service.py | 12 ++-- .../services/test_workflow_app_service.py | 48 ++++++++-------- .../services/test_workflow_run_service.py | 12 ++-- .../services/test_workflow_service.py | 4 +- .../services/test_workspace_service.py | 22 ++++---- .../tools/test_api_tools_manage_service.py | 2 +- .../tools/test_mcp_tools_manage_service.py | 2 +- .../tools/test_tools_transform_service.py | 12 ++-- .../workflow/test_workflow_converter.py | 12 ++-- .../tasks/test_add_document_to_index_task.py | 2 +- .../tasks/test_batch_clean_document_task.py | 2 +- ...test_batch_create_segment_to_index_task.py | 2 +- .../test_create_segment_to_index_task.py | 2 +- .../test_disable_segment_from_index_task.py | 2 +- .../tasks/test_document_indexing_task.py | 4 +- .../test_enable_segments_to_index_task.py | 2 +- .../tasks/test_mail_account_deletion_task.py | 2 +- .../tasks/test_mail_change_mail_task.py | 2 +- .../tasks/test_mail_invite_member_task.py | 12 ++-- .../controllers/console/auth/test_oauth.py | 12 ++-- ...st_celery_workflow_execution_repository.py | 2 +- ...lery_workflow_node_execution_repository.py | 2 +- ...test_workflow_node_execution_truncation.py | 4 +- .../command_channels/test_redis_channel.py | 4 +- .../test_mock_iteration_simple.py | 8 +-- .../test_redis_stop_integration.py | 8 +-- .../v1/test_variable_assigner_v1.py | 6 +- .../test_sqlalchemy_repository.py | 2 +- .../workflow/test_workflow_converter.py | 4 +- 138 files changed, 613 insertions(+), 633 deletions(-) diff --git a/api/configs/middleware/vdb/opensearch_config.py b/api/configs/middleware/vdb/opensearch_config.py index 
ba015a6eb9..a7d712545e 100644 --- a/api/configs/middleware/vdb/opensearch_config.py +++ b/api/configs/middleware/vdb/opensearch_config.py @@ -1,23 +1,24 @@ -from enum import Enum +from enum import StrEnum from typing import Literal from pydantic import Field, PositiveInt from pydantic_settings import BaseSettings +class AuthMethod(StrEnum): + """ + Authentication method for OpenSearch + """ + + BASIC = "basic" + AWS_MANAGED_IAM = "aws_managed_iam" + + class OpenSearchConfig(BaseSettings): """ Configuration settings for OpenSearch """ - class AuthMethod(Enum): - """ - Authentication method for OpenSearch - """ - - BASIC = "basic" - AWS_MANAGED_IAM = "aws_managed_iam" - OPENSEARCH_HOST: str | None = Field( description="Hostname or IP address of the OpenSearch server (e.g., 'localhost' or 'opensearch.example.com')", default=None, diff --git a/api/controllers/console/app/app.py b/api/controllers/console/app/app.py index 23b8e2c5a2..3927685af3 100644 --- a/api/controllers/console/app/app.py +++ b/api/controllers/console/app/app.py @@ -304,7 +304,7 @@ class AppCopyApi(Resource): account = cast(Account, current_user) result = import_service.import_app( account=account, - import_mode=ImportMode.YAML_CONTENT.value, + import_mode=ImportMode.YAML_CONTENT, yaml_content=yaml_content, name=args.get("name"), description=args.get("description"), diff --git a/api/controllers/console/app/app_import.py b/api/controllers/console/app/app_import.py index c14f597c25..037561cfed 100644 --- a/api/controllers/console/app/app_import.py +++ b/api/controllers/console/app/app_import.py @@ -70,9 +70,9 @@ class AppImportApi(Resource): EnterpriseService.WebAppAuth.update_app_access_mode(result.app_id, "private") # Return appropriate status code based on result status = result.status - if status == ImportStatus.FAILED.value: + if status == ImportStatus.FAILED: return result.model_dump(mode="json"), 400 - elif status == ImportStatus.PENDING.value: + elif status == ImportStatus.PENDING: return result.model_dump(mode="json"), 202 return result.model_dump(mode="json"), 200 @@ -97,7 +97,7 @@ class AppImportConfirmApi(Resource): session.commit() # Return appropriate status code based on result - if result.status == ImportStatus.FAILED.value: + if result.status == ImportStatus.FAILED: return result.model_dump(mode="json"), 400 return result.model_dump(mode="json"), 200 diff --git a/api/controllers/console/app/conversation.py b/api/controllers/console/app/conversation.py index f104ab5dee..3b8dff613b 100644 --- a/api/controllers/console/app/conversation.py +++ b/api/controllers/console/app/conversation.py @@ -309,7 +309,7 @@ class ChatConversationApi(Resource): ) if app_model.mode == AppMode.ADVANCED_CHAT: - query = query.where(Conversation.invoke_from != InvokeFrom.DEBUGGER.value) + query = query.where(Conversation.invoke_from != InvokeFrom.DEBUGGER) match args["sort_by"]: case "created_at": diff --git a/api/controllers/console/app/statistic.py b/api/controllers/console/app/statistic.py index 6471b843c6..5974395c6a 100644 --- a/api/controllers/console/app/statistic.py +++ b/api/controllers/console/app/statistic.py @@ -52,7 +52,7 @@ FROM WHERE app_id = :app_id AND invoke_from != :invoke_from""" - arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER.value} + arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER} timezone = pytz.timezone(account.timezone) utc_timezone = pytz.utc @@ -127,7 +127,7 @@ class DailyConversationStatistic(Resource): 
sa.func.count(sa.distinct(Message.conversation_id)).label("conversation_count"), ) .select_from(Message) - .where(Message.app_id == app_model.id, Message.invoke_from != InvokeFrom.DEBUGGER.value) + .where(Message.app_id == app_model.id, Message.invoke_from != InvokeFrom.DEBUGGER) ) if args["start"]: @@ -190,7 +190,7 @@ FROM WHERE app_id = :app_id AND invoke_from != :invoke_from""" - arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER.value} + arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER} timezone = pytz.timezone(account.timezone) utc_timezone = pytz.utc @@ -263,7 +263,7 @@ FROM WHERE app_id = :app_id AND invoke_from != :invoke_from""" - arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER.value} + arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER} timezone = pytz.timezone(account.timezone) utc_timezone = pytz.utc @@ -345,7 +345,7 @@ FROM WHERE c.app_id = :app_id AND m.invoke_from != :invoke_from""" - arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER.value} + arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER} timezone = pytz.timezone(account.timezone) utc_timezone = pytz.utc @@ -432,7 +432,7 @@ LEFT JOIN WHERE m.app_id = :app_id AND m.invoke_from != :invoke_from""" - arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER.value} + arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER} timezone = pytz.timezone(account.timezone) utc_timezone = pytz.utc @@ -509,7 +509,7 @@ FROM WHERE app_id = :app_id AND invoke_from != :invoke_from""" - arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER.value} + arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER} timezone = pytz.timezone(account.timezone) utc_timezone = pytz.utc @@ -584,7 +584,7 @@ FROM WHERE app_id = :app_id AND invoke_from != :invoke_from""" - arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER.value} + arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER} timezone = pytz.timezone(account.timezone) utc_timezone = pytz.utc diff --git a/api/controllers/console/app/workflow_statistic.py b/api/controllers/console/app/workflow_statistic.py index 535e7cadd6..b8904bf3d9 100644 --- a/api/controllers/console/app/workflow_statistic.py +++ b/api/controllers/console/app/workflow_statistic.py @@ -47,7 +47,7 @@ WHERE arg_dict = { "tz": account.timezone, "app_id": app_model.id, - "triggered_from": WorkflowRunTriggeredFrom.APP_RUN.value, + "triggered_from": WorkflowRunTriggeredFrom.APP_RUN, } timezone = pytz.timezone(account.timezone) @@ -115,7 +115,7 @@ WHERE arg_dict = { "tz": account.timezone, "app_id": app_model.id, - "triggered_from": WorkflowRunTriggeredFrom.APP_RUN.value, + "triggered_from": WorkflowRunTriggeredFrom.APP_RUN, } timezone = pytz.timezone(account.timezone) @@ -183,7 +183,7 @@ WHERE arg_dict = { "tz": account.timezone, "app_id": app_model.id, - "triggered_from": WorkflowRunTriggeredFrom.APP_RUN.value, + "triggered_from": WorkflowRunTriggeredFrom.APP_RUN, } timezone = pytz.timezone(account.timezone) @@ -269,7 +269,7 @@ GROUP BY arg_dict = { "tz": account.timezone, "app_id": app_model.id, - "triggered_from": 
WorkflowRunTriggeredFrom.APP_RUN.value, + "triggered_from": WorkflowRunTriggeredFrom.APP_RUN, } timezone = pytz.timezone(account.timezone) diff --git a/api/controllers/console/auth/activate.py b/api/controllers/console/auth/activate.py index 8cdadfb03c..76171e3f8a 100644 --- a/api/controllers/console/auth/activate.py +++ b/api/controllers/console/auth/activate.py @@ -103,7 +103,7 @@ class ActivateApi(Resource): account.interface_language = args["interface_language"] account.timezone = args["timezone"] account.interface_theme = "light" - account.status = AccountStatus.ACTIVE.value + account.status = AccountStatus.ACTIVE account.initialized_at = naive_utc_now() db.session.commit() diff --git a/api/controllers/console/auth/oauth.py b/api/controllers/console/auth/oauth.py index 5528dc0569..4efeceb676 100644 --- a/api/controllers/console/auth/oauth.py +++ b/api/controllers/console/auth/oauth.py @@ -130,11 +130,11 @@ class OAuthCallback(Resource): return redirect(f"{dify_config.CONSOLE_WEB_URL}/signin?message={e.description}") # Check account status - if account.status == AccountStatus.BANNED.value: + if account.status == AccountStatus.BANNED: return redirect(f"{dify_config.CONSOLE_WEB_URL}/signin?message=Account is banned.") - if account.status == AccountStatus.PENDING.value: - account.status = AccountStatus.ACTIVE.value + if account.status == AccountStatus.PENDING: + account.status = AccountStatus.ACTIVE account.initialized_at = naive_utc_now() db.session.commit() diff --git a/api/controllers/console/datasets/data_source.py b/api/controllers/console/datasets/data_source.py index b0f18c11d4..6d9d675e87 100644 --- a/api/controllers/console/datasets/data_source.py +++ b/api/controllers/console/datasets/data_source.py @@ -256,7 +256,7 @@ class DataSourceNotionApi(Resource): credential_id = notion_info.get("credential_id") for page in notion_info["pages"]: extract_setting = ExtractSetting( - datasource_type=DatasourceType.NOTION.value, + datasource_type=DatasourceType.NOTION, notion_info=NotionInfo.model_validate( { "credential_id": credential_id, diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index 284f88ff1e..dda0125687 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -500,7 +500,7 @@ class DatasetIndexingEstimateApi(Resource): if file_details: for file_detail in file_details: extract_setting = ExtractSetting( - datasource_type=DatasourceType.FILE.value, + datasource_type=DatasourceType.FILE, upload_file=file_detail, document_model=args["doc_form"], ) @@ -512,7 +512,7 @@ class DatasetIndexingEstimateApi(Resource): credential_id = notion_info.get("credential_id") for page in notion_info["pages"]: extract_setting = ExtractSetting( - datasource_type=DatasourceType.NOTION.value, + datasource_type=DatasourceType.NOTION, notion_info=NotionInfo.model_validate( { "credential_id": credential_id, @@ -529,7 +529,7 @@ class DatasetIndexingEstimateApi(Resource): website_info_list = args["info_list"]["website_info_list"] for url in website_info_list["urls"]: extract_setting = ExtractSetting( - datasource_type=DatasourceType.WEBSITE.value, + datasource_type=DatasourceType.WEBSITE, website_info=WebsiteInfo.model_validate( { "provider": website_info_list["provider"], @@ -786,7 +786,7 @@ class DatasetRetrievalSettingApi(Resource): | VectorType.VIKINGDB | VectorType.UPSTASH ): - return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]} + return {"retrieval_method": 
[RetrievalMethod.SEMANTIC_SEARCH]} case ( VectorType.QDRANT | VectorType.WEAVIATE @@ -813,9 +813,9 @@ class DatasetRetrievalSettingApi(Resource): ): return { "retrieval_method": [ - RetrievalMethod.SEMANTIC_SEARCH.value, - RetrievalMethod.FULL_TEXT_SEARCH.value, - RetrievalMethod.HYBRID_SEARCH.value, + RetrievalMethod.SEMANTIC_SEARCH, + RetrievalMethod.FULL_TEXT_SEARCH, + RetrievalMethod.HYBRID_SEARCH, ] } case _: @@ -842,7 +842,7 @@ class DatasetRetrievalSettingMockApi(Resource): | VectorType.VIKINGDB | VectorType.UPSTASH ): - return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]} + return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH]} case ( VectorType.QDRANT | VectorType.WEAVIATE @@ -867,9 +867,9 @@ class DatasetRetrievalSettingMockApi(Resource): ): return { "retrieval_method": [ - RetrievalMethod.SEMANTIC_SEARCH.value, - RetrievalMethod.FULL_TEXT_SEARCH.value, - RetrievalMethod.HYBRID_SEARCH.value, + RetrievalMethod.SEMANTIC_SEARCH, + RetrievalMethod.FULL_TEXT_SEARCH, + RetrievalMethod.HYBRID_SEARCH, ] } case _: diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py index a90730e997..011dacde76 100644 --- a/api/controllers/console/datasets/datasets_document.py +++ b/api/controllers/console/datasets/datasets_document.py @@ -475,7 +475,7 @@ class DocumentIndexingEstimateApi(DocumentResource): raise NotFound("File not found.") extract_setting = ExtractSetting( - datasource_type=DatasourceType.FILE.value, upload_file=file, document_model=document.doc_form + datasource_type=DatasourceType.FILE, upload_file=file, document_model=document.doc_form ) indexing_runner = IndexingRunner() @@ -538,7 +538,7 @@ class DocumentBatchIndexingEstimateApi(DocumentResource): raise NotFound("File not found.") extract_setting = ExtractSetting( - datasource_type=DatasourceType.FILE.value, upload_file=file_detail, document_model=document.doc_form + datasource_type=DatasourceType.FILE, upload_file=file_detail, document_model=document.doc_form ) extract_settings.append(extract_setting) @@ -546,7 +546,7 @@ class DocumentBatchIndexingEstimateApi(DocumentResource): if not data_source_info: continue extract_setting = ExtractSetting( - datasource_type=DatasourceType.NOTION.value, + datasource_type=DatasourceType.NOTION, notion_info=NotionInfo.model_validate( { "credential_id": data_source_info["credential_id"], @@ -563,7 +563,7 @@ class DocumentBatchIndexingEstimateApi(DocumentResource): if not data_source_info: continue extract_setting = ExtractSetting( - datasource_type=DatasourceType.WEBSITE.value, + datasource_type=DatasourceType.WEBSITE, website_info=WebsiteInfo.model_validate( { "provider": data_source_info["provider"], diff --git a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_import.py b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_import.py index e0b918456b..a82872ba2b 100644 --- a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_import.py +++ b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_import.py @@ -60,9 +60,9 @@ class RagPipelineImportApi(Resource): # Return appropriate status code based on result status = result.status - if status == ImportStatus.FAILED.value: + if status == ImportStatus.FAILED: return result.model_dump(mode="json"), 400 - elif status == ImportStatus.PENDING.value: + elif status == ImportStatus.PENDING: return result.model_dump(mode="json"), 202 return result.model_dump(mode="json"), 200 @@ -87,7 +87,7 @@ class 
RagPipelineImportConfirmApi(Resource): session.commit() # Return appropriate status code based on result - if result.status == ImportStatus.FAILED.value: + if result.status == ImportStatus.FAILED: return result.model_dump(mode="json"), 400 return result.model_dump(mode="json"), 200 diff --git a/api/controllers/inner_api/plugin/wraps.py b/api/controllers/inner_api/plugin/wraps.py index a36d6b0745..1f588bedce 100644 --- a/api/controllers/inner_api/plugin/wraps.py +++ b/api/controllers/inner_api/plugin/wraps.py @@ -25,8 +25,8 @@ def get_user(tenant_id: str, user_id: str | None) -> EndUser: As a result, it could only be considered as an end user id. """ if not user_id: - user_id = DefaultEndUserSessionID.DEFAULT_SESSION_ID.value - is_anonymous = user_id == DefaultEndUserSessionID.DEFAULT_SESSION_ID.value + user_id = DefaultEndUserSessionID.DEFAULT_SESSION_ID + is_anonymous = user_id == DefaultEndUserSessionID.DEFAULT_SESSION_ID try: with Session(db.engine) as session: user_model = None @@ -85,7 +85,7 @@ def get_user_tenant(view: Callable[P, R] | None = None): raise ValueError("tenant_id is required") if not user_id: - user_id = DefaultEndUserSessionID.DEFAULT_SESSION_ID.value + user_id = DefaultEndUserSessionID.DEFAULT_SESSION_ID try: tenant_model = ( diff --git a/api/controllers/service_api/wraps.py b/api/controllers/service_api/wraps.py index ee8e1d105b..2c9be4e887 100644 --- a/api/controllers/service_api/wraps.py +++ b/api/controllers/service_api/wraps.py @@ -313,7 +313,7 @@ def create_or_update_end_user_for_user_id(app_model: App, user_id: str | None = Create or update session terminal based on user ID. """ if not user_id: - user_id = DefaultEndUserSessionID.DEFAULT_SESSION_ID.value + user_id = DefaultEndUserSessionID.DEFAULT_SESSION_ID with Session(db.engine, expire_on_commit=False) as session: end_user = ( @@ -332,7 +332,7 @@ def create_or_update_end_user_for_user_id(app_model: App, user_id: str | None = tenant_id=app_model.tenant_id, app_id=app_model.id, type="service_api", - is_anonymous=user_id == DefaultEndUserSessionID.DEFAULT_SESSION_ID.value, + is_anonymous=user_id == DefaultEndUserSessionID.DEFAULT_SESSION_ID, session_id=user_id, ) session.add(end_user) diff --git a/api/core/app/app_config/easy_ui_based_app/dataset/manager.py b/api/core/app/app_config/easy_ui_based_app/dataset/manager.py index 3564cc175b..aacafb2dad 100644 --- a/api/core/app/app_config/easy_ui_based_app/dataset/manager.py +++ b/api/core/app/app_config/easy_ui_based_app/dataset/manager.py @@ -197,12 +197,12 @@ class DatasetConfigManager: # strategy if "strategy" not in config["agent_mode"] or not config["agent_mode"].get("strategy"): - config["agent_mode"]["strategy"] = PlanningStrategy.ROUTER.value + config["agent_mode"]["strategy"] = PlanningStrategy.ROUTER has_datasets = False if config.get("agent_mode", {}).get("strategy") in { - PlanningStrategy.ROUTER.value, - PlanningStrategy.REACT_ROUTER.value, + PlanningStrategy.ROUTER, + PlanningStrategy.REACT_ROUTER, }: for tool in config.get("agent_mode", {}).get("tools", []): key = list(tool.keys())[0] diff --git a/api/core/app/app_config/easy_ui_based_app/model_config/converter.py b/api/core/app/app_config/easy_ui_based_app/model_config/converter.py index 5b5eefe315..7cd5fe75d5 100644 --- a/api/core/app/app_config/easy_ui_based_app/model_config/converter.py +++ b/api/core/app/app_config/easy_ui_based_app/model_config/converter.py @@ -68,7 +68,7 @@ class ModelConfigConverter: # get model mode model_mode = model_config.mode if not model_mode: - model_mode = 
LLMMode.CHAT.value + model_mode = LLMMode.CHAT if model_schema and model_schema.model_properties.get(ModelPropertyKey.MODE): model_mode = LLMMode(model_schema.model_properties[ModelPropertyKey.MODE]).value diff --git a/api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py b/api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py index ec4f6074ab..21614c010c 100644 --- a/api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py +++ b/api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py @@ -100,7 +100,7 @@ class PromptTemplateConfigManager: if config["model"]["mode"] not in model_mode_vals: raise ValueError(f"model.mode must be in {model_mode_vals} when prompt_type is advanced") - if app_mode == AppMode.CHAT and config["model"]["mode"] == ModelMode.COMPLETION.value: + if app_mode == AppMode.CHAT and config["model"]["mode"] == ModelMode.COMPLETION: user_prefix = config["completion_prompt_config"]["conversation_histories_role"]["user_prefix"] assistant_prefix = config["completion_prompt_config"]["conversation_histories_role"]["assistant_prefix"] @@ -110,7 +110,7 @@ class PromptTemplateConfigManager: if not assistant_prefix: config["completion_prompt_config"]["conversation_histories_role"]["assistant_prefix"] = "Assistant" - if config["model"]["mode"] == ModelMode.CHAT.value: + if config["model"]["mode"] == ModelMode.CHAT: prompt_list = config["chat_prompt_config"]["prompt"] if len(prompt_list) > 10: diff --git a/api/core/app/apps/agent_chat/app_config_manager.py b/api/core/app/apps/agent_chat/app_config_manager.py index 9ce841f432..801619ddbc 100644 --- a/api/core/app/apps/agent_chat/app_config_manager.py +++ b/api/core/app/apps/agent_chat/app_config_manager.py @@ -186,7 +186,7 @@ class AgentChatAppConfigManager(BaseAppConfigManager): raise ValueError("enabled in agent_mode must be of boolean type") if not agent_mode.get("strategy"): - agent_mode["strategy"] = PlanningStrategy.ROUTER.value + agent_mode["strategy"] = PlanningStrategy.ROUTER if agent_mode["strategy"] not in [member.value for member in list(PlanningStrategy.__members__.values())]: raise ValueError("strategy in agent_mode must be in the specified strategy list") diff --git a/api/core/app/apps/agent_chat/app_runner.py b/api/core/app/apps/agent_chat/app_runner.py index 388bed5255..759398b556 100644 --- a/api/core/app/apps/agent_chat/app_runner.py +++ b/api/core/app/apps/agent_chat/app_runner.py @@ -198,9 +198,9 @@ class AgentChatAppRunner(AppRunner): # start agent runner if agent_entity.strategy == AgentEntity.Strategy.CHAIN_OF_THOUGHT: # check LLM mode - if model_schema.model_properties.get(ModelPropertyKey.MODE) == LLMMode.CHAT.value: + if model_schema.model_properties.get(ModelPropertyKey.MODE) == LLMMode.CHAT: runner_cls = CotChatAgentRunner - elif model_schema.model_properties.get(ModelPropertyKey.MODE) == LLMMode.COMPLETION.value: + elif model_schema.model_properties.get(ModelPropertyKey.MODE) == LLMMode.COMPLETION: runner_cls = CotCompletionAgentRunner else: raise ValueError(f"Invalid LLM mode: {model_schema.model_properties.get(ModelPropertyKey.MODE)}") diff --git a/api/core/app/apps/pipeline/pipeline_runner.py b/api/core/app/apps/pipeline/pipeline_runner.py index 866c46d963..a8a7dde2b4 100644 --- a/api/core/app/apps/pipeline/pipeline_runner.py +++ b/api/core/app/apps/pipeline/pipeline_runner.py @@ -229,8 +229,8 @@ class PipelineRunner(WorkflowBasedAppRunner): workflow_id=workflow.id, graph_config=graph_config, user_id=self.application_generate_entity.user_id, - 
user_from=UserFrom.ACCOUNT.value, - invoke_from=InvokeFrom.SERVICE_API.value, + user_from=UserFrom.ACCOUNT, + invoke_from=InvokeFrom.SERVICE_API, call_depth=0, ) diff --git a/api/core/app/apps/workflow_app_runner.py b/api/core/app/apps/workflow_app_runner.py index 564daba86d..68eb455d26 100644 --- a/api/core/app/apps/workflow_app_runner.py +++ b/api/core/app/apps/workflow_app_runner.py @@ -100,8 +100,8 @@ class WorkflowBasedAppRunner: workflow_id=workflow_id, graph_config=graph_config, user_id=user_id, - user_from=UserFrom.ACCOUNT.value, - invoke_from=InvokeFrom.SERVICE_API.value, + user_from=UserFrom.ACCOUNT, + invoke_from=InvokeFrom.SERVICE_API, call_depth=0, ) @@ -244,8 +244,8 @@ class WorkflowBasedAppRunner: workflow_id=workflow.id, graph_config=graph_config, user_id="", - user_from=UserFrom.ACCOUNT.value, - invoke_from=InvokeFrom.SERVICE_API.value, + user_from=UserFrom.ACCOUNT, + invoke_from=InvokeFrom.SERVICE_API, call_depth=0, ) diff --git a/api/core/datasource/entities/api_entities.py b/api/core/datasource/entities/api_entities.py index cdefcc4506..1179537570 100644 --- a/api/core/datasource/entities/api_entities.py +++ b/api/core/datasource/entities/api_entities.py @@ -49,7 +49,7 @@ class DatasourceProviderApiEntity(BaseModel): for datasource in datasources: if datasource.get("parameters"): for parameter in datasource.get("parameters"): - if parameter.get("type") == DatasourceParameter.DatasourceParameterType.SYSTEM_FILES.value: + if parameter.get("type") == DatasourceParameter.DatasourceParameterType.SYSTEM_FILES: parameter["type"] = "files" # ------------- diff --git a/api/core/datasource/entities/datasource_entities.py b/api/core/datasource/entities/datasource_entities.py index ac4f51ac75..7f503b963f 100644 --- a/api/core/datasource/entities/datasource_entities.py +++ b/api/core/datasource/entities/datasource_entities.py @@ -54,16 +54,16 @@ class DatasourceParameter(PluginParameter): removes TOOLS_SELECTOR from PluginParameterType """ - STRING = PluginParameterType.STRING.value - NUMBER = PluginParameterType.NUMBER.value - BOOLEAN = PluginParameterType.BOOLEAN.value - SELECT = PluginParameterType.SELECT.value - SECRET_INPUT = PluginParameterType.SECRET_INPUT.value - FILE = PluginParameterType.FILE.value - FILES = PluginParameterType.FILES.value + STRING = PluginParameterType.STRING + NUMBER = PluginParameterType.NUMBER + BOOLEAN = PluginParameterType.BOOLEAN + SELECT = PluginParameterType.SELECT + SECRET_INPUT = PluginParameterType.SECRET_INPUT + FILE = PluginParameterType.FILE + FILES = PluginParameterType.FILES # deprecated, should not use. 
- SYSTEM_FILES = PluginParameterType.SYSTEM_FILES.value + SYSTEM_FILES = PluginParameterType.SYSTEM_FILES def as_normal_type(self): return as_normal_type(self) diff --git a/api/core/entities/provider_configuration.py b/api/core/entities/provider_configuration.py index 2857729a81..bc19afb52a 100644 --- a/api/core/entities/provider_configuration.py +++ b/api/core/entities/provider_configuration.py @@ -207,7 +207,7 @@ class ProviderConfiguration(BaseModel): """ stmt = select(Provider).where( Provider.tenant_id == self.tenant_id, - Provider.provider_type == ProviderType.CUSTOM.value, + Provider.provider_type == ProviderType.CUSTOM, Provider.provider_name.in_(self._get_provider_names()), ) @@ -458,7 +458,7 @@ class ProviderConfiguration(BaseModel): provider_record = Provider( tenant_id=self.tenant_id, provider_name=self.provider.provider, - provider_type=ProviderType.CUSTOM.value, + provider_type=ProviderType.CUSTOM, is_valid=True, credential_id=new_record.id, ) @@ -1414,7 +1414,7 @@ class ProviderConfiguration(BaseModel): """ secret_input_form_variables = [] for credential_form_schema in credential_form_schemas: - if credential_form_schema.type.value == FormType.SECRET_INPUT.value: + if credential_form_schema.type.value == FormType.SECRET_INPUT: secret_input_form_variables.append(credential_form_schema.variable) return secret_input_form_variables diff --git a/api/core/indexing_runner.py b/api/core/indexing_runner.py index 3682fdb667..7822ed4268 100644 --- a/api/core/indexing_runner.py +++ b/api/core/indexing_runner.py @@ -343,7 +343,7 @@ class IndexingRunner: if file_detail: extract_setting = ExtractSetting( - datasource_type=DatasourceType.FILE.value, + datasource_type=DatasourceType.FILE, upload_file=file_detail, document_model=dataset_document.doc_form, ) @@ -356,7 +356,7 @@ class IndexingRunner: ): raise ValueError("no notion import info found") extract_setting = ExtractSetting( - datasource_type=DatasourceType.NOTION.value, + datasource_type=DatasourceType.NOTION, notion_info=NotionInfo.model_validate( { "credential_id": data_source_info["credential_id"], @@ -379,7 +379,7 @@ class IndexingRunner: ): raise ValueError("no website import info found") extract_setting = ExtractSetting( - datasource_type=DatasourceType.WEBSITE.value, + datasource_type=DatasourceType.WEBSITE, website_info=WebsiteInfo.model_validate( { "provider": data_source_info["provider"], diff --git a/api/core/llm_generator/output_parser/structured_output.py b/api/core/llm_generator/output_parser/structured_output.py index 1e302b7668..686529c3ca 100644 --- a/api/core/llm_generator/output_parser/structured_output.py +++ b/api/core/llm_generator/output_parser/structured_output.py @@ -224,8 +224,8 @@ def _handle_native_json_schema( # Set appropriate response format if required by the model for rule in rules: - if rule.name == "response_format" and ResponseFormat.JSON_SCHEMA.value in rule.options: - model_parameters["response_format"] = ResponseFormat.JSON_SCHEMA.value + if rule.name == "response_format" and ResponseFormat.JSON_SCHEMA in rule.options: + model_parameters["response_format"] = ResponseFormat.JSON_SCHEMA return model_parameters @@ -239,10 +239,10 @@ def _set_response_format(model_parameters: dict, rules: list): """ for rule in rules: if rule.name == "response_format": - if ResponseFormat.JSON.value in rule.options: - model_parameters["response_format"] = ResponseFormat.JSON.value - elif ResponseFormat.JSON_OBJECT.value in rule.options: - model_parameters["response_format"] = ResponseFormat.JSON_OBJECT.value + if 
ResponseFormat.JSON in rule.options: + model_parameters["response_format"] = ResponseFormat.JSON + elif ResponseFormat.JSON_OBJECT in rule.options: + model_parameters["response_format"] = ResponseFormat.JSON_OBJECT def _handle_prompt_based_schema( diff --git a/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py b/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py index 1497bc1863..03d2d75372 100644 --- a/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py +++ b/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py @@ -213,9 +213,9 @@ class ArizePhoenixDataTrace(BaseTraceInstance): node_metadata.update(json.loads(node_execution.execution_metadata)) # Determine the correct span kind based on node type - span_kind = OpenInferenceSpanKindValues.CHAIN.value + span_kind = OpenInferenceSpanKindValues.CHAIN if node_execution.node_type == "llm": - span_kind = OpenInferenceSpanKindValues.LLM.value + span_kind = OpenInferenceSpanKindValues.LLM provider = process_data.get("model_provider") model = process_data.get("model_name") if provider: @@ -230,18 +230,18 @@ class ArizePhoenixDataTrace(BaseTraceInstance): node_metadata["prompt_tokens"] = usage_data.get("prompt_tokens", 0) node_metadata["completion_tokens"] = usage_data.get("completion_tokens", 0) elif node_execution.node_type == "dataset_retrieval": - span_kind = OpenInferenceSpanKindValues.RETRIEVER.value + span_kind = OpenInferenceSpanKindValues.RETRIEVER elif node_execution.node_type == "tool": - span_kind = OpenInferenceSpanKindValues.TOOL.value + span_kind = OpenInferenceSpanKindValues.TOOL else: - span_kind = OpenInferenceSpanKindValues.CHAIN.value + span_kind = OpenInferenceSpanKindValues.CHAIN node_span = self.tracer.start_span( name=node_execution.node_type, attributes={ SpanAttributes.INPUT_VALUE: node_execution.inputs or "{}", SpanAttributes.OUTPUT_VALUE: node_execution.outputs or "{}", - SpanAttributes.OPENINFERENCE_SPAN_KIND: span_kind, + SpanAttributes.OPENINFERENCE_SPAN_KIND: span_kind.value, SpanAttributes.METADATA: json.dumps(node_metadata, ensure_ascii=False), SpanAttributes.SESSION_ID: trace_info.conversation_id or "", }, diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py index 931bed78d4..92e6b8ea60 100644 --- a/api/core/ops/langfuse_trace/langfuse_trace.py +++ b/api/core/ops/langfuse_trace/langfuse_trace.py @@ -73,7 +73,7 @@ class LangFuseDataTrace(BaseTraceInstance): if trace_info.message_id: trace_id = trace_info.trace_id or trace_info.message_id - name = TraceTaskName.MESSAGE_TRACE.value + name = TraceTaskName.MESSAGE_TRACE trace_data = LangfuseTrace( id=trace_id, user_id=user_id, @@ -88,7 +88,7 @@ class LangFuseDataTrace(BaseTraceInstance): self.add_trace(langfuse_trace_data=trace_data) workflow_span_data = LangfuseSpan( id=trace_info.workflow_run_id, - name=TraceTaskName.WORKFLOW_TRACE.value, + name=TraceTaskName.WORKFLOW_TRACE, input=dict(trace_info.workflow_run_inputs), output=dict(trace_info.workflow_run_outputs), trace_id=trace_id, @@ -103,7 +103,7 @@ class LangFuseDataTrace(BaseTraceInstance): trace_data = LangfuseTrace( id=trace_id, user_id=user_id, - name=TraceTaskName.WORKFLOW_TRACE.value, + name=TraceTaskName.WORKFLOW_TRACE, input=dict(trace_info.workflow_run_inputs), output=dict(trace_info.workflow_run_outputs), metadata=metadata, @@ -253,7 +253,7 @@ class LangFuseDataTrace(BaseTraceInstance): trace_data = LangfuseTrace( id=trace_id, user_id=user_id, - name=TraceTaskName.MESSAGE_TRACE.value, + name=TraceTaskName.MESSAGE_TRACE, input={ 
"message": trace_info.inputs, "files": file_list, @@ -303,7 +303,7 @@ class LangFuseDataTrace(BaseTraceInstance): if trace_info.message_data is None: return span_data = LangfuseSpan( - name=TraceTaskName.MODERATION_TRACE.value, + name=TraceTaskName.MODERATION_TRACE, input=trace_info.inputs, output={ "action": trace_info.action, @@ -331,7 +331,7 @@ class LangFuseDataTrace(BaseTraceInstance): ) generation_data = LangfuseGeneration( - name=TraceTaskName.SUGGESTED_QUESTION_TRACE.value, + name=TraceTaskName.SUGGESTED_QUESTION_TRACE, input=trace_info.inputs, output=str(trace_info.suggested_question), trace_id=trace_info.trace_id or trace_info.message_id, @@ -349,7 +349,7 @@ class LangFuseDataTrace(BaseTraceInstance): if trace_info.message_data is None: return dataset_retrieval_span_data = LangfuseSpan( - name=TraceTaskName.DATASET_RETRIEVAL_TRACE.value, + name=TraceTaskName.DATASET_RETRIEVAL_TRACE, input=trace_info.inputs, output={"documents": trace_info.documents}, trace_id=trace_info.trace_id or trace_info.message_id, @@ -377,7 +377,7 @@ class LangFuseDataTrace(BaseTraceInstance): def generate_name_trace(self, trace_info: GenerateNameTraceInfo): name_generation_trace_data = LangfuseTrace( - name=TraceTaskName.GENERATE_NAME_TRACE.value, + name=TraceTaskName.GENERATE_NAME_TRACE, input=trace_info.inputs, output=trace_info.outputs, user_id=trace_info.tenant_id, @@ -388,7 +388,7 @@ class LangFuseDataTrace(BaseTraceInstance): self.add_trace(langfuse_trace_data=name_generation_trace_data) name_generation_span_data = LangfuseSpan( - name=TraceTaskName.GENERATE_NAME_TRACE.value, + name=TraceTaskName.GENERATE_NAME_TRACE, input=trace_info.inputs, output=trace_info.outputs, trace_id=trace_info.conversation_id, diff --git a/api/core/ops/langsmith_trace/langsmith_trace.py b/api/core/ops/langsmith_trace/langsmith_trace.py index 24a43e1cd8..8b8117b24c 100644 --- a/api/core/ops/langsmith_trace/langsmith_trace.py +++ b/api/core/ops/langsmith_trace/langsmith_trace.py @@ -81,7 +81,7 @@ class LangSmithDataTrace(BaseTraceInstance): if trace_info.message_id: message_run = LangSmithRunModel( id=trace_info.message_id, - name=TraceTaskName.MESSAGE_TRACE.value, + name=TraceTaskName.MESSAGE_TRACE, inputs=dict(trace_info.workflow_run_inputs), outputs=dict(trace_info.workflow_run_outputs), run_type=LangSmithRunType.chain, @@ -110,7 +110,7 @@ class LangSmithDataTrace(BaseTraceInstance): file_list=trace_info.file_list, total_tokens=trace_info.total_tokens, id=trace_info.workflow_run_id, - name=TraceTaskName.WORKFLOW_TRACE.value, + name=TraceTaskName.WORKFLOW_TRACE, inputs=dict(trace_info.workflow_run_inputs), run_type=LangSmithRunType.tool, start_time=trace_info.workflow_data.created_at, @@ -271,7 +271,7 @@ class LangSmithDataTrace(BaseTraceInstance): output_tokens=trace_info.answer_tokens, total_tokens=trace_info.total_tokens, id=message_id, - name=TraceTaskName.MESSAGE_TRACE.value, + name=TraceTaskName.MESSAGE_TRACE, inputs=trace_info.inputs, run_type=LangSmithRunType.chain, start_time=trace_info.start_time, @@ -327,7 +327,7 @@ class LangSmithDataTrace(BaseTraceInstance): if trace_info.message_data is None: return langsmith_run = LangSmithRunModel( - name=TraceTaskName.MODERATION_TRACE.value, + name=TraceTaskName.MODERATION_TRACE, inputs=trace_info.inputs, outputs={ "action": trace_info.action, @@ -362,7 +362,7 @@ class LangSmithDataTrace(BaseTraceInstance): if message_data is None: return suggested_question_run = LangSmithRunModel( - name=TraceTaskName.SUGGESTED_QUESTION_TRACE.value, + 
name=TraceTaskName.SUGGESTED_QUESTION_TRACE, inputs=trace_info.inputs, outputs=trace_info.suggested_question, run_type=LangSmithRunType.tool, @@ -391,7 +391,7 @@ class LangSmithDataTrace(BaseTraceInstance): if trace_info.message_data is None: return dataset_retrieval_run = LangSmithRunModel( - name=TraceTaskName.DATASET_RETRIEVAL_TRACE.value, + name=TraceTaskName.DATASET_RETRIEVAL_TRACE, inputs=trace_info.inputs, outputs={"documents": trace_info.documents}, run_type=LangSmithRunType.retriever, @@ -447,7 +447,7 @@ class LangSmithDataTrace(BaseTraceInstance): def generate_name_trace(self, trace_info: GenerateNameTraceInfo): name_run = LangSmithRunModel( - name=TraceTaskName.GENERATE_NAME_TRACE.value, + name=TraceTaskName.GENERATE_NAME_TRACE, inputs=trace_info.inputs, outputs=trace_info.outputs, run_type=LangSmithRunType.tool, diff --git a/api/core/ops/opik_trace/opik_trace.py b/api/core/ops/opik_trace/opik_trace.py index 8fa92f9fcd..8050c59db9 100644 --- a/api/core/ops/opik_trace/opik_trace.py +++ b/api/core/ops/opik_trace/opik_trace.py @@ -108,7 +108,7 @@ class OpikDataTrace(BaseTraceInstance): trace_data = { "id": opik_trace_id, - "name": TraceTaskName.MESSAGE_TRACE.value, + "name": TraceTaskName.MESSAGE_TRACE, "start_time": trace_info.start_time, "end_time": trace_info.end_time, "metadata": workflow_metadata, @@ -125,7 +125,7 @@ class OpikDataTrace(BaseTraceInstance): "id": root_span_id, "parent_span_id": None, "trace_id": opik_trace_id, - "name": TraceTaskName.WORKFLOW_TRACE.value, + "name": TraceTaskName.WORKFLOW_TRACE, "input": wrap_dict("input", trace_info.workflow_run_inputs), "output": wrap_dict("output", trace_info.workflow_run_outputs), "start_time": trace_info.start_time, @@ -138,7 +138,7 @@ class OpikDataTrace(BaseTraceInstance): else: trace_data = { "id": opik_trace_id, - "name": TraceTaskName.MESSAGE_TRACE.value, + "name": TraceTaskName.MESSAGE_TRACE, "start_time": trace_info.start_time, "end_time": trace_info.end_time, "metadata": workflow_metadata, @@ -290,7 +290,7 @@ class OpikDataTrace(BaseTraceInstance): trace_data = { "id": prepare_opik_uuid(trace_info.start_time, dify_trace_id), - "name": TraceTaskName.MESSAGE_TRACE.value, + "name": TraceTaskName.MESSAGE_TRACE, "start_time": trace_info.start_time, "end_time": trace_info.end_time, "metadata": wrap_metadata(metadata), @@ -329,7 +329,7 @@ class OpikDataTrace(BaseTraceInstance): span_data = { "trace_id": prepare_opik_uuid(start_time, trace_info.trace_id or trace_info.message_id), - "name": TraceTaskName.MODERATION_TRACE.value, + "name": TraceTaskName.MODERATION_TRACE, "type": "tool", "start_time": start_time, "end_time": trace_info.end_time or trace_info.message_data.updated_at, @@ -355,7 +355,7 @@ class OpikDataTrace(BaseTraceInstance): span_data = { "trace_id": prepare_opik_uuid(start_time, trace_info.trace_id or trace_info.message_id), - "name": TraceTaskName.SUGGESTED_QUESTION_TRACE.value, + "name": TraceTaskName.SUGGESTED_QUESTION_TRACE, "type": "tool", "start_time": start_time, "end_time": trace_info.end_time or message_data.updated_at, @@ -375,7 +375,7 @@ class OpikDataTrace(BaseTraceInstance): span_data = { "trace_id": prepare_opik_uuid(start_time, trace_info.trace_id or trace_info.message_id), - "name": TraceTaskName.DATASET_RETRIEVAL_TRACE.value, + "name": TraceTaskName.DATASET_RETRIEVAL_TRACE, "type": "tool", "start_time": start_time, "end_time": trace_info.end_time or trace_info.message_data.updated_at, @@ -405,7 +405,7 @@ class OpikDataTrace(BaseTraceInstance): def generate_name_trace(self, trace_info: 
GenerateNameTraceInfo): trace_data = { "id": prepare_opik_uuid(trace_info.start_time, trace_info.trace_id or trace_info.message_id), - "name": TraceTaskName.GENERATE_NAME_TRACE.value, + "name": TraceTaskName.GENERATE_NAME_TRACE, "start_time": trace_info.start_time, "end_time": trace_info.end_time, "metadata": wrap_metadata(trace_info.metadata), @@ -420,7 +420,7 @@ class OpikDataTrace(BaseTraceInstance): span_data = { "trace_id": trace.id, - "name": TraceTaskName.GENERATE_NAME_TRACE.value, + "name": TraceTaskName.GENERATE_NAME_TRACE, "start_time": trace_info.start_time, "end_time": trace_info.end_time, "metadata": wrap_metadata(trace_info.metadata), diff --git a/api/core/ops/weave_trace/weave_trace.py b/api/core/ops/weave_trace/weave_trace.py index 185bdd8179..9b3d7a8192 100644 --- a/api/core/ops/weave_trace/weave_trace.py +++ b/api/core/ops/weave_trace/weave_trace.py @@ -104,7 +104,7 @@ class WeaveDataTrace(BaseTraceInstance): message_run = WeaveTraceModel( id=trace_info.message_id, - op=str(TraceTaskName.MESSAGE_TRACE.value), + op=str(TraceTaskName.MESSAGE_TRACE), inputs=dict(trace_info.workflow_run_inputs), outputs=dict(trace_info.workflow_run_outputs), total_tokens=trace_info.total_tokens, @@ -126,7 +126,7 @@ class WeaveDataTrace(BaseTraceInstance): file_list=trace_info.file_list, total_tokens=trace_info.total_tokens, id=trace_info.workflow_run_id, - op=str(TraceTaskName.WORKFLOW_TRACE.value), + op=str(TraceTaskName.WORKFLOW_TRACE), inputs=dict(trace_info.workflow_run_inputs), outputs=dict(trace_info.workflow_run_outputs), attributes=workflow_attributes, @@ -253,7 +253,7 @@ class WeaveDataTrace(BaseTraceInstance): message_run = WeaveTraceModel( id=trace_id, - op=str(TraceTaskName.MESSAGE_TRACE.value), + op=str(TraceTaskName.MESSAGE_TRACE), input_tokens=trace_info.message_tokens, output_tokens=trace_info.answer_tokens, total_tokens=trace_info.total_tokens, @@ -300,7 +300,7 @@ class WeaveDataTrace(BaseTraceInstance): moderation_run = WeaveTraceModel( id=str(uuid.uuid4()), - op=str(TraceTaskName.MODERATION_TRACE.value), + op=str(TraceTaskName.MODERATION_TRACE), inputs=trace_info.inputs, outputs={ "action": trace_info.action, @@ -330,7 +330,7 @@ class WeaveDataTrace(BaseTraceInstance): suggested_question_run = WeaveTraceModel( id=str(uuid.uuid4()), - op=str(TraceTaskName.SUGGESTED_QUESTION_TRACE.value), + op=str(TraceTaskName.SUGGESTED_QUESTION_TRACE), inputs=trace_info.inputs, outputs=trace_info.suggested_question, attributes=attributes, @@ -355,7 +355,7 @@ class WeaveDataTrace(BaseTraceInstance): dataset_retrieval_run = WeaveTraceModel( id=str(uuid.uuid4()), - op=str(TraceTaskName.DATASET_RETRIEVAL_TRACE.value), + op=str(TraceTaskName.DATASET_RETRIEVAL_TRACE), inputs=trace_info.inputs, outputs={"documents": trace_info.documents}, attributes=attributes, @@ -397,7 +397,7 @@ class WeaveDataTrace(BaseTraceInstance): name_run = WeaveTraceModel( id=str(uuid.uuid4()), - op=str(TraceTaskName.GENERATE_NAME_TRACE.value), + op=str(TraceTaskName.GENERATE_NAME_TRACE), inputs=trace_info.inputs, outputs=trace_info.outputs, attributes=attributes, diff --git a/api/core/plugin/backwards_invocation/node.py b/api/core/plugin/backwards_invocation/node.py index 1d6d21cff7..9fbcbf55b4 100644 --- a/api/core/plugin/backwards_invocation/node.py +++ b/api/core/plugin/backwards_invocation/node.py @@ -52,7 +52,7 @@ class PluginNodeBackwardsInvocation(BaseBackwardsInvocation): instruction=instruction, # instruct with variables are not supported ) node_data_dict = node_data.model_dump() - node_data_dict["type"] = 
NodeType.PARAMETER_EXTRACTOR.value + node_data_dict["type"] = NodeType.PARAMETER_EXTRACTOR execution = workflow_service.run_free_workflow_node( node_data_dict, tenant_id=tenant_id, diff --git a/api/core/plugin/entities/request.py b/api/core/plugin/entities/request.py index 7b789d8ac9..d5df85730b 100644 --- a/api/core/plugin/entities/request.py +++ b/api/core/plugin/entities/request.py @@ -83,13 +83,13 @@ class RequestInvokeLLM(BaseRequestInvokeModel): raise ValueError("prompt_messages must be a list") for i in range(len(v)): - if v[i]["role"] == PromptMessageRole.USER.value: + if v[i]["role"] == PromptMessageRole.USER: v[i] = UserPromptMessage.model_validate(v[i]) - elif v[i]["role"] == PromptMessageRole.ASSISTANT.value: + elif v[i]["role"] == PromptMessageRole.ASSISTANT: v[i] = AssistantPromptMessage.model_validate(v[i]) - elif v[i]["role"] == PromptMessageRole.SYSTEM.value: + elif v[i]["role"] == PromptMessageRole.SYSTEM: v[i] = SystemPromptMessage.model_validate(v[i]) - elif v[i]["role"] == PromptMessageRole.TOOL.value: + elif v[i]["role"] == PromptMessageRole.TOOL: v[i] = ToolPromptMessage.model_validate(v[i]) else: v[i] = PromptMessage.model_validate(v[i]) diff --git a/api/core/provider_manager.py b/api/core/provider_manager.py index 499d39bd5d..7bc9830ac3 100644 --- a/api/core/provider_manager.py +++ b/api/core/provider_manager.py @@ -610,7 +610,7 @@ class ProviderManager: provider_quota_to_provider_record_dict = {} for provider_record in provider_records: - if provider_record.provider_type != ProviderType.SYSTEM.value: + if provider_record.provider_type != ProviderType.SYSTEM: continue provider_quota_to_provider_record_dict[ProviderQuotaType.value_of(provider_record.quota_type)] = ( @@ -627,8 +627,8 @@ class ProviderManager: tenant_id=tenant_id, # TODO: Use provider name with prefix after the data migration. 
provider_name=ModelProviderID(provider_name).provider_name, - provider_type=ProviderType.SYSTEM.value, - quota_type=ProviderQuotaType.TRIAL.value, + provider_type=ProviderType.SYSTEM, + quota_type=ProviderQuotaType.TRIAL, quota_limit=quota.quota_limit, # type: ignore quota_used=0, is_valid=True, @@ -641,8 +641,8 @@ class ProviderManager: stmt = select(Provider).where( Provider.tenant_id == tenant_id, Provider.provider_name == ModelProviderID(provider_name).provider_name, - Provider.provider_type == ProviderType.SYSTEM.value, - Provider.quota_type == ProviderQuotaType.TRIAL.value, + Provider.provider_type == ProviderType.SYSTEM, + Provider.quota_type == ProviderQuotaType.TRIAL, ) existed_provider_record = db.session.scalar(stmt) if not existed_provider_record: @@ -702,7 +702,7 @@ class ProviderManager: """Get custom provider configuration.""" # Find custom provider record (non-system) custom_provider_record = next( - (record for record in provider_records if record.provider_type != ProviderType.SYSTEM.value), None + (record for record in provider_records if record.provider_type != ProviderType.SYSTEM), None ) if not custom_provider_record: @@ -905,7 +905,7 @@ class ProviderManager: # Convert provider_records to dict quota_type_to_provider_records_dict: dict[ProviderQuotaType, Provider] = {} for provider_record in provider_records: - if provider_record.provider_type != ProviderType.SYSTEM.value: + if provider_record.provider_type != ProviderType.SYSTEM: continue quota_type_to_provider_records_dict[ProviderQuotaType.value_of(provider_record.quota_type)] = ( @@ -1046,7 +1046,7 @@ class ProviderManager: """ secret_input_form_variables = [] for credential_form_schema in credential_form_schemas: - if credential_form_schema.type.value == FormType.SECRET_INPUT.value: + if credential_form_schema.type.value == FormType.SECRET_INPUT: secret_input_form_variables.append(credential_form_schema.variable) return secret_input_form_variables diff --git a/api/core/rag/data_post_processor/data_post_processor.py b/api/core/rag/data_post_processor/data_post_processor.py index 696e3e967f..cc946a72c3 100644 --- a/api/core/rag/data_post_processor/data_post_processor.py +++ b/api/core/rag/data_post_processor/data_post_processor.py @@ -46,7 +46,7 @@ class DataPostProcessor: reranking_model: dict | None = None, weights: dict | None = None, ) -> BaseRerankRunner | None: - if reranking_mode == RerankMode.WEIGHTED_SCORE.value and weights: + if reranking_mode == RerankMode.WEIGHTED_SCORE and weights: runner = RerankRunnerFactory.create_rerank_runner( runner_type=reranking_mode, tenant_id=tenant_id, @@ -62,7 +62,7 @@ class DataPostProcessor: ), ) return runner - elif reranking_mode == RerankMode.RERANKING_MODEL.value: + elif reranking_mode == RerankMode.RERANKING_MODEL: rerank_model_instance = self._get_rerank_model_instance(tenant_id, reranking_model) if rerank_model_instance is None: return None diff --git a/api/core/rag/datasource/retrieval_service.py b/api/core/rag/datasource/retrieval_service.py index 38358ccd6d..6e9e2b4527 100644 --- a/api/core/rag/datasource/retrieval_service.py +++ b/api/core/rag/datasource/retrieval_service.py @@ -21,7 +21,7 @@ from models.dataset import Document as DatasetDocument from services.external_knowledge_service import ExternalDatasetService default_retrieval_model = { - "search_method": RetrievalMethod.SEMANTIC_SEARCH.value, + "search_method": RetrievalMethod.SEMANTIC_SEARCH, "reranking_enable": False, "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""}, 
"top_k": 4, @@ -107,7 +107,7 @@ class RetrievalService: raise ValueError(";\n".join(exceptions)) # Deduplicate documents for hybrid search to avoid duplicate chunks - if retrieval_method == RetrievalMethod.HYBRID_SEARCH.value: + if retrieval_method == RetrievalMethod.HYBRID_SEARCH: all_documents = cls._deduplicate_documents(all_documents) data_post_processor = DataPostProcessor( str(dataset.tenant_id), reranking_mode, reranking_model, weights, False @@ -245,10 +245,10 @@ class RetrievalService: reranking_model and reranking_model.get("reranking_model_name") and reranking_model.get("reranking_provider_name") - and retrieval_method == RetrievalMethod.SEMANTIC_SEARCH.value + and retrieval_method == RetrievalMethod.SEMANTIC_SEARCH ): data_post_processor = DataPostProcessor( - str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL.value), reranking_model, None, False + str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL), reranking_model, None, False ) all_documents.extend( data_post_processor.invoke( @@ -293,10 +293,10 @@ class RetrievalService: reranking_model and reranking_model.get("reranking_model_name") and reranking_model.get("reranking_provider_name") - and retrieval_method == RetrievalMethod.FULL_TEXT_SEARCH.value + and retrieval_method == RetrievalMethod.FULL_TEXT_SEARCH ): data_post_processor = DataPostProcessor( - str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL.value), reranking_model, None, False + str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL), reranking_model, None, False ) all_documents.extend( data_post_processor.invoke( diff --git a/api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py b/api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py index e55e5f3101..a306f9ba0c 100644 --- a/api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py +++ b/api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py @@ -488,9 +488,9 @@ class ClickzettaVector(BaseVector): create_table_sql = f""" CREATE TABLE IF NOT EXISTS {self._config.schema_name}.{self._table_name} ( id STRING NOT NULL COMMENT 'Unique document identifier', - {Field.CONTENT_KEY.value} STRING NOT NULL COMMENT 'Document text content for search and retrieval', - {Field.METADATA_KEY.value} JSON COMMENT 'Document metadata including source, type, and other attributes', - {Field.VECTOR.value} VECTOR(FLOAT, {dimension}) NOT NULL COMMENT + {Field.CONTENT_KEY} STRING NOT NULL COMMENT 'Document text content for search and retrieval', + {Field.METADATA_KEY} JSON COMMENT 'Document metadata including source, type, and other attributes', + {Field.VECTOR} VECTOR(FLOAT, {dimension}) NOT NULL COMMENT 'High-dimensional embedding vector for semantic similarity search', PRIMARY KEY (id) ) COMMENT 'Dify RAG knowledge base vector storage table for document embeddings and content' @@ -519,15 +519,15 @@ class ClickzettaVector(BaseVector): existing_indexes = cursor.fetchall() for idx in existing_indexes: # Check if vector index already exists on the embedding column - if Field.VECTOR.value in str(idx).lower(): - logger.info("Vector index already exists on column %s", Field.VECTOR.value) + if Field.VECTOR in str(idx).lower(): + logger.info("Vector index already exists on column %s", Field.VECTOR) return except (RuntimeError, ValueError) as e: logger.warning("Failed to check existing indexes: %s", e) index_sql = f""" CREATE VECTOR INDEX IF NOT EXISTS {index_name} - ON TABLE {self._config.schema_name}.{self._table_name}({Field.VECTOR.value}) + ON TABLE {self._config.schema_name}.{self._table_name}({Field.VECTOR}) 
PROPERTIES ( "distance.function" = "{self._config.vector_distance_function}", "scalar.type" = "f32", @@ -560,17 +560,17 @@ class ClickzettaVector(BaseVector): # More precise check: look for inverted index specifically on the content column if ( "inverted" in idx_str - and Field.CONTENT_KEY.value.lower() in idx_str + and Field.CONTENT_KEY.lower() in idx_str and (index_name.lower() in idx_str or f"idx_{self._table_name}_text" in idx_str) ): - logger.info("Inverted index already exists on column %s: %s", Field.CONTENT_KEY.value, idx) + logger.info("Inverted index already exists on column %s: %s", Field.CONTENT_KEY, idx) return except (RuntimeError, ValueError) as e: logger.warning("Failed to check existing indexes: %s", e) index_sql = f""" CREATE INVERTED INDEX IF NOT EXISTS {index_name} - ON TABLE {self._config.schema_name}.{self._table_name} ({Field.CONTENT_KEY.value}) + ON TABLE {self._config.schema_name}.{self._table_name} ({Field.CONTENT_KEY}) PROPERTIES ( "analyzer" = "{self._config.analyzer_type}", "mode" = "{self._config.analyzer_mode}" @@ -588,13 +588,13 @@ class ClickzettaVector(BaseVector): or "with the same type" in error_msg or "cannot create inverted index" in error_msg ) and "already has index" in error_msg: - logger.info("Inverted index already exists on column %s", Field.CONTENT_KEY.value) + logger.info("Inverted index already exists on column %s", Field.CONTENT_KEY) # Try to get the existing index name for logging try: cursor.execute(f"SHOW INDEX FROM {self._config.schema_name}.{self._table_name}") existing_indexes = cursor.fetchall() for idx in existing_indexes: - if "inverted" in str(idx).lower() and Field.CONTENT_KEY.value.lower() in str(idx).lower(): + if "inverted" in str(idx).lower() and Field.CONTENT_KEY.lower() in str(idx).lower(): logger.info("Found existing inverted index: %s", idx) break except (RuntimeError, ValueError): @@ -669,7 +669,7 @@ class ClickzettaVector(BaseVector): # Use parameterized INSERT with executemany for better performance and security # Cast JSON and VECTOR in SQL, pass raw data as parameters - columns = f"id, {Field.CONTENT_KEY.value}, {Field.METADATA_KEY.value}, {Field.VECTOR.value}" + columns = f"id, {Field.CONTENT_KEY}, {Field.METADATA_KEY}, {Field.VECTOR}" insert_sql = ( f"INSERT INTO {self._config.schema_name}.{self._table_name} ({columns}) " f"VALUES (?, ?, CAST(? AS JSON), CAST(? AS VECTOR({vector_dimension})))" @@ -767,7 +767,7 @@ class ClickzettaVector(BaseVector): # Use json_extract_string function for ClickZetta compatibility sql = ( f"DELETE FROM {self._config.schema_name}.{self._table_name} " - f"WHERE json_extract_string({Field.METADATA_KEY.value}, '$.{key}') = ?" + f"WHERE json_extract_string({Field.METADATA_KEY}, '$.{key}') = ?" 
) cursor.execute(sql, binding_params=[value]) @@ -795,9 +795,7 @@ class ClickzettaVector(BaseVector): safe_doc_ids = [str(id).replace("'", "''") for id in document_ids_filter] doc_ids_str = ",".join(f"'{id}'" for id in safe_doc_ids) # Use json_extract_string function for ClickZetta compatibility - filter_clauses.append( - f"json_extract_string({Field.METADATA_KEY.value}, '$.document_id') IN ({doc_ids_str})" - ) + filter_clauses.append(f"json_extract_string({Field.METADATA_KEY}, '$.document_id') IN ({doc_ids_str})") # No need for dataset_id filter since each dataset has its own table @@ -808,23 +806,21 @@ class ClickzettaVector(BaseVector): distance_func = "COSINE_DISTANCE" if score_threshold > 0: query_vector_str = f"CAST('[{self._format_vector_simple(query_vector)}]' AS VECTOR({vector_dimension}))" - filter_clauses.append( - f"{distance_func}({Field.VECTOR.value}, {query_vector_str}) < {2 - score_threshold}" - ) + filter_clauses.append(f"{distance_func}({Field.VECTOR}, {query_vector_str}) < {2 - score_threshold}") else: # For L2 distance, smaller is better distance_func = "L2_DISTANCE" if score_threshold > 0: query_vector_str = f"CAST('[{self._format_vector_simple(query_vector)}]' AS VECTOR({vector_dimension}))" - filter_clauses.append(f"{distance_func}({Field.VECTOR.value}, {query_vector_str}) < {score_threshold}") + filter_clauses.append(f"{distance_func}({Field.VECTOR}, {query_vector_str}) < {score_threshold}") where_clause = " AND ".join(filter_clauses) if filter_clauses else "1=1" # Execute vector search query query_vector_str = f"CAST('[{self._format_vector_simple(query_vector)}]' AS VECTOR({vector_dimension}))" search_sql = f""" - SELECT id, {Field.CONTENT_KEY.value}, {Field.METADATA_KEY.value}, - {distance_func}({Field.VECTOR.value}, {query_vector_str}) AS distance + SELECT id, {Field.CONTENT_KEY}, {Field.METADATA_KEY}, + {distance_func}({Field.VECTOR}, {query_vector_str}) AS distance FROM {self._config.schema_name}.{self._table_name} WHERE {where_clause} ORDER BY distance @@ -887,9 +883,7 @@ class ClickzettaVector(BaseVector): safe_doc_ids = [str(id).replace("'", "''") for id in document_ids_filter] doc_ids_str = ",".join(f"'{id}'" for id in safe_doc_ids) # Use json_extract_string function for ClickZetta compatibility - filter_clauses.append( - f"json_extract_string({Field.METADATA_KEY.value}, '$.document_id') IN ({doc_ids_str})" - ) + filter_clauses.append(f"json_extract_string({Field.METADATA_KEY}, '$.document_id') IN ({doc_ids_str})") # No need for dataset_id filter since each dataset has its own table @@ -897,13 +891,13 @@ class ClickzettaVector(BaseVector): # match_all requires all terms to be present # Use simple quote escaping for MATCH_ALL since it needs to be in the WHERE clause escaped_query = query.replace("'", "''") - filter_clauses.append(f"MATCH_ALL({Field.CONTENT_KEY.value}, '{escaped_query}')") + filter_clauses.append(f"MATCH_ALL({Field.CONTENT_KEY}, '{escaped_query}')") where_clause = " AND ".join(filter_clauses) # Execute full-text search query search_sql = f""" - SELECT id, {Field.CONTENT_KEY.value}, {Field.METADATA_KEY.value} + SELECT id, {Field.CONTENT_KEY}, {Field.METADATA_KEY} FROM {self._config.schema_name}.{self._table_name} WHERE {where_clause} LIMIT {top_k} @@ -986,19 +980,17 @@ class ClickzettaVector(BaseVector): safe_doc_ids = [str(id).replace("'", "''") for id in document_ids_filter] doc_ids_str = ",".join(f"'{id}'" for id in safe_doc_ids) # Use json_extract_string function for ClickZetta compatibility - filter_clauses.append( - 
f"json_extract_string({Field.METADATA_KEY.value}, '$.document_id') IN ({doc_ids_str})" - ) + filter_clauses.append(f"json_extract_string({Field.METADATA_KEY}, '$.document_id') IN ({doc_ids_str})") # No need for dataset_id filter since each dataset has its own table # Use simple quote escaping for LIKE clause escaped_query = query.replace("'", "''") - filter_clauses.append(f"{Field.CONTENT_KEY.value} LIKE '%{escaped_query}%'") + filter_clauses.append(f"{Field.CONTENT_KEY} LIKE '%{escaped_query}%'") where_clause = " AND ".join(filter_clauses) search_sql = f""" - SELECT id, {Field.CONTENT_KEY.value}, {Field.METADATA_KEY.value} + SELECT id, {Field.CONTENT_KEY}, {Field.METADATA_KEY} FROM {self._config.schema_name}.{self._table_name} WHERE {where_clause} LIMIT {top_k} diff --git a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_ja_vector.py b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_ja_vector.py index 7b00928b7b..1e7fe52666 100644 --- a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_ja_vector.py +++ b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_ja_vector.py @@ -57,18 +57,18 @@ class ElasticSearchJaVector(ElasticSearchVector): } mappings = { "properties": { - Field.CONTENT_KEY.value: { + Field.CONTENT_KEY: { "type": "text", "analyzer": "ja_analyzer", "search_analyzer": "ja_analyzer", }, - Field.VECTOR.value: { # Make sure the dimension is correct here + Field.VECTOR: { # Make sure the dimension is correct here "type": "dense_vector", "dims": dim, "index": True, "similarity": "cosine", }, - Field.METADATA_KEY.value: { + Field.METADATA_KEY: { "type": "object", "properties": { "doc_id": {"type": "keyword"} # Map doc_id to keyword type diff --git a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py index ecb7a3916e..0ff8c915e6 100644 --- a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py +++ b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py @@ -163,9 +163,9 @@ class ElasticSearchVector(BaseVector): index=self._collection_name, id=uuids[i], document={ - Field.CONTENT_KEY.value: documents[i].page_content, - Field.VECTOR.value: embeddings[i] or None, - Field.METADATA_KEY.value: documents[i].metadata or {}, + Field.CONTENT_KEY: documents[i].page_content, + Field.VECTOR: embeddings[i] or None, + Field.METADATA_KEY: documents[i].metadata or {}, }, ) self._client.indices.refresh(index=self._collection_name) @@ -193,7 +193,7 @@ class ElasticSearchVector(BaseVector): def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]: top_k = kwargs.get("top_k", 4) num_candidates = math.ceil(top_k * 1.5) - knn = {"field": Field.VECTOR.value, "query_vector": query_vector, "k": top_k, "num_candidates": num_candidates} + knn = {"field": Field.VECTOR, "query_vector": query_vector, "k": top_k, "num_candidates": num_candidates} document_ids_filter = kwargs.get("document_ids_filter") if document_ids_filter: knn["filter"] = {"terms": {"metadata.document_id": document_ids_filter}} @@ -205,9 +205,9 @@ class ElasticSearchVector(BaseVector): docs_and_scores.append( ( Document( - page_content=hit["_source"][Field.CONTENT_KEY.value], - vector=hit["_source"][Field.VECTOR.value], - metadata=hit["_source"][Field.METADATA_KEY.value], + page_content=hit["_source"][Field.CONTENT_KEY], + vector=hit["_source"][Field.VECTOR], + metadata=hit["_source"][Field.METADATA_KEY], ), hit["_score"], ) @@ -224,13 +224,13 @@ class ElasticSearchVector(BaseVector): 
return docs def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: - query_str: dict[str, Any] = {"match": {Field.CONTENT_KEY.value: query}} + query_str: dict[str, Any] = {"match": {Field.CONTENT_KEY: query}} document_ids_filter = kwargs.get("document_ids_filter") if document_ids_filter: query_str = { "bool": { - "must": {"match": {Field.CONTENT_KEY.value: query}}, + "must": {"match": {Field.CONTENT_KEY: query}}, "filter": {"terms": {"metadata.document_id": document_ids_filter}}, } } @@ -240,9 +240,9 @@ class ElasticSearchVector(BaseVector): for hit in results["hits"]["hits"]: docs.append( Document( - page_content=hit["_source"][Field.CONTENT_KEY.value], - vector=hit["_source"][Field.VECTOR.value], - metadata=hit["_source"][Field.METADATA_KEY.value], + page_content=hit["_source"][Field.CONTENT_KEY], + vector=hit["_source"][Field.VECTOR], + metadata=hit["_source"][Field.METADATA_KEY], ) ) @@ -270,14 +270,14 @@ class ElasticSearchVector(BaseVector): dim = len(embeddings[0]) mappings = { "properties": { - Field.CONTENT_KEY.value: {"type": "text"}, - Field.VECTOR.value: { # Make sure the dimension is correct here + Field.CONTENT_KEY: {"type": "text"}, + Field.VECTOR: { # Make sure the dimension is correct here "type": "dense_vector", "dims": dim, "index": True, "similarity": "cosine", }, - Field.METADATA_KEY.value: { + Field.METADATA_KEY: { "type": "object", "properties": { "doc_id": {"type": "keyword"}, # Map doc_id to keyword type diff --git a/api/core/rag/datasource/vdb/huawei/huawei_cloud_vector.py b/api/core/rag/datasource/vdb/huawei/huawei_cloud_vector.py index cfee090768..c7b6593a8f 100644 --- a/api/core/rag/datasource/vdb/huawei/huawei_cloud_vector.py +++ b/api/core/rag/datasource/vdb/huawei/huawei_cloud_vector.py @@ -67,9 +67,9 @@ class HuaweiCloudVector(BaseVector): index=self._collection_name, id=uuids[i], document={ - Field.CONTENT_KEY.value: documents[i].page_content, - Field.VECTOR.value: embeddings[i] or None, - Field.METADATA_KEY.value: documents[i].metadata or {}, + Field.CONTENT_KEY: documents[i].page_content, + Field.VECTOR: embeddings[i] or None, + Field.METADATA_KEY: documents[i].metadata or {}, }, ) self._client.indices.refresh(index=self._collection_name) @@ -101,7 +101,7 @@ class HuaweiCloudVector(BaseVector): "size": top_k, "query": { "vector": { - Field.VECTOR.value: { + Field.VECTOR: { "vector": query_vector, "topk": top_k, } @@ -116,9 +116,9 @@ class HuaweiCloudVector(BaseVector): docs_and_scores.append( ( Document( - page_content=hit["_source"][Field.CONTENT_KEY.value], - vector=hit["_source"][Field.VECTOR.value], - metadata=hit["_source"][Field.METADATA_KEY.value], + page_content=hit["_source"][Field.CONTENT_KEY], + vector=hit["_source"][Field.VECTOR], + metadata=hit["_source"][Field.METADATA_KEY], ), hit["_score"], ) @@ -135,15 +135,15 @@ class HuaweiCloudVector(BaseVector): return docs def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: - query_str = {"match": {Field.CONTENT_KEY.value: query}} + query_str = {"match": {Field.CONTENT_KEY: query}} results = self._client.search(index=self._collection_name, query=query_str, size=kwargs.get("top_k", 4)) docs = [] for hit in results["hits"]["hits"]: docs.append( Document( - page_content=hit["_source"][Field.CONTENT_KEY.value], - vector=hit["_source"][Field.VECTOR.value], - metadata=hit["_source"][Field.METADATA_KEY.value], + page_content=hit["_source"][Field.CONTENT_KEY], + vector=hit["_source"][Field.VECTOR], + metadata=hit["_source"][Field.METADATA_KEY], ) ) @@ -171,8 
+171,8 @@ class HuaweiCloudVector(BaseVector): dim = len(embeddings[0]) mappings = { "properties": { - Field.CONTENT_KEY.value: {"type": "text"}, - Field.VECTOR.value: { # Make sure the dimension is correct here + Field.CONTENT_KEY: {"type": "text"}, + Field.VECTOR: { # Make sure the dimension is correct here "type": "vector", "dimension": dim, "indexing": True, @@ -181,7 +181,7 @@ class HuaweiCloudVector(BaseVector): "neighbors": 32, "efc": 128, }, - Field.METADATA_KEY.value: { + Field.METADATA_KEY: { "type": "object", "properties": { "doc_id": {"type": "keyword"} # Map doc_id to keyword type diff --git a/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py b/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py index 8824e1c67b..bfcb620618 100644 --- a/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py +++ b/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py @@ -125,9 +125,9 @@ class LindormVectorStore(BaseVector): } } action_values: dict[str, Any] = { - Field.CONTENT_KEY.value: documents[i].page_content, - Field.VECTOR.value: embeddings[i], - Field.METADATA_KEY.value: documents[i].metadata, + Field.CONTENT_KEY: documents[i].page_content, + Field.VECTOR: embeddings[i], + Field.METADATA_KEY: documents[i].metadata, } if self._using_ugc: action_header["index"]["routing"] = self._routing @@ -149,7 +149,7 @@ class LindormVectorStore(BaseVector): def get_ids_by_metadata_field(self, key: str, value: str): query: dict[str, Any] = { - "query": {"bool": {"must": [{"term": {f"{Field.METADATA_KEY.value}.{key}.keyword": value}}]}} + "query": {"bool": {"must": [{"term": {f"{Field.METADATA_KEY}.{key}.keyword": value}}]}} } if self._using_ugc: query["query"]["bool"]["must"].append({"term": {f"{ROUTING_FIELD}.keyword": self._routing}}) @@ -252,14 +252,14 @@ class LindormVectorStore(BaseVector): search_query: dict[str, Any] = { "size": top_k, "_source": True, - "query": {"knn": {Field.VECTOR.value: {"vector": query_vector, "k": top_k}}}, + "query": {"knn": {Field.VECTOR: {"vector": query_vector, "k": top_k}}}, } final_ext: dict[str, Any] = {"lvector": {}} if filters is not None and len(filters) > 0: # when using filter, transform filter from List[Dict] to Dict as valid format filter_dict = {"bool": {"must": filters}} if len(filters) > 1 else filters[0] - search_query["query"]["knn"][Field.VECTOR.value]["filter"] = filter_dict # filter should be Dict + search_query["query"]["knn"][Field.VECTOR]["filter"] = filter_dict # filter should be Dict final_ext["lvector"]["filter_type"] = "pre_filter" if final_ext != {"lvector": {}}: @@ -279,9 +279,9 @@ class LindormVectorStore(BaseVector): docs_and_scores.append( ( Document( - page_content=hit["_source"][Field.CONTENT_KEY.value], - vector=hit["_source"][Field.VECTOR.value], - metadata=hit["_source"][Field.METADATA_KEY.value], + page_content=hit["_source"][Field.CONTENT_KEY], + vector=hit["_source"][Field.VECTOR], + metadata=hit["_source"][Field.METADATA_KEY], ), hit["_score"], ) @@ -318,9 +318,9 @@ class LindormVectorStore(BaseVector): docs = [] for hit in response["hits"]["hits"]: - metadata = hit["_source"].get(Field.METADATA_KEY.value) - vector = hit["_source"].get(Field.VECTOR.value) - page_content = hit["_source"].get(Field.CONTENT_KEY.value) + metadata = hit["_source"].get(Field.METADATA_KEY) + vector = hit["_source"].get(Field.VECTOR) + page_content = hit["_source"].get(Field.CONTENT_KEY) doc = Document(page_content=page_content, vector=vector, metadata=metadata) docs.append(doc) @@ -342,8 +342,8 @@ class LindormVectorStore(BaseVector): "settings": 
{"index": {"knn": True, "knn_routing": self._using_ugc}}, "mappings": { "properties": { - Field.CONTENT_KEY.value: {"type": "text"}, - Field.VECTOR.value: { + Field.CONTENT_KEY: {"type": "text"}, + Field.VECTOR: { "type": "knn_vector", "dimension": len(embeddings[0]), # Make sure the dimension is correct here "method": { diff --git a/api/core/rag/datasource/vdb/milvus/milvus_vector.py b/api/core/rag/datasource/vdb/milvus/milvus_vector.py index 5f32feb709..96eb465401 100644 --- a/api/core/rag/datasource/vdb/milvus/milvus_vector.py +++ b/api/core/rag/datasource/vdb/milvus/milvus_vector.py @@ -85,7 +85,7 @@ class MilvusVector(BaseVector): collection_info = self._client.describe_collection(self._collection_name) fields = [field["name"] for field in collection_info["fields"]] # Since primary field is auto-id, no need to track it - self._fields = [f for f in fields if f != Field.PRIMARY_KEY.value] + self._fields = [f for f in fields if f != Field.PRIMARY_KEY] def _check_hybrid_search_support(self) -> bool: """ @@ -130,9 +130,9 @@ class MilvusVector(BaseVector): insert_dict = { # Do not need to insert the sparse_vector field separately, as the text_bm25_emb # function will automatically convert the native text into a sparse vector for us. - Field.CONTENT_KEY.value: documents[i].page_content, - Field.VECTOR.value: embeddings[i], - Field.METADATA_KEY.value: documents[i].metadata, + Field.CONTENT_KEY: documents[i].page_content, + Field.VECTOR: embeddings[i], + Field.METADATA_KEY: documents[i].metadata, } insert_dict_list.append(insert_dict) # Total insert count @@ -243,15 +243,15 @@ class MilvusVector(BaseVector): results = self._client.search( collection_name=self._collection_name, data=[query_vector], - anns_field=Field.VECTOR.value, + anns_field=Field.VECTOR, limit=kwargs.get("top_k", 4), - output_fields=[Field.CONTENT_KEY.value, Field.METADATA_KEY.value], + output_fields=[Field.CONTENT_KEY, Field.METADATA_KEY], filter=filter, ) return self._process_search_results( results, - output_fields=[Field.CONTENT_KEY.value, Field.METADATA_KEY.value], + output_fields=[Field.CONTENT_KEY, Field.METADATA_KEY], score_threshold=float(kwargs.get("score_threshold") or 0.0), ) @@ -264,7 +264,7 @@ class MilvusVector(BaseVector): "Full-text search is disabled: set MILVUS_ENABLE_HYBRID_SEARCH=true (requires Milvus >= 2.5.0)." ) return [] - if not self.field_exists(Field.SPARSE_VECTOR.value): + if not self.field_exists(Field.SPARSE_VECTOR): logger.warning( "Full-text search unavailable: collection missing 'sparse_vector' field; " "recreate the collection after enabling MILVUS_ENABLE_HYBRID_SEARCH to add BM25 sparse index." 
@@ -279,15 +279,15 @@ class MilvusVector(BaseVector): results = self._client.search( collection_name=self._collection_name, data=[query], - anns_field=Field.SPARSE_VECTOR.value, + anns_field=Field.SPARSE_VECTOR, limit=kwargs.get("top_k", 4), - output_fields=[Field.CONTENT_KEY.value, Field.METADATA_KEY.value], + output_fields=[Field.CONTENT_KEY, Field.METADATA_KEY], filter=filter, ) return self._process_search_results( results, - output_fields=[Field.CONTENT_KEY.value, Field.METADATA_KEY.value], + output_fields=[Field.CONTENT_KEY, Field.METADATA_KEY], score_threshold=float(kwargs.get("score_threshold") or 0.0), ) @@ -311,7 +311,7 @@ class MilvusVector(BaseVector): dim = len(embeddings[0]) fields = [] if metadatas: - fields.append(FieldSchema(Field.METADATA_KEY.value, DataType.JSON, max_length=65_535)) + fields.append(FieldSchema(Field.METADATA_KEY, DataType.JSON, max_length=65_535)) # Create the text field, enable_analyzer will be set True to support milvus automatically # transfer text to sparse_vector, reference: https://milvus.io/docs/full-text-search.md @@ -326,15 +326,15 @@ class MilvusVector(BaseVector): ): content_field_kwargs["analyzer_params"] = self._client_config.analyzer_params - fields.append(FieldSchema(Field.CONTENT_KEY.value, DataType.VARCHAR, **content_field_kwargs)) + fields.append(FieldSchema(Field.CONTENT_KEY, DataType.VARCHAR, **content_field_kwargs)) # Create the primary key field - fields.append(FieldSchema(Field.PRIMARY_KEY.value, DataType.INT64, is_primary=True, auto_id=True)) + fields.append(FieldSchema(Field.PRIMARY_KEY, DataType.INT64, is_primary=True, auto_id=True)) # Create the vector field, supports binary or float vectors - fields.append(FieldSchema(Field.VECTOR.value, infer_dtype_bydata(embeddings[0]), dim=dim)) + fields.append(FieldSchema(Field.VECTOR, infer_dtype_bydata(embeddings[0]), dim=dim)) # Create Sparse Vector Index for the collection if self._hybrid_search_enabled: - fields.append(FieldSchema(Field.SPARSE_VECTOR.value, DataType.SPARSE_FLOAT_VECTOR)) + fields.append(FieldSchema(Field.SPARSE_VECTOR, DataType.SPARSE_FLOAT_VECTOR)) schema = CollectionSchema(fields) @@ -342,8 +342,8 @@ class MilvusVector(BaseVector): if self._hybrid_search_enabled: bm25_function = Function( name="text_bm25_emb", - input_field_names=[Field.CONTENT_KEY.value], - output_field_names=[Field.SPARSE_VECTOR.value], + input_field_names=[Field.CONTENT_KEY], + output_field_names=[Field.SPARSE_VECTOR], function_type=FunctionType.BM25, ) schema.add_function(bm25_function) @@ -352,12 +352,12 @@ class MilvusVector(BaseVector): # Create Index params for the collection index_params_obj = IndexParams() - index_params_obj.add_index(field_name=Field.VECTOR.value, **index_params) + index_params_obj.add_index(field_name=Field.VECTOR, **index_params) # Create Sparse Vector Index for the collection if self._hybrid_search_enabled: index_params_obj.add_index( - field_name=Field.SPARSE_VECTOR.value, index_type="AUTOINDEX", metric_type="BM25" + field_name=Field.SPARSE_VECTOR, index_type="AUTOINDEX", metric_type="BM25" ) # Create the collection diff --git a/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py b/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py index 3eb1df027e..80ffdadd96 100644 --- a/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py +++ b/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py @@ -1,6 +1,6 @@ import json import logging -from typing import Any, Literal +from typing import Any from uuid import uuid4 from opensearchpy import 
OpenSearch, Urllib3AWSV4SignerAuth, Urllib3HttpConnection, helpers @@ -8,6 +8,7 @@ from opensearchpy.helpers import BulkIndexError from pydantic import BaseModel, model_validator from configs import dify_config +from configs.middleware.vdb.opensearch_config import AuthMethod from core.rag.datasource.vdb.field import Field from core.rag.datasource.vdb.vector_base import BaseVector from core.rag.datasource.vdb.vector_factory import AbstractVectorFactory @@ -25,7 +26,7 @@ class OpenSearchConfig(BaseModel): port: int secure: bool = False # use_ssl verify_certs: bool = True - auth_method: Literal["basic", "aws_managed_iam"] = "basic" + auth_method: AuthMethod = AuthMethod.BASIC user: str | None = None password: str | None = None aws_region: str | None = None @@ -98,9 +99,9 @@ class OpenSearchVector(BaseVector): "_op_type": "index", "_index": self._collection_name.lower(), "_source": { - Field.CONTENT_KEY.value: documents[i].page_content, - Field.VECTOR.value: embeddings[i], # Make sure you pass an array here - Field.METADATA_KEY.value: documents[i].metadata, + Field.CONTENT_KEY: documents[i].page_content, + Field.VECTOR: embeddings[i], # Make sure you pass an array here + Field.METADATA_KEY: documents[i].metadata, }, } # See https://github.com/langchain-ai/langchainjs/issues/4346#issuecomment-1935123377 @@ -116,7 +117,7 @@ class OpenSearchVector(BaseVector): ) def get_ids_by_metadata_field(self, key: str, value: str): - query = {"query": {"term": {f"{Field.METADATA_KEY.value}.{key}": value}}} + query = {"query": {"term": {f"{Field.METADATA_KEY}.{key}": value}}} response = self._client.search(index=self._collection_name.lower(), body=query) if response["hits"]["hits"]: return [hit["_id"] for hit in response["hits"]["hits"]] @@ -180,17 +181,17 @@ class OpenSearchVector(BaseVector): query = { "size": kwargs.get("top_k", 4), - "query": {"knn": {Field.VECTOR.value: {Field.VECTOR.value: query_vector, "k": kwargs.get("top_k", 4)}}}, + "query": {"knn": {Field.VECTOR: {Field.VECTOR: query_vector, "k": kwargs.get("top_k", 4)}}}, } document_ids_filter = kwargs.get("document_ids_filter") if document_ids_filter: query["query"] = { "script_score": { - "query": {"bool": {"filter": [{"terms": {Field.DOCUMENT_ID.value: document_ids_filter}}]}}, + "query": {"bool": {"filter": [{"terms": {Field.DOCUMENT_ID: document_ids_filter}}]}}, "script": { "source": "knn_score", "lang": "knn", - "params": {"field": Field.VECTOR.value, "query_value": query_vector, "space_type": "l2"}, + "params": {"field": Field.VECTOR, "query_value": query_vector, "space_type": "l2"}, }, } } @@ -203,7 +204,7 @@ class OpenSearchVector(BaseVector): docs = [] for hit in response["hits"]["hits"]: - metadata = hit["_source"].get(Field.METADATA_KEY.value, {}) + metadata = hit["_source"].get(Field.METADATA_KEY, {}) # Make sure metadata is a dictionary if metadata is None: @@ -212,7 +213,7 @@ class OpenSearchVector(BaseVector): metadata["score"] = hit["_score"] score_threshold = float(kwargs.get("score_threshold") or 0.0) if hit["_score"] >= score_threshold: - doc = Document(page_content=hit["_source"].get(Field.CONTENT_KEY.value), metadata=metadata) + doc = Document(page_content=hit["_source"].get(Field.CONTENT_KEY), metadata=metadata) docs.append(doc) return docs @@ -227,9 +228,9 @@ class OpenSearchVector(BaseVector): docs = [] for hit in response["hits"]["hits"]: - metadata = hit["_source"].get(Field.METADATA_KEY.value) - vector = hit["_source"].get(Field.VECTOR.value) - page_content = hit["_source"].get(Field.CONTENT_KEY.value) + metadata = 
hit["_source"].get(Field.METADATA_KEY) + vector = hit["_source"].get(Field.VECTOR) + page_content = hit["_source"].get(Field.CONTENT_KEY) doc = Document(page_content=page_content, vector=vector, metadata=metadata) docs.append(doc) @@ -250,8 +251,8 @@ class OpenSearchVector(BaseVector): "settings": {"index": {"knn": True}}, "mappings": { "properties": { - Field.CONTENT_KEY.value: {"type": "text"}, - Field.VECTOR.value: { + Field.CONTENT_KEY: {"type": "text"}, + Field.VECTOR: { "type": "knn_vector", "dimension": len(embeddings[0]), # Make sure the dimension is correct here "method": { @@ -261,7 +262,7 @@ class OpenSearchVector(BaseVector): "parameters": {"ef_construction": 64, "m": 8}, }, }, - Field.METADATA_KEY.value: { + Field.METADATA_KEY: { "type": "object", "properties": { "doc_id": {"type": "keyword"}, # Map doc_id to keyword type @@ -293,7 +294,7 @@ class OpenSearchVectorFactory(AbstractVectorFactory): port=dify_config.OPENSEARCH_PORT, secure=dify_config.OPENSEARCH_SECURE, verify_certs=dify_config.OPENSEARCH_VERIFY_CERTS, - auth_method=dify_config.OPENSEARCH_AUTH_METHOD.value, + auth_method=dify_config.OPENSEARCH_AUTH_METHOD, user=dify_config.OPENSEARCH_USER, password=dify_config.OPENSEARCH_PASSWORD, aws_region=dify_config.OPENSEARCH_AWS_REGION, diff --git a/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py b/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py index d46f29bd64..f8c62b908a 100644 --- a/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py +++ b/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py @@ -147,15 +147,13 @@ class QdrantVector(BaseVector): # create group_id payload index self._client.create_payload_index( - collection_name, Field.GROUP_KEY.value, field_schema=PayloadSchemaType.KEYWORD + collection_name, Field.GROUP_KEY, field_schema=PayloadSchemaType.KEYWORD ) # create doc_id payload index - self._client.create_payload_index( - collection_name, Field.DOC_ID.value, field_schema=PayloadSchemaType.KEYWORD - ) + self._client.create_payload_index(collection_name, Field.DOC_ID, field_schema=PayloadSchemaType.KEYWORD) # create document_id payload index self._client.create_payload_index( - collection_name, Field.DOCUMENT_ID.value, field_schema=PayloadSchemaType.KEYWORD + collection_name, Field.DOCUMENT_ID, field_schema=PayloadSchemaType.KEYWORD ) # create full text index text_index_params = TextIndexParams( @@ -165,9 +163,7 @@ class QdrantVector(BaseVector): max_token_len=20, lowercase=True, ) - self._client.create_payload_index( - collection_name, Field.CONTENT_KEY.value, field_schema=text_index_params - ) + self._client.create_payload_index(collection_name, Field.CONTENT_KEY, field_schema=text_index_params) redis_client.set(collection_exist_cache_key, 1, ex=3600) def add_texts(self, documents: list[Document], embeddings: list[list[float]], **kwargs): @@ -220,10 +216,10 @@ class QdrantVector(BaseVector): self._build_payloads( batch_texts, batch_metadatas, - Field.CONTENT_KEY.value, - Field.METADATA_KEY.value, + Field.CONTENT_KEY, + Field.METADATA_KEY, group_id or "", # Ensure group_id is never None - Field.GROUP_KEY.value, + Field.GROUP_KEY, ), ) ] @@ -381,12 +377,12 @@ class QdrantVector(BaseVector): for result in results: if result.payload is None: continue - metadata = result.payload.get(Field.METADATA_KEY.value) or {} + metadata = result.payload.get(Field.METADATA_KEY) or {} # duplicate check score threshold if result.score >= score_threshold: metadata["score"] = result.score doc = Document( - page_content=result.payload.get(Field.CONTENT_KEY.value, ""), + 
page_content=result.payload.get(Field.CONTENT_KEY, ""), metadata=metadata, ) docs.append(doc) @@ -433,7 +429,7 @@ class QdrantVector(BaseVector): documents = [] for result in results: if result: - document = self._document_from_scored_point(result, Field.CONTENT_KEY.value, Field.METADATA_KEY.value) + document = self._document_from_scored_point(result, Field.CONTENT_KEY, Field.METADATA_KEY) documents.append(document) return documents diff --git a/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py b/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py index e91d9bb0d6..f2156afa59 100644 --- a/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py +++ b/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py @@ -55,7 +55,7 @@ class TableStoreVector(BaseVector): self._normalize_full_text_bm25_score = config.normalize_full_text_bm25_score self._table_name = f"{collection_name}" self._index_name = f"{collection_name}_idx" - self._tags_field = f"{Field.METADATA_KEY.value}_tags" + self._tags_field = f"{Field.METADATA_KEY}_tags" def create_collection(self, embeddings: list[list[float]], **kwargs): dimension = len(embeddings[0]) @@ -64,7 +64,7 @@ class TableStoreVector(BaseVector): def get_by_ids(self, ids: list[str]) -> list[Document]: docs = [] request = BatchGetRowRequest() - columns_to_get = [Field.METADATA_KEY.value, Field.CONTENT_KEY.value] + columns_to_get = [Field.METADATA_KEY, Field.CONTENT_KEY] rows_to_get = [[("id", _id)] for _id in ids] request.add(TableInBatchGetRowItem(self._table_name, rows_to_get, columns_to_get, None, 1)) @@ -73,11 +73,7 @@ class TableStoreVector(BaseVector): for item in table_result: if item.is_ok and item.row: kv = {k: v for k, v, _ in item.row.attribute_columns} - docs.append( - Document( - page_content=kv[Field.CONTENT_KEY.value], metadata=json.loads(kv[Field.METADATA_KEY.value]) - ) - ) + docs.append(Document(page_content=kv[Field.CONTENT_KEY], metadata=json.loads(kv[Field.METADATA_KEY]))) return docs def get_type(self) -> str: @@ -95,9 +91,9 @@ class TableStoreVector(BaseVector): self._write_row( primary_key=uuids[i], attributes={ - Field.CONTENT_KEY.value: documents[i].page_content, - Field.VECTOR.value: embeddings[i], - Field.METADATA_KEY.value: documents[i].metadata, + Field.CONTENT_KEY: documents[i].page_content, + Field.VECTOR: embeddings[i], + Field.METADATA_KEY: documents[i].metadata, }, ) return uuids @@ -180,7 +176,7 @@ class TableStoreVector(BaseVector): field_schemas = [ tablestore.FieldSchema( - Field.CONTENT_KEY.value, + Field.CONTENT_KEY, tablestore.FieldType.TEXT, analyzer=tablestore.AnalyzerType.MAXWORD, index=True, @@ -188,7 +184,7 @@ class TableStoreVector(BaseVector): store=False, ), tablestore.FieldSchema( - Field.VECTOR.value, + Field.VECTOR, tablestore.FieldType.VECTOR, vector_options=tablestore.VectorOptions( data_type=tablestore.VectorDataType.VD_FLOAT_32, @@ -197,7 +193,7 @@ class TableStoreVector(BaseVector): ), ), tablestore.FieldSchema( - Field.METADATA_KEY.value, + Field.METADATA_KEY, tablestore.FieldType.KEYWORD, index=True, store=False, @@ -233,15 +229,15 @@ class TableStoreVector(BaseVector): pk = [("id", primary_key)] tags = [] - for key, value in attributes[Field.METADATA_KEY.value].items(): + for key, value in attributes[Field.METADATA_KEY].items(): tags.append(str(key) + "=" + str(value)) attribute_columns = [ - (Field.CONTENT_KEY.value, attributes[Field.CONTENT_KEY.value]), - (Field.VECTOR.value, json.dumps(attributes[Field.VECTOR.value])), + (Field.CONTENT_KEY, attributes[Field.CONTENT_KEY]), + 
(Field.VECTOR, json.dumps(attributes[Field.VECTOR])), ( - Field.METADATA_KEY.value, - json.dumps(attributes[Field.METADATA_KEY.value]), + Field.METADATA_KEY, + json.dumps(attributes[Field.METADATA_KEY]), ), (self._tags_field, json.dumps(tags)), ] @@ -270,7 +266,7 @@ class TableStoreVector(BaseVector): index_name=self._index_name, search_query=query, columns_to_get=tablestore.ColumnsToGet( - column_names=[Field.PRIMARY_KEY.value], return_type=tablestore.ColumnReturnType.SPECIFIED + column_names=[Field.PRIMARY_KEY], return_type=tablestore.ColumnReturnType.SPECIFIED ), ) @@ -288,7 +284,7 @@ class TableStoreVector(BaseVector): self, query_vector: list[float], document_ids_filter: list[str] | None, top_k: int, score_threshold: float ) -> list[Document]: knn_vector_query = tablestore.KnnVectorQuery( - field_name=Field.VECTOR.value, + field_name=Field.VECTOR, top_k=top_k, float32_query_vector=query_vector, ) @@ -311,8 +307,8 @@ class TableStoreVector(BaseVector): for col in search_hit.row[1]: ots_column_map[col[0]] = col[1] - vector_str = ots_column_map.get(Field.VECTOR.value) - metadata_str = ots_column_map.get(Field.METADATA_KEY.value) + vector_str = ots_column_map.get(Field.VECTOR) + metadata_str = ots_column_map.get(Field.METADATA_KEY) vector = json.loads(vector_str) if vector_str else None metadata = json.loads(metadata_str) if metadata_str else {} @@ -321,7 +317,7 @@ class TableStoreVector(BaseVector): documents.append( Document( - page_content=ots_column_map.get(Field.CONTENT_KEY.value) or "", + page_content=ots_column_map.get(Field.CONTENT_KEY) or "", vector=vector, metadata=metadata, ) @@ -343,7 +339,7 @@ class TableStoreVector(BaseVector): self, query: str, document_ids_filter: list[str] | None, top_k: int, score_threshold: float ) -> list[Document]: bool_query = tablestore.BoolQuery(must_queries=[], filter_queries=[], should_queries=[], must_not_queries=[]) - bool_query.must_queries.append(tablestore.MatchQuery(text=query, field_name=Field.CONTENT_KEY.value)) + bool_query.must_queries.append(tablestore.MatchQuery(text=query, field_name=Field.CONTENT_KEY)) if document_ids_filter: bool_query.filter_queries.append(tablestore.TermsQuery(self._tags_field, document_ids_filter)) @@ -374,10 +370,10 @@ class TableStoreVector(BaseVector): for col in search_hit.row[1]: ots_column_map[col[0]] = col[1] - metadata_str = ots_column_map.get(Field.METADATA_KEY.value) + metadata_str = ots_column_map.get(Field.METADATA_KEY) metadata = json.loads(metadata_str) if metadata_str else {} - vector_str = ots_column_map.get(Field.VECTOR.value) + vector_str = ots_column_map.get(Field.VECTOR) vector = json.loads(vector_str) if vector_str else None if score: @@ -385,7 +381,7 @@ class TableStoreVector(BaseVector): documents.append( Document( - page_content=ots_column_map.get(Field.CONTENT_KEY.value) or "", + page_content=ots_column_map.get(Field.CONTENT_KEY) or "", vector=vector, metadata=metadata, ) diff --git a/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_on_qdrant_vector.py b/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_on_qdrant_vector.py index 1ac10209d3..56ffb36a2b 100644 --- a/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_on_qdrant_vector.py +++ b/api/core/rag/datasource/vdb/tidb_on_qdrant/tidb_on_qdrant_vector.py @@ -141,15 +141,13 @@ class TidbOnQdrantVector(BaseVector): # create group_id payload index self._client.create_payload_index( - collection_name, Field.GROUP_KEY.value, field_schema=PayloadSchemaType.KEYWORD + collection_name, Field.GROUP_KEY, field_schema=PayloadSchemaType.KEYWORD ) # 
create doc_id payload index - self._client.create_payload_index( - collection_name, Field.DOC_ID.value, field_schema=PayloadSchemaType.KEYWORD - ) + self._client.create_payload_index(collection_name, Field.DOC_ID, field_schema=PayloadSchemaType.KEYWORD) # create document_id payload index self._client.create_payload_index( - collection_name, Field.DOCUMENT_ID.value, field_schema=PayloadSchemaType.KEYWORD + collection_name, Field.DOCUMENT_ID, field_schema=PayloadSchemaType.KEYWORD ) # create full text index text_index_params = TextIndexParams( @@ -159,9 +157,7 @@ class TidbOnQdrantVector(BaseVector): max_token_len=20, lowercase=True, ) - self._client.create_payload_index( - collection_name, Field.CONTENT_KEY.value, field_schema=text_index_params - ) + self._client.create_payload_index(collection_name, Field.CONTENT_KEY, field_schema=text_index_params) redis_client.set(collection_exist_cache_key, 1, ex=3600) def add_texts(self, documents: list[Document], embeddings: list[list[float]], **kwargs): @@ -211,10 +207,10 @@ class TidbOnQdrantVector(BaseVector): self._build_payloads( batch_texts, batch_metadatas, - Field.CONTENT_KEY.value, - Field.METADATA_KEY.value, + Field.CONTENT_KEY, + Field.METADATA_KEY, group_id or "", - Field.GROUP_KEY.value, + Field.GROUP_KEY, ), ) ] @@ -349,13 +345,13 @@ class TidbOnQdrantVector(BaseVector): for result in results: if result.payload is None: continue - metadata = result.payload.get(Field.METADATA_KEY.value) or {} + metadata = result.payload.get(Field.METADATA_KEY) or {} # duplicate check score threshold score_threshold = kwargs.get("score_threshold") or 0.0 if result.score >= score_threshold: metadata["score"] = result.score doc = Document( - page_content=result.payload.get(Field.CONTENT_KEY.value, ""), + page_content=result.payload.get(Field.CONTENT_KEY, ""), metadata=metadata, ) docs.append(doc) @@ -392,7 +388,7 @@ class TidbOnQdrantVector(BaseVector): documents = [] for result in results: if result: - document = self._document_from_scored_point(result, Field.CONTENT_KEY.value, Field.METADATA_KEY.value) + document = self._document_from_scored_point(result, Field.CONTENT_KEY, Field.METADATA_KEY) documents.append(document) return documents diff --git a/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py b/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py index b8897c4165..27ae038a06 100644 --- a/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py +++ b/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py @@ -55,13 +55,13 @@ class TiDBVector(BaseVector): return Table( self._collection_name, self._orm_base.metadata, - Column(Field.PRIMARY_KEY.value, String(36), primary_key=True, nullable=False), + Column(Field.PRIMARY_KEY, String(36), primary_key=True, nullable=False), Column( - Field.VECTOR.value, + Field.VECTOR, VectorType(dim), nullable=False, ), - Column(Field.TEXT_KEY.value, TEXT, nullable=False), + Column(Field.TEXT_KEY, TEXT, nullable=False), Column("meta", JSON, nullable=False), Column("create_time", DateTime, server_default=sqlalchemy.text("CURRENT_TIMESTAMP")), Column( diff --git a/api/core/rag/datasource/vdb/vikingdb/vikingdb_vector.py b/api/core/rag/datasource/vdb/vikingdb/vikingdb_vector.py index d1bdd3baef..e5feecf2bc 100644 --- a/api/core/rag/datasource/vdb/vikingdb/vikingdb_vector.py +++ b/api/core/rag/datasource/vdb/vikingdb/vikingdb_vector.py @@ -76,11 +76,11 @@ class VikingDBVector(BaseVector): if not self._has_collection(): fields = [ - Field(field_name=vdb_Field.PRIMARY_KEY.value, field_type=FieldType.String, 
is_primary_key=True), - Field(field_name=vdb_Field.METADATA_KEY.value, field_type=FieldType.String), - Field(field_name=vdb_Field.GROUP_KEY.value, field_type=FieldType.String), - Field(field_name=vdb_Field.CONTENT_KEY.value, field_type=FieldType.Text), - Field(field_name=vdb_Field.VECTOR.value, field_type=FieldType.Vector, dim=dimension), + Field(field_name=vdb_Field.PRIMARY_KEY, field_type=FieldType.String, is_primary_key=True), + Field(field_name=vdb_Field.METADATA_KEY, field_type=FieldType.String), + Field(field_name=vdb_Field.GROUP_KEY, field_type=FieldType.String), + Field(field_name=vdb_Field.CONTENT_KEY, field_type=FieldType.Text), + Field(field_name=vdb_Field.VECTOR, field_type=FieldType.Vector, dim=dimension), ] self._client.create_collection( @@ -100,7 +100,7 @@ class VikingDBVector(BaseVector): collection_name=self._collection_name, index_name=self._index_name, vector_index=vector_index, - partition_by=vdb_Field.GROUP_KEY.value, + partition_by=vdb_Field.GROUP_KEY, description="Index For Dify", ) redis_client.set(collection_exist_cache_key, 1, ex=3600) @@ -126,11 +126,11 @@ class VikingDBVector(BaseVector): # FIXME: fix the type of metadata later doc = Data( { - vdb_Field.PRIMARY_KEY.value: metadatas[i]["doc_id"], # type: ignore - vdb_Field.VECTOR.value: embeddings[i] if embeddings else None, - vdb_Field.CONTENT_KEY.value: page_content, - vdb_Field.METADATA_KEY.value: json.dumps(metadata), - vdb_Field.GROUP_KEY.value: self._group_id, + vdb_Field.PRIMARY_KEY: metadatas[i]["doc_id"], # type: ignore + vdb_Field.VECTOR: embeddings[i] if embeddings else None, + vdb_Field.CONTENT_KEY: page_content, + vdb_Field.METADATA_KEY: json.dumps(metadata), + vdb_Field.GROUP_KEY: self._group_id, } ) docs.append(doc) @@ -151,7 +151,7 @@ class VikingDBVector(BaseVector): # Note: Metadata field value is an dict, but vikingdb field # not support json type results = self._client.get_index(self._collection_name, self._index_name).search( - filter={"op": "must", "field": vdb_Field.GROUP_KEY.value, "conds": [self._group_id]}, + filter={"op": "must", "field": vdb_Field.GROUP_KEY, "conds": [self._group_id]}, # max value is 5000 limit=5000, ) @@ -161,7 +161,7 @@ class VikingDBVector(BaseVector): ids = [] for result in results: - metadata = result.fields.get(vdb_Field.METADATA_KEY.value) + metadata = result.fields.get(vdb_Field.METADATA_KEY) if metadata is not None: metadata = json.loads(metadata) if metadata.get(key) == value: @@ -189,12 +189,12 @@ class VikingDBVector(BaseVector): docs = [] for result in results: - metadata = result.fields.get(vdb_Field.METADATA_KEY.value) + metadata = result.fields.get(vdb_Field.METADATA_KEY) if metadata is not None: metadata = json.loads(metadata) if result.score >= score_threshold: metadata["score"] = result.score - doc = Document(page_content=result.fields.get(vdb_Field.CONTENT_KEY.value), metadata=metadata) + doc = Document(page_content=result.fields.get(vdb_Field.CONTENT_KEY), metadata=metadata) docs.append(doc) docs = sorted(docs, key=lambda x: x.metadata.get("score", 0) if x.metadata else 0, reverse=True) return docs diff --git a/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py b/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py index d84ae6010d..8820c0a846 100644 --- a/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py +++ b/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py @@ -104,7 +104,7 @@ class WeaviateVector(BaseVector): with self._client.batch as batch: for i, text in enumerate(texts): - data_properties = {Field.TEXT_KEY.value: text} 
+ data_properties = {Field.TEXT_KEY: text} if metadatas is not None: # metadata maybe None for key, val in (metadatas[i] or {}).items(): @@ -182,7 +182,7 @@ class WeaviateVector(BaseVector): """Look up similar documents by embedding vector in Weaviate.""" collection_name = self._collection_name properties = self._attributes - properties.append(Field.TEXT_KEY.value) + properties.append(Field.TEXT_KEY) query_obj = self._client.query.get(collection_name, properties) vector = {"vector": query_vector} @@ -204,7 +204,7 @@ class WeaviateVector(BaseVector): docs_and_scores = [] for res in result["data"]["Get"][collection_name]: - text = res.pop(Field.TEXT_KEY.value) + text = res.pop(Field.TEXT_KEY) score = 1 - res["_additional"]["distance"] docs_and_scores.append((Document(page_content=text, metadata=res), score)) @@ -232,7 +232,7 @@ class WeaviateVector(BaseVector): collection_name = self._collection_name content: dict[str, Any] = {"concepts": [query]} properties = self._attributes - properties.append(Field.TEXT_KEY.value) + properties.append(Field.TEXT_KEY) if kwargs.get("search_distance"): content["certainty"] = kwargs.get("search_distance") query_obj = self._client.query.get(collection_name, properties) @@ -250,7 +250,7 @@ class WeaviateVector(BaseVector): raise ValueError(f"Error during query: {result['errors']}") docs = [] for res in result["data"]["Get"][collection_name]: - text = res.pop(Field.TEXT_KEY.value) + text = res.pop(Field.TEXT_KEY) additional = res.pop("_additional") docs.append(Document(page_content=text, vector=additional["vector"], metadata=res)) return docs diff --git a/api/core/rag/entities/event.py b/api/core/rag/entities/event.py index 24db5d77be..a61b17ddb8 100644 --- a/api/core/rag/entities/event.py +++ b/api/core/rag/entities/event.py @@ -20,12 +20,12 @@ class BaseDatasourceEvent(BaseModel): class DatasourceErrorEvent(BaseDatasourceEvent): - event: str = DatasourceStreamEvent.ERROR.value + event: DatasourceStreamEvent = DatasourceStreamEvent.ERROR error: str = Field(..., description="error message") class DatasourceCompletedEvent(BaseDatasourceEvent): - event: str = DatasourceStreamEvent.COMPLETED.value + event: DatasourceStreamEvent = DatasourceStreamEvent.COMPLETED data: Mapping[str, Any] | list = Field(..., description="result") total: int | None = Field(default=0, description="total") completed: int | None = Field(default=0, description="completed") @@ -33,6 +33,6 @@ class DatasourceCompletedEvent(BaseDatasourceEvent): class DatasourceProcessingEvent(BaseDatasourceEvent): - event: str = DatasourceStreamEvent.PROCESSING.value + event: DatasourceStreamEvent = DatasourceStreamEvent.PROCESSING total: int | None = Field(..., description="total") completed: int | None = Field(..., description="completed") diff --git a/api/core/rag/extractor/extract_processor.py b/api/core/rag/extractor/extract_processor.py index 3dc08e1832..0f62f9c4b6 100644 --- a/api/core/rag/extractor/extract_processor.py +++ b/api/core/rag/extractor/extract_processor.py @@ -45,7 +45,7 @@ class ExtractProcessor: cls, upload_file: UploadFile, return_text: bool = False, is_automatic: bool = False ) -> Union[list[Document], str]: extract_setting = ExtractSetting( - datasource_type=DatasourceType.FILE.value, upload_file=upload_file, document_model="text_model" + datasource_type=DatasourceType.FILE, upload_file=upload_file, document_model="text_model" ) if return_text: delimiter = "\n" @@ -76,7 +76,7 @@ class ExtractProcessor: # 
https://stackoverflow.com/questions/26541416/generate-temporary-file-names-without-creating-actual-file-in-python#comment90414256_26541521 file_path = f"{temp_dir}/{tempfile.gettempdir()}{suffix}" Path(file_path).write_bytes(response.content) - extract_setting = ExtractSetting(datasource_type=DatasourceType.FILE.value, document_model="text_model") + extract_setting = ExtractSetting(datasource_type=DatasourceType.FILE, document_model="text_model") if return_text: delimiter = "\n" return delimiter.join( @@ -92,7 +92,7 @@ class ExtractProcessor: def extract( cls, extract_setting: ExtractSetting, is_automatic: bool = False, file_path: str | None = None ) -> list[Document]: - if extract_setting.datasource_type == DatasourceType.FILE.value: + if extract_setting.datasource_type == DatasourceType.FILE: with tempfile.TemporaryDirectory() as temp_dir: if not file_path: assert extract_setting.upload_file is not None, "upload_file is required" @@ -163,7 +163,7 @@ class ExtractProcessor: # txt extractor = TextExtractor(file_path, autodetect_encoding=True) return extractor.extract() - elif extract_setting.datasource_type == DatasourceType.NOTION.value: + elif extract_setting.datasource_type == DatasourceType.NOTION: assert extract_setting.notion_info is not None, "notion_info is required" extractor = NotionExtractor( notion_workspace_id=extract_setting.notion_info.notion_workspace_id, @@ -174,7 +174,7 @@ class ExtractProcessor: credential_id=extract_setting.notion_info.credential_id, ) return extractor.extract() - elif extract_setting.datasource_type == DatasourceType.WEBSITE.value: + elif extract_setting.datasource_type == DatasourceType.WEBSITE: assert extract_setting.website_info is not None, "website_info is required" if extract_setting.website_info.provider == "firecrawl": extractor = FirecrawlWebExtractor( diff --git a/api/core/rag/rerank/rerank_factory.py b/api/core/rag/rerank/rerank_factory.py index 1a3cf85736..524e83824c 100644 --- a/api/core/rag/rerank/rerank_factory.py +++ b/api/core/rag/rerank/rerank_factory.py @@ -8,9 +8,9 @@ class RerankRunnerFactory: @staticmethod def create_rerank_runner(runner_type: str, *args, **kwargs) -> BaseRerankRunner: match runner_type: - case RerankMode.RERANKING_MODEL.value: + case RerankMode.RERANKING_MODEL: return RerankModelRunner(*args, **kwargs) - case RerankMode.WEIGHTED_SCORE.value: + case RerankMode.WEIGHTED_SCORE: return WeightRerankRunner(*args, **kwargs) case _: raise ValueError(f"Unknown runner type: {runner_type}") diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index b08f80da49..0a702d2902 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -61,7 +61,7 @@ from models.dataset import Document as DatasetDocument from services.external_knowledge_service import ExternalDatasetService default_retrieval_model: dict[str, Any] = { - "search_method": RetrievalMethod.SEMANTIC_SEARCH.value, + "search_method": RetrievalMethod.SEMANTIC_SEARCH, "reranking_enable": False, "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""}, "top_k": 4, @@ -692,7 +692,7 @@ class DatasetRetrieval: if retrieve_config.retrieve_strategy == DatasetRetrieveConfigEntity.RetrieveStrategy.SINGLE: # get retrieval model config default_retrieval_model = { - "search_method": RetrievalMethod.SEMANTIC_SEARCH.value, + "search_method": RetrievalMethod.SEMANTIC_SEARCH, "reranking_enable": False, "reranking_model": {"reranking_provider_name": "", 
"reranking_model_name": ""}, "top_k": 2, diff --git a/api/core/rag/retrieval/retrieval_methods.py b/api/core/rag/retrieval/retrieval_methods.py index c7c6e60c8d..5f0f2a9d33 100644 --- a/api/core/rag/retrieval/retrieval_methods.py +++ b/api/core/rag/retrieval/retrieval_methods.py @@ -9,8 +9,8 @@ class RetrievalMethod(Enum): @staticmethod def is_support_semantic_search(retrieval_method: str) -> bool: - return retrieval_method in {RetrievalMethod.SEMANTIC_SEARCH.value, RetrievalMethod.HYBRID_SEARCH.value} + return retrieval_method in {RetrievalMethod.SEMANTIC_SEARCH, RetrievalMethod.HYBRID_SEARCH} @staticmethod def is_support_fulltext_search(retrieval_method: str) -> bool: - return retrieval_method in {RetrievalMethod.FULL_TEXT_SEARCH.value, RetrievalMethod.HYBRID_SEARCH.value} + return retrieval_method in {RetrievalMethod.FULL_TEXT_SEARCH, RetrievalMethod.HYBRID_SEARCH} diff --git a/api/core/tools/builtin_tool/provider.py b/api/core/tools/builtin_tool/provider.py index 29d34e722a..2e94907f30 100644 --- a/api/core/tools/builtin_tool/provider.py +++ b/api/core/tools/builtin_tool/provider.py @@ -111,7 +111,7 @@ class BuiltinToolProviderController(ToolProviderController): :return: the credentials schema """ - return self.get_credentials_schema_by_type(CredentialType.API_KEY.value) + return self.get_credentials_schema_by_type(CredentialType.API_KEY) def get_credentials_schema_by_type(self, credential_type: str) -> list[ProviderConfig]: """ @@ -122,7 +122,7 @@ class BuiltinToolProviderController(ToolProviderController): """ if credential_type == CredentialType.OAUTH2.value: return self.entity.oauth_schema.credentials_schema.copy() if self.entity.oauth_schema else [] - if credential_type == CredentialType.API_KEY.value: + if credential_type == CredentialType.API_KEY: return self.entity.credentials_schema.copy() if self.entity.credentials_schema else [] raise ValueError(f"Invalid credential type: {credential_type}") @@ -134,15 +134,15 @@ class BuiltinToolProviderController(ToolProviderController): """ return self.entity.oauth_schema.client_schema.copy() if self.entity.oauth_schema else [] - def get_supported_credential_types(self) -> list[str]: + def get_supported_credential_types(self) -> list[CredentialType]: """ returns the credential support type of the provider """ types = [] if self.entity.credentials_schema is not None and len(self.entity.credentials_schema) > 0: - types.append(CredentialType.API_KEY.value) + types.append(CredentialType.API_KEY) if self.entity.oauth_schema is not None and len(self.entity.oauth_schema.credentials_schema) > 0: - types.append(CredentialType.OAUTH2.value) + types.append(CredentialType.OAUTH2) return types def get_tools(self) -> list[BuiltinTool]: diff --git a/api/core/tools/entities/api_entities.py b/api/core/tools/entities/api_entities.py index 00c4ab9dd7..de6bf01ae9 100644 --- a/api/core/tools/entities/api_entities.py +++ b/api/core/tools/entities/api_entities.py @@ -61,7 +61,7 @@ class ToolProviderApiEntity(BaseModel): for tool in tools: if tool.get("parameters"): for parameter in tool.get("parameters"): - if parameter.get("type") == ToolParameter.ToolParameterType.SYSTEM_FILES.value: + if parameter.get("type") == ToolParameter.ToolParameterType.SYSTEM_FILES: parameter["type"] = "files" if parameter.get("input_schema") is None: parameter.pop("input_schema", None) @@ -110,7 +110,9 @@ class ToolProviderCredentialApiEntity(BaseModel): class ToolProviderCredentialInfoApiEntity(BaseModel): - supported_credential_types: list[str] = Field(description="The supported 
credential types of the provider") + supported_credential_types: list[CredentialType] = Field( + description="The supported credential types of the provider" + ) is_oauth_custom_client_enabled: bool = Field( default=False, description="Whether the OAuth custom client is enabled for the provider" ) diff --git a/api/core/tools/entities/tool_entities.py b/api/core/tools/entities/tool_entities.py index a59b54216f..62e3aa8b5d 100644 --- a/api/core/tools/entities/tool_entities.py +++ b/api/core/tools/entities/tool_entities.py @@ -113,7 +113,7 @@ class ApiProviderAuthType(StrEnum): # normalize & tiny alias for backward compatibility v = (value or "").strip().lower() if v == "api_key": - v = cls.API_KEY_HEADER.value + v = cls.API_KEY_HEADER for mode in cls: if mode.value == v: diff --git a/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py b/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py index cce5ec6b1b..b5bc4d3c00 100644 --- a/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py +++ b/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py @@ -18,7 +18,7 @@ from extensions.ext_database import db from models.dataset import Dataset, Document, DocumentSegment default_retrieval_model: dict[str, Any] = { - "search_method": RetrievalMethod.SEMANTIC_SEARCH.value, + "search_method": RetrievalMethod.SEMANTIC_SEARCH, "reranking_enable": False, "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""}, "top_k": 2, diff --git a/api/core/tools/utils/dataset_retriever/dataset_retriever_tool.py b/api/core/tools/utils/dataset_retriever/dataset_retriever_tool.py index 0e2237befd..1eae582f67 100644 --- a/api/core/tools/utils/dataset_retriever/dataset_retriever_tool.py +++ b/api/core/tools/utils/dataset_retriever/dataset_retriever_tool.py @@ -17,7 +17,7 @@ from models.dataset import Document as DatasetDocument from services.external_knowledge_service import ExternalDatasetService default_retrieval_model: dict[str, Any] = { - "search_method": RetrievalMethod.SEMANTIC_SEARCH.value, + "search_method": RetrievalMethod.SEMANTIC_SEARCH, "reranking_enable": False, "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""}, "reranking_mode": "reranking_model", diff --git a/api/core/tools/utils/parser.py b/api/core/tools/utils/parser.py index 35fd7895b9..c7ac3387e5 100644 --- a/api/core/tools/utils/parser.py +++ b/api/core/tools/utils/parser.py @@ -393,7 +393,7 @@ class ApiBasedToolSchemaParser: openapi = ApiBasedToolSchemaParser.parse_openapi_to_tool_bundle( loaded_content, extra_info=extra_info, warning=warning ) - schema_type = ApiProviderSchemaType.OPENAPI.value + schema_type = ApiProviderSchemaType.OPENAPI return openapi, schema_type except ToolApiSchemaError as e: openapi_error = e @@ -403,7 +403,7 @@ class ApiBasedToolSchemaParser: converted_swagger = ApiBasedToolSchemaParser.parse_swagger_to_openapi( loaded_content, extra_info=extra_info, warning=warning ) - schema_type = ApiProviderSchemaType.SWAGGER.value + schema_type = ApiProviderSchemaType.SWAGGER return ApiBasedToolSchemaParser.parse_openapi_to_tool_bundle( converted_swagger, extra_info=extra_info, warning=warning ), schema_type @@ -415,7 +415,7 @@ class ApiBasedToolSchemaParser: openapi_plugin = ApiBasedToolSchemaParser.parse_openai_plugin_json_to_tool_bundle( json_dumps(loaded_content), extra_info=extra_info, warning=warning ) - return openapi_plugin, ApiProviderSchemaType.OPENAI_PLUGIN.value + return openapi_plugin, 
ApiProviderSchemaType.OPENAI_PLUGIN except ToolNotSupportedError as e: # maybe it's not plugin at all openapi_plugin_error = e diff --git a/api/core/workflow/nodes/agent/agent_node.py b/api/core/workflow/nodes/agent/agent_node.py index 972823b4d9..4a24b18465 100644 --- a/api/core/workflow/nodes/agent/agent_node.py +++ b/api/core/workflow/nodes/agent/agent_node.py @@ -252,8 +252,8 @@ class AgentNode(Node): if all(isinstance(v, dict) for _, v in parameters.items()): params = {} for key, param in parameters.items(): - if param.get("auto", ParamsAutoGenerated.OPEN.value) in ( - ParamsAutoGenerated.CLOSE.value, + if param.get("auto", ParamsAutoGenerated.OPEN) in ( + ParamsAutoGenerated.CLOSE, 0, ): value_param = param.get("value", {}) @@ -269,7 +269,7 @@ class AgentNode(Node): value = cast(list[dict[str, Any]], value) tool_value = [] for tool in value: - provider_type = ToolProviderType(tool.get("type", ToolProviderType.BUILT_IN.value)) + provider_type = ToolProviderType(tool.get("type", ToolProviderType.BUILT_IN)) setting_params = tool.get("settings", {}) parameters = tool.get("parameters", {}) manual_input_params = [key for key, value in parameters.items() if value is not None] @@ -420,7 +420,7 @@ class AgentNode(Node): def _fetch_memory(self, model_instance: ModelInstance) -> TokenBufferMemory | None: # get conversation id conversation_id_variable = self.graph_runtime_state.variable_pool.get( - ["sys", SystemVariableKey.CONVERSATION_ID.value] + ["sys", SystemVariableKey.CONVERSATION_ID] ) if not isinstance(conversation_id_variable, StringSegment): return None @@ -479,7 +479,7 @@ class AgentNode(Node): if meta_version and Version(meta_version) > Version("0.0.1"): return tools else: - return [tool for tool in tools if tool.get("type") != ToolProviderType.MCP.value] + return [tool for tool in tools if tool.get("type") != ToolProviderType.MCP] def _transform_message( self, diff --git a/api/core/workflow/nodes/datasource/datasource_node.py b/api/core/workflow/nodes/datasource/datasource_node.py index 937f4c944f..e392cb5f5c 100644 --- a/api/core/workflow/nodes/datasource/datasource_node.py +++ b/api/core/workflow/nodes/datasource/datasource_node.py @@ -75,11 +75,11 @@ class DatasourceNode(Node): node_data = self._node_data variable_pool = self.graph_runtime_state.variable_pool - datasource_type_segement = variable_pool.get(["sys", SystemVariableKey.DATASOURCE_TYPE.value]) + datasource_type_segement = variable_pool.get(["sys", SystemVariableKey.DATASOURCE_TYPE]) if not datasource_type_segement: raise DatasourceNodeError("Datasource type is not set") datasource_type = str(datasource_type_segement.value) if datasource_type_segement.value else None - datasource_info_segement = variable_pool.get(["sys", SystemVariableKey.DATASOURCE_INFO.value]) + datasource_info_segement = variable_pool.get(["sys", SystemVariableKey.DATASOURCE_INFO]) if not datasource_info_segement: raise DatasourceNodeError("Datasource info is not set") datasource_info_value = datasource_info_segement.value @@ -267,7 +267,7 @@ class DatasourceNode(Node): return result def _fetch_files(self, variable_pool: VariablePool) -> list[File]: - variable = variable_pool.get(["sys", SystemVariableKey.FILES.value]) + variable = variable_pool.get(["sys", SystemVariableKey.FILES]) assert isinstance(variable, ArrayAnyVariable | ArrayAnySegment) return list(variable.value) if variable else [] diff --git a/api/core/workflow/nodes/http_request/node.py b/api/core/workflow/nodes/http_request/node.py index 20e1337ea7..55dec3fb08 100644 --- 
a/api/core/workflow/nodes/http_request/node.py +++ b/api/core/workflow/nodes/http_request/node.py @@ -234,7 +234,7 @@ class HttpRequestNode(Node): mapping = { "tool_file_id": tool_file.id, - "transfer_method": FileTransferMethod.TOOL_FILE.value, + "transfer_method": FileTransferMethod.TOOL_FILE, } file = file_factory.build_from_mapping( mapping=mapping, diff --git a/api/core/workflow/nodes/iteration/iteration_node.py b/api/core/workflow/nodes/iteration/iteration_node.py index 965e22b74c..c089a68bd4 100644 --- a/api/core/workflow/nodes/iteration/iteration_node.py +++ b/api/core/workflow/nodes/iteration/iteration_node.py @@ -95,7 +95,7 @@ class IterationNode(Node): "config": { "is_parallel": False, "parallel_nums": 10, - "error_handle_mode": ErrorHandleMode.TERMINATED.value, + "error_handle_mode": ErrorHandleMode.TERMINATED, }, } diff --git a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py index 05e0c7707a..2751f24048 100644 --- a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py +++ b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py @@ -27,7 +27,7 @@ from .exc import ( logger = logging.getLogger(__name__) default_retrieval_model = { - "search_method": RetrievalMethod.SEMANTIC_SEARCH.value, + "search_method": RetrievalMethod.SEMANTIC_SEARCH, "reranking_enable": False, "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""}, "top_k": 2, @@ -77,7 +77,7 @@ class KnowledgeIndexNode(Node): raise KnowledgeIndexNodeError("Index chunk variable is required.") invoke_from = variable_pool.get(["sys", SystemVariableKey.INVOKE_FROM]) if invoke_from: - is_preview = invoke_from.value == InvokeFrom.DEBUGGER.value + is_preview = invoke_from.value == InvokeFrom.DEBUGGER else: is_preview = False chunks = variable.value diff --git a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py index b6128d3eab..7091b62463 100644 --- a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py +++ b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py @@ -72,7 +72,7 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) default_retrieval_model = { - "search_method": RetrievalMethod.SEMANTIC_SEARCH.value, + "search_method": RetrievalMethod.SEMANTIC_SEARCH, "reranking_enable": False, "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""}, "top_k": 4, diff --git a/api/core/workflow/nodes/llm/llm_utils.py b/api/core/workflow/nodes/llm/llm_utils.py index ad969cdad1..aff84433b2 100644 --- a/api/core/workflow/nodes/llm/llm_utils.py +++ b/api/core/workflow/nodes/llm/llm_utils.py @@ -92,7 +92,7 @@ def fetch_memory( return None # get conversation id - conversation_id_variable = variable_pool.get(["sys", SystemVariableKey.CONVERSATION_ID.value]) + conversation_id_variable = variable_pool.get(["sys", SystemVariableKey.CONVERSATION_ID]) if not isinstance(conversation_id_variable, StringSegment): return None conversation_id = conversation_id_variable.value @@ -143,7 +143,7 @@ def deduct_llm_quota(tenant_id: str, model_instance: ModelInstance, usage: LLMUs Provider.tenant_id == tenant_id, # TODO: Use provider name with prefix after the data migration. 
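# The same str-enum property is what allows a bare member inside this
# SQLAlchemy filter: assuming ProviderType is a StrEnum, ProviderType.SYSTEM
# is a str subclass, so the driver binds the raw value ("system") exactly as
# the old ProviderType.SYSTEM.value form did. The quota_type comparison on
# the next line keeps `.value`, presumably because current_quota_type is not
# a str-based enum.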
Provider.provider_name == ModelProviderID(model_instance.provider).provider_name, - Provider.provider_type == ProviderType.SYSTEM.value, + Provider.provider_type == ProviderType.SYSTEM, Provider.quota_type == system_configuration.current_quota_type.value, Provider.quota_limit > Provider.quota_used, ) diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 4742476352..13f6d904e6 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -945,7 +945,7 @@ class LLMNode(Node): variable_mapping["#files#"] = typed_node_data.vision.configs.variable_selector if typed_node_data.memory: - variable_mapping["#sys.query#"] = ["sys", SystemVariableKey.QUERY.value] + variable_mapping["#sys.query#"] = ["sys", SystemVariableKey.QUERY] if typed_node_data.prompt_config: enable_jinja = False diff --git a/api/core/workflow/nodes/tool/tool_node.py b/api/core/workflow/nodes/tool/tool_node.py index ce1a879ff1..cd0094f531 100644 --- a/api/core/workflow/nodes/tool/tool_node.py +++ b/api/core/workflow/nodes/tool/tool_node.py @@ -224,7 +224,7 @@ class ToolNode(Node): return result def _fetch_files(self, variable_pool: "VariablePool") -> list[File]: - variable = variable_pool.get(["sys", SystemVariableKey.FILES.value]) + variable = variable_pool.get(["sys", SystemVariableKey.FILES]) assert isinstance(variable, ArrayAnyVariable | ArrayAnySegment) return list(variable.value) if variable else [] diff --git a/api/core/workflow/workflow_entry.py b/api/core/workflow/workflow_entry.py index 3801dfe15d..4cd885cfa5 100644 --- a/api/core/workflow/workflow_entry.py +++ b/api/core/workflow/workflow_entry.py @@ -227,7 +227,7 @@ class WorkflowEntry: "height": node_height, "type": "custom", "data": { - "type": NodeType.START.value, + "type": NodeType.START, "title": "Start", "desc": "Start", }, diff --git a/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py b/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py index 21b73b76b5..1b44d8a1e2 100644 --- a/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py +++ b/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py @@ -12,7 +12,7 @@ def handle(sender, **kwargs): if synced_draft_workflow is None: return for node_data in synced_draft_workflow.graph_dict.get("nodes", []): - if node_data.get("data", {}).get("type") == NodeType.TOOL.value: + if node_data.get("data", {}).get("type") == NodeType.TOOL: try: tool_entity = ToolEntity.model_validate(node_data["data"]) tool_runtime = ToolManager.get_tool_runtime( diff --git a/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py b/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py index 7605d4082c..53e0065f6e 100644 --- a/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py +++ b/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py @@ -53,7 +53,7 @@ def get_dataset_ids_from_workflow(published_workflow: Workflow) -> set[str]: # fetch all knowledge retrieval nodes knowledge_retrieval_nodes = [ - node for node in nodes if node.get("data", {}).get("type") == NodeType.KNOWLEDGE_RETRIEVAL.value + node for node in nodes if node.get("data", {}).get("type") == NodeType.KNOWLEDGE_RETRIEVAL ] if not knowledge_retrieval_nodes: diff --git a/api/events/event_handlers/update_provider_when_message_created.py 
b/api/events/event_handlers/update_provider_when_message_created.py index 27efa539dc..c0694d4efe 100644 --- a/api/events/event_handlers/update_provider_when_message_created.py +++ b/api/events/event_handlers/update_provider_when_message_created.py @@ -139,7 +139,7 @@ def handle(sender: Message, **kwargs): filters=_ProviderUpdateFilters( tenant_id=tenant_id, provider_name=ModelProviderID(model_config.provider).provider_name, - provider_type=ProviderType.SYSTEM.value, + provider_type=ProviderType.SYSTEM, quota_type=provider_configuration.system_configuration.current_quota_type.value, ), values=_ProviderUpdateValues(quota_used=Provider.quota_used + used_quota, last_used=current_time), diff --git a/api/extensions/storage/clickzetta_volume/file_lifecycle.py b/api/extensions/storage/clickzetta_volume/file_lifecycle.py index 6ab02ad8cc..dc5aa8e39c 100644 --- a/api/extensions/storage/clickzetta_volume/file_lifecycle.py +++ b/api/extensions/storage/clickzetta_volume/file_lifecycle.py @@ -264,7 +264,7 @@ class FileLifecycleManager: logger.warning("File %s not found in metadata", filename) return False - metadata_dict[filename]["status"] = FileStatus.ARCHIVED.value + metadata_dict[filename]["status"] = FileStatus.ARCHIVED metadata_dict[filename]["modified_at"] = datetime.now().isoformat() self._save_metadata(metadata_dict) @@ -309,7 +309,7 @@ class FileLifecycleManager: # Update metadata metadata_dict = self._load_metadata() if filename in metadata_dict: - metadata_dict[filename]["status"] = FileStatus.DELETED.value + metadata_dict[filename]["status"] = FileStatus.DELETED metadata_dict[filename]["modified_at"] = datetime.now().isoformat() self._save_metadata(metadata_dict) diff --git a/api/factories/file_factory.py b/api/factories/file_factory.py index d66c757249..69fd1a6da3 100644 --- a/api/factories/file_factory.py +++ b/api/factories/file_factory.py @@ -45,7 +45,7 @@ def build_from_message_file( } # Set the correct ID field based on transfer method - if message_file.transfer_method == FileTransferMethod.TOOL_FILE.value: + if message_file.transfer_method == FileTransferMethod.TOOL_FILE: mapping["tool_file_id"] = message_file.upload_file_id else: mapping["upload_file_id"] = message_file.upload_file_id @@ -368,9 +368,7 @@ def _build_from_datasource_file( if strict_type_validation and specified_type and detected_file_type.value != specified_type: raise ValueError("Detected file type does not match the specified type. 
Please verify the file.") - file_type = ( - FileType(specified_type) if specified_type and specified_type != FileType.CUSTOM.value else detected_file_type - ) + file_type = FileType(specified_type) if specified_type and specified_type != FileType.CUSTOM else detected_file_type return File( id=mapping.get("datasource_file_id"), diff --git a/api/models/api_based_extension.py b/api/models/api_based_extension.py index 60167d9069..e86826fc3d 100644 --- a/api/models/api_based_extension.py +++ b/api/models/api_based_extension.py @@ -9,7 +9,7 @@ from .base import Base from .types import StringUUID -class APIBasedExtensionPoint(enum.Enum): +class APIBasedExtensionPoint(enum.StrEnum): APP_EXTERNAL_DATA_TOOL_QUERY = "app.external_data_tool.query" PING = "ping" APP_MODERATION_INPUT = "app.moderation.input" diff --git a/api/models/dataset.py b/api/models/dataset.py index 1e1d267921..5653445f2b 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -184,7 +184,7 @@ class Dataset(Base): @property def retrieval_model_dict(self): default_retrieval_model = { - "search_method": RetrievalMethod.SEMANTIC_SEARCH.value, + "search_method": RetrievalMethod.SEMANTIC_SEARCH, "reranking_enable": False, "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""}, "top_k": 2, diff --git a/api/models/model.py b/api/models/model.py index a8218c3a4e..18958c8253 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -186,13 +186,13 @@ class App(Base): if len(keys) >= 4: provider_type = tool.get("provider_type", "") provider_id = tool.get("provider_id", "") - if provider_type == ToolProviderType.API.value: + if provider_type == ToolProviderType.API: try: uuid.UUID(provider_id) except Exception: continue api_provider_ids.append(provider_id) - if provider_type == ToolProviderType.BUILT_IN.value: + if provider_type == ToolProviderType.BUILT_IN: try: # check if it's hardcoded try: @@ -251,23 +251,23 @@ class App(Base): provider_type = tool.get("provider_type", "") provider_id = tool.get("provider_id", "") - if provider_type == ToolProviderType.API.value: + if provider_type == ToolProviderType.API: if uuid.UUID(provider_id) not in existing_api_providers: deleted_tools.append( { - "type": ToolProviderType.API.value, + "type": ToolProviderType.API, "tool_name": tool["tool_name"], "provider_id": provider_id, } ) - if provider_type == ToolProviderType.BUILT_IN.value: + if provider_type == ToolProviderType.BUILT_IN: generic_provider_id = GenericProviderID(provider_id) if not existing_builtin_providers[generic_provider_id.provider_name]: deleted_tools.append( { - "type": ToolProviderType.BUILT_IN.value, + "type": ToolProviderType.BUILT_IN, "tool_name": tool["tool_name"], "provider_id": provider_id, # use the original one } @@ -1154,7 +1154,7 @@ class Message(Base): files: list[File] = [] for message_file in message_files: - if message_file.transfer_method == FileTransferMethod.LOCAL_FILE.value: + if message_file.transfer_method == FileTransferMethod.LOCAL_FILE: if message_file.upload_file_id is None: raise ValueError(f"MessageFile {message_file.id} is a local file but has no upload_file_id") file = file_factory.build_from_mapping( @@ -1166,7 +1166,7 @@ class Message(Base): }, tenant_id=current_app.tenant_id, ) - elif message_file.transfer_method == FileTransferMethod.REMOTE_URL.value: + elif message_file.transfer_method == FileTransferMethod.REMOTE_URL: if message_file.url is None: raise ValueError(f"MessageFile {message_file.id} is a remote url but has no url") file = 
file_factory.build_from_mapping( @@ -1179,7 +1179,7 @@ class Message(Base): }, tenant_id=current_app.tenant_id, ) - elif message_file.transfer_method == FileTransferMethod.TOOL_FILE.value: + elif message_file.transfer_method == FileTransferMethod.TOOL_FILE: if message_file.upload_file_id is None: assert message_file.url is not None message_file.upload_file_id = message_file.url.split("/")[-1].split(".")[0] diff --git a/api/models/provider.py b/api/models/provider.py index aacc6e505a..f6852d49f4 100644 --- a/api/models/provider.py +++ b/api/models/provider.py @@ -107,7 +107,7 @@ class Provider(Base): """ Returns True if the provider is enabled. """ - if self.provider_type == ProviderType.SYSTEM.value: + if self.provider_type == ProviderType.SYSTEM: return self.is_valid else: return self.is_valid and self.token_is_set diff --git a/api/models/workflow.py b/api/models/workflow.py index 877f571f25..b898f02612 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -829,14 +829,14 @@ class WorkflowNodeExecutionModel(Base): # This model is expected to have `offlo if self.execution_metadata_dict: from core.workflow.nodes import NodeType - if self.node_type == NodeType.TOOL.value and "tool_info" in self.execution_metadata_dict: + if self.node_type == NodeType.TOOL and "tool_info" in self.execution_metadata_dict: tool_info: dict[str, Any] = self.execution_metadata_dict["tool_info"] extras["icon"] = ToolManager.get_tool_icon( tenant_id=self.tenant_id, provider_type=tool_info["provider_type"], provider_id=tool_info["provider_id"], ) - elif self.node_type == NodeType.DATASOURCE.value and "datasource_info" in self.execution_metadata_dict: + elif self.node_type == NodeType.DATASOURCE and "datasource_info" in self.execution_metadata_dict: datasource_info = self.execution_metadata_dict["datasource_info"] extras["icon"] = datasource_info.get("icon") return extras diff --git a/api/services/account_service.py b/api/services/account_service.py index 77b8744020..106bc0e77e 100644 --- a/api/services/account_service.py +++ b/api/services/account_service.py @@ -127,7 +127,7 @@ class AccountService: if not account: return None - if account.status == AccountStatus.BANNED.value: + if account.status == AccountStatus.BANNED: raise Unauthorized("Account is banned.") current_tenant = db.session.query(TenantAccountJoin).filter_by(account_id=account.id, current=True).first() @@ -178,7 +178,7 @@ class AccountService: if not account: raise AccountPasswordError("Invalid email or password.") - if account.status == AccountStatus.BANNED.value: + if account.status == AccountStatus.BANNED: raise AccountLoginError("Account is banned.") if password and invite_token and account.password is None: @@ -193,8 +193,8 @@ class AccountService: if account.password is None or not compare_password(password, account.password, account.password_salt): raise AccountPasswordError("Invalid email or password.") - if account.status == AccountStatus.PENDING.value: - account.status = AccountStatus.ACTIVE.value + if account.status == AccountStatus.PENDING: + account.status = AccountStatus.ACTIVE account.initialized_at = naive_utc_now() db.session.commit() @@ -357,7 +357,7 @@ class AccountService: @staticmethod def close_account(account: Account): """Close account""" - account.status = AccountStatus.CLOSED.value + account.status = AccountStatus.CLOSED db.session.commit() @staticmethod @@ -397,8 +397,8 @@ class AccountService: if ip_address: AccountService.update_login_info(account=account, ip_address=ip_address) - if account.status == 
AccountStatus.PENDING.value: - account.status = AccountStatus.ACTIVE.value + if account.status == AccountStatus.PENDING: + account.status = AccountStatus.ACTIVE db.session.commit() access_token = AccountService.get_account_jwt_token(account=account) @@ -766,7 +766,7 @@ class AccountService: if not account: return None - if account.status == AccountStatus.BANNED.value: + if account.status == AccountStatus.BANNED: raise Unauthorized("Account is banned.") return account @@ -1030,7 +1030,7 @@ class TenantService: @staticmethod def create_tenant_member(tenant: Tenant, account: Account, role: str = "normal") -> TenantAccountJoin: """Create tenant member""" - if role == TenantAccountRole.OWNER.value: + if role == TenantAccountRole.OWNER: if TenantService.has_roles(tenant, [TenantAccountRole.OWNER]): logger.error("Tenant %s has already an owner.", tenant.id) raise Exception("Tenant already has an owner.") @@ -1315,7 +1315,7 @@ class RegisterService: password=password, is_setup=is_setup, ) - account.status = AccountStatus.ACTIVE.value if not status else status.value + account.status = status or AccountStatus.ACTIVE account.initialized_at = naive_utc_now() if open_id is not None and provider is not None: @@ -1376,7 +1376,7 @@ class RegisterService: TenantService.create_tenant_member(tenant, account, role) # Support resend invitation email when the account is pending status - if account.status != AccountStatus.PENDING.value: + if account.status != AccountStatus.PENDING: raise AccountAlreadyInTenantError("Account already in tenant.") token = cls.generate_invite_token(tenant, account) diff --git a/api/services/app_dsl_service.py b/api/services/app_dsl_service.py index 129e3b0492..311f80bef6 100644 --- a/api/services/app_dsl_service.py +++ b/api/services/app_dsl_service.py @@ -494,7 +494,7 @@ class AppDslService: unique_hash = None graph = workflow_data.get("graph", {}) for node in graph.get("nodes", []): - if node.get("data", {}).get("type", "") == NodeType.KNOWLEDGE_RETRIEVAL.value: + if node.get("data", {}).get("type", "") == NodeType.KNOWLEDGE_RETRIEVAL: dataset_ids = node["data"].get("dataset_ids", []) node["data"]["dataset_ids"] = [ decrypted_id @@ -584,17 +584,17 @@ class AppDslService: if not node_data: continue data_type = node_data.get("type", "") - if data_type == NodeType.KNOWLEDGE_RETRIEVAL.value: + if data_type == NodeType.KNOWLEDGE_RETRIEVAL: dataset_ids = node_data.get("dataset_ids", []) node_data["dataset_ids"] = [ cls.encrypt_dataset_id(dataset_id=dataset_id, tenant_id=app_model.tenant_id) for dataset_id in dataset_ids ] # filter credential id from tool node - if not include_secret and data_type == NodeType.TOOL.value: + if not include_secret and data_type == NodeType.TOOL: node_data.pop("credential_id", None) # filter credential id from agent node - if not include_secret and data_type == NodeType.AGENT.value: + if not include_secret and data_type == NodeType.AGENT: for tool in node_data.get("agent_parameters", {}).get("tools", {}).get("value", []): tool.pop("credential_id", None) @@ -658,31 +658,31 @@ class AppDslService: try: typ = node.get("data", {}).get("type") match typ: - case NodeType.TOOL.value: + case NodeType.TOOL: tool_entity = ToolNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_tool_dependency(tool_entity.provider_id), ) - case NodeType.LLM.value: + case NodeType.LLM: llm_entity = LLMNodeData.model_validate(node["data"]) dependencies.append( 
DependenciesAnalysisService.analyze_model_provider_dependency(llm_entity.model.provider), ) - case NodeType.QUESTION_CLASSIFIER.value: + case NodeType.QUESTION_CLASSIFIER: question_classifier_entity = QuestionClassifierNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency( question_classifier_entity.model.provider ), ) - case NodeType.PARAMETER_EXTRACTOR.value: + case NodeType.PARAMETER_EXTRACTOR: parameter_extractor_entity = ParameterExtractorNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency( parameter_extractor_entity.model.provider ), ) - case NodeType.KNOWLEDGE_RETRIEVAL.value: + case NodeType.KNOWLEDGE_RETRIEVAL: knowledge_retrieval_entity = KnowledgeRetrievalNodeData.model_validate(node["data"]) if knowledge_retrieval_entity.retrieval_mode == "multiple": if knowledge_retrieval_entity.multiple_retrieval_config: diff --git a/api/services/datasource_provider_service.py b/api/services/datasource_provider_service.py index 89a5d89f61..36b7084973 100644 --- a/api/services/datasource_provider_service.py +++ b/api/services/datasource_provider_service.py @@ -646,7 +646,7 @@ class DatasourceProviderService: name=db_provider_name, provider=provider_name, plugin_id=plugin_id, - auth_type=CredentialType.API_KEY.value, + auth_type=CredentialType.API_KEY, encrypted_credentials=credentials, ) session.add(datasource_provider) @@ -674,7 +674,7 @@ class DatasourceProviderService: secret_input_form_variables = [] for credential_form_schema in credential_form_schemas: - if credential_form_schema.type.value == FormType.SECRET_INPUT.value: + if credential_form_schema.type.value == FormType.SECRET_INPUT: secret_input_form_variables.append(credential_form_schema.name) return secret_input_form_variables diff --git a/api/services/hit_testing_service.py b/api/services/hit_testing_service.py index 6174ce8b3b..aa29354a6e 100644 --- a/api/services/hit_testing_service.py +++ b/api/services/hit_testing_service.py @@ -15,7 +15,7 @@ from models.dataset import Dataset, DatasetQuery logger = logging.getLogger(__name__) default_retrieval_model = { - "search_method": RetrievalMethod.SEMANTIC_SEARCH.value, + "search_method": RetrievalMethod.SEMANTIC_SEARCH, "reranking_enable": False, "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""}, "top_k": 4, diff --git a/api/services/plugin/plugin_migration.py b/api/services/plugin/plugin_migration.py index 76bb9a57f9..dec92a6faa 100644 --- a/api/services/plugin/plugin_migration.py +++ b/api/services/plugin/plugin_migration.py @@ -242,7 +242,7 @@ class PluginMigration: if data.get("type") == "tool": provider_name = data.get("provider_name") provider_type = data.get("provider_type") - if provider_name not in excluded_providers and provider_type == ToolProviderType.BUILT_IN.value: + if provider_name not in excluded_providers and provider_type == ToolProviderType.BUILT_IN: result.append(ToolProviderID(provider_name).plugin_id) return result @@ -271,7 +271,7 @@ class PluginMigration: try: tool_entity = AgentToolEntity.model_validate(tool) if ( - tool_entity.provider_type == ToolProviderType.BUILT_IN.value + tool_entity.provider_type == ToolProviderType.BUILT_IN and tool_entity.provider_id not in excluded_providers ): result.append(ToolProviderID(tool_entity.provider_id).plugin_id) diff --git a/api/services/rag_pipeline/rag_pipeline.py b/api/services/rag_pipeline/rag_pipeline.py index 3ced0fd9ec..13c0ca7392 100644 --- 
a/api/services/rag_pipeline/rag_pipeline.py +++ b/api/services/rag_pipeline/rag_pipeline.py @@ -873,7 +873,7 @@ class RagPipelineService: variable_pool = node_instance.graph_runtime_state.variable_pool invoke_from = variable_pool.get(["sys", SystemVariableKey.INVOKE_FROM]) if invoke_from: - if invoke_from.value == InvokeFrom.PUBLISHED.value: + if invoke_from.value == InvokeFrom.PUBLISHED: document_id = variable_pool.get(["sys", SystemVariableKey.DOCUMENT_ID]) if document_id: document = db.session.query(Document).where(Document.id == document_id.value).first() diff --git a/api/services/rag_pipeline/rag_pipeline_dsl_service.py b/api/services/rag_pipeline/rag_pipeline_dsl_service.py index 9dede31ab4..c02fad4dc6 100644 --- a/api/services/rag_pipeline/rag_pipeline_dsl_service.py +++ b/api/services/rag_pipeline/rag_pipeline_dsl_service.py @@ -556,7 +556,7 @@ class RagPipelineDslService: graph = workflow_data.get("graph", {}) for node in graph.get("nodes", []): - if node.get("data", {}).get("type", "") == NodeType.KNOWLEDGE_RETRIEVAL.value: + if node.get("data", {}).get("type", "") == NodeType.KNOWLEDGE_RETRIEVAL: dataset_ids = node["data"].get("dataset_ids", []) node["data"]["dataset_ids"] = [ decrypted_id @@ -613,7 +613,7 @@ class RagPipelineDslService: tenant_id=pipeline.tenant_id, app_id=pipeline.id, features="{}", - type=WorkflowType.RAG_PIPELINE.value, + type=WorkflowType.RAG_PIPELINE, version="draft", graph=json.dumps(graph), created_by=account.id, @@ -689,17 +689,17 @@ class RagPipelineDslService: if not node_data: continue data_type = node_data.get("type", "") - if data_type == NodeType.KNOWLEDGE_RETRIEVAL.value: + if data_type == NodeType.KNOWLEDGE_RETRIEVAL: dataset_ids = node_data.get("dataset_ids", []) node["data"]["dataset_ids"] = [ self.encrypt_dataset_id(dataset_id=dataset_id, tenant_id=pipeline.tenant_id) for dataset_id in dataset_ids ] # filter credential id from tool node - if not include_secret and data_type == NodeType.TOOL.value: + if not include_secret and data_type == NodeType.TOOL: node_data.pop("credential_id", None) # filter credential id from agent node - if not include_secret and data_type == NodeType.AGENT.value: + if not include_secret and data_type == NodeType.AGENT: for tool in node_data.get("agent_parameters", {}).get("tools", {}).get("value", []): tool.pop("credential_id", None) @@ -733,35 +733,35 @@ class RagPipelineDslService: try: typ = node.get("data", {}).get("type") match typ: - case NodeType.TOOL.value: + case NodeType.TOOL: tool_entity = ToolNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_tool_dependency(tool_entity.provider_id), ) - case NodeType.DATASOURCE.value: + case NodeType.DATASOURCE: datasource_entity = DatasourceNodeData.model_validate(node["data"]) if datasource_entity.provider_type != "local_file": dependencies.append(datasource_entity.plugin_id) - case NodeType.LLM.value: + case NodeType.LLM: llm_entity = LLMNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency(llm_entity.model.provider), ) - case NodeType.QUESTION_CLASSIFIER.value: + case NodeType.QUESTION_CLASSIFIER: question_classifier_entity = QuestionClassifierNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency( question_classifier_entity.model.provider ), ) - case NodeType.PARAMETER_EXTRACTOR.value: + case NodeType.PARAMETER_EXTRACTOR: parameter_extractor_entity = 
ParameterExtractorNodeData.model_validate(node["data"]) dependencies.append( DependenciesAnalysisService.analyze_model_provider_dependency( parameter_extractor_entity.model.provider ), ) - case NodeType.KNOWLEDGE_INDEX.value: + case NodeType.KNOWLEDGE_INDEX: knowledge_index_entity = KnowledgeConfiguration.model_validate(node["data"]) if knowledge_index_entity.indexing_technique == "high_quality": if knowledge_index_entity.embedding_model_provider: @@ -782,7 +782,7 @@ class RagPipelineDslService: knowledge_index_entity.retrieval_model.reranking_model.reranking_provider_name ), ) - case NodeType.KNOWLEDGE_RETRIEVAL.value: + case NodeType.KNOWLEDGE_RETRIEVAL: knowledge_retrieval_entity = KnowledgeRetrievalNodeData.model_validate(node["data"]) if knowledge_retrieval_entity.retrieval_mode == "multiple": if knowledge_retrieval_entity.multiple_retrieval_config: @@ -927,7 +927,7 @@ class RagPipelineDslService: account = cast(Account, current_user) rag_pipeline_import_info: RagPipelineImportInfo = self.import_rag_pipeline( account=account, - import_mode=ImportMode.YAML_CONTENT.value, + import_mode=ImportMode.YAML_CONTENT, yaml_content=rag_pipeline_dataset_create_entity.yaml_content, dataset=None, dataset_name=rag_pipeline_dataset_create_entity.name, diff --git a/api/services/rag_pipeline/rag_pipeline_transform_service.py b/api/services/rag_pipeline/rag_pipeline_transform_service.py index b4425d85a6..39f426a2b0 100644 --- a/api/services/rag_pipeline/rag_pipeline_transform_service.py +++ b/api/services/rag_pipeline/rag_pipeline_transform_service.py @@ -214,7 +214,7 @@ class RagPipelineTransformService: tenant_id=pipeline.tenant_id, app_id=pipeline.id, features="{}", - type=WorkflowType.RAG_PIPELINE.value, + type=WorkflowType.RAG_PIPELINE, version="draft", graph=json.dumps(graph), created_by=current_user.id, @@ -226,7 +226,7 @@ class RagPipelineTransformService: tenant_id=pipeline.tenant_id, app_id=pipeline.id, features="{}", - type=WorkflowType.RAG_PIPELINE.value, + type=WorkflowType.RAG_PIPELINE, version=str(datetime.now(UTC).replace(tzinfo=None)), graph=json.dumps(graph), created_by=current_user.id, diff --git a/api/services/tools/api_tools_manage_service.py b/api/services/tools/api_tools_manage_service.py index f86d7e51bf..2c0c63f634 100644 --- a/api/services/tools/api_tools_manage_service.py +++ b/api/services/tools/api_tools_manage_service.py @@ -277,7 +277,7 @@ class ApiToolManageService: provider.icon = json.dumps(icon) provider.schema = schema provider.description = extra_info.get("description", "") - provider.schema_type_str = ApiProviderSchemaType.OPENAPI.value + provider.schema_type_str = ApiProviderSchemaType.OPENAPI provider.tools_str = json.dumps(jsonable_encoder(tool_bundles)) provider.privacy_policy = privacy_policy provider.custom_disclaimer = custom_disclaimer @@ -393,7 +393,7 @@ class ApiToolManageService: icon="", schema=schema, description="", - schema_type_str=ApiProviderSchemaType.OPENAPI.value, + schema_type_str=ApiProviderSchemaType.OPENAPI, tools_str=json.dumps(jsonable_encoder(tool_bundles)), credentials_str=json.dumps(credentials), ) diff --git a/api/services/tools/tools_transform_service.py b/api/services/tools/tools_transform_service.py index 7ae1b97b30..81b4d6993a 100644 --- a/api/services/tools/tools_transform_service.py +++ b/api/services/tools/tools_transform_service.py @@ -50,16 +50,16 @@ class ToolTransformService: URL(dify_config.CONSOLE_API_URL or "/") / "console" / "api" / "workspaces" / "current" / "tool-provider" ) - if provider_type == 
ToolProviderType.BUILT_IN.value: + if provider_type == ToolProviderType.BUILT_IN: return str(url_prefix / "builtin" / provider_name / "icon") - elif provider_type in {ToolProviderType.API.value, ToolProviderType.WORKFLOW.value}: + elif provider_type in {ToolProviderType.API, ToolProviderType.WORKFLOW}: try: if isinstance(icon, str): return json.loads(icon) return icon except Exception: return {"background": "#252525", "content": "\ud83d\ude01"} - elif provider_type == ToolProviderType.MCP.value: + elif provider_type == ToolProviderType.MCP: return icon return "" diff --git a/api/services/vector_service.py b/api/services/vector_service.py index 1c559f2c2b..abc92a0181 100644 --- a/api/services/vector_service.py +++ b/api/services/vector_service.py @@ -134,7 +134,7 @@ class VectorService: ) # use full doc mode to generate segment's child chunk processing_rule_dict = processing_rule.to_dict() - processing_rule_dict["rules"]["parent_mode"] = ParentMode.FULL_DOC.value + processing_rule_dict["rules"]["parent_mode"] = ParentMode.FULL_DOC documents = index_processor.transform( [document], embedding_model_instance=embedding_model_instance, diff --git a/api/services/webapp_auth_service.py b/api/services/webapp_auth_service.py index 066dc9d741..d30e14f7a1 100644 --- a/api/services/webapp_auth_service.py +++ b/api/services/webapp_auth_service.py @@ -36,7 +36,7 @@ class WebAppAuthService: if not account: raise AccountNotFoundError() - if account.status == AccountStatus.BANNED.value: + if account.status == AccountStatus.BANNED: raise AccountLoginError("Account is banned.") if account.password is None or not compare_password(password, account.password, account.password_salt): @@ -56,7 +56,7 @@ class WebAppAuthService: if not account: return None - if account.status == AccountStatus.BANNED.value: + if account.status == AccountStatus.BANNED: raise Unauthorized("Account is banned.") return account diff --git a/api/services/workflow/workflow_converter.py b/api/services/workflow/workflow_converter.py index ce7d16b3bd..9c09f54bf5 100644 --- a/api/services/workflow/workflow_converter.py +++ b/api/services/workflow/workflow_converter.py @@ -228,7 +228,7 @@ class WorkflowConverter: "position": None, "data": { "title": "START", - "type": NodeType.START.value, + "type": NodeType.START, "variables": [jsonable_encoder(v) for v in variables], }, } @@ -273,7 +273,7 @@ class WorkflowConverter: inputs[v.variable] = "{{#start." 
+ v.variable + "#}}" request_body = { - "point": APIBasedExtensionPoint.APP_EXTERNAL_DATA_TOOL_QUERY.value, + "point": APIBasedExtensionPoint.APP_EXTERNAL_DATA_TOOL_QUERY, "params": { "app_id": app_model.id, "tool_variable": tool_variable, @@ -290,7 +290,7 @@ class WorkflowConverter: "position": None, "data": { "title": f"HTTP REQUEST {api_based_extension.name}", - "type": NodeType.HTTP_REQUEST.value, + "type": NodeType.HTTP_REQUEST, "method": "post", "url": api_based_extension.api_endpoint, "authorization": {"type": "api-key", "config": {"type": "bearer", "api_key": api_key}}, @@ -308,7 +308,7 @@ class WorkflowConverter: "position": None, "data": { "title": f"Parse {api_based_extension.name} Response", - "type": NodeType.CODE.value, + "type": NodeType.CODE, "variables": [{"variable": "response_json", "value_selector": [http_request_node["id"], "body"]}], "code_language": "python3", "code": "import json\n\ndef main(response_json: str) -> str:\n response_body = json.loads(" @@ -348,7 +348,7 @@ class WorkflowConverter: "position": None, "data": { "title": "KNOWLEDGE RETRIEVAL", - "type": NodeType.KNOWLEDGE_RETRIEVAL.value, + "type": NodeType.KNOWLEDGE_RETRIEVAL, "query_variable_selector": query_variable_selector, "dataset_ids": dataset_config.dataset_ids, "retrieval_mode": retrieve_config.retrieve_strategy.value, @@ -396,16 +396,16 @@ class WorkflowConverter: :param external_data_variable_node_mapping: external data variable node mapping """ # fetch start and knowledge retrieval node - start_node = next(filter(lambda n: n["data"]["type"] == NodeType.START.value, graph["nodes"])) + start_node = next(filter(lambda n: n["data"]["type"] == NodeType.START, graph["nodes"])) knowledge_retrieval_node = next( - filter(lambda n: n["data"]["type"] == NodeType.KNOWLEDGE_RETRIEVAL.value, graph["nodes"]), None + filter(lambda n: n["data"]["type"] == NodeType.KNOWLEDGE_RETRIEVAL, graph["nodes"]), None ) role_prefix = None prompts: Any | None = None # Chat Model - if model_config.mode == LLMMode.CHAT.value: + if model_config.mode == LLMMode.CHAT: if prompt_template.prompt_type == PromptTemplateEntity.PromptType.SIMPLE: if not prompt_template.simple_prompt_template: raise ValueError("Simple prompt template is required") @@ -517,7 +517,7 @@ class WorkflowConverter: "position": None, "data": { "title": "LLM", - "type": NodeType.LLM.value, + "type": NodeType.LLM, "model": { "provider": model_config.provider, "name": model_config.model, @@ -572,7 +572,7 @@ class WorkflowConverter: "position": None, "data": { "title": "END", - "type": NodeType.END.value, + "type": NodeType.END, "outputs": [{"variable": "result", "value_selector": ["llm", "text"]}], }, } @@ -586,7 +586,7 @@ class WorkflowConverter: return { "id": "answer", "position": None, - "data": {"title": "ANSWER", "type": NodeType.ANSWER.value, "answer": "{{#llm.text#}}"}, + "data": {"title": "ANSWER", "type": NodeType.ANSWER, "answer": "{{#llm.text#}}"}, } def _create_edge(self, source: str, target: str): diff --git a/api/services/workflow_draft_variable_service.py b/api/services/workflow_draft_variable_service.py index 1378c20128..344b7486ee 100644 --- a/api/services/workflow_draft_variable_service.py +++ b/api/services/workflow_draft_variable_service.py @@ -569,7 +569,7 @@ class WorkflowDraftVariableService: system_instruction="", system_instruction_tokens=0, status="normal", - invoke_from=InvokeFrom.DEBUGGER.value, + invoke_from=InvokeFrom.DEBUGGER, from_source="console", from_end_user_id=None, from_account_id=account_id, diff --git 
a/api/services/workflow_run_service.py b/api/services/workflow_run_service.py index 79d91cab4c..6a2edd912a 100644 --- a/api/services/workflow_run_service.py +++ b/api/services/workflow_run_service.py @@ -74,7 +74,7 @@ class WorkflowRunService: return self._workflow_run_repo.get_paginated_workflow_runs( tenant_id=app_model.tenant_id, app_id=app_model.id, - triggered_from=WorkflowRunTriggeredFrom.DEBUGGING.value, + triggered_from=WorkflowRunTriggeredFrom.DEBUGGING, limit=limit, last_id=last_id, ) diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index 359fdb85fd..dea6a657a4 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -1006,7 +1006,7 @@ def _setup_variable_pool( ) # Only add chatflow-specific variables for non-workflow types - if workflow.type != WorkflowType.WORKFLOW.value: + if workflow.type != WorkflowType.WORKFLOW: system_variable.query = query system_variable.conversation_id = conversation_id system_variable.dialogue_count = 1 diff --git a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py index 4d1c1227bd..498ac56d5d 100644 --- a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py @@ -25,7 +25,7 @@ class TestChatMessageApiPermissions: """Create a mock App model for testing.""" app = App() app.id = str(uuid.uuid4()) - app.mode = AppMode.CHAT.value + app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) app.status = "normal" return app diff --git a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py index e158f26f3a..04945e57a0 100644 --- a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py @@ -23,7 +23,7 @@ class TestModelConfigResourcePermissions: """Create a mock App model for testing.""" app = App() app.id = str(uuid.uuid4()) - app.mode = AppMode.CHAT.value + app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) app.status = "normal" app.app_model_config_id = str(uuid.uuid4()) diff --git a/api/tests/integration_tests/services/test_workflow_draft_variable_service.py b/api/tests/integration_tests/services/test_workflow_draft_variable_service.py index aeee882750..f3a5ba0d11 100644 --- a/api/tests/integration_tests/services/test_workflow_draft_variable_service.py +++ b/api/tests/integration_tests/services/test_workflow_draft_variable_service.py @@ -542,7 +542,7 @@ class TestWorkflowDraftVariableServiceResetVariable(unittest.TestCase): index=1, node_execution_id=str(uuid.uuid4()), node_id=self._node_id, - node_type=NodeType.LLM.value, + node_type=NodeType.LLM, title="Test Node", inputs='{"input": "test input"}', process_data='{"test_var": "process_value", "other_var": "other_process"}', diff --git a/api/tests/integration_tests/vdb/__mock/huaweicloudvectordb.py b/api/tests/integration_tests/vdb/__mock/huaweicloudvectordb.py index 9706c52455..9e24672317 100644 --- a/api/tests/integration_tests/vdb/__mock/huaweicloudvectordb.py +++ b/api/tests/integration_tests/vdb/__mock/huaweicloudvectordb.py @@ -44,25 +44,25 @@ class MockClient: "hits": [ { "_source": { - Field.CONTENT_KEY.value: "abcdef", - Field.VECTOR.value: [1, 2], - 
Field.METADATA_KEY.value: {}, + Field.CONTENT_KEY: "abcdef", + Field.VECTOR: [1, 2], + Field.METADATA_KEY: {}, }, "_score": 1.0, }, { "_source": { - Field.CONTENT_KEY.value: "123456", - Field.VECTOR.value: [2, 2], - Field.METADATA_KEY.value: {}, + Field.CONTENT_KEY: "123456", + Field.VECTOR: [2, 2], + Field.METADATA_KEY: {}, }, "_score": 0.9, }, { "_source": { - Field.CONTENT_KEY.value: "a1b2c3", - Field.VECTOR.value: [3, 2], - Field.METADATA_KEY.value: {}, + Field.CONTENT_KEY: "a1b2c3", + Field.VECTOR: [3, 2], + Field.METADATA_KEY: {}, }, "_score": 0.8, }, diff --git a/api/tests/integration_tests/vdb/__mock/vikingdb.py b/api/tests/integration_tests/vdb/__mock/vikingdb.py index 3ad72e5550..f351df8d5b 100644 --- a/api/tests/integration_tests/vdb/__mock/vikingdb.py +++ b/api/tests/integration_tests/vdb/__mock/vikingdb.py @@ -40,13 +40,13 @@ class MockVikingDBClass: collection_name=collection_name, description="Collection For Dify", viking_db_service=self._viking_db_service, - primary_key=vdb_Field.PRIMARY_KEY.value, + primary_key=vdb_Field.PRIMARY_KEY, fields=[ - Field(field_name=vdb_Field.PRIMARY_KEY.value, field_type=FieldType.String, is_primary_key=True), - Field(field_name=vdb_Field.METADATA_KEY.value, field_type=FieldType.String), - Field(field_name=vdb_Field.GROUP_KEY.value, field_type=FieldType.String), - Field(field_name=vdb_Field.CONTENT_KEY.value, field_type=FieldType.Text), - Field(field_name=vdb_Field.VECTOR.value, field_type=FieldType.Vector, dim=768), + Field(field_name=vdb_Field.PRIMARY_KEY, field_type=FieldType.String, is_primary_key=True), + Field(field_name=vdb_Field.METADATA_KEY, field_type=FieldType.String), + Field(field_name=vdb_Field.GROUP_KEY, field_type=FieldType.String), + Field(field_name=vdb_Field.CONTENT_KEY, field_type=FieldType.Text), + Field(field_name=vdb_Field.VECTOR, field_type=FieldType.Vector, dim=768), ], indexes=[ Index( @@ -71,7 +71,7 @@ class MockVikingDBClass: return Collection( collection_name=collection_name, description=description, - primary_key=vdb_Field.PRIMARY_KEY.value, + primary_key=vdb_Field.PRIMARY_KEY, viking_db_service=self._viking_db_service, fields=fields, ) @@ -126,11 +126,11 @@ class MockVikingDBClass: def fetch_data(self, id: Union[str, list[str], int, list[int]]): return Data( fields={ - vdb_Field.GROUP_KEY.value: "test_group", - vdb_Field.METADATA_KEY.value: "{}", - vdb_Field.CONTENT_KEY.value: "content", - vdb_Field.PRIMARY_KEY.value: id, - vdb_Field.VECTOR.value: [-0.00762577411336441, -0.01949881482151406, 0.008832383941428398], + vdb_Field.GROUP_KEY: "test_group", + vdb_Field.METADATA_KEY: "{}", + vdb_Field.CONTENT_KEY: "content", + vdb_Field.PRIMARY_KEY: id, + vdb_Field.VECTOR: [-0.00762577411336441, -0.01949881482151406, 0.008832383941428398], }, id=id, ) @@ -151,16 +151,16 @@ class MockVikingDBClass: return [ Data( fields={ - vdb_Field.GROUP_KEY.value: "test_group", - vdb_Field.METADATA_KEY.value: '\ + vdb_Field.GROUP_KEY: "test_group", + vdb_Field.METADATA_KEY: '\ {"source": "/var/folders/ml/xxx/xxx.txt", \ "document_id": "test_document_id", \ "dataset_id": "test_dataset_id", \ "doc_id": "test_id", \ "doc_hash": "test_hash"}', - vdb_Field.CONTENT_KEY.value: "content", - vdb_Field.PRIMARY_KEY.value: "test_id", - vdb_Field.VECTOR.value: vector, + vdb_Field.CONTENT_KEY: "content", + vdb_Field.PRIMARY_KEY: "test_id", + vdb_Field.VECTOR: vector, }, id="test_id", score=0.10, @@ -173,16 +173,16 @@ class MockVikingDBClass: return [ Data( fields={ - vdb_Field.GROUP_KEY.value: "test_group", - vdb_Field.METADATA_KEY.value: '\ + 
vdb_Field.GROUP_KEY: "test_group", + vdb_Field.METADATA_KEY: '\ {"source": "/var/folders/ml/xxx/xxx.txt", \ "document_id": "test_document_id", \ "dataset_id": "test_dataset_id", \ "doc_id": "test_id", \ "doc_hash": "test_hash"}', - vdb_Field.CONTENT_KEY.value: "content", - vdb_Field.PRIMARY_KEY.value: "test_id", - vdb_Field.VECTOR.value: [-0.00762577411336441, -0.01949881482151406, 0.008832383941428398], + vdb_Field.CONTENT_KEY: "content", + vdb_Field.PRIMARY_KEY: "test_id", + vdb_Field.VECTOR: [-0.00762577411336441, -0.01949881482151406, 0.008832383941428398], }, id="test_id", score=0.10, diff --git a/api/tests/integration_tests/vdb/opensearch/test_opensearch.py b/api/tests/integration_tests/vdb/opensearch/test_opensearch.py index 2d44dd2924..192c995ce5 100644 --- a/api/tests/integration_tests/vdb/opensearch/test_opensearch.py +++ b/api/tests/integration_tests/vdb/opensearch/test_opensearch.py @@ -129,8 +129,8 @@ class TestOpenSearchVector: "hits": [ { "_source": { - Field.CONTENT_KEY.value: get_example_text(), - Field.METADATA_KEY.value: {"document_id": self.example_doc_id}, + Field.CONTENT_KEY: get_example_text(), + Field.METADATA_KEY: {"document_id": self.example_doc_id}, }, "_score": 1.0, } diff --git a/api/tests/test_containers_integration_tests/services/test_account_service.py b/api/tests/test_containers_integration_tests/services/test_account_service.py index 0a2fb955ae..6eff73a8f3 100644 --- a/api/tests/test_containers_integration_tests/services/test_account_service.py +++ b/api/tests/test_containers_integration_tests/services/test_account_service.py @@ -64,7 +64,7 @@ class TestAccountService: password=password, ) assert account.email == email - assert account.status == AccountStatus.ACTIVE.value + assert account.status == AccountStatus.ACTIVE # Login with correct password logged_in = AccountService.authenticate(email, password) @@ -185,7 +185,7 @@ class TestAccountService: ) # Ban the account - account.status = AccountStatus.BANNED.value + account.status = AccountStatus.BANNED from extensions.ext_database import db db.session.commit() @@ -269,14 +269,14 @@ class TestAccountService: interface_language="en-US", password=password, ) - account.status = AccountStatus.PENDING.value + account.status = AccountStatus.PENDING from extensions.ext_database import db db.session.commit() # Authenticate should activate the account authenticated_account = AccountService.authenticate(email, password) - assert authenticated_account.status == AccountStatus.ACTIVE.value + assert authenticated_account.status == AccountStatus.ACTIVE assert authenticated_account.initialized_at is not None def test_update_account_password_success(self, db_session_with_containers, mock_external_service_dependencies): @@ -539,7 +539,7 @@ class TestAccountService: from extensions.ext_database import db db.session.refresh(account) - assert account.status == AccountStatus.CLOSED.value + assert account.status == AccountStatus.CLOSED def test_update_account_fields(self, db_session_with_containers, mock_external_service_dependencies): """ @@ -679,7 +679,7 @@ class TestAccountService: interface_language="en-US", password=password, ) - account.status = AccountStatus.PENDING.value + account.status = AccountStatus.PENDING from extensions.ext_database import db db.session.commit() @@ -688,7 +688,7 @@ class TestAccountService: token_pair = AccountService.login(account) db.session.refresh(account) - assert account.status == AccountStatus.ACTIVE.value + assert account.status == AccountStatus.ACTIVE def test_logout(self, 
db_session_with_containers, mock_external_service_dependencies): """ @@ -860,7 +860,7 @@ class TestAccountService: ) # Ban the account - account.status = AccountStatus.BANNED.value + account.status = AccountStatus.BANNED from extensions.ext_database import db db.session.commit() @@ -990,7 +990,7 @@ class TestAccountService: ) # Ban the account - account.status = AccountStatus.BANNED.value + account.status = AccountStatus.BANNED from extensions.ext_database import db db.session.commit() diff --git a/api/tests/test_containers_integration_tests/services/test_file_service.py b/api/tests/test_containers_integration_tests/services/test_file_service.py index 5598c5bc0c..e6bfc157c7 100644 --- a/api/tests/test_containers_integration_tests/services/test_file_service.py +++ b/api/tests/test_containers_integration_tests/services/test_file_service.py @@ -86,7 +86,7 @@ class TestFileService: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) @@ -187,7 +187,7 @@ class TestFileService: assert upload_file.extension == "pdf" assert upload_file.mime_type == mimetype assert upload_file.created_by == account.id - assert upload_file.created_by_role == CreatorUserRole.ACCOUNT.value + assert upload_file.created_by_role == CreatorUserRole.ACCOUNT assert upload_file.used is False assert upload_file.hash == hashlib.sha3_256(content).hexdigest() @@ -216,7 +216,7 @@ class TestFileService: assert upload_file is not None assert upload_file.created_by == end_user.id - assert upload_file.created_by_role == CreatorUserRole.END_USER.value + assert upload_file.created_by_role == CreatorUserRole.END_USER def test_upload_file_with_datasets_source( self, db_session_with_containers, engine, mock_external_service_dependencies diff --git a/api/tests/test_containers_integration_tests/services/test_metadata_service.py b/api/tests/test_containers_integration_tests/services/test_metadata_service.py index d0f7e945f1..253791cc2d 100644 --- a/api/tests/test_containers_integration_tests/services/test_metadata_service.py +++ b/api/tests/test_containers_integration_tests/services/test_metadata_service.py @@ -72,7 +72,7 @@ class TestMetadataService: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) diff --git a/api/tests/test_containers_integration_tests/services/test_model_load_balancing_service.py b/api/tests/test_containers_integration_tests/services/test_model_load_balancing_service.py index 66527dd506..8a72331425 100644 --- a/api/tests/test_containers_integration_tests/services/test_model_load_balancing_service.py +++ b/api/tests/test_containers_integration_tests/services/test_model_load_balancing_service.py @@ -103,7 +103,7 @@ class TestModelLoadBalancingService: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) diff --git a/api/tests/test_containers_integration_tests/services/test_model_provider_service.py b/api/tests/test_containers_integration_tests/services/test_model_provider_service.py index 2196da8b3e..fb319a4963 100644 --- a/api/tests/test_containers_integration_tests/services/test_model_provider_service.py +++ b/api/tests/test_containers_integration_tests/services/test_model_provider_service.py @@ -67,7 +67,7 @@ class TestModelProviderService: join = 
TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) diff --git a/api/tests/test_containers_integration_tests/services/test_tag_service.py b/api/tests/test_containers_integration_tests/services/test_tag_service.py index 04cff397b2..3d1226019b 100644 --- a/api/tests/test_containers_integration_tests/services/test_tag_service.py +++ b/api/tests/test_containers_integration_tests/services/test_tag_service.py @@ -66,7 +66,7 @@ class TestTagService: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) diff --git a/api/tests/test_containers_integration_tests/services/test_web_conversation_service.py b/api/tests/test_containers_integration_tests/services/test_web_conversation_service.py index c9ace46c55..5db7901cbc 100644 --- a/api/tests/test_containers_integration_tests/services/test_web_conversation_service.py +++ b/api/tests/test_containers_integration_tests/services/test_web_conversation_service.py @@ -144,7 +144,7 @@ class TestWebConversationService: system_instruction=fake.text(max_nb_chars=300), system_instruction_tokens=50, status="normal", - invoke_from=InvokeFrom.WEB_APP.value, + invoke_from=InvokeFrom.WEB_APP, from_source="console" if isinstance(user, Account) else "api", from_end_user_id=user.id if isinstance(user, EndUser) else None, from_account_id=user.id if isinstance(user, Account) else None, diff --git a/api/tests/test_containers_integration_tests/services/test_webapp_auth_service.py b/api/tests/test_containers_integration_tests/services/test_webapp_auth_service.py index 316cfe1674..059767458a 100644 --- a/api/tests/test_containers_integration_tests/services/test_webapp_auth_service.py +++ b/api/tests/test_containers_integration_tests/services/test_webapp_auth_service.py @@ -87,7 +87,7 @@ class TestWebAppAuthService: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) @@ -150,7 +150,7 @@ class TestWebAppAuthService: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) @@ -232,7 +232,7 @@ class TestWebAppAuthService: assert result.id == account.id assert result.email == account.email assert result.name == account.name - assert result.status == AccountStatus.ACTIVE.value + assert result.status == AccountStatus.ACTIVE # Verify database state from extensions.ext_database import db @@ -280,7 +280,7 @@ class TestWebAppAuthService: email=fake.email(), name=fake.name(), interface_language="en-US", - status=AccountStatus.BANNED.value, + status=AccountStatus.BANNED, ) # Hash password @@ -411,7 +411,7 @@ class TestWebAppAuthService: assert result.id == account.id assert result.email == account.email assert result.name == account.name - assert result.status == AccountStatus.ACTIVE.value + assert result.status == AccountStatus.ACTIVE # Verify database state from extensions.ext_database import db @@ -455,7 +455,7 @@ class TestWebAppAuthService: email=unique_email, name=fake.name(), interface_language="en-US", - status=AccountStatus.BANNED.value, + status=AccountStatus.BANNED, ) from extensions.ext_database import db diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_app_service.py 
b/api/tests/test_containers_integration_tests/services/test_workflow_app_service.py index 2e18184aea..62c9bead86 100644 --- a/api/tests/test_containers_integration_tests/services/test_workflow_app_service.py +++ b/api/tests/test_containers_integration_tests/services/test_workflow_app_service.py @@ -199,7 +199,7 @@ class TestWorkflowAppService: elapsed_time=1.5, total_tokens=100, total_steps=3, - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC), finished_at=datetime.now(UTC), @@ -215,7 +215,7 @@ class TestWorkflowAppService: workflow_id=workflow.id, workflow_run_id=workflow_run.id, created_from="service-api", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC), ) @@ -356,7 +356,7 @@ class TestWorkflowAppService: elapsed_time=1.0 + i, total_tokens=100 + i * 10, total_steps=3, - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC) + timedelta(minutes=i), finished_at=datetime.now(UTC) + timedelta(minutes=i + 1) if status != "running" else None, @@ -371,7 +371,7 @@ class TestWorkflowAppService: workflow_id=workflow.id, workflow_run_id=workflow_run.id, created_from="service-api", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC) + timedelta(minutes=i), ) @@ -464,7 +464,7 @@ class TestWorkflowAppService: elapsed_time=1.0, total_tokens=100, total_steps=3, - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=timestamp, finished_at=timestamp + timedelta(minutes=1), @@ -479,7 +479,7 @@ class TestWorkflowAppService: workflow_id=workflow.id, workflow_run_id=workflow_run.id, created_from="service-api", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=timestamp, ) @@ -571,7 +571,7 @@ class TestWorkflowAppService: elapsed_time=1.0, total_tokens=100, total_steps=3, - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC) + timedelta(minutes=i), finished_at=datetime.now(UTC) + timedelta(minutes=i + 1), @@ -586,7 +586,7 @@ class TestWorkflowAppService: workflow_id=workflow.id, workflow_run_id=workflow_run.id, created_from="service-api", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC) + timedelta(minutes=i), ) @@ -701,7 +701,7 @@ class TestWorkflowAppService: elapsed_time=1.0, total_tokens=100, total_steps=3, - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC) + timedelta(minutes=i), finished_at=datetime.now(UTC) + timedelta(minutes=i + 1), @@ -716,7 +716,7 @@ class TestWorkflowAppService: workflow_id=workflow.id, workflow_run_id=workflow_run.id, created_from="service-api", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC) + timedelta(minutes=i), ) @@ -743,7 +743,7 @@ class TestWorkflowAppService: elapsed_time=1.0, total_tokens=100, total_steps=3, - created_by_role=CreatorUserRole.END_USER.value, + created_by_role=CreatorUserRole.END_USER, 
created_by=end_user.id, created_at=datetime.now(UTC) + timedelta(minutes=i + 10), finished_at=datetime.now(UTC) + timedelta(minutes=i + 11), @@ -758,7 +758,7 @@ class TestWorkflowAppService: workflow_id=workflow.id, workflow_run_id=workflow_run.id, created_from="web-app", - created_by_role=CreatorUserRole.END_USER.value, + created_by_role=CreatorUserRole.END_USER, created_by=end_user.id, created_at=datetime.now(UTC) + timedelta(minutes=i + 10), ) @@ -780,14 +780,14 @@ class TestWorkflowAppService: limit=20, ) assert result_session_filter["total"] == 2 - assert all(log.created_by_role == CreatorUserRole.END_USER.value for log in result_session_filter["data"]) + assert all(log.created_by_role == CreatorUserRole.END_USER for log in result_session_filter["data"]) # Test filtering by account email result_account_filter = service.get_paginate_workflow_app_logs( session=db_session_with_containers, app_model=app, created_by_account=account.email, page=1, limit=20 ) assert result_account_filter["total"] == 3 - assert all(log.created_by_role == CreatorUserRole.ACCOUNT.value for log in result_account_filter["data"]) + assert all(log.created_by_role == CreatorUserRole.ACCOUNT for log in result_account_filter["data"]) # Test filtering by non-existent session ID result_no_session = service.get_paginate_workflow_app_logs( @@ -853,7 +853,7 @@ class TestWorkflowAppService: elapsed_time=1.0, total_tokens=100, total_steps=3, - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC), finished_at=datetime.now(UTC) + timedelta(minutes=1), @@ -869,7 +869,7 @@ class TestWorkflowAppService: workflow_id=workflow.id, workflow_run_id=workflow_run.id, created_from="service-api", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC), ) @@ -943,7 +943,7 @@ class TestWorkflowAppService: elapsed_time=0.0, # Edge case: 0 elapsed time total_tokens=0, # Edge case: 0 tokens total_steps=0, # Edge case: 0 steps - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC), finished_at=datetime.now(UTC), @@ -959,7 +959,7 @@ class TestWorkflowAppService: workflow_id=workflow.id, workflow_run_id=workflow_run.id, created_from="service-api", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC), ) @@ -1098,7 +1098,7 @@ class TestWorkflowAppService: elapsed_time=1.5, total_tokens=100, total_steps=3, - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC) + timedelta(minutes=i), finished_at=datetime.now(UTC) + timedelta(minutes=i + 1) if status == "succeeded" else None, @@ -1113,7 +1113,7 @@ class TestWorkflowAppService: workflow_id=workflow.id, workflow_run_id=workflow_run.id, created_from="service-api", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC) + timedelta(minutes=i), ) @@ -1198,7 +1198,7 @@ class TestWorkflowAppService: elapsed_time=1.5, total_tokens=100, total_steps=3, - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC) + timedelta(minutes=i), finished_at=datetime.now(UTC) + timedelta(minutes=i + 1) if status !=
"running" else None, @@ -1213,7 +1213,7 @@ class TestWorkflowAppService: workflow_id=workflow.id, workflow_run_id=workflow_run.id, created_from="service-api", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC) + timedelta(minutes=i), ) @@ -1300,7 +1300,7 @@ class TestWorkflowAppService: elapsed_time=1.5, total_tokens=100, total_steps=3, - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC) + timedelta(minutes=i * 10 + j), finished_at=datetime.now(UTC) + timedelta(minutes=i * 10 + j + 1), @@ -1315,7 +1315,7 @@ class TestWorkflowAppService: workflow_id=workflow.id, workflow_run_id=workflow_run.id, created_from="service-api", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC) + timedelta(minutes=i * 10 + j), ) diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_run_service.py b/api/tests/test_containers_integration_tests/services/test_workflow_run_service.py index 4cb21ef6bd..23c4eeb82f 100644 --- a/api/tests/test_containers_integration_tests/services/test_workflow_run_service.py +++ b/api/tests/test_containers_integration_tests/services/test_workflow_run_service.py @@ -130,7 +130,7 @@ class TestWorkflowRunService: elapsed_time=1.5, total_tokens=100, total_steps=3, - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=created_time, finished_at=created_time, @@ -167,7 +167,7 @@ class TestWorkflowRunService: inputs={}, status="normal", mode="chat", - from_source=CreatorUserRole.ACCOUNT.value, + from_source=CreatorUserRole.ACCOUNT, from_account_id=account.id, ) db.session.add(conversation) @@ -188,7 +188,7 @@ class TestWorkflowRunService: message.answer_price_unit = 0.001 message.currency = "USD" message.status = "normal" - message.from_source = CreatorUserRole.ACCOUNT.value + message.from_source = CreatorUserRole.ACCOUNT message.from_account_id = account.id message.workflow_run_id = workflow_run.id message.inputs = {"input": "test input"} @@ -458,7 +458,7 @@ class TestWorkflowRunService: status="succeeded", elapsed_time=0.5, execution_metadata=json.dumps({"tokens": 50}), - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, created_at=datetime.now(UTC), ) @@ -689,7 +689,7 @@ class TestWorkflowRunService: status="succeeded", elapsed_time=0.5, execution_metadata=json.dumps({"tokens": 50}), - created_by_role=CreatorUserRole.END_USER.value, + created_by_role=CreatorUserRole.END_USER, created_by=end_user.id, created_at=datetime.now(UTC), ) @@ -710,4 +710,4 @@ class TestWorkflowRunService: assert node_exec.app_id == app.id assert node_exec.workflow_run_id == workflow_run.id assert node_exec.created_by == end_user.id - assert node_exec.created_by_role == CreatorUserRole.END_USER.value + assert node_exec.created_by_role == CreatorUserRole.END_USER diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_service.py b/api/tests/test_containers_integration_tests/services/test_workflow_service.py index 0dd3909ba7..4741eba1f5 100644 --- a/api/tests/test_containers_integration_tests/services/test_workflow_service.py +++ b/api/tests/test_containers_integration_tests/services/test_workflow_service.py @@ -130,7 +130,7 @@ class TestWorkflowService:
id=fake.uuid4(), tenant_id=app.tenant_id, app_id=app.id, - type=WorkflowType.WORKFLOW.value, + type=WorkflowType.WORKFLOW, version=Workflow.VERSION_DRAFT, graph=json.dumps({"nodes": [], "edges": []}), features=json.dumps({"features": []}), @@ -176,7 +176,7 @@ class TestWorkflowService: node_execution.node_type = "test_node" node_execution.title = "Test Node" # Required field node_execution.status = "succeeded" - node_execution.created_by_role = CreatorUserRole.ACCOUNT.value # Required field + node_execution.created_by_role = CreatorUserRole.ACCOUNT # Required field node_execution.created_by = account.id # Required field node_execution.created_at = fake.date_time_this_year() diff --git a/api/tests/test_containers_integration_tests/services/test_workspace_service.py b/api/tests/test_containers_integration_tests/services/test_workspace_service.py index 3fd439256d..814d1908bd 100644 --- a/api/tests/test_containers_integration_tests/services/test_workspace_service.py +++ b/api/tests/test_containers_integration_tests/services/test_workspace_service.py @@ -69,7 +69,7 @@ class TestWorkspaceService: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) @@ -111,7 +111,7 @@ class TestWorkspaceService: assert result["name"] == tenant.name assert result["plan"] == tenant.plan assert result["status"] == tenant.status - assert result["role"] == TenantAccountRole.OWNER.value + assert result["role"] == TenantAccountRole.OWNER assert result["created_at"] == tenant.created_at assert result["trial_end_reason"] is None @@ -159,7 +159,7 @@ class TestWorkspaceService: assert result["name"] == tenant.name assert result["plan"] == tenant.plan assert result["status"] == tenant.status - assert result["role"] == TenantAccountRole.OWNER.value + assert result["role"] == TenantAccountRole.OWNER assert result["created_at"] == tenant.created_at assert result["trial_end_reason"] is None @@ -194,7 +194,7 @@ class TestWorkspaceService: from extensions.ext_database import db join = db.session.query(TenantAccountJoin).filter_by(tenant_id=tenant.id, account_id=account.id).first() - join.role = TenantAccountRole.NORMAL.value + join.role = TenantAccountRole.NORMAL db.session.commit() # Setup mocks for feature service @@ -212,7 +212,7 @@ class TestWorkspaceService: assert result["name"] == tenant.name assert result["plan"] == tenant.plan assert result["status"] == tenant.status - assert result["role"] == TenantAccountRole.NORMAL.value + assert result["role"] == TenantAccountRole.NORMAL assert result["created_at"] == tenant.created_at assert result["trial_end_reason"] is None @@ -245,7 +245,7 @@ class TestWorkspaceService: from extensions.ext_database import db join = db.session.query(TenantAccountJoin).filter_by(tenant_id=tenant.id, account_id=account.id).first() - join.role = TenantAccountRole.ADMIN.value + join.role = TenantAccountRole.ADMIN db.session.commit() # Setup mocks for feature service and tenant service @@ -260,7 +260,7 @@ class TestWorkspaceService: # Assert: Verify the expected outcomes assert result is not None - assert result["role"] == TenantAccountRole.ADMIN.value + assert result["role"] == TenantAccountRole.ADMIN # Verify custom config is included for admin users assert "custom_config" in result @@ -378,7 +378,7 @@ class TestWorkspaceService: from extensions.ext_database import db join = db.session.query(TenantAccountJoin).filter_by(tenant_id=tenant.id, account_id=account.id).first() -
join.role = TenantAccountRole.EDITOR.value + join.role = TenantAccountRole.EDITOR db.session.commit() # Setup mocks for feature service and tenant service @@ -394,7 +394,7 @@ class TestWorkspaceService: # Assert: Verify the expected outcomes assert result is not None - assert result["role"] == TenantAccountRole.EDITOR.value + assert result["role"] == TenantAccountRole.EDITOR # Verify custom config is not included for editor users without admin privileges assert "custom_config" not in result @@ -425,7 +425,7 @@ class TestWorkspaceService: from extensions.ext_database import db join = db.session.query(TenantAccountJoin).filter_by(tenant_id=tenant.id, account_id=account.id).first() - join.role = TenantAccountRole.DATASET_OPERATOR.value + join.role = TenantAccountRole.DATASET_OPERATOR db.session.commit() # Setup mocks for feature service and tenant service @@ -441,7 +441,7 @@ class TestWorkspaceService: # Assert: Verify the expected outcomes assert result is not None - assert result["role"] == TenantAccountRole.DATASET_OPERATOR.value + assert result["role"] == TenantAccountRole.DATASET_OPERATOR # Verify custom config is not included for dataset operators without admin privileges assert "custom_config" not in result diff --git a/api/tests/test_containers_integration_tests/services/tools/test_api_tools_manage_service.py b/api/tests/test_containers_integration_tests/services/tools/test_api_tools_manage_service.py index a412bdccf8..7366b08439 100644 --- a/api/tests/test_containers_integration_tests/services/tools/test_api_tools_manage_service.py +++ b/api/tests/test_containers_integration_tests/services/tools/test_api_tools_manage_service.py @@ -72,7 +72,7 @@ class TestApiToolManageService: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) diff --git a/api/tests/test_containers_integration_tests/services/tools/test_mcp_tools_manage_service.py b/api/tests/test_containers_integration_tests/services/tools/test_mcp_tools_manage_service.py index dd22dcbfd1..f7a4c53318 100644 --- a/api/tests/test_containers_integration_tests/services/tools/test_mcp_tools_manage_service.py +++ b/api/tests/test_containers_integration_tests/services/tools/test_mcp_tools_manage_service.py @@ -72,7 +72,7 @@ class TestMCPToolManageService: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) diff --git a/api/tests/test_containers_integration_tests/services/tools/test_tools_transform_service.py b/api/tests/test_containers_integration_tests/services/tools/test_tools_transform_service.py index 827f9c010e..ae0c7b7a6b 100644 --- a/api/tests/test_containers_integration_tests/services/tools/test_tools_transform_service.py +++ b/api/tests/test_containers_integration_tests/services/tools/test_tools_transform_service.py @@ -168,7 +168,7 @@ class TestToolTransformService: """ # Arrange: Setup test data fake = Faker() - provider_type = ToolProviderType.BUILT_IN.value + provider_type = ToolProviderType.BUILT_IN provider_name = fake.company() icon = "🔧" @@ -206,7 +206,7 @@ class TestToolTransformService: """ # Arrange: Setup test data fake = Faker() - provider_type = ToolProviderType.API.value + provider_type = ToolProviderType.API provider_name = fake.company() icon = '{"background": "#FF6B6B", "content": "🔧"}' @@ -231,7 +231,7 @@ class TestToolTransformService: """ # Arrange: Setup test data
with invalid JSON fake = Faker() - provider_type = ToolProviderType.API.value + provider_type = ToolProviderType.API provider_name = fake.company() icon = '{"invalid": json}' @@ -257,7 +257,7 @@ class TestToolTransformService: """ # Arrange: Setup test data fake = Faker() - provider_type = ToolProviderType.WORKFLOW.value + provider_type = ToolProviderType.WORKFLOW provider_name = fake.company() icon = {"background": "#FF6B6B", "content": "🔧"} @@ -282,7 +282,7 @@ class TestToolTransformService: """ # Arrange: Setup test data fake = Faker() - provider_type = ToolProviderType.MCP.value + provider_type = ToolProviderType.MCP provider_name = fake.company() icon = {"background": "#FF6B6B", "content": "🔧"} @@ -329,7 +329,7 @@ class TestToolTransformService: # Arrange: Setup test data fake = Faker() tenant_id = fake.uuid4() - provider = {"type": ToolProviderType.BUILT_IN.value, "name": fake.company(), "icon": "🔧"} + provider = {"type": ToolProviderType.BUILT_IN, "name": fake.company(), "icon": "🔧"} # Act: Execute the method under test ToolTransformService.repack_provider(tenant_id, provider) diff --git a/api/tests/test_containers_integration_tests/services/workflow/test_workflow_converter.py b/api/tests/test_containers_integration_tests/services/workflow/test_workflow_converter.py index 18ab4bb73c..88aa0b6e72 100644 --- a/api/tests/test_containers_integration_tests/services/workflow/test_workflow_converter.py +++ b/api/tests/test_containers_integration_tests/services/workflow/test_workflow_converter.py @@ -66,7 +66,7 @@ class TestWorkflowConverter: mock_config.model = ModelConfigEntity( provider="openai", model="gpt-4", - mode=LLMMode.CHAT.value, + mode=LLMMode.CHAT, parameters={}, stop=[], ) @@ -120,7 +120,7 @@ class TestWorkflowConverter: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) @@ -150,7 +150,7 @@ class TestWorkflowConverter: app = App( tenant_id=tenant.id, name=fake.company(), - mode=AppMode.CHAT.value, + mode=AppMode.CHAT, icon_type="emoji", icon="🤖", icon_background="#FF6B6B", @@ -218,7 +218,7 @@ class TestWorkflowConverter: # Assert: Verify the expected outcomes assert new_app is not None assert new_app.name == "Test Workflow App" - assert new_app.mode == AppMode.ADVANCED_CHAT.value + assert new_app.mode == AppMode.ADVANCED_CHAT assert new_app.icon_type == "emoji" assert new_app.icon == "🚀" assert new_app.icon_background == "#4CAF50" @@ -257,7 +257,7 @@ class TestWorkflowConverter: app = App( tenant_id=tenant.id, name=fake.company(), - mode=AppMode.CHAT.value, + mode=AppMode.CHAT, icon_type="emoji", icon="🤖", icon_background="#FF6B6B", @@ -522,7 +522,7 @@ class TestWorkflowConverter: model_config = ModelConfigEntity( provider="openai", model="gpt-4", - mode=LLMMode.CHAT.value, + mode=LLMMode.CHAT, parameters={"temperature": 0.7}, stop=[], ) diff --git a/api/tests/test_containers_integration_tests/tasks/test_add_document_to_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_add_document_to_index_task.py index 4600f2addb..96e673d855 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_add_document_to_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_add_document_to_index_task.py @@ -63,7 +63,7 @@ class TestAddDocumentToIndexTask: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, )
db.session.add(join) diff --git a/api/tests/test_containers_integration_tests/tasks/test_batch_clean_document_task.py b/api/tests/test_containers_integration_tests/tasks/test_batch_clean_document_task.py index 3d17a8ac9d..8628e2af7f 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_batch_clean_document_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_batch_clean_document_task.py @@ -84,7 +84,7 @@ class TestBatchCleanDocumentTask: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) diff --git a/api/tests/test_containers_integration_tests/tasks/test_batch_create_segment_to_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_batch_create_segment_to_index_task.py index fcae93c669..a9cfb6ffd4 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_batch_create_segment_to_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_batch_create_segment_to_index_task.py @@ -112,7 +112,7 @@ class TestBatchCreateSegmentToIndexTask: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) diff --git a/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py index de81295100..987ebf8aca 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py @@ -91,7 +91,7 @@ class TestCreateSegmentToIndexTask: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) diff --git a/api/tests/test_containers_integration_tests/tasks/test_disable_segment_from_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_disable_segment_from_index_task.py index e1d63e993b..bc3701d098 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_disable_segment_from_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_disable_segment_from_index_task.py @@ -69,7 +69,7 @@ class TestDisableSegmentFromIndexTask: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) diff --git a/api/tests/test_containers_integration_tests/tasks/test_document_indexing_task.py b/api/tests/test_containers_integration_tests/tasks/test_document_indexing_task.py index f75dcf06e1..a315577b78 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_document_indexing_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_document_indexing_task.py @@ -72,7 +72,7 @@ class TestDocumentIndexingTask: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) @@ -154,7 +154,7 @@ class TestDocumentIndexingTask: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) diff --git
a/api/tests/test_containers_integration_tests/tasks/test_enable_segments_to_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_enable_segments_to_index_task.py index 38056496e7..798fe091ab 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_enable_segments_to_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_enable_segments_to_index_task.py @@ -63,7 +63,7 @@ class TestEnableSegmentsToIndexTask: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py index 2f38246787..31e9b67421 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py @@ -66,7 +66,7 @@ class TestMailAccountDeletionTask: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db.session.add(join) diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_change_mail_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_change_mail_task.py index 9cf348d989..1aed7dc7cc 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_change_mail_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_change_mail_task.py @@ -65,7 +65,7 @@ class TestMailChangeMailTask: join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, current=True, ) db_session_with_containers.add(join) diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py index ead7757c13..c083861004 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py @@ -95,7 +95,7 @@ class TestMailInviteMemberTask: name=fake.name(), password=fake.password(), interface_language="en-US", - status=AccountStatus.ACTIVE.value, + status=AccountStatus.ACTIVE, ) account.created_at = datetime.now(UTC) account.updated_at = datetime.now(UTC) @@ -117,7 +117,7 @@ class TestMailInviteMemberTask: tenant_join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.OWNER.value, + role=TenantAccountRole.OWNER, ) tenant_join.created_at = datetime.now(UTC) db_session_with_containers.add(tenant_join) @@ -163,7 +163,7 @@ class TestMailInviteMemberTask: name=email.split("@")[0], password="", interface_language="en-US", - status=AccountStatus.PENDING.value, + status=AccountStatus.PENDING, ) account.created_at = datetime.now(UTC) @@ -176,7 +176,7 @@ class TestMailInviteMemberTask: tenant_join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role=TenantAccountRole.NORMAL.value, + role=TenantAccountRole.NORMAL, ) tenant_join.created_at = datetime.now(UTC) db_session_with_containers.add(tenant_join) @@ -486,7 +486,7 @@ class TestMailInviteMemberTask: db_session_with_containers.refresh(pending_account) db_session_with_containers.refresh(tenant) - assert pending_account.status ==
AccountStatus.PENDING.value + assert pending_account.status == AccountStatus.PENDING assert pending_account.email == invitee_email assert tenant.name is not None @@ -497,7 +497,7 @@ class TestMailInviteMemberTask: .first() ) assert tenant_join is not None - assert tenant_join.role == TenantAccountRole.NORMAL.value + assert tenant_join.role == TenantAccountRole.NORMAL def test_send_invite_member_mail_token_lifecycle_management( self, db_session_with_containers, mock_external_service_dependencies diff --git a/api/tests/unit_tests/controllers/console/auth/test_oauth.py b/api/tests/unit_tests/controllers/console/auth/test_oauth.py index 1a2e27e8fe..67f4b85413 100644 --- a/api/tests/unit_tests/controllers/console/auth/test_oauth.py +++ b/api/tests/unit_tests/controllers/console/auth/test_oauth.py @@ -143,7 +143,7 @@ class TestOAuthCallback: oauth_provider.get_user_info.return_value = OAuthUserInfo(id="123", name="Test User", email="test@example.com") account = MagicMock() - account.status = AccountStatus.ACTIVE.value + account.status = AccountStatus.ACTIVE token_pair = MagicMock() token_pair.access_token = "jwt_access_token" @@ -220,11 +220,11 @@ class TestOAuthCallback: @pytest.mark.parametrize( ("account_status", "expected_redirect"), [ - (AccountStatus.BANNED.value, "http://localhost:3000/signin?message=Account is banned."), + (AccountStatus.BANNED, "http://localhost:3000/signin?message=Account is banned."), # CLOSED status: Currently NOT handled, will proceed to login (security issue) # This documents actual behavior. See test_defensive_check_for_closed_account_status for details ( - AccountStatus.CLOSED.value, + AccountStatus.CLOSED, "http://localhost:3000?access_token=jwt_access_token&refresh_token=jwt_refresh_token", ), ], @@ -296,13 +296,13 @@ class TestOAuthCallback: mock_get_providers.return_value = {"github": oauth_setup["provider"]} mock_account = MagicMock() - mock_account.status = AccountStatus.PENDING.value + mock_account.status = AccountStatus.PENDING mock_generate_account.return_value = mock_account with app.test_request_context("/auth/oauth/github/callback?code=test_code"): resource.get("github") - assert mock_account.status == AccountStatus.ACTIVE.value + assert mock_account.status == AccountStatus.ACTIVE assert mock_account.initialized_at is not None mock_db.session.commit.assert_called_once() @@ -352,7 +352,7 @@ class TestOAuthCallback: # Create account with CLOSED status closed_account = MagicMock() - closed_account.status = AccountStatus.CLOSED.value + closed_account.status = AccountStatus.CLOSED closed_account.id = "123" closed_account.name = "Closed Account" mock_generate_account.return_value = closed_account diff --git a/api/tests/unit_tests/core/repositories/test_celery_workflow_execution_repository.py b/api/tests/unit_tests/core/repositories/test_celery_workflow_execution_repository.py index e7733b2317..e6d0371cd5 100644 --- a/api/tests/unit_tests/core/repositories/test_celery_workflow_execution_repository.py +++ b/api/tests/unit_tests/core/repositories/test_celery_workflow_execution_repository.py @@ -140,7 +140,7 @@ class TestCeleryWorkflowExecutionRepository: assert call_args["execution_data"] == sample_workflow_execution.model_dump() assert call_args["tenant_id"] == mock_account.current_tenant_id assert call_args["app_id"] == "test-app" - assert call_args["triggered_from"] == WorkflowRunTriggeredFrom.APP_RUN.value + assert call_args["triggered_from"] == WorkflowRunTriggeredFrom.APP_RUN assert call_args["creator_user_id"] == mock_account.id # Verify no task
tracking occurs (no _pending_saves attribute) diff --git a/api/tests/unit_tests/core/repositories/test_celery_workflow_node_execution_repository.py b/api/tests/unit_tests/core/repositories/test_celery_workflow_node_execution_repository.py index 3abe20fca1..f6211f4cca 100644 --- a/api/tests/unit_tests/core/repositories/test_celery_workflow_node_execution_repository.py +++ b/api/tests/unit_tests/core/repositories/test_celery_workflow_node_execution_repository.py @@ -149,7 +149,7 @@ class TestCeleryWorkflowNodeExecutionRepository: assert call_args["execution_data"] == sample_workflow_node_execution.model_dump() assert call_args["tenant_id"] == mock_account.current_tenant_id assert call_args["app_id"] == "test-app" - assert call_args["triggered_from"] == WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN.value + assert call_args["triggered_from"] == WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN assert call_args["creator_user_id"] == mock_account.id # Verify execution is cached diff --git a/api/tests/unit_tests/core/repositories/test_workflow_node_execution_truncation.py b/api/tests/unit_tests/core/repositories/test_workflow_node_execution_truncation.py index 36f7d3ef55..485be90eae 100644 --- a/api/tests/unit_tests/core/repositories/test_workflow_node_execution_truncation.py +++ b/api/tests/unit_tests/core/repositories/test_workflow_node_execution_truncation.py @@ -145,12 +145,12 @@ class TestSQLAlchemyWorkflowNodeExecutionRepositoryTruncation: db_model.index = 1 db_model.predecessor_node_id = None db_model.node_id = "node-id" - db_model.node_type = NodeType.LLM.value + db_model.node_type = NodeType.LLM db_model.title = "Test Node" db_model.inputs = json.dumps({"value": "inputs"}) db_model.process_data = json.dumps({"value": "process_data"}) db_model.outputs = json.dumps({"value": "outputs"}) - db_model.status = WorkflowNodeExecutionStatus.SUCCEEDED.value + db_model.status = WorkflowNodeExecutionStatus.SUCCEEDED db_model.error = None db_model.elapsed_time = 1.0 db_model.execution_metadata = "{}" diff --git a/api/tests/unit_tests/core/workflow/graph_engine/command_channels/test_redis_channel.py b/api/tests/unit_tests/core/workflow/graph_engine/command_channels/test_redis_channel.py index 2c08fff27b..7ebccf83a7 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/command_channels/test_redis_channel.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/command_channels/test_redis_channel.py @@ -147,7 +147,7 @@ class TestRedisChannel: """Test deserializing an abort command.""" channel = RedisChannel(MagicMock(), "test:key") - abort_data = {"command_type": CommandType.ABORT.value} + abort_data = {"command_type": CommandType.ABORT} command = channel._deserialize_command(abort_data) assert isinstance(command, AbortCommand) @@ -158,7 +158,7 @@ class TestRedisChannel: channel = RedisChannel(MagicMock(), "test:key") # For now, only ABORT is supported, but test generic handling - generic_data = {"command_type": CommandType.ABORT.value} + generic_data = {"command_type": CommandType.ABORT} command = channel._deserialize_command(generic_data) assert command is not None diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_mock_iteration_simple.py b/api/tests/unit_tests/core/workflow/graph_engine/test_mock_iteration_simple.py index 6a9bfbdcc3..c39c12925f 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_mock_iteration_simple.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_mock_iteration_simple.py @@ -56,8 +56,8 @@ def
test_mock_iteration_node_preserves_config(): workflow_id="test", graph_config={"nodes": [], "edges": []}, user_id="test", - user_from=UserFrom.ACCOUNT.value, - invoke_from=InvokeFrom.SERVICE_API.value, + user_from=UserFrom.ACCOUNT, + invoke_from=InvokeFrom.SERVICE_API, call_depth=0, ) @@ -117,8 +117,8 @@ def test_mock_loop_node_preserves_config(): workflow_id="test", graph_config={"nodes": [], "edges": []}, user_id="test", - user_from=UserFrom.ACCOUNT.value, - invoke_from=InvokeFrom.SERVICE_API.value, + user_from=UserFrom.ACCOUNT, + invoke_from=InvokeFrom.SERVICE_API, call_depth=0, ) diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_redis_stop_integration.py b/api/tests/unit_tests/core/workflow/graph_engine/test_redis_stop_integration.py index b286d99f70..bd41fdeee5 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_redis_stop_integration.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_redis_stop_integration.py @@ -49,7 +49,7 @@ class TestRedisStopIntegration: # Verify the command data command_json = calls[0][0][1] command_data = json.loads(command_json) - assert command_data["command_type"] == CommandType.ABORT.value + assert command_data["command_type"] == CommandType.ABORT assert command_data["reason"] == "Test stop" def test_graph_engine_manager_handles_redis_failure_gracefully(self): @@ -122,7 +122,7 @@ class TestRedisStopIntegration: # Verify serialized command command_json = calls[0][0][1] command_data = json.loads(command_json) - assert command_data["command_type"] == CommandType.ABORT.value + assert command_data["command_type"] == CommandType.ABORT assert command_data["reason"] == "User requested stop" # Check expire was set @@ -137,9 +137,7 @@ class TestRedisStopIntegration: mock_redis.pipeline.return_value.__exit__ = Mock(return_value=None) # Mock command data - abort_command_json = json.dumps( - {"command_type": CommandType.ABORT.value, "reason": "Test abort", "payload": None} - ) + abort_command_json = json.dumps({"command_type": CommandType.ABORT, "reason": "Test abort", "payload": None}) # Mock pipeline execute to return commands mock_pipeline.execute.return_value = [ diff --git a/api/tests/unit_tests/core/workflow/nodes/variable_assigner/v1/test_variable_assigner_v1.py b/api/tests/unit_tests/core/workflow/nodes/variable_assigner/v1/test_variable_assigner_v1.py index 3e50d5522a..6189febdf5 100644 --- a/api/tests/unit_tests/core/workflow/nodes/variable_assigner/v1/test_variable_assigner_v1.py +++ b/api/tests/unit_tests/core/workflow/nodes/variable_assigner/v1/test_variable_assigner_v1.py @@ -87,7 +87,7 @@ def test_overwrite_string_variable(): "data": { "title": "test", "assigned_variable_selector": ["conversation", conversation_variable.name], - "write_mode": WriteMode.OVER_WRITE.value, + "write_mode": WriteMode.OVER_WRITE, "input_variable_selector": [DEFAULT_NODE_ID, input_variable.name], }, } @@ -189,7 +189,7 @@ def test_append_variable_to_array(): "data": { "title": "test", "assigned_variable_selector": ["conversation", conversation_variable.name], - "write_mode": WriteMode.APPEND.value, + "write_mode": WriteMode.APPEND, "input_variable_selector": [DEFAULT_NODE_ID, input_variable.name], }, } @@ -282,7 +282,7 @@ def test_clear_array(): "data": { "title": "test", "assigned_variable_selector": ["conversation", conversation_variable.name], - "write_mode": WriteMode.CLEAR.value, + "write_mode": WriteMode.CLEAR, "input_variable_selector": [], }, } diff --git
a/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py b/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py index 28b339fe85..5cba43714a 100644 --- a/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py +++ b/api/tests/unit_tests/repositories/workflow_node_execution/test_sqlalchemy_repository.py @@ -298,7 +298,7 @@ def test_to_domain_model(repository): db_model.predecessor_node_id = "test-predecessor-id" db_model.node_execution_id = "test-node-execution-id" db_model.node_id = "test-node-id" - db_model.node_type = NodeType.START.value + db_model.node_type = NodeType.START db_model.title = "Test Node" db_model.inputs = json.dumps(inputs_dict) db_model.process_data = json.dumps(process_data_dict) diff --git a/api/tests/unit_tests/services/workflow/test_workflow_converter.py b/api/tests/unit_tests/services/workflow/test_workflow_converter.py index 2ca781bae5..63ce4c0c3c 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_converter.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_converter.py @@ -107,7 +107,7 @@ def test__convert_to_http_request_node_for_chatbot(default_variables): assert body_data body_data_json = json.loads(body_data) - assert body_data_json["point"] == APIBasedExtensionPoint.APP_EXTERNAL_DATA_TOOL_QUERY.value + assert body_data_json["point"] == APIBasedExtensionPoint.APP_EXTERNAL_DATA_TOOL_QUERY body_params = body_data_json["params"] assert body_params["app_id"] == app_model.id @@ -168,7 +168,7 @@ def test__convert_to_http_request_node_for_workflow_app(default_variables): assert body_data body_data_json = json.loads(body_data) - assert body_data_json["point"] == APIBasedExtensionPoint.APP_EXTERNAL_DATA_TOOL_QUERY.value + assert body_data_json["point"] == APIBasedExtensionPoint.APP_EXTERNAL_DATA_TOOL_QUERY body_params = body_data_json["params"] assert body_params["app_id"] == app_model.id From cbf2ba6cecc5fbe5d2f731871d1754840eb8289d Mon Sep 17 00:00:00 2001 From: carribean Date: Sat, 11 Oct 2025 10:47:28 +0800 Subject: [PATCH 26/49] Feature integrate alibabacloud mysql vector (#25994) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- api/.env.example | 9 + api/configs/middleware/__init__.py | 2 + .../vdb/alibabacloud_mysql_config.py | 54 ++ api/controllers/console/datasets/datasets.py | 2 + .../vdb/alibabacloud_mysql/__init__.py | 0 .../alibabacloud_mysql_vector.py | 388 ++++++++++ api/core/rag/datasource/vdb/vector_factory.py | 6 + api/core/rag/datasource/vdb/vector_type.py | 1 + api/pyproject.toml | 1 + .../test_alibabacloud_mysql_vector.py | 722 ++++++++++++++++++ api/uv.lock | 21 + docker/.env.example | 11 +- docker/docker-compose.yaml | 7 + 13 files changed, 1223 insertions(+), 1 deletion(-) create mode 100644 api/configs/middleware/vdb/alibabacloud_mysql_config.py rename docker/volumes/sandbox/dependencies/python-requirements.txt => api/core/rag/datasource/vdb/alibabacloud_mysql/__init__.py (100%) create mode 100644 api/core/rag/datasource/vdb/alibabacloud_mysql/alibabacloud_mysql_vector.py create mode 100644 api/tests/unit_tests/core/rag/datasource/vdb/alibabacloud_mysql/test_alibabacloud_mysql_vector.py diff --git a/api/.env.example b/api/.env.example index a462bfdbec..1d8190ce5f 100644 --- a/api/.env.example +++ b/api/.env.example @@ -343,6 +343,15 @@ OCEANBASE_VECTOR_DATABASE=test OCEANBASE_MEMORY_LIMIT=6G
OCEANBASE_ENABLE_HYBRID_SEARCH=false +# AlibabaCloud MySQL Vector configuration +ALIBABACLOUD_MYSQL_HOST=127.0.0.1 +ALIBABACLOUD_MYSQL_PORT=3306 +ALIBABACLOUD_MYSQL_USER=root +ALIBABACLOUD_MYSQL_PASSWORD=root +ALIBABACLOUD_MYSQL_DATABASE=dify +ALIBABACLOUD_MYSQL_MAX_CONNECTION=5 +ALIBABACLOUD_MYSQL_HNSW_M=6 + # openGauss configuration OPENGAUSS_HOST=127.0.0.1 OPENGAUSS_PORT=6600 diff --git a/api/configs/middleware/__init__.py b/api/configs/middleware/__init__.py index 62b3cc9842..d872e8201b 100644 --- a/api/configs/middleware/__init__.py +++ b/api/configs/middleware/__init__.py @@ -18,6 +18,7 @@ from .storage.opendal_storage_config import OpenDALStorageConfig from .storage.supabase_storage_config import SupabaseStorageConfig from .storage.tencent_cos_storage_config import TencentCloudCOSStorageConfig from .storage.volcengine_tos_storage_config import VolcengineTOSStorageConfig +from .vdb.alibabacloud_mysql_config import AlibabaCloudMySQLConfig from .vdb.analyticdb_config import AnalyticdbConfig from .vdb.baidu_vector_config import BaiduVectorDBConfig from .vdb.chroma_config import ChromaConfig @@ -330,6 +331,7 @@ class MiddlewareConfig( ClickzettaConfig, HuaweiCloudConfig, MilvusConfig, + AlibabaCloudMySQLConfig, MyScaleConfig, OpenSearchConfig, OracleConfig, diff --git a/api/configs/middleware/vdb/alibabacloud_mysql_config.py b/api/configs/middleware/vdb/alibabacloud_mysql_config.py new file mode 100644 index 0000000000..a76400ed1c --- /dev/null +++ b/api/configs/middleware/vdb/alibabacloud_mysql_config.py @@ -0,0 +1,54 @@ +from pydantic import Field, PositiveInt +from pydantic_settings import BaseSettings + + +class AlibabaCloudMySQLConfig(BaseSettings): + """ + Configuration settings for AlibabaCloud MySQL vector database + """ + + ALIBABACLOUD_MYSQL_HOST: str = Field( + description="Hostname or IP address of the AlibabaCloud MySQL server (e.g., 'localhost' or 'mysql.aliyun.com')", + default="localhost", + ) + + ALIBABACLOUD_MYSQL_PORT: PositiveInt = Field( + description="Port number on which the AlibabaCloud MySQL server is listening (default is 3306)", + default=3306, + ) + + ALIBABACLOUD_MYSQL_USER: str = Field( + description="Username for authenticating with AlibabaCloud MySQL (default is 'root')", + default="root", + ) + + ALIBABACLOUD_MYSQL_PASSWORD: str = Field( + description="Password for authenticating with AlibabaCloud MySQL (default is an empty string)", + default="", + ) + + ALIBABACLOUD_MYSQL_DATABASE: str = Field( + description="Name of the AlibabaCloud MySQL database to connect to (default is 'dify')", + default="dify", + ) + + ALIBABACLOUD_MYSQL_MAX_CONNECTION: PositiveInt = Field( + description="Maximum number of connections in the connection pool", + default=5, + ) + + ALIBABACLOUD_MYSQL_CHARSET: str = Field( + description="Character set for AlibabaCloud MySQL connection (default is 'utf8mb4')", + default="utf8mb4", + ) + + ALIBABACLOUD_MYSQL_DISTANCE_FUNCTION: str = Field( + description="Distance function used for vector similarity search in AlibabaCloud MySQL " + "(e.g., 'cosine', 'euclidean')", + default="cosine", + ) + + ALIBABACLOUD_MYSQL_HNSW_M: PositiveInt = Field( + description="Maximum number of connections per layer for HNSW vector index (default is 6, range: 3-200)", + default=6, + ) diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index dda0125687..72cd33eab6 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -810,6 +810,7 @@ class
DatasetRetrievalSettingApi(Resource): | VectorType.MATRIXONE | VectorType.CLICKZETTA | VectorType.BAIDU + | VectorType.ALIBABACLOUD_MYSQL ): return { "retrieval_method": [ @@ -864,6 +865,7 @@ class DatasetRetrievalSettingMockApi(Resource): | VectorType.MATRIXONE | VectorType.CLICKZETTA | VectorType.BAIDU + | VectorType.ALIBABACLOUD_MYSQL ): return { "retrieval_method": [ diff --git a/docker/volumes/sandbox/dependencies/python-requirements.txt b/api/core/rag/datasource/vdb/alibabacloud_mysql/__init__.py similarity index 100% rename from docker/volumes/sandbox/dependencies/python-requirements.txt rename to api/core/rag/datasource/vdb/alibabacloud_mysql/__init__.py diff --git a/api/core/rag/datasource/vdb/alibabacloud_mysql/alibabacloud_mysql_vector.py b/api/core/rag/datasource/vdb/alibabacloud_mysql/alibabacloud_mysql_vector.py new file mode 100644 index 0000000000..fdb5ffebfc --- /dev/null +++ b/api/core/rag/datasource/vdb/alibabacloud_mysql/alibabacloud_mysql_vector.py @@ -0,0 +1,388 @@ +import hashlib +import json +import logging +import uuid +from contextlib import contextmanager +from typing import Any, Literal, cast + +import mysql.connector +from mysql.connector import Error as MySQLError +from pydantic import BaseModel, model_validator + +from configs import dify_config +from core.rag.datasource.vdb.vector_base import BaseVector +from core.rag.datasource.vdb.vector_factory import AbstractVectorFactory +from core.rag.datasource.vdb.vector_type import VectorType +from core.rag.embedding.embedding_base import Embeddings +from core.rag.models.document import Document +from extensions.ext_redis import redis_client +from models.dataset import Dataset + +logger = logging.getLogger(__name__) + + +class AlibabaCloudMySQLVectorConfig(BaseModel): + host: str + port: int + user: str + password: str + database: str + max_connection: int + charset: str = "utf8mb4" + distance_function: Literal["cosine", "euclidean"] = "cosine" + hnsw_m: int = 6 + + @model_validator(mode="before") + @classmethod + def validate_config(cls, values: dict): + if not values.get("host"): + raise ValueError("config ALIBABACLOUD_MYSQL_HOST is required") + if not values.get("port"): + raise ValueError("config ALIBABACLOUD_MYSQL_PORT is required") + if not values.get("user"): + raise ValueError("config ALIBABACLOUD_MYSQL_USER is required") + if values.get("password") is None: + raise ValueError("config ALIBABACLOUD_MYSQL_PASSWORD is required") + if not values.get("database"): + raise ValueError("config ALIBABACLOUD_MYSQL_DATABASE is required") + if not values.get("max_connection"): + raise ValueError("config ALIBABACLOUD_MYSQL_MAX_CONNECTION is required") + return values + + +SQL_CREATE_TABLE = """ +CREATE TABLE IF NOT EXISTS {table_name} ( + id VARCHAR(36) PRIMARY KEY, + text LONGTEXT NOT NULL, + meta JSON NOT NULL, + embedding VECTOR({dimension}) NOT NULL, + VECTOR INDEX (embedding) M={hnsw_m} DISTANCE={distance_function} +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; +""" + +SQL_CREATE_META_INDEX = """ +CREATE INDEX idx_{index_hash}_meta ON {table_name} + ((CAST(JSON_UNQUOTE(JSON_EXTRACT(meta, '$.document_id')) AS CHAR(36)))); +""" + +SQL_CREATE_FULLTEXT_INDEX = """ +CREATE FULLTEXT INDEX idx_{index_hash}_text ON {table_name} (text) WITH PARSER ngram; +""" + + +class AlibabaCloudMySQLVector(BaseVector): + def __init__(self, collection_name: str, config: AlibabaCloudMySQLVectorConfig): + super().__init__(collection_name) + self.pool = self._create_connection_pool(config) + self.table_name =
collection_name.lower() + self.index_hash = hashlib.md5(self.table_name.encode()).hexdigest()[:8] + self.distance_function = config.distance_function.lower() + self.hnsw_m = config.hnsw_m + self._check_vector_support() + + def get_type(self) -> str: + return VectorType.ALIBABACLOUD_MYSQL + + def _create_connection_pool(self, config: AlibabaCloudMySQLVectorConfig): + # Create connection pool using mysql-connector-python pooling + pool_config: dict[str, Any] = { + "host": config.host, + "port": config.port, + "user": config.user, + "password": config.password, + "database": config.database, + "charset": config.charset, + "autocommit": True, + "pool_name": f"pool_{self.collection_name}", + "pool_size": config.max_connection, + "pool_reset_session": True, + } + return mysql.connector.pooling.MySQLConnectionPool(**pool_config) + + def _check_vector_support(self): + """Check if the MySQL server supports vector operations.""" + try: + with self._get_cursor() as cur: + # Check MySQL version and vector support + cur.execute("SELECT VERSION()") + version = cur.fetchone()["VERSION()"] + logger.debug("Connected to MySQL version: %s", version) + # Try to execute a simple vector function to verify support + cur.execute("SELECT VEC_FromText('[1,2,3]') IS NOT NULL as vector_support") + result = cur.fetchone() + if not result or not result.get("vector_support"): + raise ValueError( + "RDS MySQL Vector functions are not available." + " Please ensure you're using RDS MySQL 8.0.36+ with Vector support." + ) + + except MySQLError as e: + if "FUNCTION" in str(e) and "VEC_FromText" in str(e): + raise ValueError( + "RDS MySQL Vector functions are not available." + " Please ensure you're using RDS MySQL 8.0.36+ with Vector support." + ) from e + raise e + + @contextmanager + def _get_cursor(self): + conn = self.pool.get_connection() + cur = conn.cursor(dictionary=True) + try: + yield cur + finally: + cur.close() + conn.close() + + def create(self, texts: list[Document], embeddings: list[list[float]], **kwargs): + dimension = len(embeddings[0]) + self._create_collection(dimension) + return self.add_texts(texts, embeddings) + + def add_texts(self, documents: list[Document], embeddings: list[list[float]], **kwargs): + values = [] + pks = [] + for i, doc in enumerate(documents): + if doc.metadata is not None: + doc_id = doc.metadata.get("doc_id", str(uuid.uuid4())) + pks.append(doc_id) + # Convert embedding list to Aliyun MySQL vector format + vector_str = "[" + ",".join(map(str, embeddings[i])) + "]" + values.append( + ( + doc_id, + doc.page_content, + json.dumps(doc.metadata), + vector_str, + ) + ) + + with self._get_cursor() as cur: + insert_sql = ( + f"INSERT INTO {self.table_name} (id, text, meta, embedding) VALUES (%s, %s, %s, VEC_FromText(%s))" + ) + cur.executemany(insert_sql, values) + return pks + + def text_exists(self, id: str) -> bool: + with self._get_cursor() as cur: + cur.execute(f"SELECT id FROM {self.table_name} WHERE id = %s", (id,)) + return cur.fetchone() is not None + + def get_by_ids(self, ids: list[str]) -> list[Document]: + if not ids: + return [] + + with self._get_cursor() as cur: + placeholders = ",".join(["%s"] * len(ids)) + cur.execute(f"SELECT meta, text FROM {self.table_name} WHERE id IN ({placeholders})", ids) + docs = [] + for record in cur: + metadata = record["meta"] + if isinstance(metadata, str): + metadata = json.loads(metadata) + docs.append(Document(page_content=record["text"], metadata=metadata)) + return docs + + def delete_by_ids(self, ids: list[str]): + # Avoiding crashes
caused by performing delete operations on empty lists + if not ids: + return + + with self._get_cursor() as cur: + try: + placeholders = ",".join(["%s"] * len(ids)) + cur.execute(f"DELETE FROM {self.table_name} WHERE id IN ({placeholders})", ids) + except MySQLError as e: + if e.errno == 1146: # Table doesn't exist + logger.warning("Table %s not found, skipping delete operation.", self.table_name) + return + else: + raise e + + def delete_by_metadata_field(self, key: str, value: str): + with self._get_cursor() as cur: + cur.execute( + f"DELETE FROM {self.table_name} WHERE JSON_UNQUOTE(JSON_EXTRACT(meta, %s)) = %s", (f"$.{key}", value) + ) + + def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]: + """ + Search the nearest neighbors to a vector using RDS MySQL vector distance functions. + + :param query_vector: The input vector to search for similar items. + :return: List of Documents that are nearest to the query vector. + """ + top_k = kwargs.get("top_k", 4) + if not isinstance(top_k, int) or top_k <= 0: + raise ValueError("top_k must be a positive integer") + + document_ids_filter = kwargs.get("document_ids_filter") + where_clause = "" + params = [] + + if document_ids_filter: + placeholders = ",".join(["%s"] * len(document_ids_filter)) + where_clause = f" WHERE JSON_UNQUOTE(JSON_EXTRACT(meta, '$.document_id')) IN ({placeholders}) " + params.extend(document_ids_filter) + + # Convert query vector to RDS MySQL vector format + query_vector_str = "[" + ",".join(map(str, query_vector)) + "]" + + # Use RDS MySQL's native vector distance functions + with self._get_cursor() as cur: + # Choose distance function based on configuration + distance_func = "VEC_DISTANCE_COSINE" if self.distance_function == "cosine" else "VEC_DISTANCE_EUCLIDEAN" + + # Note: RDS MySQL optimizer will use vector index when ORDER BY + LIMIT are present + # Use column alias in ORDER BY to avoid calculating distance twice + sql = f""" + SELECT meta, text, + {distance_func}(embedding, VEC_FromText(%s)) AS distance + FROM {self.table_name} + {where_clause} + ORDER BY distance + LIMIT %s + """ + query_params = [query_vector_str] + params + [top_k] + + cur.execute(sql, query_params) + + docs = [] + score_threshold = float(kwargs.get("score_threshold") or 0.0) + + for record in cur: + try: + distance = float(record["distance"]) + # Convert distance to similarity score + if self.distance_function == "cosine": + # For cosine distance: similarity = 1 - distance + similarity = 1.0 - distance + else: + # For euclidean distance: use inverse relationship + # similarity = 1 / (1 + distance) + similarity = 1.0 / (1.0 + distance) + + metadata = record["meta"] + if isinstance(metadata, str): + metadata = json.loads(metadata) + metadata["score"] = similarity + metadata["distance"] = distance + + if similarity >= score_threshold: + docs.append(Document(page_content=record["text"], metadata=metadata)) + except (ValueError, json.JSONDecodeError) as e: + logger.warning("Error processing search result: %s", e) + continue + + return docs + + def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: + top_k = kwargs.get("top_k", 5) + if not isinstance(top_k, int) or top_k <= 0: + raise ValueError("top_k must be a positive integer") + + document_ids_filter = kwargs.get("document_ids_filter") + where_clause = "" + params = [] + + if document_ids_filter: + placeholders = ",".join(["%s"] * len(document_ids_filter)) + where_clause = f" AND JSON_UNQUOTE(JSON_EXTRACT(meta, '$.document_id')) IN
({placeholders}) " + params.extend(document_ids_filter) + + with self._get_cursor() as cur: + # Build query parameters: query (twice for MATCH clauses), document_ids_filter (if any), top_k + query_params = [query, query] + params + [top_k] + cur.execute( + f"""SELECT meta, text, + MATCH(text) AGAINST(%s IN NATURAL LANGUAGE MODE) AS score + FROM {self.table_name} + WHERE MATCH(text) AGAINST(%s IN NATURAL LANGUAGE MODE) + {where_clause} + ORDER BY score DESC + LIMIT %s""", + query_params, + ) + docs = [] + for record in cur: + metadata = record["meta"] + if isinstance(metadata, str): + metadata = json.loads(metadata) + metadata["score"] = float(record["score"]) + docs.append(Document(page_content=record["text"], metadata=metadata)) + return docs + + def delete(self): + with self._get_cursor() as cur: + cur.execute(f"DROP TABLE IF EXISTS {self.table_name}") + + def _create_collection(self, dimension: int): + collection_exist_cache_key = f"vector_indexing_{self._collection_name}" + lock_name = f"{collection_exist_cache_key}_lock" + with redis_client.lock(lock_name, timeout=20): + if redis_client.get(collection_exist_cache_key): + return + + with self._get_cursor() as cur: + # Create table with vector column and vector index + cur.execute( + SQL_CREATE_TABLE.format( + table_name=self.table_name, + dimension=dimension, + distance_function=self.distance_function, + hnsw_m=self.hnsw_m, + ) + ) + # Create metadata index (check if exists first) + try: + cur.execute(SQL_CREATE_META_INDEX.format(table_name=self.table_name, index_hash=self.index_hash)) + except MySQLError as e: + if e.errno != 1061: # Duplicate key name + logger.warning("Could not create meta index: %s", e) + + # Create full-text index for text search + try: + cur.execute( + SQL_CREATE_FULLTEXT_INDEX.format(table_name=self.table_name, index_hash=self.index_hash) + ) + except MySQLError as e: + if e.errno != 1061: # Duplicate key name + logger.warning("Could not create fulltext index: %s", e) + + redis_client.set(collection_exist_cache_key, 1, ex=3600) + + +class AlibabaCloudMySQLVectorFactory(AbstractVectorFactory): + def _validate_distance_function(self, distance_function: str) -> Literal["cosine", "euclidean"]: + """Validate and return the distance function as a proper Literal type.""" + if distance_function not in ["cosine", "euclidean"]: + raise ValueError(f"Invalid distance function: {distance_function}.
Must be 'cosine' or 'euclidean'") + return cast(Literal["cosine", "euclidean"], distance_function) + + def init_vector(self, dataset: Dataset, attributes: list, embeddings: Embeddings) -> AlibabaCloudMySQLVector: + if dataset.index_struct_dict: + class_prefix: str = dataset.index_struct_dict["vector_store"]["class_prefix"] + collection_name = class_prefix + else: + dataset_id = dataset.id + collection_name = Dataset.gen_collection_name_by_id(dataset_id) + dataset.index_struct = json.dumps( + self.gen_index_struct_dict(VectorType.ALIBABACLOUD_MYSQL, collection_name) + ) + return AlibabaCloudMySQLVector( + collection_name=collection_name, + config=AlibabaCloudMySQLVectorConfig( + host=dify_config.ALIBABACLOUD_MYSQL_HOST or "localhost", + port=dify_config.ALIBABACLOUD_MYSQL_PORT, + user=dify_config.ALIBABACLOUD_MYSQL_USER or "root", + password=dify_config.ALIBABACLOUD_MYSQL_PASSWORD or "", + database=dify_config.ALIBABACLOUD_MYSQL_DATABASE or "dify", + max_connection=dify_config.ALIBABACLOUD_MYSQL_MAX_CONNECTION, + charset=dify_config.ALIBABACLOUD_MYSQL_CHARSET or "utf8mb4", + distance_function=self._validate_distance_function( + dify_config.ALIBABACLOUD_MYSQL_DISTANCE_FUNCTION or "cosine" + ), + hnsw_m=dify_config.ALIBABACLOUD_MYSQL_HNSW_M or 6, + ), + ) diff --git a/api/core/rag/datasource/vdb/vector_factory.py b/api/core/rag/datasource/vdb/vector_factory.py index dc4f026ff3..0beb388693 100644 --- a/api/core/rag/datasource/vdb/vector_factory.py +++ b/api/core/rag/datasource/vdb/vector_factory.py @@ -71,6 +71,12 @@ class Vector: from core.rag.datasource.vdb.milvus.milvus_vector import MilvusVectorFactory return MilvusVectorFactory + case VectorType.ALIBABACLOUD_MYSQL: + from core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector import ( + AlibabaCloudMySQLVectorFactory, + ) + + return AlibabaCloudMySQLVectorFactory case VectorType.MYSCALE: + from core.rag.datasource.vdb.myscale.myscale_vector import MyScaleVectorFactory diff --git a/api/core/rag/datasource/vdb/vector_type.py b/api/core/rag/datasource/vdb/vector_type.py index a415142196..bc7d93a2e0 100644 --- a/api/core/rag/datasource/vdb/vector_type.py +++ b/api/core/rag/datasource/vdb/vector_type.py @@ -2,6 +2,7 @@ from enum import StrEnum class VectorType(StrEnum): + ALIBABACLOUD_MYSQL = "alibabacloud_mysql" ANALYTICDB = "analyticdb" CHROMA = "chroma" MILVUS = "milvus" diff --git a/api/pyproject.toml b/api/pyproject.toml index 22eedf7b8b..897d114dcc 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -217,4 +217,5 @@ vdb = [ "weaviate-client~=3.24.0", "xinference-client~=1.2.2", "mo-vector~=0.1.13", + "mysql-connector-python>=9.3.0", ] diff --git a/api/tests/unit_tests/core/rag/datasource/vdb/alibabacloud_mysql/test_alibabacloud_mysql_vector.py b/api/tests/unit_tests/core/rag/datasource/vdb/alibabacloud_mysql/test_alibabacloud_mysql_vector.py new file mode 100644 index 0000000000..44fe272c8c --- /dev/null +++ b/api/tests/unit_tests/core/rag/datasource/vdb/alibabacloud_mysql/test_alibabacloud_mysql_vector.py @@ -0,0 +1,722 @@ +import json +import unittest +from unittest.mock import MagicMock, patch + +import pytest + +from core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector import ( + AlibabaCloudMySQLVector, + AlibabaCloudMySQLVectorConfig, +) +from core.rag.models.document import Document + +try: + from mysql.connector import Error as MySQLError +except ImportError: + # Fallback for testing environments where mysql-connector-python might not be installed + class MySQLError(Exception): + def
__init__(self, errno, msg): + self.errno = errno + self.msg = msg + super().__init__(msg) + + +class TestAlibabaCloudMySQLVector(unittest.TestCase): + def setUp(self): + self.config = AlibabaCloudMySQLVectorConfig( + host="localhost", + port=3306, + user="test_user", + password="test_password", + database="test_db", + max_connection=5, + charset="utf8mb4", + ) + self.collection_name = "test_collection" + + # Sample documents for testing + self.sample_documents = [ + Document( + page_content="This is a test document about AI.", + metadata={"doc_id": "doc1", "document_id": "dataset1", "source": "test"}, + ), + Document( + page_content="Another document about machine learning.", + metadata={"doc_id": "doc2", "document_id": "dataset1", "source": "test"}, + ), + ] + + # Sample embeddings + self.sample_embeddings = [[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]] + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_init(self, mock_pool_class): + """Test AlibabaCloudMySQLVector initialization.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + # Mock connection and cursor for vector support check + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [ + {"VERSION()": "8.0.36"}, # Version check + {"vector_support": True}, # Vector support check + ] + + alibabacloud_mysql_vector = AlibabaCloudMySQLVector(self.collection_name, self.config) + + assert alibabacloud_mysql_vector.collection_name == self.collection_name + assert alibabacloud_mysql_vector.table_name == self.collection_name.lower() + assert alibabacloud_mysql_vector.get_type() == "alibabacloud_mysql" + assert alibabacloud_mysql_vector.distance_function == "cosine" + assert alibabacloud_mysql_vector.pool is not None + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + @patch("core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.redis_client") + def test_create_collection(self, mock_redis, mock_pool_class): + """Test collection creation.""" + # Mock Redis operations + mock_redis.lock.return_value.__enter__ = MagicMock() + mock_redis.lock.return_value.__exit__ = MagicMock() + mock_redis.get.return_value = None + mock_redis.set.return_value = None + + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + # Mock connection and cursor + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [ + {"VERSION()": "8.0.36"}, # Version check + {"vector_support": True}, # Vector support check + ] + + alibabacloud_mysql_vector = AlibabaCloudMySQLVector(self.collection_name, self.config) + alibabacloud_mysql_vector._create_collection(768) + + # Verify SQL execution calls - should include table creation and index creation + assert mock_cursor.execute.called + assert mock_cursor.execute.call_count >= 3 # CREATE TABLE + 2 indexes + mock_redis.set.assert_called_once() + + def test_config_validation(self): + """Test configuration validation.""" + # Test missing required fields + with pytest.raises(ValueError): + AlibabaCloudMySQLVectorConfig( + host="", # Empty host should raise error + port=3306, + user="test",
password="test", + database="test", + max_connection=5, + ) + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_vector_support_check_success(self, mock_pool_class): + """Test successful vector support check.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + + # Should not raise an exception + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + assert vector_store is not None + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_vector_support_check_failure(self, mock_pool_class): + """Test vector support check failure.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.35"}, {"vector_support": False}] + + with pytest.raises(ValueError) as context: + AlibabaCloudMySQLVector(self.collection_name, self.config) + + assert "RDS MySQL Vector functions are not available" in str(context.value) + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_vector_support_check_function_error(self, mock_pool_class): + """Test vector support check with function not found error.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.return_value = {"VERSION()": "8.0.36"} + mock_cursor.execute.side_effect = [None, MySQLError(errno=1305, msg="FUNCTION VEC_FromText does not exist")] + + with pytest.raises(ValueError) as context: + AlibabaCloudMySQLVector(self.collection_name, self.config) + + assert "RDS MySQL Vector functions are not available" in str(context.value) + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + @patch("core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.redis_client") + def test_create_documents(self, mock_redis, mock_pool_class): + """Test creating documents with embeddings.""" + # Setup mocks + self._setup_mocks(mock_redis, mock_pool_class) + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + result = vector_store.create(self.sample_documents, self.sample_embeddings) + + assert len(result) == 2 + assert "doc1" in result + assert "doc2" in result + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_add_texts(self, mock_pool_class): + """Test adding texts to the vector store.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = 
mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + result = vector_store.add_texts(self.sample_documents, self.sample_embeddings) + + assert len(result) == 2 + mock_cursor.executemany.assert_called_once() + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_text_exists(self, mock_pool_class): + """Test checking if text exists.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [ + {"VERSION()": "8.0.36"}, + {"vector_support": True}, + {"id": "doc1"}, # Text exists + ] + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + exists = vector_store.text_exists("doc1") + + assert exists + # Check that the correct SQL was executed (last call after init) + execute_calls = mock_cursor.execute.call_args_list + last_call = execute_calls[-1] + assert "SELECT id FROM" in last_call[0][0] + assert last_call[0][1] == ("doc1",) + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_text_not_exists(self, mock_pool_class): + """Test checking if text does not exist.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [ + {"VERSION()": "8.0.36"}, + {"vector_support": True}, + None, # Text does not exist + ] + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + exists = vector_store.text_exists("nonexistent") + + assert not exists + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_get_by_ids(self, mock_pool_class): + """Test getting documents by IDs.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + mock_cursor.__iter__ = lambda self: iter( + [ + {"meta": json.dumps({"doc_id": "doc1", "source": "test"}), "text": "Test document 1"}, + {"meta": json.dumps({"doc_id": "doc2", "source": "test"}), "text": "Test document 2"}, + ] + ) + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + docs = vector_store.get_by_ids(["doc1", "doc2"]) + + assert len(docs) == 2 + assert docs[0].page_content == "Test document 1" + assert docs[1].page_content == "Test document 2" + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_get_by_ids_empty_list(self, mock_pool_class): + """Test getting documents with empty ID list.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + 
mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + docs = vector_store.get_by_ids([]) + + assert len(docs) == 0 + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_delete_by_ids(self, mock_pool_class): + """Test deleting documents by IDs.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + vector_store.delete_by_ids(["doc1", "doc2"]) + + # Check that delete SQL was executed + execute_calls = mock_cursor.execute.call_args_list + delete_calls = [call for call in execute_calls if "DELETE" in str(call)] + assert len(delete_calls) == 1 + delete_call = delete_calls[0] + assert "DELETE FROM" in delete_call[0][0] + assert delete_call[0][1] == ["doc1", "doc2"] + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_delete_by_ids_empty_list(self, mock_pool_class): + """Test deleting with empty ID list.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + vector_store.delete_by_ids([]) # Should not raise an exception + + # Verify no delete SQL was executed + execute_calls = mock_cursor.execute.call_args_list + delete_calls = [call for call in execute_calls if "DELETE" in str(call)] + assert len(delete_calls) == 0 + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_delete_by_ids_table_not_exists(self, mock_pool_class): + """Test deleting when table doesn't exist.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + + # Simulate table doesn't exist error on delete + + def execute_side_effect(*args, **kwargs): + if "DELETE" in args[0]: + raise MySQLError(errno=1146, msg="Table doesn't exist") + + mock_cursor.execute.side_effect = execute_side_effect + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + # Should not raise an exception + vector_store.delete_by_ids(["doc1"]) + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_delete_by_metadata_field(self, mock_pool_class): + """Test deleting documents by metadata field.""" + # Mock the connection pool + mock_pool = MagicMock() + 
mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + vector_store.delete_by_metadata_field("document_id", "dataset1") + + # Check that the correct SQL was executed + execute_calls = mock_cursor.execute.call_args_list + delete_calls = [call for call in execute_calls if "DELETE" in str(call)] + assert len(delete_calls) == 1 + delete_call = delete_calls[0] + assert "JSON_UNQUOTE(JSON_EXTRACT(meta" in delete_call[0][0] + assert delete_call[0][1] == ("$.document_id", "dataset1") + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_search_by_vector_cosine(self, mock_pool_class): + """Test vector search with cosine distance.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + mock_cursor.__iter__ = lambda self: iter( + [{"meta": json.dumps({"doc_id": "doc1", "source": "test"}), "text": "Test document 1", "distance": 0.1}] + ) + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + query_vector = [0.1, 0.2, 0.3, 0.4] + docs = vector_store.search_by_vector(query_vector, top_k=5) + + assert len(docs) == 1 + assert docs[0].page_content == "Test document 1" + assert abs(docs[0].metadata["score"] - 0.9) < 0.1 # 1 - 0.1 = 0.9 + assert docs[0].metadata["distance"] == 0.1 + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_search_by_vector_euclidean(self, mock_pool_class): + """Test vector search with euclidean distance.""" + config = AlibabaCloudMySQLVectorConfig( + host="localhost", + port=3306, + user="test_user", + password="test_password", + database="test_db", + max_connection=5, + distance_function="euclidean", + ) + + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + mock_cursor.__iter__ = lambda self: iter( + [{"meta": json.dumps({"doc_id": "doc1", "source": "test"}), "text": "Test document 1", "distance": 2.0}] + ) + + vector_store = AlibabaCloudMySQLVector(self.collection_name, config) + query_vector = [0.1, 0.2, 0.3, 0.4] + docs = vector_store.search_by_vector(query_vector, top_k=5) + + assert len(docs) == 1 + assert abs(docs[0].metadata["score"] - 1.0 / 3.0) < 0.01 # 1/(1+2) = 1/3 + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_search_by_vector_with_filter(self, mock_pool_class): + """Test vector search with document ID filter.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + 
mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + mock_cursor.__iter__ = lambda self: iter([]) + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + query_vector = [0.1, 0.2, 0.3, 0.4] + docs = vector_store.search_by_vector(query_vector, top_k=5, document_ids_filter=["dataset1"]) + + # Verify the SQL contains the WHERE clause for filtering + execute_calls = mock_cursor.execute.call_args_list + search_calls = [call for call in execute_calls if "VEC_DISTANCE" in str(call)] + assert len(search_calls) > 0 + search_call = search_calls[0] + assert "WHERE JSON_UNQUOTE" in search_call[0][0] + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_search_by_vector_with_score_threshold(self, mock_pool_class): + """Test vector search with score threshold.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + mock_cursor.__iter__ = lambda self: iter( + [ + { + "meta": json.dumps({"doc_id": "doc1", "source": "test"}), + "text": "High similarity document", + "distance": 0.1, # High similarity (score = 0.9) + }, + { + "meta": json.dumps({"doc_id": "doc2", "source": "test"}), + "text": "Low similarity document", + "distance": 0.8, # Low similarity (score = 0.2) + }, + ] + ) + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + query_vector = [0.1, 0.2, 0.3, 0.4] + docs = vector_store.search_by_vector(query_vector, top_k=5, score_threshold=0.5) + + # Only the high similarity document should be returned + assert len(docs) == 1 + assert docs[0].page_content == "High similarity document" + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_search_by_vector_invalid_top_k(self, mock_pool_class): + """Test vector search with invalid top_k.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + query_vector = [0.1, 0.2, 0.3, 0.4] + + with pytest.raises(ValueError): + vector_store.search_by_vector(query_vector, top_k=0) + + with pytest.raises(ValueError): + vector_store.search_by_vector(query_vector, top_k="invalid") + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_search_by_full_text(self, mock_pool_class): + """Test full-text search.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + mock_cursor.__iter__ = lambda self: 
iter( + [ + { + "meta": {"doc_id": "doc1", "source": "test"}, + "text": "This document contains machine learning content", + "score": 1.5, + } + ] + ) + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + docs = vector_store.search_by_full_text("machine learning", top_k=5) + + assert len(docs) == 1 + assert docs[0].page_content == "This document contains machine learning content" + assert docs[0].metadata["score"] == 1.5 + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_search_by_full_text_with_filter(self, mock_pool_class): + """Test full-text search with document ID filter.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + mock_cursor.__iter__ = lambda self: iter([]) + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + docs = vector_store.search_by_full_text("machine learning", top_k=5, document_ids_filter=["dataset1"]) + + # Verify the SQL contains the AND clause for filtering + execute_calls = mock_cursor.execute.call_args_list + search_calls = [call for call in execute_calls if "MATCH" in str(call)] + assert len(search_calls) > 0 + search_call = search_calls[0] + assert "AND JSON_UNQUOTE" in search_call[0][0] + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_search_by_full_text_invalid_top_k(self, mock_pool_class): + """Test full-text search with invalid top_k.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + + with pytest.raises(ValueError): + vector_store.search_by_full_text("test", top_k=0) + + with pytest.raises(ValueError): + vector_store.search_by_full_text("test", top_k="invalid") + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + def test_delete_collection(self, mock_pool_class): + """Test deleting the entire collection.""" + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + + vector_store = AlibabaCloudMySQLVector(self.collection_name, self.config) + vector_store.delete() + + # Check that DROP TABLE SQL was executed + execute_calls = mock_cursor.execute.call_args_list + drop_calls = [call for call in execute_calls if "DROP TABLE" in str(call)] + assert len(drop_calls) == 1 + drop_call = drop_calls[0] + assert f"DROP TABLE IF EXISTS {self.collection_name.lower()}" in drop_call[0][0] + + @patch( + "core.rag.datasource.vdb.alibabacloud_mysql.alibabacloud_mysql_vector.mysql.connector.pooling.MySQLConnectionPool" + ) + 
def test_unsupported_distance_function(self, mock_pool_class): + """Test that Pydantic validation rejects unsupported distance functions.""" + # Test that creating config with unsupported distance function raises ValidationError + with pytest.raises(ValueError) as context: + AlibabaCloudMySQLVectorConfig( + host="localhost", + port=3306, + user="test_user", + password="test_password", + database="test_db", + max_connection=5, + distance_function="manhattan", # Unsupported - not in Literal["cosine", "euclidean"] + ) + + # The error should be related to validation + assert "Input should be 'cosine' or 'euclidean'" in str(context.value) or "manhattan" in str(context.value) + + def _setup_mocks(self, mock_redis, mock_pool_class): + """Helper method to setup common mocks.""" + # Mock Redis operations + mock_redis.lock.return_value.__enter__ = MagicMock() + mock_redis.lock.return_value.__exit__ = MagicMock() + mock_redis.get.return_value = None + mock_redis.set.return_value = None + + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + # Mock connection and cursor + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.get_connection.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.side_effect = [{"VERSION()": "8.0.36"}, {"vector_support": True}] + + +if __name__ == "__main__": + unittest.main() diff --git a/api/uv.lock b/api/uv.lock index af368199b7..49339129e1 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1449,6 +1449,7 @@ vdb = [ { name = "couchbase" }, { name = "elasticsearch" }, { name = "mo-vector" }, + { name = "mysql-connector-python" }, { name = "opensearch-py" }, { name = "oracledb" }, { name = "pgvecto-rs", extra = ["sqlalchemy"] }, @@ -1637,6 +1638,7 @@ vdb = [ { name = "couchbase", specifier = "~=4.3.0" }, { name = "elasticsearch", specifier = "==8.14.0" }, { name = "mo-vector", specifier = "~=0.1.13" }, + { name = "mysql-connector-python", specifier = ">=9.3.0" }, { name = "opensearch-py", specifier = "==2.4.0" }, { name = "oracledb", specifier = "==3.3.0" }, { name = "pgvecto-rs", extras = ["sqlalchemy"], specifier = "~=0.2.1" }, @@ -3437,6 +3439,25 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, ] +[[package]] +name = "mysql-connector-python" +version = "9.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/02/77/2b45e6460d05b1f1b7a4c8eb79a50440b4417971973bb78c9ef6cad630a6/mysql_connector_python-9.4.0.tar.gz", hash = "sha256:d111360332ae78933daf3d48ff497b70739aa292ab0017791a33e826234e743b", size = 12185532, upload-time = "2025-07-22T08:02:05.788Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/0c/4365a802129be9fa63885533c38be019f1c6b6f5bcf8844ac53902314028/mysql_connector_python-9.4.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:7df1a8ddd182dd8adc914f6dc902a986787bf9599705c29aca7b2ce84e79d361", size = 17501627, upload-time = "2025-07-22T07:57:45.416Z" }, + { url = "https://files.pythonhosted.org/packages/c0/bf/ca596c00d7a6eaaf8ef2f66c9b23cd312527f483073c43ffac7843049cb4/mysql_connector_python-9.4.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:3892f20472e13e63b1fb4983f454771dd29f211b09724e69a9750e299542f2f8", size = 18369494, 
upload-time = "2025-07-22T07:57:49.714Z" }, + { url = "https://files.pythonhosted.org/packages/25/14/6510a11ed9f80d77f743dc207773092c4ab78d5efa454b39b48480315d85/mysql_connector_python-9.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:d3e87142103d71c4df647ece30f98e85e826652272ed1c74822b56f6acdc38e7", size = 33516187, upload-time = "2025-07-22T07:57:55.294Z" }, + { url = "https://files.pythonhosted.org/packages/16/a8/4f99d80f1cf77733ce9a44b6adb7f0dd7079e7afa51ca4826515ef0c3e16/mysql_connector_python-9.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:b27fcd403436fe83bafb2fe7fcb785891e821e639275c4ad3b3bd1e25f533206", size = 33917818, upload-time = "2025-07-22T07:58:00.523Z" }, + { url = "https://files.pythonhosted.org/packages/15/9c/127f974ca9d5ee25373cb5433da06bb1f36e05f2a6b7436da1fe9c6346b0/mysql_connector_python-9.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd6ff5afb9c324b0bbeae958c93156cce4168c743bf130faf224d52818d1f0ee", size = 16392378, upload-time = "2025-07-22T07:58:04.669Z" }, + { url = "https://files.pythonhosted.org/packages/03/7c/a543fb17c2dfa6be8548dfdc5879a0c7924cd5d1c79056c48472bb8fe858/mysql_connector_python-9.4.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:4efa3898a24aba6a4bfdbf7c1f5023c78acca3150d72cc91199cca2ccd22f76f", size = 17503693, upload-time = "2025-07-22T07:58:08.96Z" }, + { url = "https://files.pythonhosted.org/packages/cb/6e/c22fbee05f5cfd6ba76155b6d45f6261d8d4c1e36e23de04e7f25fbd01a4/mysql_connector_python-9.4.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:665c13e7402235162e5b7a2bfdee5895192121b64ea455c90a81edac6a48ede5", size = 18371987, upload-time = "2025-07-22T07:58:13.273Z" }, + { url = "https://files.pythonhosted.org/packages/b4/fd/f426f5f35a3d3180c7f84d1f96b4631be2574df94ca1156adab8618b236c/mysql_connector_python-9.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:815aa6cad0f351c1223ef345781a538f2e5e44ef405fdb3851eb322bd9c4ca2b", size = 33516214, upload-time = "2025-07-22T07:58:18.967Z" }, + { url = "https://files.pythonhosted.org/packages/45/5a/1b053ae80b43cd3ccebc4bb99a98826969b3b0f8adebdcc2530750ad76ed/mysql_connector_python-9.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b3436a2c8c0ec7052932213e8d01882e6eb069dbab33402e685409084b133a1c", size = 33918565, upload-time = "2025-07-22T07:58:25.28Z" }, + { url = "https://files.pythonhosted.org/packages/cb/69/36b989de675d98ba8ff7d45c96c30c699865c657046f2e32db14e78f13d9/mysql_connector_python-9.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:57b0c224676946b70548c56798d5023f65afa1ba5b8ac9f04a143d27976c7029", size = 16392563, upload-time = "2025-07-22T07:58:29.623Z" }, + { url = "https://files.pythonhosted.org/packages/36/34/b6165e15fd45a8deb00932d8e7d823de7650270873b4044c4db6688e1d8f/mysql_connector_python-9.4.0-py2.py3-none-any.whl", hash = "sha256:56e679169c704dab279b176fab2a9ee32d2c632a866c0f7cd48a8a1e2cf802c4", size = 406574, upload-time = "2025-07-22T07:59:08.394Z" }, +] + [[package]] name = "nest-asyncio" version = "1.6.0" diff --git a/docker/.env.example b/docker/.env.example index 6d07cf7fa5..b0e8d020ba 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -449,7 +449,7 @@ SUPABASE_URL=your-server-url # ------------------------------ # The type of vector store to use. 
-# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`, `matrixone`, `clickzetta`.
+# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`, `matrixone`, `clickzetta`, `alibabacloud_mysql`.
 VECTOR_STORE=weaviate
 # Prefix used to create collection name in vector database
 VECTOR_INDEX_NAME_PREFIX=Vector_index
@@ -580,6 +580,15 @@ ORACLE_WALLET_LOCATION=/app/api/storage/wallet
 ORACLE_WALLET_PASSWORD=dify
 ORACLE_IS_AUTONOMOUS=false
 
+# AlibabaCloud MySQL configuration, only available when VECTOR_STORE is `alibabacloud_mysql`
+ALIBABACLOUD_MYSQL_HOST=127.0.0.1
+ALIBABACLOUD_MYSQL_PORT=3306
+ALIBABACLOUD_MYSQL_USER=root
+ALIBABACLOUD_MYSQL_PASSWORD=difyai123456
+ALIBABACLOUD_MYSQL_DATABASE=dify
+ALIBABACLOUD_MYSQL_MAX_CONNECTION=5
+ALIBABACLOUD_MYSQL_HNSW_M=6
+
 # relyt configurations, only available when VECTOR_STORE is `relyt`
 RELYT_HOST=db
 RELYT_PORT=5432
diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml
index 5d47471093..0df648f38f 100644
--- a/docker/docker-compose.yaml
+++ b/docker/docker-compose.yaml
@@ -244,6 +244,13 @@ x-shared-env: &shared-api-worker-env
   ORACLE_WALLET_LOCATION: ${ORACLE_WALLET_LOCATION:-/app/api/storage/wallet}
   ORACLE_WALLET_PASSWORD: ${ORACLE_WALLET_PASSWORD:-dify}
   ORACLE_IS_AUTONOMOUS: ${ORACLE_IS_AUTONOMOUS:-false}
+  ALIBABACLOUD_MYSQL_HOST: ${ALIBABACLOUD_MYSQL_HOST:-127.0.0.1}
+  ALIBABACLOUD_MYSQL_PORT: ${ALIBABACLOUD_MYSQL_PORT:-3306}
+  ALIBABACLOUD_MYSQL_USER: ${ALIBABACLOUD_MYSQL_USER:-root}
+  ALIBABACLOUD_MYSQL_PASSWORD: ${ALIBABACLOUD_MYSQL_PASSWORD:-difyai123456}
+  ALIBABACLOUD_MYSQL_DATABASE: ${ALIBABACLOUD_MYSQL_DATABASE:-dify}
+  ALIBABACLOUD_MYSQL_MAX_CONNECTION: ${ALIBABACLOUD_MYSQL_MAX_CONNECTION:-5}
+  ALIBABACLOUD_MYSQL_HNSW_M: ${ALIBABACLOUD_MYSQL_HNSW_M:-6}
   RELYT_HOST: ${RELYT_HOST:-db}
   RELYT_PORT: ${RELYT_PORT:-5432}
   RELYT_USER: ${RELYT_USER:-postgres}
From 275a0f9ddd04d180e5178c6fc45acd460537cd47 Mon Sep 17 00:00:00 2001
From: Maries
Date: Sat, 11 Oct 2025 12:43:09 +0800
Subject: [PATCH 27/49] chore(workflows): update deployment configurations for
 trigger dev (#26753)

---
 .github/workflows/build-push.yml                   | 3 +--
 .../{deploy-rag-dev.yml => deploy-trigger-dev.yml} | 8 ++++----
 2 files changed, 5 insertions(+), 6 deletions(-)
 rename .github/workflows/{deploy-rag-dev.yml => deploy-trigger-dev.yml} (75%)

diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml
index 24a9da4400..f7f464a601 100644
--- a/.github/workflows/build-push.yml
+++ b/.github/workflows/build-push.yml
@@ -4,8 +4,7 @@ on:
   push:
     branches:
       - "main"
-      - "deploy/dev"
-      - "deploy/enterprise"
+      - "deploy/**"
       - "build/**"
       - "release/e-*"
       - "hotfix/**"
diff --git a/.github/workflows/deploy-rag-dev.yml b/.github/workflows/deploy-trigger-dev.yml
similarity index 75%
rename from .github/workflows/deploy-rag-dev.yml
rename to .github/workflows/deploy-trigger-dev.yml
index 86265aad6d..2d9a904fc5 100644
--- a/.github/workflows/deploy-rag-dev.yml
+++ b/.github/workflows/deploy-trigger-dev.yml
@@ -1,4 
+1,4 @@ -name: Deploy RAG Dev +name: Deploy Trigger Dev permissions: contents: read @@ -7,7 +7,7 @@ on: workflow_run: workflows: ["Build and Push API & Web"] branches: - - "deploy/rag-dev" + - "deploy/trigger-dev" types: - completed @@ -16,12 +16,12 @@ jobs: runs-on: ubuntu-latest if: | github.event.workflow_run.conclusion == 'success' && - github.event.workflow_run.head_branch == 'deploy/rag-dev' + github.event.workflow_run.head_branch == 'deploy/trigger-dev' steps: - name: Deploy to server uses: appleboy/ssh-action@v0.1.8 with: - host: ${{ secrets.RAG_SSH_HOST }} + host: ${{ secrets.TRIGGER_SSH_HOST }} username: ${{ secrets.SSH_USER }} key: ${{ secrets.SSH_PRIVATE_KEY }} script: | From 586f210d6e5684c4c02ac06f86d7b8e4bf9ab6f4 Mon Sep 17 00:00:00 2001 From: GuanMu Date: Sat, 11 Oct 2025 13:01:05 +0800 Subject: [PATCH 28/49] chore: remove unused dependencies for dagre from package.json and pnpm-lock.yaml (#26755) --- web/package.json | 2 -- web/pnpm-lock.yaml | 24 ------------------------ 2 files changed, 26 deletions(-) diff --git a/web/package.json b/web/package.json index 2a8972ee80..a5cfd387f7 100644 --- a/web/package.json +++ b/web/package.json @@ -43,7 +43,6 @@ "knip": "knip" }, "dependencies": { - "@dagrejs/dagre": "^1.1.4", "@emoji-mart/data": "^1.2.1", "@floating-ui/react": "^0.26.25", "@formatjs/intl-localematcher": "^0.5.6", @@ -162,7 +161,6 @@ "@testing-library/dom": "^10.4.0", "@testing-library/jest-dom": "^6.8.0", "@testing-library/react": "^16.0.1", - "@types/dagre": "^0.7.52", "@types/jest": "^29.5.13", "@types/js-cookie": "^3.0.6", "@types/lodash-es": "^4.17.12", diff --git a/web/pnpm-lock.yaml b/web/pnpm-lock.yaml index 8046f94d59..0a45f14ba0 100644 --- a/web/pnpm-lock.yaml +++ b/web/pnpm-lock.yaml @@ -49,9 +49,6 @@ importers: .: dependencies: - '@dagrejs/dagre': - specifier: ^1.1.4 - version: 1.1.5 '@emoji-mart/data': specifier: ^1.2.1 version: 1.2.1 @@ -401,9 +398,6 @@ importers: '@testing-library/react': specifier: ^16.0.1 version: 16.3.0(@testing-library/dom@10.4.0)(@types/react-dom@19.1.7(@types/react@19.1.11))(@types/react@19.1.11)(react-dom@19.1.1(react@19.1.1))(react@19.1.1) - '@types/dagre': - specifier: ^0.7.52 - version: 0.7.53 '@types/jest': specifier: ^29.5.13 version: 29.5.14 @@ -1329,13 +1323,6 @@ packages: resolution: {integrity: sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==} engines: {node: '>=12'} - '@dagrejs/dagre@1.1.5': - resolution: {integrity: sha512-Ghgrh08s12DCL5SeiR6AoyE80mQELTWhJBRmXfFoqDiFkR458vPEdgTbbjA0T+9ETNxUblnD0QW55tfdvi5pjQ==} - - '@dagrejs/graphlib@2.2.4': - resolution: {integrity: sha512-mepCf/e9+SKYy1d02/UkvSy6+6MoyXhVxP8lLDfA7BPE1X1d4dR0sZznmbM8/XVJ1GPM+Svnx7Xj6ZweByWUkw==} - engines: {node: '>17.0.0'} - '@discoveryjs/json-ext@0.5.7': resolution: {integrity: sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==} engines: {node: '>=10.0.0'} @@ -3338,9 +3325,6 @@ packages: '@types/d3@7.4.3': resolution: {integrity: sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==} - '@types/dagre@0.7.53': - resolution: {integrity: sha512-f4gkWqzPZvYmKhOsDnhq/R8mO4UMcKdxZo+i5SCkOU1wvGeHJeUXGIHeE9pnwGyPMDof1Vx5ZQo4nxpeg2TTVQ==} - '@types/debug@4.1.12': resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} @@ -9785,12 +9769,6 @@ snapshots: '@jridgewell/trace-mapping': 0.3.9 optional: true - '@dagrejs/dagre@1.1.5': - dependencies: - '@dagrejs/graphlib': 
2.2.4 - - '@dagrejs/graphlib@2.2.4': {} - '@discoveryjs/json-ext@0.5.7': {} '@emnapi/core@1.5.0': @@ -12129,8 +12107,6 @@ snapshots: '@types/d3-transition': 3.0.9 '@types/d3-zoom': 3.0.8 - '@types/dagre@0.7.53': {} - '@types/debug@4.1.12': dependencies: '@types/ms': 2.1.0 From 456dbfe7d7ed538925cd8f7d1139c3a1fd96d515 Mon Sep 17 00:00:00 2001 From: Guangdong Liu Date: Sat, 11 Oct 2025 13:48:57 +0800 Subject: [PATCH 29/49] feat: add tracking for updated_by and updated_at fields in app models (#26736) --- api/controllers/console/app/model_config.py | 3 +++ api/controllers/console/app/workflow.py | 9 +++++++-- api/services/app_dsl_service.py | 2 ++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/api/controllers/console/app/model_config.py b/api/controllers/console/app/model_config.py index e71b774d3e..fa6e3f8738 100644 --- a/api/controllers/console/app/model_config.py +++ b/api/controllers/console/app/model_config.py @@ -14,6 +14,7 @@ from core.tools.tool_manager import ToolManager from core.tools.utils.configuration import ToolParameterConfigurationManager from events.app_event import app_model_config_was_updated from extensions.ext_database import db +from libs.datetime_utils import naive_utc_now from libs.login import login_required from models.account import Account from models.model import AppMode, AppModelConfig @@ -172,6 +173,8 @@ class ModelConfigResource(Resource): db.session.flush() app_model.app_model_config_id = new_app_model_config.id + app_model.updated_by = current_user.id + app_model.updated_at = naive_utc_now() db.session.commit() app_model_config_was_updated.send(app_model, app_model_config=new_app_model_config) diff --git a/api/controllers/console/app/workflow.py b/api/controllers/console/app/workflow.py index 1f5cbbeca5..578d864b80 100644 --- a/api/controllers/console/app/workflow.py +++ b/api/controllers/console/app/workflow.py @@ -25,6 +25,7 @@ from factories import file_factory, variable_factory from fields.workflow_fields import workflow_fields, workflow_pagination_fields from fields.workflow_run_fields import workflow_run_node_execution_fields from libs import helper +from libs.datetime_utils import naive_utc_now from libs.helper import TimestampField, uuid_value from libs.login import current_user, login_required from models import App @@ -674,8 +675,12 @@ class PublishedWorkflowApi(Resource): marked_comment=args.marked_comment or "", ) - app_model.workflow_id = workflow.id - db.session.commit() # NOTE: this is necessary for update app_model.workflow_id + # Update app_model within the same session to ensure atomicity + app_model_in_session = session.get(App, app_model.id) + if app_model_in_session: + app_model_in_session.workflow_id = workflow.id + app_model_in_session.updated_by = current_user.id + app_model_in_session.updated_at = naive_utc_now() workflow_created_at = TimestampField().format(workflow.created_at) diff --git a/api/services/app_dsl_service.py b/api/services/app_dsl_service.py index 311f80bef6..e2915ebfbb 100644 --- a/api/services/app_dsl_service.py +++ b/api/services/app_dsl_service.py @@ -29,6 +29,7 @@ from core.workflow.nodes.tool.entities import ToolNodeData from events.app_event import app_model_config_was_updated, app_was_created from extensions.ext_redis import redis_client from factories import variable_factory +from libs.datetime_utils import naive_utc_now from models import Account, App, AppMode from models.model import AppModelConfig from models.workflow import Workflow @@ -439,6 +440,7 @@ class AppDslService: app.icon = icon 
app.icon_background = icon_background or app_data.get("icon_background", app.icon_background) app.updated_by = account.id + app.updated_at = naive_utc_now() else: if account.current_tenant_id is None: raise ValueError("Current tenant is not set") From bd5df5cf1cf62a461c92ce3b20ecc64c167a1259 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Sat, 11 Oct 2025 16:33:31 +0800 Subject: [PATCH 30/49] feat: add InlineDeleteConfirm base component (#26762) --- .../base/inline-delete-confirm/index.spec.tsx | 152 ++++++++++++++++++ .../base/inline-delete-confirm/index.tsx | 90 +++++++++++ web/i18n/en-US/common.ts | 4 + web/i18n/ja-JP/common.ts | 4 + web/i18n/zh-Hans/common.ts | 4 + 5 files changed, 254 insertions(+) create mode 100644 web/app/components/base/inline-delete-confirm/index.spec.tsx create mode 100644 web/app/components/base/inline-delete-confirm/index.tsx diff --git a/web/app/components/base/inline-delete-confirm/index.spec.tsx b/web/app/components/base/inline-delete-confirm/index.spec.tsx new file mode 100644 index 0000000000..c113c4ade9 --- /dev/null +++ b/web/app/components/base/inline-delete-confirm/index.spec.tsx @@ -0,0 +1,152 @@ +import React from 'react' +import { cleanup, fireEvent, render } from '@testing-library/react' +import InlineDeleteConfirm from './index' + +// Mock react-i18next +jest.mock('react-i18next', () => ({ + useTranslation: () => ({ + t: (key: string) => { + const translations: Record = { + 'common.operation.deleteConfirmTitle': 'Delete?', + 'common.operation.yes': 'Yes', + 'common.operation.no': 'No', + 'common.operation.confirmAction': 'Please confirm your action.', + } + return translations[key] || key + }, + }), +})) + +afterEach(cleanup) + +describe('InlineDeleteConfirm', () => { + describe('Rendering', () => { + test('should render with default text', () => { + const onConfirm = jest.fn() + const onCancel = jest.fn() + const { getByText } = render( + , + ) + + expect(getByText('Delete?')).toBeInTheDocument() + expect(getByText('No')).toBeInTheDocument() + expect(getByText('Yes')).toBeInTheDocument() + }) + + test('should render with custom text', () => { + const onConfirm = jest.fn() + const onCancel = jest.fn() + const { getByText } = render( + , + ) + + expect(getByText('Remove?')).toBeInTheDocument() + expect(getByText('Cancel')).toBeInTheDocument() + expect(getByText('Confirm')).toBeInTheDocument() + }) + + test('should have proper ARIA attributes', () => { + const onConfirm = jest.fn() + const onCancel = jest.fn() + const { container } = render( + , + ) + + const wrapper = container.firstChild as HTMLElement + expect(wrapper).toHaveAttribute('aria-labelledby', 'inline-delete-confirm-title') + expect(wrapper).toHaveAttribute('aria-describedby', 'inline-delete-confirm-description') + }) + }) + + describe('Button interactions', () => { + test('should call onCancel when cancel button is clicked', () => { + const onConfirm = jest.fn() + const onCancel = jest.fn() + const { getByText } = render( + , + ) + + fireEvent.click(getByText('No')) + expect(onCancel).toHaveBeenCalledTimes(1) + expect(onConfirm).not.toHaveBeenCalled() + }) + + test('should call onConfirm when confirm button is clicked', () => { + const onConfirm = jest.fn() + const onCancel = jest.fn() + const { getByText } = render( + , + ) + + fireEvent.click(getByText('Yes')) + expect(onConfirm).toHaveBeenCalledTimes(1) + expect(onCancel).not.toHaveBeenCalled() + }) + }) + + describe('Variant prop', () => { + test('should render with delete variant by 
default', () => { + const onConfirm = jest.fn() + const onCancel = jest.fn() + const { getByText } = render( + , + ) + + const confirmButton = getByText('Yes').closest('button') + expect(confirmButton?.className).toContain('btn-destructive') + }) + + test('should render without destructive class for warning variant', () => { + const onConfirm = jest.fn() + const onCancel = jest.fn() + const { getByText } = render( + , + ) + + const confirmButton = getByText('Yes').closest('button') + expect(confirmButton?.className).not.toContain('btn-destructive') + }) + + test('should render without destructive class for info variant', () => { + const onConfirm = jest.fn() + const onCancel = jest.fn() + const { getByText } = render( + , + ) + + const confirmButton = getByText('Yes').closest('button') + expect(confirmButton?.className).not.toContain('btn-destructive') + }) + }) + + describe('Custom className', () => { + test('should apply custom className to wrapper', () => { + const onConfirm = jest.fn() + const onCancel = jest.fn() + const { container } = render( + , + ) + + const wrapper = container.firstChild as HTMLElement + expect(wrapper.className).toContain('custom-class') + }) + }) +}) diff --git a/web/app/components/base/inline-delete-confirm/index.tsx b/web/app/components/base/inline-delete-confirm/index.tsx new file mode 100644 index 0000000000..2a33e14701 --- /dev/null +++ b/web/app/components/base/inline-delete-confirm/index.tsx @@ -0,0 +1,90 @@ +'use client' +import type { FC } from 'react' +import { useTranslation } from 'react-i18next' +import Button from '@/app/components/base/button' +import cn from '@/utils/classnames' + +export type InlineDeleteConfirmProps = { + title?: string + confirmText?: string + cancelText?: string + onConfirm: () => void + onCancel: () => void + className?: string + variant?: 'delete' | 'warning' | 'info' +} + +const InlineDeleteConfirm: FC = ({ + title, + confirmText, + cancelText, + onConfirm, + onCancel, + className, + variant = 'delete', +}) => { + const { t } = useTranslation() + + const titleText = title || t('common.operation.deleteConfirmTitle', 'Delete?') + const confirmTxt = confirmText || t('common.operation.yes', 'Yes') + const cancelTxt = cancelText || t('common.operation.no', 'No') + + return ( +
+    <div
+      className={cn(
+        'flex items-center gap-1 rounded-lg border-[0.5px] border-components-panel-border bg-components-panel-bg-blur p-1 shadow-lg backdrop-blur-[5px]',
+        className,
+      )}
+      role='alertdialog'
+      aria-labelledby='inline-delete-confirm-title'
+      aria-describedby='inline-delete-confirm-description'
+    >
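+      {/* aria-labelledby/aria-describedby on the wrapper point at the title below and the visually hidden description at the bottom */}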
+      <div id='inline-delete-confirm-title' className='system-sm-medium px-1 text-text-primary'>
+        {titleText}
+      </div>
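+      {/* Cancel renders first; only the 'delete' variant opts into Button's destructive styling on confirm */}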
+      <div className='flex items-center gap-1'>
+        <Button size='small' onClick={onCancel}>
+          {cancelTxt}
+        </Button>
+        <Button
+          size='small'
+          variant='primary'
+          destructive={variant === 'delete'}
+          onClick={onConfirm}
+        >
+          {confirmTxt}
+        </Button>
+      </div>
+      <span id='inline-delete-confirm-description' className='sr-only'>
+        {t('common.operation.confirmAction', 'Please confirm your action.')}
+      </span>
+    </div>
+ ) +} + +InlineDeleteConfirm.displayName = 'InlineDeleteConfirm' + +export default InlineDeleteConfirm diff --git a/web/i18n/en-US/common.ts b/web/i18n/en-US/common.ts index b9d315388f..4f177ca0c3 100644 --- a/web/i18n/en-US/common.ts +++ b/web/i18n/en-US/common.ts @@ -18,6 +18,10 @@ const translation = { cancel: 'Cancel', clear: 'Clear', save: 'Save', + yes: 'Yes', + no: 'No', + deleteConfirmTitle: 'Delete?', + confirmAction: 'Please confirm your action.', saveAndEnable: 'Save & Enable', edit: 'Edit', add: 'Add', diff --git a/web/i18n/ja-JP/common.ts b/web/i18n/ja-JP/common.ts index 5526ac0441..52545c460b 100644 --- a/web/i18n/ja-JP/common.ts +++ b/web/i18n/ja-JP/common.ts @@ -67,6 +67,10 @@ const translation = { selectAll: 'すべて選択', deSelectAll: 'すべて選択解除', config: 'コンフィグ', + yes: 'はい', + no: 'いいえ', + deleteConfirmTitle: '削除しますか?', + confirmAction: '操作を確認してください。', }, errorMsg: { fieldRequired: '{{field}}は必要です', diff --git a/web/i18n/zh-Hans/common.ts b/web/i18n/zh-Hans/common.ts index 0ecdb20d5e..e73ac4cc6b 100644 --- a/web/i18n/zh-Hans/common.ts +++ b/web/i18n/zh-Hans/common.ts @@ -18,6 +18,10 @@ const translation = { cancel: '取消', clear: '清空', save: '保存', + yes: '是', + no: '否', + deleteConfirmTitle: '删除?', + confirmAction: '请确认您的操作。', saveAndEnable: '保存并启用', edit: '编辑', add: '添加', From 5217017e69807cd1cc076fb9e2e22f778904e38d Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 11 Oct 2025 17:23:40 +0800 Subject: [PATCH 31/49] chore: translate i18n files and update type definitions (#26763) Co-authored-by: asukaminato0721 <30024051+asukaminato0721@users.noreply.github.com> --- web/i18n/de-DE/common.ts | 4 ++++ web/i18n/es-ES/common.ts | 4 ++++ web/i18n/fa-IR/common.ts | 4 ++++ web/i18n/fr-FR/common.ts | 4 ++++ web/i18n/hi-IN/common.ts | 4 ++++ web/i18n/id-ID/common.ts | 4 ++++ web/i18n/it-IT/common.ts | 4 ++++ web/i18n/ko-KR/common.ts | 4 ++++ web/i18n/pl-PL/common.ts | 4 ++++ web/i18n/pt-BR/common.ts | 4 ++++ web/i18n/ro-RO/common.ts | 4 ++++ web/i18n/ru-RU/common.ts | 4 ++++ web/i18n/sl-SI/common.ts | 4 ++++ web/i18n/th-TH/common.ts | 4 ++++ web/i18n/tr-TR/common.ts | 4 ++++ web/i18n/uk-UA/common.ts | 4 ++++ web/i18n/vi-VN/common.ts | 4 ++++ web/i18n/zh-Hant/common.ts | 4 ++++ 18 files changed, 72 insertions(+) diff --git a/web/i18n/de-DE/common.ts b/web/i18n/de-DE/common.ts index 9431fbbf6a..d64b295121 100644 --- a/web/i18n/de-DE/common.ts +++ b/web/i18n/de-DE/common.ts @@ -61,6 +61,10 @@ const translation = { selectAll: 'Alles auswählen', deSelectAll: 'Alle abwählen', config: 'Konfiguration', + yes: 'Ja', + deleteConfirmTitle: 'Löschen?', + no: 'Nein', + confirmAction: 'Bitte bestätigen Sie Ihre Aktion.', }, placeholder: { input: 'Bitte eingeben', diff --git a/web/i18n/es-ES/common.ts b/web/i18n/es-ES/common.ts index 74af4a03b6..55c2c5e474 100644 --- a/web/i18n/es-ES/common.ts +++ b/web/i18n/es-ES/common.ts @@ -61,6 +61,10 @@ const translation = { deSelectAll: 'Deseleccionar todo', selectAll: 'Seleccionar todo', config: 'Config', + confirmAction: 'Por favor, confirme su acción.', + deleteConfirmTitle: '¿Eliminar?', + yes: 'Sí', + no: 'No', }, errorMsg: { fieldRequired: '{{field}} es requerido', diff --git a/web/i18n/fa-IR/common.ts b/web/i18n/fa-IR/common.ts index dc6620ce2e..4d7482b143 100644 --- a/web/i18n/fa-IR/common.ts +++ b/web/i18n/fa-IR/common.ts @@ -61,6 +61,10 @@ const translation = { selectAll: 'انتخاب همه', deSelectAll: 'همه را انتخاب نکنید', config: 'تنظیمات', + no: 'نه', + deleteConfirmTitle: 'حذف 
شود؟', + yes: 'بله', + confirmAction: 'لطفاً اقدام خود را تأیید کنید.', }, errorMsg: { fieldRequired: '{{field}} الزامی است', diff --git a/web/i18n/fr-FR/common.ts b/web/i18n/fr-FR/common.ts index f1e8ad007c..d2f1b6287a 100644 --- a/web/i18n/fr-FR/common.ts +++ b/web/i18n/fr-FR/common.ts @@ -61,6 +61,10 @@ const translation = { deSelectAll: 'Désélectionner tout', selectAll: 'Sélectionner tout', config: 'Config', + no: 'Non', + confirmAction: 'Veuillez confirmer votre action.', + deleteConfirmTitle: 'Supprimer ?', + yes: 'Oui', }, placeholder: { input: 'Veuillez entrer', diff --git a/web/i18n/hi-IN/common.ts b/web/i18n/hi-IN/common.ts index d882b00929..acab7e9172 100644 --- a/web/i18n/hi-IN/common.ts +++ b/web/i18n/hi-IN/common.ts @@ -61,6 +61,10 @@ const translation = { selectAll: 'सभी चुनें', deSelectAll: 'सभी चयन हटाएँ', config: 'कॉन्फ़िगरेशन', + no: 'नहीं', + yes: 'हाँ', + deleteConfirmTitle: 'हटाएं?', + confirmAction: 'कृपया अपनी क्रिया की पुष्टि करें।', }, errorMsg: { fieldRequired: '{{field}} आवश्यक है', diff --git a/web/i18n/id-ID/common.ts b/web/i18n/id-ID/common.ts index b224f153f6..e57b9b3641 100644 --- a/web/i18n/id-ID/common.ts +++ b/web/i18n/id-ID/common.ts @@ -67,6 +67,10 @@ const translation = { sure: 'Saya yakin', imageCopied: 'Gambar yang disalin', config: 'Konfigurasi', + deleteConfirmTitle: 'Hapus?', + confirmAction: 'Silakan konfirmasi tindakan Anda.', + yes: 'Ya', + no: 'Tidak', }, errorMsg: { urlError: 'URL harus dimulai dengan http:// atau https://', diff --git a/web/i18n/it-IT/common.ts b/web/i18n/it-IT/common.ts index 4ba4f34240..14f705301a 100644 --- a/web/i18n/it-IT/common.ts +++ b/web/i18n/it-IT/common.ts @@ -61,6 +61,10 @@ const translation = { selectAll: 'Seleziona tutto', deSelectAll: 'Deseleziona tutto', config: 'Config', + no: 'No', + yes: 'Sì', + confirmAction: 'Per favore conferma la tua azione.', + deleteConfirmTitle: 'Eliminare?', }, errorMsg: { fieldRequired: '{{field}} è obbligatorio', diff --git a/web/i18n/ko-KR/common.ts b/web/i18n/ko-KR/common.ts index 9d2948c594..617c0851c8 100644 --- a/web/i18n/ko-KR/common.ts +++ b/web/i18n/ko-KR/common.ts @@ -61,6 +61,10 @@ const translation = { selectAll: '모두 선택', deSelectAll: '모두 선택 해제', config: '구성', + no: '아니요', + yes: '네', + deleteConfirmTitle: '삭제하시겠습니까?', + confirmAction: '귀하의 행동을 확인해 주세요.', }, placeholder: { input: '입력해주세요', diff --git a/web/i18n/pl-PL/common.ts b/web/i18n/pl-PL/common.ts index 3f820e14e0..5fa05d3ce4 100644 --- a/web/i18n/pl-PL/common.ts +++ b/web/i18n/pl-PL/common.ts @@ -61,6 +61,10 @@ const translation = { deSelectAll: 'Odznacz wszystkie', selectAll: 'Zaznacz wszystkie', config: 'Konfiguracja', + yes: 'Tak', + no: 'Nie', + deleteConfirmTitle: 'Usunąć?', + confirmAction: 'Proszę potwierdzić swoją akcję.', }, placeholder: { input: 'Proszę wprowadzić', diff --git a/web/i18n/pt-BR/common.ts b/web/i18n/pt-BR/common.ts index 3f5f353fb6..918bc24bf8 100644 --- a/web/i18n/pt-BR/common.ts +++ b/web/i18n/pt-BR/common.ts @@ -61,6 +61,10 @@ const translation = { deSelectAll: 'Desmarcar tudo', selectAll: 'Selecionar tudo', config: 'Configuração', + no: 'Não', + yes: 'Sim', + deleteConfirmTitle: 'Excluir?', + confirmAction: 'Por favor, confirme sua ação.', }, placeholder: { input: 'Por favor, insira', diff --git a/web/i18n/ro-RO/common.ts b/web/i18n/ro-RO/common.ts index 2e36e487fb..1a2f7f98c3 100644 --- a/web/i18n/ro-RO/common.ts +++ b/web/i18n/ro-RO/common.ts @@ -61,6 +61,10 @@ const translation = { deSelectAll: 'Deselectați tot', selectAll: 'Selectați tot', config: 'Configurație', + yes: 'Da', + 
deleteConfirmTitle: 'Ștergere?', + no: 'Nu', + confirmAction: 'Vă rugăm să confirmați acțiunea dumneavoastră.', }, placeholder: { input: 'Vă rugăm să introduceți', diff --git a/web/i18n/ru-RU/common.ts b/web/i18n/ru-RU/common.ts index 8f1fb3a51b..de5ff494a6 100644 --- a/web/i18n/ru-RU/common.ts +++ b/web/i18n/ru-RU/common.ts @@ -61,6 +61,10 @@ const translation = { selectAll: 'Выбрать все', deSelectAll: 'Снять выделение со всех', config: 'Конфигурация', + yes: 'Да', + no: 'Нет', + deleteConfirmTitle: 'Удалить?', + confirmAction: 'Пожалуйста, подтвердите ваше действие.', }, errorMsg: { fieldRequired: '{{field}} обязательно', diff --git a/web/i18n/sl-SI/common.ts b/web/i18n/sl-SI/common.ts index 2efd6f8de6..169d0997f6 100644 --- a/web/i18n/sl-SI/common.ts +++ b/web/i18n/sl-SI/common.ts @@ -61,6 +61,10 @@ const translation = { selectAll: 'Izberi vse', deSelectAll: 'Odberi vse', config: 'Konfiguracija', + no: 'Ne', + confirmAction: 'Prosimo, potrdite svoje dejanje.', + deleteConfirmTitle: 'Izbrisati?', + yes: 'Da', }, errorMsg: { fieldRequired: '{{field}} je obvezno', diff --git a/web/i18n/th-TH/common.ts b/web/i18n/th-TH/common.ts index a673629d3e..4149843371 100644 --- a/web/i18n/th-TH/common.ts +++ b/web/i18n/th-TH/common.ts @@ -61,6 +61,10 @@ const translation = { selectAll: 'เลือกทั้งหมด', deSelectAll: 'ยกเลิกการเลือกทั้งหมด', config: 'การตั้งค่า', + no: 'ไม่', + deleteConfirmTitle: 'ลบหรือไม่?', + confirmAction: 'กรุณายืนยันการกระทำของคุณ', + yes: 'ใช่', }, errorMsg: { fieldRequired: '{{field}} เป็นสิ่งจําเป็น', diff --git a/web/i18n/tr-TR/common.ts b/web/i18n/tr-TR/common.ts index b198bd5d63..14b4689419 100644 --- a/web/i18n/tr-TR/common.ts +++ b/web/i18n/tr-TR/common.ts @@ -61,6 +61,10 @@ const translation = { selectAll: 'Hepsini Seç', deSelectAll: 'Hepsini Seçme', config: 'Konfigürasyon', + no: 'Hayır', + yes: 'Evet', + deleteConfirmTitle: 'Silinsin mi?', + confirmAction: 'Lütfen işleminizi onaylayın.', }, errorMsg: { fieldRequired: '{{field}} gereklidir', diff --git a/web/i18n/uk-UA/common.ts b/web/i18n/uk-UA/common.ts index 69af3cc2db..ef9bccd7ff 100644 --- a/web/i18n/uk-UA/common.ts +++ b/web/i18n/uk-UA/common.ts @@ -61,6 +61,10 @@ const translation = { deSelectAll: 'Вимкнути все', selectAll: 'Вибрати все', config: 'Конфігурація', + yes: 'Так', + no: 'Ні', + deleteConfirmTitle: 'Видалити?', + confirmAction: 'Будь ласка, підтвердіть свої дії.', }, placeholder: { input: 'Будь ласка, введіть текст', diff --git a/web/i18n/vi-VN/common.ts b/web/i18n/vi-VN/common.ts index 216a2e2ed2..e9e1f17f56 100644 --- a/web/i18n/vi-VN/common.ts +++ b/web/i18n/vi-VN/common.ts @@ -61,6 +61,10 @@ const translation = { deSelectAll: 'Bỏ chọn tất cả', selectAll: 'Chọn Tất Cả', config: 'Cấu hình', + no: 'Không', + yes: 'Vâng', + deleteConfirmTitle: 'Xóa?', + confirmAction: 'Vui lòng xác nhận hành động của bạn.', }, placeholder: { input: 'Vui lòng nhập', diff --git a/web/i18n/zh-Hant/common.ts b/web/i18n/zh-Hant/common.ts index a5747ba300..273ecb010f 100644 --- a/web/i18n/zh-Hant/common.ts +++ b/web/i18n/zh-Hant/common.ts @@ -61,6 +61,10 @@ const translation = { deSelectAll: '全不選', selectAll: '全選', config: '配置', + yes: '是', + confirmAction: '請確認您的操作。', + deleteConfirmTitle: '刪除?', + no: '不', }, placeholder: { input: '請輸入', From a9b3539b900e84f6dcf3cdefce760960d04975d3 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Sat, 11 Oct 2025 17:45:42 +0800 Subject: [PATCH 32/49] feat: migrate Python SDK to httpx with async/await support (#26726) Signed-off-by: lyzno1 
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- sdks/python-client/MANIFEST.in | 2 + sdks/python-client/README.md | 186 ++++ sdks/python-client/dify_client/__init__.py | 17 + .../python-client/dify_client/async_client.py | 808 ++++++++++++++++++ sdks/python-client/dify_client/client.py | 321 ++++++- sdks/python-client/pyproject.toml | 43 + sdks/python-client/setup.py | 26 - sdks/python-client/tests/test_async_client.py | 250 ++++++ .../tests/test_httpx_migration.py | 331 +++++++ sdks/python-client/tests/test_new_apis.py | 416 --------- sdks/python-client/uv.lock | 271 ++++++ 11 files changed, 2183 insertions(+), 488 deletions(-) create mode 100644 sdks/python-client/dify_client/async_client.py create mode 100644 sdks/python-client/pyproject.toml delete mode 100644 sdks/python-client/setup.py create mode 100644 sdks/python-client/tests/test_async_client.py create mode 100644 sdks/python-client/tests/test_httpx_migration.py delete mode 100644 sdks/python-client/tests/test_new_apis.py create mode 100644 sdks/python-client/uv.lock diff --git a/sdks/python-client/MANIFEST.in b/sdks/python-client/MANIFEST.in index 12f44237a2..34b7e8711c 100644 --- a/sdks/python-client/MANIFEST.in +++ b/sdks/python-client/MANIFEST.in @@ -1 +1,3 @@ recursive-include dify_client *.py +include README.md +include LICENSE diff --git a/sdks/python-client/README.md b/sdks/python-client/README.md index 34b14b3a94..ebfb5f5397 100644 --- a/sdks/python-client/README.md +++ b/sdks/python-client/README.md @@ -10,6 +10,8 @@ First, install `dify-client` python sdk package: pip install dify-client ``` +### Synchronous Usage + Write your code with sdk: - completion generate with `blocking` response_mode @@ -221,3 +223,187 @@ answer = result.get("data").get("outputs") print(answer["answer"]) ``` + +- Dataset Management + +```python +from dify_client import KnowledgeBaseClient + +api_key = "your_api_key" +dataset_id = "your_dataset_id" + +# Use context manager to ensure proper resource cleanup +with KnowledgeBaseClient(api_key, dataset_id) as kb_client: + # Get dataset information + dataset_info = kb_client.get_dataset() + dataset_info.raise_for_status() + print(dataset_info.json()) + + # Update dataset configuration + update_response = kb_client.update_dataset( + name="Updated Dataset Name", + description="Updated description", + indexing_technique="high_quality" + ) + update_response.raise_for_status() + print(update_response.json()) + + # Batch update document status + batch_response = kb_client.batch_update_document_status( + action="enable", + document_ids=["doc_id_1", "doc_id_2", "doc_id_3"] + ) + batch_response.raise_for_status() + print(batch_response.json()) +``` + +- Conversation Variables Management + +```python +from dify_client import ChatClient + +api_key = "your_api_key" + +# Use context manager to ensure proper resource cleanup +with ChatClient(api_key) as chat_client: + # Get all conversation variables + variables = chat_client.get_conversation_variables( + conversation_id="conversation_id", + user="user_id" + ) + variables.raise_for_status() + print(variables.json()) + + # Update a specific conversation variable + update_var = chat_client.update_conversation_variable( + conversation_id="conversation_id", + variable_id="variable_id", + value="new_value", + user="user_id" + ) + update_var.raise_for_status() + print(update_var.json()) +``` + +### Asynchronous Usage + +The SDK provides full async/await support for all API operations using `httpx.AsyncClient`. 
All async clients mirror their synchronous counterparts but require `await` for method calls. + +- async chat with `blocking` response_mode + +```python +import asyncio +from dify_client import AsyncChatClient + +api_key = "your_api_key" + +async def main(): + # Use async context manager for proper resource cleanup + async with AsyncChatClient(api_key) as client: + response = await client.create_chat_message( + inputs={}, + query="Hello, how are you?", + user="user_id", + response_mode="blocking" + ) + response.raise_for_status() + result = response.json() + print(result.get('answer')) + +# Run the async function +asyncio.run(main()) +``` + +- async completion with `streaming` response_mode + +```python +import asyncio +import json +from dify_client import AsyncCompletionClient + +api_key = "your_api_key" + +async def main(): + async with AsyncCompletionClient(api_key) as client: + response = await client.create_completion_message( + inputs={"query": "What's the weather?"}, + response_mode="streaming", + user="user_id" + ) + response.raise_for_status() + + # Stream the response + async for line in response.aiter_lines(): + if line.startswith('data:'): + data = line[5:].strip() + if data: + chunk = json.loads(data) + print(chunk.get('answer', ''), end='', flush=True) + +asyncio.run(main()) +``` + +- async workflow execution + +```python +import asyncio +from dify_client import AsyncWorkflowClient + +api_key = "your_api_key" + +async def main(): + async with AsyncWorkflowClient(api_key) as client: + response = await client.run( + inputs={"query": "What is machine learning?"}, + response_mode="blocking", + user="user_id" + ) + response.raise_for_status() + result = response.json() + print(result.get("data").get("outputs")) + +asyncio.run(main()) +``` + +- async dataset management + +```python +import asyncio +from dify_client import AsyncKnowledgeBaseClient + +api_key = "your_api_key" +dataset_id = "your_dataset_id" + +async def main(): + async with AsyncKnowledgeBaseClient(api_key, dataset_id=dataset_id) as kb_client: + # Get dataset information + dataset_info = await kb_client.get_dataset() + dataset_info.raise_for_status() + print(dataset_info.json()) + + # List documents + docs = await kb_client.list_documents(page=1, page_size=10) + docs.raise_for_status() + print(docs.json()) + +asyncio.run(main()) +``` + +**Benefits of Async Usage:** + +- **Better Performance**: Handle multiple concurrent API requests efficiently (see the `asyncio.gather` sketch below) +- **Non-blocking I/O**: Don't block the event loop during network operations +- **Scalability**: Ideal for applications handling many simultaneous requests +- **Modern Python**: Leverages Python's native async/await syntax + +**Available Async Clients:** + +- `AsyncDifyClient` - Base async client +- `AsyncChatClient` - Async chat operations +- `AsyncCompletionClient` - Async completion operations +- `AsyncWorkflowClient` - Async workflow operations +- `AsyncKnowledgeBaseClient` - Async dataset/knowledge base operations +- `AsyncWorkspaceClient` - Async workspace operations + diff --git a/sdks/python-client/dify_client/__init__.py b/sdks/python-client/dify_client/__init__.py index e252bc0472..ced093b20a 100644 --- a/sdks/python-client/dify_client/__init__.py +++ b/sdks/python-client/dify_client/__init__.py @@ -7,11 +7,28 @@ from dify_client.client import ( WorkspaceClient, ) +from dify_client.async_client import ( + AsyncChatClient, + AsyncCompletionClient, + AsyncDifyClient, + AsyncKnowledgeBaseClient, + AsyncWorkflowClient, + AsyncWorkspaceClient, +) + __all__ = [ + #
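The "Better Performance" bullet above is easiest to see with `asyncio.gather`, which fans several requests out over one shared connection pool. A minimal sketch (the queries and user id are placeholders):

```python
import asyncio

from dify_client import AsyncChatClient

api_key = "your_api_key"

async def ask(client: AsyncChatClient, question: str) -> str:
    response = await client.create_chat_message(
        inputs={}, query=question, user="user_id", response_mode="blocking"
    )
    response.raise_for_status()
    return response.json().get("answer", "")

async def main():
    # One shared client means one shared connection pool for all three calls.
    async with AsyncChatClient(api_key) as client:
        answers = await asyncio.gather(
            ask(client, "What is Dify?"),
            ask(client, "What is RAG?"),
            ask(client, "What is an agent?"),
        )
    for answer in answers:
        print(answer)

asyncio.run(main())
```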
Synchronous clients "ChatClient", "CompletionClient", "DifyClient", "KnowledgeBaseClient", "WorkflowClient", "WorkspaceClient", + # Asynchronous clients + "AsyncChatClient", + "AsyncCompletionClient", + "AsyncDifyClient", + "AsyncKnowledgeBaseClient", + "AsyncWorkflowClient", + "AsyncWorkspaceClient", ] diff --git a/sdks/python-client/dify_client/async_client.py b/sdks/python-client/dify_client/async_client.py new file mode 100644 index 0000000000..984f668d0c --- /dev/null +++ b/sdks/python-client/dify_client/async_client.py @@ -0,0 +1,808 @@ +"""Asynchronous Dify API client. + +This module provides async/await support for all Dify API operations using httpx.AsyncClient. +All client classes mirror their synchronous counterparts but require `await` for method calls. + +Example: + import asyncio + from dify_client import AsyncChatClient + + async def main(): + async with AsyncChatClient(api_key="your-key") as client: + response = await client.create_chat_message( + inputs={}, + query="Hello", + user="user-123" + ) + print(response.json()) + + asyncio.run(main()) +""" + +import json +import os +from typing import Literal, Dict, List, Any, IO + +import aiofiles +import httpx + + +class AsyncDifyClient: + """Asynchronous Dify API client. + + This client uses httpx.AsyncClient for efficient async connection pooling. + It's recommended to use this client as a context manager: + + Example: + async with AsyncDifyClient(api_key="your-key") as client: + response = await client.get_app_info() + """ + + def __init__( + self, + api_key: str, + base_url: str = "https://api.dify.ai/v1", + timeout: float = 60.0, + ): + """Initialize the async Dify client. + + Args: + api_key: Your Dify API key + base_url: Base URL for the Dify API + timeout: Request timeout in seconds (default: 60.0) + """ + self.api_key = api_key + self.base_url = base_url + self._client = httpx.AsyncClient( + base_url=base_url, + timeout=httpx.Timeout(timeout, connect=5.0), + ) + + async def __aenter__(self): + """Support async context manager protocol.""" + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + """Clean up resources when exiting async context.""" + await self.aclose() + + async def aclose(self): + """Close the async HTTP client and release resources.""" + if hasattr(self, "_client"): + await self._client.aclose() + + async def _send_request( + self, + method: str, + endpoint: str, + json: dict | None = None, + params: dict | None = None, + stream: bool = False, + **kwargs, + ): + """Send an async HTTP request to the Dify API. + + Args: + method: HTTP method (GET, POST, PUT, PATCH, DELETE) + endpoint: API endpoint path + json: JSON request body + params: Query parameters + stream: Whether to stream the response + **kwargs: Additional arguments to pass to httpx.request + + Returns: + httpx.Response object + """ + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json", + } + + response = await self._client.request( + method, + endpoint, + json=json, + params=params, + headers=headers, + **kwargs, + ) + + return response + + async def _send_request_with_files(self, method: str, endpoint: str, data: dict, files: dict): + """Send an async HTTP request with file uploads. + + Args: + method: HTTP method (POST, PUT, etc.) 
+ endpoint: API endpoint path + data: Form data + files: Files to upload + + Returns: + httpx.Response object + """ + headers = {"Authorization": f"Bearer {self.api_key}"} + + response = await self._client.request( + method, + endpoint, + data=data, + headers=headers, + files=files, + ) + + return response + + async def message_feedback(self, message_id: str, rating: Literal["like", "dislike"], user: str): + """Send feedback for a message.""" + data = {"rating": rating, "user": user} + return await self._send_request("POST", f"/messages/{message_id}/feedbacks", data) + + async def get_application_parameters(self, user: str): + """Get application parameters.""" + params = {"user": user} + return await self._send_request("GET", "/parameters", params=params) + + async def file_upload(self, user: str, files: dict): + """Upload a file.""" + data = {"user": user} + return await self._send_request_with_files("POST", "/files/upload", data=data, files=files) + + async def text_to_audio(self, text: str, user: str, streaming: bool = False): + """Convert text to audio.""" + data = {"text": text, "user": user, "streaming": streaming} + return await self._send_request("POST", "/text-to-audio", json=data) + + async def get_meta(self, user: str): + """Get metadata.""" + params = {"user": user} + return await self._send_request("GET", "/meta", params=params) + + async def get_app_info(self): + """Get basic application information including name, description, tags, and mode.""" + return await self._send_request("GET", "/info") + + async def get_app_site_info(self): + """Get application site information.""" + return await self._send_request("GET", "/site") + + async def get_file_preview(self, file_id: str): + """Get file preview by file ID.""" + return await self._send_request("GET", f"/files/{file_id}/preview") + + +class AsyncCompletionClient(AsyncDifyClient): + """Async client for Completion API operations.""" + + async def create_completion_message( + self, + inputs: dict, + response_mode: Literal["blocking", "streaming"], + user: str, + files: dict | None = None, + ): + """Create a completion message. + + Args: + inputs: Input variables for the completion + response_mode: Response mode ('blocking' or 'streaming') + user: User identifier + files: Optional files to include + + Returns: + httpx.Response object + """ + data = { + "inputs": inputs, + "response_mode": response_mode, + "user": user, + "files": files, + } + return await self._send_request( + "POST", + "/completion-messages", + data, + stream=(response_mode == "streaming"), + ) + + +class AsyncChatClient(AsyncDifyClient): + """Async client for Chat API operations.""" + + async def create_chat_message( + self, + inputs: dict, + query: str, + user: str, + response_mode: Literal["blocking", "streaming"] = "blocking", + conversation_id: str | None = None, + files: dict | None = None, + ): + """Create a chat message. 
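The `file_upload` helper above pairs with `create_chat_message`'s `files` parameter. A sketch of the round trip; the uploaded-file reference shape (`type`, `transfer_method`, `upload_file_id`) and the `id` field in the upload response follow the Dify API docs as I read them, so verify them against your deployment:

```python
import asyncio

from dify_client import AsyncChatClient

api_key = "your_api_key"

async def main():
    async with AsyncChatClient(api_key) as client:
        # A sync open() keeps the sketch short; httpx reads the handle for multipart.
        with open("photo.jpg", "rb") as f:
            upload = await client.file_upload(
                user="user_id", files={"file": ("photo.jpg", f, "image/jpeg")}
            )
        upload.raise_for_status()
        file_id = upload.json()["id"]

        response = await client.create_chat_message(
            inputs={},
            query="What is in this picture?",
            user="user_id",
            # A list of file references, serialized as-is into the JSON body.
            files=[{"type": "image", "transfer_method": "local_file", "upload_file_id": file_id}],
        )
        response.raise_for_status()
        print(response.json().get("answer"))

asyncio.run(main())
```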
+ + Args: + inputs: Input variables for the chat + query: User query/message + user: User identifier + response_mode: Response mode ('blocking' or 'streaming') + conversation_id: Optional conversation ID for context + files: Optional files to include + + Returns: + httpx.Response object + """ + data = { + "inputs": inputs, + "query": query, + "user": user, + "response_mode": response_mode, + "files": files, + } + if conversation_id: + data["conversation_id"] = conversation_id + + return await self._send_request( + "POST", + "/chat-messages", + data, + stream=(response_mode == "streaming"), + ) + + async def get_suggested(self, message_id: str, user: str): + """Get suggested questions for a message.""" + params = {"user": user} + return await self._send_request("GET", f"/messages/{message_id}/suggested", params=params) + + async def stop_message(self, task_id: str, user: str): + """Stop a running message generation.""" + data = {"user": user} + return await self._send_request("POST", f"/chat-messages/{task_id}/stop", data) + + async def get_conversations( + self, + user: str, + last_id: str | None = None, + limit: int | None = None, + pinned: bool | None = None, + ): + """Get list of conversations.""" + params = {"user": user, "last_id": last_id, "limit": limit, "pinned": pinned} + return await self._send_request("GET", "/conversations", params=params) + + async def get_conversation_messages( + self, + user: str, + conversation_id: str | None = None, + first_id: str | None = None, + limit: int | None = None, + ): + """Get messages from a conversation.""" + params = { + "user": user, + "conversation_id": conversation_id, + "first_id": first_id, + "limit": limit, + } + return await self._send_request("GET", "/messages", params=params) + + async def rename_conversation(self, conversation_id: str, name: str, auto_generate: bool, user: str): + """Rename a conversation.""" + data = {"name": name, "auto_generate": auto_generate, "user": user} + return await self._send_request("POST", f"/conversations/{conversation_id}/name", data) + + async def delete_conversation(self, conversation_id: str, user: str): + """Delete a conversation.""" + data = {"user": user} + return await self._send_request("DELETE", f"/conversations/{conversation_id}", data) + + async def audio_to_text(self, audio_file: IO[bytes] | tuple, user: str): + """Convert audio to text.""" + data = {"user": user} + files = {"file": audio_file} + return await self._send_request_with_files("POST", "/audio-to-text", data, files) + + # Annotation APIs + async def annotation_reply_action( + self, + action: Literal["enable", "disable"], + score_threshold: float, + embedding_provider_name: str, + embedding_model_name: str, + ): + """Enable or disable annotation reply feature.""" + data = { + "score_threshold": score_threshold, + "embedding_provider_name": embedding_provider_name, + "embedding_model_name": embedding_model_name, + } + return await self._send_request("POST", f"/apps/annotation-reply/{action}", json=data) + + async def get_annotation_reply_status(self, action: Literal["enable", "disable"], job_id: str): + """Get the status of an annotation reply action job.""" + return await self._send_request("GET", f"/apps/annotation-reply/{action}/status/{job_id}") + + async def list_annotations(self, page: int = 1, limit: int = 20, keyword: str | None = None): + """List annotations for the application.""" + params = {"page": page, "limit": limit, "keyword": keyword} + return await self._send_request("GET", "/apps/annotations", params=params) + + 
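`annotation_reply_action` above starts a background job, so it pairs with `get_annotation_reply_status` in a polling loop. A hedged sketch; the `job_id`/`job_status` response fields and the embedding provider/model values are assumptions to check against your workspace:

```python
import asyncio

from dify_client import AsyncChatClient

api_key = "your_api_key"

async def enable_annotation_reply():
    async with AsyncChatClient(api_key) as client:
        started = await client.annotation_reply_action(
            action="enable",
            score_threshold=0.9,
            embedding_provider_name="openai",
            embedding_model_name="text-embedding-3-small",
        )
        started.raise_for_status()
        job_id = started.json()["job_id"]

        # Poll until the background job leaves the waiting state.
        while True:
            status = await client.get_annotation_reply_status("enable", job_id)
            status.raise_for_status()
            if status.json().get("job_status") != "waiting":
                break
            await asyncio.sleep(1)

asyncio.run(enable_annotation_reply())
```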
async def create_annotation(self, question: str, answer: str): + """Create a new annotation.""" + data = {"question": question, "answer": answer} + return await self._send_request("POST", "/apps/annotations", json=data) + + async def update_annotation(self, annotation_id: str, question: str, answer: str): + """Update an existing annotation.""" + data = {"question": question, "answer": answer} + return await self._send_request("PUT", f"/apps/annotations/{annotation_id}", json=data) + + async def delete_annotation(self, annotation_id: str): + """Delete an annotation.""" + return await self._send_request("DELETE", f"/apps/annotations/{annotation_id}") + + # Conversation Variables APIs + async def get_conversation_variables(self, conversation_id: str, user: str): + """Get all variables for a specific conversation. + + Args: + conversation_id: The conversation ID to query variables for + user: User identifier + + Returns: + Response from the API containing: + - variables: List of conversation variables with their values + - conversation_id: The conversation ID + """ + params = {"user": user} + url = f"/conversations/{conversation_id}/variables" + return await self._send_request("GET", url, params=params) + + async def update_conversation_variable(self, conversation_id: str, variable_id: str, value: Any, user: str): + """Update a specific conversation variable. + + Args: + conversation_id: The conversation ID + variable_id: The variable ID to update + value: New value for the variable + user: User identifier + + Returns: + Response from the API with updated variable information + """ + data = {"value": value, "user": user} + url = f"/conversations/{conversation_id}/variables/{variable_id}" + return await self._send_request("PATCH", url, json=data) + + +class AsyncWorkflowClient(AsyncDifyClient): + """Async client for Workflow API operations.""" + + async def run( + self, + inputs: dict, + response_mode: Literal["blocking", "streaming"] = "streaming", + user: str = "abc-123", + ): + """Run a workflow.""" + data = {"inputs": inputs, "response_mode": response_mode, "user": user} + return await self._send_request("POST", "/workflows/run", data) + + async def stop(self, task_id: str, user: str): + """Stop a running workflow task.""" + data = {"user": user} + return await self._send_request("POST", f"/workflows/tasks/{task_id}/stop", data) + + async def get_result(self, workflow_run_id: str): + """Get workflow run result.""" + return await self._send_request("GET", f"/workflows/run/{workflow_run_id}") + + async def get_workflow_logs( + self, + keyword: str = None, + status: Literal["succeeded", "failed", "stopped"] | None = None, + page: int = 1, + limit: int = 20, + created_at__before: str = None, + created_at__after: str = None, + created_by_end_user_session_id: str = None, + created_by_account: str = None, + ): + """Get workflow execution logs with optional filtering.""" + params = { + "page": page, + "limit": limit, + "keyword": keyword, + "status": status, + "created_at__before": created_at__before, + "created_at__after": created_at__after, + "created_by_end_user_session_id": created_by_end_user_session_id, + "created_by_account": created_by_account, + } + return await self._send_request("GET", "/workflows/logs", params=params) + + async def run_specific_workflow( + self, + workflow_id: str, + inputs: dict, + response_mode: Literal["blocking", "streaming"] = "streaming", + user: str = "abc-123", + ): + """Run a specific workflow by workflow ID.""" + data = {"inputs": inputs, "response_mode": 
response_mode, "user": user} + return await self._send_request( + "POST", + f"/workflows/{workflow_id}/run", + data, + stream=(response_mode == "streaming"), + ) + + +class AsyncWorkspaceClient(AsyncDifyClient): + """Async client for workspace-related operations.""" + + async def get_available_models(self, model_type: str): + """Get available models by model type.""" + url = f"/workspaces/current/models/model-types/{model_type}" + return await self._send_request("GET", url) + + +class AsyncKnowledgeBaseClient(AsyncDifyClient): + """Async client for Knowledge Base API operations.""" + + def __init__( + self, + api_key: str, + base_url: str = "https://api.dify.ai/v1", + dataset_id: str | None = None, + timeout: float = 60.0, + ): + """Construct an AsyncKnowledgeBaseClient object. + + Args: + api_key: API key of Dify + base_url: Base URL of Dify API + dataset_id: ID of the dataset + timeout: Request timeout in seconds + """ + super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) + self.dataset_id = dataset_id + + def _get_dataset_id(self): + """Get the dataset ID, raise error if not set.""" + if self.dataset_id is None: + raise ValueError("dataset_id is not set") + return self.dataset_id + + async def create_dataset(self, name: str, **kwargs): + """Create a new dataset.""" + return await self._send_request("POST", "/datasets", {"name": name}, **kwargs) + + async def list_datasets(self, page: int = 1, page_size: int = 20, **kwargs): + """List all datasets.""" + return await self._send_request("GET", "/datasets", params={"page": page, "limit": page_size}, **kwargs) + + async def create_document_by_text(self, name: str, text: str, extra_params: dict | None = None, **kwargs): + """Create a document by text. + + Args: + name: Name of the document + text: Text content of the document + extra_params: Extra parameters for the API + + Returns: + Response from the API + """ + data = { + "indexing_technique": "high_quality", + "process_rule": {"mode": "automatic"}, + "name": name, + "text": text, + } + if extra_params is not None and isinstance(extra_params, dict): + data.update(extra_params) + url = f"/datasets/{self._get_dataset_id()}/document/create_by_text" + return await self._send_request("POST", url, json=data, **kwargs) + + async def update_document_by_text( + self, + document_id: str, + name: str, + text: str, + extra_params: dict | None = None, + **kwargs, + ): + """Update a document by text.""" + data = {"name": name, "text": text} + if extra_params is not None and isinstance(extra_params, dict): + data.update(extra_params) + url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/update_by_text" + return await self._send_request("POST", url, json=data, **kwargs) + + async def create_document_by_file( + self, + file_path: str, + original_document_id: str | None = None, + extra_params: dict | None = None, + ): + """Create a document by file.""" + async with aiofiles.open(file_path, "rb") as f: + files = {"file": (os.path.basename(file_path), f)} + data = { + "process_rule": {"mode": "automatic"}, + "indexing_technique": "high_quality", + } + if extra_params is not None and isinstance(extra_params, dict): + data.update(extra_params) + if original_document_id is not None: + data["original_document_id"] = original_document_id + url = f"/datasets/{self._get_dataset_id()}/document/create_by_file" + return await self._send_request_with_files("POST", url, {"data": json.dumps(data)}, files) + + async def update_document_by_file(self, document_id: str, file_path: str, 
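A caution on the file-based document methods above: httpx's `files=` expects bytes or a synchronous file object, and an aiofiles handle (whose `read()` returns a coroutine) may not be accepted. If you hit that, a workaround sketch that reads the file into memory first (the helper name is hypothetical, not SDK API; it assumes files of modest size):

```python
import json
import os

import aiofiles

async def create_document_by_file_bytes(kb_client, file_path: str, extra_params: dict | None = None):
    # Read fully via aiofiles, then hand plain bytes to httpx's multipart encoder.
    async with aiofiles.open(file_path, "rb") as f:
        content = await f.read()
    files = {"file": (os.path.basename(file_path), content)}
    data = {"process_rule": {"mode": "automatic"}, "indexing_technique": "high_quality"}
    if extra_params:
        data.update(extra_params)
    url = f"/datasets/{kb_client._get_dataset_id()}/document/create_by_file"
    return await kb_client._send_request_with_files("POST", url, {"data": json.dumps(data)}, files)
```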
extra_params: dict | None = None): + """Update a document by file.""" + async with aiofiles.open(file_path, "rb") as f: + files = {"file": (os.path.basename(file_path), f)} + data = {} + if extra_params is not None and isinstance(extra_params, dict): + data.update(extra_params) + url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/update_by_file" + return await self._send_request_with_files("POST", url, {"data": json.dumps(data)}, files) + + async def batch_indexing_status(self, batch_id: str, **kwargs): + """Get the status of the batch indexing.""" + url = f"/datasets/{self._get_dataset_id()}/documents/{batch_id}/indexing-status" + return await self._send_request("GET", url, **kwargs) + + async def delete_dataset(self): + """Delete this dataset.""" + url = f"/datasets/{self._get_dataset_id()}" + return await self._send_request("DELETE", url) + + async def delete_document(self, document_id: str): + """Delete a document.""" + url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}" + return await self._send_request("DELETE", url) + + async def list_documents( + self, + page: int | None = None, + page_size: int | None = None, + keyword: str | None = None, + **kwargs, + ): + """Get a list of documents in this dataset.""" + params = { + "page": page, + "limit": page_size, + "keyword": keyword, + } + url = f"/datasets/{self._get_dataset_id()}/documents" + return await self._send_request("GET", url, params=params, **kwargs) + + async def add_segments(self, document_id: str, segments: list[dict], **kwargs): + """Add segments to a document.""" + data = {"segments": segments} + url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/segments" + return await self._send_request("POST", url, json=data, **kwargs) + + async def query_segments( + self, + document_id: str, + keyword: str | None = None, + status: str | None = None, + **kwargs, + ): + """Query segments in this document. + + Args: + document_id: ID of the document + keyword: Query keyword (optional) + status: Status of the segment (optional, e.g., 'completed') + **kwargs: Additional parameters to pass to the API. + Can include a 'params' dict for extra query parameters. 
+ + Returns: + Response from the API + """ + url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/segments" + params = { + "keyword": keyword, + "status": status, + } + if "params" in kwargs: + params.update(kwargs.pop("params")) + return await self._send_request("GET", url, params=params, **kwargs) + + async def delete_document_segment(self, document_id: str, segment_id: str): + """Delete a segment from a document.""" + url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/segments/{segment_id}" + return await self._send_request("DELETE", url) + + async def update_document_segment(self, document_id: str, segment_id: str, segment_data: dict, **kwargs): + """Update a segment in a document.""" + data = {"segment": segment_data} + url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/segments/{segment_id}" + return await self._send_request("POST", url, json=data, **kwargs) + + # Advanced Knowledge Base APIs + async def hit_testing( + self, + query: str, + retrieval_model: Dict[str, Any] = None, + external_retrieval_model: Dict[str, Any] = None, + ): + """Perform hit testing on the dataset.""" + data = {"query": query} + if retrieval_model: + data["retrieval_model"] = retrieval_model + if external_retrieval_model: + data["external_retrieval_model"] = external_retrieval_model + url = f"/datasets/{self._get_dataset_id()}/hit-testing" + return await self._send_request("POST", url, json=data) + + async def get_dataset_metadata(self): + """Get dataset metadata.""" + url = f"/datasets/{self._get_dataset_id()}/metadata" + return await self._send_request("GET", url) + + async def create_dataset_metadata(self, metadata_data: Dict[str, Any]): + """Create dataset metadata.""" + url = f"/datasets/{self._get_dataset_id()}/metadata" + return await self._send_request("POST", url, json=metadata_data) + + async def update_dataset_metadata(self, metadata_id: str, metadata_data: Dict[str, Any]): + """Update dataset metadata.""" + url = f"/datasets/{self._get_dataset_id()}/metadata/{metadata_id}" + return await self._send_request("PATCH", url, json=metadata_data) + + async def get_built_in_metadata(self): + """Get built-in metadata.""" + url = f"/datasets/{self._get_dataset_id()}/metadata/built-in" + return await self._send_request("GET", url) + + async def manage_built_in_metadata(self, action: str, metadata_data: Dict[str, Any] = None): + """Manage built-in metadata with specified action.""" + data = metadata_data or {} + url = f"/datasets/{self._get_dataset_id()}/metadata/built-in/{action}" + return await self._send_request("POST", url, json=data) + + async def update_documents_metadata(self, operation_data: List[Dict[str, Any]]): + """Update metadata for multiple documents.""" + url = f"/datasets/{self._get_dataset_id()}/documents/metadata" + data = {"operation_data": operation_data} + return await self._send_request("POST", url, json=data) + + # Dataset Tags APIs + async def list_dataset_tags(self): + """List all dataset tags.""" + return await self._send_request("GET", "/datasets/tags") + + async def bind_dataset_tags(self, tag_ids: List[str]): + """Bind tags to dataset.""" + data = {"tag_ids": tag_ids, "target_id": self._get_dataset_id()} + return await self._send_request("POST", "/datasets/tags/binding", json=data) + + async def unbind_dataset_tag(self, tag_id: str): + """Unbind a single tag from dataset.""" + data = {"tag_id": tag_id, "target_id": self._get_dataset_id()} + return await self._send_request("POST", "/datasets/tags/unbinding", json=data) + + async def 
get_dataset_tags(self): + """Get tags for current dataset.""" + url = f"/datasets/{self._get_dataset_id()}/tags" + return await self._send_request("GET", url) + + # RAG Pipeline APIs + async def get_datasource_plugins(self, is_published: bool = True): + """Get datasource plugins for RAG pipeline.""" + params = {"is_published": is_published} + url = f"/datasets/{self._get_dataset_id()}/pipeline/datasource-plugins" + return await self._send_request("GET", url, params=params) + + async def run_datasource_node( + self, + node_id: str, + inputs: Dict[str, Any], + datasource_type: str, + is_published: bool = True, + credential_id: str = None, + ): + """Run a datasource node in RAG pipeline.""" + data = { + "inputs": inputs, + "datasource_type": datasource_type, + "is_published": is_published, + } + if credential_id: + data["credential_id"] = credential_id + url = f"/datasets/{self._get_dataset_id()}/pipeline/datasource/nodes/{node_id}/run" + return await self._send_request("POST", url, json=data, stream=True) + + async def run_rag_pipeline( + self, + inputs: Dict[str, Any], + datasource_type: str, + datasource_info_list: List[Dict[str, Any]], + start_node_id: str, + is_published: bool = True, + response_mode: Literal["streaming", "blocking"] = "blocking", + ): + """Run RAG pipeline.""" + data = { + "inputs": inputs, + "datasource_type": datasource_type, + "datasource_info_list": datasource_info_list, + "start_node_id": start_node_id, + "is_published": is_published, + "response_mode": response_mode, + } + url = f"/datasets/{self._get_dataset_id()}/pipeline/run" + return await self._send_request("POST", url, json=data, stream=response_mode == "streaming") + + async def upload_pipeline_file(self, file_path: str): + """Upload file for RAG pipeline.""" + async with aiofiles.open(file_path, "rb") as f: + files = {"file": (os.path.basename(file_path), f)} + return await self._send_request_with_files("POST", "/datasets/pipeline/file-upload", {}, files) + + # Dataset Management APIs + async def get_dataset(self, dataset_id: str | None = None): + """Get detailed information about a specific dataset.""" + ds_id = dataset_id or self._get_dataset_id() + url = f"/datasets/{ds_id}" + return await self._send_request("GET", url) + + async def update_dataset( + self, + dataset_id: str | None = None, + name: str | None = None, + description: str | None = None, + indexing_technique: str | None = None, + embedding_model: str | None = None, + embedding_model_provider: str | None = None, + retrieval_model: Dict[str, Any] | None = None, + **kwargs, + ): + """Update dataset configuration. 
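`run_rag_pipeline` with `response_mode="streaming"` can be consumed with the same SSE-style loop the README uses for completions. A sketch; the datasource type, info-list shape, and node id are pipeline-specific placeholders:

```python
import asyncio
import json

from dify_client import AsyncKnowledgeBaseClient

api_key = "your_api_key"
dataset_id = "your_dataset_id"

async def main():
    async with AsyncKnowledgeBaseClient(api_key, dataset_id=dataset_id) as kb:
        response = await kb.run_rag_pipeline(
            inputs={},
            datasource_type="upload_file",  # placeholder; depends on your pipeline
            datasource_info_list=[{"file_id": "your_file_id"}],  # shape is pipeline-specific
            start_node_id="your_start_node_id",
            response_mode="streaming",
        )
        response.raise_for_status()
        async for line in response.aiter_lines():
            if line.startswith("data:"):
                payload = line[5:].strip()
                if payload:
                    print(json.loads(payload))

asyncio.run(main())
```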
+ + Args: + dataset_id: Dataset ID (optional, uses current dataset_id if not provided) + name: New dataset name + description: New dataset description + indexing_technique: Indexing technique ('high_quality' or 'economy') + embedding_model: Embedding model name + embedding_model_provider: Embedding model provider + retrieval_model: Retrieval model configuration dict + **kwargs: Additional parameters to pass to the API + + Returns: + Response from the API with updated dataset information + """ + ds_id = dataset_id or self._get_dataset_id() + url = f"/datasets/{ds_id}" + + payload = { + "name": name, + "description": description, + "indexing_technique": indexing_technique, + "embedding_model": embedding_model, + "embedding_model_provider": embedding_model_provider, + "retrieval_model": retrieval_model, + } + + data = {k: v for k, v in payload.items() if v is not None} + data.update(kwargs) + + return await self._send_request("PATCH", url, json=data) + + async def batch_update_document_status( + self, + action: Literal["enable", "disable", "archive", "un_archive"], + document_ids: List[str], + dataset_id: str | None = None, + ): + """Batch update document status.""" + ds_id = dataset_id or self._get_dataset_id() + url = f"/datasets/{ds_id}/documents/status/{action}" + data = {"document_ids": document_ids} + return await self._send_request("PATCH", url, json=data) diff --git a/sdks/python-client/dify_client/client.py b/sdks/python-client/dify_client/client.py index fb42e3773d..41c5abe16d 100644 --- a/sdks/python-client/dify_client/client.py +++ b/sdks/python-client/dify_client/client.py @@ -1,32 +1,114 @@ import json -from typing import Literal, Union, Dict, List, Any, Optional, IO +import os +from typing import Literal, Dict, List, Any, IO -import requests +import httpx class DifyClient: - def __init__(self, api_key, base_url: str = "https://api.dify.ai/v1"): + """Synchronous Dify API client. + + This client uses httpx.Client for efficient connection pooling and resource management. + It's recommended to use this client as a context manager: + + Example: + with DifyClient(api_key="your-key") as client: + response = client.get_app_info() + """ + + def __init__( + self, + api_key: str, + base_url: str = "https://api.dify.ai/v1", + timeout: float = 60.0, + ): + """Initialize the Dify client. + + Args: + api_key: Your Dify API key + base_url: Base URL for the Dify API + timeout: Request timeout in seconds (default: 60.0) + """ self.api_key = api_key self.base_url = base_url + self._client = httpx.Client( + base_url=base_url, + timeout=httpx.Timeout(timeout, connect=5.0), + ) + + def __enter__(self): + """Support context manager protocol.""" + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Clean up resources when exiting context.""" + self.close() + + def close(self): + """Close the HTTP client and release resources.""" + if hasattr(self, "_client"): + self._client.close() def _send_request( - self, method: str, endpoint: str, json: dict | None = None, params: dict | None = None, stream: bool = False + self, + method: str, + endpoint: str, + json: dict | None = None, + params: dict | None = None, + stream: bool = False, + **kwargs, ): + """Send an HTTP request to the Dify API. 
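Both constructors pin the connect timeout at five seconds (`httpx.Timeout(timeout, connect=5.0)`) and expose everything else through the `timeout` argument, so long-running generations only need a larger value. A sketch:

```python
from dify_client import ChatClient

# Long generations: raise the read timeout; connect stays at 5 s per the client code.
client = ChatClient(api_key="your_api_key", timeout=300.0)
try:
    response = client.create_chat_message(
        inputs={}, query="Write a long essay", user="user_id", response_mode="blocking"
    )
    response.raise_for_status()
    print(response.json().get("answer"))
finally:
    client.close()
```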
+ + Args: + method: HTTP method (GET, POST, PUT, PATCH, DELETE) + endpoint: API endpoint path + json: JSON request body + params: Query parameters + stream: Whether to stream the response + **kwargs: Additional arguments to pass to httpx.request + + Returns: + httpx.Response object + """ headers = { "Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json", } - url = f"{self.base_url}{endpoint}" - response = requests.request(method, url, json=json, params=params, headers=headers, stream=stream) + # httpx.Client automatically prepends base_url + response = self._client.request( + method, + endpoint, + json=json, + params=params, + headers=headers, + **kwargs, + ) return response - def _send_request_with_files(self, method, endpoint, data, files): + def _send_request_with_files(self, method: str, endpoint: str, data: dict, files: dict): + """Send an HTTP request with file uploads. + + Args: + method: HTTP method (POST, PUT, etc.) + endpoint: API endpoint path + data: Form data + files: Files to upload + + Returns: + httpx.Response object + """ headers = {"Authorization": f"Bearer {self.api_key}"} - url = f"{self.base_url}{endpoint}" - response = requests.request(method, url, data=data, headers=headers, files=files) + response = self._client.request( + method, + endpoint, + data=data, + headers=headers, + files=files, + ) return response @@ -65,7 +147,11 @@ class DifyClient: class CompletionClient(DifyClient): def create_completion_message( - self, inputs: dict, response_mode: Literal["blocking", "streaming"], user: str, files: dict | None = None + self, + inputs: dict, + response_mode: Literal["blocking", "streaming"], + user: str, + files: dict | None = None, ): data = { "inputs": inputs, @@ -77,7 +163,7 @@ class CompletionClient(DifyClient): "POST", "/completion-messages", data, - stream=True if response_mode == "streaming" else False, + stream=(response_mode == "streaming"), ) @@ -105,7 +191,7 @@ class ChatClient(DifyClient): "POST", "/chat-messages", data, - stream=True if response_mode == "streaming" else False, + stream=(response_mode == "streaming"), ) def get_suggested(self, message_id: str, user: str): @@ -166,10 +252,6 @@ class ChatClient(DifyClient): embedding_model_name: str, ): """Enable or disable annotation reply feature.""" - # Backend API requires these fields to be non-None values - if score_threshold is None or embedding_provider_name is None or embedding_model_name is None: - raise ValueError("score_threshold, embedding_provider_name, and embedding_model_name cannot be None") - data = { "score_threshold": score_threshold, "embedding_provider_name": embedding_provider_name, @@ -181,11 +263,9 @@ class ChatClient(DifyClient): """Get the status of an annotation reply action job.""" return self._send_request("GET", f"/apps/annotation-reply/{action}/status/{job_id}") - def list_annotations(self, page: int = 1, limit: int = 20, keyword: str = ""): + def list_annotations(self, page: int = 1, limit: int = 20, keyword: str | None = None): """List annotations for the application.""" - params = {"page": page, "limit": limit} - if keyword: - params["keyword"] = keyword + params = {"page": page, "limit": limit, "keyword": keyword} return self._send_request("GET", "/apps/annotations", params=params) def create_annotation(self, question: str, answer: str): @@ -202,9 +282,47 @@ class ChatClient(DifyClient): """Delete an annotation.""" return self._send_request("DELETE", f"/apps/annotations/{annotation_id}") + # Conversation Variables APIs + def 
get_conversation_variables(self, conversation_id: str, user: str): + """Get all variables for a specific conversation. + + Args: + conversation_id: The conversation ID to query variables for + user: User identifier + + Returns: + Response from the API containing: + - variables: List of conversation variables with their values + - conversation_id: The conversation ID + """ + params = {"user": user} + url = f"/conversations/{conversation_id}/variables" + return self._send_request("GET", url, params=params) + + def update_conversation_variable(self, conversation_id: str, variable_id: str, value: Any, user: str): + """Update a specific conversation variable. + + Args: + conversation_id: The conversation ID + variable_id: The variable ID to update + value: New value for the variable + user: User identifier + + Returns: + Response from the API with updated variable information + """ + data = {"value": value, "user": user} + url = f"/conversations/{conversation_id}/variables/{variable_id}" + return self._send_request("PATCH", url, json=data) + class WorkflowClient(DifyClient): - def run(self, inputs: dict, response_mode: Literal["blocking", "streaming"] = "streaming", user: str = "abc-123"): + def run( + self, + inputs: dict, + response_mode: Literal["blocking", "streaming"] = "streaming", + user: str = "abc-123", + ): data = {"inputs": inputs, "response_mode": response_mode, "user": user} return self._send_request("POST", "/workflows/run", data) @@ -252,7 +370,10 @@ class WorkflowClient(DifyClient): """Run a specific workflow by workflow ID.""" data = {"inputs": inputs, "response_mode": response_mode, "user": user} return self._send_request( - "POST", f"/workflows/{workflow_id}/run", data, stream=True if response_mode == "streaming" else False + "POST", + f"/workflows/{workflow_id}/run", + data, + stream=(response_mode == "streaming"), ) @@ -293,7 +414,7 @@ class KnowledgeBaseClient(DifyClient): return self._send_request("POST", "/datasets", {"name": name}, **kwargs) def list_datasets(self, page: int = 1, page_size: int = 20, **kwargs): - return self._send_request("GET", f"/datasets?page={page}&limit={page_size}", **kwargs) + return self._send_request("GET", "/datasets", params={"page": page, "limit": page_size}, **kwargs) def create_document_by_text(self, name, text, extra_params: dict | None = None, **kwargs): """ @@ -333,7 +454,12 @@ class KnowledgeBaseClient(DifyClient): return self._send_request("POST", url, json=data, **kwargs) def update_document_by_text( - self, document_id: str, name: str, text: str, extra_params: dict | None = None, **kwargs + self, + document_id: str, + name: str, + text: str, + extra_params: dict | None = None, + **kwargs, ): """ Update a document by text. @@ -368,7 +494,10 @@ class KnowledgeBaseClient(DifyClient): return self._send_request("POST", url, json=data, **kwargs) def create_document_by_file( - self, file_path: str, original_document_id: str | None = None, extra_params: dict | None = None + self, + file_path: str, + original_document_id: str | None = None, + extra_params: dict | None = None, ): """ Create a document by file. 
@@ -395,17 +524,18 @@ class KnowledgeBaseClient(DifyClient): } :return: Response from the API """ - files = {"file": open(file_path, "rb")} - data = { - "process_rule": {"mode": "automatic"}, - "indexing_technique": "high_quality", - } - if extra_params is not None and isinstance(extra_params, dict): - data.update(extra_params) - if original_document_id is not None: - data["original_document_id"] = original_document_id - url = f"/datasets/{self._get_dataset_id()}/document/create_by_file" - return self._send_request_with_files("POST", url, {"data": json.dumps(data)}, files) + with open(file_path, "rb") as f: + files = {"file": (os.path.basename(file_path), f)} + data = { + "process_rule": {"mode": "automatic"}, + "indexing_technique": "high_quality", + } + if extra_params is not None and isinstance(extra_params, dict): + data.update(extra_params) + if original_document_id is not None: + data["original_document_id"] = original_document_id + url = f"/datasets/{self._get_dataset_id()}/document/create_by_file" + return self._send_request_with_files("POST", url, {"data": json.dumps(data)}, files) def update_document_by_file(self, document_id: str, file_path: str, extra_params: dict | None = None): """ @@ -433,12 +563,13 @@ class KnowledgeBaseClient(DifyClient): } :return: """ - files = {"file": open(file_path, "rb")} - data = {} - if extra_params is not None and isinstance(extra_params, dict): - data.update(extra_params) - url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/update_by_file" - return self._send_request_with_files("POST", url, {"data": json.dumps(data)}, files) + with open(file_path, "rb") as f: + files = {"file": (os.path.basename(file_path), f)} + data = {} + if extra_params is not None and isinstance(extra_params, dict): + data.update(extra_params) + url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/update_by_file" + return self._send_request_with_files("POST", url, {"data": json.dumps(data)}, files) def batch_indexing_status(self, batch_id: str, **kwargs): """ @@ -516,6 +647,8 @@ class KnowledgeBaseClient(DifyClient): :param document_id: ID of the document :param keyword: query keyword, optional :param status: status of the segment, optional, e.g. completed + :param kwargs: Additional parameters to pass to the API. + Can include a 'params' dict for extra query parameters. 
""" url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/segments" params = {} @@ -524,7 +657,7 @@ class KnowledgeBaseClient(DifyClient): if status is not None: params["status"] = status if "params" in kwargs: - params.update(kwargs["params"]) + params.update(kwargs.pop("params")) return self._send_request("GET", url, params=params, **kwargs) def delete_document_segment(self, document_id: str, segment_id: str): @@ -553,7 +686,10 @@ class KnowledgeBaseClient(DifyClient): # Advanced Knowledge Base APIs def hit_testing( - self, query: str, retrieval_model: Dict[str, Any] = None, external_retrieval_model: Dict[str, Any] = None + self, + query: str, + retrieval_model: Dict[str, Any] = None, + external_retrieval_model: Dict[str, Any] = None, ): """Perform hit testing on the dataset.""" data = {"query": query} @@ -632,7 +768,11 @@ class KnowledgeBaseClient(DifyClient): credential_id: str = None, ): """Run a datasource node in RAG pipeline.""" - data = {"inputs": inputs, "datasource_type": datasource_type, "is_published": is_published} + data = { + "inputs": inputs, + "datasource_type": datasource_type, + "is_published": is_published, + } if credential_id: data["credential_id"] = credential_id url = f"/datasets/{self._get_dataset_id()}/pipeline/datasource/nodes/{node_id}/run" @@ -662,5 +802,94 @@ class KnowledgeBaseClient(DifyClient): def upload_pipeline_file(self, file_path: str): """Upload file for RAG pipeline.""" with open(file_path, "rb") as f: - files = {"file": f} + files = {"file": (os.path.basename(file_path), f)} return self._send_request_with_files("POST", "/datasets/pipeline/file-upload", {}, files) + + # Dataset Management APIs + def get_dataset(self, dataset_id: str | None = None): + """Get detailed information about a specific dataset. + + Args: + dataset_id: Dataset ID (optional, uses current dataset_id if not provided) + + Returns: + Response from the API containing dataset details including: + - name, description, permission + - indexing_technique, embedding_model, embedding_model_provider + - retrieval_model configuration + - document_count, word_count, app_count + - created_at, updated_at + """ + ds_id = dataset_id or self._get_dataset_id() + url = f"/datasets/{ds_id}" + return self._send_request("GET", url) + + def update_dataset( + self, + dataset_id: str | None = None, + name: str | None = None, + description: str | None = None, + indexing_technique: str | None = None, + embedding_model: str | None = None, + embedding_model_provider: str | None = None, + retrieval_model: Dict[str, Any] | None = None, + **kwargs, + ): + """Update dataset configuration. 
+ + Args: + dataset_id: Dataset ID (optional, uses current dataset_id if not provided) + name: New dataset name + description: New dataset description + indexing_technique: Indexing technique ('high_quality' or 'economy') + embedding_model: Embedding model name + embedding_model_provider: Embedding model provider + retrieval_model: Retrieval model configuration dict + **kwargs: Additional parameters to pass to the API + + Returns: + Response from the API with updated dataset information + """ + ds_id = dataset_id or self._get_dataset_id() + url = f"/datasets/{ds_id}" + + # Build data dictionary with all possible parameters + payload = { + "name": name, + "description": description, + "indexing_technique": indexing_technique, + "embedding_model": embedding_model, + "embedding_model_provider": embedding_model_provider, + "retrieval_model": retrieval_model, + } + + # Filter out None values and merge with additional kwargs + data = {k: v for k, v in payload.items() if v is not None} + data.update(kwargs) + + return self._send_request("PATCH", url, json=data) + + def batch_update_document_status( + self, + action: Literal["enable", "disable", "archive", "un_archive"], + document_ids: List[str], + dataset_id: str | None = None, + ): + """Batch update document status (enable/disable/archive/unarchive). + + Args: + action: Action to perform on documents + - 'enable': Enable documents for retrieval + - 'disable': Disable documents from retrieval + - 'archive': Archive documents + - 'un_archive': Unarchive documents + document_ids: List of document IDs to update + dataset_id: Dataset ID (optional, uses current dataset_id if not provided) + + Returns: + Response from the API with operation result + """ + ds_id = dataset_id or self._get_dataset_id() + url = f"/datasets/{ds_id}/documents/status/{action}" + data = {"document_ids": document_ids} + return self._send_request("PATCH", url, json=data) diff --git a/sdks/python-client/pyproject.toml b/sdks/python-client/pyproject.toml new file mode 100644 index 0000000000..db02cbd6e3 --- /dev/null +++ b/sdks/python-client/pyproject.toml @@ -0,0 +1,43 @@ +[project] +name = "dify-client" +version = "0.1.12" +description = "A package for interacting with the Dify Service-API" +readme = "README.md" +requires-python = ">=3.10" +dependencies = [ + "httpx>=0.27.0", + "aiofiles>=23.0.0", +] +authors = [ + {name = "Dify", email = "hello@dify.ai"} +] +license = {text = "MIT"} +keywords = ["dify", "nlp", "ai", "language-processing"] +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", +] + +[project.urls] +Homepage = "https://github.com/langgenius/dify" + +[project.optional-dependencies] +dev = [ + "pytest>=7.0.0", + "pytest-asyncio>=0.21.0", +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["dify_client"] + +[tool.pytest.ini_options] +testpaths = ["tests"] +python_files = ["test_*.py"] +python_classes = ["Test*"] +python_functions = ["test_*"] +asyncio_mode = "auto" diff --git a/sdks/python-client/setup.py b/sdks/python-client/setup.py deleted file mode 100644 index a05f6410fb..0000000000 --- a/sdks/python-client/setup.py +++ /dev/null @@ -1,26 +0,0 @@ -from setuptools import setup - -with open("README.md", encoding="utf-8") as fh: - long_description = fh.read() - -setup( - name="dify-client", - version="0.1.12", - author="Dify", - author_email="hello@dify.ai", - description="A package for interacting 
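With `asyncio_mode = "auto"` in the pytest configuration above, pytest-asyncio collects bare `async def` tests without an explicit `@pytest.mark.asyncio` marker. A minimal sketch of a test in that style (hypothetical, not part of the shipped suite):

```python
from unittest.mock import AsyncMock, patch

from dify_client import AsyncDifyClient

async def test_get_app_info_sends_bearer_token():
    # Collected automatically thanks to asyncio_mode = "auto".
    with patch("dify_client.async_client.httpx.AsyncClient") as mock_client_cls:
        instance = AsyncMock()
        mock_client_cls.return_value = instance
        async with AsyncDifyClient("test-key") as client:
            await client.get_app_info()
        headers = instance.request.call_args.kwargs["headers"]
        assert headers["Authorization"] == "Bearer test-key"
```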
with the Dify Service-API", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/langgenius/dify", - license="MIT", - packages=["dify_client"], - classifiers=[ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: MIT License", - "Operating System :: OS Independent", - ], - python_requires=">=3.6", - install_requires=["requests"], - keywords="dify nlp ai language-processing", - include_package_data=True, -) diff --git a/sdks/python-client/tests/test_async_client.py b/sdks/python-client/tests/test_async_client.py new file mode 100644 index 0000000000..4f5001866f --- /dev/null +++ b/sdks/python-client/tests/test_async_client.py @@ -0,0 +1,250 @@ +#!/usr/bin/env python3 +""" +Test suite for async client implementation in the Python SDK. + +This test validates the async/await functionality using httpx.AsyncClient +and ensures API parity with sync clients. +""" + +import unittest +from unittest.mock import Mock, patch, AsyncMock + +from dify_client.async_client import ( + AsyncDifyClient, + AsyncChatClient, + AsyncCompletionClient, + AsyncWorkflowClient, + AsyncWorkspaceClient, + AsyncKnowledgeBaseClient, +) + + +class TestAsyncAPIParity(unittest.TestCase): + """Test that async clients have API parity with sync clients.""" + + def test_dify_client_api_parity(self): + """Test AsyncDifyClient has same methods as DifyClient.""" + from dify_client import DifyClient + + sync_methods = {name for name in dir(DifyClient) if not name.startswith("_")} + async_methods = {name for name in dir(AsyncDifyClient) if not name.startswith("_")} + + # aclose is async-specific, close is sync-specific + sync_methods.discard("close") + async_methods.discard("aclose") + + # Verify parity + self.assertEqual(sync_methods, async_methods, "API parity mismatch for DifyClient") + + def test_chat_client_api_parity(self): + """Test AsyncChatClient has same methods as ChatClient.""" + from dify_client import ChatClient + + sync_methods = {name for name in dir(ChatClient) if not name.startswith("_")} + async_methods = {name for name in dir(AsyncChatClient) if not name.startswith("_")} + + sync_methods.discard("close") + async_methods.discard("aclose") + + self.assertEqual(sync_methods, async_methods, "API parity mismatch for ChatClient") + + def test_completion_client_api_parity(self): + """Test AsyncCompletionClient has same methods as CompletionClient.""" + from dify_client import CompletionClient + + sync_methods = {name for name in dir(CompletionClient) if not name.startswith("_")} + async_methods = {name for name in dir(AsyncCompletionClient) if not name.startswith("_")} + + sync_methods.discard("close") + async_methods.discard("aclose") + + self.assertEqual(sync_methods, async_methods, "API parity mismatch for CompletionClient") + + def test_workflow_client_api_parity(self): + """Test AsyncWorkflowClient has same methods as WorkflowClient.""" + from dify_client import WorkflowClient + + sync_methods = {name for name in dir(WorkflowClient) if not name.startswith("_")} + async_methods = {name for name in dir(AsyncWorkflowClient) if not name.startswith("_")} + + sync_methods.discard("close") + async_methods.discard("aclose") + + self.assertEqual(sync_methods, async_methods, "API parity mismatch for WorkflowClient") + + def test_workspace_client_api_parity(self): + """Test AsyncWorkspaceClient has same methods as WorkspaceClient.""" + from dify_client import WorkspaceClient + + sync_methods = {name for name in dir(WorkspaceClient) if not 
name.startswith("_")} + async_methods = {name for name in dir(AsyncWorkspaceClient) if not name.startswith("_")} + + sync_methods.discard("close") + async_methods.discard("aclose") + + self.assertEqual(sync_methods, async_methods, "API parity mismatch for WorkspaceClient") + + def test_knowledge_base_client_api_parity(self): + """Test AsyncKnowledgeBaseClient has same methods as KnowledgeBaseClient.""" + from dify_client import KnowledgeBaseClient + + sync_methods = {name for name in dir(KnowledgeBaseClient) if not name.startswith("_")} + async_methods = {name for name in dir(AsyncKnowledgeBaseClient) if not name.startswith("_")} + + sync_methods.discard("close") + async_methods.discard("aclose") + + self.assertEqual(sync_methods, async_methods, "API parity mismatch for KnowledgeBaseClient") + + +class TestAsyncClientMocked(unittest.IsolatedAsyncioTestCase): + """Test async client with mocked httpx.AsyncClient.""" + + @patch("dify_client.async_client.httpx.AsyncClient") + async def test_async_client_initialization(self, mock_httpx_async_client): + """Test async client initializes with httpx.AsyncClient.""" + mock_client_instance = AsyncMock() + mock_httpx_async_client.return_value = mock_client_instance + + client = AsyncDifyClient("test-key", "https://api.dify.ai/v1") + + # Verify httpx.AsyncClient was called + mock_httpx_async_client.assert_called_once() + self.assertEqual(client.api_key, "test-key") + + await client.aclose() + + @patch("dify_client.async_client.httpx.AsyncClient") + async def test_async_context_manager(self, mock_httpx_async_client): + """Test async context manager works.""" + mock_client_instance = AsyncMock() + mock_httpx_async_client.return_value = mock_client_instance + + async with AsyncDifyClient("test-key") as client: + self.assertEqual(client.api_key, "test-key") + + # Verify aclose was called + mock_client_instance.aclose.assert_called_once() + + @patch("dify_client.async_client.httpx.AsyncClient") + async def test_async_send_request(self, mock_httpx_async_client): + """Test async _send_request method.""" + mock_response = AsyncMock() + mock_response.json = AsyncMock(return_value={"result": "success"}) + mock_response.status_code = 200 + + mock_client_instance = AsyncMock() + mock_client_instance.request = AsyncMock(return_value=mock_response) + mock_httpx_async_client.return_value = mock_client_instance + + async with AsyncDifyClient("test-key") as client: + response = await client._send_request("GET", "/test") + + # Verify request was called + mock_client_instance.request.assert_called_once() + call_args = mock_client_instance.request.call_args + + # Verify parameters + self.assertEqual(call_args[0][0], "GET") + self.assertEqual(call_args[0][1], "/test") + + @patch("dify_client.async_client.httpx.AsyncClient") + async def test_async_chat_client(self, mock_httpx_async_client): + """Test AsyncChatClient functionality.""" + mock_response = AsyncMock() + mock_response.text = '{"answer": "Hello!"}' + mock_response.json = AsyncMock(return_value={"answer": "Hello!"}) + + mock_client_instance = AsyncMock() + mock_client_instance.request = AsyncMock(return_value=mock_response) + mock_httpx_async_client.return_value = mock_client_instance + + async with AsyncChatClient("test-key") as client: + response = await client.create_chat_message({}, "Hi", "user123") + self.assertIn("answer", response.text) + + @patch("dify_client.async_client.httpx.AsyncClient") + async def test_async_completion_client(self, mock_httpx_async_client): + """Test AsyncCompletionClient 
functionality.""" + mock_response = AsyncMock() + mock_response.text = '{"answer": "Response"}' + mock_response.json = AsyncMock(return_value={"answer": "Response"}) + + mock_client_instance = AsyncMock() + mock_client_instance.request = AsyncMock(return_value=mock_response) + mock_httpx_async_client.return_value = mock_client_instance + + async with AsyncCompletionClient("test-key") as client: + response = await client.create_completion_message({"query": "test"}, "blocking", "user123") + self.assertIn("answer", response.text) + + @patch("dify_client.async_client.httpx.AsyncClient") + async def test_async_workflow_client(self, mock_httpx_async_client): + """Test AsyncWorkflowClient functionality.""" + mock_response = AsyncMock() + mock_response.json = AsyncMock(return_value={"result": "success"}) + + mock_client_instance = AsyncMock() + mock_client_instance.request = AsyncMock(return_value=mock_response) + mock_httpx_async_client.return_value = mock_client_instance + + async with AsyncWorkflowClient("test-key") as client: + response = await client.run({"input": "test"}, "blocking", "user123") + data = await response.json() + self.assertEqual(data["result"], "success") + + @patch("dify_client.async_client.httpx.AsyncClient") + async def test_async_workspace_client(self, mock_httpx_async_client): + """Test AsyncWorkspaceClient functionality.""" + mock_response = AsyncMock() + mock_response.json = AsyncMock(return_value={"data": []}) + + mock_client_instance = AsyncMock() + mock_client_instance.request = AsyncMock(return_value=mock_response) + mock_httpx_async_client.return_value = mock_client_instance + + async with AsyncWorkspaceClient("test-key") as client: + response = await client.get_available_models("llm") + data = await response.json() + self.assertIn("data", data) + + @patch("dify_client.async_client.httpx.AsyncClient") + async def test_async_knowledge_base_client(self, mock_httpx_async_client): + """Test AsyncKnowledgeBaseClient functionality.""" + mock_response = AsyncMock() + mock_response.json = AsyncMock(return_value={"data": [], "total": 0}) + + mock_client_instance = AsyncMock() + mock_client_instance.request = AsyncMock(return_value=mock_response) + mock_httpx_async_client.return_value = mock_client_instance + + async with AsyncKnowledgeBaseClient("test-key") as client: + response = await client.list_datasets() + data = await response.json() + self.assertIn("data", data) + + @patch("dify_client.async_client.httpx.AsyncClient") + async def test_all_async_client_classes(self, mock_httpx_async_client): + """Test all async client classes work with httpx.AsyncClient.""" + mock_client_instance = AsyncMock() + mock_httpx_async_client.return_value = mock_client_instance + + clients = [ + AsyncDifyClient("key"), + AsyncChatClient("key"), + AsyncCompletionClient("key"), + AsyncWorkflowClient("key"), + AsyncWorkspaceClient("key"), + AsyncKnowledgeBaseClient("key"), + ] + + # Verify httpx.AsyncClient was called for each + self.assertEqual(mock_httpx_async_client.call_count, 6) + + # Clean up + for client in clients: + await client.aclose() + + +if __name__ == "__main__": + unittest.main() diff --git a/sdks/python-client/tests/test_httpx_migration.py b/sdks/python-client/tests/test_httpx_migration.py new file mode 100644 index 0000000000..b8e434d7ec --- /dev/null +++ b/sdks/python-client/tests/test_httpx_migration.py @@ -0,0 +1,331 @@ +#!/usr/bin/env python3 +""" +Test suite for httpx migration in the Python SDK. 
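The suites here patch `httpx.Client` and `httpx.AsyncClient` wholesale; httpx's built-in `MockTransport` is a complementary style that exercises the real request-building path (auth headers, base URL joining) without any network. A sketch; it swaps the private `_client` attribute, so treat it as a testing convenience rather than supported API:

```python
import httpx

from dify_client import DifyClient

def handler(request: httpx.Request) -> httpx.Response:
    assert request.headers["Authorization"] == "Bearer test-key"
    return httpx.Response(200, json={"name": "demo-app"})

client = DifyClient("test-key")
# Replace the pooled client with one backed by MockTransport; the original is dropped.
client._client = httpx.Client(base_url=client.base_url, transport=httpx.MockTransport(handler))
response = client.get_app_info()
assert response.json() == {"name": "demo-app"}
client.close()
```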
+ +This test validates that the migration from requests to httpx maintains +backward compatibility and proper resource management. +""" + +import unittest +from unittest.mock import Mock, patch + +from dify_client import ( + DifyClient, + ChatClient, + CompletionClient, + WorkflowClient, + WorkspaceClient, + KnowledgeBaseClient, +) + + +class TestHttpxMigrationMocked(unittest.TestCase): + """Test cases for httpx migration with mocked requests.""" + + def setUp(self): + """Set up test fixtures.""" + self.api_key = "test-api-key" + self.base_url = "https://api.dify.ai/v1" + + @patch("dify_client.client.httpx.Client") + def test_client_initialization(self, mock_httpx_client): + """Test that client initializes with httpx.Client.""" + mock_client_instance = Mock() + mock_httpx_client.return_value = mock_client_instance + + client = DifyClient(self.api_key, self.base_url) + + # Verify httpx.Client was called with correct parameters + mock_httpx_client.assert_called_once() + call_kwargs = mock_httpx_client.call_args[1] + self.assertEqual(call_kwargs["base_url"], self.base_url) + + # Verify client properties + self.assertEqual(client.api_key, self.api_key) + self.assertEqual(client.base_url, self.base_url) + + client.close() + + @patch("dify_client.client.httpx.Client") + def test_context_manager_support(self, mock_httpx_client): + """Test that client works as context manager.""" + mock_client_instance = Mock() + mock_httpx_client.return_value = mock_client_instance + + with DifyClient(self.api_key, self.base_url) as client: + self.assertEqual(client.api_key, self.api_key) + + # Verify close was called + mock_client_instance.close.assert_called_once() + + @patch("dify_client.client.httpx.Client") + def test_manual_close(self, mock_httpx_client): + """Test manual close() method.""" + mock_client_instance = Mock() + mock_httpx_client.return_value = mock_client_instance + + client = DifyClient(self.api_key, self.base_url) + client.close() + + # Verify close was called + mock_client_instance.close.assert_called_once() + + @patch("dify_client.client.httpx.Client") + def test_send_request_httpx_compatibility(self, mock_httpx_client): + """Test _send_request uses httpx.Client.request properly.""" + mock_response = Mock() + mock_response.json.return_value = {"result": "success"} + mock_response.status_code = 200 + + mock_client_instance = Mock() + mock_client_instance.request.return_value = mock_response + mock_httpx_client.return_value = mock_client_instance + + client = DifyClient(self.api_key, self.base_url) + response = client._send_request("GET", "/test-endpoint") + + # Verify httpx.Client.request was called correctly + mock_client_instance.request.assert_called_once() + call_args = mock_client_instance.request.call_args + + # Verify method and endpoint + self.assertEqual(call_args[0][0], "GET") + self.assertEqual(call_args[0][1], "/test-endpoint") + + # Verify headers contain authorization + headers = call_args[1]["headers"] + self.assertEqual(headers["Authorization"], f"Bearer {self.api_key}") + self.assertEqual(headers["Content-Type"], "application/json") + + client.close() + + @patch("dify_client.client.httpx.Client") + def test_response_compatibility(self, mock_httpx_client): + """Test httpx.Response is compatible with requests.Response API.""" + mock_response = Mock() + mock_response.json.return_value = {"key": "value"} + mock_response.text = '{"key": "value"}' + mock_response.content = b'{"key": "value"}' + mock_response.status_code = 200 + mock_response.headers = {"Content-Type": 
"application/json"} + + mock_client_instance = Mock() + mock_client_instance.request.return_value = mock_response + mock_httpx_client.return_value = mock_client_instance + + client = DifyClient(self.api_key, self.base_url) + response = client._send_request("GET", "/test") + + # Verify all common response methods work + self.assertEqual(response.json(), {"key": "value"}) + self.assertEqual(response.text, '{"key": "value"}') + self.assertEqual(response.content, b'{"key": "value"}') + self.assertEqual(response.status_code, 200) + self.assertEqual(response.headers["Content-Type"], "application/json") + + client.close() + + @patch("dify_client.client.httpx.Client") + def test_all_client_classes_use_httpx(self, mock_httpx_client): + """Test that all client classes properly use httpx.""" + mock_client_instance = Mock() + mock_httpx_client.return_value = mock_client_instance + + clients = [ + DifyClient(self.api_key, self.base_url), + ChatClient(self.api_key, self.base_url), + CompletionClient(self.api_key, self.base_url), + WorkflowClient(self.api_key, self.base_url), + WorkspaceClient(self.api_key, self.base_url), + KnowledgeBaseClient(self.api_key, self.base_url), + ] + + # Verify httpx.Client was called for each client + self.assertEqual(mock_httpx_client.call_count, 6) + + # Clean up + for client in clients: + client.close() + + @patch("dify_client.client.httpx.Client") + def test_json_parameter_handling(self, mock_httpx_client): + """Test that json parameter is passed correctly.""" + mock_response = Mock() + mock_response.json.return_value = {"result": "success"} + + mock_client_instance = Mock() + mock_client_instance.request.return_value = mock_response + mock_httpx_client.return_value = mock_client_instance + + client = DifyClient(self.api_key, self.base_url) + test_data = {"key": "value", "number": 123} + + client._send_request("POST", "/test", json=test_data) + + # Verify json parameter was passed + call_args = mock_client_instance.request.call_args + self.assertEqual(call_args[1]["json"], test_data) + + client.close() + + @patch("dify_client.client.httpx.Client") + def test_params_parameter_handling(self, mock_httpx_client): + """Test that params parameter is passed correctly.""" + mock_response = Mock() + mock_response.json.return_value = {"result": "success"} + + mock_client_instance = Mock() + mock_client_instance.request.return_value = mock_response + mock_httpx_client.return_value = mock_client_instance + + client = DifyClient(self.api_key, self.base_url) + test_params = {"page": 1, "limit": 20} + + client._send_request("GET", "/test", params=test_params) + + # Verify params parameter was passed + call_args = mock_client_instance.request.call_args + self.assertEqual(call_args[1]["params"], test_params) + + client.close() + + @patch("dify_client.client.httpx.Client") + def test_inheritance_chain(self, mock_httpx_client): + """Test that inheritance chain is maintained.""" + mock_client_instance = Mock() + mock_httpx_client.return_value = mock_client_instance + + # ChatClient inherits from DifyClient + chat_client = ChatClient(self.api_key, self.base_url) + self.assertIsInstance(chat_client, DifyClient) + + # CompletionClient inherits from DifyClient + completion_client = CompletionClient(self.api_key, self.base_url) + self.assertIsInstance(completion_client, DifyClient) + + # WorkflowClient inherits from DifyClient + workflow_client = WorkflowClient(self.api_key, self.base_url) + self.assertIsInstance(workflow_client, DifyClient) + + # Clean up + chat_client.close() + 
completion_client.close() + workflow_client.close() + + @patch("dify_client.client.httpx.Client") + def test_nested_context_managers(self, mock_httpx_client): + """Test nested context managers work correctly.""" + mock_client_instance = Mock() + mock_httpx_client.return_value = mock_client_instance + + with DifyClient(self.api_key, self.base_url) as client1: + with ChatClient(self.api_key, self.base_url) as client2: + self.assertEqual(client1.api_key, self.api_key) + self.assertEqual(client2.api_key, self.api_key) + + # Both close methods should have been called + self.assertEqual(mock_client_instance.close.call_count, 2) + + +class TestChatClientHttpx(unittest.TestCase): + """Test ChatClient specific httpx integration.""" + + @patch("dify_client.client.httpx.Client") + def test_create_chat_message_httpx(self, mock_httpx_client): + """Test create_chat_message works with httpx.""" + mock_response = Mock() + mock_response.text = '{"answer": "Hello!"}' + mock_response.json.return_value = {"answer": "Hello!"} + mock_response.status_code = 200 + + mock_client_instance = Mock() + mock_client_instance.request.return_value = mock_response + mock_httpx_client.return_value = mock_client_instance + + with ChatClient("test-key") as client: + response = client.create_chat_message({}, "Hi", "user123") + self.assertIn("answer", response.text) + self.assertEqual(response.json()["answer"], "Hello!") + + +class TestCompletionClientHttpx(unittest.TestCase): + """Test CompletionClient specific httpx integration.""" + + @patch("dify_client.client.httpx.Client") + def test_create_completion_message_httpx(self, mock_httpx_client): + """Test create_completion_message works with httpx.""" + mock_response = Mock() + mock_response.text = '{"answer": "Response"}' + mock_response.json.return_value = {"answer": "Response"} + mock_response.status_code = 200 + + mock_client_instance = Mock() + mock_client_instance.request.return_value = mock_response + mock_httpx_client.return_value = mock_client_instance + + with CompletionClient("test-key") as client: + response = client.create_completion_message({"query": "test"}, "blocking", "user123") + self.assertIn("answer", response.text) + + +class TestKnowledgeBaseClientHttpx(unittest.TestCase): + """Test KnowledgeBaseClient specific httpx integration.""" + + @patch("dify_client.client.httpx.Client") + def test_list_datasets_httpx(self, mock_httpx_client): + """Test list_datasets works with httpx.""" + mock_response = Mock() + mock_response.json.return_value = {"data": [], "total": 0} + mock_response.status_code = 200 + + mock_client_instance = Mock() + mock_client_instance.request.return_value = mock_response + mock_httpx_client.return_value = mock_client_instance + + with KnowledgeBaseClient("test-key") as client: + response = client.list_datasets() + data = response.json() + self.assertIn("data", data) + self.assertIn("total", data) + + +class TestWorkflowClientHttpx(unittest.TestCase): + """Test WorkflowClient specific httpx integration.""" + + @patch("dify_client.client.httpx.Client") + def test_run_workflow_httpx(self, mock_httpx_client): + """Test run workflow works with httpx.""" + mock_response = Mock() + mock_response.json.return_value = {"result": "success"} + mock_response.status_code = 200 + + mock_client_instance = Mock() + mock_client_instance.request.return_value = mock_response + mock_httpx_client.return_value = mock_client_instance + + with WorkflowClient("test-key") as client: + response = client.run({"input": "test"}, "blocking", "user123") + 
self.assertEqual(response.json()["result"], "success") + + +class TestWorkspaceClientHttpx(unittest.TestCase): + """Test WorkspaceClient specific httpx integration.""" + + @patch("dify_client.client.httpx.Client") + def test_get_available_models_httpx(self, mock_httpx_client): + """Test get_available_models works with httpx.""" + mock_response = Mock() + mock_response.json.return_value = {"data": []} + mock_response.status_code = 200 + + mock_client_instance = Mock() + mock_client_instance.request.return_value = mock_response + mock_httpx_client.return_value = mock_client_instance + + with WorkspaceClient("test-key") as client: + response = client.get_available_models("llm") + self.assertIn("data", response.json()) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdks/python-client/tests/test_new_apis.py b/sdks/python-client/tests/test_new_apis.py deleted file mode 100644 index 09c62dfda7..0000000000 --- a/sdks/python-client/tests/test_new_apis.py +++ /dev/null @@ -1,416 +0,0 @@ -#!/usr/bin/env python3 -""" -Test suite for the new Service API functionality in the Python SDK. - -This test validates the implementation of the missing Service API endpoints -that were added to the Python SDK to achieve complete coverage. -""" - -import unittest -from unittest.mock import Mock, patch, MagicMock -import json - -from dify_client import ( - DifyClient, - ChatClient, - WorkflowClient, - KnowledgeBaseClient, - WorkspaceClient, -) - - -class TestNewServiceAPIs(unittest.TestCase): - """Test cases for new Service API implementations.""" - - def setUp(self): - """Set up test fixtures.""" - self.api_key = "test-api-key" - self.base_url = "https://api.dify.ai/v1" - - @patch("dify_client.client.requests.request") - def test_app_info_apis(self, mock_request): - """Test application info APIs.""" - mock_response = Mock() - mock_response.json.return_value = { - "name": "Test App", - "description": "Test Description", - "tags": ["test", "api"], - "mode": "chat", - "author_name": "Test Author", - } - mock_request.return_value = mock_response - - client = DifyClient(self.api_key, self.base_url) - - # Test get_app_info - result = client.get_app_info() - mock_request.assert_called_with( - "GET", - f"{self.base_url}/info", - json=None, - params=None, - headers={ - "Authorization": f"Bearer {self.api_key}", - "Content-Type": "application/json", - }, - stream=False, - ) - - # Test get_app_site_info - client.get_app_site_info() - mock_request.assert_called_with( - "GET", - f"{self.base_url}/site", - json=None, - params=None, - headers={ - "Authorization": f"Bearer {self.api_key}", - "Content-Type": "application/json", - }, - stream=False, - ) - - # Test get_file_preview - file_id = "test-file-id" - client.get_file_preview(file_id) - mock_request.assert_called_with( - "GET", - f"{self.base_url}/files/{file_id}/preview", - json=None, - params=None, - headers={ - "Authorization": f"Bearer {self.api_key}", - "Content-Type": "application/json", - }, - stream=False, - ) - - @patch("dify_client.client.requests.request") - def test_annotation_apis(self, mock_request): - """Test annotation APIs.""" - mock_response = Mock() - mock_response.json.return_value = {"result": "success"} - mock_request.return_value = mock_response - - client = ChatClient(self.api_key, self.base_url) - - # Test annotation_reply_action - enable - client.annotation_reply_action( - action="enable", - score_threshold=0.8, - embedding_provider_name="openai", - embedding_model_name="text-embedding-ada-002", - ) - mock_request.assert_called_with( 
- "POST", - f"{self.base_url}/apps/annotation-reply/enable", - json={ - "score_threshold": 0.8, - "embedding_provider_name": "openai", - "embedding_model_name": "text-embedding-ada-002", - }, - params=None, - headers={ - "Authorization": f"Bearer {self.api_key}", - "Content-Type": "application/json", - }, - stream=False, - ) - - # Test annotation_reply_action - disable (now requires same fields as enable) - client.annotation_reply_action( - action="disable", - score_threshold=0.5, - embedding_provider_name="openai", - embedding_model_name="text-embedding-ada-002", - ) - - # Test annotation_reply_action with score_threshold=0 (edge case) - client.annotation_reply_action( - action="enable", - score_threshold=0.0, # This should work and not raise ValueError - embedding_provider_name="openai", - embedding_model_name="text-embedding-ada-002", - ) - - # Test get_annotation_reply_status - client.get_annotation_reply_status("enable", "job-123") - - # Test list_annotations - client.list_annotations(page=1, limit=20, keyword="test") - - # Test create_annotation - client.create_annotation("Test question?", "Test answer.") - - # Test update_annotation - client.update_annotation("annotation-123", "Updated question?", "Updated answer.") - - # Test delete_annotation - client.delete_annotation("annotation-123") - - # Verify all calls were made (8 calls: enable + disable + enable with 0.0 + 5 other operations) - self.assertEqual(mock_request.call_count, 8) - - @patch("dify_client.client.requests.request") - def test_knowledge_base_advanced_apis(self, mock_request): - """Test advanced knowledge base APIs.""" - mock_response = Mock() - mock_response.json.return_value = {"result": "success"} - mock_request.return_value = mock_response - - dataset_id = "test-dataset-id" - client = KnowledgeBaseClient(self.api_key, self.base_url, dataset_id) - - # Test hit_testing - client.hit_testing("test query", {"type": "vector"}) - mock_request.assert_called_with( - "POST", - f"{self.base_url}/datasets/{dataset_id}/hit-testing", - json={"query": "test query", "retrieval_model": {"type": "vector"}}, - params=None, - headers={ - "Authorization": f"Bearer {self.api_key}", - "Content-Type": "application/json", - }, - stream=False, - ) - - # Test metadata operations - client.get_dataset_metadata() - client.create_dataset_metadata({"key": "value"}) - client.update_dataset_metadata("meta-123", {"key": "new_value"}) - client.get_built_in_metadata() - client.manage_built_in_metadata("enable", {"type": "built_in"}) - client.update_documents_metadata([{"document_id": "doc1", "metadata": {"key": "value"}}]) - - # Test tag operations - client.list_dataset_tags() - client.bind_dataset_tags(["tag1", "tag2"]) - client.unbind_dataset_tag("tag1") - client.get_dataset_tags() - - # Verify multiple calls were made - self.assertGreater(mock_request.call_count, 5) - - @patch("dify_client.client.requests.request") - def test_rag_pipeline_apis(self, mock_request): - """Test RAG pipeline APIs.""" - mock_response = Mock() - mock_response.json.return_value = {"result": "success"} - mock_request.return_value = mock_response - - dataset_id = "test-dataset-id" - client = KnowledgeBaseClient(self.api_key, self.base_url, dataset_id) - - # Test get_datasource_plugins - client.get_datasource_plugins(is_published=True) - mock_request.assert_called_with( - "GET", - f"{self.base_url}/datasets/{dataset_id}/pipeline/datasource-plugins", - json=None, - params={"is_published": True}, - headers={ - "Authorization": f"Bearer {self.api_key}", - "Content-Type": 
"application/json", - }, - stream=False, - ) - - # Test run_datasource_node - client.run_datasource_node( - node_id="node-123", - inputs={"param": "value"}, - datasource_type="online_document", - is_published=True, - credential_id="cred-123", - ) - - # Test run_rag_pipeline with blocking mode - client.run_rag_pipeline( - inputs={"query": "test"}, - datasource_type="online_document", - datasource_info_list=[{"id": "ds1"}], - start_node_id="start-node", - is_published=True, - response_mode="blocking", - ) - - # Test run_rag_pipeline with streaming mode - client.run_rag_pipeline( - inputs={"query": "test"}, - datasource_type="online_document", - datasource_info_list=[{"id": "ds1"}], - start_node_id="start-node", - is_published=True, - response_mode="streaming", - ) - - self.assertEqual(mock_request.call_count, 4) - - @patch("dify_client.client.requests.request") - def test_workspace_apis(self, mock_request): - """Test workspace APIs.""" - mock_response = Mock() - mock_response.json.return_value = { - "data": [{"name": "gpt-3.5-turbo", "type": "llm"}, {"name": "gpt-4", "type": "llm"}] - } - mock_request.return_value = mock_response - - client = WorkspaceClient(self.api_key, self.base_url) - - # Test get_available_models - result = client.get_available_models("llm") - mock_request.assert_called_with( - "GET", - f"{self.base_url}/workspaces/current/models/model-types/llm", - json=None, - params=None, - headers={ - "Authorization": f"Bearer {self.api_key}", - "Content-Type": "application/json", - }, - stream=False, - ) - - @patch("dify_client.client.requests.request") - def test_workflow_advanced_apis(self, mock_request): - """Test advanced workflow APIs.""" - mock_response = Mock() - mock_response.json.return_value = {"result": "success"} - mock_request.return_value = mock_response - - client = WorkflowClient(self.api_key, self.base_url) - - # Test get_workflow_logs - client.get_workflow_logs(keyword="test", status="succeeded", page=1, limit=20) - mock_request.assert_called_with( - "GET", - f"{self.base_url}/workflows/logs", - json=None, - params={"page": 1, "limit": 20, "keyword": "test", "status": "succeeded"}, - headers={ - "Authorization": f"Bearer {self.api_key}", - "Content-Type": "application/json", - }, - stream=False, - ) - - # Test get_workflow_logs with additional filters - client.get_workflow_logs( - keyword="test", - status="succeeded", - page=1, - limit=20, - created_at__before="2024-01-01", - created_at__after="2023-01-01", - created_by_account="user123", - ) - - # Test run_specific_workflow - client.run_specific_workflow( - workflow_id="workflow-123", inputs={"param": "value"}, response_mode="streaming", user="user-123" - ) - - self.assertEqual(mock_request.call_count, 3) - - def test_error_handling(self): - """Test error handling for required parameters.""" - client = ChatClient(self.api_key, self.base_url) - - # Test annotation_reply_action with missing required parameters would be a TypeError now - # since parameters are required in method signature - with self.assertRaises(TypeError): - client.annotation_reply_action("enable") - - # Test annotation_reply_action with explicit None values should raise ValueError - with self.assertRaises(ValueError) as context: - client.annotation_reply_action("enable", None, "provider", "model") - - self.assertIn("cannot be None", str(context.exception)) - - # Test KnowledgeBaseClient without dataset_id - kb_client = KnowledgeBaseClient(self.api_key, self.base_url) - with self.assertRaises(ValueError) as context: - kb_client.hit_testing("test 
query") - - self.assertIn("dataset_id is not set", str(context.exception)) - - @patch("dify_client.client.open") - @patch("dify_client.client.requests.request") - def test_file_upload_apis(self, mock_request, mock_open): - """Test file upload APIs.""" - mock_response = Mock() - mock_response.json.return_value = {"result": "success"} - mock_request.return_value = mock_response - - mock_file = MagicMock() - mock_open.return_value.__enter__.return_value = mock_file - - dataset_id = "test-dataset-id" - client = KnowledgeBaseClient(self.api_key, self.base_url, dataset_id) - - # Test upload_pipeline_file - client.upload_pipeline_file("/path/to/test.pdf") - - mock_open.assert_called_with("/path/to/test.pdf", "rb") - mock_request.assert_called_once() - - def test_comprehensive_coverage(self): - """Test that all previously missing APIs are now implemented.""" - - # Test DifyClient methods - dify_methods = ["get_app_info", "get_app_site_info", "get_file_preview"] - client = DifyClient(self.api_key) - for method in dify_methods: - self.assertTrue(hasattr(client, method), f"DifyClient missing method: {method}") - - # Test ChatClient annotation methods - chat_methods = [ - "annotation_reply_action", - "get_annotation_reply_status", - "list_annotations", - "create_annotation", - "update_annotation", - "delete_annotation", - ] - chat_client = ChatClient(self.api_key) - for method in chat_methods: - self.assertTrue(hasattr(chat_client, method), f"ChatClient missing method: {method}") - - # Test WorkflowClient advanced methods - workflow_methods = ["get_workflow_logs", "run_specific_workflow"] - workflow_client = WorkflowClient(self.api_key) - for method in workflow_methods: - self.assertTrue(hasattr(workflow_client, method), f"WorkflowClient missing method: {method}") - - # Test KnowledgeBaseClient advanced methods - kb_methods = [ - "hit_testing", - "get_dataset_metadata", - "create_dataset_metadata", - "update_dataset_metadata", - "get_built_in_metadata", - "manage_built_in_metadata", - "update_documents_metadata", - "list_dataset_tags", - "bind_dataset_tags", - "unbind_dataset_tag", - "get_dataset_tags", - "get_datasource_plugins", - "run_datasource_node", - "run_rag_pipeline", - "upload_pipeline_file", - ] - kb_client = KnowledgeBaseClient(self.api_key) - for method in kb_methods: - self.assertTrue(hasattr(kb_client, method), f"KnowledgeBaseClient missing method: {method}") - - # Test WorkspaceClient methods - workspace_methods = ["get_available_models"] - workspace_client = WorkspaceClient(self.api_key) - for method in workspace_methods: - self.assertTrue(hasattr(workspace_client, method), f"WorkspaceClient missing method: {method}") - - -if __name__ == "__main__": - unittest.main() diff --git a/sdks/python-client/uv.lock b/sdks/python-client/uv.lock new file mode 100644 index 0000000000..19f348289b --- /dev/null +++ b/sdks/python-client/uv.lock @@ -0,0 +1,271 @@ +version = 1 +revision = 3 +requires-python = ">=3.10" + +[[package]] +name = "aiofiles" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/41/c3/534eac40372d8ee36ef40df62ec129bee4fdb5ad9706e58a29be53b2c970/aiofiles-25.1.0.tar.gz", hash = "sha256:a8d728f0a29de45dc521f18f07297428d56992a742f0cd2701ba86e44d23d5b2", size = 46354, upload-time = "2025-10-09T20:51:04.358Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/8a/340a1555ae33d7354dbca4faa54948d76d89a27ceef032c8c3bc661d003e/aiofiles-25.1.0-py3-none-any.whl", hash = 
"sha256:abe311e527c862958650f9438e859c1fa7568a141b22abcd015e120e86a85695", size = 14668, upload-time = "2025-10-09T20:51:03.174Z" }, +] + +[[package]] +name = "anyio" +version = "4.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c6/78/7d432127c41b50bccba979505f272c16cbcadcc33645d5fa3a738110ae75/anyio-4.11.0.tar.gz", hash = "sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4", size = 219094, upload-time = "2025-09-23T09:19:12.58Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/b3/9b1a8074496371342ec1e796a96f99c82c945a339cd81a8e73de28b4cf9e/anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc", size = 109097, upload-time = "2025-09-23T09:19:10.601Z" }, +] + +[[package]] +name = "backports-asyncio-runner" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/ff/70dca7d7cb1cbc0edb2c6cc0c38b65cba36cccc491eca64cabd5fe7f8670/backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162", size = 69893, upload-time = "2025-07-02T02:27:15.685Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/59/76ab57e3fe74484f48a53f8e337171b4a2349e506eabe136d7e01d059086/backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5", size = 12313, upload-time = "2025-07-02T02:27:14.263Z" }, +] + +[[package]] +name = "certifi" +version = "2025.10.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4c/5b/b6ce21586237c77ce67d01dc5507039d444b630dd76611bbca2d8e5dcd91/certifi-2025.10.5.tar.gz", hash = "sha256:47c09d31ccf2acf0be3f701ea53595ee7e0b8fa08801c6624be771df09ae7b43", size = 164519, upload-time = "2025-10-05T04:12:15.808Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/37/af0d2ef3967ac0d6113837b44a4f0bfe1328c2b9763bd5b1744520e5cfed/certifi-2025.10.5-py3-none-any.whl", hash = "sha256:0f212c2744a9bb6de0c56639a6f68afe01ecd92d91f14ae897c4fe7bbeeef0de", size = 163286, upload-time = "2025-10-05T04:12:14.03Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "dify-client" +version = "0.1.12" +source = { editable = "." 
} +dependencies = [ + { name = "aiofiles" }, + { name = "httpx" }, +] + +[package.optional-dependencies] +dev = [ + { name = "pytest" }, + { name = "pytest-asyncio" }, +] + +[package.metadata] +requires-dist = [ + { name = "aiofiles", specifier = ">=23.0.0" }, + { name = "httpx", specifier = ">=0.27.0" }, + { name = "pytest", marker = "extra == 'dev'", specifier = ">=7.0.0" }, + { name = "pytest-asyncio", marker = "extra == 'dev'", specifier = ">=0.21.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "exceptiongroup" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0b/9f/a65090624ecf468cdca03533906e7c69ed7588582240cfe7cc9e770b50eb/exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88", size = 29749, upload-time = "2025-05-10T17:42:51.123Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674, upload-time = "2025-05-10T17:42:49.33Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = 
"2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pytest" 
+version = "8.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "backports-asyncio-runner", marker = "python_full_version < '3.11'" }, + { name = "pytest" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/86/9e3c5f48f7b7b638b216e4b9e645f54d199d7abbbab7a64a13b4e12ba10f/pytest_asyncio-1.2.0.tar.gz", hash = "sha256:c609a64a2a8768462d0c99811ddb8bd2583c33fd33cf7f21af1c142e824ffb57", size = 50119, upload-time = "2025-09-12T07:33:53.816Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/93/2fa34714b7a4ae72f2f8dad66ba17dd9a2c793220719e736dda28b7aec27/pytest_asyncio-1.2.0-py3-none-any.whl", hash = "sha256:8e17ae5e46d8e7efe51ab6494dd2010f4ca8dae51652aa3c8d55acf50bfb2e99", size = 15095, upload-time = "2025-09-12T07:33:52.639Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "tomli" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/ed/3f73f72945444548f33eba9a87fc7a6e969915e7b1acc8260b30e1f76a2f/tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549", size = 17392, upload-time = "2025-10-08T22:01:47.119Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/2e/299f62b401438d5fe1624119c723f5d877acc86a4c2492da405626665f12/tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45", size = 153236, upload-time = "2025-10-08T22:01:00.137Z" }, + { url = "https://files.pythonhosted.org/packages/86/7f/d8fffe6a7aefdb61bced88fcb5e280cfd71e08939da5894161bd71bea022/tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba", size 
= 148084, upload-time = "2025-10-08T22:01:01.63Z" }, + { url = "https://files.pythonhosted.org/packages/47/5c/24935fb6a2ee63e86d80e4d3b58b222dafaf438c416752c8b58537c8b89a/tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf", size = 234832, upload-time = "2025-10-08T22:01:02.543Z" }, + { url = "https://files.pythonhosted.org/packages/89/da/75dfd804fc11e6612846758a23f13271b76d577e299592b4371a4ca4cd09/tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441", size = 242052, upload-time = "2025-10-08T22:01:03.836Z" }, + { url = "https://files.pythonhosted.org/packages/70/8c/f48ac899f7b3ca7eb13af73bacbc93aec37f9c954df3c08ad96991c8c373/tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845", size = 239555, upload-time = "2025-10-08T22:01:04.834Z" }, + { url = "https://files.pythonhosted.org/packages/ba/28/72f8afd73f1d0e7829bfc093f4cb98ce0a40ffc0cc997009ee1ed94ba705/tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c", size = 245128, upload-time = "2025-10-08T22:01:05.84Z" }, + { url = "https://files.pythonhosted.org/packages/b6/eb/a7679c8ac85208706d27436e8d421dfa39d4c914dcf5fa8083a9305f58d9/tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456", size = 96445, upload-time = "2025-10-08T22:01:06.896Z" }, + { url = "https://files.pythonhosted.org/packages/0a/fe/3d3420c4cb1ad9cb462fb52967080575f15898da97e21cb6f1361d505383/tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be", size = 107165, upload-time = "2025-10-08T22:01:08.107Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b7/40f36368fcabc518bb11c8f06379a0fd631985046c038aca08c6d6a43c6e/tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac", size = 154891, upload-time = "2025-10-08T22:01:09.082Z" }, + { url = "https://files.pythonhosted.org/packages/f9/3f/d9dd692199e3b3aab2e4e4dd948abd0f790d9ded8cd10cbaae276a898434/tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22", size = 148796, upload-time = "2025-10-08T22:01:10.266Z" }, + { url = "https://files.pythonhosted.org/packages/60/83/59bff4996c2cf9f9387a0f5a3394629c7efa5ef16142076a23a90f1955fa/tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f", size = 242121, upload-time = "2025-10-08T22:01:11.332Z" }, + { url = "https://files.pythonhosted.org/packages/45/e5/7c5119ff39de8693d6baab6c0b6dcb556d192c165596e9fc231ea1052041/tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52", size = 250070, upload-time = "2025-10-08T22:01:12.498Z" }, + { url = "https://files.pythonhosted.org/packages/45/12/ad5126d3a278f27e6701abde51d342aa78d06e27ce2bb596a01f7709a5a2/tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8", size = 245859, upload-time = "2025-10-08T22:01:13.551Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a1/4d6865da6a71c603cfe6ad0e6556c73c76548557a8d658f9e3b142df245f/tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6", size = 250296, upload-time = "2025-10-08T22:01:14.614Z" }, + { url = "https://files.pythonhosted.org/packages/a0/b7/a7a7042715d55c9ba6e8b196d65d2cb662578b4d8cd17d882d45322b0d78/tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876", size = 97124, upload-time = "2025-10-08T22:01:15.629Z" }, + { url = "https://files.pythonhosted.org/packages/06/1e/f22f100db15a68b520664eb3328fb0ae4e90530887928558112c8d1f4515/tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878", size = 107698, upload-time = "2025-10-08T22:01:16.51Z" }, + { url = "https://files.pythonhosted.org/packages/89/48/06ee6eabe4fdd9ecd48bf488f4ac783844fd777f547b8d1b61c11939974e/tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b", size = 154819, upload-time = "2025-10-08T22:01:17.964Z" }, + { url = "https://files.pythonhosted.org/packages/f1/01/88793757d54d8937015c75dcdfb673c65471945f6be98e6a0410fba167ed/tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae", size = 148766, upload-time = "2025-10-08T22:01:18.959Z" }, + { url = "https://files.pythonhosted.org/packages/42/17/5e2c956f0144b812e7e107f94f1cc54af734eb17b5191c0bbfb72de5e93e/tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b", size = 240771, upload-time = "2025-10-08T22:01:20.106Z" }, + { url = "https://files.pythonhosted.org/packages/d5/f4/0fbd014909748706c01d16824eadb0307115f9562a15cbb012cd9b3512c5/tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf", size = 248586, upload-time = "2025-10-08T22:01:21.164Z" }, + { url = "https://files.pythonhosted.org/packages/30/77/fed85e114bde5e81ecf9bc5da0cc69f2914b38f4708c80ae67d0c10180c5/tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f", size = 244792, upload-time = "2025-10-08T22:01:22.417Z" }, + { url = "https://files.pythonhosted.org/packages/55/92/afed3d497f7c186dc71e6ee6d4fcb0acfa5f7d0a1a2878f8beae379ae0cc/tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05", size = 248909, upload-time = "2025-10-08T22:01:23.859Z" }, + { url = "https://files.pythonhosted.org/packages/f8/84/ef50c51b5a9472e7265ce1ffc7f24cd4023d289e109f669bdb1553f6a7c2/tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606", size = 96946, upload-time = "2025-10-08T22:01:24.893Z" }, + { url = "https://files.pythonhosted.org/packages/b2/b7/718cd1da0884f281f95ccfa3a6cc572d30053cba64603f79d431d3c9b61b/tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999", size = 107705, upload-time = 
"2025-10-08T22:01:26.153Z" }, + { url = "https://files.pythonhosted.org/packages/19/94/aeafa14a52e16163008060506fcb6aa1949d13548d13752171a755c65611/tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e", size = 154244, upload-time = "2025-10-08T22:01:27.06Z" }, + { url = "https://files.pythonhosted.org/packages/db/e4/1e58409aa78eefa47ccd19779fc6f36787edbe7d4cd330eeeedb33a4515b/tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3", size = 148637, upload-time = "2025-10-08T22:01:28.059Z" }, + { url = "https://files.pythonhosted.org/packages/26/b6/d1eccb62f665e44359226811064596dd6a366ea1f985839c566cd61525ae/tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc", size = 241925, upload-time = "2025-10-08T22:01:29.066Z" }, + { url = "https://files.pythonhosted.org/packages/70/91/7cdab9a03e6d3d2bb11beae108da5bdc1c34bdeb06e21163482544ddcc90/tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0", size = 249045, upload-time = "2025-10-08T22:01:31.98Z" }, + { url = "https://files.pythonhosted.org/packages/15/1b/8c26874ed1f6e4f1fcfeb868db8a794cbe9f227299402db58cfcc858766c/tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879", size = 245835, upload-time = "2025-10-08T22:01:32.989Z" }, + { url = "https://files.pythonhosted.org/packages/fd/42/8e3c6a9a4b1a1360c1a2a39f0b972cef2cc9ebd56025168c4137192a9321/tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005", size = 253109, upload-time = "2025-10-08T22:01:34.052Z" }, + { url = "https://files.pythonhosted.org/packages/22/0c/b4da635000a71b5f80130937eeac12e686eefb376b8dee113b4a582bba42/tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463", size = 97930, upload-time = "2025-10-08T22:01:35.082Z" }, + { url = "https://files.pythonhosted.org/packages/b9/74/cb1abc870a418ae99cd5c9547d6bce30701a954e0e721821df483ef7223c/tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8", size = 107964, upload-time = "2025-10-08T22:01:36.057Z" }, + { url = "https://files.pythonhosted.org/packages/54/78/5c46fff6432a712af9f792944f4fcd7067d8823157949f4e40c56b8b3c83/tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77", size = 163065, upload-time = "2025-10-08T22:01:37.27Z" }, + { url = "https://files.pythonhosted.org/packages/39/67/f85d9bd23182f45eca8939cd2bc7050e1f90c41f4a2ecbbd5963a1d1c486/tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf", size = 159088, upload-time = "2025-10-08T22:01:38.235Z" }, + { url = "https://files.pythonhosted.org/packages/26/5a/4b546a0405b9cc0659b399f12b6adb750757baf04250b148d3c5059fc4eb/tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530", size = 268193, upload-time = "2025-10-08T22:01:39.712Z" }, + { url = 
"https://files.pythonhosted.org/packages/42/4f/2c12a72ae22cf7b59a7fe75b3465b7aba40ea9145d026ba41cb382075b0e/tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b", size = 275488, upload-time = "2025-10-08T22:01:40.773Z" }, + { url = "https://files.pythonhosted.org/packages/92/04/a038d65dbe160c3aa5a624e93ad98111090f6804027d474ba9c37c8ae186/tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67", size = 272669, upload-time = "2025-10-08T22:01:41.824Z" }, + { url = "https://files.pythonhosted.org/packages/be/2f/8b7c60a9d1612a7cbc39ffcca4f21a73bf368a80fc25bccf8253e2563267/tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f", size = 279709, upload-time = "2025-10-08T22:01:43.177Z" }, + { url = "https://files.pythonhosted.org/packages/7e/46/cc36c679f09f27ded940281c38607716c86cf8ba4a518d524e349c8b4874/tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0", size = 107563, upload-time = "2025-10-08T22:01:44.233Z" }, + { url = "https://files.pythonhosted.org/packages/84/ff/426ca8683cf7b753614480484f6437f568fd2fda2edbdf57a2d3d8b27a0b/tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba", size = 119756, upload-time = "2025-10-08T22:01:45.234Z" }, + { url = "https://files.pythonhosted.org/packages/77/b8/0135fadc89e73be292b473cb820b4f5a08197779206b33191e801feeae40/tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b", size = 14408, upload-time = "2025-10-08T22:01:46.04Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] From e3191d4e916f3301d76d649d6eadf9d0223c281b Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Sat, 11 Oct 2025 17:46:44 +0800 Subject: [PATCH 33/49] fix enum and type (#26756) Signed-off-by: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .../model_config/converter.py | 6 +- api/core/entities/provider_configuration.py | 2 +- api/core/provider_manager.py | 2 +- .../core/test_provider_configuration.py | 179 +++++++++++++++++- 4 files changed, 185 insertions(+), 4 deletions(-) diff --git a/api/core/app/app_config/easy_ui_based_app/model_config/converter.py b/api/core/app/app_config/easy_ui_based_app/model_config/converter.py index 7cd5fe75d5..b816c8d7d0 100644 --- a/api/core/app/app_config/easy_ui_based_app/model_config/converter.py +++ 
b/api/core/app/app_config/easy_ui_based_app/model_config/converter.py @@ -70,7 +70,11 @@ class ModelConfigConverter: if not model_mode: model_mode = LLMMode.CHAT if model_schema and model_schema.model_properties.get(ModelPropertyKey.MODE): - model_mode = LLMMode(model_schema.model_properties[ModelPropertyKey.MODE]).value + try: + model_mode = LLMMode(model_schema.model_properties[ModelPropertyKey.MODE]) + except ValueError: + # Fall back to CHAT mode if the stored value is invalid + model_mode = LLMMode.CHAT if not model_schema: raise ValueError(f"Model {model_name} not exist.") diff --git a/api/core/entities/provider_configuration.py b/api/core/entities/provider_configuration.py index bc19afb52a..29b8f8f610 100644 --- a/api/core/entities/provider_configuration.py +++ b/api/core/entities/provider_configuration.py @@ -1414,7 +1414,7 @@ class ProviderConfiguration(BaseModel): """ secret_input_form_variables = [] for credential_form_schema in credential_form_schemas: - if credential_form_schema.type.value == FormType.SECRET_INPUT: + if credential_form_schema.type == FormType.SECRET_INPUT: secret_input_form_variables.append(credential_form_schema.variable) return secret_input_form_variables diff --git a/api/core/provider_manager.py b/api/core/provider_manager.py index 7bc9830ac3..6cf6620d8d 100644 --- a/api/core/provider_manager.py +++ b/api/core/provider_manager.py @@ -1046,7 +1046,7 @@ class ProviderManager: """ secret_input_form_variables = [] for credential_form_schema in credential_form_schemas: - if credential_form_schema.type.value == FormType.SECRET_INPUT: + if credential_form_schema.type == FormType.SECRET_INPUT: secret_input_form_variables.append(credential_form_schema.variable) return secret_input_form_variables diff --git a/api/tests/unit_tests/core/test_provider_configuration.py b/api/tests/unit_tests/core/test_provider_configuration.py index 75621ecb6a..9060cf7b6c 100644 --- a/api/tests/unit_tests/core/test_provider_configuration.py +++ b/api/tests/unit_tests/core/test_provider_configuration.py @@ -14,7 +14,13 @@ from core.entities.provider_entities import ( ) from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.entities.provider_entities import ConfigurateMethod, ProviderEntity +from core.model_runtime.entities.provider_entities import ( + ConfigurateMethod, + CredentialFormSchema, + FormOption, + FormType, + ProviderEntity, +) from models.provider import Provider, ProviderType @@ -306,3 +312,174 @@ class TestProviderConfiguration: # Assert assert credentials == {"openai_api_key": "test_key"} + + def test_extract_secret_variables_with_secret_input(self, provider_configuration): + """Test extracting secret variables from credential form schemas""" + # Arrange + credential_form_schemas = [ + CredentialFormSchema( + variable="api_key", + label=I18nObject(en_US="API Key", zh_Hans="API 密钥"), + type=FormType.SECRET_INPUT, + required=True, + ), + CredentialFormSchema( + variable="model_name", + label=I18nObject(en_US="Model Name", zh_Hans="模型名称"), + type=FormType.TEXT_INPUT, + required=True, + ), + CredentialFormSchema( + variable="secret_token", + label=I18nObject(en_US="Secret Token", zh_Hans="密钥令牌"), + type=FormType.SECRET_INPUT, + required=False, + ), + ] + + # Act + secret_variables = provider_configuration.extract_secret_variables(credential_form_schemas) + + # Assert + assert len(secret_variables) == 2 + assert "api_key" in secret_variables + assert "secret_token" in 
secret_variables + assert "model_name" not in secret_variables + + def test_extract_secret_variables_no_secret_input(self, provider_configuration): + """Test extracting secret variables when no secret input fields exist""" + # Arrange + credential_form_schemas = [ + CredentialFormSchema( + variable="model_name", + label=I18nObject(en_US="Model Name", zh_Hans="模型名称"), + type=FormType.TEXT_INPUT, + required=True, + ), + CredentialFormSchema( + variable="temperature", + label=I18nObject(en_US="Temperature", zh_Hans="温度"), + type=FormType.SELECT, + required=True, + options=[FormOption(label=I18nObject(en_US="0.1", zh_Hans="0.1"), value="0.1")], + ), + ] + + # Act + secret_variables = provider_configuration.extract_secret_variables(credential_form_schemas) + + # Assert + assert len(secret_variables) == 0 + + def test_extract_secret_variables_empty_list(self, provider_configuration): + """Test extracting secret variables from empty credential form schemas""" + # Arrange + credential_form_schemas = [] + + # Act + secret_variables = provider_configuration.extract_secret_variables(credential_form_schemas) + + # Assert + assert len(secret_variables) == 0 + + @patch("core.entities.provider_configuration.encrypter") + def test_obfuscated_credentials_with_secret_variables(self, mock_encrypter, provider_configuration): + """Test obfuscating credentials with secret variables""" + # Arrange + credentials = { + "api_key": "sk-1234567890abcdef", + "model_name": "gpt-4", + "secret_token": "secret_value_123", + "temperature": "0.7", + } + + credential_form_schemas = [ + CredentialFormSchema( + variable="api_key", + label=I18nObject(en_US="API Key", zh_Hans="API 密钥"), + type=FormType.SECRET_INPUT, + required=True, + ), + CredentialFormSchema( + variable="model_name", + label=I18nObject(en_US="Model Name", zh_Hans="模型名称"), + type=FormType.TEXT_INPUT, + required=True, + ), + CredentialFormSchema( + variable="secret_token", + label=I18nObject(en_US="Secret Token", zh_Hans="密钥令牌"), + type=FormType.SECRET_INPUT, + required=False, + ), + CredentialFormSchema( + variable="temperature", + label=I18nObject(en_US="Temperature", zh_Hans="温度"), + type=FormType.TEXT_INPUT, + required=True, + ), + ] + + mock_encrypter.obfuscated_token.side_effect = lambda x: f"***{x[-4:]}" + + # Act + obfuscated = provider_configuration.obfuscated_credentials(credentials, credential_form_schemas) + + # Assert + assert obfuscated["api_key"] == "***cdef" + assert obfuscated["model_name"] == "gpt-4" # Not obfuscated + assert obfuscated["secret_token"] == "***_123" + assert obfuscated["temperature"] == "0.7" # Not obfuscated + + # Verify encrypter was called for secret fields only + assert mock_encrypter.obfuscated_token.call_count == 2 + mock_encrypter.obfuscated_token.assert_any_call("sk-1234567890abcdef") + mock_encrypter.obfuscated_token.assert_any_call("secret_value_123") + + def test_obfuscated_credentials_no_secret_variables(self, provider_configuration): + """Test obfuscating credentials when no secret variables exist""" + # Arrange + credentials = { + "model_name": "gpt-4", + "temperature": "0.7", + "max_tokens": "1000", + } + + credential_form_schemas = [ + CredentialFormSchema( + variable="model_name", + label=I18nObject(en_US="Model Name", zh_Hans="模型名称"), + type=FormType.TEXT_INPUT, + required=True, + ), + CredentialFormSchema( + variable="temperature", + label=I18nObject(en_US="Temperature", zh_Hans="温度"), + type=FormType.TEXT_INPUT, + required=True, + ), + CredentialFormSchema( + variable="max_tokens", + 
label=I18nObject(en_US="Max Tokens", zh_Hans="最大令牌数"), + type=FormType.TEXT_INPUT, + required=True, + ), + ] + + # Act + obfuscated = provider_configuration.obfuscated_credentials(credentials, credential_form_schemas) + + # Assert + assert obfuscated == credentials # No changes expected + + def test_obfuscated_credentials_empty_credentials(self, provider_configuration): + """Test obfuscating empty credentials""" + # Arrange + credentials = {} + credential_form_schemas = [] + + # Act + obfuscated = provider_configuration.obfuscated_credentials(credentials, credential_form_schemas) + + # Assert + assert obfuscated == {} From 30c5b47699d15d1031efa1ace29581654c724b4b Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Sat, 11 Oct 2025 18:18:18 +0800 Subject: [PATCH 34/49] refactor: simplify InlineDeleteConfirm component structure (#26771) --- .../base/inline-delete-confirm/index.tsx | 69 +++++++++---------- 1 file changed, 31 insertions(+), 38 deletions(-) diff --git a/web/app/components/base/inline-delete-confirm/index.tsx b/web/app/components/base/inline-delete-confirm/index.tsx index 2a33e14701..eb671609cf 100644 --- a/web/app/components/base/inline-delete-confirm/index.tsx +++ b/web/app/components/base/inline-delete-confirm/index.tsx @@ -34,48 +34,41 @@ const InlineDeleteConfirm: FC = ({ aria-labelledby="inline-delete-confirm-title" aria-describedby="inline-delete-confirm-description" className={cn( - 'flex h-16 w-[120px] flex-col', - 'rounded-xl border-0 border-t-[0.5px] border-components-panel-border', - 'bg-background-overlay-backdrop backdrop-blur-[10px]', - 'shadow-lg', - 'p-0 pt-1', - className, - )} - > -
-
-          {titleText}
-
+        'shadow-lg',
+        className,
+      )}
+    >
+
+          {titleText}
+
-
-
-
-
+
+
+
From 0173496a7745372506f9b7f594a35598258378ab Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Sat, 11 Oct 2025 18:59:31 +0800 Subject: [PATCH 35/49] fix: happy-dom version (#26764) Co-authored-by: lyzno1 Co-authored-by: lyzno1 <92089059+lyzno1@users.noreply.github.com> --- web/__tests__/real-browser-flicker.test.tsx | 126 +++++++++++++------- web/jest.config.ts | 14 ++- web/package.json | 2 +- web/pnpm-lock.yaml | 50 +++++--- 4 files changed, 126 insertions(+), 66 deletions(-) diff --git a/web/__tests__/real-browser-flicker.test.tsx b/web/__tests__/real-browser-flicker.test.tsx index 52bdf4777f..f71e8de515 100644 --- a/web/__tests__/real-browser-flicker.test.tsx +++ b/web/__tests__/real-browser-flicker.test.tsx @@ -13,39 +13,60 @@ import { ThemeProvider } from 'next-themes' import useTheme from '@/hooks/use-theme' import { useEffect, useState } from 'react' +const DARK_MODE_MEDIA_QUERY = /prefers-color-scheme:\s*dark/i + // Setup browser environment for testing const setupMockEnvironment = (storedTheme: string | null, systemPrefersDark = false) => { - // Mock localStorage - const mockStorage = { - getItem: jest.fn((key: string) => { - if (key === 'theme') return storedTheme - return null - }), - setItem: jest.fn(), - removeItem: jest.fn(), + if (typeof window === 'undefined') + return + + try { + window.localStorage.clear() + } + catch { + // ignore if localStorage has been replaced by a throwing stub } - // Mock system theme preference - const mockMatchMedia = jest.fn((query: string) => ({ - matches: query.includes('dark') && systemPrefersDark, - media: query, - addListener: jest.fn(), - removeListener: jest.fn(), - })) + if (storedTheme === null) + window.localStorage.removeItem('theme') + else + window.localStorage.setItem('theme', storedTheme) - if (typeof window !== 'undefined') { - Object.defineProperty(window, 'localStorage', { - value: mockStorage, - configurable: true, - }) + document.documentElement.removeAttribute('data-theme') - Object.defineProperty(window, 'matchMedia', { - value: mockMatchMedia, - configurable: true, - }) + const mockMatchMedia: typeof window.matchMedia = (query: string) => { + const listeners = new Set<(event: MediaQueryListEvent) => void>() + const isDarkQuery = DARK_MODE_MEDIA_QUERY.test(query) + const matches = isDarkQuery ? 
systemPrefersDark : false + + const mediaQueryList: MediaQueryList = { + matches, + media: query, + onchange: null, + addListener: (listener: MediaQueryListListener) => { + listeners.add(listener) + }, + removeListener: (listener: MediaQueryListListener) => { + listeners.delete(listener) + }, + addEventListener: (_event, listener: EventListener) => { + if (typeof listener === 'function') + listeners.add(listener as MediaQueryListListener) + }, + removeEventListener: (_event, listener: EventListener) => { + if (typeof listener === 'function') + listeners.delete(listener as MediaQueryListListener) + }, + dispatchEvent: (event: Event) => { + listeners.forEach(listener => listener(event as MediaQueryListEvent)) + return true + }, + } + + return mediaQueryList } - return { mockStorage, mockMatchMedia } + jest.spyOn(window, 'matchMedia').mockImplementation(mockMatchMedia) } // Simulate real page component based on Dify's actual theme usage @@ -94,7 +115,17 @@ const TestThemeProvider = ({ children }: { children: React.ReactNode }) => ( describe('Real Browser Environment Dark Mode Flicker Test', () => { beforeEach(() => { + jest.restoreAllMocks() jest.clearAllMocks() + if (typeof window !== 'undefined') { + try { + window.localStorage.clear() + } + catch { + // ignore when localStorage is replaced with an error-throwing stub + } + document.documentElement.removeAttribute('data-theme') + } }) describe('Page Refresh Scenario Simulation', () => { @@ -323,35 +354,40 @@ describe('Real Browser Environment Dark Mode Flicker Test', () => { describe('Edge Cases and Error Handling', () => { test('handles localStorage access errors gracefully', async () => { - // Mock localStorage to throw an error + setupMockEnvironment(null) + const mockStorage = { getItem: jest.fn(() => { throw new Error('LocalStorage access denied') }), setItem: jest.fn(), removeItem: jest.fn(), + clear: jest.fn(), } - if (typeof window !== 'undefined') { - Object.defineProperty(window, 'localStorage', { - value: mockStorage, - configurable: true, - }) - } - - render( - - - , - ) - - // Should fallback gracefully without crashing - await waitFor(() => { - expect(screen.getByTestId('theme-indicator')).toBeInTheDocument() + Object.defineProperty(window, 'localStorage', { + value: mockStorage, + configurable: true, }) - // Should default to light theme when localStorage fails - expect(screen.getByTestId('visual-appearance')).toHaveTextContent('Appearance: light') + try { + render( + + + , + ) + + // Should fallback gracefully without crashing + await waitFor(() => { + expect(screen.getByTestId('theme-indicator')).toBeInTheDocument() + }) + + // Should default to light theme when localStorage fails + expect(screen.getByTestId('visual-appearance')).toHaveTextContent('Appearance: light') + } + finally { + Reflect.deleteProperty(window, 'localStorage') + } }) test('handles invalid theme values in localStorage', async () => { @@ -403,6 +439,8 @@ describe('Real Browser Environment Dark Mode Flicker Test', () => { setupMockEnvironment('dark') + expect(window.localStorage.getItem('theme')).toBe('dark') + render( diff --git a/web/jest.config.ts b/web/jest.config.ts index ebeb2f7d7e..6c2d88448c 100644 --- a/web/jest.config.ts +++ b/web/jest.config.ts @@ -160,7 +160,11 @@ const config: Config = { testEnvironment: '@happy-dom/jest-environment', // Options that will be passed to the testEnvironment - // testEnvironmentOptions: {}, + testEnvironmentOptions: { + // Match happy-dom's default to ensure Node.js environment resolution + // This prevents ESM 
packages like uuid from using browser exports + customExportConditions: ['node', 'node-addons'], + }, // Adds a location field to test results // testLocationInResults: false, @@ -189,10 +193,10 @@ const config: Config = { // transform: undefined, // An array of regexp pattern strings that are matched against all source file paths, matched files will skip transformation - // transformIgnorePatterns: [ - // "/node_modules/", - // "\\.pnp\\.[^\\/]+$" - // ], + // For pnpm: allow transforming uuid ESM package + transformIgnorePatterns: [ + 'node_modules/(?!(.pnpm|uuid))', + ], // An array of regexp pattern strings that are matched against all modules before the module loader will automatically return a mock for them // unmockedModulePathPatterns: undefined, diff --git a/web/package.json b/web/package.json index a5cfd387f7..33ba0dc5a4 100644 --- a/web/package.json +++ b/web/package.json @@ -143,7 +143,7 @@ "@babel/core": "^7.28.3", "@chromatic-com/storybook": "^3.1.0", "@eslint-react/eslint-plugin": "^1.15.0", - "@happy-dom/jest-environment": "^17.4.4", + "@happy-dom/jest-environment": "^20.0.0", "@mdx-js/loader": "^3.1.0", "@mdx-js/react": "^3.1.0", "@next/bundle-analyzer": "15.5.4", diff --git a/web/pnpm-lock.yaml b/web/pnpm-lock.yaml index 0a45f14ba0..2fcd0f17b1 100644 --- a/web/pnpm-lock.yaml +++ b/web/pnpm-lock.yaml @@ -345,8 +345,8 @@ importers: specifier: ^1.15.0 version: 1.52.3(eslint@9.35.0(jiti@2.6.0))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3) '@happy-dom/jest-environment': - specifier: ^17.4.4 - version: 17.6.3 + specifier: ^20.0.0 + version: 20.0.0(@jest/environment@29.7.0)(@jest/fake-timers@29.7.0)(@jest/types@29.6.3)(jest-mock@29.7.0)(jest-util@29.7.0) '@mdx-js/loader': specifier: ^3.1.0 version: 3.1.0(acorn@8.15.0)(webpack@5.100.2(esbuild@0.25.0)(uglify-js@3.19.3)) @@ -1644,9 +1644,15 @@ packages: '@formatjs/intl-localematcher@0.5.10': resolution: {integrity: sha512-af3qATX+m4Rnd9+wHcjJ4w2ijq+rAVP3CCinJQvFv1kgSu1W6jypUmvleJxcewdxmutM8dmIRZFxO/IQBZmP2Q==} - '@happy-dom/jest-environment@17.6.3': - resolution: {integrity: sha512-HXuHKvpHLo9/GQ/yKMmKFyS1AYL2t9pL67+GfpYZfOAb29qD80EMozi50zRZk82KmNRBcA2A0/ErjpOwUxJrNg==} + '@happy-dom/jest-environment@20.0.0': + resolution: {integrity: sha512-dUyMDNJzPDFopSDyzKdbeYs8z9B4jLj9kXnru8TjYdGeLsQKf+6r0lq/9T2XVcu04QFxXMykt64A+KjsaJTaNA==} engines: {node: '>=20.0.0'} + peerDependencies: + '@jest/environment': '>=25.0.0' + '@jest/fake-timers': '>=25.0.0' + '@jest/types': '>=25.0.0' + jest-mock: '>=25.0.0' + jest-util: '>=25.0.0' '@headlessui/react@2.2.1': resolution: {integrity: sha512-daiUqVLae8CKVjEVT19P/izW0aGK0GNhMSAeMlrDebKmoVZHcRRwbxzgtnEadUVDXyBsWo9/UH4KHeniO+0tMg==} @@ -3416,6 +3422,9 @@ packages: '@types/node@18.15.0': resolution: {integrity: sha512-z6nr0TTEOBGkzLGmbypWOGnpSpSIBorEhC4L+4HeQ2iezKCi4f77kyslRwvHeNitymGQ+oFyIWGP96l/DPSV9w==} + '@types/node@20.19.20': + resolution: {integrity: sha512-2Q7WS25j4pS1cS8yw3d6buNCVJukOTeQ39bAnwR6sOJbaxvyCGebzTMypDFN82CxBLnl+lSWVdCCWbRY6y9yZQ==} + '@types/papaparse@5.3.16': resolution: {integrity: sha512-T3VuKMC2H0lgsjI9buTB3uuKj3EMD2eap1MOuEQuBQ44EnDx/IkGhU6EwiTf9zG3za4SKlmwKAImdDKdNnCsXg==} @@ -3475,6 +3484,9 @@ packages: '@types/uuid@9.0.8': resolution: {integrity: sha512-jg+97EGIcY9AGHJJRaaPVgetKDsrTgbRjQ5Msgjh/DQKEFl0DtyRr/VCOyD1T2R1MNeWPK/u7JoGhlDZnKBAfA==} + '@types/whatwg-mimetype@3.0.2': + resolution: {integrity: sha512-c2AKvDT8ToxLIOUlN51gTiHXflsfIFisS4pO7pDPoKouJCESkhZnEy623gwP9laCy5lnLDAw1vAzu2vM2YLOrA==} + '@types/yargs-parser@21.0.3': resolution: {integrity: 
sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==} @@ -5542,8 +5554,8 @@ packages: hachure-fill@0.5.2: resolution: {integrity: sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg==} - happy-dom@17.6.3: - resolution: {integrity: sha512-UVIHeVhxmxedbWPCfgS55Jg2rDfwf2BCKeylcPSqazLz5w3Kri7Q4xdBJubsr/+VUzFLh0VjIvh13RaDA2/Xug==} + happy-dom@20.0.0: + resolution: {integrity: sha512-GkWnwIFxVGCf2raNrxImLo397RdGhLapj5cT3R2PT7FwL62Ze1DROhzmYW7+J3p9105DYMVenEejEbnq5wA37w==} engines: {node: '>=20.0.0'} has-flag@4.0.0: @@ -8246,6 +8258,9 @@ packages: engines: {node: '>=0.8.0'} hasBin: true + undici-types@6.21.0: + resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} + unicode-canonical-property-names-ecmascript@2.0.1: resolution: {integrity: sha512-dA8WbNeb2a6oQzAQ55YlT5vQAWGV9WXOsi3SskE3bcCdM0P4SDd+24zS/OCacdRq5BkdsRj9q3Pg6YyQoxIGqg==} engines: {node: '>=4'} @@ -8472,10 +8487,6 @@ packages: webidl-conversions@4.0.2: resolution: {integrity: sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==} - webidl-conversions@7.0.0: - resolution: {integrity: sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==} - engines: {node: '>=12'} - webpack-bundle-analyzer@4.10.1: resolution: {integrity: sha512-s3P7pgexgT/HTUSYgxJyn28A+99mmLq4HsJepMPzu0R8ImJc52QNqaFYW1Z2z2uIb1/J3eYgaAWVpaC+v/1aAQ==} engines: {node: '>= 10.13.0'} @@ -10105,12 +10116,12 @@ snapshots: dependencies: tslib: 2.8.1 - '@happy-dom/jest-environment@17.6.3': + '@happy-dom/jest-environment@20.0.0(@jest/environment@29.7.0)(@jest/fake-timers@29.7.0)(@jest/types@29.6.3)(jest-mock@29.7.0)(jest-util@29.7.0)': dependencies: '@jest/environment': 29.7.0 '@jest/fake-timers': 29.7.0 '@jest/types': 29.6.3 - happy-dom: 17.6.3 + happy-dom: 20.0.0 jest-mock: 29.7.0 jest-util: 29.7.0 @@ -12201,6 +12212,10 @@ snapshots: '@types/node@18.15.0': {} + '@types/node@20.19.20': + dependencies: + undici-types: 6.21.0 + '@types/papaparse@5.3.16': dependencies: '@types/node': 18.15.0 @@ -12255,6 +12270,8 @@ snapshots: '@types/uuid@9.0.8': {} + '@types/whatwg-mimetype@3.0.2': {} + '@types/yargs-parser@21.0.3': {} '@types/yargs@17.0.33': @@ -14709,9 +14726,10 @@ snapshots: hachure-fill@0.5.2: {} - happy-dom@17.6.3: + happy-dom@20.0.0: dependencies: - webidl-conversions: 7.0.0 + '@types/node': 20.19.20 + '@types/whatwg-mimetype': 3.0.2 whatwg-mimetype: 3.0.0 has-flag@4.0.0: {} @@ -18125,6 +18143,8 @@ snapshots: uglify-js@3.19.3: {} + undici-types@6.21.0: {} + unicode-canonical-property-names-ecmascript@2.0.1: {} unicode-match-property-ecmascript@2.0.0: @@ -18351,8 +18371,6 @@ snapshots: webidl-conversions@4.0.2: {} - webidl-conversions@7.0.0: {} - webpack-bundle-analyzer@4.10.1: dependencies: '@discoveryjs/json-ext': 0.5.7 From 5830c6969453ff217add69426f7048a421521f71 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 09:59:31 +0800 Subject: [PATCH 36/49] chore(deps): bump @lexical/utils from 0.36.2 to 0.37.0 in /web (#26801) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/package.json | 2 +- web/pnpm-lock.yaml | 75 ++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 74 insertions(+), 3 deletions(-) diff --git a/web/package.json b/web/package.json index 33ba0dc5a4..3cb00a67bc 100644 --- 
a/web/package.json +++ b/web/package.json @@ -55,7 +55,7 @@ "@lexical/react": "^0.36.2", "@lexical/selection": "^0.36.2", "@lexical/text": "^0.36.2", - "@lexical/utils": "^0.36.2", + "@lexical/utils": "^0.37.0", "@monaco-editor/react": "^4.6.0", "@octokit/core": "^6.1.2", "@octokit/request-error": "^6.1.5", diff --git a/web/pnpm-lock.yaml b/web/pnpm-lock.yaml index 2fcd0f17b1..343fefaa48 100644 --- a/web/pnpm-lock.yaml +++ b/web/pnpm-lock.yaml @@ -86,8 +86,8 @@ importers: specifier: ^0.36.2 version: 0.36.2 '@lexical/utils': - specifier: ^0.36.2 - version: 0.36.2 + specifier: ^0.37.0 + version: 0.37.0 '@monaco-editor/react': specifier: ^4.6.0 version: 4.7.0(monaco-editor@0.52.2)(react-dom@19.1.1(react@19.1.1))(react@19.1.1) @@ -2039,6 +2039,9 @@ packages: '@lexical/clipboard@0.36.2': resolution: {integrity: sha512-l7z52jltlMz1HmJRmG7ZdxySPjheRRxdV/75QEnzalMtqfLPgh4G5IpycISjbX+95PgEaC6rXbcjPix0CyHDJg==} + '@lexical/clipboard@0.37.0': + resolution: {integrity: sha512-hRwASFX/ilaI5r8YOcZuQgONFshRgCPfdxfofNL7uruSFYAO6LkUhsjzZwUgf0DbmCJmbBADFw15FSthgCUhGA==} + '@lexical/code@0.36.2': resolution: {integrity: sha512-dfS62rNo3uKwNAJQ39zC+8gYX0k8UAoW7u+JPIqx+K2VPukZlvpsPLNGft15pdWBkHc7Pv+o9gJlB6gGv+EBfA==} @@ -2054,6 +2057,9 @@ packages: '@lexical/extension@0.36.2': resolution: {integrity: sha512-NWxtqMFMzScq4Eemqp1ST2KREIfj57fUbn7qHv+mMnYgQZK4iIhrHKo5klonxi1oBURcxUZMIbdtH7MJ4BdisA==} + '@lexical/extension@0.37.0': + resolution: {integrity: sha512-Z58f2tIdz9bn8gltUu5cVg37qROGha38dUZv20gI2GeNugXAkoPzJYEcxlI1D/26tkevJ/7VaFUr9PTk+iKmaA==} + '@lexical/hashtag@0.36.2': resolution: {integrity: sha512-WdmKtzXFcahQT3ShFDeHF6LCR5C8yvFCj3ImI09rZwICrYeonbMrzsBUxS1joBz0HQ+ufF9Tx+RxLvGWx6WxzQ==} @@ -2063,12 +2069,18 @@ packages: '@lexical/html@0.36.2': resolution: {integrity: sha512-fgqALzgKnoy93G0yFyYD4C4qJTSMZyUt4JE5kj/POFwWNOnXThIqJhQGwBvH/ibImpIfOeds2TrSr8PbStlrNg==} + '@lexical/html@0.37.0': + resolution: {integrity: sha512-oTsBc45eL8/lmF7fqGR+UCjrJYP04gumzf5nk4TczrxWL2pM4GIMLLKG1mpQI2H1MDiRLzq3T/xdI7Gh74z7Zw==} + '@lexical/link@0.36.2': resolution: {integrity: sha512-Zb+DeHA1po8VMiOAAXsBmAHhfWmQttsUkI5oiZUmOXJruRuQ2rVr01NoxHpoEpLwHOABVNzD3PMbwov+g3c7lg==} '@lexical/list@0.36.2': resolution: {integrity: sha512-JpaIaE0lgNUrAR7iaCaIoETcCKG9EvZjM3G71VxiexTs7PltmEMq36LUlO2goafWurP7knG2rUpVnTcuSbYYeA==} + '@lexical/list@0.37.0': + resolution: {integrity: sha512-AOC6yAA3mfNvJKbwo+kvAbPJI+13yF2ISA65vbA578CugvJ08zIVgM+pSzxquGhD0ioJY3cXVW7+gdkCP1qu5g==} + '@lexical/mark@0.36.2': resolution: {integrity: sha512-n0MNXtGH+1i43hglgHjpQV0093HmIiFR7Budg2BJb8ZNzO1KZRqeXAHlA5ZzJ698FkAnS4R5bqG9tZ0JJHgAuA==} @@ -2096,15 +2108,24 @@ packages: '@lexical/selection@0.36.2': resolution: {integrity: sha512-n96joW3HCKBmPeESR172BxVE+m8V9SdidQm4kKb9jOZ1Ota+tnam2386TeI6795TWwgjDQJPK3HZNKcX6Gb+Bg==} + '@lexical/selection@0.37.0': + resolution: {integrity: sha512-Lix1s2r71jHfsTEs4q/YqK2s3uXKOnyA3fd1VDMWysO+bZzRwEO5+qyDvENZ0WrXSDCnlibNFV1HttWX9/zqyw==} + '@lexical/table@0.36.2': resolution: {integrity: sha512-96rNNPiVbC65i+Jn1QzIsehCS7UVUc69ovrh9Bt4+pXDebZSdZai153Q7RUq8q3AQ5ocK4/SA2kLQfMu0grj3Q==} + '@lexical/table@0.37.0': + resolution: {integrity: sha512-g7S8ml8kIujEDLWlzYKETgPCQ2U9oeWqdytRuHjHGi/rjAAGHSej5IRqTPIMxNP3VVQHnBoQ+Y9hBtjiuddhgQ==} + '@lexical/text@0.36.2': resolution: {integrity: sha512-IbbqgRdMAD6Uk9b2+qSVoy+8RVcczrz6OgXvg39+EYD+XEC7Rbw7kDTWzuNSJJpP7vxSO8YDZSaIlP5gNH3qKA==} '@lexical/utils@0.36.2': resolution: {integrity: sha512-P9+t2Ob10YNGYT/PWEER+1EqH8SAjCNRn+7SBvKbr0IdleGF2JvzbJwAWaRwZs1c18P11XdQZ779dGvWlfwBIw==} + 
'@lexical/utils@0.37.0': + resolution: {integrity: sha512-CFp4diY/kR5RqhzQSl/7SwsMod1sgLpI1FBifcOuJ6L/S6YywGpEB4B7aV5zqW21A/jU2T+2NZtxSUn6S+9gMg==} + '@lexical/yjs@0.36.2': resolution: {integrity: sha512-gZ66Mw+uKXTO8KeX/hNKAinXbFg3gnNYraG76lBXCwb/Ka3q34upIY9FUeGOwGVaau3iIDQhE49I+6MugAX2FQ==} peerDependencies: @@ -6218,6 +6239,9 @@ packages: lexical@0.36.2: resolution: {integrity: sha512-gIDJCmSAhtxD7h95WK17Nz19wCZu92Zn0p1/R45X01S/KAsLCwEtVJ2fTvIJNFTyx3QNJTuGcm5mYgRMUwq8rg==} + lexical@0.37.0: + resolution: {integrity: sha512-r5VJR2TioQPAsZATfktnJFrGIiy6gjQN8b/+0a2u1d7/QTH7lhbB7byhGSvcq1iaa1TV/xcf/pFV55a5V5hTDQ==} + lib0@0.2.114: resolution: {integrity: sha512-gcxmNFzA4hv8UYi8j43uPlQ7CGcyMJ2KQb5kZASw6SnAKAf10hK12i2fjrS3Cl/ugZa5Ui6WwIu1/6MIXiHttQ==} engines: {node: '>=16'} @@ -10556,6 +10580,14 @@ snapshots: '@lexical/utils': 0.36.2 lexical: 0.36.2 + '@lexical/clipboard@0.37.0': + dependencies: + '@lexical/html': 0.37.0 + '@lexical/list': 0.37.0 + '@lexical/selection': 0.37.0 + '@lexical/utils': 0.37.0 + lexical: 0.37.0 + '@lexical/code@0.36.2': dependencies: '@lexical/utils': 0.36.2 @@ -10584,6 +10616,12 @@ snapshots: '@preact/signals-core': 1.12.1 lexical: 0.36.2 + '@lexical/extension@0.37.0': + dependencies: + '@lexical/utils': 0.37.0 + '@preact/signals-core': 1.12.1 + lexical: 0.37.0 + '@lexical/hashtag@0.36.2': dependencies: '@lexical/text': 0.36.2 @@ -10602,6 +10640,12 @@ snapshots: '@lexical/utils': 0.36.2 lexical: 0.36.2 + '@lexical/html@0.37.0': + dependencies: + '@lexical/selection': 0.37.0 + '@lexical/utils': 0.37.0 + lexical: 0.37.0 + '@lexical/link@0.36.2': dependencies: '@lexical/extension': 0.36.2 @@ -10615,6 +10659,13 @@ snapshots: '@lexical/utils': 0.36.2 lexical: 0.36.2 + '@lexical/list@0.37.0': + dependencies: + '@lexical/extension': 0.37.0 + '@lexical/selection': 0.37.0 + '@lexical/utils': 0.37.0 + lexical: 0.37.0 + '@lexical/mark@0.36.2': dependencies: '@lexical/utils': 0.36.2 @@ -10684,6 +10735,10 @@ snapshots: dependencies: lexical: 0.36.2 + '@lexical/selection@0.37.0': + dependencies: + lexical: 0.37.0 + '@lexical/table@0.36.2': dependencies: '@lexical/clipboard': 0.36.2 @@ -10691,6 +10746,13 @@ snapshots: '@lexical/utils': 0.36.2 lexical: 0.36.2 + '@lexical/table@0.37.0': + dependencies: + '@lexical/clipboard': 0.37.0 + '@lexical/extension': 0.37.0 + '@lexical/utils': 0.37.0 + lexical: 0.37.0 + '@lexical/text@0.36.2': dependencies: lexical: 0.36.2 @@ -10702,6 +10764,13 @@ snapshots: '@lexical/table': 0.36.2 lexical: 0.36.2 + '@lexical/utils@0.37.0': + dependencies: + '@lexical/list': 0.37.0 + '@lexical/selection': 0.37.0 + '@lexical/table': 0.37.0 + lexical: 0.37.0 + '@lexical/yjs@0.36.2(yjs@13.6.27)': dependencies: '@lexical/offset': 0.36.2 @@ -15629,6 +15698,8 @@ snapshots: lexical@0.36.2: {} + lexical@0.37.0: {} + lib0@0.2.114: dependencies: isomorphic.js: 0.2.5 From f50c85d536cdfe11058286384c238a5f7102f211 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 10:00:03 +0800 Subject: [PATCH 37/49] chore(deps-dev): bump knip from 5.64.1 to 5.64.3 in /web (#26802) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/package.json | 2 +- web/pnpm-lock.yaml | 672 ++++++++++++++++++++++----------------------- 2 files changed, 330 insertions(+), 344 deletions(-) diff --git a/web/package.json b/web/package.json index 3cb00a67bc..366dc99e6f 100644 --- a/web/package.json +++ b/web/package.json @@ -190,7 +190,7 @@ "globals": "^15.11.0", 
"husky": "^9.1.6", "jest": "^29.7.0", - "knip": "^5.64.1", + "knip": "^5.64.3", "lint-staged": "^15.2.10", "lodash": "^4.17.21", "magicast": "^0.3.4", diff --git a/web/pnpm-lock.yaml b/web/pnpm-lock.yaml index 343fefaa48..28758f1142 100644 --- a/web/pnpm-lock.yaml +++ b/web/pnpm-lock.yaml @@ -334,7 +334,7 @@ importers: devDependencies: '@antfu/eslint-config': specifier: ^5.0.0 - version: 5.0.0(@eslint-react/eslint-plugin@1.52.3(eslint@9.35.0(jiti@2.6.0))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3))(@next/eslint-plugin-next@15.5.4)(@vue/compiler-sfc@3.5.17)(eslint-plugin-react-hooks@5.2.0(eslint@9.35.0(jiti@2.6.0)))(eslint-plugin-react-refresh@0.4.20(eslint@9.35.0(jiti@2.6.0)))(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + version: 5.0.0(@eslint-react/eslint-plugin@1.52.3(eslint@9.35.0(jiti@2.6.1))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3))(@next/eslint-plugin-next@15.5.4)(@vue/compiler-sfc@3.5.17)(eslint-plugin-react-hooks@5.2.0(eslint@9.35.0(jiti@2.6.1)))(eslint-plugin-react-refresh@0.4.20(eslint@9.35.0(jiti@2.6.1)))(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@babel/core': specifier: ^7.28.3 version: 7.28.3 @@ -343,7 +343,7 @@ importers: version: 3.2.7(react@19.1.1)(storybook@8.5.0) '@eslint-react/eslint-plugin': specifier: ^1.15.0 - version: 1.52.3(eslint@9.35.0(jiti@2.6.0))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3) + version: 1.52.3(eslint@9.35.0(jiti@2.6.1))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3) '@happy-dom/jest-environment': specifier: ^20.0.0 version: 20.0.0(@jest/environment@29.7.0)(@jest/fake-timers@29.7.0)(@jest/types@29.6.3)(jest-mock@29.7.0)(jest-util@29.7.0) @@ -457,22 +457,22 @@ importers: version: 7.0.3 eslint: specifier: ^9.35.0 - version: 9.35.0(jiti@2.6.0) + version: 9.35.0(jiti@2.6.1) eslint-plugin-oxlint: specifier: ^1.6.0 version: 1.6.0 eslint-plugin-react-hooks: specifier: ^5.1.0 - version: 5.2.0(eslint@9.35.0(jiti@2.6.0)) + version: 5.2.0(eslint@9.35.0(jiti@2.6.1)) eslint-plugin-react-refresh: specifier: ^0.4.19 - version: 0.4.20(eslint@9.35.0(jiti@2.6.0)) + version: 0.4.20(eslint@9.35.0(jiti@2.6.1)) eslint-plugin-sonarjs: specifier: ^3.0.2 - version: 3.0.4(eslint@9.35.0(jiti@2.6.0)) + version: 3.0.4(eslint@9.35.0(jiti@2.6.1)) eslint-plugin-storybook: specifier: ^9.0.7 - version: 9.0.7(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + version: 9.0.7(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) eslint-plugin-tailwindcss: specifier: ^3.18.0 version: 3.18.2(tailwindcss@3.4.17(ts-node@10.9.2(@types/node@18.15.0)(typescript@5.8.3))) @@ -486,8 +486,8 @@ importers: specifier: ^29.7.0 version: 29.7.0(@types/node@18.15.0)(ts-node@10.9.2(@types/node@18.15.0)(typescript@5.8.3)) knip: - specifier: ^5.64.1 - version: 5.64.1(@types/node@18.15.0)(typescript@5.8.3) + specifier: ^5.64.3 + version: 5.64.3(@types/node@18.15.0)(typescript@5.8.3) lint-staged: specifier: ^15.2.10 version: 15.5.2 @@ -2165,8 +2165,8 @@ packages: react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 - '@napi-rs/wasm-runtime@1.0.5': - resolution: {integrity: sha512-TBr9Cf9onSAS2LQ2+QHx6XcC6h9+RIzJgbqG3++9TUZSH204AwEy5jg3BTQ0VATsyoGj4ee49tN/y6rvaOOtcg==} + '@napi-rs/wasm-runtime@1.0.7': + resolution: {integrity: sha512-SeDnOO0Tk7Okiq6DbXmmBODgOAb9dp9gjlphokTUxmt8U3liIP1ZsozBahH69j/RJv+Rfs6IwUKHTgQYJ/HBAw==} '@next/bundle-analyzer@15.5.4': resolution: {integrity: sha512-wMtpIjEHi+B/wC34ZbEcacGIPgQTwTFjjp0+F742s9TxC6QwT0MwB/O0QEgalMe8s3SH/K09DO0gmTvUSJrLRA==} @@ -2360,98 +2360,98 @@ packages: 
'@octokit/types@14.1.0': resolution: {integrity: sha512-1y6DgTy8Jomcpu33N+p5w58l6xyt55Ar2I91RPiIA0xCJBXyUAhXCcmZaDWSANiha7R9a6qJJ2CRomGPZ6f46g==} - '@oxc-resolver/binding-android-arm-eabi@11.8.4': - resolution: {integrity: sha512-6BjMji0TcvQfJ4EoSunOSyu/SiyHKficBD0V3Y0NxF0beaNnnZ7GYEi2lHmRNnRCuIPK8IuVqQ6XizYau+CkKw==} + '@oxc-resolver/binding-android-arm-eabi@11.9.0': + resolution: {integrity: sha512-4AxaG6TkSBQ2FiC5oGZEJQ35DjsSfAbW6/AJauebq4EzIPVOIgDJCF4de+PvX/Xi9BkNw6VtJuMXJdWW97iEAA==} cpu: [arm] os: [android] - '@oxc-resolver/binding-android-arm64@11.8.4': - resolution: {integrity: sha512-SxF4X6rzCBS9XNPXKZGoIHIABjfGmtQpEgRBDzpDHx5VTuLAUmwLTHXnVBAZoX5bmnhF79RiMElavzFdJ2cA1A==} + '@oxc-resolver/binding-android-arm64@11.9.0': + resolution: {integrity: sha512-oOEg7rUd2M6YlmRkvPcszJ6KO6TaLGN21oDdcs27gbTVYbQQtCWYbZz5jRW5zEBJu6dopoWVx+shJNGtG1qDFw==} cpu: [arm64] os: [android] - '@oxc-resolver/binding-darwin-arm64@11.8.4': - resolution: {integrity: sha512-8zWeERrzgscAniE6kh1TQ4E7GJyglYsvdoKrHYLBCbHWD+0/soffiwAYxZuckKEQSc2RXMSPjcu+JTCALaY0Dw==} + '@oxc-resolver/binding-darwin-arm64@11.9.0': + resolution: {integrity: sha512-fM6zE/j6o3C1UIkcZPV7C1f186R7w97guY2N4lyNLlhlgwwhd46acnOezLARvRNU5oyKNev4PvOJhGCCDnFMGg==} cpu: [arm64] os: [darwin] - '@oxc-resolver/binding-darwin-x64@11.8.4': - resolution: {integrity: sha512-BUwggKz8Hi5uEQ0AeVTSun1+sp4lzNcItn+L7fDsHu5Cx0Zueuo10BtVm+dIwmYVVPL5oGYOeD0fS7MKAazKiw==} + '@oxc-resolver/binding-darwin-x64@11.9.0': + resolution: {integrity: sha512-Bg3Orw7gAxbUqQlt64YPWvHDVo3bo2JfI26Qmzv6nKo7mIMTDhQKl7YmywtLNMYbX0IgUM4qu1V90euu+WCDOw==} cpu: [x64] os: [darwin] - '@oxc-resolver/binding-freebsd-x64@11.8.4': - resolution: {integrity: sha512-fPO5TQhnn8gA6yP4o49lc4Gn8KeDwAp9uYd4PlE3Q00JVqU6cY9WecDhYHrWtiFcyoZ8UVBlIxuhRqT/DP4Z4A==} + '@oxc-resolver/binding-freebsd-x64@11.9.0': + resolution: {integrity: sha512-eBqVZqTETH6miBfIZXvpzUe98WATz2+Sh+LEFwuRpGsTsKkIpTyb4p1kwylCLkxrd3Yx7wkxQku+L0AMEGBiAA==} cpu: [x64] os: [freebsd] - '@oxc-resolver/binding-linux-arm-gnueabihf@11.8.4': - resolution: {integrity: sha512-QuNbdUaVGiP0W0GrXsvCDZjqeL4lZGU7aXlx/S2tCvyTk3wh6skoiLJgqUf/eeqXfUPnzTfntYqyfolzCAyBYA==} + '@oxc-resolver/binding-linux-arm-gnueabihf@11.9.0': + resolution: {integrity: sha512-QgCk/IJnGBvpbc8rYTVgO+A3m3edJjH1zfv8Nvx7fmsxpbXwWH2l4b4tY3/SLMzasxsp7x7k87+HWt095bI5Lg==} cpu: [arm] os: [linux] - '@oxc-resolver/binding-linux-arm-musleabihf@11.8.4': - resolution: {integrity: sha512-p/zLMfza8OsC4BDKxqeZ9Qel+4eA/oiMSyKLRkMrTgt6OWQq1d5nHntjfG35Abcw4ev6Q9lRU3NOW5hj0xlUbw==} + '@oxc-resolver/binding-linux-arm-musleabihf@11.9.0': + resolution: {integrity: sha512-xkJH0jldIXD2GwoHpCDEF0ucJ7fvRETCL+iFLctM679o7qeDXvtzsO/E401EgFFXcWBJNKXWvH+ZfdYMKyowfA==} cpu: [arm] os: [linux] - '@oxc-resolver/binding-linux-arm64-gnu@11.8.4': - resolution: {integrity: sha512-bvJF9wWxF1+a5YZATlS5JojpOMC7OsnTatA6sXVHoOb7MIigjledYB5ZMAeRrnWWexRMiEX3YSaA46oSfOzmOg==} + '@oxc-resolver/binding-linux-arm64-gnu@11.9.0': + resolution: {integrity: sha512-TWq+y2psMzbMtZB9USAq2bSA7NV1TMmh9lhAFbMGQ8Yp2YV4BRC/HilD6qF++efQl6shueGBFOv0LVe9BUXaIA==} cpu: [arm64] os: [linux] - '@oxc-resolver/binding-linux-arm64-musl@11.8.4': - resolution: {integrity: sha512-gf4nwGBfu+EFwOn5p7/T7VF4jmIdfodwJS9MRkOBHvuAm3LQgCX7O6d3Y80mm0TV7ZMRD/trfW628rHfd5++vQ==} + '@oxc-resolver/binding-linux-arm64-musl@11.9.0': + resolution: {integrity: sha512-8WwGLfXk7yttc6rD6g53+RnYfX5B8xOot1ffthLn8oCXzVRO4cdChlmeHStxwLD/MWx8z8BGeyfyINNrsh9N2w==} cpu: [arm64] os: [linux] - '@oxc-resolver/binding-linux-ppc64-gnu@11.8.4': - resolution: {integrity: 
sha512-T120R5GIzRd41rYWWKCI6cSYrZjmRQzf3X4xeE1WX396Uabz5DX8KU7RnVHihSK+KDxccCVOFBxcH3ITd+IEpw==} + '@oxc-resolver/binding-linux-ppc64-gnu@11.9.0': + resolution: {integrity: sha512-ZWiAXfan6actlSzayaFS/kYO2zD6k1k0fmLb1opbujXYMKepEnjjVOvKdzCIYR/zKzudqI39dGc+ywqVdsPIpQ==} cpu: [ppc64] os: [linux] - '@oxc-resolver/binding-linux-riscv64-gnu@11.8.4': - resolution: {integrity: sha512-PVG7SxBFFjAaQ76p9O/0Xt5mTBlziRwpck+6cRNhy/hbWY/hSt8BFfPqw0EDSfnl40Uuh+NPsHFMnaWWyxbQEg==} + '@oxc-resolver/binding-linux-riscv64-gnu@11.9.0': + resolution: {integrity: sha512-p9mCSb+Bym+eycNo9k+81wQ5SAE31E+/rtfbDmF4/7krPotkEjPsEBSc3rqunRwO+FtsUn7H68JLY7hlai49eQ==} cpu: [riscv64] os: [linux] - '@oxc-resolver/binding-linux-riscv64-musl@11.8.4': - resolution: {integrity: sha512-L0OklUhM2qLGaKvPSyKmwWpoijfc++VJtPyVgz031ShOXyo0WjD0ZGzusyJMsA1a/gdulAmN6CQ/0Sf4LGXEcw==} + '@oxc-resolver/binding-linux-riscv64-musl@11.9.0': + resolution: {integrity: sha512-/SePuVxgFhLPciRwsJ8kLVltr+rxh0b6riGFuoPnFXBbHFclKnjNIt3TfqzUj0/vOnslXw3cVGPpmtkm2TgCgg==} cpu: [riscv64] os: [linux] - '@oxc-resolver/binding-linux-s390x-gnu@11.8.4': - resolution: {integrity: sha512-18Ajz5hqO4cRGuoHzLFUsIPod9GIaIRDiXFg2m6CS3NgVdHx7iCZscplYH7KtjdE42M8nGWYMyyq5BOk7QVgPw==} + '@oxc-resolver/binding-linux-s390x-gnu@11.9.0': + resolution: {integrity: sha512-zLuEjlYIzfnr1Ei2UZYQBbCTa/9deh+BEjO9rh1ai8BfEq4uj6RupTtNpgHfgAsEYdqOBVExw9EU1S6SW3RCAw==} cpu: [s390x] os: [linux] - '@oxc-resolver/binding-linux-x64-gnu@11.8.4': - resolution: {integrity: sha512-uHvH4RyYBdQ/lFGV9H+R1ScHg6EBnAhE3mnX+u+mO/btnalvg7j80okuHf8Qw0tLQiP5P1sEBoVeE6zviXY9IA==} + '@oxc-resolver/binding-linux-x64-gnu@11.9.0': + resolution: {integrity: sha512-cxdg73WG+aVlPu/k4lEQPRVOhWunYOUglW6OSzclZLJJAXZU0tSZ5ymKaqPRkfTsyNSAafj1cA1XYd+P9UxBgw==} cpu: [x64] os: [linux] - '@oxc-resolver/binding-linux-x64-musl@11.8.4': - resolution: {integrity: sha512-X5z44qh5DdJfVhcqXAQFTDFUpcxdpf6DT/lHL5CFcdQGIZxatjc7gFUy05IXPI9xwfq39RValjJBvFovUk9XBw==} + '@oxc-resolver/binding-linux-x64-musl@11.9.0': + resolution: {integrity: sha512-sy5nkVdMvNgqcx9sIY7G6U9TYZUZC4cmMGw/wKhJNuuD2/HFGtbje62ttXSwBAbVbmJ2GgZ4ZUo/S1OMyU+/OA==} cpu: [x64] os: [linux] - '@oxc-resolver/binding-wasm32-wasi@11.8.4': - resolution: {integrity: sha512-z3906y+cd8RRhBGNwHRrRAFxnKjXsBeL3+rdQjZpBrUyrhhsaV5iKD/ROx64FNJ9GjL/9mfon8A5xx/McYIqHA==} + '@oxc-resolver/binding-wasm32-wasi@11.9.0': + resolution: {integrity: sha512-dfi/a0Xh6o6nOLbJdaYuy7txncEcwkRHp9DGGZaAP7zxDiepkBZ6ewSJODQrWwhjVmMteXo+XFzEOMjsC7WUtQ==} engines: {node: '>=14.0.0'} cpu: [wasm32] - '@oxc-resolver/binding-win32-arm64-msvc@11.8.4': - resolution: {integrity: sha512-70vXFs74uA3X5iYOkpclbkWlQEF+MI325uAQ+Or2n8HJip2T0SEmuBlyw/sRL2E8zLC4oocb+1g25fmzlDVkmg==} + '@oxc-resolver/binding-win32-arm64-msvc@11.9.0': + resolution: {integrity: sha512-b1yKr+eFwyi8pZMjAQwW352rXpaHAmz7FLK03vFIxdyWzWiiL6S3UrfMu+nKQud38963zu4wNNLm7rdXQazgRA==} cpu: [arm64] os: [win32] - '@oxc-resolver/binding-win32-ia32-msvc@11.8.4': - resolution: {integrity: sha512-SEOUAzTvr+nyMia3nx1dMtD7YUxZwuhQ3QAPnxy21261Lj0yT3JY4EIfwWH54lAWWfMdRSRRMFuGeF/dq7XjEw==} + '@oxc-resolver/binding-win32-ia32-msvc@11.9.0': + resolution: {integrity: sha512-DxRT+1HjCpRH8qYCmGHzgsRCYiK+X14PUM9Fb+aD4TljplA7MdDQXqMISTb4zBZ70AuclvlXKTbW+K1GZop3xA==} cpu: [ia32] os: [win32] - '@oxc-resolver/binding-win32-x64-msvc@11.8.4': - resolution: {integrity: sha512-1gARIQsOPOU7LJ7jvMyPmZEVMapL/PymeG3J7naOdLZDrIZKX6CTvgawJmETYKt+8icP8M6KbBinrVkKVqFd+A==} + '@oxc-resolver/binding-win32-x64-msvc@11.9.0': + resolution: {integrity: 
sha512-gE3QJvhh0Yj9cSAkkHjRLKPmC7BTJeiaB5YyhVKVUwbnWQgTszV92lZ9pvZtNPEghP7jPbhEs4c6983A0ojQwA==} cpu: [x64] os: [win32] @@ -6103,8 +6103,8 @@ packages: resolution: {integrity: sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==} hasBin: true - jiti@2.6.0: - resolution: {integrity: sha512-VXe6RjJkBPj0ohtqaO8vSWP3ZhAKo66fKrFNCll4BTcwljPLz03pCbaNKfzGP5MbrCYcbJ7v0nOYYwUzTEIdXQ==} + jiti@2.6.1: + resolution: {integrity: sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==} hasBin: true js-audio-recorder@1.0.7: @@ -6197,8 +6197,8 @@ packages: resolution: {integrity: sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==} engines: {node: '>=6'} - knip@5.64.1: - resolution: {integrity: sha512-80XnLsyeXuyxj1F4+NBtQFHxaRH0xWRw8EKwfQ6EkVZZ0bSz/kqqan08k/Qg8ajWsFPhFq+0S2RbLCBGIQtuOg==} + knip@5.64.3: + resolution: {integrity: sha512-P9dZetEZfSBwNBFwj55CAnPAMdzVLTTscWx6rdB8eBmPqXPji8F3L+hhWi+Xp+u9O6Xp2ClRDq2JENSK8Z04Qg==} engines: {node: '>=18.18.0'} hasBin: true peerDependencies: @@ -6709,11 +6709,6 @@ packages: engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} hasBin: true - napi-postinstall@0.3.0: - resolution: {integrity: sha512-M7NqKyhODKV1gRLdkwE7pDsZP2/SC2a2vHkOYh9MCpKMbWVfyVfUw5MaH83Fv6XMjxr5jryUp3IDDL9rlxsTeA==} - engines: {node: ^12.20.0 || ^14.18.0 || >=16.0.0} - hasBin: true - natural-compare@1.4.0: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} @@ -6866,8 +6861,8 @@ packages: os-browserify@0.3.0: resolution: {integrity: sha512-gjcpUc3clBf9+210TRaDWbf+rZZZEshZ+DlXMRCeAjp0xhTrnQsKHypIy1J3d5hKdUzj69t708EHtU8P6bUn0A==} - oxc-resolver@11.8.4: - resolution: {integrity: sha512-qpimS3tHHEf+kgESMAme+q+rj7aCzMya00u9YdKOKyX2o7q4lozjPo6d7ZTTi979KHEcVOPWdNTueAKdeNq72w==} + oxc-resolver@11.9.0: + resolution: {integrity: sha512-u714L0DBBXpD0ERErCQlun2XwinuBfIGo2T8bA7xE8WLQ4uaJudO/VOEQCWslOmcDY2nEkS+UVir5PpyvSG23w==} p-cancelable@2.1.1: resolution: {integrity: sha512-BZOr3nRQHOntUjTrH8+Lh54smKHoHyur8We1V8DSMVrl5A2malOOwuJRnKRDjSnkoeBh4at6BwEnb5I7Jl31wg==} @@ -8728,11 +8723,8 @@ packages: zod@3.25.76: resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==} - zod@4.0.5: - resolution: {integrity: sha512-/5UuuRPStvHXu7RS+gmvRf4NXrNxpSllGwDnCBcJZtQsKrviYXm54yDGV2KYNLT5kq0lHGcl7lqWJLgSaG+tgA==} - - zod@4.1.11: - resolution: {integrity: sha512-WPsqwxITS2tzx1bzhIKsEs19ABD5vmCVa4xBo2tq/SrV4RNZtfws1EnCWQXM6yh8bD08a1idvkB5MZSBiZsjwg==} + zod@4.1.12: + resolution: {integrity: sha512-JInaHOamG8pt5+Ey8kGmdcAcg3OL9reK8ltczgHTAwNhMys/6ThXHityHxVV2p3fkw/c+MAvBHFVYHFZDmjMCQ==} zrender@5.6.1: resolution: {integrity: sha512-OFXkDJKcrlx5su2XbzJvj/34Q3m6PvyCZkVPHGYpcCJ52ek4U/ymZyfuV1nKE23AyBJ51E/6Yr0mhZ7xGTO4ag==} @@ -8771,50 +8763,50 @@ snapshots: '@jridgewell/gen-mapping': 0.3.12 '@jridgewell/trace-mapping': 0.3.29 - '@antfu/eslint-config@5.0.0(@eslint-react/eslint-plugin@1.52.3(eslint@9.35.0(jiti@2.6.0))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3))(@next/eslint-plugin-next@15.5.4)(@vue/compiler-sfc@3.5.17)(eslint-plugin-react-hooks@5.2.0(eslint@9.35.0(jiti@2.6.0)))(eslint-plugin-react-refresh@0.4.20(eslint@9.35.0(jiti@2.6.0)))(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3)': + 
'@antfu/eslint-config@5.0.0(@eslint-react/eslint-plugin@1.52.3(eslint@9.35.0(jiti@2.6.1))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3))(@next/eslint-plugin-next@15.5.4)(@vue/compiler-sfc@3.5.17)(eslint-plugin-react-hooks@5.2.0(eslint@9.35.0(jiti@2.6.1)))(eslint-plugin-react-refresh@0.4.20(eslint@9.35.0(jiti@2.6.1)))(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3)': dependencies: '@antfu/install-pkg': 1.1.0 '@clack/prompts': 0.11.0 - '@eslint-community/eslint-plugin-eslint-comments': 4.5.0(eslint@9.35.0(jiti@2.6.0)) + '@eslint-community/eslint-plugin-eslint-comments': 4.5.0(eslint@9.35.0(jiti@2.6.1)) '@eslint/markdown': 7.1.0 - '@stylistic/eslint-plugin': 5.2.2(eslint@9.35.0(jiti@2.6.0)) - '@typescript-eslint/eslint-plugin': 8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@typescript-eslint/parser': 8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@vitest/eslint-plugin': 1.3.4(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@stylistic/eslint-plugin': 5.2.2(eslint@9.35.0(jiti@2.6.1)) + '@typescript-eslint/eslint-plugin': 8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@typescript-eslint/parser': 8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@vitest/eslint-plugin': 1.3.4(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) ansis: 4.1.0 cac: 6.7.14 - eslint: 9.35.0(jiti@2.6.0) - eslint-config-flat-gitignore: 2.1.0(eslint@9.35.0(jiti@2.6.0)) + eslint: 9.35.0(jiti@2.6.1) + eslint-config-flat-gitignore: 2.1.0(eslint@9.35.0(jiti@2.6.1)) eslint-flat-config-utils: 2.1.0 - eslint-merge-processors: 2.0.0(eslint@9.35.0(jiti@2.6.0)) - eslint-plugin-antfu: 3.1.1(eslint@9.35.0(jiti@2.6.0)) - eslint-plugin-command: 3.3.1(eslint@9.35.0(jiti@2.6.0)) - eslint-plugin-import-lite: 0.3.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint-plugin-jsdoc: 51.4.1(eslint@9.35.0(jiti@2.6.0)) - eslint-plugin-jsonc: 2.20.1(eslint@9.35.0(jiti@2.6.0)) - eslint-plugin-n: 17.21.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + eslint-merge-processors: 2.0.0(eslint@9.35.0(jiti@2.6.1)) + eslint-plugin-antfu: 3.1.1(eslint@9.35.0(jiti@2.6.1)) + eslint-plugin-command: 3.3.1(eslint@9.35.0(jiti@2.6.1)) + eslint-plugin-import-lite: 0.3.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + eslint-plugin-jsdoc: 51.4.1(eslint@9.35.0(jiti@2.6.1)) + eslint-plugin-jsonc: 2.20.1(eslint@9.35.0(jiti@2.6.1)) + eslint-plugin-n: 17.21.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) eslint-plugin-no-only-tests: 3.3.0 - eslint-plugin-perfectionist: 4.15.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint-plugin-pnpm: 1.1.0(eslint@9.35.0(jiti@2.6.0)) - eslint-plugin-regexp: 2.9.0(eslint@9.35.0(jiti@2.6.0)) - eslint-plugin-toml: 0.12.0(eslint@9.35.0(jiti@2.6.0)) - eslint-plugin-unicorn: 60.0.0(eslint@9.35.0(jiti@2.6.0)) - eslint-plugin-unused-imports: 4.1.4(@typescript-eslint/eslint-plugin@8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.0)) - eslint-plugin-vue: 10.3.0(@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.0))(vue-eslint-parser@10.2.0(eslint@9.35.0(jiti@2.6.0))) - eslint-plugin-yml: 1.18.0(eslint@9.35.0(jiti@2.6.0)) - eslint-processor-vue-blocks: 2.0.0(@vue/compiler-sfc@3.5.17)(eslint@9.35.0(jiti@2.6.0)) + eslint-plugin-perfectionist: 4.15.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + 
eslint-plugin-pnpm: 1.1.0(eslint@9.35.0(jiti@2.6.1)) + eslint-plugin-regexp: 2.9.0(eslint@9.35.0(jiti@2.6.1)) + eslint-plugin-toml: 0.12.0(eslint@9.35.0(jiti@2.6.1)) + eslint-plugin-unicorn: 60.0.0(eslint@9.35.0(jiti@2.6.1)) + eslint-plugin-unused-imports: 4.1.4(@typescript-eslint/eslint-plugin@8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.1)) + eslint-plugin-vue: 10.3.0(@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.1))(vue-eslint-parser@10.2.0(eslint@9.35.0(jiti@2.6.1))) + eslint-plugin-yml: 1.18.0(eslint@9.35.0(jiti@2.6.1)) + eslint-processor-vue-blocks: 2.0.0(@vue/compiler-sfc@3.5.17)(eslint@9.35.0(jiti@2.6.1)) globals: 16.3.0 jsonc-eslint-parser: 2.4.0 local-pkg: 1.1.1 parse-gitignore: 2.0.0 toml-eslint-parser: 0.10.0 - vue-eslint-parser: 10.2.0(eslint@9.35.0(jiti@2.6.0)) + vue-eslint-parser: 10.2.0(eslint@9.35.0(jiti@2.6.1)) yaml-eslint-parser: 1.3.0 optionalDependencies: - '@eslint-react/eslint-plugin': 1.52.3(eslint@9.35.0(jiti@2.6.0))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3) + '@eslint-react/eslint-plugin': 1.52.3(eslint@9.35.0(jiti@2.6.1))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3) '@next/eslint-plugin-next': 15.5.4 - eslint-plugin-react-hooks: 5.2.0(eslint@9.35.0(jiti@2.6.0)) - eslint-plugin-react-refresh: 0.4.20(eslint@9.35.0(jiti@2.6.0)) + eslint-plugin-react-hooks: 5.2.0(eslint@9.35.0(jiti@2.6.1)) + eslint-plugin-react-refresh: 0.4.20(eslint@9.35.0(jiti@2.6.1)) transitivePeerDependencies: - '@eslint/json' - '@vue/compiler-sfc' @@ -9915,30 +9907,30 @@ snapshots: '@esbuild/win32-x64@0.25.0': optional: true - '@eslint-community/eslint-plugin-eslint-comments@4.5.0(eslint@9.35.0(jiti@2.6.0))': + '@eslint-community/eslint-plugin-eslint-comments@4.5.0(eslint@9.35.0(jiti@2.6.1))': dependencies: escape-string-regexp: 4.0.0 - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) ignore: 5.3.2 - '@eslint-community/eslint-utils@4.7.0(eslint@9.35.0(jiti@2.6.0))': + '@eslint-community/eslint-utils@4.7.0(eslint@9.35.0(jiti@2.6.1))': dependencies: - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) eslint-visitor-keys: 3.4.3 - '@eslint-community/eslint-utils@4.9.0(eslint@9.35.0(jiti@2.6.0))': + '@eslint-community/eslint-utils@4.9.0(eslint@9.35.0(jiti@2.6.1))': dependencies: - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) eslint-visitor-keys: 3.4.3 '@eslint-community/regexpp@4.12.1': {} - '@eslint-react/ast@1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3)': + '@eslint-react/ast@1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3)': dependencies: '@eslint-react/eff': 1.52.3 '@typescript-eslint/types': 8.37.0 '@typescript-eslint/typescript-estree': 8.44.0(typescript@5.8.3) - '@typescript-eslint/utils': 8.44.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/utils': 8.44.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) string-ts: 2.2.1 ts-pattern: 5.7.1 transitivePeerDependencies: @@ -9946,17 +9938,17 @@ snapshots: - supports-color - typescript - '@eslint-react/core@1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3)': + '@eslint-react/core@1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3)': dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - 
'@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/var': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/var': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 - '@typescript-eslint/type-utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/type-utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.44.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/utils': 8.44.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) birecord: 0.1.1 ts-pattern: 5.7.1 transitivePeerDependencies: @@ -9966,58 +9958,58 @@ snapshots: '@eslint-react/eff@1.52.3': {} - '@eslint-react/eslint-plugin@1.52.3(eslint@9.35.0(jiti@2.6.0))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3)': + '@eslint-react/eslint-plugin@1.52.3(eslint@9.35.0(jiti@2.6.1))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3)': dependencies: '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 - '@typescript-eslint/type-utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/type-utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint: 9.35.0(jiti@2.6.0) - eslint-plugin-react-debug: 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint-plugin-react-dom: 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint-plugin-react-hooks-extra: 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint-plugin-react-naming-convention: 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint-plugin-react-web-api: 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint-plugin-react-x: 1.52.3(eslint@9.35.0(jiti@2.6.0))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3) + '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + eslint: 9.35.0(jiti@2.6.1) + eslint-plugin-react-debug: 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + eslint-plugin-react-dom: 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + eslint-plugin-react-hooks-extra: 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + eslint-plugin-react-naming-convention: 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + eslint-plugin-react-web-api: 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + eslint-plugin-react-x: 1.52.3(eslint@9.35.0(jiti@2.6.1))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3) optionalDependencies: typescript: 5.8.3 transitivePeerDependencies: - supports-color - ts-api-utils - '@eslint-react/kit@1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3)': + '@eslint-react/kit@1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3)': dependencies: '@eslint-react/eff': 1.52.3 - '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) ts-pattern: 5.7.1 - zod: 4.0.5 
+ zod: 4.1.12 transitivePeerDependencies: - eslint - supports-color - typescript - '@eslint-react/shared@1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3)': + '@eslint-react/shared@1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3)': dependencies: '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) ts-pattern: 5.7.1 - zod: 4.0.5 + zod: 4.1.12 transitivePeerDependencies: - eslint - supports-color - typescript - '@eslint-react/var@1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3)': + '@eslint-react/var@1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3)': dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 '@typescript-eslint/scope-manager': 8.37.0 '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.44.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/utils': 8.44.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) string-ts: 2.2.1 ts-pattern: 5.7.1 transitivePeerDependencies: @@ -10025,9 +10017,9 @@ snapshots: - supports-color - typescript - '@eslint/compat@1.3.1(eslint@9.35.0(jiti@2.6.0))': + '@eslint/compat@1.3.1(eslint@9.35.0(jiti@2.6.1))': optionalDependencies: - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) '@eslint/config-array@0.21.0': dependencies: @@ -10861,7 +10853,7 @@ snapshots: react: 19.1.1 react-dom: 19.1.1(react@19.1.1) - '@napi-rs/wasm-runtime@1.0.5': + '@napi-rs/wasm-runtime@1.0.7': dependencies: '@emnapi/core': 1.5.0 '@emnapi/runtime': 1.5.0 @@ -11033,63 +11025,63 @@ snapshots: dependencies: '@octokit/openapi-types': 25.1.0 - '@oxc-resolver/binding-android-arm-eabi@11.8.4': + '@oxc-resolver/binding-android-arm-eabi@11.9.0': optional: true - '@oxc-resolver/binding-android-arm64@11.8.4': + '@oxc-resolver/binding-android-arm64@11.9.0': optional: true - '@oxc-resolver/binding-darwin-arm64@11.8.4': + '@oxc-resolver/binding-darwin-arm64@11.9.0': optional: true - '@oxc-resolver/binding-darwin-x64@11.8.4': + '@oxc-resolver/binding-darwin-x64@11.9.0': optional: true - '@oxc-resolver/binding-freebsd-x64@11.8.4': + '@oxc-resolver/binding-freebsd-x64@11.9.0': optional: true - '@oxc-resolver/binding-linux-arm-gnueabihf@11.8.4': + '@oxc-resolver/binding-linux-arm-gnueabihf@11.9.0': optional: true - '@oxc-resolver/binding-linux-arm-musleabihf@11.8.4': + '@oxc-resolver/binding-linux-arm-musleabihf@11.9.0': optional: true - '@oxc-resolver/binding-linux-arm64-gnu@11.8.4': + '@oxc-resolver/binding-linux-arm64-gnu@11.9.0': optional: true - '@oxc-resolver/binding-linux-arm64-musl@11.8.4': + '@oxc-resolver/binding-linux-arm64-musl@11.9.0': optional: true - '@oxc-resolver/binding-linux-ppc64-gnu@11.8.4': + '@oxc-resolver/binding-linux-ppc64-gnu@11.9.0': optional: true - '@oxc-resolver/binding-linux-riscv64-gnu@11.8.4': + '@oxc-resolver/binding-linux-riscv64-gnu@11.9.0': optional: true - '@oxc-resolver/binding-linux-riscv64-musl@11.8.4': + '@oxc-resolver/binding-linux-riscv64-musl@11.9.0': optional: true - '@oxc-resolver/binding-linux-s390x-gnu@11.8.4': + '@oxc-resolver/binding-linux-s390x-gnu@11.9.0': optional: true - '@oxc-resolver/binding-linux-x64-gnu@11.8.4': + '@oxc-resolver/binding-linux-x64-gnu@11.9.0': optional: true - 
'@oxc-resolver/binding-linux-x64-musl@11.8.4': + '@oxc-resolver/binding-linux-x64-musl@11.9.0': optional: true - '@oxc-resolver/binding-wasm32-wasi@11.8.4': + '@oxc-resolver/binding-wasm32-wasi@11.9.0': dependencies: - '@napi-rs/wasm-runtime': 1.0.5 + '@napi-rs/wasm-runtime': 1.0.7 optional: true - '@oxc-resolver/binding-win32-arm64-msvc@11.8.4': + '@oxc-resolver/binding-win32-arm64-msvc@11.9.0': optional: true - '@oxc-resolver/binding-win32-ia32-msvc@11.8.4': + '@oxc-resolver/binding-win32-ia32-msvc@11.9.0': optional: true - '@oxc-resolver/binding-win32-x64-msvc@11.8.4': + '@oxc-resolver/binding-win32-x64-msvc@11.9.0': optional: true '@parcel/watcher-android-arm64@2.5.1': @@ -11894,11 +11886,11 @@ snapshots: dependencies: storybook: 8.5.0 - '@stylistic/eslint-plugin@5.2.2(eslint@9.35.0(jiti@2.6.0))': + '@stylistic/eslint-plugin@5.2.2(eslint@9.35.0(jiti@2.6.1))': dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.0)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.1)) '@typescript-eslint/types': 8.38.0 - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) eslint-visitor-keys: 4.2.1 espree: 10.4.0 estraverse: 5.3.0 @@ -12347,15 +12339,15 @@ snapshots: dependencies: '@types/yargs-parser': 21.0.3 - '@typescript-eslint/eslint-plugin@8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3)': + '@typescript-eslint/eslint-plugin@8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3)': dependencies: '@eslint-community/regexpp': 4.12.1 - '@typescript-eslint/parser': 8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/parser': 8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.38.0 - '@typescript-eslint/type-utils': 8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@typescript-eslint/utils': 8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/type-utils': 8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@typescript-eslint/utils': 8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/visitor-keys': 8.38.0 - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) graphemer: 1.4.0 ignore: 7.0.5 natural-compare: 1.4.0 @@ -12364,14 +12356,14 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3)': + '@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3)': dependencies: '@typescript-eslint/scope-manager': 8.38.0 '@typescript-eslint/types': 8.38.0 '@typescript-eslint/typescript-estree': 8.38.0(typescript@5.8.3) '@typescript-eslint/visitor-keys': 8.38.0 debug: 4.4.1 - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) typescript: 5.8.3 transitivePeerDependencies: - supports-color @@ -12430,25 +12422,25 @@ snapshots: dependencies: typescript: 5.8.3 - '@typescript-eslint/type-utils@8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3)': + '@typescript-eslint/type-utils@8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3)': dependencies: '@typescript-eslint/types': 8.37.0 '@typescript-eslint/typescript-estree': 8.37.0(typescript@5.8.3) - '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) debug: 4.4.1 - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) ts-api-utils: 
2.1.0(typescript@5.8.3) typescript: 5.8.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/type-utils@8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3)': + '@typescript-eslint/type-utils@8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3)': dependencies: '@typescript-eslint/types': 8.38.0 '@typescript-eslint/typescript-estree': 8.38.0(typescript@5.8.3) - '@typescript-eslint/utils': 8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/utils': 8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) debug: 4.4.1 - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) ts-api-utils: 2.1.0(typescript@5.8.3) typescript: 5.8.3 transitivePeerDependencies: @@ -12508,35 +12500,35 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/utils@8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3)': + '@typescript-eslint/utils@8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3)': dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.0)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.1)) '@typescript-eslint/scope-manager': 8.37.0 '@typescript-eslint/types': 8.37.0 '@typescript-eslint/typescript-estree': 8.37.0(typescript@5.8.3) - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) typescript: 5.8.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/utils@8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3)': + '@typescript-eslint/utils@8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3)': dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.0)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.1)) '@typescript-eslint/scope-manager': 8.38.0 '@typescript-eslint/types': 8.38.0 '@typescript-eslint/typescript-estree': 8.38.0(typescript@5.8.3) - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) typescript: 5.8.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/utils@8.44.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3)': + '@typescript-eslint/utils@8.44.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3)': dependencies: - '@eslint-community/eslint-utils': 4.9.0(eslint@9.35.0(jiti@2.6.0)) + '@eslint-community/eslint-utils': 4.9.0(eslint@9.35.0(jiti@2.6.1)) '@typescript-eslint/scope-manager': 8.44.0 '@typescript-eslint/types': 8.44.0 '@typescript-eslint/typescript-estree': 8.44.0(typescript@5.8.3) - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) typescript: 5.8.3 transitivePeerDependencies: - supports-color @@ -12558,10 +12550,10 @@ snapshots: '@ungap/structured-clone@1.3.0': {} - '@vitest/eslint-plugin@1.3.4(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3)': + '@vitest/eslint-plugin@1.3.4(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3)': dependencies: - '@typescript-eslint/utils': 8.44.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint: 9.35.0(jiti@2.6.0) + '@typescript-eslint/utils': 8.44.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + eslint: 9.35.0(jiti@2.6.1) optionalDependencies: typescript: 5.8.3 transitivePeerDependencies: @@ -13983,67 +13975,67 @@ snapshots: escape-string-regexp@5.0.0: {} - eslint-compat-utils@0.5.1(eslint@9.35.0(jiti@2.6.0)): + eslint-compat-utils@0.5.1(eslint@9.35.0(jiti@2.6.1)): dependencies: - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) semver: 7.7.2 - eslint-compat-utils@0.6.5(eslint@9.35.0(jiti@2.6.0)): + eslint-compat-utils@0.6.5(eslint@9.35.0(jiti@2.6.1)): dependencies: - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) semver: 7.7.2 - 
eslint-config-flat-gitignore@2.1.0(eslint@9.35.0(jiti@2.6.0)): + eslint-config-flat-gitignore@2.1.0(eslint@9.35.0(jiti@2.6.1)): dependencies: - '@eslint/compat': 1.3.1(eslint@9.35.0(jiti@2.6.0)) - eslint: 9.35.0(jiti@2.6.0) + '@eslint/compat': 1.3.1(eslint@9.35.0(jiti@2.6.1)) + eslint: 9.35.0(jiti@2.6.1) eslint-flat-config-utils@2.1.0: dependencies: pathe: 2.0.3 - eslint-json-compat-utils@0.2.1(eslint@9.35.0(jiti@2.6.0))(jsonc-eslint-parser@2.4.0): + eslint-json-compat-utils@0.2.1(eslint@9.35.0(jiti@2.6.1))(jsonc-eslint-parser@2.4.0): dependencies: - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) esquery: 1.6.0 jsonc-eslint-parser: 2.4.0 - eslint-merge-processors@2.0.0(eslint@9.35.0(jiti@2.6.0)): + eslint-merge-processors@2.0.0(eslint@9.35.0(jiti@2.6.1)): dependencies: - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) - eslint-plugin-antfu@3.1.1(eslint@9.35.0(jiti@2.6.0)): + eslint-plugin-antfu@3.1.1(eslint@9.35.0(jiti@2.6.1)): dependencies: - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) - eslint-plugin-command@3.3.1(eslint@9.35.0(jiti@2.6.0)): + eslint-plugin-command@3.3.1(eslint@9.35.0(jiti@2.6.1)): dependencies: '@es-joy/jsdoccomment': 0.50.2 - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) - eslint-plugin-es-x@7.8.0(eslint@9.35.0(jiti@2.6.0)): + eslint-plugin-es-x@7.8.0(eslint@9.35.0(jiti@2.6.1)): dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.0)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.1)) '@eslint-community/regexpp': 4.12.1 - eslint: 9.35.0(jiti@2.6.0) - eslint-compat-utils: 0.5.1(eslint@9.35.0(jiti@2.6.0)) + eslint: 9.35.0(jiti@2.6.1) + eslint-compat-utils: 0.5.1(eslint@9.35.0(jiti@2.6.1)) - eslint-plugin-import-lite@0.3.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3): + eslint-plugin-import-lite@0.3.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3): dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.0)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.1)) '@typescript-eslint/types': 8.38.0 - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) optionalDependencies: typescript: 5.8.3 - eslint-plugin-jsdoc@51.4.1(eslint@9.35.0(jiti@2.6.0)): + eslint-plugin-jsdoc@51.4.1(eslint@9.35.0(jiti@2.6.1)): dependencies: '@es-joy/jsdoccomment': 0.52.0 are-docs-informative: 0.0.2 comment-parser: 1.4.1 debug: 4.4.1 escape-string-regexp: 4.0.0 - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) espree: 10.4.0 esquery: 1.6.0 parse-imports-exports: 0.2.4 @@ -14052,12 +14044,12 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-jsonc@2.20.1(eslint@9.35.0(jiti@2.6.0)): + eslint-plugin-jsonc@2.20.1(eslint@9.35.0(jiti@2.6.1)): dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.0)) - eslint: 9.35.0(jiti@2.6.0) - eslint-compat-utils: 0.6.5(eslint@9.35.0(jiti@2.6.0)) - eslint-json-compat-utils: 0.2.1(eslint@9.35.0(jiti@2.6.0))(jsonc-eslint-parser@2.4.0) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.1)) + eslint: 9.35.0(jiti@2.6.1) + eslint-compat-utils: 0.6.5(eslint@9.35.0(jiti@2.6.1)) + eslint-json-compat-utils: 0.2.1(eslint@9.35.0(jiti@2.6.1))(jsonc-eslint-parser@2.4.0) espree: 10.4.0 graphemer: 1.4.0 jsonc-eslint-parser: 2.4.0 @@ -14066,12 +14058,12 @@ snapshots: transitivePeerDependencies: - '@eslint/json' - eslint-plugin-n@17.21.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3): + eslint-plugin-n@17.21.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3): dependencies: - 
'@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.0)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.1)) enhanced-resolve: 5.18.2 - eslint: 9.35.0(jiti@2.6.0) - eslint-plugin-es-x: 7.8.0(eslint@9.35.0(jiti@2.6.0)) + eslint: 9.35.0(jiti@2.6.1) + eslint-plugin-es-x: 7.8.0(eslint@9.35.0(jiti@2.6.1)) get-tsconfig: 4.10.1 globals: 15.15.0 ignore: 5.3.2 @@ -14087,19 +14079,19 @@ snapshots: dependencies: jsonc-parser: 3.3.1 - eslint-plugin-perfectionist@4.15.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3): + eslint-plugin-perfectionist@4.15.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3): dependencies: '@typescript-eslint/types': 8.38.0 - '@typescript-eslint/utils': 8.44.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint: 9.35.0(jiti@2.6.0) + '@typescript-eslint/utils': 8.44.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + eslint: 9.35.0(jiti@2.6.1) natural-orderby: 5.0.0 transitivePeerDependencies: - supports-color - typescript - eslint-plugin-pnpm@1.1.0(eslint@9.35.0(jiti@2.6.0)): + eslint-plugin-pnpm@1.1.0(eslint@9.35.0(jiti@2.6.1)): dependencies: - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) find-up-simple: 1.0.1 jsonc-eslint-parser: 2.4.0 pathe: 2.0.3 @@ -14107,19 +14099,19 @@ snapshots: tinyglobby: 0.2.14 yaml-eslint-parser: 1.3.0 - eslint-plugin-react-debug@1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3): + eslint-plugin-react-debug@1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3): dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/core': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/core': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/var': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/var': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 - '@typescript-eslint/type-utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/type-utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint: 9.35.0(jiti@2.6.0) + '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + eslint: 9.35.0(jiti@2.6.1) string-ts: 2.2.1 ts-pattern: 5.7.1 optionalDependencies: @@ -14127,19 +14119,19 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-react-dom@1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3): + eslint-plugin-react-dom@1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3): dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/core': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/core': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/var': 
1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/var': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) compare-versions: 6.1.1 - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) string-ts: 2.2.1 ts-pattern: 5.7.1 optionalDependencies: @@ -14147,19 +14139,19 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-react-hooks-extra@1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3): + eslint-plugin-react-hooks-extra@1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3): dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/core': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/core': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/var': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/var': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 - '@typescript-eslint/type-utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/type-utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint: 9.35.0(jiti@2.6.0) + '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + eslint: 9.35.0(jiti@2.6.1) string-ts: 2.2.1 ts-pattern: 5.7.1 optionalDependencies: @@ -14167,23 +14159,23 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-react-hooks@5.2.0(eslint@9.35.0(jiti@2.6.0)): + eslint-plugin-react-hooks@5.2.0(eslint@9.35.0(jiti@2.6.1)): dependencies: - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) - eslint-plugin-react-naming-convention@1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3): + eslint-plugin-react-naming-convention@1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3): dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/core': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/core': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/var': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/var': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 
- '@typescript-eslint/type-utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/type-utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint: 9.35.0(jiti@2.6.0) + '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + eslint: 9.35.0(jiti@2.6.1) string-ts: 2.2.1 ts-pattern: 5.7.1 optionalDependencies: @@ -14191,22 +14183,22 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-react-refresh@0.4.20(eslint@9.35.0(jiti@2.6.0)): + eslint-plugin-react-refresh@0.4.20(eslint@9.35.0(jiti@2.6.1)): dependencies: - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) - eslint-plugin-react-web-api@1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3): + eslint-plugin-react-web-api@1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3): dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/core': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/core': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/var': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/var': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint: 9.35.0(jiti@2.6.0) + '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + eslint: 9.35.0(jiti@2.6.1) string-ts: 2.2.1 ts-pattern: 5.7.1 optionalDependencies: @@ -14214,21 +14206,21 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-react-x@1.52.3(eslint@9.35.0(jiti@2.6.0))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3): + eslint-plugin-react-x@1.52.3(eslint@9.35.0(jiti@2.6.1))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3): dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/core': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/core': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - '@eslint-react/var': 1.52.3(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + '@eslint-react/var': 1.52.3(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 - '@typescript-eslint/type-utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/type-utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + 
'@typescript-eslint/utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) compare-versions: 6.1.1 - eslint: 9.35.0(jiti@2.6.0) - is-immutable-type: 5.0.1(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + eslint: 9.35.0(jiti@2.6.1) + is-immutable-type: 5.0.1(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) string-ts: 2.2.1 ts-pattern: 5.7.1 optionalDependencies: @@ -14237,23 +14229,23 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-regexp@2.9.0(eslint@9.35.0(jiti@2.6.0)): + eslint-plugin-regexp@2.9.0(eslint@9.35.0(jiti@2.6.1)): dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.0)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.1)) '@eslint-community/regexpp': 4.12.1 comment-parser: 1.4.1 - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) jsdoc-type-pratt-parser: 4.1.0 refa: 0.12.1 regexp-ast-analysis: 0.7.1 scslre: 0.3.0 - eslint-plugin-sonarjs@3.0.4(eslint@9.35.0(jiti@2.6.0)): + eslint-plugin-sonarjs@3.0.4(eslint@9.35.0(jiti@2.6.1)): dependencies: '@eslint-community/regexpp': 4.12.1 builtin-modules: 3.3.0 bytes: 3.1.2 - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) functional-red-black-tree: 1.0.1 jsx-ast-utils: 3.3.5 lodash.merge: 4.6.2 @@ -14262,11 +14254,11 @@ snapshots: semver: 7.7.2 typescript: 5.8.3 - eslint-plugin-storybook@9.0.7(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3): + eslint-plugin-storybook@9.0.7(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3): dependencies: '@storybook/csf': 0.1.13 - '@typescript-eslint/utils': 8.44.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint: 9.35.0(jiti@2.6.0) + '@typescript-eslint/utils': 8.44.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + eslint: 9.35.0(jiti@2.6.1) ts-dedent: 2.2.0 transitivePeerDependencies: - supports-color @@ -14278,26 +14270,26 @@ snapshots: postcss: 8.5.6 tailwindcss: 3.4.17(ts-node@10.9.2(@types/node@18.15.0)(typescript@5.8.3)) - eslint-plugin-toml@0.12.0(eslint@9.35.0(jiti@2.6.0)): + eslint-plugin-toml@0.12.0(eslint@9.35.0(jiti@2.6.1)): dependencies: debug: 4.4.1 - eslint: 9.35.0(jiti@2.6.0) - eslint-compat-utils: 0.6.5(eslint@9.35.0(jiti@2.6.0)) + eslint: 9.35.0(jiti@2.6.1) + eslint-compat-utils: 0.6.5(eslint@9.35.0(jiti@2.6.1)) lodash: 4.17.21 toml-eslint-parser: 0.10.0 transitivePeerDependencies: - supports-color - eslint-plugin-unicorn@60.0.0(eslint@9.35.0(jiti@2.6.0)): + eslint-plugin-unicorn@60.0.0(eslint@9.35.0(jiti@2.6.1)): dependencies: '@babel/helper-validator-identifier': 7.27.1 - '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.0)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.1)) '@eslint/plugin-kit': 0.3.4 change-case: 5.4.4 ci-info: 4.3.0 clean-regexp: 1.0.0 core-js-compat: 3.44.0 - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) esquery: 1.6.0 find-up-simple: 1.0.1 globals: 16.3.0 @@ -14310,40 +14302,40 @@ snapshots: semver: 7.7.2 strip-indent: 4.0.0 - eslint-plugin-unused-imports@4.1.4(@typescript-eslint/eslint-plugin@8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.0)): + eslint-plugin-unused-imports@4.1.4(@typescript-eslint/eslint-plugin@8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.1)): dependencies: - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) optionalDependencies: - '@typescript-eslint/eslint-plugin': 
8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/eslint-plugin': 8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) - eslint-plugin-vue@10.3.0(@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.0))(vue-eslint-parser@10.2.0(eslint@9.35.0(jiti@2.6.0))): + eslint-plugin-vue@10.3.0(@typescript-eslint/parser@8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3))(eslint@9.35.0(jiti@2.6.1))(vue-eslint-parser@10.2.0(eslint@9.35.0(jiti@2.6.1))): dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.0)) - eslint: 9.35.0(jiti@2.6.0) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.35.0(jiti@2.6.1)) + eslint: 9.35.0(jiti@2.6.1) natural-compare: 1.4.0 nth-check: 2.1.1 postcss-selector-parser: 6.1.2 semver: 7.7.2 - vue-eslint-parser: 10.2.0(eslint@9.35.0(jiti@2.6.0)) + vue-eslint-parser: 10.2.0(eslint@9.35.0(jiti@2.6.1)) xml-name-validator: 4.0.0 optionalDependencies: - '@typescript-eslint/parser': 8.38.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) + '@typescript-eslint/parser': 8.38.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) - eslint-plugin-yml@1.18.0(eslint@9.35.0(jiti@2.6.0)): + eslint-plugin-yml@1.18.0(eslint@9.35.0(jiti@2.6.1)): dependencies: debug: 4.4.1 escape-string-regexp: 4.0.0 - eslint: 9.35.0(jiti@2.6.0) - eslint-compat-utils: 0.6.5(eslint@9.35.0(jiti@2.6.0)) + eslint: 9.35.0(jiti@2.6.1) + eslint-compat-utils: 0.6.5(eslint@9.35.0(jiti@2.6.1)) natural-compare: 1.4.0 yaml-eslint-parser: 1.3.0 transitivePeerDependencies: - supports-color - eslint-processor-vue-blocks@2.0.0(@vue/compiler-sfc@3.5.17)(eslint@9.35.0(jiti@2.6.0)): + eslint-processor-vue-blocks@2.0.0(@vue/compiler-sfc@3.5.17)(eslint@9.35.0(jiti@2.6.1)): dependencies: '@vue/compiler-sfc': 3.5.17 - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) eslint-scope@5.1.1: dependencies: @@ -14359,9 +14351,9 @@ snapshots: eslint-visitor-keys@4.2.1: {} - eslint@9.35.0(jiti@2.6.0): + eslint@9.35.0(jiti@2.6.1): dependencies: - '@eslint-community/eslint-utils': 4.9.0(eslint@9.35.0(jiti@2.6.0)) + '@eslint-community/eslint-utils': 4.9.0(eslint@9.35.0(jiti@2.6.1)) '@eslint-community/regexpp': 4.12.1 '@eslint/config-array': 0.21.0 '@eslint/config-helpers': 0.3.1 @@ -14397,7 +14389,7 @@ snapshots: natural-compare: 1.4.0 optionator: 0.9.4 optionalDependencies: - jiti: 2.6.0 + jiti: 2.6.1 transitivePeerDependencies: - supports-color @@ -15152,10 +15144,10 @@ snapshots: is-hexadecimal@2.0.1: {} - is-immutable-type@5.0.1(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3): + is-immutable-type@5.0.1(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3): dependencies: - '@typescript-eslint/type-utils': 8.37.0(eslint@9.35.0(jiti@2.6.0))(typescript@5.8.3) - eslint: 9.35.0(jiti@2.6.0) + '@typescript-eslint/type-utils': 8.37.0(eslint@9.35.0(jiti@2.6.1))(typescript@5.8.3) + eslint: 9.35.0(jiti@2.6.1) ts-api-utils: 2.1.0(typescript@5.8.3) ts-declaration-location: 1.0.7(typescript@5.8.3) typescript: 5.8.3 @@ -15572,7 +15564,7 @@ snapshots: jiti@1.21.7: {} - jiti@2.6.0: {} + jiti@2.6.1: {} js-audio-recorder@1.0.7: {} @@ -15647,22 +15639,22 @@ snapshots: kleur@3.0.3: {} - knip@5.64.1(@types/node@18.15.0)(typescript@5.8.3): + knip@5.64.3(@types/node@18.15.0)(typescript@5.8.3): dependencies: '@nodelib/fs.walk': 1.2.8 '@types/node': 18.15.0 fast-glob: 3.3.3 formatly: 0.3.0 - jiti: 2.6.0 + jiti: 2.6.1 js-yaml: 4.1.0 
minimist: 1.2.8 - oxc-resolver: 11.8.4 + oxc-resolver: 11.9.0 picocolors: 1.1.1 picomatch: 4.0.3 smol-toml: 1.4.2 strip-json-comments: 5.0.2 typescript: 5.8.3 - zod: 4.1.11 + zod: 4.1.12 kolorist@1.8.0: {} @@ -16482,8 +16474,6 @@ snapshots: nanoid@3.3.11: {} - napi-postinstall@0.3.0: {} - natural-compare@1.4.0: {} natural-orderby@5.0.0: {} @@ -16661,29 +16651,27 @@ snapshots: os-browserify@0.3.0: {} - oxc-resolver@11.8.4: - dependencies: - napi-postinstall: 0.3.0 + oxc-resolver@11.9.0: optionalDependencies: - '@oxc-resolver/binding-android-arm-eabi': 11.8.4 - '@oxc-resolver/binding-android-arm64': 11.8.4 - '@oxc-resolver/binding-darwin-arm64': 11.8.4 - '@oxc-resolver/binding-darwin-x64': 11.8.4 - '@oxc-resolver/binding-freebsd-x64': 11.8.4 - '@oxc-resolver/binding-linux-arm-gnueabihf': 11.8.4 - '@oxc-resolver/binding-linux-arm-musleabihf': 11.8.4 - '@oxc-resolver/binding-linux-arm64-gnu': 11.8.4 - '@oxc-resolver/binding-linux-arm64-musl': 11.8.4 - '@oxc-resolver/binding-linux-ppc64-gnu': 11.8.4 - '@oxc-resolver/binding-linux-riscv64-gnu': 11.8.4 - '@oxc-resolver/binding-linux-riscv64-musl': 11.8.4 - '@oxc-resolver/binding-linux-s390x-gnu': 11.8.4 - '@oxc-resolver/binding-linux-x64-gnu': 11.8.4 - '@oxc-resolver/binding-linux-x64-musl': 11.8.4 - '@oxc-resolver/binding-wasm32-wasi': 11.8.4 - '@oxc-resolver/binding-win32-arm64-msvc': 11.8.4 - '@oxc-resolver/binding-win32-ia32-msvc': 11.8.4 - '@oxc-resolver/binding-win32-x64-msvc': 11.8.4 + '@oxc-resolver/binding-android-arm-eabi': 11.9.0 + '@oxc-resolver/binding-android-arm64': 11.9.0 + '@oxc-resolver/binding-darwin-arm64': 11.9.0 + '@oxc-resolver/binding-darwin-x64': 11.9.0 + '@oxc-resolver/binding-freebsd-x64': 11.9.0 + '@oxc-resolver/binding-linux-arm-gnueabihf': 11.9.0 + '@oxc-resolver/binding-linux-arm-musleabihf': 11.9.0 + '@oxc-resolver/binding-linux-arm64-gnu': 11.9.0 + '@oxc-resolver/binding-linux-arm64-musl': 11.9.0 + '@oxc-resolver/binding-linux-ppc64-gnu': 11.9.0 + '@oxc-resolver/binding-linux-riscv64-gnu': 11.9.0 + '@oxc-resolver/binding-linux-riscv64-musl': 11.9.0 + '@oxc-resolver/binding-linux-s390x-gnu': 11.9.0 + '@oxc-resolver/binding-linux-x64-gnu': 11.9.0 + '@oxc-resolver/binding-linux-x64-musl': 11.9.0 + '@oxc-resolver/binding-wasm32-wasi': 11.9.0 + '@oxc-resolver/binding-win32-arm64-msvc': 11.9.0 + '@oxc-resolver/binding-win32-ia32-msvc': 11.9.0 + '@oxc-resolver/binding-win32-x64-msvc': 11.9.0 p-cancelable@2.1.1: {} @@ -18412,10 +18400,10 @@ snapshots: vscode-uri@3.0.8: {} - vue-eslint-parser@10.2.0(eslint@9.35.0(jiti@2.6.0)): + vue-eslint-parser@10.2.0(eslint@9.35.0(jiti@2.6.1)): dependencies: debug: 4.4.1 - eslint: 9.35.0(jiti@2.6.0) + eslint: 9.35.0(jiti@2.6.1) eslint-scope: 8.4.0 eslint-visitor-keys: 4.2.1 espree: 10.4.0 @@ -18742,9 +18730,7 @@ snapshots: zod@3.25.76: {} - zod@4.0.5: {} - - zod@4.1.11: {} + zod@4.1.12: {} zrender@5.6.1: dependencies: From 81e1376e0800192df1f0fbd43904979dfc624c06 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 10:00:35 +0800 Subject: [PATCH 38/49] chore(deps): bump opik from 1.7.43 to 1.8.72 in /api (#26804) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- api/pyproject.toml | 2 +- api/uv.lock | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/api/pyproject.toml b/api/pyproject.toml index 897d114dcc..7e9aeeaa97 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -36,7 +36,7 @@ dependencies = [ "markdown~=3.5.1", 
"numpy~=1.26.4", "openpyxl~=3.1.5", - "opik~=1.7.25", + "opik~=1.8.72", "opentelemetry-api==1.27.0", "opentelemetry-distro==0.48b0", "opentelemetry-exporter-otlp==1.27.0", diff --git a/api/uv.lock b/api/uv.lock index 49339129e1..050bd4ec1d 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1520,7 +1520,7 @@ requires-dist = [ { name = "opentelemetry-sdk", specifier = "==1.27.0" }, { name = "opentelemetry-semantic-conventions", specifier = "==0.48b0" }, { name = "opentelemetry-util-http", specifier = "==0.48b0" }, - { name = "opik", specifier = "~=1.7.25" }, + { name = "opik", specifier = "~=1.8.72" }, { name = "packaging", specifier = "~=23.2" }, { name = "pandas", extras = ["excel", "output-formatting", "performance"], specifier = "~=2.2.2" }, { name = "psycogreen", specifier = "~=1.0.2" }, @@ -4019,7 +4019,7 @@ wheels = [ [[package]] name = "opik" -version = "1.7.43" +version = "1.8.72" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "boto3-stubs", extra = ["bedrock-runtime"] }, @@ -4038,9 +4038,9 @@ dependencies = [ { name = "tqdm" }, { name = "uuid6" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ba/52/cea0317bc3207bc967b48932781995d9cdb2c490e7e05caa00ff660f7205/opik-1.7.43.tar.gz", hash = "sha256:0b02522b0b74d0a67b141939deda01f8bb69690eda6b04a7cecb1c7f0649ccd0", size = 326886, upload-time = "2025-07-07T10:30:07.715Z" } +sdist = { url = "https://files.pythonhosted.org/packages/aa/08/679b60db21994cf3318d4cdd1d08417c1877b79ac20971a8d80f118c9455/opik-1.8.72.tar.gz", hash = "sha256:26fcb003dc609d96b52eaf6a12fb16eb2b69eb0d1b35d88279ec612925d23944", size = 409774, upload-time = "2025-10-10T13:22:38.2Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/76/ae/f3566bdc3c49a1a8f795b1b6e726ef211c87e31f92d870ca6d63999c9bbf/opik-1.7.43-py3-none-any.whl", hash = "sha256:a66395c8b5ea7c24846f72dafc70c74d5b8f24ffbc4c8a1b3a7f9456e550568d", size = 625356, upload-time = "2025-07-07T10:30:06.389Z" }, + { url = "https://files.pythonhosted.org/packages/f8/f5/04d35af828d127de65a36286ce5b53e7310087a6b55a56f398daa7f0c9a6/opik-1.8.72-py3-none-any.whl", hash = "sha256:697e361a8364666f36aeb197aaba7ffa0696b49f04d2257b733d436749c90a8c", size = 768233, upload-time = "2025-10-10T13:22:36.352Z" }, ] [[package]] From dfc03bac9f7242a0c544a7084c1b2b68a62a2575 Mon Sep 17 00:00:00 2001 From: Yuto Yamada <46510874+opeco17@users.noreply.github.com> Date: Mon, 13 Oct 2025 11:04:19 +0900 Subject: [PATCH 39/49] Fix typo: reponse to response (#26792) --- api/core/plugin/impl/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/plugin/impl/base.py b/api/core/plugin/impl/base.py index c791b35161..952fefdbbc 100644 --- a/api/core/plugin/impl/base.py +++ b/api/core/plugin/impl/base.py @@ -255,7 +255,7 @@ class BasePluginClient: except Exception: raise PluginDaemonInnerError(code=rep.code, message=rep.message) - logger.error("Error in stream reponse for plugin %s", rep.__dict__) + logger.error("Error in stream response for plugin %s", rep.__dict__) self._handle_plugin_daemon_error(error.error_type, error.message) raise ValueError(f"plugin daemon: {rep.message}, code: {rep.code}") if rep.data is None: From 0a56d655818f773568f554aae74ccb5f05a60993 Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Mon, 13 Oct 2025 11:16:12 +0900 Subject: [PATCH 40/49] Issue 23579 (#26777) Co-authored-by: google-labs-jules[bot] <161369871+google-labs-jules[bot]@users.noreply.github.com> Co-authored-by: gemini-code-assist[bot] 
<176961590+gemini-code-assist[bot]@users.noreply.github.com> Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- api/commands.py | 16 ++ api/core/tools/mcp_tool/provider.py | 3 +- .../tools/utils/model_invocation_utils.py | 9 +- api/migrations/env.py | 5 +- api/models/source.py | 48 ++-- api/models/task.py | 44 ++-- api/models/tools.py | 214 +++++++++++------- api/models/web.py | 42 ++-- api/services/auth/api_key_auth_service.py | 9 +- .../tools/api_tools_manage_service.py | 2 +- .../tools/builtin_tools_manage_service.py | 2 +- .../tools/mcp_tools_manage_service.py | 2 + api/services/tools/tools_transform_service.py | 3 +- 13 files changed, 256 insertions(+), 143 deletions(-) diff --git a/api/commands.py b/api/commands.py index 82efe34611..8ca19e1dac 100644 --- a/api/commands.py +++ b/api/commands.py @@ -1521,6 +1521,14 @@ def transform_datasource_credentials(): auth_count = 0 for firecrawl_tenant_credential in firecrawl_tenant_credentials: auth_count += 1 + if not firecrawl_tenant_credential.credentials: + click.echo( + click.style( + f"Skipping firecrawl credential for tenant {tenant_id} due to missing credentials.", + fg="yellow", + ) + ) + continue # get credential api key credentials_json = json.loads(firecrawl_tenant_credential.credentials) api_key = credentials_json.get("config", {}).get("api_key") @@ -1576,6 +1584,14 @@ def transform_datasource_credentials(): auth_count = 0 for jina_tenant_credential in jina_tenant_credentials: auth_count += 1 + if not jina_tenant_credential.credentials: + click.echo( + click.style( + f"Skipping jina credential for tenant {tenant_id} due to missing credentials.", + fg="yellow", + ) + ) + continue # get credential api key credentials_json = json.loads(jina_tenant_credential.credentials) api_key = credentials_json.get("config", {}).get("api_key") diff --git a/api/core/tools/mcp_tool/provider.py b/api/core/tools/mcp_tool/provider.py index f269b8db9b..0c2870727e 100644 --- a/api/core/tools/mcp_tool/provider.py +++ b/api/core/tools/mcp_tool/provider.py @@ -76,7 +76,8 @@ class MCPToolProviderController(ToolProviderController): ) for remote_mcp_tool in remote_mcp_tools ] - + if not db_provider.icon: + raise ValueError("Database provider icon is required") return cls( entity=ToolProviderEntityWithPlugin( identity=ToolProviderIdentity( diff --git a/api/core/tools/utils/model_invocation_utils.py b/api/core/tools/utils/model_invocation_utils.py index 526f5c8b9a..b4bae08a9b 100644 --- a/api/core/tools/utils/model_invocation_utils.py +++ b/api/core/tools/utils/model_invocation_utils.py @@ -5,6 +5,7 @@ Therefore, a model manager is needed to list/invoke/validate models. 
""" import json +from decimal import Decimal from typing import cast from core.model_manager import ModelManager @@ -118,10 +119,10 @@ class ModelInvocationUtils: model_response="", prompt_tokens=prompt_tokens, answer_tokens=0, - answer_unit_price=0, - answer_price_unit=0, + answer_unit_price=Decimal(), + answer_price_unit=Decimal(), provider_response_latency=0, - total_price=0, + total_price=Decimal(), currency="USD", ) @@ -152,7 +153,7 @@ class ModelInvocationUtils: raise InvokeModelError(f"Invoke error: {e}") # update tool model invoke - tool_model_invoke.model_response = response.message.content + tool_model_invoke.model_response = str(response.message.content) if response.usage: tool_model_invoke.answer_tokens = response.usage.completion_tokens tool_model_invoke.answer_unit_price = response.usage.completion_unit_price diff --git a/api/migrations/env.py b/api/migrations/env.py index a5d815dcfd..66a4614e80 100644 --- a/api/migrations/env.py +++ b/api/migrations/env.py @@ -37,10 +37,11 @@ config.set_main_option('sqlalchemy.url', get_engine_url()) # my_important_option = config.get_main_option("my_important_option") # ... etc. -from models.base import Base +from models.base import TypeBase + def get_metadata(): - return Base.metadata + return TypeBase.metadata def include_object(object, name, type_, reflected, compare_to): if type_ == "foreign_key_constraint": diff --git a/api/models/source.py b/api/models/source.py index 5b4c486bc4..0ed7c4c70e 100644 --- a/api/models/source.py +++ b/api/models/source.py @@ -6,12 +6,12 @@ from sqlalchemy import DateTime, String, func from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.orm import Mapped, mapped_column -from models.base import Base +from models.base import TypeBase from .types import StringUUID -class DataSourceOauthBinding(Base): +class DataSourceOauthBinding(TypeBase): __tablename__ = "data_source_oauth_bindings" __table_args__ = ( sa.PrimaryKeyConstraint("id", name="source_binding_pkey"), @@ -19,17 +19,25 @@ class DataSourceOauthBinding(Base): sa.Index("source_info_idx", "source_info", postgresql_using="gin"), ) - id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) - tenant_id = mapped_column(StringUUID, nullable=False) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) + tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) access_token: Mapped[str] = mapped_column(String(255), nullable=False) provider: Mapped[str] = mapped_column(String(255), nullable=False) - source_info = mapped_column(JSONB, nullable=False) - created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) - disabled: Mapped[bool | None] = mapped_column(sa.Boolean, nullable=True, server_default=sa.text("false")) + source_info: Mapped[dict] = mapped_column(JSONB, nullable=False) + created_at: Mapped[datetime] = mapped_column( + DateTime, nullable=False, server_default=func.current_timestamp(), init=False + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime, + nullable=False, + server_default=func.current_timestamp(), + onupdate=func.current_timestamp(), + init=False, + ) + disabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=True, server_default=sa.text("false"), default=False) -class DataSourceApiKeyAuthBinding(Base): +class DataSourceApiKeyAuthBinding(TypeBase): __tablename__ = 
"data_source_api_key_auth_bindings" __table_args__ = ( sa.PrimaryKeyConstraint("id", name="data_source_api_key_auth_binding_pkey"), @@ -37,14 +45,22 @@ class DataSourceApiKeyAuthBinding(Base): sa.Index("data_source_api_key_auth_binding_provider_idx", "provider"), ) - id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) - tenant_id = mapped_column(StringUUID, nullable=False) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) + tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) category: Mapped[str] = mapped_column(String(255), nullable=False) provider: Mapped[str] = mapped_column(String(255), nullable=False) - credentials = mapped_column(sa.Text, nullable=True) # JSON - created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) - disabled: Mapped[bool | None] = mapped_column(sa.Boolean, nullable=True, server_default=sa.text("false")) + credentials: Mapped[str | None] = mapped_column(sa.Text, nullable=True, default=None) # JSON + created_at: Mapped[datetime] = mapped_column( + DateTime, nullable=False, server_default=func.current_timestamp(), init=False + ) + updated_at: Mapped[datetime] = mapped_column( + DateTime, + nullable=False, + server_default=func.current_timestamp(), + onupdate=func.current_timestamp(), + init=False, + ) + disabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=True, server_default=sa.text("false"), default=False) def to_dict(self): return { @@ -52,7 +68,7 @@ class DataSourceApiKeyAuthBinding(Base): "tenant_id": self.tenant_id, "category": self.category, "provider": self.provider, - "credentials": json.loads(self.credentials), + "credentials": json.loads(self.credentials) if self.credentials else None, "created_at": self.created_at.timestamp(), "updated_at": self.updated_at.timestamp(), "disabled": self.disabled, diff --git a/api/models/task.py b/api/models/task.py index 4e49254dbd..513f167cce 100644 --- a/api/models/task.py +++ b/api/models/task.py @@ -6,41 +6,43 @@ from sqlalchemy import DateTime, String from sqlalchemy.orm import Mapped, mapped_column from libs.datetime_utils import naive_utc_now -from models.base import Base +from models.base import TypeBase -class CeleryTask(Base): +class CeleryTask(TypeBase): """Task result/status.""" __tablename__ = "celery_taskmeta" - id = mapped_column(sa.Integer, sa.Sequence("task_id_sequence"), primary_key=True, autoincrement=True) - task_id = mapped_column(String(155), unique=True) - status = mapped_column(String(50), default=states.PENDING) - result = mapped_column(sa.PickleType, nullable=True) - date_done = mapped_column( + id: Mapped[int] = mapped_column( + sa.Integer, sa.Sequence("task_id_sequence"), primary_key=True, autoincrement=True, init=False + ) + task_id: Mapped[str] = mapped_column(String(155), unique=True) + status: Mapped[str] = mapped_column(String(50), default=states.PENDING) + result: Mapped[bytes | None] = mapped_column(sa.PickleType, nullable=True, default=None) + date_done: Mapped[datetime | None] = mapped_column( DateTime, - default=lambda: naive_utc_now(), - onupdate=lambda: naive_utc_now(), + default=naive_utc_now, + onupdate=naive_utc_now, nullable=True, ) - traceback = mapped_column(sa.Text, nullable=True) - name = mapped_column(String(155), nullable=True) - args = mapped_column(sa.LargeBinary, nullable=True) - kwargs = 
mapped_column(sa.LargeBinary, nullable=True) - worker = mapped_column(String(155), nullable=True) - retries: Mapped[int | None] = mapped_column(sa.Integer, nullable=True) - queue = mapped_column(String(155), nullable=True) + traceback: Mapped[str | None] = mapped_column(sa.Text, nullable=True, default=None) + name: Mapped[str | None] = mapped_column(String(155), nullable=True, default=None) + args: Mapped[bytes | None] = mapped_column(sa.LargeBinary, nullable=True, default=None) + kwargs: Mapped[bytes | None] = mapped_column(sa.LargeBinary, nullable=True, default=None) + worker: Mapped[str | None] = mapped_column(String(155), nullable=True, default=None) + retries: Mapped[int | None] = mapped_column(sa.Integer, nullable=True, default=None) + queue: Mapped[str | None] = mapped_column(String(155), nullable=True, default=None) -class CeleryTaskSet(Base): +class CeleryTaskSet(TypeBase): """TaskSet result.""" __tablename__ = "celery_tasksetmeta" id: Mapped[int] = mapped_column( - sa.Integer, sa.Sequence("taskset_id_sequence"), autoincrement=True, primary_key=True + sa.Integer, sa.Sequence("taskset_id_sequence"), autoincrement=True, primary_key=True, init=False ) - taskset_id = mapped_column(String(155), unique=True) - result = mapped_column(sa.PickleType, nullable=True) - date_done: Mapped[datetime | None] = mapped_column(DateTime, default=lambda: naive_utc_now(), nullable=True) + taskset_id: Mapped[str] = mapped_column(String(155), unique=True) + result: Mapped[bytes | None] = mapped_column(sa.PickleType, nullable=True, default=None) + date_done: Mapped[datetime | None] = mapped_column(DateTime, default=naive_utc_now, nullable=True) diff --git a/api/models/tools.py b/api/models/tools.py index d581d588a4..aec53da50c 100644 --- a/api/models/tools.py +++ b/api/models/tools.py @@ -1,6 +1,7 @@ import json from collections.abc import Mapping from datetime import datetime +from decimal import Decimal from typing import TYPE_CHECKING, Any, cast from urllib.parse import urlparse @@ -13,7 +14,7 @@ from core.helper import encrypter from core.tools.entities.common_entities import I18nObject from core.tools.entities.tool_bundle import ApiToolBundle from core.tools.entities.tool_entities import ApiProviderSchemaType, WorkflowToolParameterConfiguration -from models.base import Base, TypeBase +from models.base import TypeBase from .engine import db from .model import Account, App, Tenant @@ -42,28 +43,28 @@ class ToolOAuthSystemClient(TypeBase): # tenant level tool oauth client params (client_id, client_secret, etc.) 
-class ToolOAuthTenantClient(Base): +class ToolOAuthTenantClient(TypeBase): __tablename__ = "tool_oauth_tenant_clients" __table_args__ = ( sa.PrimaryKeyConstraint("id", name="tool_oauth_tenant_client_pkey"), sa.UniqueConstraint("tenant_id", "plugin_id", "provider", name="unique_tool_oauth_tenant_client"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) # tenant id tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) plugin_id: Mapped[str] = mapped_column(String(512), nullable=False) provider: Mapped[str] = mapped_column(String(255), nullable=False) - enabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("true")) + enabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("true"), init=False) # oauth params of the tool provider - encrypted_oauth_params: Mapped[str] = mapped_column(sa.Text, nullable=False) + encrypted_oauth_params: Mapped[str] = mapped_column(sa.Text, nullable=False, init=False) @property def oauth_params(self) -> dict[str, Any]: return cast(dict[str, Any], json.loads(self.encrypted_oauth_params or "{}")) -class BuiltinToolProvider(Base): +class BuiltinToolProvider(TypeBase): """ This table stores the tool provider information for built-in tools for each tenant. """ @@ -75,37 +76,45 @@ class BuiltinToolProvider(Base): ) # id of the tool provider - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) name: Mapped[str] = mapped_column( - String(256), nullable=False, server_default=sa.text("'API KEY 1'::character varying") + String(256), + nullable=False, + server_default=sa.text("'API KEY 1'::character varying"), ) # id of the tenant - tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=True) + tenant_id: Mapped[str | None] = mapped_column(StringUUID, nullable=True) # who created this tool provider user_id: Mapped[str] = mapped_column(StringUUID, nullable=False) # name of the tool provider provider: Mapped[str] = mapped_column(String(256), nullable=False) # credential of the tool provider - encrypted_credentials: Mapped[str] = mapped_column(sa.Text, nullable=True) + encrypted_credentials: Mapped[str | None] = mapped_column(sa.Text, nullable=True, default=None) created_at: Mapped[datetime] = mapped_column( - sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") + sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)"), init=False ) updated_at: Mapped[datetime] = mapped_column( - sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") + sa.DateTime, + nullable=False, + server_default=sa.text("CURRENT_TIMESTAMP(0)"), + onupdate=func.current_timestamp(), + init=False, ) - is_default: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) + is_default: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false"), default=False) # credential type, e.g., "api-key", "oauth2" credential_type: Mapped[str] = mapped_column( - String(32), nullable=False, server_default=sa.text("'api-key'::character varying") + String(32), nullable=False, server_default=sa.text("'api-key'::character varying"), default="api-key" ) - expires_at: Mapped[int] = mapped_column(sa.BigInteger, nullable=False, 
server_default=sa.text("-1")) + expires_at: Mapped[int] = mapped_column(sa.BigInteger, nullable=False, server_default=sa.text("-1"), default=-1) @property def credentials(self) -> dict[str, Any]: + if not self.encrypted_credentials: + return {} return cast(dict[str, Any], json.loads(self.encrypted_credentials)) -class ApiToolProvider(Base): +class ApiToolProvider(TypeBase): """ The table stores the api providers. """ @@ -116,31 +125,43 @@ class ApiToolProvider(Base): sa.UniqueConstraint("name", "tenant_id", name="unique_api_tool_provider"), ) - id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) # name of the api provider - name = mapped_column(String(255), nullable=False, server_default=sa.text("'API KEY 1'::character varying")) + name: Mapped[str] = mapped_column( + String(255), + nullable=False, + server_default=sa.text("'API KEY 1'::character varying"), + ) # icon icon: Mapped[str] = mapped_column(String(255), nullable=False) # original schema - schema = mapped_column(sa.Text, nullable=False) + schema: Mapped[str] = mapped_column(sa.Text, nullable=False) schema_type_str: Mapped[str] = mapped_column(String(40), nullable=False) # who created this tool - user_id = mapped_column(StringUUID, nullable=False) + user_id: Mapped[str] = mapped_column(StringUUID, nullable=False) # tenant id - tenant_id = mapped_column(StringUUID, nullable=False) + tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) # description of the provider - description = mapped_column(sa.Text, nullable=False) + description: Mapped[str] = mapped_column(sa.Text, nullable=False) # json format tools - tools_str = mapped_column(sa.Text, nullable=False) + tools_str: Mapped[str] = mapped_column(sa.Text, nullable=False) # json format credentials - credentials_str = mapped_column(sa.Text, nullable=False) + credentials_str: Mapped[str] = mapped_column(sa.Text, nullable=False) # privacy policy - privacy_policy = mapped_column(String(255), nullable=True) + privacy_policy: Mapped[str | None] = mapped_column(String(255), nullable=True, default=None) # custom_disclaimer custom_disclaimer: Mapped[str] = mapped_column(sa.TEXT, default="") - created_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at: Mapped[datetime] = mapped_column( + sa.DateTime, nullable=False, server_default=func.current_timestamp(), init=False + ) + updated_at: Mapped[datetime] = mapped_column( + sa.DateTime, + nullable=False, + server_default=func.current_timestamp(), + onupdate=func.current_timestamp(), + init=False, + ) @property def schema_type(self) -> "ApiProviderSchemaType": @@ -189,7 +210,7 @@ class ToolLabelBinding(TypeBase): label_name: Mapped[str] = mapped_column(String(40), nullable=False) -class WorkflowToolProvider(Base): +class WorkflowToolProvider(TypeBase): """ The table stores the workflow providers. 
""" @@ -201,7 +222,7 @@ class WorkflowToolProvider(Base): sa.UniqueConstraint("tenant_id", "app_id", name="unique_workflow_tool_provider_app_id"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) # name of the workflow provider name: Mapped[str] = mapped_column(String(255), nullable=False) # label of the workflow provider @@ -219,15 +240,19 @@ class WorkflowToolProvider(Base): # description of the provider description: Mapped[str] = mapped_column(sa.Text, nullable=False) # parameter configuration - parameter_configuration: Mapped[str] = mapped_column(sa.Text, nullable=False, server_default="[]") + parameter_configuration: Mapped[str] = mapped_column(sa.Text, nullable=False, server_default="[]", default="[]") # privacy policy - privacy_policy: Mapped[str] = mapped_column(String(255), nullable=True, server_default="") + privacy_policy: Mapped[str | None] = mapped_column(String(255), nullable=True, server_default="", default=None) created_at: Mapped[datetime] = mapped_column( - sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") + sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)"), init=False ) updated_at: Mapped[datetime] = mapped_column( - sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") + sa.DateTime, + nullable=False, + server_default=sa.text("CURRENT_TIMESTAMP(0)"), + onupdate=func.current_timestamp(), + init=False, ) @property @@ -252,7 +277,7 @@ class WorkflowToolProvider(Base): return db.session.query(App).where(App.id == self.app_id).first() -class MCPToolProvider(Base): +class MCPToolProvider(TypeBase): """ The table stores the mcp providers. 
""" @@ -265,7 +290,7 @@ class MCPToolProvider(Base): sa.UniqueConstraint("tenant_id", "server_identifier", name="unique_mcp_provider_server_identifier"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) # name of the mcp provider name: Mapped[str] = mapped_column(String(40), nullable=False) # server identifier of the mcp provider @@ -275,27 +300,33 @@ class MCPToolProvider(Base): # hash of server_url for uniqueness check server_url_hash: Mapped[str] = mapped_column(String(64), nullable=False) # icon of the mcp provider - icon: Mapped[str] = mapped_column(String(255), nullable=True) + icon: Mapped[str | None] = mapped_column(String(255), nullable=True) # tenant id tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) # who created this tool user_id: Mapped[str] = mapped_column(StringUUID, nullable=False) # encrypted credentials - encrypted_credentials: Mapped[str] = mapped_column(sa.Text, nullable=True) + encrypted_credentials: Mapped[str | None] = mapped_column(sa.Text, nullable=True, default=None) # authed authed: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, default=False) # tools tools: Mapped[str] = mapped_column(sa.Text, nullable=False, default="[]") created_at: Mapped[datetime] = mapped_column( - sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") + sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)"), init=False ) updated_at: Mapped[datetime] = mapped_column( - sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") + sa.DateTime, + nullable=False, + server_default=sa.text("CURRENT_TIMESTAMP(0)"), + onupdate=func.current_timestamp(), + init=False, + ) + timeout: Mapped[float] = mapped_column(sa.Float, nullable=False, server_default=sa.text("30"), default=30.0) + sse_read_timeout: Mapped[float] = mapped_column( + sa.Float, nullable=False, server_default=sa.text("300"), default=300.0 ) - timeout: Mapped[float] = mapped_column(sa.Float, nullable=False, server_default=sa.text("30")) - sse_read_timeout: Mapped[float] = mapped_column(sa.Float, nullable=False, server_default=sa.text("300")) # encrypted headers for MCP server requests - encrypted_headers: Mapped[str | None] = mapped_column(sa.Text, nullable=True) + encrypted_headers: Mapped[str | None] = mapped_column(sa.Text, nullable=True, default=None) def load_user(self) -> Account | None: return db.session.query(Account).where(Account.id == self.user_id).first() @@ -306,9 +337,11 @@ class MCPToolProvider(Base): @property def credentials(self) -> dict[str, Any]: + if not self.encrypted_credentials: + return {} try: return cast(dict[str, Any], json.loads(self.encrypted_credentials)) or {} - except Exception: + except json.JSONDecodeError: return {} @property @@ -321,6 +354,7 @@ class MCPToolProvider(Base): def provider_icon(self) -> Mapping[str, str] | str: from core.file import helpers as file_helpers + assert self.icon try: return json.loads(self.icon) except json.JSONDecodeError: @@ -419,7 +453,7 @@ class MCPToolProvider(Base): return encrypter.decrypt(self.credentials) -class ToolModelInvoke(Base): +class ToolModelInvoke(TypeBase): """ store the invoke logs from tool invoke """ @@ -427,37 +461,47 @@ class ToolModelInvoke(Base): __tablename__ = "tool_model_invokes" __table_args__ = (sa.PrimaryKeyConstraint("id", name="tool_model_invoke_pkey"),) - id = mapped_column(StringUUID, 
server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) # who invoke this tool - user_id = mapped_column(StringUUID, nullable=False) + user_id: Mapped[str] = mapped_column(StringUUID, nullable=False) # tenant id - tenant_id = mapped_column(StringUUID, nullable=False) + tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) # provider provider: Mapped[str] = mapped_column(String(255), nullable=False) # type - tool_type = mapped_column(String(40), nullable=False) + tool_type: Mapped[str] = mapped_column(String(40), nullable=False) # tool name - tool_name = mapped_column(String(128), nullable=False) + tool_name: Mapped[str] = mapped_column(String(128), nullable=False) # invoke parameters - model_parameters = mapped_column(sa.Text, nullable=False) + model_parameters: Mapped[str] = mapped_column(sa.Text, nullable=False) # prompt messages - prompt_messages = mapped_column(sa.Text, nullable=False) + prompt_messages: Mapped[str] = mapped_column(sa.Text, nullable=False) # invoke response - model_response = mapped_column(sa.Text, nullable=False) + model_response: Mapped[str] = mapped_column(sa.Text, nullable=False) prompt_tokens: Mapped[int] = mapped_column(sa.Integer, nullable=False, server_default=sa.text("0")) answer_tokens: Mapped[int] = mapped_column(sa.Integer, nullable=False, server_default=sa.text("0")) - answer_unit_price = mapped_column(sa.Numeric(10, 4), nullable=False) - answer_price_unit = mapped_column(sa.Numeric(10, 7), nullable=False, server_default=sa.text("0.001")) - provider_response_latency = mapped_column(sa.Float, nullable=False, server_default=sa.text("0")) - total_price = mapped_column(sa.Numeric(10, 7)) + answer_unit_price: Mapped[Decimal] = mapped_column(sa.Numeric(10, 4), nullable=False) + answer_price_unit: Mapped[Decimal] = mapped_column( + sa.Numeric(10, 7), nullable=False, server_default=sa.text("0.001") + ) + provider_response_latency: Mapped[float] = mapped_column(sa.Float, nullable=False, server_default=sa.text("0")) + total_price: Mapped[Decimal | None] = mapped_column(sa.Numeric(10, 7)) currency: Mapped[str] = mapped_column(String(255), nullable=False) - created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at: Mapped[datetime] = mapped_column( + sa.DateTime, nullable=False, server_default=func.current_timestamp(), init=False + ) + updated_at: Mapped[datetime] = mapped_column( + sa.DateTime, + nullable=False, + server_default=func.current_timestamp(), + onupdate=func.current_timestamp(), + init=False, + ) @deprecated -class ToolConversationVariables(Base): +class ToolConversationVariables(TypeBase): """ store the conversation variables from tool invoke """ @@ -470,18 +514,26 @@ class ToolConversationVariables(Base): sa.Index("conversation_id_idx", "conversation_id"), ) - id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) # conversation user id - user_id = mapped_column(StringUUID, nullable=False) + user_id: Mapped[str] = mapped_column(StringUUID, nullable=False) # tenant id - tenant_id = mapped_column(StringUUID, nullable=False) + tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) # conversation id - conversation_id = mapped_column(StringUUID, nullable=False) + 
conversation_id: Mapped[str] = mapped_column(StringUUID, nullable=False) # variables pool - variables_str = mapped_column(sa.Text, nullable=False) + variables_str: Mapped[str] = mapped_column(sa.Text, nullable=False) - created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at: Mapped[datetime] = mapped_column( + sa.DateTime, nullable=False, server_default=func.current_timestamp(), init=False + ) + updated_at: Mapped[datetime] = mapped_column( + sa.DateTime, + nullable=False, + server_default=func.current_timestamp(), + onupdate=func.current_timestamp(), + init=False, + ) @property def variables(self): @@ -519,7 +571,7 @@ class ToolFile(TypeBase): @deprecated -class DeprecatedPublishedAppTool(Base): +class DeprecatedPublishedAppTool(TypeBase): """ The table stores the apps published as a tool for each person. """ @@ -530,26 +582,34 @@ class DeprecatedPublishedAppTool(Base): sa.UniqueConstraint("app_id", "user_id", name="unique_published_app_tool"), ) - id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) # id of the app - app_id = mapped_column(StringUUID, ForeignKey("apps.id"), nullable=False) + app_id: Mapped[str] = mapped_column(StringUUID, ForeignKey("apps.id"), nullable=False) user_id: Mapped[str] = mapped_column(StringUUID, nullable=False) # who published this tool - description = mapped_column(sa.Text, nullable=False) + description: Mapped[str] = mapped_column(sa.Text, nullable=False) # llm_description of the tool, for LLM - llm_description = mapped_column(sa.Text, nullable=False) + llm_description: Mapped[str] = mapped_column(sa.Text, nullable=False) # query description, query will be seem as a parameter of the tool, # to describe this parameter to llm, we need this field - query_description = mapped_column(sa.Text, nullable=False) + query_description: Mapped[str] = mapped_column(sa.Text, nullable=False) # query name, the name of the query parameter - query_name = mapped_column(String(40), nullable=False) + query_name: Mapped[str] = mapped_column(String(40), nullable=False) # name of the tool provider - tool_name = mapped_column(String(40), nullable=False) + tool_name: Mapped[str] = mapped_column(String(40), nullable=False) # author - author = mapped_column(String(40), nullable=False) - created_at = mapped_column(sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)")) - updated_at = mapped_column(sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)")) + author: Mapped[str] = mapped_column(String(40), nullable=False) + created_at: Mapped[datetime] = mapped_column( + sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)"), init=False + ) + updated_at: Mapped[datetime] = mapped_column( + sa.DateTime, + nullable=False, + server_default=sa.text("CURRENT_TIMESTAMP(0)"), + onupdate=func.current_timestamp(), + init=False, + ) @property def description_i18n(self) -> "I18nObject": diff --git a/api/models/web.py b/api/models/web.py index 74f99e187b..7df5bd6e87 100644 --- a/api/models/web.py +++ b/api/models/web.py @@ -4,46 +4,58 @@ import sqlalchemy as sa from sqlalchemy import DateTime, String, func from sqlalchemy.orm import Mapped, mapped_column -from models.base import Base +from models.base import TypeBase from .engine import db from .model import 
Message from .types import StringUUID -class SavedMessage(Base): +class SavedMessage(TypeBase): __tablename__ = "saved_messages" __table_args__ = ( sa.PrimaryKeyConstraint("id", name="saved_message_pkey"), sa.Index("saved_message_message_idx", "app_id", "message_id", "created_by_role", "created_by"), ) - id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) - app_id = mapped_column(StringUUID, nullable=False) - message_id = mapped_column(StringUUID, nullable=False) - created_by_role = mapped_column( + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) + app_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + message_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + created_by_role: Mapped[str] = mapped_column( String(255), nullable=False, server_default=sa.text("'end_user'::character varying") ) - created_by = mapped_column(StringUUID, nullable=False) - created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) + created_by: Mapped[str] = mapped_column(StringUUID, nullable=False) + created_at: Mapped[datetime] = mapped_column( + DateTime, + nullable=False, + server_default=func.current_timestamp(), + init=False, + ) @property def message(self): return db.session.query(Message).where(Message.id == self.message_id).first() -class PinnedConversation(Base): +class PinnedConversation(TypeBase): __tablename__ = "pinned_conversations" __table_args__ = ( sa.PrimaryKeyConstraint("id", name="pinned_conversation_pkey"), sa.Index("pinned_conversation_conversation_idx", "app_id", "conversation_id", "created_by_role", "created_by"), ) - id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) - app_id = mapped_column(StringUUID, nullable=False) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), init=False) + app_id: Mapped[str] = mapped_column(StringUUID, nullable=False) conversation_id: Mapped[str] = mapped_column(StringUUID) - created_by_role = mapped_column( - String(255), nullable=False, server_default=sa.text("'end_user'::character varying") + created_by_role: Mapped[str] = mapped_column( + String(255), + nullable=False, + server_default=sa.text("'end_user'::character varying"), + ) + created_by: Mapped[str] = mapped_column(StringUUID, nullable=False) + created_at: Mapped[datetime] = mapped_column( + DateTime, + nullable=False, + server_default=func.current_timestamp(), + init=False, ) - created_by = mapped_column(StringUUID, nullable=False) - created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) diff --git a/api/services/auth/api_key_auth_service.py b/api/services/auth/api_key_auth_service.py index 055cf65816..56aaf407ee 100644 --- a/api/services/auth/api_key_auth_service.py +++ b/api/services/auth/api_key_auth_service.py @@ -26,10 +26,9 @@ class ApiKeyAuthService: api_key = encrypter.encrypt_token(tenant_id, args["credentials"]["config"]["api_key"]) args["credentials"]["config"]["api_key"] = api_key - data_source_api_key_binding = DataSourceApiKeyAuthBinding() - data_source_api_key_binding.tenant_id = tenant_id - data_source_api_key_binding.category = args["category"] - data_source_api_key_binding.provider = args["provider"] + data_source_api_key_binding = DataSourceApiKeyAuthBinding( + tenant_id=tenant_id, category=args["category"], provider=args["provider"] + ) data_source_api_key_binding.credentials = 
json.dumps(args["credentials"], ensure_ascii=False) db.session.add(data_source_api_key_binding) db.session.commit() @@ -48,6 +47,8 @@ class ApiKeyAuthService: ) if not data_source_api_key_bindings: return None + if not data_source_api_key_bindings.credentials: + return None credentials = json.loads(data_source_api_key_bindings.credentials) return credentials diff --git a/api/services/tools/api_tools_manage_service.py b/api/services/tools/api_tools_manage_service.py index 2c0c63f634..bb024cc846 100644 --- a/api/services/tools/api_tools_manage_service.py +++ b/api/services/tools/api_tools_manage_service.py @@ -148,7 +148,7 @@ class ApiToolManageService: description=extra_info.get("description", ""), schema_type_str=schema_type, tools_str=json.dumps(jsonable_encoder(tool_bundles)), - credentials_str={}, + credentials_str="{}", privacy_policy=privacy_policy, custom_disclaimer=custom_disclaimer, ) diff --git a/api/services/tools/builtin_tools_manage_service.py b/api/services/tools/builtin_tools_manage_service.py index cab4a5c6ab..b5dcec17d0 100644 --- a/api/services/tools/builtin_tools_manage_service.py +++ b/api/services/tools/builtin_tools_manage_service.py @@ -683,7 +683,7 @@ class BuiltinToolManageService: cache=NoOpProviderCredentialCache(), ) original_params = encrypter.decrypt(custom_client_params.oauth_params) - new_params: dict = { + new_params = { key: value if value != HIDDEN_VALUE else original_params.get(key, UNKNOWN_VALUE) for key, value in client_params.items() } diff --git a/api/services/tools/mcp_tools_manage_service.py b/api/services/tools/mcp_tools_manage_service.py index 605ad8379b..54133d3801 100644 --- a/api/services/tools/mcp_tools_manage_service.py +++ b/api/services/tools/mcp_tools_manage_service.py @@ -188,6 +188,8 @@ class MCPToolManageService: raise user = mcp_provider.load_user() + if not mcp_provider.icon: + raise ValueError("MCP provider icon is required") return ToolProviderApiEntity( id=mcp_provider.id, name=mcp_provider.name, diff --git a/api/services/tools/tools_transform_service.py b/api/services/tools/tools_transform_service.py index 81b4d6993a..b7850ea150 100644 --- a/api/services/tools/tools_transform_service.py +++ b/api/services/tools/tools_transform_service.py @@ -152,7 +152,8 @@ class ToolTransformService: if decrypt_credentials: credentials = db_provider.credentials - + if not db_provider.tenant_id: + raise ValueError(f"Required tenant_id is missing for BuiltinToolProvider with id {db_provider.id}") # init tool configuration encrypter, _ = create_provider_encrypter( tenant_id=db_provider.tenant_id, From f86b6658c9a2dc31904b628250ad4e37946faa8a Mon Sep 17 00:00:00 2001 From: yangzheli <43645580+yangzheli@users.noreply.github.com> Date: Mon, 13 Oct 2025 10:22:34 +0800 Subject: [PATCH 41/49] perf(web): split constant files to improve web performance (#26794) --- .../components/workflow/nodes/components.ts | 94 ++++++++++++++++++ .../components/workflow/nodes/constants.ts | 96 ------------------- web/app/components/workflow/nodes/index.tsx | 2 +- 3 files changed, 95 insertions(+), 97 deletions(-) create mode 100644 web/app/components/workflow/nodes/components.ts diff --git a/web/app/components/workflow/nodes/components.ts b/web/app/components/workflow/nodes/components.ts new file mode 100644 index 0000000000..cdf3a21598 --- /dev/null +++ b/web/app/components/workflow/nodes/components.ts @@ -0,0 +1,94 @@ +import type { ComponentType } from 'react' +import { BlockEnum } from '../types' +import StartNode from './start/node' +import StartPanel from 
From f86b6658c9a2dc31904b628250ad4e37946faa8a Mon Sep 17 00:00:00 2001
From: yangzheli <43645580+yangzheli@users.noreply.github.com>
Date: Mon, 13 Oct 2025 10:22:34 +0800
Subject: [PATCH 41/49] perf(web): split constant files to improve web performance (#26794)

---
 .../components/workflow/nodes/components.ts | 94 ++++++++++++++++++
 .../components/workflow/nodes/constants.ts  | 96 -------------------
 web/app/components/workflow/nodes/index.tsx |  2 +-
 3 files changed, 95 insertions(+), 97 deletions(-)
 create mode 100644 web/app/components/workflow/nodes/components.ts

diff --git a/web/app/components/workflow/nodes/components.ts b/web/app/components/workflow/nodes/components.ts
new file mode 100644
index 0000000000..cdf3a21598
--- /dev/null
+++ b/web/app/components/workflow/nodes/components.ts
@@ -0,0 +1,94 @@
+import type { ComponentType } from 'react'
+import { BlockEnum } from '../types'
+import StartNode from './start/node'
+import StartPanel from './start/panel'
+import EndNode from './end/node'
+import EndPanel from './end/panel'
+import AnswerNode from './answer/node'
+import AnswerPanel from './answer/panel'
+import LLMNode from './llm/node'
+import LLMPanel from './llm/panel'
+import KnowledgeRetrievalNode from './knowledge-retrieval/node'
+import KnowledgeRetrievalPanel from './knowledge-retrieval/panel'
+import QuestionClassifierNode from './question-classifier/node'
+import QuestionClassifierPanel from './question-classifier/panel'
+import IfElseNode from './if-else/node'
+import IfElsePanel from './if-else/panel'
+import CodeNode from './code/node'
+import CodePanel from './code/panel'
+import TemplateTransformNode from './template-transform/node'
+import TemplateTransformPanel from './template-transform/panel'
+import HttpNode from './http/node'
+import HttpPanel from './http/panel'
+import ToolNode from './tool/node'
+import ToolPanel from './tool/panel'
+import VariableAssignerNode from './variable-assigner/node'
+import VariableAssignerPanel from './variable-assigner/panel'
+import AssignerNode from './assigner/node'
+import AssignerPanel from './assigner/panel'
+import ParameterExtractorNode from './parameter-extractor/node'
+import ParameterExtractorPanel from './parameter-extractor/panel'
+import IterationNode from './iteration/node'
+import IterationPanel from './iteration/panel'
+import LoopNode from './loop/node'
+import LoopPanel from './loop/panel'
+import DocExtractorNode from './document-extractor/node'
+import DocExtractorPanel from './document-extractor/panel'
+import ListFilterNode from './list-operator/node'
+import ListFilterPanel from './list-operator/panel'
+import AgentNode from './agent/node'
+import AgentPanel from './agent/panel'
+import DataSourceNode from './data-source/node'
+import DataSourcePanel from './data-source/panel'
+import KnowledgeBaseNode from './knowledge-base/node'
+import KnowledgeBasePanel from './knowledge-base/panel'
+
+export const NodeComponentMap: Record<string, ComponentType<any>> = {
+  [BlockEnum.Start]: StartNode,
+  [BlockEnum.End]: EndNode,
+  [BlockEnum.Answer]: AnswerNode,
+  [BlockEnum.LLM]: LLMNode,
+  [BlockEnum.KnowledgeRetrieval]: KnowledgeRetrievalNode,
+  [BlockEnum.QuestionClassifier]: QuestionClassifierNode,
+  [BlockEnum.IfElse]: IfElseNode,
+  [BlockEnum.Code]: CodeNode,
+  [BlockEnum.TemplateTransform]: TemplateTransformNode,
+  [BlockEnum.HttpRequest]: HttpNode,
+  [BlockEnum.Tool]: ToolNode,
+  [BlockEnum.VariableAssigner]: VariableAssignerNode,
+  [BlockEnum.Assigner]: AssignerNode,
+  [BlockEnum.VariableAggregator]: VariableAssignerNode,
+  [BlockEnum.ParameterExtractor]: ParameterExtractorNode,
+  [BlockEnum.Iteration]: IterationNode,
+  [BlockEnum.Loop]: LoopNode,
+  [BlockEnum.DocExtractor]: DocExtractorNode,
+  [BlockEnum.ListFilter]: ListFilterNode,
+  [BlockEnum.Agent]: AgentNode,
+  [BlockEnum.DataSource]: DataSourceNode,
+  [BlockEnum.KnowledgeBase]: KnowledgeBaseNode,
+}
+
+export const PanelComponentMap: Record<string, ComponentType<any>> = {
+  [BlockEnum.Start]: StartPanel,
+  [BlockEnum.End]: EndPanel,
+  [BlockEnum.Answer]: AnswerPanel,
+  [BlockEnum.LLM]: LLMPanel,
+  [BlockEnum.KnowledgeRetrieval]: KnowledgeRetrievalPanel,
+  [BlockEnum.QuestionClassifier]: QuestionClassifierPanel,
+  [BlockEnum.IfElse]: IfElsePanel,
+  [BlockEnum.Code]: CodePanel,
+  [BlockEnum.TemplateTransform]: TemplateTransformPanel,
+  [BlockEnum.HttpRequest]: HttpPanel,
+  [BlockEnum.Tool]: ToolPanel,
+  [BlockEnum.VariableAssigner]: VariableAssignerPanel,
+  [BlockEnum.VariableAggregator]: VariableAssignerPanel,
+  [BlockEnum.Assigner]: AssignerPanel,
+  [BlockEnum.ParameterExtractor]: ParameterExtractorPanel,
+  [BlockEnum.Iteration]: IterationPanel,
+  [BlockEnum.Loop]: LoopPanel,
+  [BlockEnum.DocExtractor]: DocExtractorPanel,
+  [BlockEnum.ListFilter]: ListFilterPanel,
+  [BlockEnum.Agent]: AgentPanel,
+  [BlockEnum.DataSource]: DataSourcePanel,
+  [BlockEnum.KnowledgeBase]: KnowledgeBasePanel,
+}
diff --git a/web/app/components/workflow/nodes/constants.ts b/web/app/components/workflow/nodes/constants.ts
index 3efc7189ed..78684577f2 100644
--- a/web/app/components/workflow/nodes/constants.ts
+++ b/web/app/components/workflow/nodes/constants.ts
@@ -1,101 +1,5 @@
-import type { ComponentType } from 'react'
-import { BlockEnum } from '../types'
-import StartNode from './start/node'
-import StartPanel from './start/panel'
-import EndNode from './end/node'
-import EndPanel from './end/panel'
-import AnswerNode from './answer/node'
-import AnswerPanel from './answer/panel'
-import LLMNode from './llm/node'
-import LLMPanel from './llm/panel'
-import KnowledgeRetrievalNode from './knowledge-retrieval/node'
-import KnowledgeRetrievalPanel from './knowledge-retrieval/panel'
-import QuestionClassifierNode from './question-classifier/node'
-import QuestionClassifierPanel from './question-classifier/panel'
-import IfElseNode from './if-else/node'
-import IfElsePanel from './if-else/panel'
-import CodeNode from './code/node'
-import CodePanel from './code/panel'
-import TemplateTransformNode from './template-transform/node'
-import TemplateTransformPanel from './template-transform/panel'
-import HttpNode from './http/node'
-import HttpPanel from './http/panel'
-import ToolNode from './tool/node'
-import ToolPanel from './tool/panel'
-import VariableAssignerNode from './variable-assigner/node'
-import VariableAssignerPanel from './variable-assigner/panel'
-import AssignerNode from './assigner/node'
-import AssignerPanel from './assigner/panel'
-import ParameterExtractorNode from './parameter-extractor/node'
-import ParameterExtractorPanel from './parameter-extractor/panel'
-import IterationNode from './iteration/node'
-import IterationPanel from './iteration/panel'
-import LoopNode from './loop/node'
-import LoopPanel from './loop/panel'
-import DocExtractorNode from './document-extractor/node'
-import DocExtractorPanel from './document-extractor/panel'
-import ListFilterNode from './list-operator/node'
-import ListFilterPanel from './list-operator/panel'
-import AgentNode from './agent/node'
-import AgentPanel from './agent/panel'
-import DataSourceNode from './data-source/node'
-import DataSourcePanel from './data-source/panel'
-import KnowledgeBaseNode from './knowledge-base/node'
-import KnowledgeBasePanel from './knowledge-base/panel'
 import { TransferMethod } from '@/types/app'
 
-export const NodeComponentMap: Record<string, ComponentType<any>> = {
-  [BlockEnum.Start]: StartNode,
-  [BlockEnum.End]: EndNode,
-  [BlockEnum.Answer]: AnswerNode,
-  [BlockEnum.LLM]: LLMNode,
-  [BlockEnum.KnowledgeRetrieval]: KnowledgeRetrievalNode,
-  [BlockEnum.QuestionClassifier]: QuestionClassifierNode,
-  [BlockEnum.IfElse]: IfElseNode,
-  [BlockEnum.Code]: CodeNode,
-  [BlockEnum.TemplateTransform]: TemplateTransformNode,
-  [BlockEnum.HttpRequest]: HttpNode,
-  [BlockEnum.Tool]: ToolNode,
-  [BlockEnum.VariableAssigner]: VariableAssignerNode,
-  [BlockEnum.Assigner]: AssignerNode,
-  [BlockEnum.VariableAggregator]: VariableAssignerNode,
-  [BlockEnum.ParameterExtractor]: ParameterExtractorNode,
-  [BlockEnum.Iteration]: IterationNode,
-  [BlockEnum.Loop]: LoopNode,
-  [BlockEnum.DocExtractor]: DocExtractorNode,
-  [BlockEnum.ListFilter]: ListFilterNode,
-  [BlockEnum.Agent]: AgentNode,
-  [BlockEnum.DataSource]: DataSourceNode,
-  [BlockEnum.KnowledgeBase]: KnowledgeBaseNode,
-}
-
-export const PanelComponentMap: Record<string, ComponentType<any>> = {
-  [BlockEnum.Start]: StartPanel,
-  [BlockEnum.End]: EndPanel,
-  [BlockEnum.Answer]: AnswerPanel,
-  [BlockEnum.LLM]: LLMPanel,
-  [BlockEnum.KnowledgeRetrieval]: KnowledgeRetrievalPanel,
-  [BlockEnum.QuestionClassifier]: QuestionClassifierPanel,
-  [BlockEnum.IfElse]: IfElsePanel,
-  [BlockEnum.Code]: CodePanel,
-  [BlockEnum.TemplateTransform]: TemplateTransformPanel,
-  [BlockEnum.HttpRequest]: HttpPanel,
-  [BlockEnum.Tool]: ToolPanel,
-  [BlockEnum.VariableAssigner]: VariableAssignerPanel,
-  [BlockEnum.VariableAggregator]: VariableAssignerPanel,
-  [BlockEnum.Assigner]: AssignerPanel,
-  [BlockEnum.ParameterExtractor]: ParameterExtractorPanel,
-  [BlockEnum.Iteration]: IterationPanel,
-  [BlockEnum.Loop]: LoopPanel,
-  [BlockEnum.DocExtractor]: DocExtractorPanel,
-  [BlockEnum.ListFilter]: ListFilterPanel,
-  [BlockEnum.Agent]: AgentPanel,
-  [BlockEnum.DataSource]: DataSourcePanel,
-  [BlockEnum.KnowledgeBase]: KnowledgeBasePanel,
-}
-
-export const CUSTOM_NODE_TYPE = 'custom'
-
 export const FILE_TYPE_OPTIONS = [
   { value: 'image', i18nKey: 'image' },
   { value: 'document', i18nKey: 'doc' },
diff --git a/web/app/components/workflow/nodes/index.tsx b/web/app/components/workflow/nodes/index.tsx
index 8458051da2..ba880b398b 100644
--- a/web/app/components/workflow/nodes/index.tsx
+++ b/web/app/components/workflow/nodes/index.tsx
@@ -8,7 +8,7 @@ import { CUSTOM_NODE } from '../constants'
 import {
   NodeComponentMap,
   PanelComponentMap,
-} from './constants'
+} from './components'
 import BaseNode from './_base/node'
 import BasePanel from './_base/components/workflow-panel'
From d299e75e1bee8b67dd804a0f2afb13cd861a2921 Mon Sep 17 00:00:00 2001
From: Guangdong Liu
Date: Mon, 13 Oct 2025 10:22:59 +0800
Subject: [PATCH 42/49] refactor: use dynamic max characters for chunking in extractors (#26782)

---
 .../rag/extractor/unstructured/unstructured_doc_extractor.py      | 4 +++-
 .../rag/extractor/unstructured/unstructured_eml_extractor.py      | 4 +++-
 .../rag/extractor/unstructured/unstructured_epub_extractor.py     | 4 +++-
 .../extractor/unstructured/unstructured_markdown_extractor.py     | 4 +++-
 .../rag/extractor/unstructured/unstructured_msg_extractor.py      | 4 +++-
 .../rag/extractor/unstructured/unstructured_xml_extractor.py      | 4 +++-
 6 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/api/core/rag/extractor/unstructured/unstructured_doc_extractor.py b/api/core/rag/extractor/unstructured/unstructured_doc_extractor.py
index 5199208f70..7dd8beaa46 100644
--- a/api/core/rag/extractor/unstructured/unstructured_doc_extractor.py
+++ b/api/core/rag/extractor/unstructured/unstructured_doc_extractor.py
@@ -1,6 +1,7 @@
 import logging
 import os
 
+from configs import dify_config
 from core.rag.extractor.extractor_base import BaseExtractor
 from core.rag.models.document import Document
 
@@ -49,7 +50,8 @@ class UnstructuredWordExtractor(BaseExtractor):
 
         from unstructured.chunking.title import chunk_by_title
 
-        chunks = chunk_by_title(elements, max_characters=2000, combine_text_under_n_chars=2000)
+        max_characters = dify_config.INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH
+        chunks = chunk_by_title(elements, max_characters=max_characters, combine_text_under_n_chars=max_characters)
         documents = []
         for chunk in chunks:
             text = chunk.text.strip()
diff --git a/api/core/rag/extractor/unstructured/unstructured_eml_extractor.py b/api/core/rag/extractor/unstructured/unstructured_eml_extractor.py
index ad04bd0bd1..d97d4c3a48 100644
--- a/api/core/rag/extractor/unstructured/unstructured_eml_extractor.py
+++ b/api/core/rag/extractor/unstructured/unstructured_eml_extractor.py
@@ -4,6 +4,7 @@ import logging
 
 from bs4 import BeautifulSoup
 
+from configs import dify_config
 from core.rag.extractor.extractor_base import BaseExtractor
 from core.rag.models.document import Document
 
@@ -46,7 +47,8 @@ class UnstructuredEmailExtractor(BaseExtractor):
 
         from unstructured.chunking.title import chunk_by_title
 
-        chunks = chunk_by_title(elements, max_characters=2000, combine_text_under_n_chars=2000)
+        max_characters = dify_config.INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH
+        chunks = chunk_by_title(elements, max_characters=max_characters, combine_text_under_n_chars=max_characters)
         documents = []
         for chunk in chunks:
             text = chunk.text.strip()
diff --git a/api/core/rag/extractor/unstructured/unstructured_epub_extractor.py b/api/core/rag/extractor/unstructured/unstructured_epub_extractor.py
index fc14ee6275..3061d957ac 100644
--- a/api/core/rag/extractor/unstructured/unstructured_epub_extractor.py
+++ b/api/core/rag/extractor/unstructured/unstructured_epub_extractor.py
@@ -2,6 +2,7 @@ import logging
 
 import pypandoc  # type: ignore
 
+from configs import dify_config
 from core.rag.extractor.extractor_base import BaseExtractor
 from core.rag.models.document import Document
 
@@ -40,7 +41,8 @@ class UnstructuredEpubExtractor(BaseExtractor):
 
         from unstructured.chunking.title import chunk_by_title
 
-        chunks = chunk_by_title(elements, max_characters=2000, combine_text_under_n_chars=2000)
+        max_characters = dify_config.INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH
+        chunks = chunk_by_title(elements, max_characters=max_characters, combine_text_under_n_chars=max_characters)
         documents = []
         for chunk in chunks:
             text = chunk.text.strip()
diff --git a/api/core/rag/extractor/unstructured/unstructured_markdown_extractor.py b/api/core/rag/extractor/unstructured/unstructured_markdown_extractor.py
index 23030d7739..b6d8c47111 100644
--- a/api/core/rag/extractor/unstructured/unstructured_markdown_extractor.py
+++ b/api/core/rag/extractor/unstructured/unstructured_markdown_extractor.py
@@ -1,5 +1,6 @@
 import logging
 
+from configs import dify_config
 from core.rag.extractor.extractor_base import BaseExtractor
 from core.rag.models.document import Document
 
@@ -32,7 +33,8 @@ class UnstructuredMarkdownExtractor(BaseExtractor):
         elements = partition_md(filename=self._file_path)
         from unstructured.chunking.title import chunk_by_title
 
-        chunks = chunk_by_title(elements, max_characters=2000, combine_text_under_n_chars=2000)
+        max_characters = dify_config.INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH
+        chunks = chunk_by_title(elements, max_characters=max_characters, combine_text_under_n_chars=max_characters)
         documents = []
         for chunk in chunks:
             text = chunk.text.strip()
diff --git a/api/core/rag/extractor/unstructured/unstructured_msg_extractor.py b/api/core/rag/extractor/unstructured/unstructured_msg_extractor.py
index f29e639d1b..ae60fc7981 100644
--- a/api/core/rag/extractor/unstructured/unstructured_msg_extractor.py
+++ b/api/core/rag/extractor/unstructured/unstructured_msg_extractor.py
@@ -1,5 +1,6 @@
 import logging
 
+from configs import dify_config
 from core.rag.extractor.extractor_base import BaseExtractor
 from core.rag.models.document import Document
 
@@ -31,7 +32,8 @@ class UnstructuredMsgExtractor(BaseExtractor):
         elements = partition_msg(filename=self._file_path)
         from unstructured.chunking.title import chunk_by_title
 
-        chunks = chunk_by_title(elements, max_characters=2000, combine_text_under_n_chars=2000)
+        max_characters = dify_config.INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH
+        chunks = chunk_by_title(elements, max_characters=max_characters, combine_text_under_n_chars=max_characters)
         documents = []
         for chunk in chunks:
             text = chunk.text.strip()
diff --git a/api/core/rag/extractor/unstructured/unstructured_xml_extractor.py b/api/core/rag/extractor/unstructured/unstructured_xml_extractor.py
index d75e166f1b..2d4846d85e 100644
--- a/api/core/rag/extractor/unstructured/unstructured_xml_extractor.py
+++ b/api/core/rag/extractor/unstructured/unstructured_xml_extractor.py
@@ -1,5 +1,6 @@
 import logging
 
+from configs import dify_config
 from core.rag.extractor.extractor_base import BaseExtractor
 from core.rag.models.document import Document
 
@@ -32,7 +33,8 @@ class UnstructuredXmlExtractor(BaseExtractor):
 
         from unstructured.chunking.title import chunk_by_title
 
-        chunks = chunk_by_title(elements, max_characters=2000, combine_text_under_n_chars=2000)
+        max_characters = dify_config.INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH
+        chunks = chunk_by_title(elements, max_characters=max_characters, combine_text_under_n_chars=max_characters)
         documents = []
         for chunk in chunks:
             text = chunk.text.strip()
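The patch above replaces a hard-coded 2000-character limit, repeated across six extractors, with the shared INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH setting. A rough stdlib-only sketch of the same idea follows; the Settings class, its default value, and the chunking helper are all invented stand-ins (the real code delegates to unstructured's chunk_by_title):

# Sketch: one shared, configurable chunk size instead of magic numbers (invented names).
from dataclasses import dataclass


@dataclass(frozen=True)
class Settings:
    # stands in for dify_config.INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH; real default may differ
    indexing_max_segmentation_tokens_length: int = 4000


def chunk_elements(elements: list[str], settings: Settings) -> list[list[str]]:
    """Group extracted elements into chunks no longer than the configured size."""
    max_characters = settings.indexing_max_segmentation_tokens_length
    chunks: list[list[str]] = []
    current: list[str] = []
    current_len = 0
    for element in elements:
        # start a new chunk once adding this element would exceed the limit
        if current and current_len + len(element) > max_characters:
            chunks.append(current)
            current, current_len = [], 0
        current.append(element)
        current_len += len(element)
    if current:
        chunks.append(current)
    return chunks


# Usage: every extractor reads the same setting, so tuning happens in one place.
print(chunk_elements(["a" * 3000, "b" * 2000], Settings()))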
tool") -class DatasourceInvokeFrom(Enum): +class DatasourceInvokeFrom(StrEnum): """ Enum class for datasource invoke """ diff --git a/api/core/model_runtime/entities/provider_entities.py b/api/core/model_runtime/entities/provider_entities.py index 831fb9d4db..0508116962 100644 --- a/api/core/model_runtime/entities/provider_entities.py +++ b/api/core/model_runtime/entities/provider_entities.py @@ -1,5 +1,5 @@ from collections.abc import Sequence -from enum import Enum, StrEnum, auto +from enum import StrEnum, auto from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator @@ -7,7 +7,7 @@ from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import AIModelEntity, ModelType -class ConfigurateMethod(Enum): +class ConfigurateMethod(StrEnum): """ Enum class for configurate method of provider model. """ diff --git a/api/core/rag/datasource/retrieval_service.py b/api/core/rag/datasource/retrieval_service.py index 6e9e2b4527..2290de19bc 100644 --- a/api/core/rag/datasource/retrieval_service.py +++ b/api/core/rag/datasource/retrieval_service.py @@ -34,7 +34,7 @@ class RetrievalService: @classmethod def retrieve( cls, - retrieval_method: str, + retrieval_method: RetrievalMethod, dataset_id: str, query: str, top_k: int, @@ -56,7 +56,7 @@ class RetrievalService: # Optimize multithreading with thread pools with ThreadPoolExecutor(max_workers=dify_config.RETRIEVAL_SERVICE_EXECUTORS) as executor: # type: ignore futures = [] - if retrieval_method == "keyword_search": + if retrieval_method == RetrievalMethod.KEYWORD_SEARCH: futures.append( executor.submit( cls.keyword_search, @@ -220,7 +220,7 @@ class RetrievalService: score_threshold: float | None, reranking_model: dict | None, all_documents: list, - retrieval_method: str, + retrieval_method: RetrievalMethod, exceptions: list, document_ids_filter: list[str] | None = None, ): diff --git a/api/core/rag/entities/event.py b/api/core/rag/entities/event.py index a61b17ddb8..2d8d4060dd 100644 --- a/api/core/rag/entities/event.py +++ b/api/core/rag/entities/event.py @@ -1,11 +1,11 @@ from collections.abc import Mapping -from enum import Enum +from enum import StrEnum from typing import Any from pydantic import BaseModel, Field -class DatasourceStreamEvent(Enum): +class DatasourceStreamEvent(StrEnum): """ Datasource Stream event """ diff --git a/api/core/rag/index_processor/index_processor_base.py b/api/core/rag/index_processor/index_processor_base.py index 05cffb5a55..d4eff53204 100644 --- a/api/core/rag/index_processor/index_processor_base.py +++ b/api/core/rag/index_processor/index_processor_base.py @@ -7,6 +7,7 @@ from typing import TYPE_CHECKING, Any, Optional from configs import dify_config from core.rag.extractor.entity.extract_setting import ExtractSetting from core.rag.models.document import Document +from core.rag.retrieval.retrieval_methods import RetrievalMethod from core.rag.splitter.fixed_text_splitter import ( EnhanceRecursiveCharacterTextSplitter, FixedRecursiveCharacterTextSplitter, @@ -49,7 +50,7 @@ class BaseIndexProcessor(ABC): @abstractmethod def retrieve( self, - retrieval_method: str, + retrieval_method: RetrievalMethod, query: str, dataset: Dataset, top_k: int, diff --git a/api/core/rag/index_processor/processor/paragraph_index_processor.py b/api/core/rag/index_processor/processor/paragraph_index_processor.py index 4fcffbcc77..5e5fea7ea9 100644 --- a/api/core/rag/index_processor/processor/paragraph_index_processor.py +++ 
b/api/core/rag/index_processor/processor/paragraph_index_processor.py @@ -14,6 +14,7 @@ from core.rag.extractor.extract_processor import ExtractProcessor from core.rag.index_processor.constant.index_type import IndexType from core.rag.index_processor.index_processor_base import BaseIndexProcessor from core.rag.models.document import Document +from core.rag.retrieval.retrieval_methods import RetrievalMethod from core.tools.utils.text_processing_utils import remove_leading_symbols from libs import helper from models.dataset import Dataset, DatasetProcessRule @@ -106,7 +107,7 @@ class ParagraphIndexProcessor(BaseIndexProcessor): def retrieve( self, - retrieval_method: str, + retrieval_method: RetrievalMethod, query: str, dataset: Dataset, top_k: int, diff --git a/api/core/rag/index_processor/processor/parent_child_index_processor.py b/api/core/rag/index_processor/processor/parent_child_index_processor.py index 7bdde286f5..4fa78e2f95 100644 --- a/api/core/rag/index_processor/processor/parent_child_index_processor.py +++ b/api/core/rag/index_processor/processor/parent_child_index_processor.py @@ -16,6 +16,7 @@ from core.rag.extractor.extract_processor import ExtractProcessor from core.rag.index_processor.constant.index_type import IndexType from core.rag.index_processor.index_processor_base import BaseIndexProcessor from core.rag.models.document import ChildDocument, Document, ParentChildStructureChunk +from core.rag.retrieval.retrieval_methods import RetrievalMethod from extensions.ext_database import db from libs import helper from models.dataset import ChildChunk, Dataset, DatasetProcessRule, DocumentSegment @@ -161,7 +162,7 @@ class ParentChildIndexProcessor(BaseIndexProcessor): def retrieve( self, - retrieval_method: str, + retrieval_method: RetrievalMethod, query: str, dataset: Dataset, top_k: int, diff --git a/api/core/rag/index_processor/processor/qa_index_processor.py b/api/core/rag/index_processor/processor/qa_index_processor.py index 9c8f70dba8..3e3deb0180 100644 --- a/api/core/rag/index_processor/processor/qa_index_processor.py +++ b/api/core/rag/index_processor/processor/qa_index_processor.py @@ -21,6 +21,7 @@ from core.rag.extractor.extract_processor import ExtractProcessor from core.rag.index_processor.constant.index_type import IndexType from core.rag.index_processor.index_processor_base import BaseIndexProcessor from core.rag.models.document import Document, QAStructureChunk +from core.rag.retrieval.retrieval_methods import RetrievalMethod from core.tools.utils.text_processing_utils import remove_leading_symbols from libs import helper from models.dataset import Dataset @@ -141,7 +142,7 @@ class QAIndexProcessor(BaseIndexProcessor): def retrieve( self, - retrieval_method: str, + retrieval_method: RetrievalMethod, query: str, dataset: Dataset, top_k: int, diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index 0a702d2902..99bbe615fb 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -364,7 +364,7 @@ class DatasetRetrieval: top_k = retrieval_model_config["top_k"] # get retrieval method if dataset.indexing_technique == "economy": - retrieval_method = "keyword_search" + retrieval_method = RetrievalMethod.KEYWORD_SEARCH else: retrieval_method = retrieval_model_config["search_method"] # get reranking model @@ -623,7 +623,7 @@ class DatasetRetrieval: if dataset.indexing_technique == "economy": # use keyword table query documents = RetrievalService.retrieve( - 
retrieval_method="keyword_search", + retrieval_method=RetrievalMethod.KEYWORD_SEARCH, dataset_id=dataset.id, query=query, top_k=top_k, diff --git a/api/core/rag/retrieval/retrieval_methods.py b/api/core/rag/retrieval/retrieval_methods.py index 5f0f2a9d33..c77a026351 100644 --- a/api/core/rag/retrieval/retrieval_methods.py +++ b/api/core/rag/retrieval/retrieval_methods.py @@ -1,7 +1,7 @@ -from enum import Enum +from enum import StrEnum -class RetrievalMethod(Enum): +class RetrievalMethod(StrEnum): SEMANTIC_SEARCH = "semantic_search" FULL_TEXT_SEARCH = "full_text_search" HYBRID_SEARCH = "hybrid_search" diff --git a/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py b/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py index b5bc4d3c00..20e10be075 100644 --- a/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py +++ b/api/core/tools/utils/dataset_retriever/dataset_multi_retriever_tool.py @@ -172,7 +172,7 @@ class DatasetMultiRetrieverTool(DatasetRetrieverBaseTool): if dataset.indexing_technique == "economy": # use keyword table query documents = RetrievalService.retrieve( - retrieval_method="keyword_search", + retrieval_method=RetrievalMethod.KEYWORD_SEARCH, dataset_id=dataset.id, query=query, top_k=retrieval_model.get("top_k") or 4, diff --git a/api/core/tools/utils/dataset_retriever/dataset_retriever_tool.py b/api/core/tools/utils/dataset_retriever/dataset_retriever_tool.py index 1eae582f67..915a22dd0f 100644 --- a/api/core/tools/utils/dataset_retriever/dataset_retriever_tool.py +++ b/api/core/tools/utils/dataset_retriever/dataset_retriever_tool.py @@ -130,7 +130,7 @@ class DatasetRetrieverTool(DatasetRetrieverBaseTool): if dataset.indexing_technique == "economy": # use keyword table query documents = RetrievalService.retrieve( - retrieval_method="keyword_search", + retrieval_method=RetrievalMethod.KEYWORD_SEARCH, dataset_id=dataset.id, query=query, top_k=self.top_k, diff --git a/api/core/workflow/enums.py b/api/core/workflow/enums.py index 00a125660a..eb88bb67ee 100644 --- a/api/core/workflow/enums.py +++ b/api/core/workflow/enums.py @@ -1,7 +1,7 @@ -from enum import Enum, StrEnum +from enum import StrEnum -class NodeState(Enum): +class NodeState(StrEnum): """State of a node or edge during workflow execution.""" UNKNOWN = "unknown" diff --git a/api/core/workflow/graph_engine/layers/execution_limits.py b/api/core/workflow/graph_engine/layers/execution_limits.py index e39af89837..a2d36d142d 100644 --- a/api/core/workflow/graph_engine/layers/execution_limits.py +++ b/api/core/workflow/graph_engine/layers/execution_limits.py @@ -10,7 +10,7 @@ When limits are exceeded, the layer automatically aborts execution. 
 import logging
 import time
-from enum import Enum
+from enum import StrEnum
 from typing import final
 
 from typing_extensions import override
@@ -24,7 +24,7 @@ from core.workflow.graph_events import (
 from core.workflow.graph_events.node import NodeRunFailedEvent, NodeRunSucceededEvent
 
 
-class LimitType(Enum):
+class LimitType(StrEnum):
     """Types of execution limits that can be exceeded."""
 
     STEP_LIMIT = "step_limit"
diff --git a/api/core/workflow/nodes/knowledge_index/entities.py b/api/core/workflow/nodes/knowledge_index/entities.py
index c79373afd5..3daca90b9b 100644
--- a/api/core/workflow/nodes/knowledge_index/entities.py
+++ b/api/core/workflow/nodes/knowledge_index/entities.py
@@ -2,6 +2,7 @@ from typing import Literal, Union
 
 from pydantic import BaseModel
 
+from core.rag.retrieval.retrieval_methods import RetrievalMethod
 from core.workflow.nodes.base import BaseNodeData
 
 
@@ -63,7 +64,7 @@ class RetrievalSetting(BaseModel):
     Retrieval Setting.
     """
 
-    search_method: Literal["semantic_search", "keyword_search", "full_text_search", "hybrid_search"]
+    search_method: RetrievalMethod
     top_k: int
     score_threshold: float | None = 0.5
     score_threshold_enabled: bool = False
diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py
index 87861ada87..53216e4fdd 100644
--- a/api/services/dataset_service.py
+++ b/api/services/dataset_service.py
@@ -1470,7 +1470,7 @@ class DocumentService:
             dataset.collection_binding_id = dataset_collection_binding.id
             if not dataset.retrieval_model:
                 default_retrieval_model = {
-                    "search_method": RetrievalMethod.SEMANTIC_SEARCH.value,
+                    "search_method": RetrievalMethod.SEMANTIC_SEARCH,
                     "reranking_enable": False,
                     "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""},
                     "top_k": 4,
@@ -1752,7 +1752,7 @@ class DocumentService:
         #         dataset.collection_binding_id = dataset_collection_binding.id
         #         if not dataset.retrieval_model:
         #             default_retrieval_model = {
-        #                 "search_method": RetrievalMethod.SEMANTIC_SEARCH.value,
+        #                 "search_method": RetrievalMethod.SEMANTIC_SEARCH,
         #                 "reranking_enable": False,
         #                 "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""},
        #                 "top_k": 2,
@@ -2205,7 +2205,7 @@ class DocumentService:
             retrieval_model = knowledge_config.retrieval_model
         else:
             retrieval_model = RetrievalModel(
-                search_method=RetrievalMethod.SEMANTIC_SEARCH.value,
+                search_method=RetrievalMethod.SEMANTIC_SEARCH,
                 reranking_enable=False,
                 reranking_model=RerankingModel(reranking_provider_name="", reranking_model_name=""),
                 top_k=4,
diff --git a/api/services/entities/knowledge_entities/knowledge_entities.py b/api/services/entities/knowledge_entities/knowledge_entities.py
index 33f65bde58..b9a210740d 100644
--- a/api/services/entities/knowledge_entities/knowledge_entities.py
+++ b/api/services/entities/knowledge_entities/knowledge_entities.py
@@ -3,6 +3,8 @@ from typing import Literal
 
 from pydantic import BaseModel
 
+from core.rag.retrieval.retrieval_methods import RetrievalMethod
+
 
 class ParentMode(StrEnum):
     FULL_DOC = "full-doc"
@@ -95,7 +97,7 @@ class WeightModel(BaseModel):
 
 
 class RetrievalModel(BaseModel):
-    search_method: Literal["hybrid_search", "semantic_search", "full_text_search", "keyword_search"]
+    search_method: RetrievalMethod
     reranking_enable: bool
     reranking_model: RerankingModel | None = None
     reranking_mode: str | None = None
diff --git a/api/services/entities/knowledge_entities/rag_pipeline_entities.py b/api/services/entities/knowledge_entities/rag_pipeline_entities.py
index 860bfde401..a97ccab914 100644
--- a/api/services/entities/knowledge_entities/rag_pipeline_entities.py
+++ b/api/services/entities/knowledge_entities/rag_pipeline_entities.py
@@ -2,6 +2,8 @@ from typing import Literal
 
 from pydantic import BaseModel, field_validator
 
+from core.rag.retrieval.retrieval_methods import RetrievalMethod
+
 
 class IconInfo(BaseModel):
     icon: str
@@ -83,7 +85,7 @@ class RetrievalSetting(BaseModel):
     Retrieval Setting.
     """
 
-    search_method: Literal["semantic_search", "full_text_search", "keyword_search", "hybrid_search"]
+    search_method: RetrievalMethod
     top_k: int
     score_threshold: float | None = 0.5
     score_threshold_enabled: bool = False
diff --git a/api/services/entities/model_provider_entities.py b/api/services/entities/model_provider_entities.py
index 0f5151919f..d07badefa7 100644
--- a/api/services/entities/model_provider_entities.py
+++ b/api/services/entities/model_provider_entities.py
@@ -1,5 +1,5 @@
 from collections.abc import Sequence
-from enum import Enum
+from enum import StrEnum
 
 from pydantic import BaseModel, ConfigDict, model_validator
 
@@ -27,7 +27,7 @@ from core.model_runtime.entities.provider_entities import (
 from models.provider import ProviderType
 
 
-class CustomConfigurationStatus(Enum):
+class CustomConfigurationStatus(StrEnum):
     """
     Enum class for custom configuration status.
     """
diff --git a/api/services/hit_testing_service.py b/api/services/hit_testing_service.py
index aa29354a6e..c6ea35076e 100644
--- a/api/services/hit_testing_service.py
+++ b/api/services/hit_testing_service.py
@@ -63,7 +63,7 @@ class HitTestingService:
         if metadata_condition and not document_ids_filter:
             return cls.compact_retrieve_response(query, [])
         all_documents = RetrievalService.retrieve(
-            retrieval_method=retrieval_model.get("search_method", "semantic_search"),
+            retrieval_method=RetrievalMethod(retrieval_model.get("search_method", RetrievalMethod.SEMANTIC_SEARCH)),
             dataset_id=dataset.id,
             query=query,
             top_k=retrieval_model.get("top_k", 4),
diff --git a/api/services/rag_pipeline/rag_pipeline_transform_service.py b/api/services/rag_pipeline/rag_pipeline_transform_service.py
index 39f426a2b0..d79ab71668 100644
--- a/api/services/rag_pipeline/rag_pipeline_transform_service.py
+++ b/api/services/rag_pipeline/rag_pipeline_transform_service.py
@@ -9,6 +9,7 @@ from flask_login import current_user
 
 from constants import DOCUMENT_EXTENSIONS
 from core.plugin.impl.plugin import PluginInstaller
+from core.rag.retrieval.retrieval_methods import RetrievalMethod
 from extensions.ext_database import db
 from factories import variable_factory
 from models.dataset import Dataset, Document, DocumentPipelineExecutionLog, Pipeline
@@ -164,7 +165,7 @@ class RagPipelineTransformService:
             if retrieval_model:
                 retrieval_setting = RetrievalSetting.model_validate(retrieval_model)
                 if indexing_technique == "economy":
-                    retrieval_setting.search_method = "keyword_search"
+                    retrieval_setting.search_method = RetrievalMethod.KEYWORD_SEARCH
                 knowledge_configuration.retrieval_model = retrieval_setting
             else:
                 dataset.retrieval_model = knowledge_configuration.retrieval_model.model_dump()
diff --git a/api/tests/unit_tests/core/rag/extractor/firecrawl/test_firecrawl.py b/api/tests/unit_tests/core/rag/extractor/firecrawl/test_firecrawl.py
index e5ead6ff66..b4ee1b91b4 100644
--- a/api/tests/unit_tests/core/rag/extractor/firecrawl/test_firecrawl.py
+++ b/api/tests/unit_tests/core/rag/extractor/firecrawl/test_firecrawl.py
@@ -1,10 +1,12 @@
 import os
 
+from pytest_mock import MockerFixture
+
 from core.rag.extractor.firecrawl.firecrawl_app import FirecrawlApp
 from tests.unit_tests.core.rag.extractor.test_notion_extractor import _mock_response
 
 
-def test_firecrawl_web_extractor_crawl_mode(mocker):
+def test_firecrawl_web_extractor_crawl_mode(mocker: MockerFixture):
     url = "https://firecrawl.dev"
     api_key = os.getenv("FIRECRAWL_API_KEY") or "fc-"
     base_url = "https://api.firecrawl.dev"
diff --git a/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py
index f1e1820acc..58bec7d19e 100644
--- a/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py
+++ b/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py
@@ -1,5 +1,7 @@
 from unittest import mock
 
+from pytest_mock import MockerFixture
+
 from core.rag.extractor import notion_extractor
 
 user_id = "user1"
@@ -57,7 +59,7 @@ def _remove_multiple_new_lines(text):
     return text.strip()
 
 
-def test_notion_page(mocker):
+def test_notion_page(mocker: MockerFixture):
     texts = ["Head 1", "1.1", "paragraph 1", "1.1.1"]
     mocked_notion_page = {
         "object": "list",
@@ -77,7 +79,7 @@ def test_notion_page(mocker):
     assert content == "# Head 1\n## 1.1\nparagraph 1\n### 1.1.1"
 
 
-def test_notion_database(mocker):
+def test_notion_database(mocker: MockerFixture):
     page_title_list = ["page1", "page2", "page3"]
     mocked_notion_database = {
         "object": "list",
diff --git a/api/tests/unit_tests/core/test_model_manager.py b/api/tests/unit_tests/core/test_model_manager.py
index d98e9f6bad..5a7547e85c 100644
--- a/api/tests/unit_tests/core/test_model_manager.py
+++ b/api/tests/unit_tests/core/test_model_manager.py
@@ -2,6 +2,7 @@ from unittest.mock import MagicMock, patch
 
 import pytest
 import redis
+from pytest_mock import MockerFixture
 
 from core.entities.provider_entities import ModelLoadBalancingConfiguration
 from core.model_manager import LBModelManager
@@ -39,7 +40,7 @@ def lb_model_manager():
     return lb_model_manager
 
 
-def test_lb_model_manager_fetch_next(mocker, lb_model_manager):
+def test_lb_model_manager_fetch_next(mocker: MockerFixture, lb_model_manager: LBModelManager):
     # initialize redis client
     redis_client.initialize(redis.Redis())
 
diff --git a/api/tests/unit_tests/core/test_provider_manager.py b/api/tests/unit_tests/core/test_provider_manager.py
index 2dab394029..0c3887beab 100644
--- a/api/tests/unit_tests/core/test_provider_manager.py
+++ b/api/tests/unit_tests/core/test_provider_manager.py
@@ -1,4 +1,5 @@
 import pytest
+from pytest_mock import MockerFixture
 
 from core.entities.provider_entities import ModelSettings
 from core.model_runtime.entities.model_entities import ModelType
@@ -7,19 +8,25 @@ from models.provider import LoadBalancingModelConfig, ProviderModelSetting
 
 
 @pytest.fixture
-def mock_provider_entity(mocker):
+def mock_provider_entity(mocker: MockerFixture):
     mock_entity = mocker.Mock()
     mock_entity.provider = "openai"
     mock_entity.configurate_methods = ["predefined-model"]
     mock_entity.supported_model_types = [ModelType.LLM]
-    mock_entity.model_credential_schema = mocker.Mock()
-    mock_entity.model_credential_schema.credential_form_schemas = []
+    # Use PropertyMock to ensure credential_form_schemas is iterable
+    provider_credential_schema = mocker.Mock()
+    type(provider_credential_schema).credential_form_schemas = mocker.PropertyMock(return_value=[])
+    mock_entity.provider_credential_schema = provider_credential_schema
+
+    model_credential_schema = mocker.Mock()
+    type(model_credential_schema).credential_form_schemas = mocker.PropertyMock(return_value=[])
+    mock_entity.model_credential_schema = model_credential_schema
 
     return mock_entity
 
 
-def test__to_model_settings(mocker, mock_provider_entity):
+def test__to_model_settings(mocker: MockerFixture, mock_provider_entity):
     # Mocking the inputs
     provider_model_settings = [
         ProviderModelSetting(
@@ -79,7 +86,7 @@ def test__to_model_settings(mocker, mock_provider_entity):
     assert result[0].load_balancing_configs[1].name == "first"
 
 
-def test__to_model_settings_only_one_lb(mocker, mock_provider_entity):
+def test__to_model_settings_only_one_lb(mocker: MockerFixture, mock_provider_entity):
     # Mocking the inputs
     provider_model_settings = [
         ProviderModelSetting(
@@ -127,7 +134,7 @@ def test__to_model_settings_only_one_lb(mocker, mock_provider_entity):
     assert len(result[0].load_balancing_configs) == 0
 
 
-def test__to_model_settings_lb_disabled(mocker, mock_provider_entity):
+def test__to_model_settings_lb_disabled(mocker: MockerFixture, mock_provider_entity):
     # Mocking the inputs
     provider_model_settings = [
         ProviderModelSetting(
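Why StrEnum rather than Enum matters for these retrieval methods (stdlib-only, Python 3.11+): a plain Enum member never compares equal to its string value, while a StrEnum member is itself a str, so values deserialized from request payloads or stored configs match members directly. A minimal demonstration, with invented class names:

# Sketch: plain Enum vs StrEnum equality semantics (Python 3.11+).
from enum import Enum, StrEnum


class PlainMethod(Enum):
    SEMANTIC_SEARCH = "semantic_search"


class StrMethod(StrEnum):
    SEMANTIC_SEARCH = "semantic_search"


assert PlainMethod.SEMANTIC_SEARCH != "semantic_search"  # plain Enum: never equal to its value
assert StrMethod.SEMANTIC_SEARCH == "semantic_search"    # StrEnum: compares equal to the raw string
assert isinstance(StrMethod.SEMANTIC_SEARCH, str)        # and is usable anywhere a str is expected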
_get_resource(resource_id, current_user.current_tenant_id, self.resource_model) - if not current_user.is_editor: + if not current_user.has_edit_permission: raise Forbidden() current_key_count = ( @@ -108,6 +112,8 @@ class BaseApiKeyResource(Resource): assert self.resource_id_field is not None, "resource_id_field must be set" resource_id = str(resource_id) api_key_id = str(api_key_id) + assert isinstance(current_user, Account) + assert current_user.current_tenant_id is not None _get_resource(resource_id, current_user.current_tenant_id, self.resource_model) # The role of the current user in the ta table must be admin or owner diff --git a/api/controllers/console/billing/compliance.py b/api/controllers/console/billing/compliance.py index e489b48c82..c0d104e0d4 100644 --- a/api/controllers/console/billing/compliance.py +++ b/api/controllers/console/billing/compliance.py @@ -1,9 +1,9 @@ from flask import request -from flask_login import current_user from flask_restx import Resource, reqparse from libs.helper import extract_remote_ip -from libs.login import login_required +from libs.login import current_user, login_required +from models.account import Account from services.billing_service import BillingService from .. import console_ns @@ -17,6 +17,8 @@ class ComplianceApi(Resource): @account_initialization_required @only_edition_cloud def get(self): + assert isinstance(current_user, Account) + assert current_user.current_tenant_id is not None parser = reqparse.RequestParser() parser.add_argument("doc_name", type=str, required=True, location="args") args = parser.parse_args() diff --git a/api/controllers/console/datasets/hit_testing_base.py b/api/controllers/console/datasets/hit_testing_base.py index a68e337135..6113f1fd17 100644 --- a/api/controllers/console/datasets/hit_testing_base.py +++ b/api/controllers/console/datasets/hit_testing_base.py @@ -1,7 +1,5 @@ import logging -from typing import cast -from flask_login import current_user from flask_restx import marshal, reqparse from werkzeug.exceptions import Forbidden, InternalServerError, NotFound @@ -21,6 +19,7 @@ from core.errors.error import ( ) from core.model_runtime.errors.invoke import InvokeError from fields.hit_testing_fields import hit_testing_record_fields +from libs.login import current_user from models.account import Account from services.dataset_service import DatasetService from services.hit_testing_service import HitTestingService @@ -31,6 +30,7 @@ logger = logging.getLogger(__name__) class DatasetsHitTestingBase: @staticmethod def get_and_validate_dataset(dataset_id: str): + assert isinstance(current_user, Account) dataset = DatasetService.get_dataset(dataset_id) if dataset is None: raise NotFound("Dataset not found.") @@ -57,11 +57,12 @@ class DatasetsHitTestingBase: @staticmethod def perform_hit_testing(dataset, args): + assert isinstance(current_user, Account) try: response = HitTestingService.retrieve( dataset=dataset, query=args["query"], - account=cast(Account, current_user), + account=current_user, retrieval_model=args["retrieval_model"], external_retrieval_model=args["external_retrieval_model"], limit=10, diff --git a/api/controllers/console/explore/wraps.py b/api/controllers/console/explore/wraps.py index 3a8ba64a03..5956eb52c4 100644 --- a/api/controllers/console/explore/wraps.py +++ b/api/controllers/console/explore/wraps.py @@ -2,15 +2,15 @@ from collections.abc import Callable from functools import wraps from typing import Concatenate, ParamSpec, TypeVar -from flask_login import current_user from flask_restx 
import Resource from werkzeug.exceptions import NotFound from controllers.console.explore.error import AppAccessDeniedError from controllers.console.wraps import account_initialization_required from extensions.ext_database import db -from libs.login import login_required +from libs.login import current_user, login_required from models import InstalledApp +from models.account import Account from services.app_service import AppService from services.enterprise.enterprise_service import EnterpriseService from services.feature_service import FeatureService @@ -24,6 +24,8 @@ def installed_app_required(view: Callable[Concatenate[InstalledApp, P], R] | Non def decorator(view: Callable[Concatenate[InstalledApp, P], R]): @wraps(view) def decorated(installed_app_id: str, *args: P.args, **kwargs: P.kwargs): + assert isinstance(current_user, Account) + assert current_user.current_tenant_id is not None installed_app = ( db.session.query(InstalledApp) .where( @@ -56,6 +58,7 @@ def user_allowed_to_access_app(view: Callable[Concatenate[InstalledApp, P], R] | def decorated(installed_app: InstalledApp, *args: P.args, **kwargs: P.kwargs): feature = FeatureService.get_system_features() if feature.webapp_auth.enabled: + assert isinstance(current_user, Account) app_id = installed_app.app_id app_code = AppService.get_app_code_by_id(app_id) res = EnterpriseService.WebAppAuth.is_user_allowed_to_access_webapp( diff --git a/api/controllers/console/extension.py b/api/controllers/console/extension.py index 57f5ab191e..c6b3cf7515 100644 --- a/api/controllers/console/extension.py +++ b/api/controllers/console/extension.py @@ -1,11 +1,11 @@ -from flask_login import current_user from flask_restx import Resource, fields, marshal_with, reqparse from constants import HIDDEN_VALUE from controllers.console import api, console_ns from controllers.console.wraps import account_initialization_required, setup_required from fields.api_based_extension_fields import api_based_extension_fields -from libs.login import login_required +from libs.login import current_user, login_required +from models.account import Account from models.api_based_extension import APIBasedExtension from services.api_based_extension_service import APIBasedExtensionService from services.code_based_extension_service import CodeBasedExtensionService @@ -47,6 +47,8 @@ class APIBasedExtensionAPI(Resource): @account_initialization_required @marshal_with(api_based_extension_fields) def get(self): + assert isinstance(current_user, Account) + assert current_user.current_tenant_id is not None tenant_id = current_user.current_tenant_id return APIBasedExtensionService.get_all_by_tenant_id(tenant_id) @@ -68,6 +70,8 @@ class APIBasedExtensionAPI(Resource): @account_initialization_required @marshal_with(api_based_extension_fields) def post(self): + assert isinstance(current_user, Account) + assert current_user.current_tenant_id is not None parser = reqparse.RequestParser() parser.add_argument("name", type=str, required=True, location="json") parser.add_argument("api_endpoint", type=str, required=True, location="json") @@ -95,6 +99,8 @@ class APIBasedExtensionDetailAPI(Resource): @account_initialization_required @marshal_with(api_based_extension_fields) def get(self, id): + assert isinstance(current_user, Account) + assert current_user.current_tenant_id is not None api_based_extension_id = str(id) tenant_id = current_user.current_tenant_id @@ -119,6 +125,8 @@ class APIBasedExtensionDetailAPI(Resource): @account_initialization_required @marshal_with(api_based_extension_fields) 
def post(self, id): + assert isinstance(current_user, Account) + assert current_user.current_tenant_id is not None api_based_extension_id = str(id) tenant_id = current_user.current_tenant_id @@ -146,6 +154,8 @@ class APIBasedExtensionDetailAPI(Resource): @login_required @account_initialization_required def delete(self, id): + assert isinstance(current_user, Account) + assert current_user.current_tenant_id is not None api_based_extension_id = str(id) tenant_id = current_user.current_tenant_id diff --git a/api/controllers/console/feature.py b/api/controllers/console/feature.py index d43b839291..80847b8fef 100644 --- a/api/controllers/console/feature.py +++ b/api/controllers/console/feature.py @@ -1,7 +1,7 @@ -from flask_login import current_user from flask_restx import Resource, fields -from libs.login import login_required +from libs.login import current_user, login_required +from models.account import Account from services.feature_service import FeatureService from . import api, console_ns @@ -23,6 +23,8 @@ class FeatureApi(Resource): @cloud_utm_record def get(self): """Get feature configuration for current tenant""" + assert isinstance(current_user, Account) + assert current_user.current_tenant_id is not None return FeatureService.get_features(current_user.current_tenant_id).model_dump() diff --git a/api/controllers/console/remote_files.py b/api/controllers/console/remote_files.py index 7aaf807fb0..4d4bb5d779 100644 --- a/api/controllers/console/remote_files.py +++ b/api/controllers/console/remote_files.py @@ -1,8 +1,6 @@ import urllib.parse -from typing import cast import httpx -from flask_login import current_user from flask_restx import Resource, marshal_with, reqparse import services @@ -16,6 +14,7 @@ from core.file import helpers as file_helpers from core.helper import ssrf_proxy from extensions.ext_database import db from fields.file_fields import file_fields_with_signed_url, remote_file_info_fields +from libs.login import current_user from models.account import Account from services.file_service import FileService @@ -65,7 +64,8 @@ class RemoteFileUploadApi(Resource): content = resp.content if resp.request.method == "GET" else ssrf_proxy.get(url).content try: - user = cast(Account, current_user) + assert isinstance(current_user, Account) + user = current_user upload_file = FileService(db.engine).upload_file( filename=file_info.filename, content=content, diff --git a/api/controllers/console/tag/tags.py b/api/controllers/console/tag/tags.py index 3d29b3ee61..b6086c5766 100644 --- a/api/controllers/console/tag/tags.py +++ b/api/controllers/console/tag/tags.py @@ -1,12 +1,12 @@ from flask import request -from flask_login import current_user from flask_restx import Resource, marshal_with, reqparse from werkzeug.exceptions import Forbidden from controllers.console import console_ns from controllers.console.wraps import account_initialization_required, setup_required from fields.tag_fields import dataset_tag_fields -from libs.login import login_required +from libs.login import current_user, login_required +from models.account import Account from models.model import Tag from services.tag_service import TagService @@ -24,6 +24,8 @@ class TagListApi(Resource): @account_initialization_required @marshal_with(dataset_tag_fields) def get(self): + assert isinstance(current_user, Account) + assert current_user.current_tenant_id is not None tag_type = request.args.get("type", type=str, default="") keyword = request.args.get("keyword", default=None, type=str) tags = TagService.get_tags(tag_type, 
current_user.current_tenant_id, keyword) @@ -34,8 +36,10 @@ class TagListApi(Resource): @login_required @account_initialization_required def post(self): + assert isinstance(current_user, Account) + assert current_user.current_tenant_id is not None # The role of the current user in the ta table must be admin, owner, or editor - if not (current_user.is_editor or current_user.is_dataset_editor): + if not (current_user.has_edit_permission or current_user.is_dataset_editor): raise Forbidden() parser = reqparse.RequestParser() @@ -59,9 +63,11 @@ class TagUpdateDeleteApi(Resource): @login_required @account_initialization_required def patch(self, tag_id): + assert isinstance(current_user, Account) + assert current_user.current_tenant_id is not None tag_id = str(tag_id) # The role of the current user in the ta table must be admin, owner, or editor - if not (current_user.is_editor or current_user.is_dataset_editor): + if not (current_user.has_edit_permission or current_user.is_dataset_editor): raise Forbidden() parser = reqparse.RequestParser() @@ -81,9 +87,11 @@ class TagUpdateDeleteApi(Resource): @login_required @account_initialization_required def delete(self, tag_id): + assert isinstance(current_user, Account) + assert current_user.current_tenant_id is not None tag_id = str(tag_id) # The role of the current user in the ta table must be admin, owner, or editor - if not current_user.is_editor: + if not current_user.has_edit_permission: raise Forbidden() TagService.delete_tag(tag_id) @@ -97,8 +105,10 @@ class TagBindingCreateApi(Resource): @login_required @account_initialization_required def post(self): + assert isinstance(current_user, Account) + assert current_user.current_tenant_id is not None # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator - if not (current_user.is_editor or current_user.is_dataset_editor): + if not (current_user.has_edit_permission or current_user.is_dataset_editor): raise Forbidden() parser = reqparse.RequestParser() @@ -123,8 +133,10 @@ class TagBindingDeleteApi(Resource): @login_required @account_initialization_required def post(self): + assert isinstance(current_user, Account) + assert current_user.current_tenant_id is not None # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator - if not (current_user.is_editor or current_user.is_dataset_editor): + if not (current_user.has_edit_permission or current_user.is_dataset_editor): raise Forbidden() parser = reqparse.RequestParser() diff --git a/api/controllers/console/workspace/agent_providers.py b/api/controllers/console/workspace/agent_providers.py index 0a2c8fcfb4..e044b2db5b 100644 --- a/api/controllers/console/workspace/agent_providers.py +++ b/api/controllers/console/workspace/agent_providers.py @@ -1,10 +1,10 @@ -from flask_login import current_user from flask_restx import Resource, fields from controllers.console import api, console_ns from controllers.console.wraps import account_initialization_required, setup_required from core.model_runtime.utils.encoders import jsonable_encoder -from libs.login import login_required +from libs.login import current_user, login_required +from models.account import Account from services.agent_service import AgentService @@ -21,7 +21,9 @@ class AgentProviderListApi(Resource): @login_required @account_initialization_required def get(self): + assert isinstance(current_user, Account) user = current_user + assert user.current_tenant_id is not None user_id = user.id tenant_id = user.current_tenant_id 
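The recurring `assert isinstance(current_user, Account)` and `assert current_user.current_tenant_id is not None` pairs in these hunks exist for static type narrowing rather than runtime policy: `current_user` is a union-typed login proxy, and the asserts let the checker treat it as an `Account` with a non-None tenant for the rest of the view. A minimal sketch of the mechanism, using stand-in classes rather than Dify's real models:

```python
# Stand-ins only: Account and EndUser here are hypothetical, not Dify's models.
from __future__ import annotations


class Account:
    def __init__(self, tenant_id: str | None):
        self.current_tenant_id = tenant_id


class EndUser:
    """Stand-in for a non-Account login identity."""


def tenant_id_of(user: Account | EndUser | None) -> str:
    # The isinstance() assert narrows the union to Account for the type checker,
    assert isinstance(user, Account)
    # and the None-check assert narrows Optional[str] to str.
    assert user.current_tenant_id is not None
    return user.current_tenant_id


print(tenant_id_of(Account("tenant-123")))  # tenant-123
```

After narrowing, `current_tenant_id` is a plain `str` to the checker, which is why the hunks can pass it straight to service calls. The endpoint and wraps patches below fold the same two asserts into `_current_account_with_tenant()` and `_current_account()` helpers so each view performs the narrowing once instead of repeating it inline.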
@@ -43,7 +45,9 @@ class AgentProviderApi(Resource): @login_required @account_initialization_required def get(self, provider_name: str): + assert isinstance(current_user, Account) user = current_user + assert user.current_tenant_id is not None user_id = user.id tenant_id = user.current_tenant_id return jsonable_encoder(AgentService.get_agent_provider(user_id, tenant_id, provider_name)) diff --git a/api/controllers/console/workspace/endpoint.py b/api/controllers/console/workspace/endpoint.py index 0657b764cc..782bd72565 100644 --- a/api/controllers/console/workspace/endpoint.py +++ b/api/controllers/console/workspace/endpoint.py @@ -1,4 +1,3 @@ -from flask_login import current_user from flask_restx import Resource, fields, reqparse from werkzeug.exceptions import Forbidden @@ -6,10 +5,18 @@ from controllers.console import api, console_ns from controllers.console.wraps import account_initialization_required, setup_required from core.model_runtime.utils.encoders import jsonable_encoder from core.plugin.impl.exc import PluginPermissionDeniedError -from libs.login import login_required +from libs.login import current_user, login_required +from models.account import Account from services.plugin.endpoint_service import EndpointService +def _current_account_with_tenant() -> tuple[Account, str]: + assert isinstance(current_user, Account) + tenant_id = current_user.current_tenant_id + assert tenant_id is not None + return current_user, tenant_id + + @console_ns.route("/workspaces/current/endpoints/create") class EndpointCreateApi(Resource): @api.doc("create_endpoint") @@ -34,7 +41,7 @@ class EndpointCreateApi(Resource): @login_required @account_initialization_required def post(self): - user = current_user + user, tenant_id = _current_account_with_tenant() if not user.is_admin_or_owner: raise Forbidden() @@ -51,7 +58,7 @@ class EndpointCreateApi(Resource): try: return { "success": EndpointService.create_endpoint( - tenant_id=user.current_tenant_id, + tenant_id=tenant_id, user_id=user.id, plugin_unique_identifier=plugin_unique_identifier, name=name, @@ -80,7 +87,7 @@ class EndpointListApi(Resource): @login_required @account_initialization_required def get(self): - user = current_user + user, tenant_id = _current_account_with_tenant() parser = reqparse.RequestParser() parser.add_argument("page", type=int, required=True, location="args") @@ -93,7 +100,7 @@ class EndpointListApi(Resource): return jsonable_encoder( { "endpoints": EndpointService.list_endpoints( - tenant_id=user.current_tenant_id, + tenant_id=tenant_id, user_id=user.id, page=page, page_size=page_size, @@ -123,7 +130,7 @@ class EndpointListForSinglePluginApi(Resource): @login_required @account_initialization_required def get(self): - user = current_user + user, tenant_id = _current_account_with_tenant() parser = reqparse.RequestParser() parser.add_argument("page", type=int, required=True, location="args") @@ -138,7 +145,7 @@ class EndpointListForSinglePluginApi(Resource): return jsonable_encoder( { "endpoints": EndpointService.list_endpoints_for_single_plugin( - tenant_id=user.current_tenant_id, + tenant_id=tenant_id, user_id=user.id, plugin_id=plugin_id, page=page, @@ -165,7 +172,7 @@ class EndpointDeleteApi(Resource): @login_required @account_initialization_required def post(self): - user = current_user + user, tenant_id = _current_account_with_tenant() parser = reqparse.RequestParser() parser.add_argument("endpoint_id", type=str, required=True) @@ -177,9 +184,7 @@ class EndpointDeleteApi(Resource): endpoint_id = args["endpoint_id"] return 
{ - "success": EndpointService.delete_endpoint( - tenant_id=user.current_tenant_id, user_id=user.id, endpoint_id=endpoint_id - ) + "success": EndpointService.delete_endpoint(tenant_id=tenant_id, user_id=user.id, endpoint_id=endpoint_id) } @@ -207,7 +212,7 @@ class EndpointUpdateApi(Resource): @login_required @account_initialization_required def post(self): - user = current_user + user, tenant_id = _current_account_with_tenant() parser = reqparse.RequestParser() parser.add_argument("endpoint_id", type=str, required=True) @@ -224,7 +229,7 @@ class EndpointUpdateApi(Resource): return { "success": EndpointService.update_endpoint( - tenant_id=user.current_tenant_id, + tenant_id=tenant_id, user_id=user.id, endpoint_id=endpoint_id, name=name, @@ -250,7 +255,7 @@ class EndpointEnableApi(Resource): @login_required @account_initialization_required def post(self): - user = current_user + user, tenant_id = _current_account_with_tenant() parser = reqparse.RequestParser() parser.add_argument("endpoint_id", type=str, required=True) @@ -262,9 +267,7 @@ class EndpointEnableApi(Resource): raise Forbidden() return { - "success": EndpointService.enable_endpoint( - tenant_id=user.current_tenant_id, user_id=user.id, endpoint_id=endpoint_id - ) + "success": EndpointService.enable_endpoint(tenant_id=tenant_id, user_id=user.id, endpoint_id=endpoint_id) } @@ -285,7 +288,7 @@ class EndpointDisableApi(Resource): @login_required @account_initialization_required def post(self): - user = current_user + user, tenant_id = _current_account_with_tenant() parser = reqparse.RequestParser() parser.add_argument("endpoint_id", type=str, required=True) @@ -297,7 +300,5 @@ class EndpointDisableApi(Resource): raise Forbidden() return { - "success": EndpointService.disable_endpoint( - tenant_id=user.current_tenant_id, user_id=user.id, endpoint_id=endpoint_id - ) + "success": EndpointService.disable_endpoint(tenant_id=tenant_id, user_id=user.id, endpoint_id=endpoint_id) } diff --git a/api/controllers/console/workspace/members.py b/api/controllers/console/workspace/members.py index 8b89853bd9..dd6a878d87 100644 --- a/api/controllers/console/workspace/members.py +++ b/api/controllers/console/workspace/members.py @@ -1,7 +1,6 @@ from urllib import parse from flask import abort, request -from flask_login import current_user from flask_restx import Resource, marshal_with, reqparse import services @@ -26,7 +25,7 @@ from controllers.console.wraps import ( from extensions.ext_database import db from fields.member_fields import account_with_role_list_fields from libs.helper import extract_remote_ip -from libs.login import login_required +from libs.login import current_user, login_required from models.account import Account, TenantAccountRole from services.account_service import AccountService, RegisterService, TenantService from services.errors.account import AccountAlreadyInTenantError diff --git a/api/controllers/console/workspace/workspace.py b/api/controllers/console/workspace/workspace.py index bc748ac3d2..4a0539785a 100644 --- a/api/controllers/console/workspace/workspace.py +++ b/api/controllers/console/workspace/workspace.py @@ -1,7 +1,6 @@ import logging from flask import request -from flask_login import current_user from flask_restx import Resource, fields, inputs, marshal, marshal_with, reqparse from sqlalchemy import select from werkzeug.exceptions import Unauthorized @@ -24,7 +23,7 @@ from controllers.console.wraps import ( ) from extensions.ext_database import db from libs.helper import TimestampField -from libs.login import 
login_required +from libs.login import current_user, login_required from models.account import Account, Tenant, TenantStatus from services.account_service import TenantService from services.feature_service import FeatureService diff --git a/api/controllers/console/wraps.py b/api/controllers/console/wraps.py index 914d386c78..9e903d9286 100644 --- a/api/controllers/console/wraps.py +++ b/api/controllers/console/wraps.py @@ -7,13 +7,13 @@ from functools import wraps from typing import ParamSpec, TypeVar from flask import abort, request -from flask_login import current_user from configs import dify_config from controllers.console.workspace.error import AccountNotInitializedError from extensions.ext_database import db from extensions.ext_redis import redis_client -from models.account import AccountStatus +from libs.login import current_user +from models.account import Account, AccountStatus from models.dataset import RateLimitLog from models.model import DifySetup from services.feature_service import FeatureService, LicenseStatus @@ -25,11 +25,16 @@ P = ParamSpec("P") R = TypeVar("R") +def _current_account() -> Account: + assert isinstance(current_user, Account) + return current_user + + def account_initialization_required(view: Callable[P, R]): @wraps(view) def decorated(*args: P.args, **kwargs: P.kwargs): # check account initialization - account = current_user + account = _current_account() if account.status == AccountStatus.UNINITIALIZED: raise AccountNotInitializedError() @@ -75,7 +80,9 @@ def only_edition_self_hosted(view: Callable[P, R]): def cloud_edition_billing_enabled(view: Callable[P, R]): @wraps(view) def decorated(*args: P.args, **kwargs: P.kwargs): - features = FeatureService.get_features(current_user.current_tenant_id) + account = _current_account() + assert account.current_tenant_id is not None + features = FeatureService.get_features(account.current_tenant_id) if not features.billing.enabled: abort(403, "Billing feature is not enabled.") return view(*args, **kwargs) @@ -87,7 +94,10 @@ def cloud_edition_billing_resource_check(resource: str): def interceptor(view: Callable[P, R]): @wraps(view) def decorated(*args: P.args, **kwargs: P.kwargs): - features = FeatureService.get_features(current_user.current_tenant_id) + account = _current_account() + assert account.current_tenant_id is not None + tenant_id = account.current_tenant_id + features = FeatureService.get_features(tenant_id) if features.billing.enabled: members = features.members apps = features.apps @@ -128,7 +138,9 @@ def cloud_edition_billing_knowledge_limit_check(resource: str): def interceptor(view: Callable[P, R]): @wraps(view) def decorated(*args: P.args, **kwargs: P.kwargs): - features = FeatureService.get_features(current_user.current_tenant_id) + account = _current_account() + assert account.current_tenant_id is not None + features = FeatureService.get_features(account.current_tenant_id) if features.billing.enabled: if resource == "add_segment": if features.billing.subscription.plan == "sandbox": @@ -151,10 +163,13 @@ def cloud_edition_billing_rate_limit_check(resource: str): @wraps(view) def decorated(*args: P.args, **kwargs: P.kwargs): if resource == "knowledge": - knowledge_rate_limit = FeatureService.get_knowledge_rate_limit(current_user.current_tenant_id) + account = _current_account() + assert account.current_tenant_id is not None + tenant_id = account.current_tenant_id + knowledge_rate_limit = FeatureService.get_knowledge_rate_limit(tenant_id) if knowledge_rate_limit.enabled: current_time = int(time.time() 
* 1000) - key = f"rate_limit_{current_user.current_tenant_id}" + key = f"rate_limit_{tenant_id}" redis_client.zadd(key, {current_time: current_time}) @@ -165,7 +180,7 @@ def cloud_edition_billing_rate_limit_check(resource: str): if request_count > knowledge_rate_limit.limit: # add ratelimit record rate_limit_log = RateLimitLog( - tenant_id=current_user.current_tenant_id, + tenant_id=tenant_id, subscription_plan=knowledge_rate_limit.subscription_plan, operation="knowledge", ) @@ -185,14 +200,17 @@ def cloud_utm_record(view: Callable[P, R]): @wraps(view) def decorated(*args: P.args, **kwargs: P.kwargs): with contextlib.suppress(Exception): - features = FeatureService.get_features(current_user.current_tenant_id) + account = _current_account() + assert account.current_tenant_id is not None + tenant_id = account.current_tenant_id + features = FeatureService.get_features(tenant_id) if features.billing.enabled: utm_info = request.cookies.get("utm_info") if utm_info: utm_info_dict: dict = json.loads(utm_info) - OperationService.record_utm(current_user.current_tenant_id, utm_info_dict) + OperationService.record_utm(tenant_id, utm_info_dict) return view(*args, **kwargs) @@ -271,7 +289,9 @@ def enable_change_email(view: Callable[P, R]): def is_allow_transfer_owner(view: Callable[P, R]): @wraps(view) def decorated(*args: P.args, **kwargs: P.kwargs): - features = FeatureService.get_features(current_user.current_tenant_id) + account = _current_account() + assert account.current_tenant_id is not None + features = FeatureService.get_features(account.current_tenant_id) if features.is_allow_transfer_workspace: return view(*args, **kwargs) @@ -284,7 +304,9 @@ def is_allow_transfer_owner(view: Callable[P, R]): def knowledge_pipeline_publish_enabled(view): @wraps(view) def decorated(*args, **kwargs): - features = FeatureService.get_features(current_user.current_tenant_id) + account = _current_account() + assert account.current_tenant_id is not None + features = FeatureService.get_features(account.current_tenant_id) if features.knowledge_pipeline.publish_enabled: return view(*args, **kwargs) abort(403) diff --git a/api/tests/unit_tests/controllers/console/test_wraps.py b/api/tests/unit_tests/controllers/console/test_wraps.py index 9742368f04..5d132cb787 100644 --- a/api/tests/unit_tests/controllers/console/test_wraps.py +++ b/api/tests/unit_tests/controllers/console/test_wraps.py @@ -60,7 +60,7 @@ class TestAccountInitialization: return "success" # Act - with patch("controllers.console.wraps.current_user", mock_user): + with patch("controllers.console.wraps._current_account", return_value=mock_user): result = protected_view() # Assert @@ -77,7 +77,7 @@ class TestAccountInitialization: return "success" # Act & Assert - with patch("controllers.console.wraps.current_user", mock_user): + with patch("controllers.console.wraps._current_account", return_value=mock_user): with pytest.raises(AccountNotInitializedError): protected_view() @@ -163,7 +163,7 @@ class TestBillingResourceLimits: return "member_added" # Act - with patch("controllers.console.wraps.current_user"): + with patch("controllers.console.wraps._current_account", return_value=MockUser("test_user")): with patch("controllers.console.wraps.FeatureService.get_features", return_value=mock_features): result = add_member() @@ -185,7 +185,7 @@ class TestBillingResourceLimits: # Act & Assert with app.test_request_context(): - with patch("controllers.console.wraps.current_user", MockUser("test_user")): + with patch("controllers.console.wraps._current_account", 
return_value=MockUser("test_user")): with patch("controllers.console.wraps.FeatureService.get_features", return_value=mock_features): with pytest.raises(Exception) as exc_info: add_member() @@ -207,7 +207,7 @@ class TestBillingResourceLimits: # Test 1: Should reject when source is datasets with app.test_request_context("/?source=datasets"): - with patch("controllers.console.wraps.current_user", MockUser("test_user")): + with patch("controllers.console.wraps._current_account", return_value=MockUser("test_user")): with patch("controllers.console.wraps.FeatureService.get_features", return_value=mock_features): with pytest.raises(Exception) as exc_info: upload_document() @@ -215,7 +215,7 @@ class TestBillingResourceLimits: # Test 2: Should allow when source is not datasets with app.test_request_context("/?source=other"): - with patch("controllers.console.wraps.current_user", MockUser("test_user")): + with patch("controllers.console.wraps._current_account", return_value=MockUser("test_user")): with patch("controllers.console.wraps.FeatureService.get_features", return_value=mock_features): result = upload_document() assert result == "document_uploaded" @@ -239,7 +239,7 @@ class TestRateLimiting: return "knowledge_success" # Act - with patch("controllers.console.wraps.current_user"): + with patch("controllers.console.wraps._current_account", return_value=MockUser("test_user")): with patch( "controllers.console.wraps.FeatureService.get_knowledge_rate_limit", return_value=mock_rate_limit ): @@ -271,7 +271,7 @@ class TestRateLimiting: # Act & Assert with app.test_request_context(): - with patch("controllers.console.wraps.current_user", MockUser("test_user")): + with patch("controllers.console.wraps._current_account", return_value=MockUser("test_user")): with patch( "controllers.console.wraps.FeatureService.get_knowledge_rate_limit", return_value=mock_rate_limit ): From f0a60a900043f96574492103cd4a563e2c0e5173 Mon Sep 17 00:00:00 2001 From: Wu Tianwei <30284043+WTW0313@users.noreply.github.com> Date: Mon, 13 Oct 2025 10:43:51 +0800 Subject: [PATCH 45/49] feat: enhance DataSources component with marketplace plugin integration and search filtering (#26810) Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../workflow/block-selector/data-sources.tsx | 71 ++++++++++++++----- 1 file changed, 52 insertions(+), 19 deletions(-) diff --git a/web/app/components/workflow/block-selector/data-sources.tsx b/web/app/components/workflow/block-selector/data-sources.tsx index 294c7c1c79..441ede2334 100644 --- a/web/app/components/workflow/block-selector/data-sources.tsx +++ b/web/app/components/workflow/block-selector/data-sources.tsx @@ -1,10 +1,9 @@ import { useCallback, + useEffect, + useMemo, useRef, } from 'react' -import Link from 'next/link' -import { useTranslation } from 'react-i18next' -import { RiArrowRightUpLine } from '@remixicon/react' import { BlockEnum } from '../types' import type { OnSelectBlock, @@ -14,10 +13,12 @@ import type { DataSourceDefaultValue, ToolDefaultValue } from './types' import Tools from './tools' import { ViewType } from './view-type-select' import cn from '@/utils/classnames' -import type { ListRef } from '@/app/components/workflow/block-selector/market-place-plugin/list' -import { getMarketplaceUrl } from '@/utils/var' +import PluginList, { type ListRef } from '@/app/components/workflow/block-selector/market-place-plugin/list' import { useGlobalPublicStore } from '@/context/global-public-context' import { DEFAULT_FILE_EXTENSIONS_IN_LOCAL_FILE_DATA_SOURCE } 
from './constants' +import { useMarketplacePlugins } from '../../plugins/marketplace/hooks' +import { PluginType } from '../../plugins/types' +import { useGetLanguage } from '@/context/i18n' type AllToolsProps = { className?: string @@ -34,9 +35,26 @@ const DataSources = ({ onSelect, dataSources, }: AllToolsProps) => { - const { t } = useTranslation() + const language = useGetLanguage() const pluginRef = useRef(null) const wrapElemRef = useRef(null) + + const isMatchingKeywords = (text: string, keywords: string) => { + return text.toLowerCase().includes(keywords.toLowerCase()) + } + + const filteredDatasources = useMemo(() => { + const hasFilter = searchText + if (!hasFilter) + return dataSources.filter(toolWithProvider => toolWithProvider.tools.length > 0) + + return dataSources.filter((toolWithProvider) => { + return isMatchingKeywords(toolWithProvider.name, searchText) || toolWithProvider.tools.some((tool) => { + return tool.label[language].toLowerCase().includes(searchText.toLowerCase()) || tool.name.toLowerCase().includes(searchText.toLowerCase()) + }) + }) + }, [searchText, dataSources, language]) + const handleSelect = useCallback((_: any, toolDefaultValue: ToolDefaultValue) => { let defaultValue: DataSourceDefaultValue = { plugin_id: toolDefaultValue?.provider_id, @@ -55,8 +73,24 @@ const DataSources = ({ } onSelect(BlockEnum.DataSource, toolDefaultValue && defaultValue) }, [onSelect]) + const { enable_marketplace } = useGlobalPublicStore(s => s.systemFeatures) + const { + queryPluginsWithDebounced: fetchPlugins, + plugins: notInstalledPlugins = [], + } = useMarketplacePlugins() + + useEffect(() => { + if (!enable_marketplace) return + if (searchText) { + fetchPlugins({ + query: searchText, + category: PluginType.datasource, + }) + } + }, [searchText, enable_marketplace]) + return (
-        {
-          enable_marketplace && (
-            <Link
-              href={getMarketplaceUrl('')}
-              target='_blank'
-            >
-              {t('plugin.findMoreInMarketplace')}
-              <RiArrowRightUpLine />
-            </Link>
-          )
-        }
+        {/* Plugins from marketplace */}
+        {enable_marketplace && (
+          <PluginList
+            ref={pluginRef}
+            wrapElemRef={wrapElemRef}
+            list={notInstalledPlugins}
+            searchText={searchText}
+          />
+        )}
) From c692962650cb632653ff82a93a9d9d8c27797e66 Mon Sep 17 00:00:00 2001 From: Wu Tianwei <30284043+WTW0313@users.noreply.github.com> Date: Mon, 13 Oct 2025 10:44:10 +0800 Subject: [PATCH 46/49] fix: update tooltip for chunk structure in knowledge base component (#26808) --- .../nodes/knowledge-base/components/chunk-structure/index.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/app/components/workflow/nodes/knowledge-base/components/chunk-structure/index.tsx b/web/app/components/workflow/nodes/knowledge-base/components/chunk-structure/index.tsx index 6410ab706f..60aa3d5590 100644 --- a/web/app/components/workflow/nodes/knowledge-base/components/chunk-structure/index.tsx +++ b/web/app/components/workflow/nodes/knowledge-base/components/chunk-structure/index.tsx @@ -29,7 +29,7 @@ const ChunkStructure = ({ Date: Mon, 13 Oct 2025 11:17:46 +0800 Subject: [PATCH 47/49] fix: invalid data source list in plugin refresh hook (#26813) --- .../install-plugin/hooks/use-refresh-plugin-list.tsx | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/web/app/components/plugins/install-plugin/hooks/use-refresh-plugin-list.tsx b/web/app/components/plugins/install-plugin/hooks/use-refresh-plugin-list.tsx index 6294887356..024444cd6a 100644 --- a/web/app/components/plugins/install-plugin/hooks/use-refresh-plugin-list.tsx +++ b/web/app/components/plugins/install-plugin/hooks/use-refresh-plugin-list.tsx @@ -7,6 +7,7 @@ import { useInvalidateStrategyProviders } from '@/service/use-strategy' import type { Plugin, PluginDeclaration, PluginManifestInMarket } from '../../types' import { PluginType } from '../../types' import { useInvalidDataSourceList } from '@/service/use-pipeline' +import { useInvalidDataSourceListAuth } from '@/service/use-datasource' const useRefreshPluginList = () => { const invalidateInstalledPluginList = useInvalidateInstalledPluginList() @@ -19,6 +20,8 @@ const useRefreshPluginList = () => { const invalidateAllBuiltInTools = useInvalidateAllBuiltInTools() const invalidateAllDataSources = useInvalidDataSourceList() + const invalidateDataSourceListAuth = useInvalidDataSourceListAuth() + const invalidateStrategyProviders = useInvalidateStrategyProviders() return { refreshPluginList: (manifest?: PluginManifestInMarket | Plugin | PluginDeclaration | null, refreshAllType?: boolean) => { @@ -32,8 +35,10 @@ const useRefreshPluginList = () => { // TODO: update suggested tools. 
It's a function in hook useMarketplacePlugins,handleUpdatePlugins } - if ((manifest && PluginType.datasource.includes(manifest.category)) || refreshAllType) + if ((manifest && PluginType.datasource.includes(manifest.category)) || refreshAllType) { invalidateAllDataSources() + invalidateDataSourceListAuth() + } // model select if ((manifest && PluginType.model.includes(manifest.category)) || refreshAllType) { From 44d36f246087fcebc239690c9f99f29584fc76af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B1=88=E5=AE=9A?= Date: Mon, 13 Oct 2025 11:19:00 +0800 Subject: [PATCH 48/49] fix: external knowledge url check ssrf (#26789) Co-authored-by: Asuka Minato Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- api/services/external_knowledge_service.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api/services/external_knowledge_service.py b/api/services/external_knowledge_service.py index b6ba3bafea..5cd3b471f9 100644 --- a/api/services/external_knowledge_service.py +++ b/api/services/external_knowledge_service.py @@ -88,9 +88,9 @@ class ExternalDatasetService: else: raise ValueError(f"invalid endpoint: {endpoint}") try: - response = httpx.post(endpoint, headers={"Authorization": f"Bearer {api_key}"}) - except Exception: - raise ValueError(f"failed to connect to the endpoint: {endpoint}") + response = ssrf_proxy.post(endpoint, headers={"Authorization": f"Bearer {api_key}"}) + except Exception as e: + raise ValueError(f"failed to connect to the endpoint: {endpoint}") from e if response.status_code == 502: raise ValueError(f"Bad Gateway: failed to connect to the endpoint: {endpoint}") if response.status_code == 404: From d1de3cfb94abfd9e7263b322651d871c190e670b Mon Sep 17 00:00:00 2001 From: fenglin <790872612@qq.com> Date: Mon, 13 Oct 2025 13:01:44 +0800 Subject: [PATCH 49/49] fix: use enum .value strings in retrieval-setting API to fix JSON serialization error (#26785) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/controllers/console/datasets/datasets.py | 162 +++++++++---------- 1 file changed, 75 insertions(+), 87 deletions(-) diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index 72cd33eab6..f86c5dfc3c 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -45,6 +45,79 @@ def _validate_name(name: str) -> str: return name +def _get_retrieval_methods_by_vector_type(vector_type: str | None, is_mock: bool = False) -> dict[str, list[str]]: + """ + Get supported retrieval methods based on vector database type. 
+ + Args: + vector_type: Vector database type, can be None + is_mock: Whether this is a Mock API, affects MILVUS handling + + Returns: + Dictionary containing supported retrieval methods + + Raises: + ValueError: If vector_type is None or unsupported + """ + if vector_type is None: + raise ValueError("Vector store type is not configured.") + + # Define vector database types that only support semantic search + semantic_only_types = { + VectorType.RELYT, + VectorType.TIDB_VECTOR, + VectorType.CHROMA, + VectorType.PGVECTO_RS, + VectorType.VIKINGDB, + VectorType.UPSTASH, + } + + # Define vector database types that support all retrieval methods + full_search_types = { + VectorType.QDRANT, + VectorType.WEAVIATE, + VectorType.OPENSEARCH, + VectorType.ANALYTICDB, + VectorType.MYSCALE, + VectorType.ORACLE, + VectorType.ELASTICSEARCH, + VectorType.ELASTICSEARCH_JA, + VectorType.PGVECTOR, + VectorType.VASTBASE, + VectorType.TIDB_ON_QDRANT, + VectorType.LINDORM, + VectorType.COUCHBASE, + VectorType.OPENGAUSS, + VectorType.OCEANBASE, + VectorType.TABLESTORE, + VectorType.HUAWEI_CLOUD, + VectorType.TENCENT, + VectorType.MATRIXONE, + VectorType.CLICKZETTA, + VectorType.BAIDU, + VectorType.ALIBABACLOUD_MYSQL, + } + + semantic_methods = {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]} + full_methods = { + "retrieval_method": [ + RetrievalMethod.SEMANTIC_SEARCH.value, + RetrievalMethod.FULL_TEXT_SEARCH.value, + RetrievalMethod.HYBRID_SEARCH.value, + ] + } + + if vector_type == VectorType.MILVUS: + return semantic_methods if is_mock else full_methods + + if vector_type in semantic_only_types: + return semantic_methods + elif vector_type in full_search_types: + return full_methods + else: + raise ValueError(f"Unsupported vector db type {vector_type}.") + + @console_ns.route("/datasets") class DatasetListApi(Resource): @api.doc("get_datasets") @@ -777,50 +850,7 @@ class DatasetRetrievalSettingApi(Resource): @account_initialization_required def get(self): vector_type = dify_config.VECTOR_STORE - match vector_type: - case ( - VectorType.RELYT - | VectorType.TIDB_VECTOR - | VectorType.CHROMA - | VectorType.PGVECTO_RS - | VectorType.VIKINGDB - | VectorType.UPSTASH - ): - return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH]} - case ( - VectorType.QDRANT - | VectorType.WEAVIATE - | VectorType.OPENSEARCH - | VectorType.ANALYTICDB - | VectorType.MYSCALE - | VectorType.ORACLE - | VectorType.ELASTICSEARCH - | VectorType.ELASTICSEARCH_JA - | VectorType.PGVECTOR - | VectorType.VASTBASE - | VectorType.TIDB_ON_QDRANT - | VectorType.LINDORM - | VectorType.COUCHBASE - | VectorType.MILVUS - | VectorType.OPENGAUSS - | VectorType.OCEANBASE - | VectorType.TABLESTORE - | VectorType.HUAWEI_CLOUD - | VectorType.TENCENT - | VectorType.MATRIXONE - | VectorType.CLICKZETTA - | VectorType.BAIDU - | VectorType.ALIBABACLOUD_MYSQL - ): - return { - "retrieval_method": [ - RetrievalMethod.SEMANTIC_SEARCH, - RetrievalMethod.FULL_TEXT_SEARCH, - RetrievalMethod.HYBRID_SEARCH, - ] - } - case _: - raise ValueError(f"Unsupported vector db type {vector_type}.") + return _get_retrieval_methods_by_vector_type(vector_type, is_mock=False) @console_ns.route("/datasets/retrieval-setting/") @@ -833,49 +863,7 @@ class DatasetRetrievalSettingMockApi(Resource): @login_required @account_initialization_required def get(self, vector_type): - match vector_type: - case ( - VectorType.MILVUS - | VectorType.RELYT - | VectorType.TIDB_VECTOR - | VectorType.CHROMA - | VectorType.PGVECTO_RS - | VectorType.VIKINGDB - | VectorType.UPSTASH - ): - 
return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH]} - case ( - VectorType.QDRANT - | VectorType.WEAVIATE - | VectorType.OPENSEARCH - | VectorType.ANALYTICDB - | VectorType.MYSCALE - | VectorType.ORACLE - | VectorType.ELASTICSEARCH - | VectorType.ELASTICSEARCH_JA - | VectorType.COUCHBASE - | VectorType.PGVECTOR - | VectorType.VASTBASE - | VectorType.LINDORM - | VectorType.OPENGAUSS - | VectorType.OCEANBASE - | VectorType.TABLESTORE - | VectorType.TENCENT - | VectorType.HUAWEI_CLOUD - | VectorType.MATRIXONE - | VectorType.CLICKZETTA - | VectorType.BAIDU - | VectorType.ALIBABACLOUD_MYSQL - ): - return { - "retrieval_method": [ - RetrievalMethod.SEMANTIC_SEARCH, - RetrievalMethod.FULL_TEXT_SEARCH, - RetrievalMethod.HYBRID_SEARCH, - ] - } - case _: - raise ValueError(f"Unsupported vector db type {vector_type}.") + return _get_retrieval_methods_by_vector_type(vector_type, is_mock=True) @console_ns.route("/datasets//error-docs")