Compare commits


No commits in common. "main" and "1.13.3" have entirely different histories.
main ... 1.13.3

7299 changed files with 254209 additions and 414110 deletions

View File

@@ -63,7 +63,7 @@ pnpm analyze-component <path> --json
```typescript
// ❌ Before: Complex state logic in component
-function Configuration() {
+const Configuration: FC = () => {
const [modelConfig, setModelConfig] = useState<ModelConfig>(...)
const [datasetConfigs, setDatasetConfigs] = useState<DatasetConfigs>(...)
const [completionParams, setCompletionParams] = useState<FormValue>({})
@@ -85,7 +85,7 @@ export const useModelConfig = (appId: string) => {
}
// Component becomes cleaner
-function Configuration() {
+const Configuration: FC = () => {
const { modelConfig, setModelConfig } = useModelConfig(appId)
return <div>...</div>
}
@@ -189,6 +189,8 @@ const Template = useMemo(() => {
**Dify Convention**:
- This skill is for component decomposition, not query/mutation design.
+- When refactoring data fetching, follow `web/AGENTS.md`.
+- Use `frontend-query-mutation` for contracts, query shape, data-fetching wrappers, query/mutation call-site patterns, conditional queries, invalidation, and mutation error handling.
- Do not introduce deprecated `useInvalid` / `useReset`.
- Do not add thin passthrough `useQuery` wrappers during refactoring; only extract a custom hook when it truly orchestrates multiple queries/mutations or shared derived state.
@@ -365,7 +367,7 @@ For each extraction:
┌────────────────────────────────────────┐
│ 1. Extract code │
│ 2. Run: pnpm lint:fix │
-│ 3. Run: pnpm type-check │
+│ 3. Run: pnpm type-check:tsgo │
│ 4. Run: pnpm test │
│ 5. Test functionality manually │
│ 6. PASS? → Next extraction │

View File

@@ -60,10 +60,8 @@ const Template = useMemo(() => {
**After** (complexity: ~3):
```typescript
-import type { ComponentType } from 'react'
// Define lookup table outside component
-const TEMPLATE_MAP: Record<AppModeEnum, Record<string, ComponentType<TemplateProps>>> = {
+const TEMPLATE_MAP: Record<AppModeEnum, Record<string, FC<TemplateProps>>> = {
[AppModeEnum.CHAT]: {
[LanguagesSupported[1]]: TemplateChatZh,
[LanguagesSupported[7]]: TemplateChatJa,

View File

@@ -65,10 +65,10 @@ interface ConfigurationHeaderProps {
onPublish: () => void
}
-function ConfigurationHeader({
+const ConfigurationHeader: FC<ConfigurationHeaderProps> = ({
isAdvancedMode,
onPublish,
-}: ConfigurationHeaderProps) {
+}) => {
const { t } = useTranslation()
return (
@@ -136,7 +136,7 @@ const AppInfo = () => {
}
// ✅ After: Separate view components
-function AppInfoExpanded({ appDetail, onAction }: AppInfoViewProps) {
+const AppInfoExpanded: FC<AppInfoViewProps> = ({ appDetail, onAction }) => {
return (
<div className="expanded">
{/* Clean, focused expanded view */}
@@ -144,7 +144,7 @@ function AppInfoExpanded({ appDetail, onAction }: AppInfoViewProps) {
)
}
-function AppInfoCollapsed({ appDetail, onAction }: AppInfoViewProps) {
+const AppInfoCollapsed: FC<AppInfoViewProps> = ({ appDetail, onAction }) => {
return (
<div className="collapsed">
{/* Clean, focused collapsed view */}
@@ -203,12 +203,12 @@ interface AppInfoModalsProps {
onSuccess: () => void
}
-function AppInfoModals({
+const AppInfoModals: FC<AppInfoModalsProps> = ({
appDetail,
activeModal,
onClose,
onSuccess,
-}: AppInfoModalsProps) {
+}) => {
const handleEdit = async (data) => { /* logic */ }
const handleDuplicate = async (data) => { /* logic */ }
const handleDelete = async () => { /* logic */ }
@@ -296,7 +296,7 @@ interface OperationItemProps {
onAction: (id: string) => void
}
-function OperationItem({ operation, onAction }: OperationItemProps) {
+const OperationItem: FC<OperationItemProps> = ({ operation, onAction }) => {
return (
<div className="operation-item">
<span className="icon">{operation.icon}</span>
@@ -435,7 +435,7 @@ interface ChildProps {
onSubmit: () => void
}
-function Child({ value, onChange, onSubmit }: ChildProps) {
+const Child: FC<ChildProps> = ({ value, onChange, onSubmit }) => {
return (
<div>
<input value={value} onChange={e => onChange(e.target.value)} />

View File

@@ -112,13 +112,13 @@ export const useModelConfig = ({
```typescript
// Before: 50+ lines of state management
-function Configuration() {
+const Configuration: FC = () => {
const [modelConfig, setModelConfig] = useState<ModelConfig>(...)
// ... lots of related state and effects
}
// After: Clean component
-function Configuration() {
+const Configuration: FC = () => {
const {
modelConfig,
setModelConfig,
@@ -159,6 +159,8 @@ function Configuration() {
When hook extraction touches query or mutation code, do not use this reference as the source of truth for data-layer patterns.
+- Follow `web/AGENTS.md` first.
+- Use `frontend-query-mutation` for contracts, query shape, data-fetching wrappers, query/mutation call-site patterns, conditional queries, invalidation, and mutation error handling.
- Do not introduce deprecated `useInvalid` / `useReset`.
- Do not extract thin passthrough `useQuery` hooks; only extract orchestration hooks.

View File

@@ -1,79 +0,0 @@
---
name: e2e-cucumber-playwright
description: Write, update, or review Dify end-to-end tests under `e2e/` that use Cucumber, Gherkin, and Playwright. Use when the task involves `.feature` files, `features/step-definitions/`, `features/support/`, `DifyWorld`, scenario tags, locator/assertion choices, or E2E testing best practices for this repository.
---
# Dify E2E Cucumber + Playwright
Use this skill for Dify's repository-level E2E suite in `e2e/`. Use [`e2e/AGENTS.md`](../../../e2e/AGENTS.md) as the canonical guide for local architecture and conventions, then apply Playwright/Cucumber best practices only where they fit the current suite.
## Scope
- Use this skill for `.feature` files, Cucumber step definitions, `DifyWorld`, hooks, tags, and E2E review work under `e2e/`.
- Do not use this skill for Vitest or React Testing Library work under `web/`; use `frontend-testing` instead.
- Do not use this skill for backend test or API review tasks under `api/`.
## Read Order
1. Read [`e2e/AGENTS.md`](../../../e2e/AGENTS.md) first.
2. Read only the files directly involved in the task:
- target `.feature` files under `e2e/features/`
- related step files under `e2e/features/step-definitions/`
- `e2e/features/support/hooks.ts` and `e2e/features/support/world.ts` when session lifecycle or shared state matters
- `e2e/scripts/run-cucumber.ts` and `e2e/cucumber.config.ts` when tags or execution flow matter
3. Read [`references/playwright-best-practices.md`](references/playwright-best-practices.md) only when locator, assertion, isolation, or waiting choices are involved.
4. Read [`references/cucumber-best-practices.md`](references/cucumber-best-practices.md) only when scenario wording, step granularity, tags, or expression design are involved.
5. Re-check official Playwright or Cucumber docs with the available documentation tools before introducing a new framework pattern.
## Local Rules
- `e2e/` uses Cucumber for scenarios and Playwright as the browser layer.
- `DifyWorld` is the per-scenario context object. Type `this` as `DifyWorld` and use `async function`, not arrow functions (see the sketch after this list).
- Keep glue organized by capability under `e2e/features/step-definitions/`; use `common/` only for broadly reusable steps.
- Browser session behavior comes from `features/support/hooks.ts`:
- default: authenticated session with shared storage state
- `@unauthenticated`: clean browser context
- `@authenticated`: readability/selective-run tag only unless implementation changes
- `@fresh`: only for `e2e:full*` flows
- Do not import Playwright Test runner patterns that bypass the current Cucumber + `DifyWorld` architecture unless the task is explicitly about changing that architecture.
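A minimal step-definition sketch in this local style; the `page` property on `DifyWorld` and the step wording are assumptions for illustration, not steps from the real suite:

```typescript
import { Then, When } from '@cucumber/cucumber'
import { expect } from '@playwright/test'
import type { DifyWorld } from '../support/world'

// One user-visible action per step; `async function` (not an arrow)
// keeps `this` bound to the per-scenario DifyWorld.
When('I open the app named {string}', async function (this: DifyWorld, appName: string) {
  await this.page.getByRole('link', { name: appName }).click()
})

// One assertion per step, using a web-first retrying expect.
Then('I should see the app header {string}', async function (this: DifyWorld, title: string) {
  await expect(this.page.getByRole('heading', { name: title })).toBeVisible()
})
```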
## Workflow
1. Rebuild local context.
- Inspect the target feature area.
- Reuse an existing step when wording and behavior already match.
- Add a new step only for a genuinely new user action or assertion.
- Keep edits close to the current capability folder unless the step is broadly reusable.
2. Write behavior-first scenarios.
- Describe user-observable behavior, not DOM mechanics.
- Keep each scenario focused on one workflow or outcome.
- Keep scenarios independent and re-runnable.
3. Write step definitions in the local style.
- Keep one step to one user-visible action or one assertion.
- Prefer Cucumber Expressions such as `{string}` and `{int}`.
- Scope locators to stable containers when the page has repeated elements.
- Avoid page-object layers or extra helper abstractions unless repeated complexity clearly justifies them.
4. Use Playwright in the local style.
- Prefer user-facing locators: `getByRole`, `getByLabel`, `getByPlaceholder`, `getByText`, then `getByTestId` for explicit contracts.
- Use web-first `expect(...)` assertions.
- Do not use `waitForTimeout`, manual polling, or raw visibility checks when a locator action or retrying assertion already expresses the behavior.
5. Validate narrowly.
- Run the narrowest tagged scenario or flow that exercises the change.
- Run `pnpm -C e2e check`.
- Broaden verification only when the change affects hooks, tags, setup, or shared step semantics.
## Review Checklist
- Does the scenario describe behavior rather than implementation?
- Does it fit the current session model, tags, and `DifyWorld` usage?
- Should an existing step be reused instead of adding a new one?
- Are locators user-facing and assertions web-first?
- Does the change introduce hidden coupling across scenarios, tags, or instance state?
- Does it document or implement behavior that differs from the real hooks or configuration?
Lead findings with correctness, flake risk, and architecture drift.
## References
- [`references/playwright-best-practices.md`](references/playwright-best-practices.md)
- [`references/cucumber-best-practices.md`](references/cucumber-best-practices.md)

View File

@@ -1,4 +0,0 @@
interface:
display_name: "E2E Cucumber + Playwright"
short_description: "Write and review Dify E2E scenarios."
default_prompt: "Use $e2e-cucumber-playwright to write or review a Dify E2E scenario under e2e/."

View File

@@ -1,93 +0,0 @@
# Cucumber Best Practices For Dify E2E
Use this reference when writing or reviewing Gherkin scenarios, step definitions, parameter expressions, and step reuse in Dify's `e2e/` suite.
Official sources:
- https://cucumber.io/docs/guides/10-minute-tutorial/
- https://cucumber.io/docs/cucumber/step-definitions/
- https://cucumber.io/docs/cucumber/cucumber-expressions/
## What Matters Most
### 1. Treat scenarios as executable specifications
Cucumber scenarios should describe examples of behavior, not test implementation recipes.
Apply it like this:
- write what the user does and what should happen
- avoid UI-internal wording such as selector details, DOM structure, or component names
- keep language concrete enough that the scenario reads like living documentation
### 2. Keep scenarios focused
A scenario should usually prove one workflow or business outcome. If a scenario wanders across several unrelated behaviors, split it.
In Dify's suite, this means:
- one capability-focused scenario per feature path
- no long setup chains when existing bootstrap or reusable steps already cover them
- no hidden dependency on another scenario's side effects
### 3. Reuse steps, but only when behavior really matches
Good reuse reduces duplication. Bad reuse hides meaning.
Prefer reuse when:
- the user action is genuinely the same
- the expected outcome is genuinely the same
- the wording stays natural across features
Write a new step when:
- the behavior is materially different
- reusing the old wording would make the scenario misleading
- a supposedly generic step would become an implementation-detail wrapper
### 4. Prefer Cucumber Expressions
Use Cucumber Expressions for parameters unless regex is clearly necessary.
Common examples:
- `{string}` for labels, names, and visible text
- `{int}` for counts
- `{float}` for decimal values
- `{word}` only when the value is truly a single token
Keep expressions readable. If a step needs complicated parsing logic, first ask whether the scenario wording should be simpler.
### 5. Keep step definitions thin and meaningful
Step definitions are glue between Gherkin and automation, not a second abstraction language.
For Dify:
- type `this` as `DifyWorld`
- use `async function`
- keep each step to one user-visible action or assertion
- rely on `DifyWorld` and existing support code for shared context
- avoid leaking cross-scenario state
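A hedged sketch of points 4 and 5 together, assuming the same hypothetical `page` property on `DifyWorld` and invented wording:

```typescript
import { Then, When } from '@cucumber/cucumber'
import { expect } from '@playwright/test'
import type { DifyWorld } from '../support/world'

// Matches: When I rename the dataset to "Support Docs"
When('I rename the dataset to {string}', async function (this: DifyWorld, name: string) {
  await this.page.getByLabel('Dataset name').fill(name)
})

// Matches: Then I should see 3 documents in the list
Then('I should see {int} documents in the list', async function (this: DifyWorld, count: number) {
  await expect(this.page.getByRole('listitem')).toHaveCount(count)
})
```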
### 6. Use tags intentionally
Tags should communicate run scope or session semantics, not become ad hoc metadata.
In Dify's current suite:
- capability tags group related scenarios
- `@unauthenticated` changes session behavior
- `@authenticated` is descriptive/selective, not a behavior switch by itself
- `@fresh` belongs to reset/full-install flows only
If a proposed tag implies behavior, verify that hooks or runner configuration actually implement it.
## Review Questions
- Does the scenario read like a real example of product behavior?
- Are the steps behavior-oriented instead of implementation-oriented?
- Is a reused step still truthful in this feature?
- Is a new tag documenting real behavior, or inventing semantics that the suite does not implement?
- Would a new reader understand the outcome without opening the step-definition file?

View File

@@ -1,96 +0,0 @@
# Playwright Best Practices For Dify E2E
Use this reference when writing or reviewing locator, assertion, isolation, or synchronization logic for Dify's Cucumber-based E2E suite.
Official sources:
- https://playwright.dev/docs/best-practices
- https://playwright.dev/docs/locators
- https://playwright.dev/docs/test-assertions
- https://playwright.dev/docs/browser-contexts
## What Matters Most
### 1. Keep scenarios isolated
Playwright's model is built around clean browser contexts so one test does not leak into another. In Dify's suite, that principle maps to per-scenario session setup in `features/support/hooks.ts` and `DifyWorld`.
Apply it like this:
- do not depend on another scenario having run first
- do not persist ad hoc scenario state outside `DifyWorld`
- do not couple ordinary scenarios to `@fresh` behavior
- when a flow needs special auth/session semantics, express that through the existing tag model or explicit hook changes
### 2. Prefer user-facing locators
Playwright recommends built-in locators that reflect what users perceive on the page.
Preferred order in this repository:
1. `getByRole`
2. `getByLabel`
3. `getByPlaceholder`
4. `getByText`
5. `getByTestId` when an explicit test contract is the most stable option
Avoid raw CSS/XPath selectors unless no stable user-facing contract exists and adding one is not practical.
Also remember:
- repeated content usually needs scoping to a stable container
- exact text matching is often too brittle when role/name or label already exists
- `getByTestId` is acceptable when semantics are weak but the contract is intentional
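A sketch of container scoping with user-facing locators; the card text and test id below are hypothetical:

```typescript
import { expect, type Page } from '@playwright/test'

async function editSupportBot(page: Page) {
  // Scope to a stable container first, then locate by role inside it.
  const appCard = page.getByRole('listitem').filter({ hasText: 'Customer Support Bot' })
  await appCard.getByRole('button', { name: 'Edit' }).click()

  // Test id only where no stable role/label contract exists.
  await expect(page.getByTestId('usage-panel').getByText('Tokens used')).toBeVisible()
}
```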
### 3. Use web-first assertions
Playwright assertions auto-wait and retry. Prefer them over manual state inspection.
Prefer:
- `await expect(page).toHaveURL(...)`
- `await expect(locator).toBeVisible()`
- `await expect(locator).toBeHidden()`
- `await expect(locator).toBeEnabled()`
- `await expect(locator).toHaveText(...)`
Avoid:
- `expect(await locator.isVisible()).toBe(true)`
- custom polling loops for DOM state
- `waitForTimeout` as synchronization
If a condition genuinely needs custom retry logic, use Playwright's polling/assertion tools deliberately and keep that choice local and explicit.
### 4. Let actions wait for actionability
Locator actions already wait for the element to be actionable. Do not preface every click/fill with extra timing logic unless the action needs a specific visible/ready assertion for clarity.
Good pattern:
- assert a meaningful visible state when that is part of the behavior
- then click/fill/select via locator APIs
Bad pattern:
- stack arbitrary waits before every action
- wait on unstable implementation details instead of the visible state the user cares about
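A sketch of the difference; the dialog and button names are hypothetical:

```typescript
import { expect, type Page } from '@playwright/test'

async function saveSettings(page: Page) {
  // ✅ Assert the state that matters, then act; click() itself waits
  // for the button to be visible, stable, and enabled.
  await expect(page.getByRole('dialog', { name: 'Settings' })).toBeVisible()
  await page.getByRole('button', { name: 'Save' }).click()
  // ❌ Avoid: await page.waitForTimeout(2000) before each action;
  // it hides real readiness problems and slows the suite.
}
```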
### 5. Match debugging to the current suite
Playwright's wider ecosystem supports traces and rich debugging tools. Dify's current suite already captures:
- full-page screenshots
- page HTML
- console errors
- page errors
Use the existing artifact flow by default. If a task is specifically about improving diagnostics, confirm the change fits the current Cucumber architecture before importing broader Playwright tooling.
## Review Questions
- Would this locator survive DOM refactors that do not change user-visible behavior?
- Is this assertion using Playwright's retrying semantics?
- Is any explicit wait masking a real readiness problem?
- Does this code preserve per-scenario isolation?
- Is a new abstraction really needed, or does it bypass the existing `DifyWorld` + step-definition model?

View File

@@ -9,18 +9,18 @@ Category: Performance
When rendering React Flow, prefer `useNodes`/`useEdges` for UI consumption and rely on `useStoreApi` inside callbacks that mutate or read node/edge state. Avoid manually pulling Flow data outside of these hooks.
-## Complex prop stability
-IsUrgent: False
+## Complex prop memoization
+IsUrgent: True
Category: Performance
### Description
-Only require stable object, array, or map props when there is a clear reason: the child is memoized, the value participates in effect/query dependencies, the value is part of a stable-reference API contract, or profiling/local behavior shows avoidable re-renders. Do not request `useMemo` for every inline object by default; `how-to-write-component` treats memoization as a targeted optimization.
+Wrap complex prop values (objects, arrays, maps) in `useMemo` prior to passing them into child components to guarantee stable references and prevent unnecessary renders.
Update this file when adding, editing, or removing Performance rules so the catalog remains accurate.
-Risky:
+Wrong:
```tsx
<HeavyComp
@@ -31,7 +31,7 @@ Risky:
/>
```
-Better when stable identity matters:
+Right:
```tsx
const config = useMemo(() => ({

View File

@@ -0,0 +1,44 @@
---
name: frontend-query-mutation
description: Guide for implementing Dify frontend query and mutation patterns with TanStack Query and oRPC. Trigger when creating or updating contracts in web/contract, wiring router composition, consuming consoleQuery or marketplaceQuery in components or services, deciding whether to call queryOptions() directly or extract a helper or use-* hook, handling conditional queries, cache invalidation, mutation error handling, or migrating legacy service calls to contract-first query and mutation helpers.
---
# Frontend Query & Mutation
## Intent
- Keep contract as the single source of truth in `web/contract/*`.
- Prefer contract-shaped `queryOptions()` and `mutationOptions()`.
- Keep invalidation and mutation flow knowledge in the service layer.
- Keep abstractions minimal to preserve TypeScript inference.
## Workflow
1. Identify the change surface.
- Read `references/contract-patterns.md` for contract files, router composition, client helpers, and query or mutation call-site shape.
- Read `references/runtime-rules.md` for conditional queries, invalidation, error handling, and legacy migrations.
- Read both references when a task spans contract shape and runtime behavior.
2. Implement the smallest abstraction that fits the task.
- Default to direct `useQuery(...)` or `useMutation(...)` calls with oRPC helpers at the call site.
- Extract a small shared query helper only when multiple call sites share the same extra options.
- Create `web/service/use-{domain}.ts` only for orchestration or shared domain behavior.
3. Preserve Dify conventions.
- Keep contract inputs in `{ params, query?, body? }` shape.
- Bind invalidation in the service-layer mutation definition.
- Prefer `mutate(...)`; use `mutateAsync(...)` only when Promise semantics are required.
## Files Commonly Touched
- `web/contract/console/*.ts`
- `web/contract/marketplace.ts`
- `web/contract/router.ts`
- `web/service/client.ts`
- `web/service/use-*.ts`
- component and hook call sites using `consoleQuery` or `marketplaceQuery`
## References
- Use `references/contract-patterns.md` for contract shape, router registration, query and mutation helpers, and anti-patterns that degrade inference.
- Use `references/runtime-rules.md` for conditional queries, invalidation, `mutate` versus `mutateAsync`, and legacy migration rules.
Treat this skill as the single query and mutation entry point for Dify frontend work. Keep detailed rules in the reference files instead of duplicating them in project docs.

View File

@@ -0,0 +1,4 @@
interface:
display_name: "Frontend Query & Mutation"
short_description: "Dify TanStack Query and oRPC patterns"
default_prompt: "Use this skill when implementing or reviewing Dify frontend contracts, query and mutation call sites, conditional queries, invalidation, or legacy query/mutation migrations."

View File

@@ -0,0 +1,98 @@
# Contract Patterns
## Table of Contents
- Intent
- Minimal structure
- Core workflow
- Query usage decision rule
- Mutation usage decision rule
- Anti-patterns
- Contract rules
- Type export
## Intent
- Keep contract as the single source of truth in `web/contract/*`.
- Default query usage to call-site `useQuery(consoleQuery|marketplaceQuery.xxx.queryOptions(...))` when endpoint behavior maps 1:1 to the contract.
- Keep abstractions minimal and preserve TypeScript inference.
## Minimal Structure
```text
web/contract/
├── base.ts
├── router.ts
├── marketplace.ts
└── console/
├── billing.ts
└── ...other domains
web/service/client.ts
```
## Core Workflow
1. Define contract in `web/contract/console/{domain}.ts` or `web/contract/marketplace.ts` (see the sketch after this workflow).
- Use `base.route({...}).output(type<...>())` as the baseline.
- Add `.input(type<...>())` only when the request has `params`, `query`, or `body`.
- For `GET` without input, omit `.input(...)`; do not use `.input(type<unknown>())`.
2. Register contract in `web/contract/router.ts`.
- Import directly from domain files and nest by API prefix.
3. Consume from UI call sites via oRPC query utilities.
```typescript
import { useQuery } from '@tanstack/react-query'
import { consoleQuery } from '@/service/client'
const invoiceQuery = useQuery(consoleQuery.billing.invoices.queryOptions({
staleTime: 5 * 60 * 1000,
throwOnError: true,
select: invoice => invoice.url,
}))
```
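For step 1, a minimal contract sketch under the rules in this reference; the billing endpoints, paths, and `Invoice` type are illustrative, and the `type` helper import is an assumption about how this codebase wires oRPC:

```typescript
// web/contract/console/billing.ts (sketch)
import { type } from '@orpc/contract'
import { base } from '../base'
import type { Invoice } from '@/types/billing'

// GET without input: omit .input(...) entirely.
export const invoices = base
  .route({ method: 'GET', path: '/billing/invoices' })
  .output(type<Invoice[]>())

// Path params use {paramName} and the { params, query?, body? } input shape.
export const invoice = base
  .route({ method: 'GET', path: '/billing/invoices/{invoiceId}' })
  .input(type<{ params: { invoiceId: string } }>())
  .output(type<Invoice>())
```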
## Query Usage Decision Rule
1. Default to direct `*.queryOptions(...)` usage at the call site.
2. If 3 or more call sites share the same extra options, extract a small query helper, not a `use-*` passthrough hook.
3. Create `web/service/use-{domain}.ts` only for orchestration.
- Combine multiple queries or mutations.
- Share domain-level derived state or invalidation helpers.
```typescript
const invoicesBaseQueryOptions = () =>
consoleQuery.billing.invoices.queryOptions({ retry: false })
const invoiceQuery = useQuery({
...invoicesBaseQueryOptions(),
throwOnError: true,
})
```
## Mutation Usage Decision Rule
1. Default to mutation helpers from `consoleQuery` or `marketplaceQuery`, for example `useMutation(consoleQuery.billing.bindPartnerStack.mutationOptions(...))`.
2. If the mutation flow is heavily custom, use oRPC clients as `mutationFn`, for example `consoleClient.xxx` or `marketplaceClient.xxx`, instead of handwritten non-oRPC mutation logic.
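A sketch of both options inside an orchestration hook; `bindPartnerStack` comes from the examples above, while `exportInvoices` is a hypothetical custom flow:

```typescript
import { useMutation } from '@tanstack/react-query'
import { consoleClient, consoleQuery } from '@/service/client'

export const useBillingActions = () => {
  // 1. Default: contract-shaped mutation helper.
  const bindPartner = useMutation(consoleQuery.billing.bindPartnerStack.mutationOptions({
    // UI feedback only; shared invalidation stays in the service layer.
  }))

  // 2. Heavily custom flow: oRPC client as mutationFn instead of handwritten logic.
  const exportInvoices = useMutation({
    mutationFn: (input: { body: { month: string } }) => consoleClient.billing.exportInvoices(input),
  })

  return { bindPartner, exportInvoices }
}
```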
## Anti-Patterns
- Do not wrap `useQuery` with `options?: Partial<UseQueryOptions>`.
- Do not split local `queryKey` and `queryFn` when oRPC `queryOptions` already exists and fits the use case.
- Do not create thin `use-*` passthrough hooks for a single endpoint.
- These patterns can degrade inference, especially around `throwOnError` and `select`, and add unnecessary indirection.
## Contract Rules
- Input structure: always use `{ params, query?, body? }`.
- No-input `GET`: omit `.input(...)`; do not use `.input(type<unknown>())`.
- Path params: use `{paramName}` in the path and match it in the `params` object.
- Router nesting: group by API prefix, for example `/billing/*` becomes `billing: {}`.
- No barrel files: import directly from specific files.
- Types: import from `@/types/` and use the `type<T>()` helper.
- Mutations: prefer `mutationOptions`; use explicit `mutationKey` mainly for defaults, filtering, and devtools.
## Type Export
```typescript
export type ConsoleInputs = InferContractRouterInputs<typeof consoleRouterContract>
```

View File

@@ -0,0 +1,133 @@
# Runtime Rules
## Table of Contents
- Conditional queries
- Cache invalidation
- Key API guide
- `mutate` vs `mutateAsync`
- Legacy migration
## Conditional Queries
Prefer contract-shaped `queryOptions(...)`.
When required input is missing, prefer `input: skipToken` instead of placeholder params or non-null assertions.
Use `enabled` only for extra business gating after the input itself is already valid.
```typescript
import { skipToken, useQuery } from '@tanstack/react-query'
// Disable the query by skipping input construction.
function useAccessMode(appId: string | undefined) {
return useQuery(consoleQuery.accessControl.appAccessMode.queryOptions({
input: appId
? { params: { appId } }
: skipToken,
}))
}
// Avoid runtime-only guards that bypass type checking.
function useBadAccessMode(appId: string | undefined) {
return useQuery(consoleQuery.accessControl.appAccessMode.queryOptions({
input: { params: { appId: appId! } },
enabled: !!appId,
}))
}
```
## Cache Invalidation
Bind invalidation in the service-layer mutation definition.
Components may add UI feedback in call-site callbacks, but they should not decide which queries to invalidate.
Use:
- `.key()` for namespace or prefix invalidation
- `.queryKey(...)` only for exact cache reads or writes such as `getQueryData` and `setQueryData`
- `queryClient.invalidateQueries(...)` in mutation `onSuccess`
Do not use deprecated `useInvalid` from `use-base.ts`.
```typescript
// Service layer owns cache invalidation.
export const useUpdateAccessMode = () => {
const queryClient = useQueryClient()
return useMutation(consoleQuery.accessControl.updateAccessMode.mutationOptions({
onSuccess: () => {
queryClient.invalidateQueries({
queryKey: consoleQuery.accessControl.appWhitelistSubjects.key(),
})
},
}))
}
// Component only adds UI behavior.
updateAccessMode({ appId, mode }, {
onSuccess: () => Toast.notify({ type: 'success', message: '...' }),
})
// Avoid putting invalidation knowledge in the component.
mutate({ appId, mode }, {
onSuccess: () => {
queryClient.invalidateQueries({
queryKey: consoleQuery.accessControl.appWhitelistSubjects.key(),
})
},
})
```
## Key API Guide
- `.key(...)`
- Use for partial matching operations.
- Prefer it for invalidation, refetch, and cancel patterns.
- Example: `queryClient.invalidateQueries({ queryKey: consoleQuery.billing.key() })`
- `.queryKey(...)`
- Use for a specific query's full key.
- Prefer it for exact cache addressing and direct reads or writes.
- `.mutationKey(...)`
- Use for a specific mutation's full key.
- Prefer it for mutation defaults registration, mutation-status filtering, and devtools grouping.
## `mutate` vs `mutateAsync`
Prefer `mutate` by default.
Use `mutateAsync` only when Promise semantics are truly required, such as parallel mutations or sequential steps with result dependencies.
Rules:
- Event handlers should usually call `mutate(...)` with `onSuccess` or `onError`.
- Every `await mutateAsync(...)` must be wrapped in `try/catch`.
- Do not use `mutateAsync` when callbacks already express the flow clearly.
```typescript
// Default case.
mutation.mutate(data, {
onSuccess: result => router.push(result.url),
})
// Promise semantics are required.
try {
const order = await createOrder.mutateAsync(orderData)
await confirmPayment.mutateAsync({ orderId: order.id, token })
router.push(`/orders/${order.id}`)
}
catch (error) {
Toast.notify({
type: 'error',
message: error instanceof Error ? error.message : 'Unknown error',
})
}
```
## Legacy Migration
When touching old code, migrate it toward these rules:
| Old pattern | New pattern |
|---|---|
| `useInvalid(key)` in service layer | `queryClient.invalidateQueries(...)` inside mutation `onSuccess` |
| component-triggered invalidation after mutation | move invalidation into the service-layer mutation definition |
| imperative fetch plus manual invalidation | wrap it in `useMutation(...mutationOptions(...))` |
| `await mutateAsync()` without `try/catch` | switch to `mutate(...)` or add `try/catch` |

View File

@@ -5,7 +5,7 @@ description: Generate Vitest + React Testing Library tests for Dify frontend com
# Dify Frontend Testing Skill
-This skill enables Codex to generate high-quality, comprehensive frontend tests for the Dify project following established conventions and best practices.
+This skill enables Claude to generate high-quality, comprehensive frontend tests for the Dify project following established conventions and best practices.
> **⚠️ Authoritative Source**: This skill is derived from `web/docs/test.md`. Use Vitest mock/timer APIs (`vi.*`).
@@ -24,27 +24,35 @@ Apply this skill when the user:
**Do NOT apply** when:
- User is asking about backend/API tests (Python/pytest)
-- User is asking about E2E tests (Cucumber + Playwright under `e2e/`)
+- User is asking about E2E tests (Playwright/Cypress)
- User is only asking conceptual questions without code context
## Quick Reference
-### Key Commands
-Run these commands from `web/`. From the repository root, prefix them with `pnpm -C web`.
+### Tech Stack
+| Tool | Version | Purpose |
+|------|---------|---------|
+| Vitest | 4.0.16 | Test runner |
+| React Testing Library | 16.0 | Component testing |
+| jsdom | - | Test environment |
+| nock | 14.0 | HTTP mocking |
+| TypeScript | 5.x | Type safety |
+### Key Commands
```bash
# Run all tests
pnpm test
# Watch mode
-pnpm test --watch
+pnpm test:watch
# Run specific file
pnpm test path/to/file.spec.tsx
# Generate coverage report
-pnpm test --coverage
+pnpm test:coverage
# Analyze component complexity
pnpm analyze-component <path>
@@ -192,7 +200,7 @@ When assigned to test a directory/path, test **ALL content** within that path:
- ✅ **Import real project components** directly (including base components and siblings)
- ✅ **Only mock**: API services (`@/service/*`), `next/navigation`, complex context providers
-- ❌ **DO NOT mock** base components (`@/app/components/base/*`) or dify-ui primitives (`@langgenius/dify-ui/*`)
+- ❌ **DO NOT mock** base components (`@/app/components/base/*`)
- ❌ **DO NOT mock** sibling/child components in the same directory
> See [Test Structure Template](#test-structure-template) for correct import/mock patterns.
@@ -220,10 +228,7 @@ Every test should clearly separate:
### 2. Black-Box Testing
- Test observable behavior, not implementation details
-- Use semantic queries (`getByRole` with accessible `name`, `getByLabelText`, `getByPlaceholderText`, `getByText`, and scoped `within(...)`)
-- Treat `getByTestId` as a last resort. If a control cannot be found by role/name, label, landmark, or dialog scope, fix the component accessibility first instead of adding or relying on `data-testid`.
-- Remove production `data-testid` attributes when semantic selectors can cover the behavior. Keep them only for non-visual mocked boundaries, editor/browser shims such as Monaco, canvas/chart output, or third-party widgets with no accessible DOM in the test environment.
-- Do not assert decorative icons by test id. Assert the named control that contains them, or mark decorative icons `aria-hidden`.
+- Use semantic queries (getByRole, getByLabelText)
- Avoid testing internal state directly
- **Prefer pattern matching over hardcoded strings** in assertions:
@@ -320,12 +325,12 @@ For more detailed information, refer to:
### Reference Examples in Codebase
- `web/utils/classnames.spec.ts` - Utility function tests
-- `web/app/components/base/radio/__tests__/index.spec.tsx` - Component tests
+- `web/app/components/base/button/index.spec.tsx` - Component tests
- `web/__mocks__/provider-context.ts` - Mock factory example
### Project Configuration
-- `web/vite.config.ts` - Vite/Vitest configuration
+- `web/vitest.config.ts` - Vitest configuration
- `web/vitest.setup.ts` - Test environment setup
- `web/scripts/analyze-component.js` - Component analysis tool
- Modules are not mocked automatically. Global mocks live in `web/vitest.setup.ts` (for example `react-i18next`, `next/image`); mock other modules like `ky` or `mime` locally in test files.

View File

@@ -36,7 +36,7 @@ Use this checklist when generating or reviewing tests for Dify frontend componen
### Integration vs Mocking
-- [ ] **DO NOT mock base components or dify-ui primitives** (base `Loading`, `Input`, `Badge`; dify-ui `Button`, `Tooltip`, `Dialog`, etc.)
+- [ ] **DO NOT mock base components** (`Loading`, `Button`, `Tooltip`, etc.)
- [ ] Import real project components instead of mocking
- [ ] Only mock: API calls, complex context providers, third-party libs with side effects
- [ ] Prefer integration testing when using single spec file
@@ -73,7 +73,7 @@ Use this checklist when generating or reviewing tests for Dify frontend componen
### Mocks
-- [ ] **DO NOT mock base components or dify-ui primitives** (`@/app/components/base/*` or `@langgenius/dify-ui/*`)
+- [ ] **DO NOT mock base components** (`@/app/components/base/*`)
- [ ] `vi.clearAllMocks()` in `beforeEach` (not `afterEach`)
- [ ] Shared mock state reset in `beforeEach`
- [ ] i18n uses global mock (auto-loaded in `web/vitest.setup.ts`); only override locally for custom translations
@@ -127,7 +127,7 @@ For the current file being tested:
- [ ] Run full directory test: `pnpm test path/to/directory/`
- [ ] Check coverage report: `pnpm test:coverage`
- [ ] Run `pnpm lint:fix` on all test files
-- [ ] Run `pnpm type-check`
+- [ ] Run `pnpm type-check:tsgo`
## Common Issues to Watch

View File

@@ -2,27 +2,29 @@
## ⚠️ Important: What NOT to Mock
-### DO NOT Mock Base Components or dify-ui Primitives
-**Never mock components from `@/app/components/base/` or from `@langgenius/dify-ui/*`** such as:
-- Legacy base (`@/app/components/base/*`): `Loading`, `Spinner`, `Input`, `Badge`, `Tag`
-- dify-ui primitives (`@langgenius/dify-ui/*`): `Button`, `Tooltip`, `Dialog`, `Popover`, `DropdownMenu`, `ContextMenu`, `Select`, `AlertDialog`, `Toast`
+### DO NOT Mock Base Components
+**Never mock components from `@/app/components/base/`** such as:
+- `Loading`, `Spinner`
+- `Button`, `Input`, `Select`
+- `Tooltip`, `Modal`, `Dropdown`
+- `Icon`, `Badge`, `Tag`
**Why?**
-- These components have their own dedicated tests
+- Base components will have their own dedicated tests
- Mocking them creates false positives (tests pass but real integration fails)
- Using real components tests actual integration behavior
```typescript
-// ❌ WRONG: Don't mock base components or dify-ui primitives
+// ❌ WRONG: Don't mock base components
vi.mock('@/app/components/base/loading', () => () => <div>Loading</div>)
-vi.mock('@langgenius/dify-ui/button', () => ({ Button: ({ children }: any) => <button>{children}</button> }))
+vi.mock('@/app/components/base/button', () => ({ children }: any) => <button>{children}</button>)
-// ✅ CORRECT: Import and use the real components
+// ✅ CORRECT: Import and use real base components
import Loading from '@/app/components/base/loading'
-import { Button } from '@langgenius/dify-ui/button'
+import Button from '@/app/components/base/button'
// They will render normally in tests
```
@@ -56,7 +58,7 @@ See [Zustand Store Testing](#zustand-store-testing) section for full details.
| Location | Purpose |
|----------|---------|
-| `web/vitest.setup.ts` | Global mocks shared by all tests (`react-i18next`, `zustand`, clipboard, FloatingPortal, Monaco, `localStorage`) |
+| `web/vitest.setup.ts` | Global mocks shared by all tests (`react-i18next`, `next/image`, `zustand`) |
| `web/__mocks__/zustand.ts` | Zustand mock implementation (auto-resets stores after each test) |
| `web/__mocks__/` | Reusable mock factories shared across multiple test files |
| Test file | Test-specific mocks, inline with `vi.mock()` |
@@ -216,21 +218,28 @@ describe('Component', () => {
})
```
-### 5. HTTP and `fetch` Mocking
+### 5. HTTP Mocking with Nock
```typescript
+import nock from 'nock'
+const GITHUB_HOST = 'https://api.github.com'
+const GITHUB_PATH = '/repos/owner/repo'
+const mockGithubApi = (status: number, body: Record<string, unknown>, delayMs = 0) => {
+return nock(GITHUB_HOST)
+.get(GITHUB_PATH)
+.delay(delayMs)
+.reply(status, body)
+}
describe('GithubComponent', () => {
-beforeEach(() => {
-vi.clearAllMocks()
+afterEach(() => {
+nock.cleanAll()
})
it('should display repo info', async () => {
-vi.mocked(globalThis.fetch).mockResolvedValueOnce(
-new Response(JSON.stringify({ name: 'dify', stars: 1000 }), {
-status: 200,
-headers: { 'Content-Type': 'application/json' },
-}),
-)
+mockGithubApi(200, { name: 'dify', stars: 1000 })
render(<GithubComponent />)
@@ -240,12 +249,7 @@ describe('GithubComponent', () => {
})
it('should handle API error', async () => {
-vi.mocked(globalThis.fetch).mockResolvedValueOnce(
-new Response(JSON.stringify({ message: 'Server error' }), {
-status: 500,
-headers: { 'Content-Type': 'application/json' },
-}),
-)
+mockGithubApi(500, { message: 'Server error' })
render(<GithubComponent />)
@@ -256,8 +260,6 @@ describe('GithubComponent', () => {
})
```
-Prefer mocking `@/service/*` modules or spying on `global.fetch` / `ky` clients with deterministic responses. Do not introduce an HTTP interception dependency such as `nock` or MSW unless it is already declared in the workspace or adding it is part of the task.
### 6. Context Providers
```typescript
@@ -317,7 +319,7 @@ const renderWithQueryClient = (ui: React.ReactElement) => {
### ✅ DO
-1. **Use real base components and dify-ui primitives** - Import from `@/app/components/base/` or `@langgenius/dify-ui/*` directly
+1. **Use real base components** - Import from `@/app/components/base/` directly
1. **Use real project components** - Prefer importing over mocking
1. **Use real Zustand stores** - Set test state via `store.setState()`
1. **Reset mocks in `beforeEach`**, not `afterEach`
@@ -328,11 +330,11 @@ const renderWithQueryClient = (ui: React.ReactElement) => {
### ❌ DON'T
-1. **Don't mock base components or dify-ui primitives** (`Loading`, `Input`, `Button`, `Tooltip`, `Dialog`, etc.)
+1. **Don't mock base components** (`Loading`, `Button`, `Tooltip`, etc.)
1. **Don't mock Zustand store modules** - Use real stores with `setState()`
1. Don't mock components you can import directly
1. Don't create overly simplified mocks that miss conditional logic
-1. Don't leave HTTP mocks or service mock state leaking between tests
+1. Don't forget to clean up nock after each test
1. Don't use `any` types in mocks without necessity
### Mock Decision Tree
@@ -340,7 +342,7 @@ const renderWithQueryClient = (ui: React.ReactElement) => {
```
Need to use a component in test?
-├─ Is it from @/app/components/base/* or @langgenius/dify-ui/*?
+├─ Is it from @/app/components/base/*?
│ └─ YES → Import real component, DO NOT mock
├─ Is it a project component?

View File

@@ -227,12 +227,12 @@ Failing tests compound:
**Fix failures immediately before proceeding.**
-## Integration with Codex's Todo Feature
-When using Codex for multi-file testing:
-1. **Create a todo list** before starting
-1. **Process one file at a time**
+## Integration with Claude's Todo Feature
+When using Claude for multi-file testing:
+1. **Ask Claude to create a todo list** before starting
+1. **Request one file at a time** or ensure Claude processes incrementally
1. **Verify each test passes** before asking for the next
1. **Mark todos complete** as you progress

View File

@@ -1,71 +0,0 @@
---
name: how-to-write-component
description: React/TypeScript component style guide. Use when writing, refactoring, or reviewing React components, especially around props typing, state boundaries, shared local state with Jotai atoms, API types, query/mutation contracts, navigation, memoization, wrappers, and empty-state handling.
---
# How To Write A Component
Use this as the decision guide for React/TypeScript component structure. Existing code is reference material, not automatic precedent; when it conflicts with these rules, adapt the approach instead of reproducing the violation.
## Core Defaults
- Search before adding UI, hooks, helpers, or styling patterns. Reuse existing base components, feature components, hooks, utilities, and design styles when they fit.
- Group code by feature workflow, route, or ownership area: components, hooks, local types, query helpers, atoms, constants, and small utilities should live near the code that changes with them.
- Promote code to shared only when multiple verticals need the same stable primitive. Otherwise keep it local and compose shared primitives inside the owning feature.
- Use Tailwind CSS v4.1+ rules via the `tailwind-css-rules` skill. Prefer v4 utilities, `gap`, `text-size/line-height`, `min-h-dvh`, and avoid deprecated utilities and `@apply`.
## Ownership
- Put local state, queries, mutations, handlers, and derived UI data in the lowest component that uses them. Extract a purpose-built owner component only when the logic has no natural home.
- Repeated TanStack query calls in sibling components are acceptable when each component independently consumes the data. Do not hoist a query only because it is duplicated; TanStack Query handles deduplication and cache sharing.
- Hoist state, queries, or callbacks to a parent only when the parent consumes the data, coordinates shared loading/error/empty UI, needs one consistent snapshot, or owns a workflow spanning children.
- Avoid prop drilling. One pass-through layer is acceptable; repeated forwarding means ownership should move down or into feature-scoped Jotai UI state. Keep server/cache state in query and API data flow.
- Keep callbacks in a parent only for workflow coordination such as form submission, shared selection, batch behavior, or navigation. Otherwise let the child or row own its action.
- Prefer uncontrolled DOM state and CSS variables before adding controlled props.
## Components, Props, And Types
- Type component signatures directly; do not use `FC` or `React.FC` (see the sketch after this list).
- Prefer `function` for top-level components and module helpers. Use arrow functions for local callbacks, handlers, and lambda-style APIs.
- Prefer named exports. Use default exports only where the framework requires them, such as Next.js route files.
- Type simple one-off props inline. Use a named `Props` type only when reused, exported, complex, or clearer.
- Use API-generated or API-returned types at component boundaries. Keep small UI conversion helpers beside the component that needs them.
- Name values by their domain role and backend API contract, and keep that name stable across the call chain, especially IDs like `appInstanceId`. Normalize framework or route params at the boundary.
- Keep fallback and invariant checks at the lowest component that already handles that state; callers should pass raw values through instead of duplicating checks.
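A minimal sketch of these signature defaults; the component, props, and callback below are illustrative:

```tsx
import type { FocusEvent } from 'react'

// One-off props typed inline; no FC/React.FC; named export; top-level
// `function` for the component, arrow function for the local handler.
export function AppRenameField({ appInstanceId, initialName, onRename }: {
  appInstanceId: string
  initialName: string
  onRename: (appInstanceId: string, name: string) => void
}) {
  const handleBlur = (event: FocusEvent<HTMLInputElement>) => {
    onRename(appInstanceId, event.target.value)
  }
  return <input defaultValue={initialName} onBlur={handleBlur} />
}
```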
## Queries And Mutations
- Keep `web/contract/*` as the single source of truth for API shape; follow existing domain/router patterns and the `{ params, query?, body? }` input shape.
- Consume queries directly with `useQuery(consoleQuery.xxx.queryOptions(...))` or `useQuery(marketplaceQuery.xxx.queryOptions(...))`.
- Avoid pass-through hooks and thin `web/service/use-*` wrappers that only rename `queryOptions()` or `mutationOptions()`. Extract a small `queryOptions` helper only when repeated call-site options justify it.
- Keep feature hooks for real orchestration, workflow state, or shared domain behavior.
- For missing required query input, use `input: skipToken`; use `enabled` only for extra business gating after the input is valid.
- Consume mutations directly with `useMutation(consoleQuery.xxx.mutationOptions(...))` or `useMutation(marketplaceQuery.xxx.mutationOptions(...))`; use oRPC clients as `mutationFn` only for custom flows.
- Put shared cache behavior in `createTanstackQueryUtils(...experimental_defaults...)`; components may add UI feedback callbacks, but should not own shared invalidation rules.
- Do not use deprecated `useInvalid` or `useReset`.
- Prefer `mutate(...)`; use `mutateAsync(...)` only when Promise semantics are required, and wrap awaited calls in `try/catch`.
## Component Boundaries
- Use the first level below a page or tab to organize independent page sections when it adds real structure. This layer is layout/semantic first, not automatically the data owner.
- Split deeper components by the data and state each layer actually needs. Each component should access only necessary data, and ownership should stay at the lowest consumer.
- Keep cohesive forms, menu bodies, and one-off helpers local unless they need their own state, reuse, or semantic boundary.
- Separate hidden secondary surfaces from the trigger's main flow. For dialogs, dropdowns, popovers, and similar branches, extract a small local component that owns the trigger, open state, and hidden content when it would obscure the parent flow.
- Preserve composability by separating behavior ownership from layout ownership. A dropdown action may own its trigger, open state, and menu content; the caller owns placement such as slots, offsets, and alignment.
- Avoid unnecessary DOM hierarchy. Do not add wrapper elements unless they provide layout, semantics, accessibility, state ownership, or integration with a library API; prefer fragments or styling an existing element when possible.
- Avoid shallow wrappers and prop renaming unless the wrapper adds validation, orchestration, error handling, state ownership, or a real semantic boundary.
## You Might Not Need An Effect
- Use Effects only to synchronize with external systems such as browser APIs, non-React widgets, subscriptions, timers, analytics that must run because the component was shown, or imperative DOM integration.
- Do not use Effects to transform props or state for rendering. Calculate derived values during render, and use `useMemo` only when the calculation is actually expensive (see the sketch after this list).
- Do not use Effects to handle user actions. Put action-specific logic in the event handler where the cause is known.
- Do not use Effects to copy one state value into another state value representing the same concept. Pick one source of truth and derive the rest during render.
- Do not reset or adjust state from props with an Effect. Prefer a `key` reset, storing a stable ID and deriving the selected object, or guarded same-component render-time adjustment when truly necessary.
- Prefer framework data APIs or TanStack Query for data fetching instead of writing request Effects in components.
- If an Effect still seems necessary, first name the external system it synchronizes with. If there is no external system, remove the Effect and restructure the state or event flow.
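A sketch of the derive-during-render rule; the names are illustrative:

```tsx
import { useEffect, useState } from 'react'

// ❌ Mirror state synchronized by an Effect: two sources of truth.
function BadFilteredList({ items, query }: { items: string[], query: string }) {
  const [filtered, setFiltered] = useState<string[]>([])
  useEffect(() => {
    setFiltered(items.filter(item => item.includes(query)))
  }, [items, query])
  return <ul>{filtered.map(item => <li key={item}>{item}</li>)}</ul>
}

// ✅ Derive during render; add useMemo only if filtering is measurably expensive.
function FilteredList({ items, query }: { items: string[], query: string }) {
  const filtered = items.filter(item => item.includes(query))
  return <ul>{filtered.map(item => <li key={item}>{item}</li>)}</ul>
}
```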
## Navigation And Performance
- Prefer `Link` for normal navigation. Use router APIs only for command-flow side effects such as mutation success, guarded redirects, or form submission.
- Avoid `memo`, `useMemo`, and `useCallback` unless there is a clear performance reason.

View File

@@ -1,367 +0,0 @@
---
name: tailwind-css-rules
description: Tailwind CSS v4.1+ rules and best practices. Use when writing, reviewing, refactoring, or upgrading Tailwind CSS classes and styles, especially v4 utility migrations, layout spacing, typography, responsive variants, dark mode, gradients, CSS variables, and component styling.
---
# Tailwind CSS Rules and Best Practices
## Core Principles
- **Always use Tailwind CSS v4.1+** - Ensure the codebase is using the latest version
- **Do not use deprecated or removed utilities** - ALWAYS use the replacement
- **Never use `@apply`** - Use CSS variables, the `--spacing()` function, or framework components instead
- **Check for redundant classes** - Remove any classes that aren't necessary
- **Group elements logically** to simplify responsive tweaks later
## Upgrading to Tailwind CSS v4
### Before Upgrading
- **Always read the upgrade documentation first** - Read https://tailwindcss.com/docs/upgrade-guide and https://tailwindcss.com/blog/tailwindcss-v4 before starting an upgrade.
- Ensure the git repository is in a clean state before starting
### Upgrade Process
1. Run the upgrade command: `npx @tailwindcss/upgrade@latest` for both major and minor updates
2. The tool will convert JavaScript config files to the new CSS format
3. Review all changes extensively to clean up any false positives
4. Test thoroughly across your application
## Breaking Changes Reference
### Removed Utilities (NEVER use these in v4)
| ❌ Deprecated | ✅ Replacement |
| ----------------------- | ------------------------------------------------- |
| `bg-opacity-*` | Use opacity modifiers like `bg-black/50` |
| `text-opacity-*` | Use opacity modifiers like `text-black/50` |
| `border-opacity-*` | Use opacity modifiers like `border-black/50` |
| `divide-opacity-*` | Use opacity modifiers like `divide-black/50` |
| `ring-opacity-*` | Use opacity modifiers like `ring-black/50` |
| `placeholder-opacity-*` | Use opacity modifiers like `placeholder-black/50` |
| `flex-shrink-*` | `shrink-*` |
| `flex-grow-*` | `grow-*` |
| `overflow-ellipsis` | `text-ellipsis` |
| `decoration-slice` | `box-decoration-slice` |
| `decoration-clone` | `box-decoration-clone` |
### Renamed Utilities
Use the v4 name when migrating code that still carries Tailwind v3 semantics. Do not blanket-replace existing v4 classes: classes such as `rounded-sm`, `shadow-sm`, `ring-1`, and `ring-2` are valid in this codebase when they intentionally represent the current design scale.
| ❌ v3 pattern | ✅ v4 pattern |
| ------------------- | -------------------------------------------------- |
| `bg-gradient-*` | `bg-linear-*` |
| old shadow scale | verify against the current Tailwind/design scale |
| old blur scale | verify against the current Tailwind/design scale |
| old radius scale | use the Dify radius token mapping when applicable |
| `outline-none` | `outline-hidden` |
| bare `ring` utility | use an explicit ring width such as `ring-1`/`ring-2`/`ring-3` |
For Figma radius tokens, follow `packages/dify-ui/AGENTS.md`. For example, `--radius/xs` maps to `rounded-sm`; do not rewrite it to `rounded-xs`.
## Layout and Spacing Rules
### Flexbox and Grid Spacing
#### Always use gap utilities for internal spacing
Gap provides consistent spacing without edge cases (no extra space on last items). It's cleaner and more maintainable than margins on children.
```html
<!-- ❌ Don't do this -->
<div class="flex">
<div class="mr-4">Item 1</div>
<div class="mr-4">Item 2</div>
<div>Item 3</div>
<!-- No margin on last -->
</div>
<!-- ✅ Do this instead -->
<div class="flex gap-4">
<div>Item 1</div>
<div>Item 2</div>
<div>Item 3</div>
</div>
```
#### Gap vs Space utilities
- **Never use `space-x-*` or `space-y-*` in flex/grid layouts** - always use gap
- Space utilities add margins to children and have issues with wrapped items
- Gap works correctly with flex-wrap and all flex directions
```html
<!-- ❌ Avoid space utilities in flex containers -->
<div class="flex flex-wrap space-x-4">
<!-- Space utilities break with wrapped items -->
</div>
<!-- ✅ Use gap for consistent spacing -->
<div class="flex flex-wrap gap-4">
<!-- Gap works perfectly with wrapping -->
</div>
```
### General Spacing Guidelines
- **Prefer top and left margins** over bottom and right margins (unless the element is conditionally rendered)
- **Use padding on parent containers** instead of bottom margins on the last child
- **Always use `min-h-dvh` instead of `min-h-screen`** - `min-h-screen` is buggy on mobile Safari
- **Prefer `size-*` utilities** over separate `w-*` and `h-*` when setting equal dimensions
- For max-widths, prefer the container scale (e.g., `max-w-2xs` over `max-w-72`); the sketch below pulls several of these guidelines together
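A minimal JSX sketch combining these guidelines; the layout and values are illustrative:

```tsx
// min-h-dvh for full height, padding on the parent, gap for internal
// spacing, size-* for equal dimensions, container-scale max-widths.
const ProfileHeader = () => (
  <main className="min-h-dvh p-6">
    <div className="flex items-center gap-3">
      <img src="/avatar.png" alt="" className="size-10 rounded-full" />
      <span className="max-w-2xs truncate">user@example.com</span>
    </div>
  </main>
)
```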
## Typography Rules
### Line Heights
- **Never use `leading-*` classes** - Always use line height modifiers with text size
- **Always use fixed line heights from the spacing scale** - Don't use named values
```html
<!-- ❌ Don't do this -->
<p class="text-base leading-7">Text with separate line height</p>
<p class="text-lg leading-relaxed">Text with named line height</p>
<!-- ✅ Do this instead -->
<p class="text-base/7">Text with line height modifier</p>
<p class="text-lg/8">Text with specific line height</p>
```
### Font Size Reference
Be precise with font sizes - know the actual pixel values:
- `text-xs` = 12px
- `text-sm` = 14px
- `text-base` = 16px
- `text-lg` = 18px
- `text-xl` = 20px
## Color and Opacity
### Opacity Modifiers
**Never use `bg-opacity-*`, `text-opacity-*`, etc.** - use the opacity modifier syntax:
```html
<!-- ❌ Don't do this -->
<div class="bg-red-500 bg-opacity-60">Old opacity syntax</div>
<!-- ✅ Do this instead -->
<div class="bg-red-500/60">Modern opacity syntax</div>
```
## Responsive Design
### Breakpoint Optimization
- **Check for redundant classes across breakpoints**
- **Only add breakpoint variants when values change**
```html
<!-- ❌ Redundant breakpoint classes -->
<div class="px-4 md:px-4 lg:px-4">
<!-- md:px-4 and lg:px-4 are redundant -->
</div>
<!-- ✅ Efficient breakpoint usage -->
<div class="px-4 lg:px-8">
<!-- Only specify when value changes -->
</div>
```
## Dark Mode
### Dark Mode Best Practices
- Use the plain `dark:` variant pattern
- Put light mode styles first, then dark mode styles
- Ensure `dark:` variant comes before other variants
```html
<!-- ✅ Correct dark mode pattern -->
<div class="bg-white text-black dark:bg-black dark:text-white">
<button class="hover:bg-gray-100 dark:hover:bg-gray-800">Click me</button>
</div>
```
## Gradient Utilities
- **ALWAYS Use `bg-linear-*` instead of `bg-gradient-*` utilities** - The gradient utilities were renamed in v4
- Use the new `bg-radial` or `bg-radial-[<position>]` to create radial gradients
- Use the new `bg-conic` or `bg-conic-*` to create conic gradients
```html
<!-- ✅ Use the new gradient utilities -->
<div class="h-14 bg-linear-to-br from-violet-500 to-fuchsia-500"></div>
<div
class="size-18 bg-radial-[at_50%_75%] from-sky-200 via-blue-400 to-indigo-900 to-90%"
></div>
<div
class="size-24 bg-conic-180 from-indigo-600 via-indigo-50 to-indigo-600"
></div>
<!-- ❌ Do not use bg-gradient-* utilities -->
<div class="h-14 bg-gradient-to-br from-violet-500 to-fuchsia-500"></div>
```
## Working with CSS Variables
### Accessing Theme Values
Tailwind CSS v4 exposes all theme values as CSS variables:
```css
/* Access colors, and other theme values */
.custom-element {
background: var(--color-red-500);
border-radius: var(--radius-lg);
}
```
### The `--spacing()` Function
Use the dedicated `--spacing()` function for spacing calculations:
```css
.custom-class {
margin-top: calc(100vh - --spacing(16));
}
```
### Extending theme values
Use CSS to extend theme values:
```css
@import "tailwindcss";
@theme {
--color-mint-500: oklch(0.72 0.11 178);
}
```
```html
<div class="bg-mint-500">
<!-- ... -->
</div>
```
## New v4 Features
### Container Queries
Use the `@container` class and size variants:
```html
<article class="@container">
<div class="flex flex-col @md:flex-row @lg:gap-8">
<img class="w-full @md:w-48" />
<div class="mt-4 @md:mt-0">
<!-- Content adapts to container size -->
</div>
</div>
</article>
```
### Container Query Units
Use container-based units like `cqw` for responsive sizing:
```html
<div class="@container">
<h1 class="text-[50cqw]">Responsive to container width</h1>
</div>
```
### Text Shadows (v4.1)
Use `text-shadow-*` utilities, from `text-shadow-2xs` to `text-shadow-lg`:
```html
<!-- ✅ Text shadow examples -->
<h1 class="text-shadow-lg">Large shadow</h1>
<p class="text-shadow-sm/50">Small shadow with opacity</p>
```
### Masking (v4.1)
Use the new composable mask utilities for image and gradient masks:
```html
<!-- ✅ Linear gradient masks on specific sides -->
<div class="mask-t-from-50%">Top fade</div>
<div class="mask-b-from-20% mask-b-to-80%">Bottom gradient</div>
<div class="mask-linear-from-white mask-linear-to-black/60">
Fade from white to black
</div>
<!-- ✅ Radial gradient masks -->
<div class="mask-radial-[100%_100%] mask-radial-from-75% mask-radial-at-left">
Radial mask
</div>
```
## Component Patterns
### Avoiding Utility Inheritance
Don't add utilities to parents that you override in children:
```html
<!-- ❌ Avoid this pattern -->
<div class="text-center">
<h1>Centered Heading</h1>
<div class="text-left">Left-aligned content</div>
</div>
<!-- ✅ Better approach -->
<div>
<h1 class="text-center">Centered Heading</h1>
<div>Left-aligned content</div>
</div>
```
### Component Extraction
- Extract repeated patterns into framework components, not CSS classes
- Keep utility classes in templates/JSX
- Use data attributes for complex state-based styling, as in the sketch below
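A minimal JSX sketch of data-attribute styling; the `data-state` contract and class list are illustrative:

```tsx
// The component exposes its state as a data attribute; Tailwind's
// data-* variants select on it, so no conditional class logic is needed.
const Tab = ({ label, isActive }: { label: string; isActive: boolean }) => (
  <button
    type="button"
    data-state={isActive ? 'active' : 'inactive'}
    className="px-3 py-1 text-gray-500 data-[state=active]:bg-gray-100 data-[state=active]:text-gray-900"
  >
    {label}
  </button>
)
```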
## CSS Best Practices
### Nesting Guidelines
- Use nesting when styling both parent and children
- Avoid empty parent selectors
```css
/* ✅ Good nesting - parent has styles */
.card {
padding: --spacing(4);
> .card-title {
font-weight: bold;
}
}
/* ❌ Avoid empty parents */
ul {
> li {
/* Parent has no styles */
}
}
```
## Common Pitfalls to Avoid
1. **Using old opacity utilities** - Always use `/opacity` syntax like `bg-red-500/60`
2. **Redundant breakpoint classes** - Only specify changes
3. **Space utilities in flex/grid** - Always use gap
4. **Leading utilities** - Use line-height modifiers like `text-sm/6`
5. **Arbitrary values** - Always use Tailwind's predefined scale whenever possible (e.g., use `ml-4` over `ml-[16px]`)
6. **`@apply` directive** - Use components or CSS variables
7. **`min-h-screen` on mobile** - Use `min-h-dvh`
8. **Separate width/height** - Use `size-*` utilities when equal

The sketch below rewrites an element that hits several of these at once.
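A before/after JSX sketch; class values are illustrative:

```tsx
// ❌ Before: old opacity syntax, space-x in a flex row, leading-*,
// arbitrary margin, min-h-screen
const Before = () => (
  <ul className="ml-[16px] flex min-h-screen space-x-4 bg-black bg-opacity-50 leading-6">
    <li>Item</li>
  </ul>
)

// ✅ After: each pitfall replaced with its v4-friendly form
const After = () => (
  <ul className="ml-4 flex min-h-dvh gap-4 bg-black/50 text-base/6">
    <li>Item</li>
  </ul>
)
```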

View File

@ -1 +0,0 @@
../../.agents/skills/e2e-cucumber-playwright

View File

@ -7,7 +7,7 @@ cd web && pnpm install
pipx install uv pipx install uv
echo "alias start-api=\"cd $WORKSPACE_ROOT/api && uv run python -m flask run --host 0.0.0.0 --port=5001 --debug\"" >> ~/.bashrc echo "alias start-api=\"cd $WORKSPACE_ROOT/api && uv run python -m flask run --host 0.0.0.0 --port=5001 --debug\"" >> ~/.bashrc
echo "alias start-worker=\"cd $WORKSPACE_ROOT/api && uv run python -m celery -A app.celery worker -P threads -c 1 --loglevel INFO -Q dataset,dataset_summary,priority_dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_publisher,trigger_refresh_executor,retention\"" >> ~/.bashrc echo "alias start-worker=\"cd $WORKSPACE_ROOT/api && uv run python -m celery -A app.celery worker -P threads -c 1 --loglevel INFO -Q dataset,dataset_summary,priority_dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor,retention\"" >> ~/.bashrc
echo "alias start-web=\"cd $WORKSPACE_ROOT/web && pnpm dev:inspect\"" >> ~/.bashrc echo "alias start-web=\"cd $WORKSPACE_ROOT/web && pnpm dev:inspect\"" >> ~/.bashrc
echo "alias start-web-prod=\"cd $WORKSPACE_ROOT/web && pnpm build && pnpm start\"" >> ~/.bashrc echo "alias start-web-prod=\"cd $WORKSPACE_ROOT/web && pnpm build && pnpm start\"" >> ~/.bashrc
echo "alias start-containers=\"cd $WORKSPACE_ROOT/docker && docker-compose -f docker-compose.middleware.yaml -p dify --env-file middleware.env up -d\"" >> ~/.bashrc echo "alias start-containers=\"cd $WORKSPACE_ROOT/docker && docker-compose -f docker-compose.middleware.yaml -p dify --env-file middleware.env up -d\"" >> ~/.bashrc

4
.github/CODEOWNERS vendored
View File

@ -6,9 +6,6 @@
* @crazywoola @laipz8200 @Yeuoly * @crazywoola @laipz8200 @Yeuoly
# ESLint suppression file is maintained by autofix.ci pruning.
/eslint-suppressions.json
# CODEOWNERS file # CODEOWNERS file
/.github/CODEOWNERS @laipz8200 @crazywoola /.github/CODEOWNERS @laipz8200 @crazywoola
@ -39,6 +36,7 @@
/api/core/workflow/graph/ @laipz8200 @QuantumGhost /api/core/workflow/graph/ @laipz8200 @QuantumGhost
/api/core/workflow/graph_events/ @laipz8200 @QuantumGhost /api/core/workflow/graph_events/ @laipz8200 @QuantumGhost
/api/core/workflow/node_events/ @laipz8200 @QuantumGhost /api/core/workflow/node_events/ @laipz8200 @QuantumGhost
/api/dify_graph/model_runtime/ @laipz8200 @QuantumGhost
# Backend - Workflow - Nodes (Agent, Iteration, Loop, LLM) # Backend - Workflow - Nodes (Agent, Iteration, Loop, LLM)
/api/core/workflow/nodes/agent/ @Nov1c444 /api/core/workflow/nodes/agent/ @Nov1c444

View File

@ -4,8 +4,9 @@ runs:
using: composite using: composite
steps: steps:
- name: Setup Vite+ - name: Setup Vite+
uses: voidzero-dev/setup-vp@4f5aa3e38c781f1b01e78fb9255527cee8a6efa6 # v1.8.0 uses: voidzero-dev/setup-vp@20553a7a7429c429a74894104a2835d7fed28a72 # v1.3.0
with: with:
working-directory: web
node-version-file: .nvmrc node-version-file: .nvmrc
cache: true cache: true
run-install: true run-install: true

100
.github/dependabot.yml vendored
View File

@ -1,6 +1,106 @@
version: 2 version: 2
updates: updates:
- package-ecosystem: "pip"
directory: "/api"
open-pull-requests-limit: 10
schedule:
interval: "weekly"
groups:
flask:
patterns:
- "flask"
- "flask-*"
- "werkzeug"
- "gunicorn"
google:
patterns:
- "google-*"
- "googleapis-*"
opentelemetry:
patterns:
- "opentelemetry-*"
pydantic:
patterns:
- "pydantic"
- "pydantic-*"
llm:
patterns:
- "langfuse"
- "langsmith"
- "litellm"
- "mlflow*"
- "opik"
- "weave*"
- "arize*"
- "tiktoken"
- "transformers"
database:
patterns:
- "sqlalchemy"
- "psycopg2*"
- "psycogreen"
- "redis*"
- "alembic*"
storage:
patterns:
- "boto3*"
- "botocore*"
- "azure-*"
- "bce-*"
- "cos-python-*"
- "esdk-obs-*"
- "google-cloud-storage"
- "opendal"
- "oss2"
- "supabase*"
- "tos*"
vdb:
patterns:
- "alibabacloud*"
- "chromadb"
- "clickhouse-*"
- "clickzetta-*"
- "couchbase"
- "elasticsearch"
- "opensearch-py"
- "oracledb"
- "pgvect*"
- "pymilvus"
- "pymochow"
- "pyobvector"
- "qdrant-client"
- "intersystems-*"
- "tablestore"
- "tcvectordb"
- "tidb-vector"
- "upstash-*"
- "volcengine-*"
- "weaviate-*"
- "xinference-*"
- "mo-vector"
- "mysql-connector-*"
dev:
patterns:
- "coverage"
- "dotenv-linter"
- "faker"
- "lxml-stubs"
- "basedpyright"
- "ruff"
- "pytest*"
- "types-*"
- "boto3-stubs"
- "hypothesis"
- "pandas-stubs"
- "scipy-stubs"
- "import-linter"
- "celery-types"
- "mypy*"
- "pyrefly"
python-packages:
patterns:
- "*"
- package-ecosystem: "uv" - package-ecosystem: "uv"
directory: "/api" directory: "/api"
open-pull-requests-limit: 10 open-pull-requests-limit: 10

8
.github/labeler.yml vendored
View File

@ -1,9 +1,3 @@
web: web:
- changed-files: - changed-files:
- any-glob-to-any-file: - any-glob-to-any-file: 'web/**'
- 'web/**'
- 'packages/**'
- 'package.json'
- 'pnpm-lock.yaml'
- 'pnpm-workspace.yaml'
- '.nvmrc'

View File

@ -7,7 +7,6 @@
## Summary ## Summary
<!-- Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change. --> <!-- Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change. -->
<!-- If this PR was created by an automated agent, add `From <Tool Name>` as the final line of the description. Example: `From Codex`. -->
## Screenshots ## Screenshots
@ -18,7 +17,7 @@
## Checklist ## Checklist
- [ ] This change requires a documentation update, included: [Dify Document](https://github.com/langgenius/dify-docs) - [ ] This change requires a documentation update, included: [Dify Document](https://github.com/langgenius/dify-docs)
- [ ] I understand that this PR may be closed in case there was no previous discussion or issues. (This doesn't apply to typos!) - [x] I understand that this PR may be closed in case there was no previous discussion or issues. (This doesn't apply to typos!)
- [ ] I've added a test for each change that was introduced, and I tried as much as possible to make a single atomic change. - [x] I've added a test for each change that was introduced, and I tried as much as possible to make a single atomic change.
- [ ] I've updated the documentation accordingly. - [x] I've updated the documentation accordingly.
- [ ] I ran `make lint && make type-check` (backend) and `cd web && pnpm exec vp staged` (frontend) to appease the lint gods - [x] I ran `make lint` and `make type-check` (backend) and `cd web && npx lint-staged` (frontend) to appease the lint gods

View File

@ -1,82 +0,0 @@
import { execFileSync } from 'node:child_process'
import fs from 'node:fs'
import path from 'node:path'
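// Diffs the English i18n catalogs (web/i18n/en-US/<file>.json) between BASE_SHA
// and the checked-out working tree, then writes the added/updated/deleted keys
// per file to I18N_CHANGES_OUTPUT_PATH for downstream workflow steps.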
const repoRoot = process.cwd()
const baseSha = process.env.BASE_SHA || ''
const headSha = process.env.HEAD_SHA || ''
const files = (process.env.CHANGED_FILES || '').split(/\s+/).filter(Boolean)
const outputPath = process.env.I18N_CHANGES_OUTPUT_PATH || '/tmp/i18n-changes.json'
const englishPath = fileStem => path.join(repoRoot, 'web', 'i18n', 'en-US', `${fileStem}.json`)
const readCurrentJson = (fileStem) => {
const filePath = englishPath(fileStem)
if (!fs.existsSync(filePath))
return null
return JSON.parse(fs.readFileSync(filePath, 'utf8'))
}
const readBaseJson = (fileStem) => {
if (!baseSha)
return null
try {
const relativePath = `web/i18n/en-US/${fileStem}.json`
const content = execFileSync('git', ['show', `${baseSha}:${relativePath}`], { encoding: 'utf8' })
return JSON.parse(content)
}
catch {
return null
}
}
const compareJson = (beforeValue, afterValue) => JSON.stringify(beforeValue) === JSON.stringify(afterValue)
const changes = {}
for (const fileStem of files) {
const currentJson = readCurrentJson(fileStem)
const beforeJson = readBaseJson(fileStem) || {}
const afterJson = currentJson || {}
const added = {}
const updated = {}
const deleted = []
for (const [key, value] of Object.entries(afterJson)) {
if (!(key in beforeJson)) {
added[key] = value
continue
}
if (!compareJson(beforeJson[key], value)) {
updated[key] = {
before: beforeJson[key],
after: value,
}
}
}
for (const key of Object.keys(beforeJson)) {
if (!(key in afterJson))
deleted.push(key)
}
changes[fileStem] = {
fileDeleted: currentJson === null,
added,
updated,
deleted,
}
}
fs.writeFileSync(
outputPath,
JSON.stringify({
baseSha,
headSha,
files,
changes,
})
)

19
.github/workflows/anti-slop.yml vendored Normal file
View File

@ -0,0 +1,19 @@
name: Anti-Slop PR Check
on:
pull_request_target:
types: [opened, edited, synchronize]
permissions:
pull-requests: write
contents: read
jobs:
anti-slop:
runs-on: ubuntu-latest
steps:
- uses: peakoss/anti-slop@85daca1880e9e1af197fc06ea03349daf08f4202 # v0.2.1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
close-pr: false
failure-add-pr-labels: "needs-revision"

View File

@ -14,17 +14,18 @@ concurrency:
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
api-unit: test:
name: API Unit Tests name: API Tests
runs-on: depot-ubuntu-24.04 runs-on: ubuntu-latest
env: env:
COVERAGE_FILE: coverage-unit CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
defaults: defaults:
run: run:
shell: bash shell: bash
strategy: strategy:
matrix: matrix:
python-version: python-version:
- "3.11"
- "3.12" - "3.12"
steps: steps:
@ -35,7 +36,7 @@ jobs:
persist-credentials: false persist-credentials: false
- name: Setup UV and Python - name: Setup UV and Python
uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0 uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0
with: with:
enable-cache: true enable-cache: true
python-version: ${{ matrix.python-version }} python-version: ${{ matrix.python-version }}
@ -50,62 +51,16 @@ jobs:
- name: Run dify config tests - name: Run dify config tests
run: uv run --project api dev/pytest/pytest_config_tests.py run: uv run --project api dev/pytest/pytest_config_tests.py
- name: Run Unit Tests
run: uv run --project api bash dev/pytest/pytest_unit_tests.sh
- name: Upload unit coverage data
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
with:
name: api-coverage-unit
path: coverage-unit
retention-days: 1
api-integration:
name: API Integration Tests
runs-on: depot-ubuntu-24.04
env:
COVERAGE_FILE: coverage-integration
STORAGE_TYPE: opendal
OPENDAL_SCHEME: fs
OPENDAL_FS_ROOT: /tmp/dify-storage
defaults:
run:
shell: bash
strategy:
matrix:
python-version:
- "3.12"
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
persist-credentials: false
- name: Setup UV and Python
uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0
with:
enable-cache: true
python-version: ${{ matrix.python-version }}
cache-dependency-glob: api/uv.lock
- name: Check UV lockfile
run: uv lock --project api --check
- name: Install dependencies
run: uv sync --project api --dev
- name: Set up dotenvs - name: Set up dotenvs
run: | run: |
cp docker/.env.example docker/.env cp docker/.env.example docker/.env
cp docker/envs/middleware.env.example docker/middleware.env cp docker/middleware.env.example docker/middleware.env
- name: Expose Service Ports - name: Expose Service Ports
run: sh .github/workflows/expose_service_ports.sh run: sh .github/workflows/expose_service_ports.sh
- name: Set up Sandbox - name: Set up Sandbox
uses: hoverkraft-tech/compose-action@d2bee4f07e8ca410d6b196d00f90c12e7d48c33a # v2.6.0 uses: hoverkraft-tech/compose-action@4894d2492015c1774ee5a13a95b1072093087ec3 # v2.5.0
with: with:
compose-file: | compose-file: |
docker/docker-compose.middleware.yaml docker/docker-compose.middleware.yaml
@ -119,91 +74,23 @@ jobs:
run: | run: |
cp api/tests/integration_tests/.env.example api/tests/integration_tests/.env cp api/tests/integration_tests/.env.example api/tests/integration_tests/.env
- name: Run Integration Tests - name: Run API Tests
env:
STORAGE_TYPE: opendal
OPENDAL_SCHEME: fs
OPENDAL_FS_ROOT: /tmp/dify-storage
run: | run: |
uv run --project api pytest \ uv run --project api pytest \
-n auto \ -n auto \
--timeout "${PYTEST_TIMEOUT:-180}" \ --timeout "${PYTEST_TIMEOUT:-180}" \
api/tests/integration_tests/workflow \ api/tests/integration_tests/workflow \
api/tests/integration_tests/tools \ api/tests/integration_tests/tools \
api/tests/test_containers_integration_tests api/tests/test_containers_integration_tests \
api/tests/unit_tests
- name: Upload integration coverage data
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
with:
name: api-coverage-integration
path: coverage-integration
retention-days: 1
api-coverage:
name: API Coverage
runs-on: depot-ubuntu-24.04
needs:
- api-unit
- api-integration
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
COVERAGE_FILE: .coverage
defaults:
run:
shell: bash
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
persist-credentials: false
- name: Setup UV and Python
uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0
with:
enable-cache: true
python-version: "3.12"
cache-dependency-glob: api/uv.lock
- name: Install dependencies
run: uv sync --project api --dev
- name: Download coverage data
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
with:
path: coverage-data
pattern: api-coverage-*
merge-multiple: true
- name: Combine coverage
run: |
set -euo pipefail
echo "### API Coverage" >> "$GITHUB_STEP_SUMMARY"
echo "" >> "$GITHUB_STEP_SUMMARY"
echo "Merged backend coverage report generated for Codecov project status." >> "$GITHUB_STEP_SUMMARY"
echo "" >> "$GITHUB_STEP_SUMMARY"
unit_coverage="$(find coverage-data -type f -name coverage-unit -print -quit)"
integration_coverage="$(find coverage-data -type f -name coverage-integration -print -quit)"
: "${unit_coverage:?coverage-unit artifact not found}"
: "${integration_coverage:?coverage-integration artifact not found}"
report_file="$(mktemp)"
uv run --project api coverage combine "$unit_coverage" "$integration_coverage"
uv run --project api coverage report --show-missing | tee "$report_file"
echo "Summary: \`$(tail -n 1 "$report_file")\`" >> "$GITHUB_STEP_SUMMARY"
{
echo ""
echo "<details><summary>Coverage report</summary>"
echo ""
echo '```'
cat "$report_file"
echo '```'
echo "</details>"
} >> "$GITHUB_STEP_SUMMARY"
uv run --project api coverage xml -o coverage.xml
- name: Report coverage - name: Report coverage
if: ${{ env.CODECOV_TOKEN != '' }} if: ${{ env.CODECOV_TOKEN != '' && matrix.python-version == '3.12' }}
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6.0.0 uses: codecov/codecov-action@1af58845a975a7985b0beb0cbe6fbbb71a41dbad # v5.5.3
with: with:
files: ./coverage.xml files: ./coverage.xml
disable_search: true disable_search: true

View File

@ -2,9 +2,6 @@ name: autofix.ci
on: on:
pull_request: pull_request:
branches: ["main"] branches: ["main"]
merge_group:
branches: ["main"]
types: [checks_requested]
push: push:
branches: ["main"] branches: ["main"]
permissions: permissions:
@ -13,19 +10,13 @@ permissions:
jobs: jobs:
autofix: autofix:
if: github.repository == 'langgenius/dify' if: github.repository == 'langgenius/dify'
runs-on: depot-ubuntu-24.04 runs-on: ubuntu-latest
steps: steps:
- name: Complete merge group check - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
if: github.event_name == 'merge_group'
run: echo "autofix.ci updates pull request branches, not merge group refs."
- if: github.event_name != 'merge_group'
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Check Docker Compose inputs - name: Check Docker Compose inputs
if: github.event_name != 'merge_group'
id: docker-compose-changes id: docker-compose-changes
uses: tj-actions/changed-files@9426d40962ed5378910ee2e21d5f8c6fcbf2dd96 # v47.0.6 uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
with: with:
files: | files: |
docker/generate_docker_compose docker/generate_docker_compose
@ -33,39 +24,30 @@ jobs:
docker/docker-compose-template.yaml docker/docker-compose-template.yaml
docker/docker-compose.yaml docker/docker-compose.yaml
- name: Check web inputs - name: Check web inputs
if: github.event_name != 'merge_group'
id: web-changes id: web-changes
uses: tj-actions/changed-files@9426d40962ed5378910ee2e21d5f8c6fcbf2dd96 # v47.0.6 uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
with: with:
files: | files: |
web/** web/**
packages/**
package.json
pnpm-lock.yaml
pnpm-workspace.yaml
.nvmrc
- name: Check api inputs - name: Check api inputs
if: github.event_name != 'merge_group'
id: api-changes id: api-changes
uses: tj-actions/changed-files@9426d40962ed5378910ee2e21d5f8c6fcbf2dd96 # v47.0.6 uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
with: with:
files: | files: |
api/** api/**
- if: github.event_name != 'merge_group' - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
with: with:
python-version: "3.11" python-version: "3.11"
- if: github.event_name != 'merge_group' - uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0
uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0
- name: Generate Docker Compose - name: Generate Docker Compose
if: github.event_name != 'merge_group' && steps.docker-compose-changes.outputs.any_changed == 'true' if: steps.docker-compose-changes.outputs.any_changed == 'true'
run: | run: |
cd docker cd docker
./generate_docker_compose ./generate_docker_compose
- if: github.event_name != 'merge_group' && steps.api-changes.outputs.any_changed == 'true' - if: steps.api-changes.outputs.any_changed == 'true'
run: | run: |
cd api cd api
uv sync --dev uv sync --dev
@ -77,13 +59,13 @@ jobs:
uv run ruff format .. uv run ruff format ..
- name: count migration progress - name: count migration progress
if: github.event_name != 'merge_group' && steps.api-changes.outputs.any_changed == 'true' if: steps.api-changes.outputs.any_changed == 'true'
run: | run: |
cd api cd api
./cnt_base.sh ./cnt_base.sh
- name: ast-grep - name: ast-grep
if: github.event_name != 'merge_group' && steps.api-changes.outputs.any_changed == 'true' if: steps.api-changes.outputs.any_changed == 'true'
run: | run: |
# ast-grep exits 1 if no matches are found; allow idempotent runs. # ast-grep exits 1 if no matches are found; allow idempotent runs.
uvx --from ast-grep-cli ast-grep --pattern 'db.session.query($WHATEVER).filter($HERE)' --rewrite 'db.session.query($WHATEVER).where($HERE)' -l py --update-all || true uvx --from ast-grep-cli ast-grep --pattern 'db.session.query($WHATEVER).filter($HERE)' --rewrite 'db.session.query($WHATEVER).where($HERE)' -l py --update-all || true
@ -113,19 +95,13 @@ jobs:
find . -name "*.py.bak" -type f -delete find . -name "*.py.bak" -type f -delete
- name: Setup web environment - name: Setup web environment
if: github.event_name != 'merge_group' if: steps.web-changes.outputs.any_changed == 'true'
uses: ./.github/actions/setup-web uses: ./.github/actions/setup-web
- name: Generate API docs
if: github.event_name != 'merge_group' && steps.api-changes.outputs.any_changed == 'true'
run: |
cd api
uv run dev/generate_swagger_markdown_docs.py --swagger-dir openapi --markdown-dir openapi/markdown
- name: ESLint autofix - name: ESLint autofix
if: github.event_name != 'merge_group' && steps.web-changes.outputs.any_changed == 'true' if: steps.web-changes.outputs.any_changed == 'true'
run: | run: |
cd web
vp exec eslint --concurrency=2 --prune-suppressions --quiet || true vp exec eslint --concurrency=2 --prune-suppressions --quiet || true
- if: github.event_name != 'merge_group' - uses: autofix-ci/action@7a166d7532b277f34e16238930461bf77f9d7ed8 # v1.3.3
uses: autofix-ci/action@c5b2d67aa2274e7b5a18224e8171550871fc7e4a # v1.3.4

View File

@ -24,42 +24,27 @@ env:
jobs: jobs:
build: build:
runs-on: ${{ matrix.runs_on }} runs-on: ${{ matrix.platform == 'linux/arm64' && 'arm64_runner' || 'ubuntu-latest' }}
if: github.repository == 'langgenius/dify' if: github.repository == 'langgenius/dify'
permissions:
contents: read
id-token: write
strategy: strategy:
matrix: matrix:
include: include:
- service_name: "build-api-amd64" - service_name: "build-api-amd64"
image_name_env: "DIFY_API_IMAGE_NAME" image_name_env: "DIFY_API_IMAGE_NAME"
artifact_context: "api" context: "api"
build_context: "{{defaultContext}}:api"
file: "Dockerfile"
platform: linux/amd64 platform: linux/amd64
runs_on: depot-ubuntu-24.04-4
- service_name: "build-api-arm64" - service_name: "build-api-arm64"
image_name_env: "DIFY_API_IMAGE_NAME" image_name_env: "DIFY_API_IMAGE_NAME"
artifact_context: "api" context: "api"
build_context: "{{defaultContext}}:api"
file: "Dockerfile"
platform: linux/arm64 platform: linux/arm64
runs_on: depot-ubuntu-24.04-4
- service_name: "build-web-amd64" - service_name: "build-web-amd64"
image_name_env: "DIFY_WEB_IMAGE_NAME" image_name_env: "DIFY_WEB_IMAGE_NAME"
artifact_context: "web" context: "web"
build_context: "{{defaultContext}}"
file: "web/Dockerfile"
platform: linux/amd64 platform: linux/amd64
runs_on: depot-ubuntu-24.04-4
- service_name: "build-web-arm64" - service_name: "build-web-arm64"
image_name_env: "DIFY_WEB_IMAGE_NAME" image_name_env: "DIFY_WEB_IMAGE_NAME"
artifact_context: "web" context: "web"
build_context: "{{defaultContext}}"
file: "web/Dockerfile"
platform: linux/arm64 platform: linux/arm64
runs_on: depot-ubuntu-24.04-4
steps: steps:
- name: Prepare - name: Prepare
@ -68,13 +53,16 @@ jobs:
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Login to Docker Hub - name: Login to Docker Hub
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0 uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
with: with:
username: ${{ env.DOCKERHUB_USER }} username: ${{ env.DOCKERHUB_USER }}
password: ${{ env.DOCKERHUB_TOKEN }} password: ${{ env.DOCKERHUB_TOKEN }}
- name: Set up Depot CLI - name: Set up QEMU
uses: depot/setup-action@15c09a5f77a0840ad4bce955686522a257853461 # v1.7.1 uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4.0.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
- name: Extract metadata for Docker - name: Extract metadata for Docker
id: meta id: meta
@ -84,15 +72,15 @@ jobs:
- name: Build Docker image - name: Build Docker image
id: build id: build
uses: depot/build-push-action@5f3b3c2e5a00f0093de47f657aeaefcedff27d18 # v1.17.0 uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0
with: with:
project: ${{ vars.DEPOT_PROJECT_ID }} context: "{{defaultContext}}:${{ matrix.context }}"
context: ${{ matrix.build_context }}
file: ${{ matrix.file }}
platforms: ${{ matrix.platform }} platforms: ${{ matrix.platform }}
build-args: COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }} build-args: COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
labels: ${{ steps.meta.outputs.labels }} labels: ${{ steps.meta.outputs.labels }}
outputs: type=image,name=${{ env[matrix.image_name_env] }},push-by-digest=true,name-canonical=true,push=true outputs: type=image,name=${{ env[matrix.image_name_env] }},push-by-digest=true,name-canonical=true,push=true
cache-from: type=gha,scope=${{ matrix.service_name }}
cache-to: type=gha,mode=max,scope=${{ matrix.service_name }}
- name: Export digest - name: Export digest
env: env:
@ -103,40 +91,16 @@ jobs:
touch "/tmp/digests/${sanitized_digest}" touch "/tmp/digests/${sanitized_digest}"
- name: Upload digest - name: Upload digest
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with: with:
name: digests-${{ matrix.artifact_context }}-${{ env.PLATFORM_PAIR }} name: digests-${{ matrix.context }}-${{ env.PLATFORM_PAIR }}
path: /tmp/digests/* path: /tmp/digests/*
if-no-files-found: error if-no-files-found: error
retention-days: 1 retention-days: 1
fork-build-validate:
if: github.repository != 'langgenius/dify'
runs-on: ubuntu-24.04
strategy:
matrix:
include:
- service_name: "validate-api-amd64"
build_context: "{{defaultContext}}:api"
file: "Dockerfile"
- service_name: "validate-web-amd64"
build_context: "{{defaultContext}}"
file: "web/Dockerfile"
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
- name: Validate Docker image
uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0
with:
push: false
context: ${{ matrix.build_context }}
file: ${{ matrix.file }}
platforms: linux/amd64
create-manifest: create-manifest:
needs: build needs: build
runs-on: depot-ubuntu-24.04 runs-on: ubuntu-latest
if: github.repository == 'langgenius/dify' if: github.repository == 'langgenius/dify'
strategy: strategy:
matrix: matrix:
@ -156,7 +120,7 @@ jobs:
merge-multiple: true merge-multiple: true
- name: Login to Docker Hub - name: Login to Docker Hub
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0 uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
with: with:
username: ${{ env.DOCKERHUB_USER }} username: ${{ env.DOCKERHUB_USER }}
password: ${{ env.DOCKERHUB_TOKEN }} password: ${{ env.DOCKERHUB_TOKEN }}

View File

@ -9,7 +9,7 @@ concurrency:
jobs: jobs:
db-migration-test-postgres: db-migration-test-postgres:
runs-on: depot-ubuntu-24.04 runs-on: ubuntu-latest
steps: steps:
- name: Checkout code - name: Checkout code
@ -19,7 +19,7 @@ jobs:
persist-credentials: false persist-credentials: false
- name: Setup UV and Python - name: Setup UV and Python
uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0 uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0
with: with:
enable-cache: true enable-cache: true
python-version: "3.12" python-version: "3.12"
@ -37,10 +37,10 @@ jobs:
- name: Prepare middleware env - name: Prepare middleware env
run: | run: |
cd docker cd docker
cp envs/middleware.env.example middleware.env cp middleware.env.example middleware.env
- name: Set up Middlewares - name: Set up Middlewares
uses: hoverkraft-tech/compose-action@d2bee4f07e8ca410d6b196d00f90c12e7d48c33a # v2.6.0 uses: hoverkraft-tech/compose-action@4894d2492015c1774ee5a13a95b1072093087ec3 # v2.5.0
with: with:
compose-file: | compose-file: |
docker/docker-compose.middleware.yaml docker/docker-compose.middleware.yaml
@ -59,7 +59,7 @@ jobs:
run: uv run --directory api flask upgrade-db run: uv run --directory api flask upgrade-db
db-migration-test-mysql: db-migration-test-mysql:
runs-on: depot-ubuntu-24.04 runs-on: ubuntu-latest
steps: steps:
- name: Checkout code - name: Checkout code
@ -69,7 +69,7 @@ jobs:
persist-credentials: false persist-credentials: false
- name: Setup UV and Python - name: Setup UV and Python
uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0 uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0
with: with:
enable-cache: true enable-cache: true
python-version: "3.12" python-version: "3.12"
@ -87,14 +87,14 @@ jobs:
- name: Prepare middleware env for MySQL - name: Prepare middleware env for MySQL
run: | run: |
cd docker cd docker
cp envs/middleware.env.example middleware.env cp middleware.env.example middleware.env
sed -i 's/DB_TYPE=postgresql/DB_TYPE=mysql/' middleware.env sed -i 's/DB_TYPE=postgresql/DB_TYPE=mysql/' middleware.env
sed -i 's/DB_HOST=db_postgres/DB_HOST=db_mysql/' middleware.env sed -i 's/DB_HOST=db_postgres/DB_HOST=db_mysql/' middleware.env
sed -i 's/DB_PORT=5432/DB_PORT=3306/' middleware.env sed -i 's/DB_PORT=5432/DB_PORT=3306/' middleware.env
sed -i 's/DB_USERNAME=postgres/DB_USERNAME=mysql/' middleware.env sed -i 's/DB_USERNAME=postgres/DB_USERNAME=mysql/' middleware.env
- name: Set up Middlewares - name: Set up Middlewares
uses: hoverkraft-tech/compose-action@d2bee4f07e8ca410d6b196d00f90c12e7d48c33a # v2.6.0 uses: hoverkraft-tech/compose-action@4894d2492015c1774ee5a13a95b1072093087ec3 # v2.5.0
with: with:
compose-file: | compose-file: |
docker/docker-compose.middleware.yaml docker/docker-compose.middleware.yaml
@ -110,28 +110,6 @@ jobs:
sed -i 's/DB_PORT=5432/DB_PORT=3306/' .env sed -i 's/DB_PORT=5432/DB_PORT=3306/' .env
sed -i 's/DB_USERNAME=postgres/DB_USERNAME=root/' .env sed -i 's/DB_USERNAME=postgres/DB_USERNAME=root/' .env
# hoverkraft-tech/compose-action@v2.6.0 only waits for `docker compose up -d`
# to return (container processes started); it does not wait on healthcheck
# status. mysql:8.0's first-time init takes 15-30s, so without an explicit
# wait the migration runs while InnoDB is still initialising and gets
# killed with "Lost connection during query". Poll a real SELECT until it
# succeeds.
- name: Wait for MySQL to accept queries
run: |
set +e
for i in $(seq 1 60); do
if docker run --rm --network host mysql:8.0 \
mysql -h 127.0.0.1 -P 3306 -uroot -pdifyai123456 \
-e 'SELECT 1' >/dev/null 2>&1; then
echo "MySQL ready after ${i}s"
exit 0
fi
sleep 1
done
echo "MySQL not ready after 60s; dumping container logs:"
docker compose -f docker/docker-compose.middleware.yaml --profile mysql logs --tail=200 db_mysql
exit 1
- name: Run DB Migration - name: Run DB Migration
env: env:
DEBUG: true DEBUG: true

View File

@ -13,7 +13,7 @@ on:
jobs: jobs:
deploy: deploy:
runs-on: depot-ubuntu-24.04 runs-on: ubuntu-latest
if: | if: |
github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.conclusion == 'success' &&
github.event.workflow_run.head_branch == 'deploy/agent-dev' github.event.workflow_run.head_branch == 'deploy/agent-dev'

View File

@ -10,7 +10,7 @@ on:
jobs: jobs:
deploy: deploy:
runs-on: depot-ubuntu-24.04 runs-on: ubuntu-latest
if: | if: |
github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.conclusion == 'success' &&
github.event.workflow_run.head_branch == 'deploy/dev' github.event.workflow_run.head_branch == 'deploy/dev'

View File

@ -13,7 +13,7 @@ on:
jobs: jobs:
deploy: deploy:
runs-on: depot-ubuntu-24.04 runs-on: ubuntu-latest
if: | if: |
github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.conclusion == 'success' &&
github.event.workflow_run.head_branch == 'deploy/enterprise' github.event.workflow_run.head_branch == 'deploy/enterprise'

View File

@ -10,7 +10,7 @@ on:
jobs: jobs:
deploy: deploy:
runs-on: depot-ubuntu-24.04 runs-on: ubuntu-latest
if: | if: |
github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.conclusion == 'success' &&
github.event.workflow_run.head_branch == 'build/feat/hitl' github.event.workflow_run.head_branch == 'build/feat/hitl'

View File

@ -14,69 +14,35 @@ concurrency:
jobs: jobs:
build-docker: build-docker:
if: github.event.pull_request.head.repo.full_name == github.repository runs-on: ubuntu-latest
runs-on: ${{ matrix.runs_on }}
permissions:
contents: read
id-token: write
strategy: strategy:
matrix: matrix:
include: include:
- service_name: "api-amd64" - service_name: "api-amd64"
platform: linux/amd64 platform: linux/amd64
runs_on: depot-ubuntu-24.04-4 context: "api"
context: "{{defaultContext}}:api"
file: "Dockerfile"
- service_name: "api-arm64" - service_name: "api-arm64"
platform: linux/arm64 platform: linux/arm64
runs_on: depot-ubuntu-24.04-4 context: "api"
context: "{{defaultContext}}:api"
file: "Dockerfile"
- service_name: "web-amd64" - service_name: "web-amd64"
platform: linux/amd64 platform: linux/amd64
runs_on: depot-ubuntu-24.04-4 context: "web"
context: "{{defaultContext}}"
file: "web/Dockerfile"
- service_name: "web-arm64" - service_name: "web-arm64"
platform: linux/arm64 platform: linux/arm64
runs_on: depot-ubuntu-24.04-4 context: "web"
context: "{{defaultContext}}"
file: "web/Dockerfile"
steps: steps:
- name: Set up Depot CLI - name: Set up QEMU
uses: depot/setup-action@15c09a5f77a0840ad4bce955686522a257853461 # v1.7.1 uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4.0.0
- name: Build Docker Image
uses: depot/build-push-action@5f3b3c2e5a00f0093de47f657aeaefcedff27d18 # v1.17.0
with:
project: ${{ vars.DEPOT_PROJECT_ID }}
push: false
context: ${{ matrix.context }}
file: ${{ matrix.file }}
platforms: ${{ matrix.platform }}
build-docker-fork:
if: github.event.pull_request.head.repo.full_name != github.repository
runs-on: ubuntu-24.04
permissions:
contents: read
strategy:
matrix:
include:
- service_name: "api-amd64"
context: "{{defaultContext}}:api"
file: "Dockerfile"
- service_name: "web-amd64"
context: "{{defaultContext}}"
file: "web/Dockerfile"
steps:
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0 uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
- name: Build Docker Image - name: Build Docker Image
uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0 uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0
with: with:
push: false push: false
context: ${{ matrix.context }} context: "{{defaultContext}}:${{ matrix.context }}"
file: ${{ matrix.file }} file: "${{ matrix.file }}"
platforms: linux/amd64 platforms: ${{ matrix.platform }}
cache-from: type=gha
cache-to: type=gha,mode=max

View File

@ -7,8 +7,8 @@ jobs:
permissions: permissions:
contents: read contents: read
pull-requests: write pull-requests: write
runs-on: depot-ubuntu-24.04 runs-on: ubuntu-latest
steps: steps:
- uses: actions/labeler@f27b608878404679385c85cfa523b85ccb86e213 # v6.1.0 - uses: actions/labeler@634933edcd8ababfe52f92936142cc22ac488b1b # v6.0.1
with: with:
sync-labels: true sync-labels: true

View File

@ -3,14 +3,10 @@ name: Main CI Pipeline
on: on:
pull_request: pull_request:
branches: ["main"] branches: ["main"]
merge_group:
branches: ["main"]
types: [checks_requested]
push: push:
branches: ["main"] branches: ["main"]
permissions: permissions:
actions: write
contents: write contents: write
pull-requests: write pull-requests: write
checks: write checks: write
@ -21,28 +17,12 @@ concurrency:
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
pre_job:
name: Skip Duplicate Checks
runs-on: depot-ubuntu-24.04
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip || 'false' }}
steps:
- id: skip_check
continue-on-error: true
uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1
with:
cancel_others: 'true'
concurrent_skipping: same_content_newer
# Check which paths were changed to determine which tests to run # Check which paths were changed to determine which tests to run
check-changes: check-changes:
name: Check Changed Files name: Check Changed Files
needs: pre_job runs-on: ubuntu-latest
if: needs.pre_job.outputs.should_skip != 'true'
runs-on: depot-ubuntu-24.04
outputs: outputs:
api-changed: ${{ steps.changes.outputs.api }} api-changed: ${{ steps.changes.outputs.api }}
e2e-changed: ${{ steps.changes.outputs.e2e }}
web-changed: ${{ steps.changes.outputs.web }} web-changed: ${{ steps.changes.outputs.web }}
vdb-changed: ${{ steps.changes.outputs.vdb }} vdb-changed: ${{ steps.changes.outputs.vdb }}
migration-changed: ${{ steps.changes.outputs.migration }} migration-changed: ${{ steps.changes.outputs.migration }}
@ -54,375 +34,49 @@ jobs:
filters: | filters: |
api: api:
- 'api/**' - 'api/**'
- 'docker/**'
- '.github/workflows/api-tests.yml' - '.github/workflows/api-tests.yml'
- '.github/workflows/expose_service_ports.sh'
- 'docker/.env.example'
- 'docker/envs/middleware.env.example'
- 'docker/docker-compose.middleware.yaml'
- 'docker/docker-compose-template.yaml'
- 'docker/generate_docker_compose'
- 'docker/ssrf_proxy/**'
- 'docker/volumes/sandbox/conf/**'
web: web:
- 'web/**' - 'web/**'
- 'packages/**'
- 'package.json'
- 'pnpm-lock.yaml'
- 'pnpm-workspace.yaml'
- '.nvmrc'
- '.github/workflows/web-tests.yml' - '.github/workflows/web-tests.yml'
- '.github/actions/setup-web/**' - '.github/actions/setup-web/**'
e2e:
- 'api/**'
- 'api/pyproject.toml'
- 'api/uv.lock'
- 'e2e/**'
- 'web/**'
- 'packages/**'
- 'package.json'
- 'pnpm-lock.yaml'
- 'pnpm-workspace.yaml'
- '.nvmrc'
- 'docker/docker-compose.middleware.yaml'
- 'docker/envs/middleware.env.example'
- '.github/workflows/web-e2e.yml'
- '.github/actions/setup-web/**'
vdb: vdb:
- 'api/core/rag/datasource/**' - 'api/core/rag/datasource/**'
- 'api/tests/integration_tests/vdb/**' - 'docker/**'
- 'api/providers/vdb/*/tests/**'
- '.github/workflows/vdb-tests.yml' - '.github/workflows/vdb-tests.yml'
- '.github/workflows/expose_service_ports.sh'
- 'docker/.env.example'
- 'docker/envs/middleware.env.example'
- 'docker/docker-compose.yaml'
- 'docker/docker-compose-template.yaml'
- 'docker/generate_docker_compose'
- 'docker/certbot/**'
- 'docker/couchbase-server/**'
- 'docker/elasticsearch/**'
- 'docker/iris/**'
- 'docker/nginx/**'
- 'docker/pgvector/**'
- 'docker/ssrf_proxy/**'
- 'docker/startupscripts/**'
- 'docker/tidb/**'
- 'docker/volumes/**'
- 'api/uv.lock' - 'api/uv.lock'
- 'api/pyproject.toml' - 'api/pyproject.toml'
migration: migration:
- 'api/migrations/**' - 'api/migrations/**'
- 'api/.env.example'
- '.github/workflows/db-migration-test.yml' - '.github/workflows/db-migration-test.yml'
- '.github/workflows/expose_service_ports.sh'
- 'docker/.env.example'
- 'docker/envs/middleware.env.example'
- 'docker/docker-compose.middleware.yaml'
- 'docker/docker-compose-template.yaml'
- 'docker/generate_docker_compose'
- 'docker/ssrf_proxy/**'
- 'docker/volumes/sandbox/conf/**'
# Run tests in parallel while always emitting stable required checks. # Run tests in parallel
api-tests-run: api-tests:
name: Run API Tests name: API Tests
needs: needs: check-changes
- pre_job if: needs.check-changes.outputs.api-changed == 'true'
- check-changes
if: needs.pre_job.outputs.should_skip != 'true' && needs.check-changes.outputs.api-changed == 'true'
uses: ./.github/workflows/api-tests.yml uses: ./.github/workflows/api-tests.yml
secrets: inherit secrets: inherit
api-tests-skip: web-tests:
name: Skip API Tests name: Web Tests
needs: needs: check-changes
- pre_job if: needs.check-changes.outputs.web-changed == 'true'
- check-changes
if: needs.pre_job.outputs.should_skip != 'true' && needs.check-changes.outputs.api-changed != 'true'
runs-on: depot-ubuntu-24.04
steps:
- name: Report skipped API tests
run: echo "No API-related changes detected; skipping API tests."
api-tests:
name: API Tests
if: ${{ always() }}
needs:
- pre_job
- check-changes
- api-tests-run
- api-tests-skip
runs-on: depot-ubuntu-24.04
steps:
- name: Finalize API Tests status
env:
SHOULD_SKIP_WORKFLOW: ${{ needs.pre_job.outputs.should_skip }}
TESTS_CHANGED: ${{ needs.check-changes.outputs.api-changed }}
RUN_RESULT: ${{ needs.api-tests-run.result }}
SKIP_RESULT: ${{ needs.api-tests-skip.result }}
run: |
if [[ "$SHOULD_SKIP_WORKFLOW" == 'true' ]]; then
echo "API tests were skipped because this workflow run duplicated a successful or newer run."
exit 0
fi
if [[ "$TESTS_CHANGED" == 'true' ]]; then
if [[ "$RUN_RESULT" == 'success' ]]; then
echo "API tests ran successfully."
exit 0
fi
echo "API tests were required but finished with result: $RUN_RESULT" >&2
exit 1
fi
if [[ "$SKIP_RESULT" == 'success' ]]; then
echo "API tests were skipped because no API-related files changed."
exit 0
fi
echo "API tests were not required, but the skip job finished with result: $SKIP_RESULT" >&2
exit 1
web-tests-run:
name: Run Web Tests
needs:
- pre_job
- check-changes
if: needs.pre_job.outputs.should_skip != 'true' && needs.check-changes.outputs.web-changed == 'true'
uses: ./.github/workflows/web-tests.yml uses: ./.github/workflows/web-tests.yml
secrets: inherit secrets: inherit
web-tests-skip:
name: Skip Web Tests
needs:
- pre_job
- check-changes
if: needs.pre_job.outputs.should_skip != 'true' && needs.check-changes.outputs.web-changed != 'true'
runs-on: depot-ubuntu-24.04
steps:
- name: Report skipped web tests
run: echo "No web-related changes detected; skipping web tests."
web-tests:
name: Web Tests
if: ${{ always() }}
needs:
- pre_job
- check-changes
- web-tests-run
- web-tests-skip
runs-on: depot-ubuntu-24.04
steps:
- name: Finalize Web Tests status
env:
SHOULD_SKIP_WORKFLOW: ${{ needs.pre_job.outputs.should_skip }}
TESTS_CHANGED: ${{ needs.check-changes.outputs.web-changed }}
RUN_RESULT: ${{ needs.web-tests-run.result }}
SKIP_RESULT: ${{ needs.web-tests-skip.result }}
run: |
if [[ "$SHOULD_SKIP_WORKFLOW" == 'true' ]]; then
echo "Web tests were skipped because this workflow run duplicated a successful or newer run."
exit 0
fi
if [[ "$TESTS_CHANGED" == 'true' ]]; then
if [[ "$RUN_RESULT" == 'success' ]]; then
echo "Web tests ran successfully."
exit 0
fi
echo "Web tests were required but finished with result: $RUN_RESULT" >&2
exit 1
fi
if [[ "$SKIP_RESULT" == 'success' ]]; then
echo "Web tests were skipped because no web-related files changed."
exit 0
fi
echo "Web tests were not required, but the skip job finished with result: $SKIP_RESULT" >&2
exit 1
web-e2e-run:
name: Run Web Full-Stack E2E
needs:
- pre_job
- check-changes
if: needs.pre_job.outputs.should_skip != 'true' && needs.check-changes.outputs.e2e-changed == 'true'
uses: ./.github/workflows/web-e2e.yml
web-e2e-skip:
name: Skip Web Full-Stack E2E
needs:
- pre_job
- check-changes
if: needs.pre_job.outputs.should_skip != 'true' && needs.check-changes.outputs.e2e-changed != 'true'
runs-on: depot-ubuntu-24.04
steps:
- name: Report skipped web full-stack e2e
run: echo "No E2E-related changes detected; skipping web full-stack E2E."
web-e2e:
name: Web Full-Stack E2E
if: ${{ always() }}
needs:
- pre_job
- check-changes
- web-e2e-run
- web-e2e-skip
runs-on: depot-ubuntu-24.04
steps:
- name: Finalize Web Full-Stack E2E status
env:
SHOULD_SKIP_WORKFLOW: ${{ needs.pre_job.outputs.should_skip }}
TESTS_CHANGED: ${{ needs.check-changes.outputs.e2e-changed }}
RUN_RESULT: ${{ needs.web-e2e-run.result }}
SKIP_RESULT: ${{ needs.web-e2e-skip.result }}
run: |
if [[ "$SHOULD_SKIP_WORKFLOW" == 'true' ]]; then
echo "Web full-stack E2E was skipped because this workflow run duplicated a successful or newer run."
exit 0
fi
if [[ "$TESTS_CHANGED" == 'true' ]]; then
if [[ "$RUN_RESULT" == 'success' ]]; then
echo "Web full-stack E2E ran successfully."
exit 0
fi
echo "Web full-stack E2E was required but finished with result: $RUN_RESULT" >&2
exit 1
fi
if [[ "$SKIP_RESULT" == 'success' ]]; then
echo "Web full-stack E2E was skipped because no E2E-related files changed."
exit 0
fi
echo "Web full-stack E2E was not required, but the skip job finished with result: $SKIP_RESULT" >&2
exit 1
style-check: style-check:
name: Style Check name: Style Check
needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true'
uses: ./.github/workflows/style.yml uses: ./.github/workflows/style.yml
vdb-tests-run:
name: Run VDB Tests
needs:
- pre_job
- check-changes
if: needs.pre_job.outputs.should_skip != 'true' && needs.check-changes.outputs.vdb-changed == 'true'
uses: ./.github/workflows/vdb-tests.yml
vdb-tests-skip:
name: Skip VDB Tests
needs:
- pre_job
- check-changes
if: needs.pre_job.outputs.should_skip != 'true' && needs.check-changes.outputs.vdb-changed != 'true'
runs-on: depot-ubuntu-24.04
steps:
- name: Report skipped VDB tests
run: echo "No VDB-related changes detected; skipping VDB tests."
vdb-tests: vdb-tests:
name: VDB Tests name: VDB Tests
if: ${{ always() }} needs: check-changes
needs: if: needs.check-changes.outputs.vdb-changed == 'true'
- pre_job uses: ./.github/workflows/vdb-tests.yml
- check-changes
- vdb-tests-run
- vdb-tests-skip
runs-on: depot-ubuntu-24.04
steps:
- name: Finalize VDB Tests status
env:
SHOULD_SKIP_WORKFLOW: ${{ needs.pre_job.outputs.should_skip }}
TESTS_CHANGED: ${{ needs.check-changes.outputs.vdb-changed }}
RUN_RESULT: ${{ needs.vdb-tests-run.result }}
SKIP_RESULT: ${{ needs.vdb-tests-skip.result }}
run: |
if [[ "$SHOULD_SKIP_WORKFLOW" == 'true' ]]; then
echo "VDB tests were skipped because this workflow run duplicated a successful or newer run."
exit 0
fi
if [[ "$TESTS_CHANGED" == 'true' ]]; then
if [[ "$RUN_RESULT" == 'success' ]]; then
echo "VDB tests ran successfully."
exit 0
fi
echo "VDB tests were required but finished with result: $RUN_RESULT" >&2
exit 1
fi
if [[ "$SKIP_RESULT" == 'success' ]]; then
echo "VDB tests were skipped because no VDB-related files changed."
exit 0
fi
echo "VDB tests were not required, but the skip job finished with result: $SKIP_RESULT" >&2
exit 1
db-migration-test-run:
name: Run DB Migration Test
needs:
- pre_job
- check-changes
if: needs.pre_job.outputs.should_skip != 'true' && needs.check-changes.outputs.migration-changed == 'true'
uses: ./.github/workflows/db-migration-test.yml
db-migration-test-skip:
name: Skip DB Migration Test
needs:
- pre_job
- check-changes
if: needs.pre_job.outputs.should_skip != 'true' && needs.check-changes.outputs.migration-changed != 'true'
runs-on: depot-ubuntu-24.04
steps:
- name: Report skipped DB migration tests
run: echo "No migration-related changes detected; skipping DB migration tests."
db-migration-test: db-migration-test:
name: DB Migration Test name: DB Migration Test
if: ${{ always() }} needs: check-changes
needs: if: needs.check-changes.outputs.migration-changed == 'true'
- pre_job uses: ./.github/workflows/db-migration-test.yml
- check-changes
- db-migration-test-run
- db-migration-test-skip
runs-on: depot-ubuntu-24.04
steps:
- name: Finalize DB Migration Test status
env:
SHOULD_SKIP_WORKFLOW: ${{ needs.pre_job.outputs.should_skip }}
TESTS_CHANGED: ${{ needs.check-changes.outputs.migration-changed }}
RUN_RESULT: ${{ needs.db-migration-test-run.result }}
SKIP_RESULT: ${{ needs.db-migration-test-skip.result }}
run: |
if [[ "$SHOULD_SKIP_WORKFLOW" == 'true' ]]; then
echo "DB migration tests were skipped because this workflow run duplicated a successful or newer run."
exit 0
fi
if [[ "$TESTS_CHANGED" == 'true' ]]; then
if [[ "$RUN_RESULT" == 'success' ]]; then
echo "DB migration tests ran successfully."
exit 0
fi
echo "DB migration tests were required but finished with result: $RUN_RESULT" >&2
exit 1
fi
if [[ "$SKIP_RESULT" == 'success' ]]; then
echo "DB migration tests were skipped because no migration-related files changed."
exit 0
fi
echo "DB migration tests were not required, but the skip job finished with result: $SKIP_RESULT" >&2
exit 1

View File

@ -12,7 +12,7 @@ permissions: {}
jobs: jobs:
comment: comment:
name: Comment PR with pyrefly diff name: Comment PR with pyrefly diff
runs-on: depot-ubuntu-24.04 runs-on: ubuntu-latest
permissions: permissions:
actions: read actions: read
contents: read contents: read
@ -21,7 +21,7 @@ jobs:
if: ${{ github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.pull_requests[0].head.repo.full_name != github.repository }} if: ${{ github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.pull_requests[0].head.repo.full_name != github.repository }}
steps: steps:
- name: Download pyrefly diff artifact - name: Download pyrefly diff artifact
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0 uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
with: with:
github-token: ${{ secrets.GITHUB_TOKEN }} github-token: ${{ secrets.GITHUB_TOKEN }}
script: | script: |
@ -49,7 +49,7 @@ jobs:
run: unzip -o pyrefly_diff.zip run: unzip -o pyrefly_diff.zip
- name: Post comment - name: Post comment
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0 uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
with: with:
github-token: ${{ secrets.GITHUB_TOKEN }} github-token: ${{ secrets.GITHUB_TOKEN }}
script: | script: |
@ -76,29 +76,13 @@ jobs:
diff += '\\n\\n... (truncated) ...'; diff += '\\n\\n... (truncated) ...';
} }
if (diff.trim()) { const body = diff.trim()
const body = '### Pyrefly Diff\n<details>\n<summary>base → PR</summary>\n\n```diff\n' + diff + '\n```\n</details>'; ? '### Pyrefly Diff\n<details>\n<summary>base → PR</summary>\n\n```diff\n' + diff + '\n```\n</details>'
const marker = '### Pyrefly Diff'; : '### Pyrefly Diff\nNo changes detected.';
const { data: comments } = await github.rest.issues.listComments({
issue_number: prNumber,
owner: context.repo.owner,
repo: context.repo.repo,
});
const existing = comments.find((comment) => comment.body.startsWith(marker));
if (existing) { await github.rest.issues.createComment({
await github.rest.issues.updateComment({ issue_number: prNumber,
comment_id: existing.id, owner: context.repo.owner,
owner: context.repo.owner, repo: context.repo.repo,
repo: context.repo.repo, body,
body, });
});
} else {
await github.rest.issues.createComment({
issue_number: prNumber,
owner: context.repo.owner,
repo: context.repo.repo,
body,
});
}
}

View File

@@ -10,7 +10,7 @@ permissions:
 jobs:
   pyrefly-diff:
-    runs-on: depot-ubuntu-24.04
+    runs-on: ubuntu-latest
     permissions:
       contents: read
       issues: write
@@ -22,7 +22,7 @@ jobs:
          fetch-depth: 0
      - name: Setup Python & UV
-        uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0
+        uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0
        with:
          enable-cache: true
@@ -50,23 +50,12 @@ jobs:
        run: |
          diff -u /tmp/pyrefly_base.txt /tmp/pyrefly_pr.txt > pyrefly_diff.txt || true
-      - name: Check if line counts match
-        id: line_count_check
-        run: |
-          base_lines=$(wc -l < /tmp/pyrefly_base.txt)
-          pr_lines=$(wc -l < /tmp/pyrefly_pr.txt)
-          if [ "$base_lines" -eq "$pr_lines" ]; then
-            echo "same=true" >> $GITHUB_OUTPUT
-          else
-            echo "same=false" >> $GITHUB_OUTPUT
-          fi
      - name: Save PR number
        run: |
          echo ${{ github.event.pull_request.number }} > pr_number.txt
      - name: Upload pyrefly diff
-        uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
+        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
        with:
          name: pyrefly_diff
          path: |
@@ -74,8 +63,8 @@ jobs:
            pr_number.txt
      - name: Comment PR with pyrefly diff
-        if: ${{ github.event.pull_request.head.repo.full_name == github.repository && steps.line_count_check.outputs.same == 'false' }}
-        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+        if: ${{ github.event.pull_request.head.repo.full_name == github.repository }}
+        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
@@ -103,26 +92,9 @@ jobs:
            ].join('\n')
              : '### Pyrefly Diff\nNo changes detected.';
-            const marker = '### Pyrefly Diff';
-            const { data: comments } = await github.rest.issues.listComments({
-              issue_number: prNumber,
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-            });
-            const existing = comments.find((comment) => comment.body.startsWith(marker));
-            if (existing) {
-              await github.rest.issues.updateComment({
-                comment_id: existing.id,
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                body,
-              });
-            } else {
-              await github.rest.issues.createComment({
-                issue_number: prNumber,
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                body,
-              });
-            }
+            await github.rest.issues.createComment({
+              issue_number: prNumber,
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              body,
+            });

View File

@@ -1,118 +0,0 @@
name: Comment with Pyrefly Type Coverage
on:
  workflow_run:
    workflows:
      - Pyrefly Type Coverage
    types:
      - completed
permissions: {}
jobs:
  comment:
    name: Comment PR with type coverage
    runs-on: depot-ubuntu-24.04
    permissions:
      actions: read
      contents: read
      issues: write
      pull-requests: write
    if: ${{ github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.pull_requests[0].head.repo.full_name != github.repository }}
    steps:
      - name: Checkout default branch (trusted code)
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - name: Setup Python & UV
        uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0
        with:
          enable-cache: true
      - name: Install dependencies
        run: uv sync --project api --dev
      - name: Download type coverage artifact
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const fs = require('fs');
            const artifacts = await github.rest.actions.listWorkflowRunArtifacts({
              owner: context.repo.owner,
              repo: context.repo.repo,
              run_id: ${{ github.event.workflow_run.id }},
            });
            const match = artifacts.data.artifacts.find((artifact) =>
              artifact.name === 'pyrefly_type_coverage'
            );
            if (!match) {
              throw new Error('pyrefly_type_coverage artifact not found');
            }
            const download = await github.rest.actions.downloadArtifact({
              owner: context.repo.owner,
              repo: context.repo.repo,
              artifact_id: match.id,
              archive_format: 'zip',
            });
            fs.writeFileSync('pyrefly_type_coverage.zip', Buffer.from(download.data));
      - name: Unzip artifact
        run: unzip -o pyrefly_type_coverage.zip
      - name: Render coverage markdown from structured data
        id: render
        run: |
          comment_body="$(uv run --directory api python libs/pyrefly_type_coverage.py \
            --base base_report.json \
            < pr_report.json)"
          {
            echo "### Pyrefly Type Coverage"
            echo ""
            echo "$comment_body"
          } > /tmp/type_coverage_comment.md
      - name: Post comment
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const fs = require('fs');
            const body = fs.readFileSync('/tmp/type_coverage_comment.md', { encoding: 'utf8' });
            let prNumber = null;
            try {
              prNumber = parseInt(fs.readFileSync('pr_number.txt', { encoding: 'utf8' }), 10);
            } catch (err) {
              const prs = context.payload.workflow_run.pull_requests || [];
              if (prs.length > 0 && prs[0].number) {
                prNumber = prs[0].number;
              }
            }
            if (!prNumber) {
              throw new Error('PR number not found in artifact or workflow_run payload');
            }
            // Update existing comment if one exists, otherwise create new
            const { data: comments } = await github.rest.issues.listComments({
              issue_number: prNumber,
              owner: context.repo.owner,
              repo: context.repo.repo,
            });
            const marker = '### Pyrefly Type Coverage';
            const existing = comments.find(c => c.body.startsWith(marker));
            if (existing) {
              await github.rest.issues.updateComment({
                comment_id: existing.id,
                owner: context.repo.owner,
                repo: context.repo.repo,
                body,
              });
            } else {
              await github.rest.issues.createComment({
                issue_number: prNumber,
                owner: context.repo.owner,
                repo: context.repo.repo,
                body,
              });
            }

View File

@@ -1,120 +0,0 @@
name: Pyrefly Type Coverage
on:
  pull_request:
    paths:
      - 'api/**/*.py'
permissions:
  contents: read
jobs:
  pyrefly-type-coverage:
    runs-on: depot-ubuntu-24.04
    permissions:
      contents: read
      issues: write
      pull-requests: write
    steps:
      - name: Checkout PR branch
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          fetch-depth: 0
      - name: Setup Python & UV
        uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0
        with:
          enable-cache: true
      - name: Install dependencies
        run: uv sync --project api --dev
      - name: Run pyrefly report on PR branch
        run: |
          uv run --directory api --dev pyrefly report 2>/dev/null > /tmp/pyrefly_report_pr.tmp && \
            mv /tmp/pyrefly_report_pr.tmp /tmp/pyrefly_report_pr.json || \
            echo '{}' > /tmp/pyrefly_report_pr.json
      - name: Save helper script from base branch
        run: |
          git show ${{ github.event.pull_request.base.sha }}:api/libs/pyrefly_type_coverage.py > /tmp/pyrefly_type_coverage.py 2>/dev/null \
            || cp api/libs/pyrefly_type_coverage.py /tmp/pyrefly_type_coverage.py
      - name: Checkout base branch
        run: git checkout ${{ github.base_ref }}
      - name: Run pyrefly report on base branch
        run: |
          uv run --directory api --dev pyrefly report 2>/dev/null > /tmp/pyrefly_report_base.tmp && \
            mv /tmp/pyrefly_report_base.tmp /tmp/pyrefly_report_base.json || \
            echo '{}' > /tmp/pyrefly_report_base.json
      - name: Generate coverage comparison
        id: coverage
        run: |
          comment_body="$(uv run --directory api python /tmp/pyrefly_type_coverage.py \
            --base /tmp/pyrefly_report_base.json \
            < /tmp/pyrefly_report_pr.json)"
          {
            echo "### Pyrefly Type Coverage"
            echo ""
            echo "$comment_body"
          } | tee -a "$GITHUB_STEP_SUMMARY" > /tmp/type_coverage_comment.md
          # Save structured data for the fork-PR comment workflow
          cp /tmp/pyrefly_report_pr.json pr_report.json
          cp /tmp/pyrefly_report_base.json base_report.json
      - name: Save PR number
        run: |
          echo ${{ github.event.pull_request.number }} > pr_number.txt
      - name: Upload type coverage artifact
        uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
        with:
          name: pyrefly_type_coverage
          path: |
            pr_report.json
            base_report.json
            pr_number.txt
      - name: Comment PR with type coverage
        if: ${{ github.event.pull_request.head.repo.full_name == github.repository }}
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const fs = require('fs');
            const marker = '### Pyrefly Type Coverage';
            let body;
            try {
              body = fs.readFileSync('/tmp/type_coverage_comment.md', { encoding: 'utf8' });
            } catch {
              body = `${marker}\n\n_Coverage report unavailable._`;
            }
            const prNumber = context.payload.pull_request.number;
            // Update existing comment if one exists, otherwise create new
            const { data: comments } = await github.rest.issues.listComments({
              issue_number: prNumber,
              owner: context.repo.owner,
              repo: context.repo.repo,
            });
            const existing = comments.find(c => c.body.startsWith(marker));
            if (existing) {
              await github.rest.issues.updateComment({
                comment_id: existing.id,
                owner: context.repo.owner,
                repo: context.repo.repo,
                body,
              });
            } else {
              await github.rest.issues.createComment({
                issue_number: prNumber,
                owner: context.repo.owner,
                repo: context.repo.repo,
                body,
              });
            }
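The deleted workflow compares two pyrefly report JSONs (base branch vs PR branch) to produce the coverage delta it posts. A rough TypeScript sketch of that comparison; the report shape used here (a per-file map of typed/total symbol counts) is an assumption for illustration, not the actual pyrefly schema:

```typescript
// Assumed report shape; the real pyrefly report format may differ.
interface CoverageReport {
  files: Record<string, { typed: number, total: number }>
}

// Aggregate a whole-repo coverage percentage from per-file counts.
function coveragePercent(report: CoverageReport): number {
  let typed = 0
  let total = 0
  for (const file of Object.values(report.files)) {
    typed += file.typed
    total += file.total
  }
  return total === 0 ? 100 : (typed / total) * 100
}

// Render the same "base vs PR" delta the workflow posts as a PR comment.
function coverageDelta(base: CoverageReport, pr: CoverageReport): string {
  const before = coveragePercent(base)
  const after = coveragePercent(pr)
  const sign = after >= before ? '+' : ''
  return `Type coverage: ${after.toFixed(2)}% (${sign}${(after - before).toFixed(2)}% vs base)`
}
```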

View File

@@ -7,22 +7,15 @@ on:
      - edited
      - reopened
      - synchronize
-  merge_group:
-    branches: ["main"]
-    types: [checks_requested]
 jobs:
   lint:
     name: Validate PR title
     permissions:
       pull-requests: read
-    runs-on: depot-ubuntu-24.04
+    runs-on: ubuntu-latest
     steps:
-      - name: Complete merge group check
-        if: github.event_name == 'merge_group'
-        run: echo "Semantic PR title validation is handled on pull requests."
      - name: Check title
-        if: github.event_name == 'pull_request'
        uses: amannn/action-semantic-pull-request@48f256284bd46cdaab1048c3721360e808335d50 # v6.1.1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -12,7 +12,7 @@ on:
 jobs:
   stale:
-    runs-on: depot-ubuntu-24.04
+    runs-on: ubuntu-latest
     permissions:
       issues: write
       pull-requests: write
@@ -23,8 +23,8 @@ jobs:
          days-before-issue-stale: 15
          days-before-issue-close: 3
          repo-token: ${{ secrets.GITHUB_TOKEN }}
-          stale-issue-message: "Closed due to inactivity. If you have any questions, you can reopen it."
-          stale-pr-message: "Closed due to inactivity. If you have any questions, you can reopen it."
+          stale-issue-message: "Close due to it's no longer active, if you have any questions, you can reopen it."
+          stale-pr-message: "Close due to it's no longer active, if you have any questions, you can reopen it."
          stale-issue-label: 'no-issue-activity'
          stale-pr-label: 'no-pr-activity'
-          any-of-labels: '🌚 invalid,🙋‍♂️ question,wont-fix,no-issue-activity,no-pr-activity,💪 enhancement,🤔 cant-reproduce,🙏 help wanted'
+          any-of-labels: 'duplicate,question,invalid,wontfix,no-issue-activity,no-pr-activity,enhancement,cant-reproduce,help-wanted'

View File

@@ -15,7 +15,7 @@ permissions:
 jobs:
   python-style:
     name: Python Style
-    runs-on: depot-ubuntu-24.04
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout code
@@ -25,7 +25,7 @@ jobs:
      - name: Check changed files
        id: changed-files
-        uses: tj-actions/changed-files@9426d40962ed5378910ee2e21d5f8c6fcbf2dd96 # v47.0.6
+        uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
        with:
          files: |
            api/**
@@ -33,7 +33,7 @@ jobs:
      - name: Setup UV and Python
        if: steps.changed-files.outputs.any_changed == 'true'
-        uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0
+        uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0
        with:
          enable-cache: false
          python-version: "3.12"
@@ -49,7 +49,7 @@ jobs:
      - name: Run Type Checks
        if: steps.changed-files.outputs.any_changed == 'true'
-        run: make type-check-core
+        run: make type-check
      - name: Dotenv check
        if: steps.changed-files.outputs.any_changed == 'true'
@@ -57,7 +57,7 @@ jobs:
  web-style:
    name: Web Style
-    runs-on: depot-ubuntu-24.04
+    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./web
@@ -73,17 +73,10 @@ jobs:
      - name: Check changed files
        id: changed-files
-        uses: tj-actions/changed-files@9426d40962ed5378910ee2e21d5f8c6fcbf2dd96 # v47.0.6
+        uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
        with:
          files: |
            web/**
-            e2e/**
-            sdks/nodejs-client/**
-            packages/**
-            package.json
-            pnpm-lock.yaml
-            pnpm-workspace.yaml
-            .nvmrc
            .github/workflows/style.yml
            .github/actions/setup-web/**
@@ -94,28 +87,26 @@ jobs:
      - name: Restore ESLint cache
        if: steps.changed-files.outputs.any_changed == 'true'
        id: eslint-cache-restore
-        uses: actions/cache/restore@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5.0.5
+        uses: actions/cache/restore@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
        with:
-          path: .eslintcache
-          key: ${{ runner.os }}-eslint-${{ hashFiles('pnpm-lock.yaml', 'eslint.config.mjs', 'web/eslint.config.mjs', 'web/eslint.constants.mjs', 'web/plugins/eslint/**') }}-${{ github.sha }}
+          path: web/.eslintcache
+          key: ${{ runner.os }}-web-eslint-${{ hashFiles('web/package.json', 'web/pnpm-lock.yaml', 'web/eslint.config.mjs', 'web/eslint.constants.mjs', 'web/plugins/eslint/**') }}-${{ github.sha }}
          restore-keys: |
-            ${{ runner.os }}-eslint-${{ hashFiles('pnpm-lock.yaml', 'eslint.config.mjs', 'web/eslint.config.mjs', 'web/eslint.constants.mjs', 'web/plugins/eslint/**') }}-
+            ${{ runner.os }}-web-eslint-${{ hashFiles('web/package.json', 'web/pnpm-lock.yaml', 'web/eslint.config.mjs', 'web/eslint.constants.mjs', 'web/plugins/eslint/**') }}-
      - name: Web style check
        if: steps.changed-files.outputs.any_changed == 'true'
-        working-directory: .
+        working-directory: ./web
        run: vp run lint:ci
      - name: Web tsslint
        if: steps.changed-files.outputs.any_changed == 'true'
        working-directory: ./web
-        env:
-          NODE_OPTIONS: --max-old-space-size=4096
        run: vp run lint:tss
      - name: Web type check
        if: steps.changed-files.outputs.any_changed == 'true'
-        working-directory: .
+        working-directory: ./web
        run: vp run type-check
      - name: Web dead code check
@@ -125,14 +116,14 @@ jobs:
      - name: Save ESLint cache
        if: steps.changed-files.outputs.any_changed == 'true' && success() && steps.eslint-cache-restore.outputs.cache-hit != 'true'
-        uses: actions/cache/save@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5.0.5
+        uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4
        with:
-          path: .eslintcache
+          path: web/.eslintcache
          key: ${{ steps.eslint-cache-restore.outputs.cache-primary-key }}
  superlinter:
    name: SuperLinter
-    runs-on: depot-ubuntu-24.04
+    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
@@ -143,7 +134,7 @@ jobs:
      - name: Check changed files
        id: changed-files
-        uses: tj-actions/changed-files@9426d40962ed5378910ee2e21d5f8c6fcbf2dd96 # v47.0.6
+        uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
        with:
          files: |
            **.sh
@@ -154,7 +145,7 @@ jobs:
            .editorconfig
      - name: Super-linter
-        uses: super-linter/super-linter/slim@9e863354e3ff62e0727d37183162c4a88873df41 # v8.6.0
+        uses: super-linter/super-linter/slim@61abc07d755095a68f4987d1c2c3d1d64408f1f9 # v8.5.0
        if: steps.changed-files.outputs.any_changed == 'true'
        env:
          BASH_SEVERITY: warning

View File

@@ -6,9 +6,6 @@ on:
      - main
    paths:
      - sdks/**
-      - package.json
-      - pnpm-lock.yaml
-      - pnpm-workspace.yaml
concurrency:
  group: sdk-tests-${{ github.head_ref || github.run_id }}
@@ -17,7 +14,7 @@ concurrency:
 jobs:
   build:
     name: unit test for Node.js SDK
-    runs-on: depot-ubuntu-24.04
+    runs-on: ubuntu-latest
     defaults:
       run:
@@ -29,7 +26,7 @@ jobs:
          persist-credentials: false
      - name: Use Node.js
-        uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0
+        uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0
        with:
          node-version: 22
          cache: ''

View File

@@ -1,24 +1,26 @@
 name: Translate i18n Files with Claude Code
 # Note: claude-code-action doesn't support push events directly.
-# Push events are bridged by trigger-i18n-sync.yml via repository_dispatch.
-# See: https://github.com/langgenius/dify/issues/30743
+# Push events are handled by trigger-i18n-sync.yml which sends repository_dispatch.
 on:
   repository_dispatch:
     types: [i18n-sync]
   workflow_dispatch:
     inputs:
       files:
-        description: 'Specific files to translate (space-separated, e.g., "app common"). Required for full mode; leave empty in incremental mode to use en-US files changed since HEAD~1.'
+        description: 'Specific files to translate (space-separated, e.g., "app common"). Leave empty for all files.'
        required: false
        type: string
      languages:
-        description: 'Specific languages to translate (space-separated, e.g., "zh-Hans ja-JP"). Leave empty for all supported target languages except en-US.'
+        description: 'Specific languages to translate (space-separated, e.g., "zh-Hans ja-JP"). Leave empty for all supported languages.'
        required: false
        type: string
      mode:
-        description: 'Sync mode: incremental (compare with previous en-US revision) or full (sync all keys in scope)'
+        description: 'Sync mode: incremental (only changes) or full (re-check all keys)'
        required: false
-        default: incremental
+        default: 'incremental'
        type: choice
        options:
          - incremental
@@ -28,15 +30,11 @@ permissions:
  contents: write
  pull-requests: write
-concurrency:
-  group: translate-i18n-${{ github.event_name }}-${{ github.ref }}
-  cancel-in-progress: false
 jobs:
   translate:
     if: github.repository == 'langgenius/dify'
-    runs-on: depot-ubuntu-24.04
-    timeout-minutes: 120
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
    steps:
      - name: Checkout repository
@@ -53,293 +51,380 @@ jobs:
      - name: Setup web environment
        uses: ./.github/actions/setup-web
-      - name: Prepare sync context
-        id: context
-        shell: bash
+      - name: Detect changed files and generate diff
+        id: detect_changes
        run: |
-          DEFAULT_TARGET_LANGS=$(awk "
-            /value: '/ {
-              value=\$2
-              gsub(/[',]/, \"\", value)
-            }
-            /supported: true/ && value != \"en-US\" {
-              printf \"%s \", value
-            }
-          " web/i18n-config/languages.ts | sed 's/[[:space:]]*$//')
-          generate_changes_json() {
-            node .github/scripts/generate-i18n-changes.mjs
-          }
-          if [ "${{ github.event_name }}" = "repository_dispatch" ]; then
-            BASE_SHA="${{ github.event.client_payload.base_sha }}"
-            HEAD_SHA="${{ github.event.client_payload.head_sha }}"
-            CHANGED_FILES="${{ github.event.client_payload.changed_files }}"
-            TARGET_LANGS="$DEFAULT_TARGET_LANGS"
-            SYNC_MODE="${{ github.event.client_payload.sync_mode || 'incremental' }}"
-            if [ -n "${{ github.event.client_payload.changes_base64 }}" ]; then
-              printf '%s' '${{ github.event.client_payload.changes_base64 }}' | base64 -d > /tmp/i18n-changes.json
-              CHANGES_AVAILABLE="true"
-              CHANGES_SOURCE="embedded"
-            elif [ -n "$BASE_SHA" ] && [ -n "$CHANGED_FILES" ]; then
-              export BASE_SHA HEAD_SHA CHANGED_FILES
-              generate_changes_json
-              CHANGES_AVAILABLE="true"
-              CHANGES_SOURCE="recomputed"
-            else
-              printf '%s' '{"baseSha":"","headSha":"","files":[],"changes":{}}' > /tmp/i18n-changes.json
-              CHANGES_AVAILABLE="false"
-              CHANGES_SOURCE="unavailable"
-            fi
-          else
-            BASE_SHA=""
-            HEAD_SHA=$(git rev-parse HEAD)
-            if [ -n "${{ github.event.inputs.languages }}" ]; then
-              TARGET_LANGS="${{ github.event.inputs.languages }}"
-            else
-              TARGET_LANGS="$DEFAULT_TARGET_LANGS"
-            fi
-            SYNC_MODE="${{ github.event.inputs.mode || 'incremental' }}"
-            if [ -n "${{ github.event.inputs.files }}" ]; then
-              CHANGED_FILES="${{ github.event.inputs.files }}"
-            elif [ "$SYNC_MODE" = "incremental" ]; then
-              BASE_SHA=$(git rev-parse HEAD~1 2>/dev/null || true)
-              if [ -n "$BASE_SHA" ]; then
-                CHANGED_FILES=$(git diff --name-only "$BASE_SHA" "$HEAD_SHA" -- 'web/i18n/en-US/*.json' 2>/dev/null | sed -n 's@^.*/@@p' | sed 's/\.json$//' | tr '\n' ' ' | sed 's/[[:space:]]*$//')
-              else
-                CHANGED_FILES=$(find web/i18n/en-US -maxdepth 1 -type f -name '*.json' -print | sed -n 's@^.*/@@p' | sed 's/\.json$//' | sort | tr '\n' ' ' | sed 's/[[:space:]]*$//')
-              fi
-            elif [ "$SYNC_MODE" = "full" ]; then
-              echo "workflow_dispatch full mode requires the files input to stay within CI limits." >&2
-              exit 1
-            else
-              CHANGED_FILES=""
-            fi
-            if [ "$SYNC_MODE" = "incremental" ] && [ -n "$CHANGED_FILES" ]; then
-              export BASE_SHA HEAD_SHA CHANGED_FILES
-              generate_changes_json
-              CHANGES_AVAILABLE="true"
-              CHANGES_SOURCE="local"
-            else
-              printf '%s' '{"baseSha":"","headSha":"","files":[],"changes":{}}' > /tmp/i18n-changes.json
-              CHANGES_AVAILABLE="false"
-              CHANGES_SOURCE="unavailable"
-            fi
-          fi
-          FILE_ARGS=""
-          if [ -n "$CHANGED_FILES" ]; then
-            FILE_ARGS="--file $CHANGED_FILES"
-          fi
-          LANG_ARGS=""
-          if [ -n "$TARGET_LANGS" ]; then
-            LANG_ARGS="--lang $TARGET_LANGS"
-          fi
-          {
-            echo "DEFAULT_TARGET_LANGS=$DEFAULT_TARGET_LANGS"
-            echo "BASE_SHA=$BASE_SHA"
-            echo "HEAD_SHA=$HEAD_SHA"
-            echo "CHANGED_FILES=$CHANGED_FILES"
-            echo "TARGET_LANGS=$TARGET_LANGS"
-            echo "SYNC_MODE=$SYNC_MODE"
-            echo "CHANGES_AVAILABLE=$CHANGES_AVAILABLE"
-            echo "CHANGES_SOURCE=$CHANGES_SOURCE"
-            echo "FILE_ARGS=$FILE_ARGS"
-            echo "LANG_ARGS=$LANG_ARGS"
-          } >> "$GITHUB_OUTPUT"
-          echo "Files: ${CHANGED_FILES:-<none>}"
-          echo "Languages: ${TARGET_LANGS:-<none>}"
-          echo "Mode: $SYNC_MODE"
+          if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
+            # Manual trigger
+            if [ -n "${{ github.event.inputs.files }}" ]; then
+              echo "CHANGED_FILES=${{ github.event.inputs.files }}" >> $GITHUB_OUTPUT
+            else
+              # Get all JSON files in en-US directory
+              files=$(ls web/i18n/en-US/*.json 2>/dev/null | xargs -n1 basename | sed 's/.json$//' | tr '\n' ' ')
+              echo "CHANGED_FILES=$files" >> $GITHUB_OUTPUT
+            fi
+            echo "TARGET_LANGS=${{ github.event.inputs.languages }}" >> $GITHUB_OUTPUT
+            echo "SYNC_MODE=${{ github.event.inputs.mode || 'incremental' }}" >> $GITHUB_OUTPUT
+            # For manual trigger with incremental mode, get diff from last commit
+            # For full mode, we'll do a complete check anyway
+            if [ "${{ github.event.inputs.mode }}" == "full" ]; then
+              echo "Full mode: will check all keys" > /tmp/i18n-diff.txt
+              echo "DIFF_AVAILABLE=false" >> $GITHUB_OUTPUT
+            else
+              git diff HEAD~1..HEAD -- 'web/i18n/en-US/*.json' > /tmp/i18n-diff.txt 2>/dev/null || echo "" > /tmp/i18n-diff.txt
+              if [ -s /tmp/i18n-diff.txt ]; then
+                echo "DIFF_AVAILABLE=true" >> $GITHUB_OUTPUT
+              else
+                echo "DIFF_AVAILABLE=false" >> $GITHUB_OUTPUT
+              fi
+            fi
+          elif [ "${{ github.event_name }}" == "repository_dispatch" ]; then
+            # Triggered by push via trigger-i18n-sync.yml workflow
+            # Validate required payload fields
+            if [ -z "${{ github.event.client_payload.changed_files }}" ]; then
+              echo "Error: repository_dispatch payload missing required 'changed_files' field" >&2
+              exit 1
+            fi
+            echo "CHANGED_FILES=${{ github.event.client_payload.changed_files }}" >> $GITHUB_OUTPUT
+            echo "TARGET_LANGS=" >> $GITHUB_OUTPUT
+            echo "SYNC_MODE=${{ github.event.client_payload.sync_mode || 'incremental' }}" >> $GITHUB_OUTPUT
+            # Decode the base64-encoded diff from the trigger workflow
+            if [ -n "${{ github.event.client_payload.diff_base64 }}" ]; then
+              if ! echo "${{ github.event.client_payload.diff_base64 }}" | base64 -d > /tmp/i18n-diff.txt 2>&1; then
+                echo "Warning: Failed to decode base64 diff payload" >&2
+                echo "" > /tmp/i18n-diff.txt
+                echo "DIFF_AVAILABLE=false" >> $GITHUB_OUTPUT
+              elif [ -s /tmp/i18n-diff.txt ]; then
+                echo "DIFF_AVAILABLE=true" >> $GITHUB_OUTPUT
+              else
+                echo "DIFF_AVAILABLE=false" >> $GITHUB_OUTPUT
+              fi
+            else
+              echo "" > /tmp/i18n-diff.txt
+              echo "DIFF_AVAILABLE=false" >> $GITHUB_OUTPUT
+            fi
+          else
+            echo "Unsupported event type: ${{ github.event_name }}"
+            exit 1
+          fi
+          # Truncate diff if too large (keep first 50KB)
+          if [ -f /tmp/i18n-diff.txt ]; then
+            head -c 50000 /tmp/i18n-diff.txt > /tmp/i18n-diff-truncated.txt
+            mv /tmp/i18n-diff-truncated.txt /tmp/i18n-diff.txt
+          fi
+          echo "Detected files: $(cat $GITHUB_OUTPUT | grep CHANGED_FILES || echo 'none')"
      - name: Run Claude Code for Translation Sync
-        if: steps.context.outputs.CHANGED_FILES != ''
-        uses: anthropics/claude-code-action@476e359e6203e73dad705c8b322e333fabbd7416 # v1.0.119
+        if: steps.detect_changes.outputs.CHANGED_FILES != ''
+        uses: anthropics/claude-code-action@ff9acae5886d41a99ed4ec14b7dc147d55834722 # v1.0.77
        with:
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          github_token: ${{ secrets.GITHUB_TOKEN }}
-          # Allow github-actions bot to trigger this workflow via repository_dispatch
-          # See: https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
          allowed_bots: 'github-actions[bot]'
-          show_full_output: ${{ github.event_name == 'workflow_dispatch' }}
          prompt: |
-            You are the i18n sync agent for the Dify repository.
-            Your job is to keep translations synchronized with the English source files under `${{ github.workspace }}/web/i18n/en-US/`.
-            Use absolute paths at all times:
-            - Repo root: `${{ github.workspace }}`
-            - Web directory: `${{ github.workspace }}/web`
-            - Language config: `${{ github.workspace }}/web/i18n-config/languages.ts`
-            Inputs:
-            - Files in scope: `${{ steps.context.outputs.CHANGED_FILES }}`
-            - Target languages: `${{ steps.context.outputs.TARGET_LANGS }}`
-            - Sync mode: `${{ steps.context.outputs.SYNC_MODE }}`
-            - Base SHA: `${{ steps.context.outputs.BASE_SHA }}`
-            - Head SHA: `${{ steps.context.outputs.HEAD_SHA }}`
-            - Scoped file args: `${{ steps.context.outputs.FILE_ARGS }}`
-            - Scoped language args: `${{ steps.context.outputs.LANG_ARGS }}`
-            - Structured change set available: `${{ steps.context.outputs.CHANGES_AVAILABLE }}`
-            - Structured change set source: `${{ steps.context.outputs.CHANGES_SOURCE }}`
-            - Structured change set file: `/tmp/i18n-changes.json`
-            Tool rules:
-            - Use Read for repository files.
-            - Use Edit for JSON updates.
-            - Use Bash only for `vp`.
-            - Do not use Bash for `git`, `gh`, or branch management.
-            Required execution plan:
-            1. Resolve target languages.
-               - Use the provided `Target languages` value as the source of truth.
-               - If it is unexpectedly empty, read `${{ github.workspace }}/web/i18n-config/languages.ts` and use every language with `supported: true` except `en-US`.
-            2. Stay strictly in scope.
-               - Only process the files listed in `Files in scope`.
-               - Only process the resolved target languages, never `en-US`.
-               - Do not touch unrelated i18n files.
-               - Do not modify `${{ github.workspace }}/web/i18n/en-US/`.
-            3. Resolve source changes.
-               - If `Structured change set available` is `true`, read `/tmp/i18n-changes.json` and use it as the source of truth for file-level and key-level changes.
-               - For each file entry:
-                 - `added` contains new English keys that need translations.
-                 - `updated` contains stale keys whose English source changed; re-translate using the `after` value.
-                 - `deleted` contains keys that should be removed from locale files.
-                 - `fileDeleted: true` means the English file no longer exists; remove the matching locale file if present.
-               - Read the current English JSON file for any file that still exists so wording, placeholders, and surrounding terminology stay accurate.
-               - If `Structured change set available` is `false`, treat this as a scoped full sync and use the current English files plus scoped checks as the source of truth.
-            4. Run a scoped pre-check before editing:
-               - `vp run dify-web#i18n:check ${{ steps.context.outputs.FILE_ARGS }} ${{ steps.context.outputs.LANG_ARGS }}`
-               - Use this command as the source of truth for missing and extra keys inside the current scope.
-            5. Apply translations.
-               - For every target language and scoped file:
-                 - If `fileDeleted` is `true`, remove the locale file if it exists and skip the rest of that file.
-                 - If the locale file does not exist yet, create it with `Write` and then continue with `Edit` as needed.
-                 - ADD missing keys.
-                 - UPDATE stale translations when the English value changed.
-                 - DELETE removed keys. Prefer `vp run dify-web#i18n:check ${{ steps.context.outputs.FILE_ARGS }} ${{ steps.context.outputs.LANG_ARGS }} --auto-remove` for extra keys so deletions stay in scope.
-               - Preserve placeholders exactly: `{{variable}}`, `${variable}`, HTML tags, component tags, and variable names.
-               - Match the existing terminology and register used by each locale.
-               - Prefer one Edit per file when stable, but prioritize correctness over batching.
-            6. Verify only the edited files.
-               - Run `vp run dify-web#lint:fix --quiet -- <relative edited i18n file paths under web/>`
-               - Run `vp run dify-web#i18n:check ${{ steps.context.outputs.FILE_ARGS }} ${{ steps.context.outputs.LANG_ARGS }}`
-               - If verification fails, fix the remaining problems before continuing.
-            7. Stop after the scoped locale files are updated and verification passes.
-               - Do not create branches, commits, or pull requests.
+            You are a professional i18n synchronization engineer for the Dify project.
+            Your task is to keep all language translations in sync with the English source (en-US).
+            ## CRITICAL TOOL RESTRICTIONS
+            - Use **Read** tool to read files (NOT cat or bash)
+            - Use **Edit** tool to modify JSON files (NOT node, jq, or bash scripts)
+            - Use **Bash** ONLY for: git commands, gh commands, pnpm commands
+            - Run bash commands ONE BY ONE, never combine with && or ||
+            - NEVER use `$()` command substitution - it's not supported. Split into separate commands instead.
+            ## WORKING DIRECTORY & ABSOLUTE PATHS
+            Claude Code sandbox working directory may vary. Always use absolute paths:
+            - For pnpm: `pnpm --dir ${{ github.workspace }}/web <command>`
+            - For git: `git -C ${{ github.workspace }} <command>`
+            - For gh: `gh --repo ${{ github.repository }} <command>`
+            - For file paths: `${{ github.workspace }}/web/i18n/`
+            ## EFFICIENCY RULES
+            - **ONE Edit per language file** - batch all key additions into a single Edit
+            - Insert new keys at the beginning of JSON (after `{`), lint:fix will sort them
+            - Translate ALL keys for a language mentally first, then do ONE Edit
+            ## Context
+            - Changed/target files: ${{ steps.detect_changes.outputs.CHANGED_FILES }}
+            - Target languages (empty means all supported): ${{ steps.detect_changes.outputs.TARGET_LANGS }}
+            - Sync mode: ${{ steps.detect_changes.outputs.SYNC_MODE }}
+            - Translation files are located in: ${{ github.workspace }}/web/i18n/{locale}/{filename}.json
+            - Language configuration is in: ${{ github.workspace }}/web/i18n-config/languages.ts
+            - Git diff is available: ${{ steps.detect_changes.outputs.DIFF_AVAILABLE }}
+            ## CRITICAL DESIGN: Verify First, Then Sync
+            You MUST follow this three-phase approach:
+            ═══════════════════════════════════════════════════════════════
+            ║ PHASE 1: VERIFY - Analyze and Generate Change Report ║
+            ═══════════════════════════════════════════════════════════════
+            ### Step 1.1: Analyze Git Diff (for incremental mode)
+            Use the Read tool to read `/tmp/i18n-diff.txt` to see the git diff.
+            Parse the diff to categorize changes:
+            - Lines with `+` (not `+++`): Added or modified values
+            - Lines with `-` (not `---`): Removed or old values
+            - Identify specific keys for each category:
+              * ADD: Keys that appear only in `+` lines (new keys)
+              * UPDATE: Keys that appear in both `-` and `+` lines (value changed)
+              * DELETE: Keys that appear only in `-` lines (removed keys)
+            ### Step 1.2: Read Language Configuration
+            Use the Read tool to read `${{ github.workspace }}/web/i18n-config/languages.ts`.
+            Extract all languages with `supported: true`.
+            ### Step 1.3: Run i18n:check for Each Language
+            ```bash
+            pnpm --dir ${{ github.workspace }}/web install --frozen-lockfile
+            ```
+            ```bash
+            pnpm --dir ${{ github.workspace }}/web run i18n:check
+            ```
+            This will report:
+            - Missing keys (need to ADD)
+            - Extra keys (need to DELETE)
+            ### Step 1.4: Generate Change Report
+            Create a structured report identifying:
+            ```
+            ╔══════════════════════════════════════════════════════════════╗
+            ║ I18N SYNC CHANGE REPORT ║
+            ╠══════════════════════════════════════════════════════════════╣
+            ║ Files to process: [list] ║
+            ║ Languages to sync: [list] ║
+            ╠══════════════════════════════════════════════════════════════╣
+            ║ ADD (New Keys):
+            ║ - [filename].[key]: "English value"
+            ║ ... ║
+            ╠══════════════════════════════════════════════════════════════╣
+            ║ UPDATE (Modified Keys - MUST re-translate):
+            ║ - [filename].[key]: "Old value" → "New value" ║
+            ║ ... ║
+            ╠══════════════════════════════════════════════════════════════╣
+            ║ DELETE (Extra Keys):
+            ║ - [language]/[filename].[key] ║
+            ║ ... ║
+            ╚══════════════════════════════════════════════════════════════╝
+            ```
+            **IMPORTANT**: For UPDATE detection, compare git diff to find keys where
+            the English value changed. These MUST be re-translated even if target
+            language already has a translation (it's now stale!).
+            ═══════════════════════════════════════════════════════════════
+            ║ PHASE 2: SYNC - Execute Changes Based on Report ║
+            ═══════════════════════════════════════════════════════════════
+            ### Step 2.1: Process ADD Operations (BATCH per language file)
+            **CRITICAL WORKFLOW for efficiency:**
+            1. First, translate ALL new keys for ALL languages mentally
+            2. Then, for EACH language file, do ONE Edit operation:
+               - Read the file once
+               - Insert ALL new keys at the beginning (right after the opening `{`)
+               - Don't worry about alphabetical order - lint:fix will sort them later
+            Example Edit (adding 3 keys to zh-Hans/app.json):
+            ```
+            old_string: '{\n  "accessControl"'
+            new_string: '{\n  "newKey1": "translation1",\n  "newKey2": "translation2",\n  "newKey3": "translation3",\n  "accessControl"'
+            ```
+            **IMPORTANT**:
+            - ONE Edit per language file (not one Edit per key!)
+            - Always use the Edit tool. NEVER use bash scripts, node, or jq.
+            ### Step 2.2: Process UPDATE Operations
+            **IMPORTANT: Special handling for zh-Hans and ja-JP**
+            If zh-Hans or ja-JP files were ALSO modified in the same push:
+            - Run: `git -C ${{ github.workspace }} diff HEAD~1 --name-only` and check for zh-Hans or ja-JP files
+            - If found, it means someone manually translated them. Apply these rules:
+              1. **Missing keys**: Still ADD them (completeness required)
+              2. **Existing translations**: Compare with the NEW English value:
+                 - If translation is **completely wrong** or **unrelated** → Update it
+                 - If translation is **roughly correct** (captures the meaning) → Keep it, respect manual work
+                 - When in doubt, **keep the manual translation**
+            Example:
+            - English changed: "Save" → "Save Changes"
+            - Manual translation: "保存更改" → Keep it (correct meaning)
+            - Manual translation: "删除" → Update it (completely wrong)
+            For other languages:
+            Use Edit tool to replace the old value with the new translation.
+            You can batch multiple updates in one Edit if they are adjacent.
+            ### Step 2.3: Process DELETE Operations
+            For extra keys reported by i18n:check:
+            - Run: `pnpm --dir ${{ github.workspace }}/web run i18n:check --auto-remove`
+            - Or manually remove from target language JSON files
+            ## Translation Guidelines
+            - PRESERVE all placeholders exactly as-is:
+              - `{{variable}}` - Mustache interpolation
+              - `${variable}` - Template literal
+              - `<tag>content</tag>` - HTML tags
+              - `_one`, `_other` - Pluralization suffixes (these are KEY suffixes, not values)
+            **CRITICAL: Variable names and tag names MUST stay in English - NEVER translate them**
+            ✅ CORRECT examples:
+            - English: "{{count}} items" → Japanese: "{{count}} 個のアイテム"
+            - English: "{{name}} updated" → Korean: "{{name}} 업데이트됨"
+            - English: "<email>{{email}}</email>" → Chinese: "<email>{{email}}</email>"
+            - English: "<CustomLink>Marketplace</CustomLink>" → Japanese: "<CustomLink>マーケットプレイス</CustomLink>"
+            ❌ WRONG examples (NEVER do this - will break the application):
+            - "{{count}}" → "{{カウント}}" ❌ (variable name translated to Japanese)
+            - "{{name}}" → "{{이름}}" ❌ (variable name translated to Korean)
+            - "{{email}}" → "{{邮箱}}" ❌ (variable name translated to Chinese)
+            - "<email>" → "<メール>" ❌ (tag name translated)
+            - "<CustomLink>" → "<自定义链接>" ❌ (component name translated)
+            - Use appropriate language register (formal/informal) based on existing translations
+            - Match existing translation style in each language
+            - Technical terms: check existing conventions per language
+            - For CJK languages: no spaces between characters unless necessary
+            - For RTL languages (ar-TN, fa-IR): ensure proper text handling
+            ## Output Format Requirements
+            - Alphabetical key ordering (if original file uses it)
+            - 2-space indentation
+            - Trailing newline at end of file
+            - Valid JSON (use proper escaping for special characters)
+            ═══════════════════════════════════════════════════════════════
+            ║ PHASE 3: RE-VERIFY - Confirm All Issues Resolved ║
+            ═══════════════════════════════════════════════════════════════
+            ### Step 3.1: Run Lint Fix (IMPORTANT!)
+            ```bash
+            pnpm --dir ${{ github.workspace }}/web lint:fix --quiet -- 'i18n/**/*.json'
+            ```
+            This ensures:
+            - JSON keys are sorted alphabetically (jsonc/sort-keys rule)
+            - Valid i18n keys (dify-i18n/valid-i18n-keys rule)
+            - No extra keys (dify-i18n/no-extra-keys rule)
+            ### Step 3.2: Run Final i18n Check
+            ```bash
+            pnpm --dir ${{ github.workspace }}/web run i18n:check
+            ```
+            ### Step 3.3: Fix Any Remaining Issues
+            If check reports issues:
+            - Go back to PHASE 2 for unresolved items
+            - Repeat until check passes
+            ### Step 3.4: Generate Final Summary
+            ```
+            ╔══════════════════════════════════════════════════════════════╗
+            ║ SYNC COMPLETED SUMMARY ║
+            ╠══════════════════════════════════════════════════════════════╣
+            ║ Language │ Added │ Updated │ Deleted │ Status ║
+            ╠══════════════════════════════════════════════════════════════╣
+            ║ zh-Hans │ 5 │ 2 │ 1 │ ✓ Complete ║
+            ║ ja-JP │ 5 │ 2 │ 1 │ ✓ Complete ║
+            ║ ... │ ... │ ... │ ... │ ... ║
+            ╠══════════════════════════════════════════════════════════════╣
+            ║ i18n:check │ PASSED - All keys in sync ║
+            ╚══════════════════════════════════════════════════════════════╝
+            ```
+            ## Mode-Specific Behavior
+            **SYNC_MODE = "incremental"** (default):
+            - Focus on keys identified from git diff
+            - Also check i18n:check output for any missing/extra keys
+            - Efficient for small changes
+            **SYNC_MODE = "full"**:
+            - Compare ALL keys between en-US and each language
+            - Run i18n:check to identify all discrepancies
+            - Use for first-time sync or fixing historical issues
+            ## Important Notes
+            1. Always run i18n:check BEFORE and AFTER making changes
+            2. The check script is the source of truth for missing/extra keys
+            3. For UPDATE scenario: git diff is the source of truth for changed values
+            4. Create a single commit with all translation changes
+            5. If any translation fails, continue with others and report failures
+            ═══════════════════════════════════════════════════════════════
+            ║ PHASE 4: COMMIT AND CREATE PR ║
+            ═══════════════════════════════════════════════════════════════
+            After all translations are complete and verified:
+            ### Step 4.1: Check for changes
+            ```bash
+            git -C ${{ github.workspace }} status --porcelain
+            ```
+            If there are changes:
+            ### Step 4.2: Create a new branch and commit
+            Run these git commands ONE BY ONE (not combined with &&).
+            **IMPORTANT**: Do NOT use `$()` command substitution. Use two separate commands:
+            1. First, get the timestamp:
+            ```bash
+            date +%Y%m%d-%H%M%S
+            ```
+            (Note the output, e.g., "20260115-143052")
+            2. Then create branch using the timestamp value:
+            ```bash
+            git -C ${{ github.workspace }} checkout -b chore/i18n-sync-20260115-143052
+            ```
+            (Replace "20260115-143052" with the actual timestamp from step 1)
+            3. Stage changes:
+            ```bash
+            git -C ${{ github.workspace }} add web/i18n/
+            ```
+            4. Commit:
+            ```bash
+            git -C ${{ github.workspace }} commit -m "chore(i18n): sync translations with en-US - Mode: ${{ steps.detect_changes.outputs.SYNC_MODE }}"
+            ```
+            5. Push:
+            ```bash
+            git -C ${{ github.workspace }} push origin HEAD
+            ```
+            ### Step 4.3: Create Pull Request
+            ```bash
+            gh pr create --repo ${{ github.repository }} --title "chore(i18n): sync translations with en-US" --body "## Summary
+            This PR was automatically generated to sync i18n translation files.
+            ### Changes
+            - Mode: ${{ steps.detect_changes.outputs.SYNC_MODE }}
+            - Files processed: ${{ steps.detect_changes.outputs.CHANGED_FILES }}
+            ### Verification
+            - [x] \`i18n:check\` passed
+            - [x] \`lint:fix\` applied
+            🤖 Generated with Claude Code GitHub Action" --base main
+            ```
          claude_args: |
-            --max-turns 120
-            --allowedTools "Read,Write,Edit,Bash(vp *),Bash(vp:*),Glob,Grep"
+            --max-turns 150
+            --allowedTools "Read,Write,Edit,Bash(git *),Bash(git:*),Bash(gh *),Bash(gh:*),Bash(pnpm *),Bash(pnpm:*),Bash(date *),Bash(date:*),Glob,Grep"
      - name: Prepare branch metadata
        id: pr_meta
        if: steps.context.outputs.CHANGED_FILES != ''
        shell: bash
        run: |
          if [ -z "$(git -C "${{ github.workspace }}" status --porcelain -- web/i18n/)" ]; then
            echo "has_changes=false" >> "$GITHUB_OUTPUT"
            exit 0
          fi
          SCOPE_HASH=$(printf '%s|%s|%s' "${{ steps.context.outputs.CHANGED_FILES }}" "${{ steps.context.outputs.TARGET_LANGS }}" "${{ steps.context.outputs.SYNC_MODE }}" | sha256sum | cut -c1-8)
          HEAD_SHORT=$(printf '%s' "${{ steps.context.outputs.HEAD_SHA }}" | cut -c1-12)
          BRANCH_NAME="chore/i18n-sync-${HEAD_SHORT}-${SCOPE_HASH}"
          {
            echo "has_changes=true"
            echo "branch_name=$BRANCH_NAME"
          } >> "$GITHUB_OUTPUT"
      - name: Commit translation changes
        if: steps.pr_meta.outputs.has_changes == 'true'
        shell: bash
        run: |
          git -C "${{ github.workspace }}" checkout -B "${{ steps.pr_meta.outputs.branch_name }}"
          git -C "${{ github.workspace }}" add web/i18n/
          git -C "${{ github.workspace }}" commit -m "chore(i18n): sync translations with en-US"
      - name: Push translation branch
        if: steps.pr_meta.outputs.has_changes == 'true'
        shell: bash
        run: |
          if git -C "${{ github.workspace }}" ls-remote --exit-code --heads origin "${{ steps.pr_meta.outputs.branch_name }}" >/dev/null 2>&1; then
            git -C "${{ github.workspace }}" push --force-with-lease origin "${{ steps.pr_meta.outputs.branch_name }}"
          else
            git -C "${{ github.workspace }}" push --set-upstream origin "${{ steps.pr_meta.outputs.branch_name }}"
          fi
      - name: Create or update translation PR
        if: steps.pr_meta.outputs.has_changes == 'true'
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          BRANCH_NAME: ${{ steps.pr_meta.outputs.branch_name }}
          FILES_IN_SCOPE: ${{ steps.context.outputs.CHANGED_FILES }}
          TARGET_LANGS: ${{ steps.context.outputs.TARGET_LANGS }}
          SYNC_MODE: ${{ steps.context.outputs.SYNC_MODE }}
          CHANGES_SOURCE: ${{ steps.context.outputs.CHANGES_SOURCE }}
          BASE_SHA: ${{ steps.context.outputs.BASE_SHA }}
          HEAD_SHA: ${{ steps.context.outputs.HEAD_SHA }}
          REPO_NAME: ${{ github.repository }}
        shell: bash
        run: |
          PR_BODY_FILE=/tmp/i18n-pr-body.md
          LANG_COUNT=$(printf '%s\n' "$TARGET_LANGS" | wc -w | tr -d ' ')
          if [ "$LANG_COUNT" = "0" ]; then
            LANG_COUNT="0"
          fi
          export LANG_COUNT
          node <<'NODE' > "$PR_BODY_FILE"
          const fs = require('node:fs')
          const changesPath = '/tmp/i18n-changes.json'
          const changes = fs.existsSync(changesPath)
            ? JSON.parse(fs.readFileSync(changesPath, 'utf8'))
            : { changes: {} }
          const filesInScope = (process.env.FILES_IN_SCOPE || '').split(/\s+/).filter(Boolean)
          const lines = [
            '## Summary',
            '',
            `- **Files synced**: \`${process.env.FILES_IN_SCOPE || '<none>'}\``,
            `- **Languages updated**: ${process.env.TARGET_LANGS || '<none>'} (${process.env.LANG_COUNT} languages)`,
            `- **Sync mode**: ${process.env.SYNC_MODE}${process.env.BASE_SHA ? ` (base: \`${process.env.BASE_SHA.slice(0, 10)}\`, head: \`${process.env.HEAD_SHA.slice(0, 10)}\`)` : ` (head: \`${process.env.HEAD_SHA.slice(0, 10)}\`)`}`,
            '',
            '### Key changes',
          ]
          for (const fileName of filesInScope) {
            const fileChange = changes.changes?.[fileName] || { added: {}, updated: {}, deleted: [], fileDeleted: false }
            const addedKeys = Object.keys(fileChange.added || {})
            const updatedKeys = Object.keys(fileChange.updated || {})
            const deletedKeys = fileChange.deleted || []
            lines.push(`- \`${fileName}\`: +${addedKeys.length} / ~${updatedKeys.length} / -${deletedKeys.length}${fileChange.fileDeleted ? ' (file deleted in en-US)' : ''}`)
          }
          lines.push(
            '',
            '## Verification',
            '',
            `- \`vp run dify-web#i18n:check --file ${process.env.FILES_IN_SCOPE} --lang ${process.env.TARGET_LANGS}\``,
            `- \`vp run dify-web#lint:fix --quiet -- <edited i18n files under web/>\``,
            '',
            '## Notes',
            '',
            '- This PR was generated from structured en-US key changes produced by `trigger-i18n-sync.yml`.',
            `- Structured change source: ${process.env.CHANGES_SOURCE || 'unknown'}.`,
            '- Branch name is deterministic for the head SHA and scope, so reruns update the same PR instead of opening duplicates.',
            '',
            '🤖 Generated with [Claude Code](https://claude.com/claude-code)'
          )
          process.stdout.write(lines.join('\n'))
          NODE
          EXISTING_PR_NUMBER=$(gh pr list --repo "$REPO_NAME" --head "$BRANCH_NAME" --state open --json number --jq '.[0].number')
          if [ -n "$EXISTING_PR_NUMBER" ] && [ "$EXISTING_PR_NUMBER" != "null" ]; then
            gh pr edit "$EXISTING_PR_NUMBER" --repo "$REPO_NAME" --title "chore(i18n): sync translations with en-US" --body-file "$PR_BODY_FILE"
          else
            gh pr create --repo "$REPO_NAME" --head "$BRANCH_NAME" --base main --title "chore(i18n): sync translations with en-US" --body-file "$PR_BODY_FILE"
          fi
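The `pr_meta` step derives the branch name from the head SHA plus a hash of the sync scope, so rerunning the same push updates one PR instead of fanning out duplicates. A minimal sketch of that derivation, assuming Node's built-in crypto module:

```typescript
import { createHash } from 'node:crypto'

// Deterministic branch name: same head SHA + same scope → same branch,
// so repeated runs update the existing PR rather than open a new one.
// Mirrors the workflow's `printf '%s|%s|%s' ... | sha256sum | cut -c1-8`.
function i18nSyncBranch(headSha: string, files: string, langs: string, mode: string): string {
  const scopeHash = createHash('sha256')
    .update(`${files}|${langs}|${mode}`)
    .digest('hex')
    .slice(0, 8)
  return `chore/i18n-sync-${headSha.slice(0, 12)}-${scopeHash}`
}
```

Pairing this with `push --force-with-lease` and the `gh pr edit`-or-`create` branch above makes the whole publish step idempotent.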

View File

@@ -1,5 +1,9 @@
 name: Trigger i18n Sync on Push
+# This workflow bridges the push event to repository_dispatch
+# because claude-code-action doesn't support push events directly.
+# See: https://github.com/langgenius/dify/issues/30743
 on:
   push:
     branches: [main]
@@ -9,14 +13,10 @@ on:
 permissions:
   contents: write
-concurrency:
-  group: trigger-i18n-sync-${{ github.ref }}
-  cancel-in-progress: true
 jobs:
   trigger:
     if: github.repository == 'langgenius/dify'
-    runs-on: depot-ubuntu-24.04
+    runs-on: ubuntu-latest
     timeout-minutes: 5
     steps:
@@ -25,66 +25,42 @@ jobs:
        with:
          fetch-depth: 0
-      - name: Detect changed files and build structured change set
+      - name: Detect changed files and generate diff
        id: detect
-        shell: bash
        run: |
-          BASE_SHA="${{ github.event.before }}"
-          if [ -z "$BASE_SHA" ] || [ "$BASE_SHA" = "0000000000000000000000000000000000000000" ]; then
-            BASE_SHA=$(git rev-parse HEAD~1 2>/dev/null || true)
-          fi
-          HEAD_SHA="${{ github.sha }}"
-          if [ -n "$BASE_SHA" ]; then
-            CHANGED_FILES=$(git diff --name-only "$BASE_SHA" "$HEAD_SHA" -- 'web/i18n/en-US/*.json' 2>/dev/null | sed -n 's@^.*/@@p' | sed 's/\.json$//' | tr '\n' ' ' | sed 's/[[:space:]]*$//')
-          else
-            CHANGED_FILES=$(find web/i18n/en-US -maxdepth 1 -type f -name '*.json' -print | sed -n 's@^.*/@@p' | sed 's/\.json$//' | sort | tr '\n' ' ' | sed 's/[[:space:]]*$//')
-          fi
-          export BASE_SHA HEAD_SHA CHANGED_FILES
-          node .github/scripts/generate-i18n-changes.mjs
-          if [ -n "$CHANGED_FILES" ]; then
-            echo "has_changes=true" >> "$GITHUB_OUTPUT"
-          else
-            echo "has_changes=false" >> "$GITHUB_OUTPUT"
-          fi
-          echo "base_sha=$BASE_SHA" >> "$GITHUB_OUTPUT"
-          echo "head_sha=$HEAD_SHA" >> "$GITHUB_OUTPUT"
-          echo "changed_files=$CHANGED_FILES" >> "$GITHUB_OUTPUT"
+          BEFORE_SHA="${{ github.event.before }}"
+          # Handle edge case: force push may have null/zero SHA
+          if [ -z "$BEFORE_SHA" ] || [ "$BEFORE_SHA" = "0000000000000000000000000000000000000000" ]; then
+            BEFORE_SHA="HEAD~1"
+          fi
+          # Detect changed i18n files
+          changed=$(git diff --name-only "$BEFORE_SHA" "${{ github.sha }}" -- 'web/i18n/en-US/*.json' 2>/dev/null | xargs -n1 basename 2>/dev/null | sed 's/.json$//' | tr '\n' ' ' || echo "")
+          echo "changed_files=$changed" >> $GITHUB_OUTPUT
+          # Generate diff for context
+          git diff "$BEFORE_SHA" "${{ github.sha }}" -- 'web/i18n/en-US/*.json' > /tmp/i18n-diff.txt 2>/dev/null || echo "" > /tmp/i18n-diff.txt
+          # Truncate if too large (keep first 50KB to match receiving workflow)
+          head -c 50000 /tmp/i18n-diff.txt > /tmp/i18n-diff-truncated.txt
+          mv /tmp/i18n-diff-truncated.txt /tmp/i18n-diff.txt
+          # Base64 encode the diff for safe JSON transport (portable, single-line)
+          diff_base64=$(base64 < /tmp/i18n-diff.txt | tr -d '\n')
+          echo "diff_base64=$diff_base64" >> $GITHUB_OUTPUT
+          if [ -n "$changed" ]; then
+            echo "has_changes=true" >> $GITHUB_OUTPUT
+            echo "Detected changed files: $changed"
+          else
+            echo "has_changes=false" >> $GITHUB_OUTPUT
+            echo "No i18n changes detected"
+          fi
      - name: Trigger i18n sync workflow
        if: steps.detect.outputs.has_changes == 'true'
-        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
-        env:
-          BASE_SHA: ${{ steps.detect.outputs.base_sha }}
-          HEAD_SHA: ${{ steps.detect.outputs.head_sha }}
-          CHANGED_FILES: ${{ steps.detect.outputs.changed_files }}
+        uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1
        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          script: |
-            const fs = require('fs')
-            const changesJson = fs.readFileSync('/tmp/i18n-changes.json', 'utf8')
-            const changesBase64 = Buffer.from(changesJson).toString('base64')
-            const maxEmbeddedChangesChars = 48000
-            const changesEmbedded = changesBase64.length <= maxEmbeddedChangesChars
-            if (!changesEmbedded) {
-              console.log(`Structured change set too large to embed safely (${changesBase64.length} chars). Downstream workflow will regenerate it from git history.`)
-            }
-            await github.rest.repos.createDispatchEvent({
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              event_type: 'i18n-sync',
-              client_payload: {
-                changed_files: process.env.CHANGED_FILES,
-                changes_base64: changesEmbedded ? changesBase64 : '',
-                changes_embedded: changesEmbedded,
-                sync_mode: 'incremental',
-                base_sha: process.env.BASE_SHA,
-                head_sha: process.env.HEAD_SHA,
-              },
-            })
+          token: ${{ secrets.GITHUB_TOKEN }}
+          event-type: i18n-sync
+          client-payload: '{"changed_files": "${{ steps.detect.outputs.changed_files }}", "diff_base64": "${{ steps.detect.outputs.diff_base64 }}", "sync_mode": "incremental", "trigger_sha": "${{ github.sha }}"}'
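Both generations of this bridge squeeze a git diff (or a structured JSON change set) through a repository_dispatch payload, which only carries JSON-safe strings, hence the base64 encoding and the size cap before embedding. A rough TypeScript sketch of that round-trip, with the 48,000-character threshold taken from the workflow above:

```typescript
// Encode text for repository_dispatch transport; the dispatch body is JSON,
// so arbitrary diff/change-set text is shipped as single-line base64.
const MAX_EMBEDDED_CHARS = 48000 // embedding threshold used by the trigger workflow

function encodeForDispatch(raw: string): { changes_base64: string, changes_embedded: boolean } {
  const encoded = Buffer.from(raw, 'utf8').toString('base64')
  if (encoded.length > MAX_EMBEDDED_CHARS) {
    // Too big to embed safely: the receiver regenerates the data from git history.
    return { changes_base64: '', changes_embedded: false }
  }
  return { changes_base64: encoded, changes_embedded: true }
}

// Receiving side: decode back to the original text before use.
function decodeFromDispatch(changesBase64: string): string {
  return Buffer.from(changesBase64, 'base64').toString('utf8')
}
```

The fallback branch is what the receiving workflow's `CHANGES_SOURCE=recomputed` path handles: when the payload arrives empty, it rebuilds the change set from the base and head SHAs instead.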

View File

@@ -1,95 +0,0 @@
name: Run Full VDB Tests
on:
  schedule:
    - cron: '0 3 * * 1'
  workflow_dispatch:
permissions:
  contents: read
concurrency:
  group: vdb-tests-full-${{ github.ref || github.run_id }}
  cancel-in-progress: true
jobs:
  test:
    name: Full VDB Tests
    if: github.repository == 'langgenius/dify'
    runs-on: depot-ubuntu-24.04
    strategy:
      matrix:
        python-version:
          - "3.12"
    steps:
      - name: Checkout code
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          persist-credentials: false
      - name: Free Disk Space
        uses: endersonmenezes/free-disk-space@7901478139cff6e9d44df5972fd8ab8fcade4db1 # v3.2.2
        with:
          remove_dotnet: true
          remove_haskell: true
          remove_tool_cache: true
      - name: Setup UV and Python
        uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0
        with:
          enable-cache: true
          python-version: ${{ matrix.python-version }}
          cache-dependency-glob: api/uv.lock
      - name: Check UV lockfile
        run: uv lock --project api --check
      - name: Install dependencies
        run: uv sync --project api --dev
      - name: Set up dotenvs
        run: |
          cp docker/.env.example docker/.env
          cp docker/envs/middleware.env.example docker/middleware.env
      - name: Expose Service Ports
        run: sh .github/workflows/expose_service_ports.sh
      # - name: Set up Vector Store (TiDB)
      #   uses: hoverkraft-tech/compose-action@v2.0.2
      #   with:
      #     compose-file: docker/tidb/docker-compose.yaml
      #     services: |
      #       tidb
      #       tiflash
      - name: Set up Full Vector Store Matrix
        uses: hoverkraft-tech/compose-action@d2bee4f07e8ca410d6b196d00f90c12e7d48c33a # v2.6.0
        with:
          compose-file: |
            docker/docker-compose.yaml
          services: |
            weaviate
            qdrant
            couchbase-server
            etcd
            minio
            milvus-standalone
            pgvecto-rs
            pgvector
            chroma
            elasticsearch
            oceanbase
      - name: setup test config
        run: |
          echo $(pwd)
          ls -lah .
          cp api/tests/integration_tests/.env.example api/tests/integration_tests/.env
      # - name: Check VDB Ready (TiDB)
      #   run: uv run --project api python api/providers/vdb/tidb-vector/tests/integration_tests/check_tiflash_ready.py
      - name: Test Vector Stores
        run: uv run --project api bash dev/pytest/pytest_vdb.sh

View File

@@ -1,22 +1,20 @@
-name: Run VDB Smoke Tests
+name: Run VDB Tests
 on:
   workflow_call:
-permissions:
-  contents: read
 concurrency:
   group: vdb-tests-${{ github.head_ref || github.run_id }}
   cancel-in-progress: true
 jobs:
   test:
-    name: VDB Smoke Tests
-    runs-on: depot-ubuntu-24.04
+    name: VDB Tests
+    runs-on: ubuntu-latest
     strategy:
       matrix:
         python-version:
+          - "3.11"
           - "3.12"
     steps:
@@ -33,7 +31,7 @@ jobs:
          remove_tool_cache: true
      - name: Setup UV and Python
-        uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0
+        uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0
        with:
          enable-cache: true
          python-version: ${{ matrix.python-version }}
@@ -48,7 +46,7 @@ jobs:
      - name: Set up dotenvs
        run: |
          cp docker/.env.example docker/.env
-          cp docker/envs/middleware.env.example docker/middleware.env
+          cp docker/middleware.env.example docker/middleware.env
      - name: Expose Service Ports
        run: sh .github/workflows/expose_service_ports.sh
@@ -61,18 +59,23 @@ jobs:
      #       tidb
      #       tiflash
-      - name: Set up Vector Stores for Smoke Coverage
-        uses: hoverkraft-tech/compose-action@d2bee4f07e8ca410d6b196d00f90c12e7d48c33a # v2.6.0
+      - name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch, Couchbase, OceanBase)
+        uses: hoverkraft-tech/compose-action@4894d2492015c1774ee5a13a95b1072093087ec3 # v2.5.0
        with:
          compose-file: |
            docker/docker-compose.yaml
          services: |
-            db_postgres
-            redis
            weaviate
            qdrant
+            couchbase-server
+            etcd
+            minio
+            milvus-standalone
+            pgvecto-rs
            pgvector
            chroma
+            elasticsearch
+            oceanbase
      - name: setup test config
        run: |
@@ -81,12 +84,7 @@ jobs:
          cp api/tests/integration_tests/.env.example api/tests/integration_tests/.env
      # - name: Check VDB Ready (TiDB)
-      #   run: uv run --project api python api/providers/vdb/tidb-vector/tests/integration_tests/check_tiflash_ready.py
+      #   run: uv run --project api python api/tests/integration_tests/vdb/tidb_vector/check_tiflash_ready.py
      - name: Test Vector Stores
-        run: |
-          uv run --project api pytest --timeout "${PYTEST_TIMEOUT:-180}" \
-            api/providers/vdb/vdb-chroma/tests/integration_tests \
-            api/providers/vdb/vdb-pgvector/tests/integration_tests \
-            api/providers/vdb/vdb-qdrant/tests/integration_tests \
-            api/providers/vdb/vdb-weaviate/tests/integration_tests
+        run: uv run --project api bash dev/pytest/pytest_vdb.sh

View File

@@ -1,68 +0,0 @@
name: Web Full-Stack E2E
on:
  workflow_call:
permissions:
  contents: read
concurrency:
  group: web-e2e-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
jobs:
  test:
    name: Web Full-Stack E2E
    runs-on: depot-ubuntu-24.04-4
    defaults:
      run:
        shell: bash
    steps:
      - name: Checkout code
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          persist-credentials: false
      - name: Setup web dependencies
        uses: ./.github/actions/setup-web
      - name: Setup UV and Python
        uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0
        with:
          enable-cache: true
          python-version: "3.12"
          cache-dependency-glob: api/uv.lock
      - name: Install API dependencies
        run: uv sync --project api --dev
      - name: Install Playwright browser
        working-directory: ./e2e
        run: vp run e2e:install
      - name: Run isolated source-api and built-web Cucumber E2E tests
        working-directory: ./e2e
        env:
          E2E_ADMIN_EMAIL: e2e-admin@example.com
          E2E_ADMIN_NAME: E2E Admin
          E2E_ADMIN_PASSWORD: E2eAdmin12345
          E2E_FORCE_WEB_BUILD: "1"
          E2E_INIT_PASSWORD: E2eInit12345
        run: vp run e2e:full
      - name: Upload Cucumber report
        if: ${{ !cancelled() }}
        uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
        with:
          name: cucumber-report
          path: e2e/cucumber-report
          retention-days: 7
      - name: Upload E2E logs
        if: ${{ !cancelled() }}
        uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
        with:
          name: e2e-logs
          path: e2e/.logs
          retention-days: 7

View File

@ -16,14 +16,14 @@ concurrency:
jobs: jobs:
test: test:
name: Web Tests (${{ matrix.shardIndex }}/${{ matrix.shardTotal }}) name: Web Tests (${{ matrix.shardIndex }}/${{ matrix.shardTotal }})
runs-on: depot-ubuntu-24.04-4 runs-on: ubuntu-latest
env: env:
VITEST_COVERAGE_SCOPE: app-components VITEST_COVERAGE_SCOPE: app-components
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
shardIndex: [1, 2, 3, 4] shardIndex: [1, 2, 3, 4, 5, 6]
shardTotal: [4] shardTotal: [6]
defaults: defaults:
run: run:
shell: bash shell: bash
@ -43,7 +43,7 @@ jobs:
- name: Upload blob report - name: Upload blob report
if: ${{ !cancelled() }} if: ${{ !cancelled() }}
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with: with:
name: blob-report-${{ matrix.shardIndex }} name: blob-report-${{ matrix.shardIndex }}
path: web/.vitest-reports/* path: web/.vitest-reports/*
@ -54,7 +54,7 @@ jobs:
name: Merge Test Reports name: Merge Test Reports
if: ${{ !cancelled() }} if: ${{ !cancelled() }}
needs: [test] needs: [test]
runs-on: depot-ubuntu-24.04-4 runs-on: ubuntu-latest
env: env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
defaults: defaults:
@ -66,6 +66,7 @@ jobs:
- name: Checkout code - name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with: with:
fetch-depth: 0
persist-credentials: false persist-credentials: false
- name: Setup web environment - name: Setup web environment
@ -83,22 +84,19 @@ jobs:
- name: Report coverage - name: Report coverage
if: ${{ env.CODECOV_TOKEN != '' }} if: ${{ env.CODECOV_TOKEN != '' }}
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6.0.0 uses: codecov/codecov-action@1af58845a975a7985b0beb0cbe6fbbb71a41dbad # v5.5.3
with: with:
directory: web/coverage directory: web/coverage
flags: web flags: web
env: env:
CODECOV_TOKEN: ${{ env.CODECOV_TOKEN }} CODECOV_TOKEN: ${{ env.CODECOV_TOKEN }}
dify-ui-test: web-build:
name: dify-ui Tests name: Web Build
runs-on: depot-ubuntu-24.04-4 runs-on: ubuntu-latest
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
defaults: defaults:
run: run:
shell: bash working-directory: ./web
working-directory: ./packages/dify-ui
steps: steps:
- name: Checkout code - name: Checkout code
@ -106,20 +104,20 @@ jobs:
with: with:
persist-credentials: false persist-credentials: false
- name: Check changed files
id: changed-files
uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
with:
files: |
web/**
.github/workflows/web-tests.yml
.github/actions/setup-web/**
- name: Setup web environment - name: Setup web environment
if: steps.changed-files.outputs.any_changed == 'true'
uses: ./.github/actions/setup-web uses: ./.github/actions/setup-web
- name: Install Chromium for Browser Mode - name: Web build check
run: vp exec playwright install --with-deps chromium if: steps.changed-files.outputs.any_changed == 'true'
working-directory: ./web
- name: Run dify-ui tests run: vp run build
run: vp test run --coverage --silent=passed-only
- name: Report coverage
if: ${{ env.CODECOV_TOKEN != '' }}
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6.0.0
with:
directory: packages/dify-ui/coverage
flags: dify-ui
env:
CODECOV_TOKEN: ${{ env.CODECOV_TOKEN }}
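The `shardIndex`/`shardTotal` matrix splits the Vitest suite across parallel jobs whose blob reports are merged afterwards. Conceptually the runner partitions test files deterministically, roughly as in this sketch (illustrative only; Vitest's actual distribution strategy may differ):

```python
def shard(files: list[str], shard_index: int, shard_total: int) -> list[str]:
    """Assign every file to exactly one 1-based shard, round-robin by sorted order."""
    ordered = sorted(files)  # a stable order keeps shards consistent across jobs
    return [f for i, f in enumerate(ordered) if i % shard_total == shard_index - 1]


# Example: shard 2 of 4 over six test files.
print(shard([f"test_{n}.spec.ts" for n in range(6)], shard_index=2, shard_total=4))
```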

.gitignore vendored (12 changes)
View File

@ -203,7 +203,6 @@ sdks/python-client/dify_client.egg-info
.vscode/* .vscode/*
!.vscode/launch.json.template !.vscode/launch.json.template
!.vscode/settings.example.json
!.vscode/README.md !.vscode/README.md
api/.vscode api/.vscode
# vscode Code History Extension # vscode Code History Extension
@ -213,15 +212,10 @@ api/.vscode
# pnpm # pnpm
/.pnpm-store /.pnpm-store
node_modules
.vite-hooks/_
# plugin migrate # plugin migrate
plugins.jsonl plugins.jsonl
# generated API OpenAPI specs
packages/contracts/openapi/
# mise # mise
mise.toml mise.toml
@ -240,15 +234,9 @@ scripts/stress-test/reports/
.playwright-mcp/ .playwright-mcp/
.serena/ .serena/
# vitest browser mode attachments (failure screenshots, traces, etc.)
.vitest-attachments/
**/__screenshots__/
# settings # settings
*.local.json *.local.json
*.local.md *.local.md
# Code Agent Folder # Code Agent Folder
.qoder/* .qoder/*
.eslintcache

View File

@ -1,64 +0,0 @@
#!/bin/sh
# get the list of modified files
files=$(git diff --cached --name-only)
# check if api or web directory is modified
api_modified=false
web_modified=false
skip_web_checks=false
git_path() {
git rev-parse --git-path "$1"
}
if [ -f "$(git_path MERGE_HEAD)" ] || \
[ -f "$(git_path CHERRY_PICK_HEAD)" ] || \
[ -f "$(git_path REVERT_HEAD)" ] || \
[ -f "$(git_path SQUASH_MSG)" ] || \
[ -d "$(git_path rebase-merge)" ] || \
[ -d "$(git_path rebase-apply)" ]; then
skip_web_checks=true
fi
for file in $files
do
# Use POSIX compliant pattern matching
case "$file" in
api/*.py)
# set api_modified flag to true
api_modified=true
;;
web/*)
# set web_modified flag to true
web_modified=true
;;
esac
done
# run linters based on the modified modules
if $api_modified; then
echo "Running Ruff linter on api module"
# run Ruff linter auto-fixing
uv run --project api --dev ruff check --fix ./api
# run Ruff linter checks
uv run --project api --dev ruff check ./api || status=$?
status=${status:-0}
if [ $status -ne 0 ]; then
echo "Ruff linter on api module error, exit code: $status"
echo "Please run 'dev/reformat' to fix the fixable linting errors."
exit 1
fi
fi
if $skip_web_checks; then
echo "Git operation in progress, skipping web checks"
exit 0
fi
vp staged
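The hook detects an in-progress merge, cherry-pick, revert, squash, or rebase by checking the marker paths that `git rev-parse --git-path` resolves. The same check translated to Python, as a hedged equivalent sketch:

```python
import subprocess
from pathlib import Path

# Marker files/directories Git creates while an operation is in flight.
_MARKERS = ("MERGE_HEAD", "CHERRY_PICK_HEAD", "REVERT_HEAD", "SQUASH_MSG",
            "rebase-merge", "rebase-apply")


def git_operation_in_progress() -> bool:
    for marker in _MARKERS:
        path = subprocess.run(
            ["git", "rev-parse", "--git-path", marker],
            capture_output=True, text=True, check=True,
        ).stdout.strip()
        if Path(path).exists():
            return True
    return False


if __name__ == "__main__":
    print("skip web checks" if git_operation_in_progress() else "run web checks")
```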

View File

@ -2,10 +2,21 @@
"version": "0.2.0", "version": "0.2.0",
"configurations": [ "configurations": [
{ {
"name": "Python: API (gevent)", "name": "Python: Flask API",
"type": "debugpy", "type": "debugpy",
"request": "launch", "request": "launch",
"program": "${workspaceFolder}/api/app.py", "module": "flask",
"env": {
"FLASK_APP": "app.py",
"FLASK_ENV": "development"
},
"args": [
"run",
"--host=0.0.0.0",
"--port=5001",
"--no-debugger",
"--no-reload"
],
"jinja": true, "jinja": true,
"justMyCode": true, "justMyCode": true,
"cwd": "${workspaceFolder}/api", "cwd": "${workspaceFolder}/api",

View File

@ -30,7 +30,7 @@ The codebase is split into:
## Language Style ## Language Style
- **Python**: Keep type hints on functions and attributes, and implement relevant special methods (e.g., `__repr__`, `__str__`). Prefer `TypedDict` over `dict` or `Mapping` for type safety and better code documentation. - **Python**: Keep type hints on functions and attributes, and implement relevant special methods (e.g., `__repr__`, `__str__`). Prefer `TypedDict` over `dict` or `Mapping` for type safety and better code documentation.
- **TypeScript**: Use the strict config, rely on ESLint (`pnpm lint:fix` preferred) plus `pnpm type-check`, and avoid `any` types. - **TypeScript**: Use the strict config, rely on ESLint (`pnpm lint:fix` preferred) plus `pnpm type-check:tsgo`, and avoid `any` types.
## General Practices ## General Practices

View File

@ -97,3 +97,8 @@ Feel free to reach out if you encounter any issues during the setup process.
## Getting Help ## Getting Help
If you ever get stuck or get a burning question while contributing, simply shoot your queries our way via the related GitHub issue, or hop onto our [Discord](https://discord.gg/8Tpq4AcN9c) for a quick chat. If you ever get stuck or get a burning question while contributing, simply shoot your queries our way via the related GitHub issue, or hop onto our [Discord](https://discord.gg/8Tpq4AcN9c) for a quick chat.
## Automated Agent Contributions
> [!NOTE]
> If you are an automated agent, just add 🤖🤖🤖 to the end of the PR title to opt in.

View File

@ -3,10 +3,6 @@ DOCKER_REGISTRY=langgenius
WEB_IMAGE=$(DOCKER_REGISTRY)/dify-web WEB_IMAGE=$(DOCKER_REGISTRY)/dify-web
API_IMAGE=$(DOCKER_REGISTRY)/dify-api API_IMAGE=$(DOCKER_REGISTRY)/dify-api
VERSION=latest VERSION=latest
DOCKER_DIR=docker
DOCKER_MIDDLEWARE_ENV=$(DOCKER_DIR)/middleware.env
DOCKER_MIDDLEWARE_ENV_EXAMPLE=$(DOCKER_DIR)/envs/middleware.env.example
DOCKER_MIDDLEWARE_PROJECT=dify-middlewares-dev
# Default target - show help # Default target - show help
.DEFAULT_GOAL := help .DEFAULT_GOAL := help
@ -21,20 +17,15 @@ dev-setup: prepare-docker prepare-web prepare-api
# Step 1: Prepare Docker middleware # Step 1: Prepare Docker middleware
prepare-docker: prepare-docker:
@echo "🐳 Setting up Docker middleware..." @echo "🐳 Setting up Docker middleware..."
@if [ ! -f "$(DOCKER_MIDDLEWARE_ENV)" ]; then \ @cp -n docker/middleware.env.example docker/middleware.env 2>/dev/null || echo "Docker middleware.env already exists"
cp "$(DOCKER_MIDDLEWARE_ENV_EXAMPLE)" "$(DOCKER_MIDDLEWARE_ENV)"; \ @cd docker && docker compose -f docker-compose.middleware.yaml --env-file middleware.env -p dify-middlewares-dev up -d
echo "Docker middleware.env created"; \
else \
echo "Docker middleware.env already exists"; \
fi
@cd $(DOCKER_DIR) && docker compose -f docker-compose.middleware.yaml --env-file middleware.env -p $(DOCKER_MIDDLEWARE_PROJECT) up -d
@echo "✅ Docker middleware started" @echo "✅ Docker middleware started"
# Step 2: Prepare web environment # Step 2: Prepare web environment
prepare-web: prepare-web:
@echo "🌐 Setting up web environment..." @echo "🌐 Setting up web environment..."
@cp -n web/.env.example web/.env.local 2>/dev/null || echo "Web .env.local already exists" @cp -n web/.env.example web/.env 2>/dev/null || echo "Web .env already exists"
@pnpm install @cd web && pnpm install
@echo "✅ Web environment prepared (not started)" @echo "✅ Web environment prepared (not started)"
# Step 3: Prepare API environment # Step 3: Prepare API environment
@ -48,18 +39,12 @@ prepare-api:
# Clean dev environment # Clean dev environment
dev-clean: dev-clean:
@echo "⚠️ Stopping Docker containers..." @echo "⚠️ Stopping Docker containers..."
@if [ -f "$(DOCKER_MIDDLEWARE_ENV)" ]; then \ @cd docker && docker compose -f docker-compose.middleware.yaml --env-file middleware.env -p dify-middlewares-dev down
cd $(DOCKER_DIR) && docker compose -f docker-compose.middleware.yaml --env-file middleware.env -p $(DOCKER_MIDDLEWARE_PROJECT) down; \
else \
echo "Docker middleware.env does not exist, skipping compose down"; \
fi
@echo "🗑️ Removing volumes..." @echo "🗑️ Removing volumes..."
@rm -rf docker/volumes/db @rm -rf docker/volumes/db
@rm -rf docker/volumes/mysql
@rm -rf docker/volumes/redis @rm -rf docker/volumes/redis
@rm -rf docker/volumes/plugin_daemon @rm -rf docker/volumes/plugin_daemon
@rm -rf docker/volumes/weaviate @rm -rf docker/volumes/weaviate
@rm -rf docker/volumes/sandbox/dependencies
@rm -rf api/storage @rm -rf api/storage
@echo "✅ Cleanup complete" @echo "✅ Cleanup complete"
@ -86,15 +71,9 @@ type-check:
@echo "📝 Running type checks (basedpyright + pyrefly + mypy)..." @echo "📝 Running type checks (basedpyright + pyrefly + mypy)..."
@./dev/basedpyright-check $(PATH_TO_CHECK) @./dev/basedpyright-check $(PATH_TO_CHECK)
@./dev/pyrefly-check-local @./dev/pyrefly-check-local
@uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --exclude 'dev/generate_swagger_specs.py' --check-untyped-defs --disable-error-code=import-untyped . @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --check-untyped-defs --disable-error-code=import-untyped .
@echo "✅ Type checks complete" @echo "✅ Type checks complete"
type-check-core:
@echo "📝 Running core type checks (basedpyright + mypy)..."
@./dev/basedpyright-check $(PATH_TO_CHECK)
@uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --exclude 'dev/generate_swagger_specs.py' --exclude 'dev/generate_fastopenapi_specs.py' --check-untyped-defs --disable-error-code=import-untyped .
@echo "✅ Core type checks complete"
test: test:
@echo "🧪 Running backend unit tests..." @echo "🧪 Running backend unit tests..."
@if [ -n "$(TARGET_TESTS)" ]; then \ @if [ -n "$(TARGET_TESTS)" ]; then \
@ -108,7 +87,7 @@ test:
# Build Docker images # Build Docker images
build-web: build-web:
@echo "Building web Docker image: $(WEB_IMAGE):$(VERSION)..." @echo "Building web Docker image: $(WEB_IMAGE):$(VERSION)..."
docker build -f web/Dockerfile -t $(WEB_IMAGE):$(VERSION) . docker build -t $(WEB_IMAGE):$(VERSION) ./web
@echo "Web Docker image built successfully: $(WEB_IMAGE):$(VERSION)" @echo "Web Docker image built successfully: $(WEB_IMAGE):$(VERSION)"
build-api: build-api:
@ -147,14 +126,13 @@ help:
@echo " make prepare-docker - Set up Docker middleware" @echo " make prepare-docker - Set up Docker middleware"
@echo " make prepare-web - Set up web environment" @echo " make prepare-web - Set up web environment"
@echo " make prepare-api - Set up API environment" @echo " make prepare-api - Set up API environment"
@echo " make dev-clean - Stop Docker middleware containers and remove dev data" @echo " make dev-clean - Stop Docker middleware containers"
@echo "" @echo ""
@echo "Backend Code Quality:" @echo "Backend Code Quality:"
@echo " make format - Format code with ruff" @echo " make format - Format code with ruff"
@echo " make check - Check code with ruff" @echo " make check - Check code with ruff"
@echo " make lint - Format, fix, and lint code (ruff, imports, dotenv)" @echo " make lint - Format, fix, and lint code (ruff, imports, dotenv)"
@echo " make type-check - Run type checks (basedpyright, pyrefly, mypy)" @echo " make type-check - Run type checks (basedpyright, pyrefly, mypy)"
@echo " make type-check-core - Run core type checks (basedpyright, mypy)"
@echo " make test - Run backend unit tests (or TARGET_TESTS=./api/tests/<target_tests>)" @echo " make test - Run backend unit tests (or TARGET_TESTS=./api/tests/<target_tests>)"
@echo "" @echo ""
@echo "Docker Build Targets:" @echo "Docker Build Targets:"

View File

@ -53,11 +53,7 @@
<a href="./docs/tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a> <a href="./docs/tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="./docs/vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a> <a href="./docs/vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="./docs/de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a> <a href="./docs/de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="./docs/it-IT/README.md"><img alt="README in Italiano" src="https://img.shields.io/badge/Italiano-d9d9d9"></a>
<a href="./docs/pt-BR/README.md"><img alt="README em Português do Brasil" src="https://img.shields.io/badge/Portugu%C3%AAs%20do%20Brasil-d9d9d9"></a>
<a href="./docs/sl-SI/README.md"><img alt="README Slovenščina" src="https://img.shields.io/badge/Sloven%C5%A1%C4%8Dina-d9d9d9"></a>
<a href="./docs/bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a> <a href="./docs/bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
<a href="./docs/hi-IN/README.md"><img alt="README in हिन्दी" src="https://img.shields.io/badge/Hindi-d9d9d9"></a>
</p> </p>
Dify is an open-source LLM app development platform. Its intuitive interface combines AI workflow, RAG pipeline, agent capabilities, model management, observability features (including [Opik](https://www.comet.com/docs/opik/integrations/dify), [Langfuse](https://docs.langfuse.com), and [Arize Phoenix](https://docs.arize.com/phoenix)) and more, letting you quickly go from prototype to production. Here's a list of the core features: Dify is an open-source LLM app development platform. Its intuitive interface combines AI workflow, RAG pipeline, agent capabilities, model management, observability features (including [Opik](https://www.comet.com/docs/opik/integrations/dify), [Langfuse](https://docs.langfuse.com), and [Arize Phoenix](https://docs.arize.com/phoenix)) and more, letting you quickly go from prototype to production. Here's a list of the core features:
@ -137,7 +133,20 @@ Star Dify on GitHub and be instantly notified of new releases.
### Custom configurations ### Custom configurations
If you need to customize the configuration, edit `docker/.env`. The essential startup defaults live in [`docker/.env.example`](docker/.env.example), and optional advanced variables are split under `docker/envs/` by theme. After making any changes, re-run `docker compose up -d` from the `docker` directory. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments). If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).
#### Customizing Suggested Questions
You can now customize the "Suggested Questions After Answer" feature to better fit your use case. For example, to generate longer, more technical questions:
```bash
# In your .env file
SUGGESTED_QUESTIONS_PROMPT='Please help me predict the five most likely technical follow-up questions a developer would ask. Focus on implementation details, best practices, and architecture considerations. Keep each question between 40-60 characters. Output must be JSON array: ["question1","question2","question3","question4","question5"]'
SUGGESTED_QUESTIONS_MAX_TOKENS=512
SUGGESTED_QUESTIONS_TEMPERATURE=0.3
```
See the [Suggested Questions Configuration Guide](docs/suggested-questions-configuration.md) for detailed examples and usage instructions.
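A hedged sketch of how the API side might consume these variables when requesting follow-up questions; the helper names and the shortened default prompt are assumptions for illustration, since only the variables themselves are documented here:

```python
import json
import os

DEFAULT_PROMPT = (
    "Predict the three most likely follow-up questions, each under 20 "
    'characters. Output must be a JSON array: ["q1","q2","q3"]'
)


def suggested_question_params() -> dict:
    """Read the documented env vars, falling back to stock defaults."""
    return {
        "prompt": os.environ.get("SUGGESTED_QUESTIONS_PROMPT", DEFAULT_PROMPT),
        "max_tokens": int(os.environ.get("SUGGESTED_QUESTIONS_MAX_TOKENS", "256")),
        "temperature": float(os.environ.get("SUGGESTED_QUESTIONS_TEMPERATURE", "0")),
    }


def parse_questions(raw_completion: str) -> list[str]:
    # The prompt contract demands a bare JSON array of strings.
    questions = json.loads(raw_completion)
    if not isinstance(questions, list):
        raise ValueError("model did not return a JSON array")
    return [str(q) for q in questions]
```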
### Metrics Monitoring with Grafana ### Metrics Monitoring with Grafana
@ -147,7 +156,7 @@ Import the dashboard to Grafana, using Dify's PostgreSQL database as data source
### Deployment with Kubernetes ### Deployment with Kubernetes
If you'd like to configure a highly available setup, there are community-contributed [Helm Charts](https://helm.sh/) and YAML files which allow Dify to be deployed on Kubernetes. If you'd like to configure a highly-available setup, there are community-contributed [Helm Charts](https://helm.sh/) and YAML files which allow Dify to be deployed on Kubernetes.
- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify) - [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
- [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm) - [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)

View File

@ -33,9 +33,6 @@ TRIGGER_URL=http://localhost:5001
# The time in seconds after the signature is rejected # The time in seconds after the signature is rejected
FILES_ACCESS_TIMEOUT=300 FILES_ACCESS_TIMEOUT=300
# Collaboration mode toggle
ENABLE_COLLABORATION_MODE=true
# Access token expiration time in minutes # Access token expiration time in minutes
ACCESS_TOKEN_EXPIRE_MINUTES=60 ACCESS_TOKEN_EXPIRE_MINUTES=60
@ -60,9 +57,6 @@ REDIS_SSL_CERTFILE=
REDIS_SSL_KEYFILE= REDIS_SSL_KEYFILE=
# Path to client private key file for SSL authentication # Path to client private key file for SSL authentication
REDIS_DB=0 REDIS_DB=0
# Optional global prefix for Redis keys, topics, streams, and Celery Redis transport artifacts.
# Leave empty to preserve current unprefixed behavior.
REDIS_KEY_PREFIX=
# redis Sentinel configuration. # redis Sentinel configuration.
REDIS_USE_SENTINEL=false REDIS_USE_SENTINEL=false
@ -77,21 +71,10 @@ REDIS_USE_CLUSTERS=false
REDIS_CLUSTERS= REDIS_CLUSTERS=
REDIS_CLUSTERS_PASSWORD= REDIS_CLUSTERS_PASSWORD=
REDIS_RETRY_RETRIES=3
REDIS_RETRY_BACKOFF_BASE=1.0
REDIS_RETRY_BACKOFF_CAP=10.0
REDIS_SOCKET_TIMEOUT=5.0
REDIS_SOCKET_CONNECT_TIMEOUT=5.0
REDIS_HEALTH_CHECK_INTERVAL=30
# celery configuration # celery configuration
CELERY_BROKER_URL=redis://:difyai123456@localhost:${REDIS_PORT}/1 CELERY_BROKER_URL=redis://:difyai123456@localhost:${REDIS_PORT}/1
CELERY_BACKEND=redis CELERY_BACKEND=redis
# Ops trace retry configuration
OPS_TRACE_RETRYABLE_DISPATCH_MAX_RETRIES=60
OPS_TRACE_RETRYABLE_DISPATCH_DELAY_SECONDS=5
# Database configuration # Database configuration
DB_TYPE=postgresql DB_TYPE=postgresql
DB_USERNAME=postgres DB_USERNAME=postgres
@ -102,8 +85,6 @@ DB_DATABASE=dify
SQLALCHEMY_POOL_PRE_PING=true SQLALCHEMY_POOL_PRE_PING=true
SQLALCHEMY_POOL_TIMEOUT=30 SQLALCHEMY_POOL_TIMEOUT=30
# Connection pool reset behavior on return
SQLALCHEMY_POOL_RESET_ON_RETURN=rollback
# Storage configuration # Storage configuration
# use for store upload files, private keys... # use for store upload files, private keys...
@ -121,7 +102,6 @@ S3_BUCKET_NAME=your-bucket-name
S3_ACCESS_KEY=your-access-key S3_ACCESS_KEY=your-access-key
S3_SECRET_KEY=your-secret-key S3_SECRET_KEY=your-secret-key
S3_REGION=your-region S3_REGION=your-region
S3_ADDRESS_STYLE=auto
# Workflow run and Conversation archive storage (S3-compatible) # Workflow run and Conversation archive storage (S3-compatible)
ARCHIVE_STORAGE_ENABLED=false ARCHIVE_STORAGE_ENABLED=false
@ -147,8 +127,7 @@ ALIYUN_OSS_AUTH_VERSION=v1
ALIYUN_OSS_REGION=your-region ALIYUN_OSS_REGION=your-region
# Don't start with '/'. OSS doesn't support leading slash in object names. # Don't start with '/'. OSS doesn't support leading slash in object names.
ALIYUN_OSS_PATH=your-path ALIYUN_OSS_PATH=your-path
# Optional CloudBox ID for Aliyun OSS. DO NOT enable it unless you are using CloudBox. ALIYUN_CLOUDBOX_ID=your-cloudbox-id
#ALIYUN_CLOUDBOX_ID=your-cloudbox-id
# Google Storage configuration # Google Storage configuration
GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name
@ -387,7 +366,7 @@ VIKINGDB_ACCESS_KEY=your-ak
VIKINGDB_SECRET_KEY=your-sk VIKINGDB_SECRET_KEY=your-sk
VIKINGDB_REGION=cn-shanghai VIKINGDB_REGION=cn-shanghai
VIKINGDB_HOST=api-vikingdb.xxx.volces.com VIKINGDB_HOST=api-vikingdb.xxx.volces.com
VIKINGDB_SCHEME=http VIKINGDB_SCHEMA=http
VIKINGDB_CONNECTION_TIMEOUT=30 VIKINGDB_CONNECTION_TIMEOUT=30
VIKINGDB_SOCKET_TIMEOUT=30 VIKINGDB_SOCKET_TIMEOUT=30
@ -438,6 +417,8 @@ UPLOAD_FILE_EXTENSION_BLACKLIST=
# Model configuration # Model configuration
MULTIMODAL_SEND_FORMAT=base64 MULTIMODAL_SEND_FORMAT=base64
PROMPT_GENERATION_MAX_TOKENS=512
CODE_GENERATION_MAX_TOKENS=1024
PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false
# Mail configuration, support: resend, smtp, sendgrid # Mail configuration, support: resend, smtp, sendgrid
@ -663,11 +644,6 @@ INNER_API_KEY_FOR_PLUGIN=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y
MARKETPLACE_ENABLED=true MARKETPLACE_ENABLED=true
MARKETPLACE_API_URL=https://marketplace.dify.ai MARKETPLACE_API_URL=https://marketplace.dify.ai
# Creators Platform configuration
CREATORS_PLATFORM_FEATURES_ENABLED=true
CREATORS_PLATFORM_API_URL=https://creators.dify.ai
CREATORS_PLATFORM_OAUTH_CLIENT_ID=
# Endpoint configuration # Endpoint configuration
ENDPOINT_URL_TEMPLATE=http://localhost:5002/e/{hook_id} ENDPOINT_URL_TEMPLATE=http://localhost:5002/e/{hook_id}
@ -718,6 +694,22 @@ SWAGGER_UI_PATH=/swagger-ui.html
# Set to false to export dataset IDs as plain text for easier cross-environment import # Set to false to export dataset IDs as plain text for easier cross-environment import
DSL_EXPORT_ENCRYPT_DATASET_ID=true DSL_EXPORT_ENCRYPT_DATASET_ID=true
# Suggested Questions After Answer Configuration
# These environment variables allow customization of the suggested questions feature
#
# Custom prompt for generating suggested questions (optional)
# If not set, uses the default prompt that generates 3 questions under 20 characters each
# Example: "Please help me predict the five most likely technical follow-up questions a developer would ask. Focus on implementation details, best practices, and architecture considerations. Keep each question between 40-60 characters. Output must be JSON array: [\"question1\",\"question2\",\"question3\",\"question4\",\"question5\"]"
# SUGGESTED_QUESTIONS_PROMPT=
# Maximum number of tokens for suggested questions generation (default: 256)
# Adjust this value for longer questions or more questions
# SUGGESTED_QUESTIONS_MAX_TOKENS=256
# Temperature for suggested questions generation (default: 0.0)
# Higher values (0.5-1.0) produce more creative questions; lower values (0.0-0.3) produce more focused questions # Higher values (0.5-1.0) produce more creative questions; lower values (0.0-0.3) produce more focused questions
# SUGGESTED_QUESTIONS_TEMPERATURE=0
# Tenant isolated task queue configuration # Tenant isolated task queue configuration
TENANT_ISOLATED_TASK_CONCURRENCY=1 TENANT_ISOLATED_TASK_CONCURRENCY=1

View File

@ -1,14 +1,202 @@
[importlinter] [importlinter]
root_packages = root_packages =
core core
constants dify_graph
context
configs configs
controllers controllers
extensions extensions
factories
libs
models models
tasks tasks
services services
include_external_packages = True include_external_packages = True
[importlinter:contract:workflow]
name = Workflow
type=layers
layers =
graph_engine
graph_events
graph
nodes
node_events
runtime
entities
containers =
dify_graph
ignore_imports =
dify_graph.nodes.base.node -> dify_graph.graph_events
dify_graph.nodes.iteration.iteration_node -> dify_graph.graph_events
dify_graph.nodes.loop.loop_node -> dify_graph.graph_events
dify_graph.nodes.iteration.iteration_node -> dify_graph.graph_engine
dify_graph.nodes.loop.loop_node -> dify_graph.graph_engine
# TODO(QuantumGhost): fix the import violation later
dify_graph.entities.pause_reason -> dify_graph.nodes.human_input.entities
[importlinter:contract:workflow-infrastructure-dependencies]
name = Workflow Infrastructure Dependencies
type = forbidden
source_modules =
dify_graph
forbidden_modules =
extensions.ext_database
extensions.ext_redis
allow_indirect_imports = True
ignore_imports =
dify_graph.nodes.llm.node -> extensions.ext_database
dify_graph.model_runtime.model_providers.__base.ai_model -> extensions.ext_redis
dify_graph.model_runtime.model_providers.model_provider_factory -> extensions.ext_redis
[importlinter:contract:workflow-external-imports]
name = Workflow External Imports
type = forbidden
source_modules =
dify_graph
forbidden_modules =
configs
controllers
extensions
models
services
tasks
core.agent
core.app
core.base
core.callback_handler
core.datasource
core.db
core.entities
core.errors
core.extension
core.external_data_tool
core.file
core.helper
core.hosting_configuration
core.indexing_runner
core.llm_generator
core.logging
core.mcp
core.memory
core.moderation
core.ops
core.plugin
core.prompt
core.provider_manager
core.rag
core.repositories
core.schemas
core.tools
core.trigger
core.variables
ignore_imports =
dify_graph.nodes.llm.llm_utils -> core.model_manager
dify_graph.nodes.llm.protocols -> core.model_manager
dify_graph.nodes.llm.llm_utils -> dify_graph.model_runtime.model_providers.__base.large_language_model
dify_graph.nodes.llm.node -> core.tools.signature
dify_graph.nodes.tool.tool_node -> core.callback_handler.workflow_tool_callback_handler
dify_graph.nodes.tool.tool_node -> core.tools.tool_engine
dify_graph.nodes.tool.tool_node -> core.tools.tool_manager
dify_graph.nodes.parameter_extractor.parameter_extractor_node -> core.prompt.advanced_prompt_transform
dify_graph.nodes.parameter_extractor.parameter_extractor_node -> core.prompt.simple_prompt_transform
dify_graph.nodes.parameter_extractor.parameter_extractor_node -> dify_graph.model_runtime.model_providers.__base.large_language_model
dify_graph.nodes.question_classifier.question_classifier_node -> core.prompt.simple_prompt_transform
dify_graph.nodes.parameter_extractor.parameter_extractor_node -> core.model_manager
dify_graph.nodes.question_classifier.question_classifier_node -> core.model_manager
dify_graph.nodes.tool.tool_node -> core.tools.utils.message_transformer
dify_graph.nodes.llm.node -> core.llm_generator.output_parser.errors
dify_graph.nodes.llm.node -> core.llm_generator.output_parser.structured_output
dify_graph.nodes.llm.node -> core.model_manager
dify_graph.nodes.llm.entities -> core.prompt.entities.advanced_prompt_entities
dify_graph.nodes.llm.node -> core.prompt.entities.advanced_prompt_entities
dify_graph.nodes.llm.node -> core.prompt.utils.prompt_message_util
dify_graph.nodes.parameter_extractor.entities -> core.prompt.entities.advanced_prompt_entities
dify_graph.nodes.parameter_extractor.parameter_extractor_node -> core.prompt.entities.advanced_prompt_entities
dify_graph.nodes.parameter_extractor.parameter_extractor_node -> core.prompt.utils.prompt_message_util
dify_graph.nodes.question_classifier.entities -> core.prompt.entities.advanced_prompt_entities
dify_graph.nodes.question_classifier.question_classifier_node -> core.prompt.utils.prompt_message_util
dify_graph.nodes.llm.node -> models.dataset
dify_graph.nodes.llm.file_saver -> core.tools.signature
dify_graph.nodes.llm.file_saver -> core.tools.tool_file_manager
dify_graph.nodes.tool.tool_node -> core.tools.errors
dify_graph.nodes.llm.node -> extensions.ext_database
dify_graph.nodes.llm.node -> models.model
dify_graph.nodes.tool.tool_node -> services
dify_graph.model_runtime.model_providers.__base.ai_model -> configs
dify_graph.model_runtime.model_providers.__base.ai_model -> extensions.ext_redis
dify_graph.model_runtime.model_providers.__base.large_language_model -> configs
dify_graph.model_runtime.model_providers.__base.text_embedding_model -> core.entities.embedding_type
dify_graph.model_runtime.model_providers.model_provider_factory -> configs
dify_graph.model_runtime.model_providers.model_provider_factory -> extensions.ext_redis
dify_graph.model_runtime.model_providers.model_provider_factory -> models.provider_ids
[importlinter:contract:rsc]
name = RSC
type = layers
layers =
graph_engine
response_coordinator
containers =
dify_graph.graph_engine
[importlinter:contract:worker]
name = Worker
type = layers
layers =
graph_engine
worker
containers =
dify_graph.graph_engine
[importlinter:contract:graph-engine-architecture]
name = Graph Engine Architecture
type = layers
layers =
graph_engine
orchestration
command_processing
event_management
error_handler
graph_traversal
graph_state_manager
worker_management
domain
containers =
dify_graph.graph_engine
[importlinter:contract:domain-isolation]
name = Domain Model Isolation
type = forbidden
source_modules =
dify_graph.graph_engine.domain
forbidden_modules =
dify_graph.graph_engine.worker_management
dify_graph.graph_engine.command_channels
dify_graph.graph_engine.layers
dify_graph.graph_engine.protocols
[importlinter:contract:worker-management]
name = Worker Management
type = forbidden
source_modules =
dify_graph.graph_engine.worker_management
forbidden_modules =
dify_graph.graph_engine.orchestration
dify_graph.graph_engine.command_processing
dify_graph.graph_engine.event_management
[importlinter:contract:graph-traversal-components]
name = Graph Traversal Components
type = layers
layers =
edge_processor
skip_propagator
containers =
dify_graph.graph_engine.graph_traversal
[importlinter:contract:command-channels]
name = Command Channels Independence
type = independence
modules =
dify_graph.graph_engine.command_channels.in_memory_channel
dify_graph.graph_engine.command_channels.redis_channel
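The `layers` contracts enforce a one-way dependency direction: a module listed higher may import from those listed below it, never the reverse. For example, under the Workflow contract, code in `entities` must not reach up into `graph_engine`. A sketch with stand-in module contents (the real modules differ):

```python
# Layered as in the Workflow contract: entities is the bottom layer,
# graph_engine the top. Stand-in modules for illustration only.

# dify_graph/entities/state.py -- bottom layer, imports nothing above it.
class NodeState:
    node_id: str

# dify_graph/graph_engine/engine.py -- top layer; reaching DOWN is allowed:
#     from dify_graph.entities.state import NodeState

# dify_graph/entities/bad.py -- reaching UP inverts the layering, and
# `lint-imports` would report a violation of the "Workflow" contract:
#     from dify_graph.graph_engine.engine import GraphEngine
```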

View File

@ -69,6 +69,8 @@ ignore = [
"FURB152", # math-constant "FURB152", # math-constant
"UP007", # non-pep604-annotation "UP007", # non-pep604-annotation
"UP032", # f-string "UP032", # f-string
"UP045", # non-pep604-annotation-optional
"B005", # strip-with-multi-characters
"B006", # mutable-argument-default "B006", # mutable-argument-default
"B007", # unused-loop-control-variable "B007", # unused-loop-control-variable
"B026", # star-arg-unpacking-after-keyword-arg "B026", # star-arg-unpacking-after-keyword-arg
@ -82,6 +84,7 @@ ignore = [
"SIM102", # collapsible-if "SIM102", # collapsible-if
"SIM103", # needless-bool "SIM103", # needless-bool
"SIM105", # suppressible-exception "SIM105", # suppressible-exception
"SIM107", # return-in-try-except-finally
"SIM108", # if-else-block-instead-of-if-exp "SIM108", # if-else-block-instead-of-if-exp
"SIM113", # enumerate-for-loop "SIM113", # enumerate-for-loop
"SIM117", # multiple-with-statements "SIM117", # multiple-with-statements
@ -90,22 +93,38 @@ ignore = [
] ]
[lint.per-file-ignores] [lint.per-file-ignores]
"__init__.py" = [
"F401", # unused-import
"F811", # redefined-while-unused
]
"configs/*" = [ "configs/*" = [
"N802", # invalid-function-name "N802", # invalid-function-name
] ]
"dify_graph/model_runtime/callbacks/base_callback.py" = ["T201"]
"core/workflow/callbacks/workflow_logging_callback.py" = ["T201"]
"libs/gmpy2_pkcs10aep_cipher.py" = [ "libs/gmpy2_pkcs10aep_cipher.py" = [
"N803", # invalid-argument-name "N803", # invalid-argument-name
] ]
"tests/*" = [ "tests/*" = [
"F811", # redefined-while-unused
"T201", # allow print in tests, "T201", # allow print in tests,
"S110", # allow ignoring exceptions in tests code (currently) "S110", # allow ignoring exceptions in tests code (currently)
] ]
"controllers/console/explore/trial.py" = ["TID251"]
"controllers/console/human_input_form.py" = ["TID251"]
"controllers/web/human_input_form.py" = ["TID251"]
[lint.pyflakes]
allowed-unused-imports = [
"tests.integration_tests",
"tests.unit_tests",
]
[lint.flake8-tidy-imports]
[lint.flake8-tidy-imports.banned-api."flask_restx.reqparse"] [lint.flake8-tidy-imports.banned-api."flask_restx.reqparse"]
msg = "Use Pydantic payload/query models instead of reqparse." msg = "Use Pydantic payload/query models instead of reqparse."
[lint.flake8-tidy-imports.banned-api."flask_restx.reqparse.RequestParser"] [lint.flake8-tidy-imports.banned-api."flask_restx.reqparse.RequestParser"]
msg = "Use Pydantic payload/query models instead of reqparse." msg = "Use Pydantic payload/query models instead of reqparse."
[lint.isort]
known-first-party = ["graphon"]

View File

@ -3,21 +3,29 @@
"compounds": [ "compounds": [
{ {
"name": "Launch Flask and Celery", "name": "Launch Flask and Celery",
"configurations": ["Python: API (gevent)", "Python: Celery"] "configurations": ["Python: Flask", "Python: Celery"]
} }
], ],
"configurations": [ "configurations": [
{ {
"name": "Python: API (gevent)", "name": "Python: Flask",
"consoleName": "API", "consoleName": "Flask",
"type": "debugpy", "type": "debugpy",
"request": "launch", "request": "launch",
"python": "${workspaceFolder}/.venv/bin/python", "python": "${workspaceFolder}/.venv/bin/python",
"cwd": "${workspaceFolder}", "cwd": "${workspaceFolder}",
"envFile": ".env", "envFile": ".env",
"program": "${workspaceFolder}/app.py", "module": "flask",
"justMyCode": true, "justMyCode": true,
"jinja": true "jinja": true,
"env": {
"FLASK_APP": "app.py",
"GEVENT_SUPPORT": "True"
},
"args": [
"run",
"--port=5001"
]
}, },
{ {
"name": "Python: Celery", "name": "Python: Celery",

View File

@ -193,10 +193,6 @@ Before opening a PR / submitting:
- Controllers: parse input via Pydantic, invoke services, return serialised responses; no business logic. - Controllers: parse input via Pydantic, invoke services, return serialised responses; no business logic.
- Services: coordinate repositories, providers, background tasks; keep side effects explicit. - Services: coordinate repositories, providers, background tasks; keep side effects explicit.
- Document non-obvious behaviour with concise docstrings and comments. - Document non-obvious behaviour with concise docstrings and comments.
- For Flask-RESTX controller request, query, and response schemas, follow `controllers/API_SCHEMA_GUIDE.md`.
In short: use Pydantic models, document GET query params with `query_params_from_model(...)`, register response
DTOs with `register_response_schema_models(...)`, serialize with `ResponseModel.model_validate(...).model_dump(...)`,
and avoid adding new legacy `ns.model(...)`, `@marshal_with(...)`, or GET `@ns.expect(...)` patterns.
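A hedged sketch of the controller pattern that guide describes; `query_params_from_model` and `register_response_schema_models` are the repo-specific helpers named above and are not wired in here, and `fetch_apps` is a stand-in service call:

```python
from flask import request
from flask_restx import Namespace, Resource
from pydantic import BaseModel

ns = Namespace("apps")


class AppListQuery(BaseModel):
    page: int = 1
    limit: int = 20


class AppResponse(BaseModel):
    id: str
    name: str


def fetch_apps(page: int, limit: int) -> list[dict]:
    return [{"id": "1", "name": "demo"}]  # stand-in for the real service layer


@ns.route("/")
class AppListApi(Resource):
    def get(self):
        # Parse and validate query params through the Pydantic model.
        query = AppListQuery.model_validate(request.args.to_dict())
        items = fetch_apps(page=query.page, limit=query.limit)
        # Serialize via the response DTO instead of marshal_with.
        return [AppResponse.model_validate(item).model_dump() for item in items]
```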
### Miscellaneous ### Miscellaneous

View File

@ -21,9 +21,8 @@ RUN apt-get update \
# for building gmpy2 # for building gmpy2
libmpfr-dev libmpc-dev libmpfr-dev libmpc-dev
# Install Python dependencies (workspace members under providers/vdb/) # Install Python dependencies
COPY pyproject.toml uv.lock ./ COPY pyproject.toml uv.lock ./
COPY providers ./providers
RUN uv sync --locked --no-dev RUN uv sync --locked --no-dev
# production stage # production stage

View File

@ -40,8 +40,6 @@ The scripts resolve paths relative to their location, so you can run them from a
./dev/start-web ./dev/start-web
``` ```
`./dev/setup` and `./dev/start-web` install JavaScript dependencies through the repository root workspace, so you do not need a separate `cd web && pnpm install` step.
1. Set up your application by visiting `http://localhost:3000`. 1. Set up your application by visiting `http://localhost:3000`.
1. Start the worker service (async and scheduler tasks, runs from `api`). 1. Start the worker service (async and scheduler tasks, runs from `api`).
@ -101,11 +99,3 @@ The scripts resolve paths relative to their location, so you can run them from a
uv run ruff format ./ # Format code uv run ruff format ./ # Format code
uv run basedpyright . # Type checking uv run basedpyright . # Type checking
``` ```
## Generate TS stub
```
uv run dev/generate_swagger_specs.py --output-dir openapi
```
Use https://jsontotable.org/openapi-to-typescript to convert the generated spec to TypeScript.

View File

@ -1,6 +1,5 @@
from __future__ import annotations from __future__ import annotations
import logging
import sys import sys
from typing import TYPE_CHECKING, cast from typing import TYPE_CHECKING, cast
@ -10,35 +9,17 @@ if TYPE_CHECKING:
celery: Celery celery: Celery
HOST = "0.0.0.0"
PORT = 5001
logger = logging.getLogger(__name__)
def is_db_command() -> bool: def is_db_command() -> bool:
if len(sys.argv) > 1 and sys.argv[0].endswith("flask") and sys.argv[1] == "db": if len(sys.argv) > 1 and sys.argv[0].endswith("flask") and sys.argv[1] == "db":
return True return True
return False return False
def log_startup_banner(host: str, port: int) -> None:
debugger_attached = sys.gettrace() is not None
logger.info("Serving Dify API via gevent WebSocket server")
logger.info("Bound to http://%s:%s", host, port)
logger.info("Debugger attached: %s", "on" if debugger_attached else "off")
logger.info("Press CTRL+C to quit")
# create app # create app
flask_app = None
socketio_app = None
if is_db_command(): if is_db_command():
from app_factory import create_migrations_app from app_factory import create_migrations_app
app = create_migrations_app() app = create_migrations_app()
socketio_app = app
flask_app = app
else: else:
# Gunicorn and Celery handle monkey patching automatically in production by # Gunicorn and Celery handle monkey patching automatically in production by
# specifying the `gevent` worker class. Manual monkey patching is not required here. # specifying the `gevent` worker class. Manual monkey patching is not required here.
@ -49,14 +30,8 @@ else:
from app_factory import create_app from app_factory import create_app
socketio_app, flask_app = create_app() app = create_app()
app = flask_app
celery = cast("Celery", app.extensions["celery"]) celery = cast("Celery", app.extensions["celery"])
if __name__ == "__main__": if __name__ == "__main__":
from gevent import pywsgi app.run(host="0.0.0.0", port=5001)
from geventwebsocket.handler import WebSocketHandler # type: ignore[reportMissingTypeStubs]
log_startup_banner(HOST, PORT)
server = pywsgi.WSGIServer((HOST, PORT), socketio_app, handler_class=WebSocketHandler)
server.serve_forever()

View File

@ -1,7 +1,6 @@
import logging import logging
import time import time
import socketio # type: ignore[reportMissingTypeStubs]
from flask import request from flask import request
from opentelemetry.trace import get_current_span from opentelemetry.trace import get_current_span
from opentelemetry.trace.span import INVALID_SPAN_ID, INVALID_TRACE_ID from opentelemetry.trace.span import INVALID_SPAN_ID, INVALID_TRACE_ID
@ -11,7 +10,6 @@ from contexts.wrapper import RecyclableContextVar
from controllers.console.error import UnauthorizedAndForceLogout from controllers.console.error import UnauthorizedAndForceLogout
from core.logging.context import init_request_context from core.logging.context import init_request_context
from dify_app import DifyApp from dify_app import DifyApp
from extensions.ext_socketio import sio
from services.enterprise.enterprise_service import EnterpriseService from services.enterprise.enterprise_service import EnterpriseService
from services.feature_service import LicenseStatus from services.feature_service import LicenseStatus
@ -124,18 +122,14 @@ def create_flask_app_with_configs() -> DifyApp:
return dify_app return dify_app
def create_app() -> tuple[socketio.WSGIApp, DifyApp]: def create_app() -> DifyApp:
start_time = time.perf_counter() start_time = time.perf_counter()
app = create_flask_app_with_configs() app = create_flask_app_with_configs()
initialize_extensions(app) initialize_extensions(app)
sio.app = app
socketio_app = socketio.WSGIApp(sio, app)
end_time = time.perf_counter() end_time = time.perf_counter()
if dify_config.DEBUG: if dify_config.DEBUG:
logger.info("Finished create_app (%s ms)", round((end_time - start_time) * 1000, 2)) logger.info("Finished create_app (%s ms)", round((end_time - start_time) * 1000, 2))
return socketio_app, app return app
def initialize_extensions(app: DifyApp): def initialize_extensions(app: DifyApp):
@ -149,7 +143,6 @@ def initialize_extensions(app: DifyApp):
ext_commands, ext_commands,
ext_compress, ext_compress,
ext_database, ext_database,
ext_enterprise_telemetry,
ext_fastopenapi, ext_fastopenapi,
ext_forward_refs, ext_forward_refs,
ext_hosting_provider, ext_hosting_provider,
@ -181,6 +174,7 @@ def initialize_extensions(app: DifyApp):
ext_import_modules, ext_import_modules,
ext_orjson, ext_orjson,
ext_forward_refs, ext_forward_refs,
ext_set_secretkey,
ext_compress, ext_compress,
ext_code_based_extension, ext_code_based_extension,
ext_database, ext_database,
@ -188,7 +182,6 @@ def initialize_extensions(app: DifyApp):
ext_migrate, ext_migrate,
ext_redis, ext_redis,
ext_storage, ext_storage,
ext_set_secretkey,
ext_logstore, # Initialize logstore after storage, before celery ext_logstore, # Initialize logstore after storage, before celery
ext_celery, ext_celery,
ext_login, ext_login,
@ -200,7 +193,6 @@ def initialize_extensions(app: DifyApp):
ext_commands, ext_commands,
ext_fastopenapi, ext_fastopenapi,
ext_otel, ext_otel,
ext_enterprise_telemetry,
ext_request_logging, ext_request_logging,
ext_session_factory, ext_session_factory,
] ]

View File

@ -1,18 +0,0 @@
# This module provides a lightweight Celery instance for use in Docker health checks.
# Unlike celery_entrypoint.py, this does NOT import app.py and therefore avoids
# initializing all Flask extensions (DB, Redis, storage, blueprints, etc.).
# Using this module keeps the health check fast and low-cost.
from celery import Celery
from configs import dify_config
from extensions.ext_celery import get_celery_broker_transport_options, get_celery_ssl_options
celery = Celery(broker=dify_config.CELERY_BROKER_URL)
broker_transport_options = get_celery_broker_transport_options()
if broker_transport_options:
celery.conf.update(broker_transport_options=broker_transport_options)
ssl_options = get_celery_ssl_options()
if ssl_options:
celery.conf.update(broker_use_ssl=ssl_options)
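One way such a lightweight instance could be used from a health check, assuming the module above is importable as `app_health` (the actual module path may differ); `control.inspect(...).ping()` is standard Celery API:

```python
import sys

# Module path assumed; adjust to wherever the lightweight instance lives.
from app_health import celery


def main() -> int:
    # ping() broadcasts to workers and returns their replies,
    # or None if nothing answered within the timeout.
    replies = celery.control.inspect(timeout=3.0).ping()
    return 0 if replies else 1


if __name__ == "__main__":
    sys.exit(main())
```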

View File

@ -2,7 +2,7 @@ import base64
import secrets import secrets
import click import click
from sqlalchemy.orm import Session from sqlalchemy.orm import sessionmaker
from constants.languages import languages from constants.languages import languages
from extensions.ext_database import db from extensions.ext_database import db
@ -25,32 +25,30 @@ def reset_password(email, new_password, password_confirm):
return return
normalized_email = email.strip().lower() normalized_email = email.strip().lower()
account = AccountService.get_account_by_email_with_case_fallback(email.strip()) with sessionmaker(db.engine, expire_on_commit=False).begin() as session:
account = AccountService.get_account_by_email_with_case_fallback(email.strip(), session=session)
if not account: if not account:
click.echo(click.style(f"Account not found for email: {email}", fg="red")) click.echo(click.style(f"Account not found for email: {email}", fg="red"))
return return
try: try:
valid_password(new_password) valid_password(new_password)
except: except:
click.echo(click.style(f"Invalid password. Must match {password_pattern}", fg="red")) click.echo(click.style(f"Invalid password. Must match {password_pattern}", fg="red"))
return return
# generate password salt # generate password salt
salt = secrets.token_bytes(16) salt = secrets.token_bytes(16)
base64_salt = base64.b64encode(salt).decode() base64_salt = base64.b64encode(salt).decode()
# encrypt password with salt # encrypt password with salt
password_hashed = hash_password(new_password, salt) password_hashed = hash_password(new_password, salt)
base64_password_hashed = base64.b64encode(password_hashed).decode() base64_password_hashed = base64.b64encode(password_hashed).decode()
with Session(db.engine) as session:
account = session.merge(account)
account.password = base64_password_hashed account.password = base64_password_hashed
account.password_salt = base64_salt account.password_salt = base64_salt
session.commit() AccountService.reset_login_error_rate_limit(normalized_email)
AccountService.reset_login_error_rate_limit(normalized_email) click.echo(click.style("Password reset successfully.", fg="green"))
click.echo(click.style("Password reset successfully.", fg="green"))
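The two sides differ in session handling: main loads the account first and `merge()`s it into an explicit `Session`, while 1.13.3 scopes the whole command inside a `sessionmaker(...).begin()` block that commits on clean exit. A self-contained SQLAlchemy 2.x sketch of both patterns, using a throwaway SQLite model as a stand-in:

```python
from sqlalchemy import create_engine, select
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column, sessionmaker


class Base(DeclarativeBase):
    pass


class Account(Base):
    __tablename__ = "accounts"
    id: Mapped[int] = mapped_column(primary_key=True)
    password: Mapped[str] = mapped_column(default="")


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as s:
    s.add(Account(id=1))
    s.commit()

# Pattern A (main): explicit Session, merge a detached object, commit manually.
with Session(engine) as session:
    account = session.merge(Account(id=1))
    account.password = "new-hash-a"
    session.commit()

# Pattern B (1.13.3): begin() commits on clean exit and rolls back on error;
# expire_on_commit=False keeps attributes readable after the block closes.
with sessionmaker(engine, expire_on_commit=False).begin() as session:
    account = session.scalars(select(Account).where(Account.id == 1)).one()
    account.password = "new-hash-b"
```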
@click.command("reset-email", help="Reset the account email.") @click.command("reset-email", help="Reset the account email.")
@ -67,23 +65,21 @@ def reset_email(email, new_email, email_confirm):
return return
normalized_new_email = new_email.strip().lower() normalized_new_email = new_email.strip().lower()
account = AccountService.get_account_by_email_with_case_fallback(email.strip()) with sessionmaker(db.engine, expire_on_commit=False).begin() as session:
account = AccountService.get_account_by_email_with_case_fallback(email.strip(), session=session)
if not account: if not account:
click.echo(click.style(f"Account not found for email: {email}", fg="red")) click.echo(click.style(f"Account not found for email: {email}", fg="red"))
return return
try: try:
email_validate(normalized_new_email) email_validate(normalized_new_email)
except: except:
click.echo(click.style(f"Invalid email: {new_email}", fg="red")) click.echo(click.style(f"Invalid email: {new_email}", fg="red"))
return return
with Session(db.engine) as session:
account = session.merge(account)
account.email = normalized_new_email account.email = normalized_new_email
session.commit() click.echo(click.style("Email updated successfully.", fg="green"))
click.echo(click.style("Email updated successfully.", fg="green"))
@click.command("create-tenant", help="Create account and tenant.") @click.command("create-tenant", help="Create account and tenant.")
@ -113,18 +109,8 @@ def create_tenant(email: str, language: str | None = None, name: str | None = No
# Validates name encoding for non-Latin characters. # Validates name encoding for non-Latin characters.
name = name.strip().encode("utf-8").decode("utf-8") if name else None name = name.strip().encode("utf-8").decode("utf-8") if name else None
# Generate a random password that satisfies the password policy. # generate random password
# The iteration limit guards against infinite loops caused by unexpected bugs in valid_password. new_password = secrets.token_urlsafe(16)
for _ in range(100):
new_password = secrets.token_urlsafe(16)
try:
valid_password(new_password)
break
except Exception:
continue
else:
click.echo(click.style("Failed to generate a valid password. Please try again.", fg="red"))
return
# register account # register account
account = RegisterService.register( account = RegisterService.register(
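The retry loop above relies on Python's `for ... else`: the `else` branch runs only when the loop exhausts without hitting `break`. A minimal illustration, with a trivial check standing in for `valid_password`:

```python
import secrets

for _ in range(100):
    candidate = secrets.token_urlsafe(16)
    if any(c.isdigit() for c in candidate):  # stand-in for valid_password()
        break
else:
    # Reached only if no iteration produced a break.
    raise RuntimeError("failed to generate a valid password")

print(candidate)
```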

View File

@ -11,7 +11,7 @@ from configs import dify_config
from core.helper import encrypter from core.helper import encrypter
from core.plugin.entities.plugin_daemon import CredentialType from core.plugin.entities.plugin_daemon import CredentialType
from core.plugin.impl.plugin import PluginInstaller from core.plugin.impl.plugin import PluginInstaller
from core.tools.utils.system_encryption import encrypt_system_params from core.tools.utils.system_oauth_encryption import encrypt_system_oauth_params
from extensions.ext_database import db from extensions.ext_database import db
from models import Tenant from models import Tenant
from models.oauth import DatasourceOauthParamConfig, DatasourceProvider from models.oauth import DatasourceOauthParamConfig, DatasourceProvider
@ -44,7 +44,7 @@ def setup_system_tool_oauth_client(provider, client_params):
click.echo(click.style(f"Encrypting client params: {client_params}", fg="yellow")) click.echo(click.style(f"Encrypting client params: {client_params}", fg="yellow"))
click.echo(click.style(f"Using SECRET_KEY: `{dify_config.SECRET_KEY}`", fg="yellow")) click.echo(click.style(f"Using SECRET_KEY: `{dify_config.SECRET_KEY}`", fg="yellow"))
oauth_client_params = encrypt_system_params(client_params_dict) oauth_client_params = encrypt_system_oauth_params(client_params_dict)
click.echo(click.style("Client params encrypted successfully.", fg="green")) click.echo(click.style("Client params encrypted successfully.", fg="green"))
except Exception as e: except Exception as e:
click.echo(click.style(f"Error parsing client params: {str(e)}", fg="red")) click.echo(click.style(f"Error parsing client params: {str(e)}", fg="red"))
@ -94,7 +94,7 @@ def setup_system_trigger_oauth_client(provider, client_params):
click.echo(click.style(f"Encrypting client params: {client_params}", fg="yellow")) click.echo(click.style(f"Encrypting client params: {client_params}", fg="yellow"))
click.echo(click.style(f"Using SECRET_KEY: `{dify_config.SECRET_KEY}`", fg="yellow")) click.echo(click.style(f"Using SECRET_KEY: `{dify_config.SECRET_KEY}`", fg="yellow"))
oauth_client_params = encrypt_system_params(client_params_dict) oauth_client_params = encrypt_system_oauth_params(client_params_dict)
click.echo(click.style("Client params encrypted successfully.", fg="green")) click.echo(click.style("Client params encrypted successfully.", fg="green"))
except Exception as e: except Exception as e:
click.echo(click.style(f"Error parsing client params: {str(e)}", fg="red")) click.echo(click.style(f"Error parsing client params: {str(e)}", fg="red"))

View File

@ -1,7 +1,7 @@
import datetime import datetime
import logging import logging
import time import time
from typing import TypedDict from typing import Any
import click import click
import sqlalchemy as sa import sqlalchemy as sa
@ -503,19 +503,7 @@ def _find_orphaned_draft_variables(batch_size: int = 1000) -> list[str]:
return [row[0] for row in result] return [row[0] for row in result]
class _AppOrphanCounts(TypedDict): def _count_orphaned_draft_variables() -> dict[str, Any]:
variables: int
files: int
class OrphanedDraftVariableStatsDict(TypedDict):
total_orphaned_variables: int
total_orphaned_files: int
orphaned_app_count: int
orphaned_by_app: dict[str, _AppOrphanCounts]
def _count_orphaned_draft_variables() -> OrphanedDraftVariableStatsDict:
""" """
Count orphaned draft variables by app, including associated file counts. Count orphaned draft variables by app, including associated file counts.
@ -538,7 +526,7 @@ def _count_orphaned_draft_variables() -> OrphanedDraftVariableStatsDict:
with db.engine.connect() as conn: with db.engine.connect() as conn:
result = conn.execute(sa.text(variables_query)) result = conn.execute(sa.text(variables_query))
orphaned_by_app: dict[str, _AppOrphanCounts] = {} orphaned_by_app = {}
total_files = 0 total_files = 0
for row in result: for row in result:
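Tying into the "prefer TypedDict" guidance earlier in this diff, the main side replaces a loose `dict[str, Any]` return with typed shapes so type checkers can verify key access. A reduced, runnable sketch of the payoff:

```python
from typing import TypedDict


class AppOrphanCounts(TypedDict):
    variables: int
    files: int


class OrphanStats(TypedDict):
    total_orphaned_variables: int
    orphaned_by_app: dict[str, AppOrphanCounts]


def summarize(rows: list[tuple[str, int, int]]) -> OrphanStats:
    by_app: dict[str, AppOrphanCounts] = {}
    total = 0
    for app_id, variables, files in rows:
        by_app[app_id] = {"variables": variables, "files": files}
        total += variables
    # Mistyping a key here (e.g. "varaibles") is now a type-check error,
    # which dict[str, Any] would silently allow.
    return {"total_orphaned_variables": total, "orphaned_by_app": by_app}


print(summarize([("app-1", 3, 1), ("app-2", 5, 2)]))
```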

View File

@ -341,10 +341,11 @@ def add_qdrant_index(field: str):
click.echo(click.style("No dataset collection bindings found.", fg="red")) click.echo(click.style("No dataset collection bindings found.", fg="red"))
return return
import qdrant_client import qdrant_client
from dify_vdb_qdrant.qdrant_vector import PathQdrantParams, QdrantConfig
from qdrant_client.http.exceptions import UnexpectedResponse from qdrant_client.http.exceptions import UnexpectedResponse
from qdrant_client.http.models import PayloadSchemaType from qdrant_client.http.models import PayloadSchemaType
from core.rag.datasource.vdb.qdrant.qdrant_vector import PathQdrantParams, QdrantConfig
for binding in bindings: for binding in bindings:
if dify_config.QDRANT_URL is None: if dify_config.QDRANT_URL is None:
raise ValueError("Qdrant URL is required.") raise ValueError("Qdrant URL is required.")
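What adding the index boils down to with the `qdrant_client` API; the collection and field names below are placeholders, not the repo's actual values:

```python
from qdrant_client import QdrantClient
from qdrant_client.http.exceptions import UnexpectedResponse
from qdrant_client.http.models import PayloadSchemaType

client = QdrantClient(url="http://localhost:6333")

try:
    # Index the metadata field so filtered searches on it stay fast.
    client.create_payload_index(
        collection_name="example_collection",   # placeholder name
        field_name="metadata.doc_id",           # placeholder field
        field_schema=PayloadSchemaType.KEYWORD,
    )
except UnexpectedResponse as exc:
    # e.g. the collection does not exist yet.
    print(f"index creation failed: {exc}")
```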

View File

@ -8,7 +8,7 @@ from pydantic_settings import BaseSettings, PydanticBaseSettingsSource, Settings
from libs.file_utils import search_file_upwards from libs.file_utils import search_file_upwards
from .deploy import DeploymentConfig from .deploy import DeploymentConfig
from .enterprise import EnterpriseFeatureConfig, EnterpriseTelemetryConfig from .enterprise import EnterpriseFeatureConfig
from .extra import ExtraServiceConfig from .extra import ExtraServiceConfig
from .feature import FeatureConfig from .feature import FeatureConfig
from .middleware import MiddlewareConfig from .middleware import MiddlewareConfig
@ -73,8 +73,6 @@ class DifyConfig(
# Enterprise feature configs # Enterprise feature configs
# **Before using, please contact business@dify.ai by email to inquire about licensing matters.** # **Before using, please contact business@dify.ai by email to inquire about licensing matters.**
EnterpriseFeatureConfig, EnterpriseFeatureConfig,
# Enterprise telemetry configs
EnterpriseTelemetryConfig,
): ):
model_config = SettingsConfigDict( model_config = SettingsConfigDict(
# read from dotenv format config file # read from dotenv format config file

View File

@ -22,52 +22,3 @@ class EnterpriseFeatureConfig(BaseSettings):
ENTERPRISE_REQUEST_TIMEOUT: int = Field( ENTERPRISE_REQUEST_TIMEOUT: int = Field(
ge=1, description="Maximum timeout in seconds for enterprise requests", default=5 ge=1, description="Maximum timeout in seconds for enterprise requests", default=5
) )
class EnterpriseTelemetryConfig(BaseSettings):
"""
Configuration for enterprise telemetry.
"""
ENTERPRISE_TELEMETRY_ENABLED: bool = Field(
description="Enable enterprise telemetry collection (also requires ENTERPRISE_ENABLED=true).",
default=False,
)
ENTERPRISE_OTLP_ENDPOINT: str = Field(
description="Enterprise OTEL collector endpoint.",
default="",
)
ENTERPRISE_OTLP_HEADERS: str = Field(
description="Auth headers for OTLP export (key=value,key2=value2).",
default="",
)
ENTERPRISE_OTLP_PROTOCOL: str = Field(
description="OTLP protocol: 'http' or 'grpc' (default: http).",
default="http",
)
ENTERPRISE_OTLP_API_KEY: str = Field(
description="Bearer token for enterprise OTLP export authentication.",
default="",
)
ENTERPRISE_INCLUDE_CONTENT: bool = Field(
description="Include input/output content in traces (privacy toggle).",
# Setting the default value to False to avoid accidentally log PII data in traces.
default=False,
)
ENTERPRISE_SERVICE_NAME: str = Field(
description="Service name for OTEL resource.",
default="dify",
)
ENTERPRISE_OTEL_SAMPLING_RATE: float = Field(
description="Sampling rate for enterprise traces (0.0 to 1.0, default 1.0 = 100%).",
default=1.0,
ge=0.0,
le=1.0,
)
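These config blocks follow the standard `pydantic-settings` pattern: each field maps to an environment variable of the same name, and constraints such as `ge`/`le` are validated when the settings object is constructed. A minimal standalone sketch:

```python
import os

from pydantic import Field
from pydantic_settings import BaseSettings


class TelemetrySettings(BaseSettings):
    TELEMETRY_ENABLED: bool = Field(default=False, description="Enable telemetry.")
    SAMPLING_RATE: float = Field(default=1.0, ge=0.0, le=1.0,
                                 description="Trace sampling rate.")


os.environ["SAMPLING_RATE"] = "0.25"
settings = TelemetrySettings()
print(settings.TELEMETRY_ENABLED, settings.SAMPLING_RATE)  # False 0.25
# SAMPLING_RATE=1.5 would raise a ValidationError at construction time.
```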

View File

@ -23,9 +23,9 @@ class SecurityConfig(BaseSettings):
""" """
SECRET_KEY: str = Field( SECRET_KEY: str = Field(
description="Secret key for secure session cookie signing. " description="Secret key for secure session cookie signing."
"Leave empty to let Dify generate a persistent key in the storage directory, " "Make sure you are changing this key for your deployment with a strong key."
"or set a strong value via the `SECRET_KEY` environment variable.", "Generate a strong key using `openssl rand -base64 42` or set via the `SECRET_KEY` environment variable.",
default="", default="",
) )
@ -287,27 +287,6 @@ class MarketplaceConfig(BaseSettings):
) )
class CreatorsPlatformConfig(BaseSettings):
"""
Configuration for Creators Platform integration
"""
CREATORS_PLATFORM_FEATURES_ENABLED: bool = Field(
description="Enable or disable Creators Platform features",
default=True,
)
CREATORS_PLATFORM_API_URL: HttpUrl = Field(
description="Creators Platform API URL",
default=HttpUrl("https://creators.dify.ai"),
)
CREATORS_PLATFORM_OAUTH_CLIENT_ID: str = Field(
description="OAuth client ID for Creators Platform integration",
default="",
)
class EndpointConfig(BaseSettings): class EndpointConfig(BaseSettings):
""" """
Configuration for various application endpoints and URLs Configuration for various application endpoints and URLs
@ -1137,18 +1116,6 @@ class MultiModalTransferConfig(BaseSettings):
) )
class OpsTraceConfig(BaseSettings):
OPS_TRACE_RETRYABLE_DISPATCH_MAX_RETRIES: PositiveInt = Field(
description="Maximum retry attempts for transient ops trace provider dispatch failures.",
default=60,
)
OPS_TRACE_RETRYABLE_DISPATCH_DELAY_SECONDS: PositiveInt = Field(
description="Delay in seconds between transient ops trace provider dispatch retry attempts.",
default=5,
)
class CeleryBeatConfig(BaseSettings): class CeleryBeatConfig(BaseSettings):
CELERY_BEAT_SCHEDULER_TIME: int = Field( CELERY_BEAT_SCHEDULER_TIME: int = Field(
description="Interval in days for Celery Beat scheduler execution, default to 1 day", description="Interval in days for Celery Beat scheduler execution, default to 1 day",
@ -1307,13 +1274,6 @@ class PositionConfig(BaseSettings):
return {item.strip() for item in self.POSITION_TOOL_EXCLUDES.split(",") if item.strip() != ""} return {item.strip() for item in self.POSITION_TOOL_EXCLUDES.split(",") if item.strip() != ""}
class CollaborationConfig(BaseSettings):
ENABLE_COLLABORATION_MODE: bool = Field(
description="Whether to enable collaboration mode features across the workspace",
default=True,
)
class LoginConfig(BaseSettings): class LoginConfig(BaseSettings):
ENABLE_EMAIL_CODE_LOGIN: bool = Field( ENABLE_EMAIL_CODE_LOGIN: bool = Field(
description="whether to enable email code login", description="whether to enable email code login",
@ -1412,7 +1372,6 @@ class FeatureConfig(
AuthConfig, # Changed from OAuthConfig to AuthConfig AuthConfig, # Changed from OAuthConfig to AuthConfig
BillingConfig, BillingConfig,
CodeExecutionSandboxConfig, CodeExecutionSandboxConfig,
CreatorsPlatformConfig,
TriggerConfig, TriggerConfig,
AsyncWorkflowConfig, AsyncWorkflowConfig,
PluginConfig, PluginConfig,
@ -1429,7 +1388,6 @@ class FeatureConfig(
ModelLoadBalanceConfig, ModelLoadBalanceConfig,
ModerationConfig, ModerationConfig,
MultiModalTransferConfig, MultiModalTransferConfig,
OpsTraceConfig,
PositionConfig, PositionConfig,
RagEtlConfig, RagEtlConfig,
RepositoryConfig, RepositoryConfig,
@ -1441,7 +1399,6 @@ class FeatureConfig(
WorkflowConfig, WorkflowConfig,
WorkflowNodeExecutionConfig, WorkflowNodeExecutionConfig,
WorkspaceConfig, WorkspaceConfig,
CollaborationConfig,
LoginConfig, LoginConfig,
AccountConfig, AccountConfig,
SwaggerUIConfig, SwaggerUIConfig,

View File

@ -1,5 +1,5 @@
import os import os
from typing import Any, Literal, TypedDict from typing import Any, Literal
from urllib.parse import parse_qsl, quote_plus from urllib.parse import parse_qsl, quote_plus
from pydantic import Field, NonNegativeFloat, NonNegativeInt, PositiveFloat, PositiveInt, computed_field from pydantic import Field, NonNegativeFloat, NonNegativeInt, PositiveFloat, PositiveInt, computed_field
@ -107,17 +107,6 @@ class KeywordStoreConfig(BaseSettings):
) )
class SQLAlchemyEngineOptionsDict(TypedDict):
pool_size: int
max_overflow: int
pool_recycle: int
pool_pre_ping: bool
connect_args: dict[str, str]
pool_use_lifo: bool
pool_reset_on_return: Literal["commit", "rollback", None]
pool_timeout: int
class DatabaseConfig(BaseSettings): class DatabaseConfig(BaseSettings):
# Database type selector # Database type selector
DB_TYPE: Literal["postgresql", "mysql", "oceanbase", "seekdb"] = Field( DB_TYPE: Literal["postgresql", "mysql", "oceanbase", "seekdb"] = Field(
@ -160,16 +149,6 @@ class DatabaseConfig(BaseSettings):
default="", default="",
) )
DB_SESSION_TIMEZONE_OVERRIDE: str = Field(
description=(
"PostgreSQL session timezone override injected via startup options."
" Default is 'UTC' for out-of-the-box consistency."
" Set to empty string to disable app-level timezone injection, for example when using RDS Proxy"
" together with a database-side default timezone."
),
default="UTC",
)
@computed_field # type: ignore[prop-decorator] @computed_field # type: ignore[prop-decorator]
@property @property
def SQLALCHEMY_DATABASE_URI_SCHEME(self) -> str: def SQLALCHEMY_DATABASE_URI_SCHEME(self) -> str:
@ -223,11 +202,6 @@ class DatabaseConfig(BaseSettings):
default=30, default=30,
) )
SQLALCHEMY_POOL_RESET_ON_RETURN: Literal["commit", "rollback", None] = Field(
description="Connection pool reset behavior on return. Options: 'commit', 'rollback', or None",
default="rollback",
)
RETRIEVAL_SERVICE_EXECUTORS: NonNegativeInt = Field( RETRIEVAL_SERVICE_EXECUTORS: NonNegativeInt = Field(
description="Number of processes for the retrieval service, default to CPU cores.", description="Number of processes for the retrieval service, default to CPU cores.",
default=os.cpu_count() or 1, default=os.cpu_count() or 1,
@ -235,32 +209,30 @@ class DatabaseConfig(BaseSettings):
@computed_field # type: ignore[prop-decorator] @computed_field # type: ignore[prop-decorator]
@property @property
def SQLALCHEMY_ENGINE_OPTIONS(self) -> SQLAlchemyEngineOptionsDict: def SQLALCHEMY_ENGINE_OPTIONS(self) -> dict[str, Any]:
# Parse DB_EXTRAS for 'options' # Parse DB_EXTRAS for 'options'
db_extras_dict = dict(parse_qsl(self.DB_EXTRAS)) db_extras_dict = dict(parse_qsl(self.DB_EXTRAS))
options = db_extras_dict.get("options", "") options = db_extras_dict.get("options", "")
connect_args: dict[str, str] = {} connect_args = {}
# Use the dynamic SQLALCHEMY_DATABASE_URI_SCHEME property # Use the dynamic SQLALCHEMY_DATABASE_URI_SCHEME property
if self.SQLALCHEMY_DATABASE_URI_SCHEME.startswith("postgresql"): if self.SQLALCHEMY_DATABASE_URI_SCHEME.startswith("postgresql"):
merged_options = options.strip() timezone_opt = "-c timezone=UTC"
session_timezone_override = self.DB_SESSION_TIMEZONE_OVERRIDE.strip() if options:
if session_timezone_override: merged_options = f"{options} {timezone_opt}"
timezone_opt = f"-c timezone={session_timezone_override}" else:
merged_options = f"{merged_options} {timezone_opt}".strip() if merged_options else timezone_opt merged_options = timezone_opt
if merged_options: connect_args = {"options": merged_options}
connect_args = {"options": merged_options}
result: SQLAlchemyEngineOptionsDict = { return {
"pool_size": self.SQLALCHEMY_POOL_SIZE, "pool_size": self.SQLALCHEMY_POOL_SIZE,
"max_overflow": self.SQLALCHEMY_MAX_OVERFLOW, "max_overflow": self.SQLALCHEMY_MAX_OVERFLOW,
"pool_recycle": self.SQLALCHEMY_POOL_RECYCLE, "pool_recycle": self.SQLALCHEMY_POOL_RECYCLE,
"pool_pre_ping": self.SQLALCHEMY_POOL_PRE_PING, "pool_pre_ping": self.SQLALCHEMY_POOL_PRE_PING,
"connect_args": connect_args, "connect_args": connect_args,
"pool_use_lifo": self.SQLALCHEMY_POOL_USE_LIFO, "pool_use_lifo": self.SQLALCHEMY_POOL_USE_LIFO,
"pool_reset_on_return": self.SQLALCHEMY_POOL_RESET_ON_RETURN, "pool_reset_on_return": None,
"pool_timeout": self.SQLALCHEMY_POOL_TIMEOUT, "pool_timeout": self.SQLALCHEMY_POOL_TIMEOUT,
} }
return result
class CeleryConfig(DatabaseConfig): class CeleryConfig(DatabaseConfig):
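
A hedged usage sketch for the computed property above: its keys map one-to-one onto SQLAlchemy `create_engine()` keyword arguments (`dify_config.SQLALCHEMY_DATABASE_URI` is an assumption, it is not shown in this diff):

```python
from sqlalchemy import create_engine

from configs import dify_config

# The computed SQLALCHEMY_ENGINE_OPTIONS dict is shaped to be splatted
# straight into create_engine(); on the main side it also injects the
# session timezone via connect_args when DB_SESSION_TIMEZONE_OVERRIDE
# is non-empty.
engine = create_engine(
    dify_config.SQLALCHEMY_DATABASE_URI,  # assumed attribute
    **dify_config.SQLALCHEMY_ENGINE_OPTIONS,
)
```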

View File

@ -32,11 +32,6 @@ class RedisConfig(BaseSettings):
default=0, default=0,
) )
REDIS_KEY_PREFIX: str = Field(
description="Optional global prefix for Redis keys, topics, and transport artifacts",
default="",
)
REDIS_USE_SSL: bool = Field( REDIS_USE_SSL: bool = Field(
description="Enable SSL/TLS for the Redis connection", description="Enable SSL/TLS for the Redis connection",
default=False, default=False,
@ -122,37 +117,6 @@ class RedisConfig(BaseSettings):
default=None, default=None,
) )
REDIS_RETRY_RETRIES: NonNegativeInt = Field(
description="Maximum number of retries per Redis command on "
"transient failures (ConnectionError, TimeoutError, socket.timeout)",
default=3,
)
REDIS_RETRY_BACKOFF_BASE: PositiveFloat = Field(
description="Base delay in seconds for exponential backoff between retries",
default=1.0,
)
REDIS_RETRY_BACKOFF_CAP: PositiveFloat = Field(
description="Maximum backoff delay in seconds between retries",
default=10.0,
)
REDIS_SOCKET_TIMEOUT: PositiveFloat | None = Field(
description="Socket timeout in seconds for Redis read/write operations",
default=5.0,
)
REDIS_SOCKET_CONNECT_TIMEOUT: PositiveFloat | None = Field(
description="Socket timeout in seconds for Redis connection establishment",
default=5.0,
)
REDIS_HEALTH_CHECK_INTERVAL: NonNegativeInt = Field(
description="Interval in seconds between Redis connection health checks (0 to disable)",
default=30,
)
@field_validator("REDIS_MAX_CONNECTIONS", mode="before") @field_validator("REDIS_MAX_CONNECTIONS", mode="before")
@classmethod @classmethod
def _empty_string_to_none_for_max_conns(cls, v): def _empty_string_to_none_for_max_conns(cls, v):
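
The removed retry fields describe a capped exponential backoff. A small sketch of the delay schedule they imply, assuming the defaults above (base 1.0s, cap 10.0s):

```python
def retry_delay(attempt: int, base: float = 1.0, cap: float = 10.0) -> float:
    """Capped exponential backoff implied by the removed REDIS_RETRY_* knobs.

    Sketch only: the real retry wiring is not in this diff and may instead
    use redis-py's built-in Retry/ExponentialBackoff helpers.
    """
    return min(cap, base * (2**attempt))


# attempt 0 -> 1s, 1 -> 2s, 2 -> 4s, 3 -> 8s, 4+ -> capped at 10s
assert [retry_delay(n) for n in range(5)] == [1.0, 2.0, 4.0, 8.0, 10.0]
```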

View File

@ -1,3 +1,4 @@
from holo_search_sdk.types import BaseQuantizationType, DistanceType, TokenizerType
from pydantic import Field from pydantic import Field
from pydantic_settings import BaseSettings from pydantic_settings import BaseSettings
@ -41,17 +42,17 @@ class HologresConfig(BaseSettings):
default="public", default="public",
) )
HOLOGRES_TOKENIZER: str = Field( HOLOGRES_TOKENIZER: TokenizerType = Field(
description="Tokenizer for full-text search index (e.g., 'jieba', 'ik', 'standard', 'simple').", description="Tokenizer for full-text search index (e.g., 'jieba', 'ik', 'standard', 'simple').",
default="jieba", default="jieba",
) )
HOLOGRES_DISTANCE_METHOD: str = Field( HOLOGRES_DISTANCE_METHOD: DistanceType = Field(
description="Distance method for vector index (e.g., 'Cosine', 'Euclidean', 'InnerProduct').", description="Distance method for vector index (e.g., 'Cosine', 'Euclidean', 'InnerProduct').",
default="Cosine", default="Cosine",
) )
HOLOGRES_BASE_QUANTIZATION_TYPE: str = Field( HOLOGRES_BASE_QUANTIZATION_TYPE: BaseQuantizationType = Field(
description="Base quantization type for vector index (e.g., 'rabitq', 'sq8', 'fp16', 'fp32').", description="Base quantization type for vector index (e.g., 'rabitq', 'sq8', 'fp16', 'fp32').",
default="rabitq", default="rabitq",
) )

View File

@ -1,7 +1,5 @@
"""Configuration for InterSystems IRIS vector database.""" """Configuration for InterSystems IRIS vector database."""
from typing import Any
from pydantic import Field, PositiveInt, model_validator from pydantic import Field, PositiveInt, model_validator
from pydantic_settings import BaseSettings from pydantic_settings import BaseSettings
@ -66,7 +64,7 @@ class IrisVectorConfig(BaseSettings):
@model_validator(mode="before") @model_validator(mode="before")
@classmethod @classmethod
def validate_config(cls, values: dict[str, Any]) -> dict[str, Any]: def validate_config(cls, values: dict) -> dict:
"""Validate IRIS configuration values. """Validate IRIS configuration values.
Args: Args:

View File

@ -1,38 +0,0 @@
"""SECRET_KEY persistence helpers for runtime setup."""
from __future__ import annotations
import secrets
from extensions.ext_storage import storage
GENERATED_SECRET_KEY_FILENAME = ".dify_secret_key"
def resolve_secret_key(secret_key: str) -> str:
"""Return an explicit SECRET_KEY or a generated key persisted in storage."""
if secret_key:
return secret_key
return _load_or_create_secret_key()
def _load_or_create_secret_key() -> str:
try:
persisted_key = storage.load_once(GENERATED_SECRET_KEY_FILENAME).decode("utf-8").strip()
if persisted_key:
return persisted_key
except FileNotFoundError:
pass
generated_key = secrets.token_urlsafe(48)
try:
storage.save(GENERATED_SECRET_KEY_FILENAME, f"{generated_key}\n".encode())
except Exception as exc:
raise ValueError(
f"SECRET_KEY is not set and could not be generated at {GENERATED_SECRET_KEY_FILENAME}. "
"Set SECRET_KEY explicitly or make storage writable."
) from exc
return generated_key
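
A hedged sketch of how the deleted helper was presumably consumed at startup (the call site is not part of this diff):

```python
from configs import dify_config

# Hypothetical wiring; the real call site of resolve_secret_key is not
# shown in this diff, so treat this assignment as illustrative.
dify_config.SECRET_KEY = resolve_secret_key(dify_config.SECRET_KEY)

# Explicit keys pass straight through; empty keys are loaded from (or
# generated into) .dify_secret_key in storage, so restarts stay stable.
assert resolve_secret_key("explicit-key") == "explicit-key"
```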

View File

@ -7,16 +7,15 @@ UUID_NIL = "00000000-0000-0000-0000-000000000000"
DEFAULT_FILE_NUMBER_LIMITS = 3 DEFAULT_FILE_NUMBER_LIMITS = 3
_IMAGE_EXTENSION_BASE: frozenset[str] = frozenset(("jpg", "jpeg", "png", "webp", "gif", "svg")) IMAGE_EXTENSIONS = convert_to_lower_and_upper_set({"jpg", "jpeg", "png", "webp", "gif", "svg"})
_VIDEO_EXTENSION_BASE: frozenset[str] = frozenset(("mp4", "mov", "mpeg", "webm"))
_AUDIO_EXTENSION_BASE: frozenset[str] = frozenset(("mp3", "m4a", "wav", "amr", "mpga"))
IMAGE_EXTENSIONS: frozenset[str] = frozenset(convert_to_lower_and_upper_set(_IMAGE_EXTENSION_BASE)) VIDEO_EXTENSIONS = convert_to_lower_and_upper_set({"mp4", "mov", "mpeg", "webm"})
VIDEO_EXTENSIONS: frozenset[str] = frozenset(convert_to_lower_and_upper_set(_VIDEO_EXTENSION_BASE))
AUDIO_EXTENSIONS: frozenset[str] = frozenset(convert_to_lower_and_upper_set(_AUDIO_EXTENSION_BASE))
_UNSTRUCTURED_DOCUMENT_EXTENSION_BASE: frozenset[str] = frozenset( AUDIO_EXTENSIONS = convert_to_lower_and_upper_set({"mp3", "m4a", "wav", "amr", "mpga"})
(
_doc_extensions: set[str]
if dify_config.ETL_TYPE == "Unstructured":
_doc_extensions = {
"txt", "txt",
"markdown", "markdown",
"md", "md",
@ -36,10 +35,11 @@ _UNSTRUCTURED_DOCUMENT_EXTENSION_BASE: frozenset[str] = frozenset(
"pptx", "pptx",
"xml", "xml",
"epub", "epub",
) }
) if dify_config.UNSTRUCTURED_API_URL:
_DEFAULT_DOCUMENT_EXTENSION_BASE: frozenset[str] = frozenset( _doc_extensions.add("ppt")
( else:
_doc_extensions = {
"txt", "txt",
"markdown", "markdown",
"md", "md",
@ -53,17 +53,8 @@ _DEFAULT_DOCUMENT_EXTENSION_BASE: frozenset[str] = frozenset(
"csv", "csv",
"vtt", "vtt",
"properties", "properties",
) }
) DOCUMENT_EXTENSIONS: set[str] = convert_to_lower_and_upper_set(_doc_extensions)
_doc_extensions: set[str]
if dify_config.ETL_TYPE == "Unstructured":
_doc_extensions = set(_UNSTRUCTURED_DOCUMENT_EXTENSION_BASE)
if dify_config.UNSTRUCTURED_API_URL:
_doc_extensions.add("ppt")
else:
_doc_extensions = set(_DEFAULT_DOCUMENT_EXTENSION_BASE)
DOCUMENT_EXTENSIONS: frozenset[str] = frozenset(convert_to_lower_and_upper_set(_doc_extensions))
# console # console
COOKIE_NAME_ACCESS_TOKEN = "access_token" COOKIE_NAME_ACCESS_TOKEN = "access_token"
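
Both sides of this hunk funnel their base sets through `convert_to_lower_and_upper_set`, which is defined outside this diff. A sketch of what it presumably does, given that the result feeds case-insensitive extension checks:

```python
def convert_to_lower_and_upper_set(values: set[str] | frozenset[str]) -> set[str]:
    """Assumed behavior of the real helper (defined elsewhere in the repo):
    emit both casings so extension membership tests stay case-insensitive."""
    return {v.lower() for v in values} | {v.upper() for v in values}


assert convert_to_lower_and_upper_set({"jpg"}) == {"jpg", "JPG"}
```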

View File

@ -1 +0,0 @@
CURRENT_APP_DSL_VERSION = "0.6.0"

View File

@ -19,7 +19,7 @@
"name": "Website Generator" "name": "Website Generator"
}, },
"app_id": "b53545b1-79ea-4da3-b31a-c39391c6f041", "app_id": "b53545b1-79ea-4da3-b31a-c39391c6f041",
"categories": ["Programming"], "category": "Programming",
"copyright": null, "copyright": null,
"description": null, "description": null,
"is_listed": true, "is_listed": true,
@ -35,7 +35,7 @@
"name": "Investment Analysis Report Copilot" "name": "Investment Analysis Report Copilot"
}, },
"app_id": "a23b57fa-85da-49c0-a571-3aff375976c1", "app_id": "a23b57fa-85da-49c0-a571-3aff375976c1",
"categories": ["Agent"], "category": "Agent",
"copyright": "Dify.AI", "copyright": "Dify.AI",
"description": "Welcome to your personalized Investment Analysis Copilot service, where we delve into the depths of stock analysis to provide you with comprehensive insights. \n", "description": "Welcome to your personalized Investment Analysis Copilot service, where we delve into the depths of stock analysis to provide you with comprehensive insights. \n",
"is_listed": true, "is_listed": true,
@ -51,7 +51,7 @@
"name": "Workflow Planning Assistant " "name": "Workflow Planning Assistant "
}, },
"app_id": "f3303a7d-a81c-404e-b401-1f8711c998c1", "app_id": "f3303a7d-a81c-404e-b401-1f8711c998c1",
"categories": ["Workflow"], "category": "Workflow",
"copyright": null, "copyright": null,
"description": "An assistant that helps you plan and select the right node for a workflow (V0.6.0). ", "description": "An assistant that helps you plan and select the right node for a workflow (V0.6.0). ",
"is_listed": true, "is_listed": true,
@ -67,7 +67,7 @@
"name": "Automated Email Reply " "name": "Automated Email Reply "
}, },
"app_id": "e9d92058-7d20-4904-892f-75d90bef7587", "app_id": "e9d92058-7d20-4904-892f-75d90bef7587",
"categories": ["Workflow"], "category": "Workflow",
"copyright": null, "copyright": null,
"description": "Reply emails using Gmail API. It will automatically retrieve email in your inbox and create a response in Gmail. \nConfigure your Gmail API in Google Cloud Console. ", "description": "Reply emails using Gmail API. It will automatically retrieve email in your inbox and create a response in Gmail. \nConfigure your Gmail API in Google Cloud Console. ",
"is_listed": true, "is_listed": true,
@ -83,7 +83,7 @@
"name": "Book Translation " "name": "Book Translation "
}, },
"app_id": "98b87f88-bd22-4d86-8b74-86beba5e0ed4", "app_id": "98b87f88-bd22-4d86-8b74-86beba5e0ed4",
"categories": ["Workflow"], "category": "Workflow",
"copyright": null, "copyright": null,
"description": "A workflow designed to translate a full book up to 15000 tokens per run. Uses Code node to separate text into chunks and Iteration to translate each chunk. ", "description": "A workflow designed to translate a full book up to 15000 tokens per run. Uses Code node to separate text into chunks and Iteration to translate each chunk. ",
"is_listed": true, "is_listed": true,
@ -99,7 +99,7 @@
"name": "Python bug fixer" "name": "Python bug fixer"
}, },
"app_id": "cae337e6-aec5-4c7b-beca-d6f1a808bd5e", "app_id": "cae337e6-aec5-4c7b-beca-d6f1a808bd5e",
"categories": ["Programming"], "category": "Programming",
"copyright": null, "copyright": null,
"description": null, "description": null,
"is_listed": true, "is_listed": true,
@ -115,7 +115,7 @@
"name": "Code Interpreter" "name": "Code Interpreter"
}, },
"app_id": "d077d587-b072-4f2c-b631-69ed1e7cdc0f", "app_id": "d077d587-b072-4f2c-b631-69ed1e7cdc0f",
"categories": ["Programming"], "category": "Programming",
"copyright": "Copyright 2023 Dify", "copyright": "Copyright 2023 Dify",
"description": "Code interpreter, clarifying the syntax and semantics of the code.", "description": "Code interpreter, clarifying the syntax and semantics of the code.",
"is_listed": true, "is_listed": true,
@ -131,7 +131,7 @@
"name": "SVG Logo Design " "name": "SVG Logo Design "
}, },
"app_id": "73fbb5f1-c15d-4d74-9cc8-46d9db9b2cca", "app_id": "73fbb5f1-c15d-4d74-9cc8-46d9db9b2cca",
"categories": ["Agent"], "category": "Agent",
"copyright": "Dify.AI", "copyright": "Dify.AI",
"description": "Hello, I am your creative partner in bringing ideas to vivid life! I can assist you in creating stunning designs by leveraging abilities of DALL·E 3. ", "description": "Hello, I am your creative partner in bringing ideas to vivid life! I can assist you in creating stunning designs by leveraging abilities of DALL·E 3. ",
"is_listed": true, "is_listed": true,
@ -147,7 +147,7 @@
"name": "Long Story Generator (Iteration) " "name": "Long Story Generator (Iteration) "
}, },
"app_id": "5efb98d7-176b-419c-b6ef-50767391ab62", "app_id": "5efb98d7-176b-419c-b6ef-50767391ab62",
"categories": ["Workflow"], "category": "Workflow",
"copyright": null, "copyright": null,
"description": "A workflow demonstrating how to use Iteration node to generate long article that is longer than the context length of LLMs. ", "description": "A workflow demonstrating how to use Iteration node to generate long article that is longer than the context length of LLMs. ",
"is_listed": true, "is_listed": true,
@ -163,7 +163,7 @@
"name": "Text Summarization Workflow" "name": "Text Summarization Workflow"
}, },
"app_id": "f00c4531-6551-45ee-808f-1d7903099515", "app_id": "f00c4531-6551-45ee-808f-1d7903099515",
"categories": ["Workflow"], "category": "Workflow",
"copyright": null, "copyright": null,
"description": "Based on users' choice, retrieve external knowledge to more accurately summarize articles.", "description": "Based on users' choice, retrieve external knowledge to more accurately summarize articles.",
"is_listed": true, "is_listed": true,
@ -179,7 +179,7 @@
"name": "YouTube Channel Data Analysis" "name": "YouTube Channel Data Analysis"
}, },
"app_id": "be591209-2ca8-410f-8f3b-ca0e530dd638", "app_id": "be591209-2ca8-410f-8f3b-ca0e530dd638",
"categories": ["Agent"], "category": "Agent",
"copyright": "Dify.AI", "copyright": "Dify.AI",
"description": "I am a YouTube Channel Data Analysis Copilot, I am here to provide expert data analysis tailored to your needs. ", "description": "I am a YouTube Channel Data Analysis Copilot, I am here to provide expert data analysis tailored to your needs. ",
"is_listed": true, "is_listed": true,
@ -195,7 +195,7 @@
"name": "Article Grading Bot" "name": "Article Grading Bot"
}, },
"app_id": "a747f7b4-c48b-40d6-b313-5e628232c05f", "app_id": "a747f7b4-c48b-40d6-b313-5e628232c05f",
"categories": ["Writing"], "category": "Writing",
"copyright": null, "copyright": null,
"description": "Assess the quality of articles and text based on user defined criteria. ", "description": "Assess the quality of articles and text based on user defined criteria. ",
"is_listed": true, "is_listed": true,
@ -211,7 +211,7 @@
"name": "SEO Blog Generator" "name": "SEO Blog Generator"
}, },
"app_id": "18f3bd03-524d-4d7a-8374-b30dbe7c69d5", "app_id": "18f3bd03-524d-4d7a-8374-b30dbe7c69d5",
"categories": ["Workflow"], "category": "Workflow",
"copyright": null, "copyright": null,
"description": "Workflow for retrieving information from the internet, followed by segmented generation of SEO blogs.", "description": "Workflow for retrieving information from the internet, followed by segmented generation of SEO blogs.",
"is_listed": true, "is_listed": true,
@ -227,7 +227,7 @@
"name": "SQL Creator" "name": "SQL Creator"
}, },
"app_id": "050ef42e-3e0c-40c1-a6b6-a64f2c49d744", "app_id": "050ef42e-3e0c-40c1-a6b6-a64f2c49d744",
"categories": ["Programming"], "category": "Programming",
"copyright": "Copyright 2023 Dify", "copyright": "Copyright 2023 Dify",
"description": "Write SQL from natural language by pasting in your schema with the request.Please describe your query requirements in natural language and select the target database type.", "description": "Write SQL from natural language by pasting in your schema with the request.Please describe your query requirements in natural language and select the target database type.",
"is_listed": true, "is_listed": true,
@ -243,7 +243,7 @@
"name": "Sentiment Analysis " "name": "Sentiment Analysis "
}, },
"app_id": "f06bf86b-d50c-4895-a942-35112dbe4189", "app_id": "f06bf86b-d50c-4895-a942-35112dbe4189",
"categories": ["Workflow"], "category": "Workflow",
"copyright": null, "copyright": null,
"description": "Batch sentiment analysis of text, followed by JSON output of sentiment classification along with scores.", "description": "Batch sentiment analysis of text, followed by JSON output of sentiment classification along with scores.",
"is_listed": true, "is_listed": true,
@ -259,7 +259,7 @@
"name": "Strategic Consulting Expert" "name": "Strategic Consulting Expert"
}, },
"app_id": "7e8ca1ae-02f2-4b5f-979e-62d19133bee2", "app_id": "7e8ca1ae-02f2-4b5f-979e-62d19133bee2",
"categories": ["Assistant"], "category": "Assistant",
"copyright": "Copyright 2023 Dify", "copyright": "Copyright 2023 Dify",
"description": "I can answer your questions related to strategic marketing.", "description": "I can answer your questions related to strategic marketing.",
"is_listed": true, "is_listed": true,
@ -275,7 +275,7 @@
"name": "Code Converter" "name": "Code Converter"
}, },
"app_id": "4006c4b2-0735-4f37-8dbb-fb1a8c5bd87a", "app_id": "4006c4b2-0735-4f37-8dbb-fb1a8c5bd87a",
"categories": ["Programming"], "category": "Programming",
"copyright": "Copyright 2023 Dify", "copyright": "Copyright 2023 Dify",
"description": "This is an application that provides the ability to convert code snippets in multiple programming languages. You can input the code you wish to convert, select the target programming language, and get the desired output.", "description": "This is an application that provides the ability to convert code snippets in multiple programming languages. You can input the code you wish to convert, select the target programming language, and get the desired output.",
"is_listed": true, "is_listed": true,
@ -291,7 +291,7 @@
"name": "Question Classifier + Knowledge + Chatbot " "name": "Question Classifier + Knowledge + Chatbot "
}, },
"app_id": "d9f6b733-e35d-4a40-9f38-ca7bbfa009f7", "app_id": "d9f6b733-e35d-4a40-9f38-ca7bbfa009f7",
"categories": ["Workflow"], "category": "Workflow",
"copyright": null, "copyright": null,
"description": "Basic Workflow Template, a chatbot capable of identifying intents alongside with a knowledge base.", "description": "Basic Workflow Template, a chatbot capable of identifying intents alongside with a knowledge base.",
"is_listed": true, "is_listed": true,
@ -307,7 +307,7 @@
"name": "AI Front-end interviewer" "name": "AI Front-end interviewer"
}, },
"app_id": "127efead-8944-4e20-ba9d-12402eb345e0", "app_id": "127efead-8944-4e20-ba9d-12402eb345e0",
"categories": ["HR"], "category": "HR",
"copyright": "Copyright 2023 Dify", "copyright": "Copyright 2023 Dify",
"description": "A simulated front-end interviewer that tests the skill level of front-end development through questioning.", "description": "A simulated front-end interviewer that tests the skill level of front-end development through questioning.",
"is_listed": true, "is_listed": true,
@ -323,7 +323,7 @@
"name": "Knowledge Retrieval + Chatbot " "name": "Knowledge Retrieval + Chatbot "
}, },
"app_id": "e9870913-dd01-4710-9f06-15d4180ca1ce", "app_id": "e9870913-dd01-4710-9f06-15d4180ca1ce",
"categories": ["Workflow"], "category": "Workflow",
"copyright": null, "copyright": null,
"description": "Basic Workflow Template, A chatbot with a knowledge base. ", "description": "Basic Workflow Template, A chatbot with a knowledge base. ",
"is_listed": true, "is_listed": true,
@ -339,7 +339,7 @@
"name": "Email Assistant Workflow " "name": "Email Assistant Workflow "
}, },
"app_id": "dd5b6353-ae9b-4bce-be6a-a681a12cf709", "app_id": "dd5b6353-ae9b-4bce-be6a-a681a12cf709",
"categories": ["Workflow"], "category": "Workflow",
"copyright": null, "copyright": null,
"description": "A multifunctional email assistant capable of summarizing, replying, composing, proofreading, and checking grammar.", "description": "A multifunctional email assistant capable of summarizing, replying, composing, proofreading, and checking grammar.",
"is_listed": true, "is_listed": true,
@ -355,7 +355,7 @@
"name": "Customer Review Analysis Workflow " "name": "Customer Review Analysis Workflow "
}, },
"app_id": "9c0cd31f-4b62-4005-adf5-e3888d08654a", "app_id": "9c0cd31f-4b62-4005-adf5-e3888d08654a",
"categories": ["Workflow"], "category": "Workflow",
"copyright": null, "copyright": null,
"description": "Utilize LLM (Large Language Models) to classify customer reviews and forward them to the internal system.", "description": "Utilize LLM (Large Language Models) to classify customer reviews and forward them to the internal system.",
"is_listed": true, "is_listed": true,

View File

@ -1,36 +1,74 @@
""" """
Application-layer context adapters. Core Context - Framework-agnostic context management.
Concrete execution-context implementations live here so `graphon` only This module provides context management that is independent of any specific
depends on injected context managers rather than framework state capture. web framework. Framework-specific implementations register their context
capture functions at application initialization time.
This ensures the workflow layer remains completely decoupled from Flask
or any other web framework.
""" """
from context.execution_context import ( import contextvars
AppContext, from collections.abc import Callable
ContextProviderNotFoundError,
from dify_graph.context.execution_context import (
ExecutionContext, ExecutionContext,
ExecutionContextBuilder,
IExecutionContext, IExecutionContext,
NullAppContext, NullAppContext,
capture_current_context,
read_context,
register_context,
register_context_capturer,
reset_context_provider,
) )
from context.models import SandboxContext
# Global capturer function - set by framework-specific modules
_capturer: Callable[[], IExecutionContext] | None = None
def register_context_capturer(capturer: Callable[[], IExecutionContext]) -> None:
"""
Register a context capture function.
This should be called by framework-specific modules (e.g., Flask)
during application initialization.
Args:
capturer: Function that captures current context and returns IExecutionContext
"""
global _capturer
_capturer = capturer
def capture_current_context() -> IExecutionContext:
"""
Capture current execution context.
This function uses the registered context capturer. If no capturer
is registered, it returns a minimal context with only contextvars
(suitable for non-framework environments like tests or standalone scripts).
Returns:
IExecutionContext with captured context
"""
if _capturer is None:
# No framework registered - return minimal context
return ExecutionContext(
app_context=NullAppContext(),
context_vars=contextvars.copy_context(),
)
return _capturer()
def reset_context_provider() -> None:
"""
Reset the context capturer.
This is primarily useful for testing to ensure a clean state.
"""
global _capturer
_capturer = None
__all__ = [ __all__ = [
"AppContext",
"ContextProviderNotFoundError",
"ExecutionContext",
"ExecutionContextBuilder",
"IExecutionContext",
"NullAppContext",
"SandboxContext",
"capture_current_context", "capture_current_context",
"read_context",
"register_context",
"register_context_capturer", "register_context_capturer",
"reset_context_provider", "reset_context_provider",
] ]
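
A hedged end-to-end sketch of the API this module exposes on the 1.13.3 side (registration normally happens in a framework adapter such as the Flask one later in this diff; anything beyond what the module shows is an assumption):

```python
import contextvars

from dify_graph.context import (  # 1.13.3-side import path shown above
    ExecutionContext,
    NullAppContext,
    capture_current_context,
    register_context_capturer,
)

# A framework adapter registers a capturer once at startup.
register_context_capturer(
    lambda: ExecutionContext(
        app_context=NullAppContext(),
        context_vars=contextvars.copy_context(),
    )
)

ctx = capture_current_context()
with ctx:  # IExecutionContext is enterable per the protocol
    pass  # worker code runs with the captured context restored
```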

View File

@ -1,251 +0,0 @@
"""
Application-layer execution context adapters.
Concrete context capture lives outside `graphon` so the graph package only
consumes injected context managers when it needs to preserve thread-local state.
"""
import contextvars
import threading
from abc import ABC, abstractmethod
from collections.abc import Callable, Generator
from contextlib import AbstractContextManager, contextmanager
from typing import Any, Protocol, final, runtime_checkable
from pydantic import BaseModel
class AppContext(ABC):
"""
Abstract application context interface.
Application adapters can implement this to restore framework-specific state
such as Flask app context around worker execution.
"""
@abstractmethod
def get_config(self, key: str, default: Any = None) -> Any:
"""Get configuration value by key."""
raise NotImplementedError
@abstractmethod
def get_extension(self, name: str) -> Any:
"""Get application extension by name."""
raise NotImplementedError
@abstractmethod
def enter(self) -> AbstractContextManager[None]:
"""Enter the application context."""
raise NotImplementedError
@runtime_checkable
class IExecutionContext(Protocol):
"""
Protocol for enterable execution context objects.
Concrete implementations may carry extra framework state, but callers only
depend on standard context-manager behavior plus optional user metadata.
"""
def __enter__(self) -> "IExecutionContext":
"""Enter the execution context."""
...
def __exit__(self, *args: Any) -> None:
"""Exit the execution context."""
...
@property
def user(self) -> Any:
"""Get user object."""
...
@final
class ExecutionContext:
"""
Generic execution context used by application-layer adapters.
It restores captured `contextvars` and optionally enters an application
context before the worker executes graph logic.
"""
def __init__(
self,
app_context: AppContext | None = None,
context_vars: contextvars.Context | None = None,
user: Any = None,
) -> None:
self._app_context = app_context
self._context_vars = context_vars
self._user = user
self._local = threading.local()
@property
def app_context(self) -> AppContext | None:
"""Get application context."""
return self._app_context
@property
def context_vars(self) -> contextvars.Context | None:
"""Get captured context variables."""
return self._context_vars
@property
def user(self) -> Any:
"""Get captured user object."""
return self._user
@contextmanager
def enter(self) -> Generator[None, None, None]:
"""Enter this execution context."""
if self._context_vars:
for var, val in self._context_vars.items():
var.set(val)
if self._app_context is not None:
with self._app_context.enter():
yield
else:
yield
def __enter__(self) -> "ExecutionContext":
"""Enter the execution context."""
cm = self.enter()
self._local.cm = cm
cm.__enter__()
return self
def __exit__(self, *args: Any) -> None:
"""Exit the execution context."""
cm = getattr(self._local, "cm", None)
if cm is not None:
cm.__exit__(*args)
class NullAppContext(AppContext):
"""
Null application context for non-framework environments.
"""
def __init__(self, config: dict[str, Any] | None = None) -> None:
self._config = config or {}
self._extensions: dict[str, Any] = {}
def get_config(self, key: str, default: Any = None) -> Any:
"""Get configuration value by key."""
return self._config.get(key, default)
def get_extension(self, name: str) -> Any:
"""Get extension by name."""
return self._extensions.get(name)
def set_extension(self, name: str, extension: Any) -> None:
"""Register an extension for tests or standalone execution."""
self._extensions[name] = extension
@contextmanager
def enter(self) -> Generator[None, None, None]:
"""Enter null context (no-op)."""
yield
class ExecutionContextBuilder:
"""
Builder for creating `ExecutionContext` instances.
"""
def __init__(self) -> None:
self._app_context: AppContext | None = None
self._context_vars: contextvars.Context | None = None
self._user: Any = None
def with_app_context(self, app_context: AppContext) -> "ExecutionContextBuilder":
"""Set application context."""
self._app_context = app_context
return self
def with_context_vars(self, context_vars: contextvars.Context) -> "ExecutionContextBuilder":
"""Set context variables."""
self._context_vars = context_vars
return self
def with_user(self, user: Any) -> "ExecutionContextBuilder":
"""Set user."""
self._user = user
return self
def build(self) -> ExecutionContext:
"""Build the execution context."""
return ExecutionContext(
app_context=self._app_context,
context_vars=self._context_vars,
user=self._user,
)
_capturer: Callable[[], IExecutionContext] | None = None
_tenant_context_providers: dict[tuple[str, str], Callable[[], BaseModel]] = {}
class ContextProviderNotFoundError(KeyError):
"""Raised when a tenant-scoped context provider is missing."""
pass
def register_context_capturer(capturer: Callable[[], IExecutionContext]) -> None:
"""Register an enterable execution context capturer."""
global _capturer
_capturer = capturer
def register_context(name: str, tenant_id: str, provider: Callable[[], BaseModel]) -> None:
"""Register a tenant-specific provider for a named context."""
_tenant_context_providers[(name, tenant_id)] = provider
def read_context(name: str, *, tenant_id: str) -> BaseModel:
"""Read a context value for a specific tenant."""
provider = _tenant_context_providers.get((name, tenant_id))
if provider is None:
raise ContextProviderNotFoundError(f"Context provider '{name}' not registered for tenant '{tenant_id}'")
return provider()
def capture_current_context() -> IExecutionContext:
"""
Capture current execution context from the calling environment.
If no framework adapter is registered, return a minimal context that only
restores `contextvars`.
"""
if _capturer is None:
return ExecutionContext(
app_context=NullAppContext(),
context_vars=contextvars.copy_context(),
)
return _capturer()
def reset_context_provider() -> None:
"""Reset the capturer and tenant-scoped providers."""
global _capturer
_capturer = None
_tenant_context_providers.clear()
__all__ = [
"AppContext",
"ContextProviderNotFoundError",
"ExecutionContext",
"ExecutionContextBuilder",
"IExecutionContext",
"NullAppContext",
"capture_current_context",
"read_context",
"register_context",
"register_context_capturer",
"reset_context_provider",
]
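
Before its deletion, this module was the whole application-side context surface. A usage sketch, assuming the names above are importable as written (`SandboxSettings` is a stand-in for a real context model such as `context.models.SandboxContext`):

```python
from pydantic import BaseModel


class SandboxSettings(BaseModel):
    enabled: bool = True


# Tenant-scoped providers, per register_context / read_context above.
register_context("sandbox", "tenant-1", lambda: SandboxSettings())
settings = read_context("sandbox", tenant_id="tenant-1")
assert isinstance(settings, SandboxSettings)

# Builder-based construction, per ExecutionContextBuilder above.
ctx = (
    ExecutionContextBuilder()
    .with_app_context(NullAppContext())
    .with_user("user-1")
    .build()
)
with ctx:
    pass  # contextvars (and the app context, if any) are restored here
```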

View File

@ -10,7 +10,11 @@ from typing import Any, final
from flask import Flask, current_app, g from flask import Flask, current_app, g
from context.execution_context import AppContext, IExecutionContext, register_context_capturer from dify_graph.context import register_context_capturer
from dify_graph.context.execution_context import (
AppContext,
IExecutionContext,
)
@final @final

View File

@ -6,6 +6,7 @@ from contexts.wrapper import RecyclableContextVar
if TYPE_CHECKING: if TYPE_CHECKING:
from core.datasource.__base.datasource_provider import DatasourcePluginProviderController from core.datasource.__base.datasource_provider import DatasourcePluginProviderController
from core.plugin.entities.plugin_daemon import PluginModelProviderEntity
from core.tools.plugin_tool.provider import PluginToolProviderController from core.tools.plugin_tool.provider import PluginToolProviderController
from core.trigger.provider import PluginTriggerProviderController from core.trigger.provider import PluginTriggerProviderController
@ -19,6 +20,14 @@ plugin_tool_providers: RecyclableContextVar[dict[str, "PluginToolProviderControl
plugin_tool_providers_lock: RecyclableContextVar[Lock] = RecyclableContextVar(ContextVar("plugin_tool_providers_lock")) plugin_tool_providers_lock: RecyclableContextVar[Lock] = RecyclableContextVar(ContextVar("plugin_tool_providers_lock"))
plugin_model_providers: RecyclableContextVar[list["PluginModelProviderEntity"] | None] = RecyclableContextVar(
ContextVar("plugin_model_providers")
)
plugin_model_providers_lock: RecyclableContextVar[Lock] = RecyclableContextVar(
ContextVar("plugin_model_providers_lock")
)
datasource_plugin_providers: RecyclableContextVar[dict[str, "DatasourcePluginProviderController"]] = ( datasource_plugin_providers: RecyclableContextVar[dict[str, "DatasourcePluginProviderController"]] = (
RecyclableContextVar(ContextVar("datasource_plugin_providers")) RecyclableContextVar(ContextVar("datasource_plugin_providers"))
) )

View File

@ -1,4 +1,7 @@
from contextvars import ContextVar from contextvars import ContextVar
from typing import Generic, TypeVar
T = TypeVar("T")
class HiddenValue: class HiddenValue:
@ -8,7 +11,7 @@ class HiddenValue:
_default = HiddenValue() _default = HiddenValue()
class RecyclableContextVar[T]: class RecyclableContextVar(Generic[T]):
""" """
RecyclableContextVar is a wrapper around ContextVar RecyclableContextVar is a wrapper around ContextVar
It's safe to use in gunicorn with thread recycling, but features like `reset` are not available for now It's safe to use in gunicorn with thread recycling, but features like `reset` are not available for now
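
A usage sketch of the wrapper (hedged: only the declaration pattern actually appears in this diff; `get()`/`set()` mirroring `ContextVar` is an assumption):

```python
from contextvars import ContextVar

# Mirrors the declarations earlier in this diff (e.g. plugin_tool_providers).
# Assumption: the wrapper exposes ContextVar-style get()/set().
request_id: RecyclableContextVar[str] = RecyclableContextVar(ContextVar("request_id"))

request_id.set("req-123")
assert request_id.get() == "req-123"
```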

View File

@ -1,193 +0,0 @@
# API Schema Guide
This guide describes the expected Flask-RESTX + Pydantic pattern for controller request payloads, query
parameters, response schemas, and Swagger documentation.
## Principles
- Use Pydantic `BaseModel` for request bodies and query parameters.
- Use `fields.base.ResponseModel` for response DTOs.
- Keep runtime validation and Swagger documentation wired to the same Pydantic model.
- Prefer explicit validation and serialization in controller methods over Flask-RESTX marshalling.
- Do not add new Flask-RESTX `fields.*` dictionaries, `Namespace.model(...)` exports, or `@marshal_with(...)` for migrated or new endpoints.
- Do not use `@ns.expect(...)` for GET query parameters. Flask-RESTX documents that as a request body.
## Naming
- Request body models: use a `Payload` suffix.
- Example: `WorkflowRunPayload`, `DatasourceVariablesPayload`.
- Query parameter models: use a `Query` suffix.
- Example: `WorkflowRunListQuery`, `MessageListQuery`.
- Response models: use a `Response` suffix and inherit from `ResponseModel`.
- Example: `WorkflowRunDetailResponse`, `WorkflowRunNodeExecutionListResponse`.
- Use `ListResponse` or `PaginationResponse` for wrapper responses.
- Example: `WorkflowRunNodeExecutionListResponse`, `WorkflowRunPaginationResponse`.
- Keep these models near the controller when they are endpoint-specific. Move them to `fields/*_fields.py` only when shared by multiple controllers.
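Put together, the suffix rules read like this (a minimal sketch; the field lists are illustrative):
```python
from typing import Any

from pydantic import BaseModel

from fields.base import ResponseModel


class WorkflowRunPayload(BaseModel):
    """Request body: `Payload` suffix."""

    inputs: dict[str, Any]


class WorkflowRunListQuery(BaseModel):
    """GET query parameters: `Query` suffix."""

    limit: int = 20


class WorkflowRunDetailResponse(ResponseModel):
    """Response DTO: `Response` suffix, inherits `ResponseModel`."""

    id: str
```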
## Registering Models For Swagger
Use helpers from `controllers.common.schema`.
```python
from controllers.common.schema import (
query_params_from_model,
register_response_schema_models,
register_schema_models,
)
```
Register request payload and query models with `register_schema_models(...)`:
```python
register_schema_models(
console_ns,
WorkflowRunPayload,
WorkflowRunListQuery,
)
```
Register response models with `register_response_schema_models(...)`:
```python
register_response_schema_models(
console_ns,
WorkflowRunDetailResponse,
WorkflowRunPaginationResponse,
)
```
Response models are registered in Pydantic serialization mode. This matters when a response model uses
`validation_alias` to read internal object attributes but emits public API field names. For example, a response model
can validate from `inputs_dict` while documenting and serializing `inputs`.
## Request Bodies
For non-GET request bodies:
1. Define a Pydantic `Payload` model.
2. Register it with `register_schema_models(...)`.
3. Use `@ns.expect(ns.models[Payload.__name__])` for Swagger documentation.
4. Validate from `ns.payload or {}` inside the controller.
```python
class DraftWorkflowNodeRunPayload(BaseModel):
inputs: dict[str, Any]
query: str = ""
register_schema_models(console_ns, DraftWorkflowNodeRunPayload)
@console_ns.expect(console_ns.models[DraftWorkflowNodeRunPayload.__name__])
def post(self, app_model: App, node_id: str):
payload = DraftWorkflowNodeRunPayload.model_validate(console_ns.payload or {})
result = service.run(..., inputs=payload.inputs, query=payload.query)
return WorkflowRunNodeExecutionResponse.model_validate(result, from_attributes=True).model_dump(mode="json")
```
## Query Parameters
For GET query parameters:
1. Define a Pydantic `Query` model.
2. Register it with `register_schema_models(...)` only when the model is referenced elsewhere in the docs;
otherwise `query_params_from_model(...)` alone is enough, since no body schema is needed.
3. Use `@ns.doc(params=query_params_from_model(QueryModel))`.
4. Validate from `request.args.to_dict(flat=True)` or an explicit dict when type coercion is needed.
```python
class WorkflowRunListQuery(BaseModel):
last_id: str | None = Field(default=None, description="Last run ID for pagination")
limit: int = Field(default=20, ge=1, le=100, description="Number of items per page (1-100)")
@console_ns.doc(params=query_params_from_model(WorkflowRunListQuery))
def get(self, app_model: App):
query = WorkflowRunListQuery.model_validate(request.args.to_dict(flat=True))
result = service.list(..., limit=query.limit, last_id=query.last_id)
return WorkflowRunPaginationResponse.model_validate(result, from_attributes=True).model_dump(mode="json")
```
Do not do this for GET query parameters:
```python
@console_ns.expect(console_ns.models[WorkflowRunListQuery.__name__])
def get(...):
...
```
That documents the query model as a request body on the GET endpoint, which is not the expected contract.
## Responses
Response models should inherit from `ResponseModel`:
```python
class WorkflowRunNodeExecutionResponse(ResponseModel):
id: str
inputs: Any = Field(default=None, validation_alias="inputs_dict")
process_data: Any = Field(default=None, validation_alias="process_data_dict")
outputs: Any = Field(default=None, validation_alias="outputs_dict")
```
Document response models with `@ns.response(...)`:
```python
@console_ns.response(
200,
"Node run started successfully",
console_ns.models[WorkflowRunNodeExecutionResponse.__name__],
)
def post(...):
...
```
Serialize explicitly:
```python
return WorkflowRunNodeExecutionResponse.model_validate(
workflow_node_execution,
from_attributes=True,
).model_dump(mode="json")
```
If the service can return `None`, translate that into the expected HTTP error before validation:
```python
workflow_run = service.get_workflow_run(...)
if workflow_run is None:
raise NotFound("Workflow run not found")
return WorkflowRunDetailResponse.model_validate(workflow_run, from_attributes=True).model_dump(mode="json")
```
## Legacy Flask-RESTX Patterns
Avoid adding these patterns to new or migrated endpoints:
- `ns.model(...)` for new request/response DTOs.
- Module-level exported RESTX model objects such as `workflow_run_detail_model`.
- `fields.Nested({...})` with raw inline dict field maps.
- `@marshal_with(...)` for response serialization.
- `@ns.expect(...)` for GET query params.
Existing legacy field dictionaries may remain where an endpoint has not yet been migrated. Keep that compatibility local
to the legacy area and avoid importing RESTX model objects from controllers.
## Verifying Swagger
For schema and documentation changes, run focused tests and generate Swagger JSON:
```bash
uv run --project . pytest tests/unit_tests/controllers/common/test_schema.py
uv run --project . pytest tests/unit_tests/commands/test_generate_swagger_specs.py tests/unit_tests/controllers/test_swagger.py
uv run --project . dev/generate_swagger_specs.py --output-dir /tmp/dify-openapi-check
```
Inspect affected endpoints with `jq`. Check that:
- GET parameters are `in: query`.
- Request bodies appear only where the endpoint has a body.
- Responses reference the expected `*Response` schema.
- Response schemas use public serialized names, not internal validation aliases like `inputs_dict`.
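The same checks can be scripted instead of eyeballed with `jq`; a hedged sketch (the spec filename under the output directory is an assumption):
```python
import json
from pathlib import Path

# Filename is hypothetical; use whatever generate_swagger_specs.py wrote.
spec = json.loads(Path("/tmp/dify-openapi-check/console.json").read_text())

for path, methods in spec["paths"].items():
    get_op = methods.get("get")
    if not get_op:
        continue
    for param in get_op.get("parameters", []):
        # GET parameters must be documented as query params, not bodies.
        assert param.get("in") == "query", (path, param)
```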

View File

@ -1,104 +0,0 @@
from typing import Any, Literal
from uuid import UUID
from pydantic import BaseModel, Field, model_validator
from libs.helper import UUIDStrOrEmpty
# --- Conversation schemas ---
class ConversationRenamePayload(BaseModel):
name: str | None = None
auto_generate: bool = False
@model_validator(mode="after")
def validate_name_requirement(self):
if not self.auto_generate:
if self.name is None or not self.name.strip():
raise ValueError("name is required when auto_generate is false")
return self
# --- Message schemas ---
class MessageListQuery(BaseModel):
conversation_id: UUIDStrOrEmpty = Field(description="Conversation UUID")
first_id: UUIDStrOrEmpty | None = Field(default=None, description="First message ID for pagination")
limit: int = Field(default=20, ge=1, le=100, description="Number of messages to return (1-100)")
class MessageFeedbackPayload(BaseModel):
rating: Literal["like", "dislike"] | None = None
content: str | None = None
# --- Saved message schemas ---
class SavedMessageListQuery(BaseModel):
last_id: UUIDStrOrEmpty | None = None
limit: int = Field(default=20, ge=1, le=100)
class SavedMessageCreatePayload(BaseModel):
message_id: UUIDStrOrEmpty
# --- Workflow schemas ---
class DefaultBlockConfigQuery(BaseModel):
q: str | None = None
class WorkflowListQuery(BaseModel):
page: int = Field(default=1, ge=1, le=99999)
limit: int = Field(default=10, ge=1, le=100)
user_id: str | None = None
named_only: bool = False
class WorkflowRunPayload(BaseModel):
inputs: dict[str, Any]
files: list[dict[str, Any]] | None = None
class WorkflowUpdatePayload(BaseModel):
marked_name: str | None = Field(default=None, max_length=20)
marked_comment: str | None = Field(default=None, max_length=100)
# --- Dataset schemas ---
DOCUMENT_BATCH_DOWNLOAD_ZIP_MAX_DOCS = 100
class ChildChunkCreatePayload(BaseModel):
content: str
class ChildChunkUpdatePayload(BaseModel):
content: str
class DocumentBatchDownloadZipPayload(BaseModel):
"""Request payload for bulk downloading documents as a zip archive."""
document_ids: list[UUID] = Field(..., min_length=1, max_length=DOCUMENT_BATCH_DOWNLOAD_ZIP_MAX_DOCS)
class MetadataUpdatePayload(BaseModel):
name: str
# --- Audio schemas ---
class TextToAudioPayload(BaseModel):
message_id: str | None = Field(default=None, description="Message ID")
voice: str | None = Field(default=None, description="Voice to use for TTS")
text: str | None = Field(default=None, description="Text to convert to audio")
streaming: bool | None = Field(default=None, description="Enable streaming response")
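
The rename payload's validator is the only non-obvious piece in this deleted module; a quick behavioral sketch (the class is assumed importable here):

```python
from pydantic import ValidationError

ConversationRenamePayload(name="My chat")          # ok: explicit name
ConversationRenamePayload(auto_generate=True)      # ok: name may be omitted
try:
    ConversationRenamePayload(auto_generate=False)  # neither name nor auto
except ValidationError as exc:
    print(exc.errors()[0]["msg"])  # ...name is required when auto_generate is false
```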

View File

@ -1,14 +1,14 @@
from __future__ import annotations from __future__ import annotations
from typing import Any from typing import Any, TypeAlias
from pydantic import BaseModel, ConfigDict, computed_field from pydantic import BaseModel, ConfigDict, computed_field
from graphon.file import helpers as file_helpers from dify_graph.file import helpers as file_helpers
from models.model import IconType from models.model import IconType
type JSONValue = str | int | float | bool | None | dict[str, Any] | list[Any] JSONValue: TypeAlias = str | int | float | bool | None | dict[str, Any] | list[Any]
type JSONObject = dict[str, Any] JSONObject: TypeAlias = dict[str, Any]
class SystemParameters(BaseModel): class SystemParameters(BaseModel):

View File

@ -4,8 +4,8 @@ from urllib.parse import quote
from flask import Response from flask import Response
HTML_MIME_TYPES: frozenset[str] = frozenset(("text/html", "application/xhtml+xml")) HTML_MIME_TYPES = frozenset({"text/html", "application/xhtml+xml"})
HTML_EXTENSIONS: frozenset[str] = frozenset(("html", "htm")) HTML_EXTENSIONS = frozenset({"html", "htm"})
def _normalize_mime_type(mime_type: str | None) -> str: def _normalize_mime_type(mime_type: str | None) -> str:

View File

@ -41,8 +41,7 @@ def guess_file_info_from_response(response: httpx.Response):
# Try to extract filename from URL # Try to extract filename from URL
parsed_url = urllib.parse.urlparse(url) parsed_url = urllib.parse.urlparse(url)
url_path = parsed_url.path url_path = parsed_url.path
# Decode percent-encoded characters in the path segment filename = os.path.basename(url_path)
filename = urllib.parse.unquote(os.path.basename(url_path))
# If filename couldn't be extracted, use Content-Disposition header # If filename couldn't be extracted, use Content-Disposition header
if not filename: if not filename:

View File

@ -1,6 +0,0 @@
from pydantic import BaseModel, JsonValue
class HumanInputFormSubmitPayload(BaseModel):
inputs: dict[str, JsonValue]
action: str

View File

@ -1,14 +1,6 @@
"""Helpers for registering Pydantic models with Flask-RESTX namespaces. """Helpers for registering Pydantic models with Flask-RESTX namespaces."""
Flask-RESTX treats `SchemaModel` bodies as opaque JSON schemas; it does not
promote Pydantic's nested `$defs` into top-level Swagger `definitions`.
These helpers keep that translation centralized so models registered through
`register_schema_models` emit resolvable Swagger 2.0 references.
"""
from collections.abc import Mapping
from enum import StrEnum from enum import StrEnum
from typing import Any, Literal, NotRequired, TypedDict
from flask_restx import Namespace from flask_restx import Namespace
from pydantic import BaseModel, TypeAdapter from pydantic import BaseModel, TypeAdapter
@ -16,59 +8,10 @@ from pydantic import BaseModel, TypeAdapter
DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}"
QueryParamDoc = TypedDict(
"QueryParamDoc",
{
"in": NotRequired[str],
"type": NotRequired[str],
"items": NotRequired[dict[str, object]],
"required": NotRequired[bool],
"description": NotRequired[str],
"enum": NotRequired[list[object]],
"default": NotRequired[object],
"minimum": NotRequired[int | float],
"maximum": NotRequired[int | float],
"minLength": NotRequired[int],
"maxLength": NotRequired[int],
"minItems": NotRequired[int],
"maxItems": NotRequired[int],
},
)
def _register_json_schema(namespace: Namespace, name: str, schema: dict) -> None:
"""Register a JSON schema and promote any nested Pydantic `$defs`."""
nested_definitions = schema.get("$defs")
schema_to_register = dict(schema)
if isinstance(nested_definitions, dict):
schema_to_register.pop("$defs")
namespace.schema_model(name, schema_to_register)
if not isinstance(nested_definitions, dict):
return
for nested_name, nested_schema in nested_definitions.items():
if isinstance(nested_schema, dict):
_register_json_schema(namespace, nested_name, nested_schema)
JsonSchemaMode = Literal["validation", "serialization"]
def _register_schema_model(namespace: Namespace, model: type[BaseModel], *, mode: JsonSchemaMode) -> None:
_register_json_schema(
namespace,
model.__name__,
model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0, mode=mode),
)
def register_schema_model(namespace: Namespace, model: type[BaseModel]) -> None: def register_schema_model(namespace: Namespace, model: type[BaseModel]) -> None:
"""Register a BaseModel and its nested schema definitions for Swagger documentation.""" """Register a single BaseModel with a namespace for Swagger documentation."""
_register_schema_model(namespace, model, mode="validation") namespace.schema_model(model.__name__, model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0))
def register_schema_models(namespace: Namespace, *models: type[BaseModel]) -> None: def register_schema_models(namespace: Namespace, *models: type[BaseModel]) -> None:
@ -78,19 +21,6 @@ def register_schema_models(namespace: Namespace, *models: type[BaseModel]) -> No
register_schema_model(namespace, model) register_schema_model(namespace, model)
def register_response_schema_model(namespace: Namespace, model: type[BaseModel]) -> None:
"""Register a BaseModel using its serialized response shape."""
_register_schema_model(namespace, model, mode="serialization")
def register_response_schema_models(namespace: Namespace, *models: type[BaseModel]) -> None:
"""Register multiple response BaseModels using their serialized response shape."""
for model in models:
register_response_schema_model(namespace, model)
def get_or_create_model(model_name: str, field_def): def get_or_create_model(model_name: str, field_def):
# Import lazily to avoid circular imports between console controllers and schema helpers. # Import lazily to avoid circular imports between console controllers and schema helpers.
from controllers.console import console_ns from controllers.console import console_ns
@ -104,114 +34,15 @@ def get_or_create_model(model_name: str, field_def):
def register_enum_models(namespace: Namespace, *models: type[StrEnum]) -> None: def register_enum_models(namespace: Namespace, *models: type[StrEnum]) -> None:
"""Register multiple StrEnum with a namespace.""" """Register multiple StrEnum with a namespace."""
for model in models: for model in models:
_register_json_schema( namespace.schema_model(
namespace, model.__name__, TypeAdapter(model).json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)
model.__name__,
TypeAdapter(model).json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0),
) )
def query_params_from_model(model: type[BaseModel]) -> dict[str, QueryParamDoc]:
"""Build Flask-RESTX query parameter docs from a flat Pydantic model.
`Namespace.expect()` treats Pydantic schema models as request bodies, so GET
endpoints should keep runtime validation on the Pydantic model and feed this
derived mapping to `Namespace.doc(params=...)` for Swagger documentation.
"""
schema = model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)
properties = schema.get("properties", {})
if not isinstance(properties, Mapping):
return {}
required = schema.get("required", [])
required_names = set(required) if isinstance(required, list) else set()
params: dict[str, QueryParamDoc] = {}
for name, property_schema in properties.items():
if not isinstance(name, str) or not isinstance(property_schema, Mapping):
continue
params[name] = _query_param_from_property(property_schema, required=name in required_names)
return params
def _query_param_from_property(property_schema: Mapping[str, Any], *, required: bool) -> QueryParamDoc:
param_schema = _nullable_property_schema(property_schema)
param_doc: QueryParamDoc = {"in": "query", "required": required}
description = param_schema.get("description")
if isinstance(description, str):
param_doc["description"] = description
schema_type = param_schema.get("type")
if isinstance(schema_type, str) and schema_type in {"array", "boolean", "integer", "number", "string"}:
param_doc["type"] = schema_type
if schema_type == "array":
items = param_schema.get("items")
if isinstance(items, Mapping):
item_type = items.get("type")
if isinstance(item_type, str):
param_doc["items"] = {"type": item_type}
enum = param_schema.get("enum")
if isinstance(enum, list):
param_doc["enum"] = enum
default = param_schema.get("default")
if default is not None:
param_doc["default"] = default
minimum = param_schema.get("minimum")
if isinstance(minimum, int | float):
param_doc["minimum"] = minimum
maximum = param_schema.get("maximum")
if isinstance(maximum, int | float):
param_doc["maximum"] = maximum
min_length = param_schema.get("minLength")
if isinstance(min_length, int):
param_doc["minLength"] = min_length
max_length = param_schema.get("maxLength")
if isinstance(max_length, int):
param_doc["maxLength"] = max_length
min_items = param_schema.get("minItems")
if isinstance(min_items, int):
param_doc["minItems"] = min_items
max_items = param_schema.get("maxItems")
if isinstance(max_items, int):
param_doc["maxItems"] = max_items
return param_doc
def _nullable_property_schema(property_schema: Mapping[str, Any]) -> Mapping[str, Any]:
any_of = property_schema.get("anyOf")
if not isinstance(any_of, list):
return property_schema
non_null_candidates = [
candidate for candidate in any_of if isinstance(candidate, Mapping) and candidate.get("type") != "null"
]
if len(non_null_candidates) == 1:
return {**property_schema, **non_null_candidates[0]}
return property_schema
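`_nullable_property_schema` exists because optional fields such as `keyword: str | None` serialize as an `anyOf` of the real schema and `{"type": "null"}`; collapsing the single non-null branch lets the parameter doc carry a plain `type`. A sketch of the shape it handles (abridged Pydantic output):

```python
# Abridged schema Pydantic emits for `keyword: str | None = None`
# with a max_length constraint:
prop = {
    "anyOf": [{"type": "string", "maxLength": 50}, {"type": "null"}],
    "default": None,
}

# The single non-null branch is merged into the property schema, so the
# query-param builder sees a plain {"type": "string", "maxLength": 50, ...}.
merged = _nullable_property_schema(prop)
assert merged["type"] == "string" and merged["maxLength"] == 50
```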
__all__ = [ __all__ = [
"DEFAULT_REF_TEMPLATE_SWAGGER_2_0", "DEFAULT_REF_TEMPLATE_SWAGGER_2_0",
"get_or_create_model", "get_or_create_model",
"query_params_from_model",
"register_enum_models", "register_enum_models",
"register_response_schema_model",
"register_response_schema_models",
"register_schema_model", "register_schema_model",
"register_schema_models", "register_schema_models",
] ]

View File

@ -65,7 +65,6 @@ from .app import (
statistic, statistic,
workflow, workflow,
workflow_app_log, workflow_app_log,
workflow_comment,
workflow_draft_variable, workflow_draft_variable,
workflow_run, workflow_run,
workflow_statistic, workflow_statistic,
@ -117,7 +116,6 @@ from .explore import (
saved_message, saved_message,
trial, trial,
) )
from .socketio import workflow as socketio_workflow # pyright: ignore[reportUnusedImport]
# Import tag controllers # Import tag controllers
from .tag import tags from .tag import tags
@ -203,7 +201,6 @@ __all__ = [
"saved_message", "saved_message",
"setup", "setup",
"site", "site",
"socketio_workflow",
"spec", "spec",
"statistic", "statistic",
"tags", "tags",
@ -214,7 +211,6 @@ __all__ = [
"website", "website",
"workflow", "workflow",
"workflow_app_log", "workflow_app_log",
"workflow_comment",
"workflow_draft_variable", "workflow_draft_variable",
"workflow_run", "workflow_run",
"workflow_statistic", "workflow_statistic",

View File

@ -2,8 +2,7 @@ import csv
import io import io
from collections.abc import Callable from collections.abc import Callable
from functools import wraps from functools import wraps
from typing import cast from typing import ParamSpec, TypeVar
from uuid import UUID
from flask import request from flask import request
from flask_restx import Resource from flask_restx import Resource
@ -13,14 +12,18 @@ from werkzeug.exceptions import BadRequest, NotFound, Unauthorized
from configs import dify_config from configs import dify_config
from constants.languages import supported_language from constants.languages import supported_language
from controllers.common.schema import register_schema_models
from controllers.console import console_ns from controllers.console import console_ns
from controllers.console.wraps import only_edition_cloud from controllers.console.wraps import only_edition_cloud
from core.db.session_factory import session_factory from core.db.session_factory import session_factory
from extensions.ext_database import db from extensions.ext_database import db
from libs.token import extract_access_token from libs.token import extract_access_token
from models.model import App, ExporleBanner, InstalledApp, RecommendedApp, TrialApp from models.model import App, ExporleBanner, InstalledApp, RecommendedApp, TrialApp
from services.billing_service import BillingService, LangContentDict from services.billing_service import BillingService
P = ParamSpec("P")
R = TypeVar("R")
DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}"
class InsertExploreAppPayload(BaseModel): class InsertExploreAppPayload(BaseModel):
@ -58,12 +61,20 @@ class InsertExploreBannerPayload(BaseModel):
model_config = {"populate_by_name": True} model_config = {"populate_by_name": True}
register_schema_models(console_ns, InsertExploreAppPayload, InsertExploreBannerPayload) console_ns.schema_model(
InsertExploreAppPayload.__name__,
InsertExploreAppPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0),
)
console_ns.schema_model(
InsertExploreBannerPayload.__name__,
InsertExploreBannerPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0),
)
def admin_required[**P, R](view: Callable[P, R]) -> Callable[P, R]: def admin_required(view: Callable[P, R]):
@wraps(view) @wraps(view)
def decorated(*args: P.args, **kwargs: P.kwargs) -> R: def decorated(*args: P.args, **kwargs: P.kwargs):
if not dify_config.ADMIN_API_KEY: if not dify_config.ADMIN_API_KEY:
raise Unauthorized("API key is invalid.") raise Unauthorized("API key is invalid.")
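The left column rewrites `admin_required` with Python 3.12's PEP 695 type-parameter syntax, so `P` and `R` are scoped to the decorator instead of living as module-level `ParamSpec`/`TypeVar` globals, and the return type is now explicit. A self-contained sketch of the same pattern with a hypothetical decorator:

```python
from collections.abc import Callable
from functools import wraps

# Python 3.12+ syntax: [**P, R] declares the type parameters inline,
# replacing module-level P = ParamSpec("P") / R = TypeVar("R").
def logged[**P, R](view: Callable[P, R]) -> Callable[P, R]:
    @wraps(view)
    def decorated(*args: P.args, **kwargs: P.kwargs) -> R:
        print(f"calling {view.__name__}")  # illustrative side effect
        return view(*args, **kwargs)

    return decorated

@logged
def add(a: int, b: int) -> int:
    return a + b

print(add(1, 2))  # prints "calling add", then 3
```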
@ -182,7 +193,7 @@ class InsertExploreAppApi(Resource):
@console_ns.response(204, "App removed successfully") @console_ns.response(204, "App removed successfully")
@only_edition_cloud @only_edition_cloud
@admin_required @admin_required
def delete(self, app_id: UUID): def delete(self, app_id):
with session_factory.create_session() as session: with session_factory.create_session() as session:
recommended_app = session.execute( recommended_app = session.execute(
select(RecommendedApp).where(RecommendedApp.app_id == str(app_id)) select(RecommendedApp).where(RecommendedApp.app_id == str(app_id))
@ -293,7 +304,15 @@ class BatchAddNotificationAccountsPayload(BaseModel):
user_email: list[str] = Field(..., description="List of account email addresses") user_email: list[str] = Field(..., description="List of account email addresses")
register_schema_models(console_ns, UpsertNotificationPayload, BatchAddNotificationAccountsPayload) console_ns.schema_model(
UpsertNotificationPayload.__name__,
UpsertNotificationPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0),
)
console_ns.schema_model(
BatchAddNotificationAccountsPayload.__name__,
BatchAddNotificationAccountsPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0),
)
@console_ns.route("/admin/upsert_notification") @console_ns.route("/admin/upsert_notification")
@ -313,7 +332,7 @@ class UpsertNotificationApi(Resource):
def post(self): def post(self):
payload = UpsertNotificationPayload.model_validate(console_ns.payload) payload = UpsertNotificationPayload.model_validate(console_ns.payload)
result = BillingService.upsert_notification( result = BillingService.upsert_notification(
contents=[cast(LangContentDict, c.model_dump()) for c in payload.contents], contents=[c.model_dump() for c in payload.contents],
frequency=payload.frequency, frequency=payload.frequency,
status=payload.status, status=payload.status,
notification_id=payload.notification_id, notification_id=payload.notification_id,
@ -395,11 +414,11 @@ class BatchAddNotificationAccountsApi(Resource):
raise BadRequest("Invalid file type. Only CSV (.csv) and TXT (.txt) files are allowed.") raise BadRequest("Invalid file type. Only CSV (.csv) and TXT (.txt) files are allowed.")
try: try:
content = file.stream.read().decode("utf-8") content = file.read().decode("utf-8")
except UnicodeDecodeError: except UnicodeDecodeError:
try: try:
file.stream.seek(0) file.seek(0)
content = file.stream.read().decode("gbk") content = file.read().decode("gbk")
except UnicodeDecodeError: except UnicodeDecodeError:
raise BadRequest("Unable to decode the file. Please use UTF-8 or GBK encoding.") raise BadRequest("Unable to decode the file. Please use UTF-8 or GBK encoding.")

View File

@ -1,16 +1,12 @@
from datetime import datetime
import flask_restx import flask_restx
from flask_restx import Resource from flask_restx import Resource, fields, marshal_with
from flask_restx._http import HTTPStatus from flask_restx._http import HTTPStatus
from pydantic import field_validator
from sqlalchemy import delete, func, select from sqlalchemy import delete, func, select
from sqlalchemy.orm import sessionmaker from sqlalchemy.orm import Session
from werkzeug.exceptions import Forbidden from werkzeug.exceptions import Forbidden
from controllers.common.schema import register_schema_models
from extensions.ext_database import db from extensions.ext_database import db
from fields.base import ResponseModel from libs.helper import TimestampField
from libs.login import current_account_with_tenant, login_required from libs.login import current_account_with_tenant, login_required
from models.dataset import Dataset from models.dataset import Dataset
from models.enums import ApiTokenType from models.enums import ApiTokenType
@ -20,35 +16,25 @@ from services.api_token_service import ApiTokenCache
from . import console_ns from . import console_ns
from .wraps import account_initialization_required, edit_permission_required, setup_required from .wraps import account_initialization_required, edit_permission_required, setup_required
api_key_fields = {
"id": fields.String,
"type": fields.String,
"token": fields.String,
"last_used_at": TimestampField,
"created_at": TimestampField,
}
def _to_timestamp(value: datetime | int | None) -> int | None: api_key_item_model = console_ns.model("ApiKeyItem", api_key_fields)
if isinstance(value, datetime):
return int(value.timestamp())
return value
api_key_list = {"data": fields.List(fields.Nested(api_key_item_model), attribute="items")}
class ApiKeyItem(ResponseModel): api_key_list_model = console_ns.model(
id: str "ApiKeyList", {"data": fields.List(fields.Nested(api_key_item_model), attribute="items")}
type: str )
token: str
last_used_at: int | None = None
created_at: int | None = None
@field_validator("last_used_at", "created_at", mode="before")
@classmethod
def _normalize_timestamp(cls, value: datetime | int | None) -> int | None:
return _to_timestamp(value)
class ApiKeyList(ResponseModel):
data: list[ApiKeyItem]
register_schema_models(console_ns, ApiKeyItem, ApiKeyList)
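The `mode="before"` validator is what replaces the old `TimestampField`: ORM rows hand the model `datetime` objects, and the validator normalizes them to epoch seconds before field validation runs. A sketch with a hypothetical stand-in for an `ApiToken` row:

```python
from datetime import datetime, timezone

# Hypothetical stand-in shaped like an ApiToken ORM row.
class FakeToken:
    id = "abc123"
    type = "app"
    token = "app-key"
    last_used_at = None
    created_at = datetime(2024, 1, 1, tzinfo=timezone.utc)

item = ApiKeyItem.model_validate(FakeToken, from_attributes=True)
print(item.created_at)    # 1704067200 -- datetime coerced to epoch seconds
print(item.last_used_at)  # None passes through unchanged
```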
def _get_resource(resource_id, tenant_id, resource_model): def _get_resource(resource_id, tenant_id, resource_model):
with sessionmaker(db.engine).begin() as session: with Session(db.engine) as session:
resource = session.execute( resource = session.execute(
select(resource_model).filter_by(id=resource_id, tenant_id=tenant_id) select(resource_model).filter_by(id=resource_id, tenant_id=tenant_id)
).scalar_one_or_none() ).scalar_one_or_none()
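`sessionmaker(db.engine).begin()` differs from the bare `Session(db.engine)` it replaces: the context manager opens a transaction, commits on clean exit, and rolls back if the block raises, so no explicit `commit()` is needed. A minimal sketch against a throwaway in-memory engine:

```python
from sqlalchemy import create_engine, select
from sqlalchemy.orm import sessionmaker

engine = create_engine("sqlite://")  # illustrative in-memory engine

# begin() yields a Session inside a transaction: commit on success,
# rollback on exception -- unlike a plain Session(engine) block.
with sessionmaker(engine).begin() as session:
    print(session.execute(select(1)).scalar_one())  # 1
```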
@ -68,6 +54,7 @@ class BaseApiKeyListResource(Resource):
token_prefix: str | None = None token_prefix: str | None = None
max_keys = 10 max_keys = 10
@marshal_with(api_key_list_model)
def get(self, resource_id): def get(self, resource_id):
assert self.resource_id_field is not None, "resource_id_field must be set" assert self.resource_id_field is not None, "resource_id_field must be set"
resource_id = str(resource_id) resource_id = str(resource_id)
@ -79,8 +66,9 @@ class BaseApiKeyListResource(Resource):
ApiToken.type == self.resource_type, getattr(ApiToken, self.resource_id_field) == resource_id ApiToken.type == self.resource_type, getattr(ApiToken, self.resource_id_field) == resource_id
) )
).all() ).all()
return ApiKeyList.model_validate({"data": keys}, from_attributes=True).model_dump(mode="json") return {"items": keys}
@marshal_with(api_key_item_model)
@edit_permission_required @edit_permission_required
def post(self, resource_id): def post(self, resource_id):
assert self.resource_id_field is not None, "resource_id_field must be set" assert self.resource_id_field is not None, "resource_id_field must be set"
@ -112,7 +100,7 @@ class BaseApiKeyListResource(Resource):
api_token.type = self.resource_type api_token.type = self.resource_type
db.session.add(api_token) db.session.add(api_token)
db.session.commit() db.session.commit()
return ApiKeyItem.model_validate(api_token, from_attributes=True).model_dump(mode="json"), 201 return api_token, 201
class BaseApiKeyResource(Resource): class BaseApiKeyResource(Resource):
@ -159,7 +147,7 @@ class AppApiKeyListResource(BaseApiKeyListResource):
@console_ns.doc("get_app_api_keys") @console_ns.doc("get_app_api_keys")
@console_ns.doc(description="Get all API keys for an app") @console_ns.doc(description="Get all API keys for an app")
@console_ns.doc(params={"resource_id": "App ID"}) @console_ns.doc(params={"resource_id": "App ID"})
@console_ns.response(200, "API keys retrieved successfully", console_ns.models[ApiKeyList.__name__]) @console_ns.response(200, "Success", api_key_list_model)
def get(self, resource_id): # type: ignore def get(self, resource_id): # type: ignore
"""Get all API keys for an app""" """Get all API keys for an app"""
return super().get(resource_id) return super().get(resource_id)
@ -167,7 +155,7 @@ class AppApiKeyListResource(BaseApiKeyListResource):
@console_ns.doc("create_app_api_key") @console_ns.doc("create_app_api_key")
@console_ns.doc(description="Create a new API key for an app") @console_ns.doc(description="Create a new API key for an app")
@console_ns.doc(params={"resource_id": "App ID"}) @console_ns.doc(params={"resource_id": "App ID"})
@console_ns.response(201, "API key created successfully", console_ns.models[ApiKeyItem.__name__]) @console_ns.response(201, "API key created successfully", api_key_item_model)
@console_ns.response(400, "Maximum keys exceeded") @console_ns.response(400, "Maximum keys exceeded")
def post(self, resource_id): # type: ignore def post(self, resource_id): # type: ignore
"""Create a new API key for an app""" """Create a new API key for an app"""
@ -199,7 +187,7 @@ class DatasetApiKeyListResource(BaseApiKeyListResource):
@console_ns.doc("get_dataset_api_keys") @console_ns.doc("get_dataset_api_keys")
@console_ns.doc(description="Get all API keys for a dataset") @console_ns.doc(description="Get all API keys for a dataset")
@console_ns.doc(params={"resource_id": "Dataset ID"}) @console_ns.doc(params={"resource_id": "Dataset ID"})
@console_ns.response(200, "API keys retrieved successfully", console_ns.models[ApiKeyList.__name__]) @console_ns.response(200, "Success", api_key_list_model)
def get(self, resource_id): # type: ignore def get(self, resource_id): # type: ignore
"""Get all API keys for a dataset""" """Get all API keys for a dataset"""
return super().get(resource_id) return super().get(resource_id)
@ -207,7 +195,7 @@ class DatasetApiKeyListResource(BaseApiKeyListResource):
@console_ns.doc("create_dataset_api_key") @console_ns.doc("create_dataset_api_key")
@console_ns.doc(description="Create a new API key for a dataset") @console_ns.doc(description="Create a new API key for a dataset")
@console_ns.doc(params={"resource_id": "Dataset ID"}) @console_ns.doc(params={"resource_id": "Dataset ID"})
@console_ns.response(201, "API key created successfully", console_ns.models[ApiKeyItem.__name__]) @console_ns.response(201, "API key created successfully", api_key_item_model)
@console_ns.response(400, "Maximum keys exceeded") @console_ns.response(400, "Maximum keys exceeded")
def post(self, resource_id): # type: ignore def post(self, resource_id): # type: ignore
"""Create a new API key for a dataset""" """Create a new API key for a dataset"""

Some files were not shown because too many files have changed in this diff