Merge branch 'main' into 4-27-app-deploy

This commit is contained in:
Stephen Zhou 2026-05-11 15:03:10 +08:00
commit 923964a22a
No known key found for this signature in database
538 changed files with 5253 additions and 3681 deletions

View File

@ -63,7 +63,7 @@ pnpm analyze-component <path> --json
```typescript
// ❌ Before: Complex state logic in component
const Configuration: FC = () => {
function Configuration() {
const [modelConfig, setModelConfig] = useState<ModelConfig>(...)
const [datasetConfigs, setDatasetConfigs] = useState<DatasetConfigs>(...)
const [completionParams, setCompletionParams] = useState<FormValue>({})
@ -85,7 +85,7 @@ export const useModelConfig = (appId: string) => {
}
// Component becomes cleaner
const Configuration: FC = () => {
function Configuration() {
const { modelConfig, setModelConfig } = useModelConfig(appId)
return <div>...</div>
}
@ -189,7 +189,6 @@ const Template = useMemo(() => {
**Dify Convention**:
- This skill is for component decomposition, not query/mutation design.
- When refactoring data fetching, follow `web/AGENTS.md`.
- Do not introduce deprecated `useInvalid` / `useReset`.
- Do not add thin passthrough `useQuery` wrappers during refactoring; only extract a custom hook when it truly orchestrates multiple queries/mutations or shared derived state.

View File

@ -60,8 +60,10 @@ const Template = useMemo(() => {
**After** (complexity: ~3):
```typescript
import type { ComponentType } from 'react'
// Define lookup table outside component
const TEMPLATE_MAP: Record<AppModeEnum, Record<string, FC<TemplateProps>>> = {
const TEMPLATE_MAP: Record<AppModeEnum, Record<string, ComponentType<TemplateProps>>> = {
[AppModeEnum.CHAT]: {
[LanguagesSupported[1]]: TemplateChatZh,
[LanguagesSupported[7]]: TemplateChatJa,

View File

@ -65,10 +65,10 @@ interface ConfigurationHeaderProps {
onPublish: () => void
}
const ConfigurationHeader: FC<ConfigurationHeaderProps> = ({
function ConfigurationHeader({
isAdvancedMode,
onPublish,
}) => {
}: ConfigurationHeaderProps) {
const { t } = useTranslation()
return (
@ -136,7 +136,7 @@ const AppInfo = () => {
}
// ✅ After: Separate view components
const AppInfoExpanded: FC<AppInfoViewProps> = ({ appDetail, onAction }) => {
function AppInfoExpanded({ appDetail, onAction }: AppInfoViewProps) {
return (
<div className="expanded">
{/* Clean, focused expanded view */}
@ -144,7 +144,7 @@ const AppInfoExpanded: FC<AppInfoViewProps> = ({ appDetail, onAction }) => {
)
}
const AppInfoCollapsed: FC<AppInfoViewProps> = ({ appDetail, onAction }) => {
function AppInfoCollapsed({ appDetail, onAction }: AppInfoViewProps) {
return (
<div className="collapsed">
{/* Clean, focused collapsed view */}
@ -203,12 +203,12 @@ interface AppInfoModalsProps {
onSuccess: () => void
}
const AppInfoModals: FC<AppInfoModalsProps> = ({
function AppInfoModals({
appDetail,
activeModal,
onClose,
onSuccess,
}) => {
}: AppInfoModalsProps) {
const handleEdit = async (data) => { /* logic */ }
const handleDuplicate = async (data) => { /* logic */ }
const handleDelete = async () => { /* logic */ }
@ -296,7 +296,7 @@ interface OperationItemProps {
onAction: (id: string) => void
}
const OperationItem: FC<OperationItemProps> = ({ operation, onAction }) => {
function OperationItem({ operation, onAction }: OperationItemProps) {
return (
<div className="operation-item">
<span className="icon">{operation.icon}</span>
@ -435,7 +435,7 @@ interface ChildProps {
onSubmit: () => void
}
const Child: FC<ChildProps> = ({ value, onChange, onSubmit }) => {
function Child({ value, onChange, onSubmit }: ChildProps) {
return (
<div>
<input value={value} onChange={e => onChange(e.target.value)} />

View File

@ -112,13 +112,13 @@ export const useModelConfig = ({
```typescript
// Before: 50+ lines of state management
const Configuration: FC = () => {
function Configuration() {
const [modelConfig, setModelConfig] = useState<ModelConfig>(...)
// ... lots of related state and effects
}
// After: Clean component
const Configuration: FC = () => {
function Configuration() {
const {
modelConfig,
setModelConfig,
@ -159,7 +159,6 @@ const Configuration: FC = () => {
When hook extraction touches query or mutation code, do not use this reference as the source of truth for data-layer patterns.
- Follow `web/AGENTS.md` first.
- Do not introduce deprecated `useInvalid` / `useReset`.
- Do not extract thin passthrough `useQuery` hooks; only extract orchestration hooks.

View File

@ -23,7 +23,7 @@ Use this skill for Dify's repository-level E2E suite in `e2e/`. Use [`e2e/AGENTS
- `e2e/scripts/run-cucumber.ts` and `e2e/cucumber.config.ts` when tags or execution flow matter
3. Read [`references/playwright-best-practices.md`](references/playwright-best-practices.md) only when locator, assertion, isolation, or waiting choices are involved.
4. Read [`references/cucumber-best-practices.md`](references/cucumber-best-practices.md) only when scenario wording, step granularity, tags, or expression design are involved.
5. Re-check official docs with Context7 before introducing a new Playwright or Cucumber pattern.
5. Re-check official Playwright or Cucumber docs with the available documentation tools before introducing a new framework pattern.
## Local Rules

View File

@ -9,18 +9,18 @@ Category: Performance
When rendering React Flow, prefer `useNodes`/`useEdges` for UI consumption and rely on `useStoreApi` inside callbacks that mutate or read node/edge state. Avoid manually pulling Flow data outside of these hooks.
## Complex prop memoization
## Complex prop stability
IsUrgent: True
IsUrgent: False
Category: Performance
### Description
Wrap complex prop values (objects, arrays, maps) in `useMemo` prior to passing them into child components to guarantee stable references and prevent unnecessary renders.
Only require stable object, array, or map props when there is a clear reason: the child is memoized, the value participates in effect/query dependencies, the value is part of a stable-reference API contract, or profiling/local behavior shows avoidable re-renders. Do not request `useMemo` for every inline object by default; `how-to-write-component` treats memoization as a targeted optimization.
Update this file when adding, editing, or removing Performance rules so the catalog remains accurate.
Wrong:
Risky:
```tsx
<HeavyComp
@ -31,7 +31,7 @@ Wrong:
/>
```
Right:
Better when stable identity matters:
```tsx
const config = useMemo(() => ({

View File

@ -5,7 +5,7 @@ description: Generate Vitest + React Testing Library tests for Dify frontend com
# Dify Frontend Testing Skill
This skill enables Claude to generate high-quality, comprehensive frontend tests for the Dify project following established conventions and best practices.
This skill enables Codex to generate high-quality, comprehensive frontend tests for the Dify project following established conventions and best practices.
> **⚠️ Authoritative Source**: This skill is derived from `web/docs/test.md`. Use Vitest mock/timer APIs (`vi.*`).
@ -24,35 +24,27 @@ Apply this skill when the user:
**Do NOT apply** when:
- User is asking about backend/API tests (Python/pytest)
- User is asking about E2E tests (Playwright/Cypress)
- User is asking about E2E tests (Cucumber + Playwright under `e2e/`)
- User is only asking conceptual questions without code context
## Quick Reference
### Tech Stack
| Tool | Version | Purpose |
|------|---------|---------|
| Vitest | 4.0.16 | Test runner |
| React Testing Library | 16.0 | Component testing |
| jsdom | - | Test environment |
| nock | 14.0 | HTTP mocking |
| TypeScript | 5.x | Type safety |
### Key Commands
Run these commands from `web/`. From the repository root, prefix them with `pnpm -C web`.
```bash
# Run all tests
pnpm test
# Watch mode
pnpm test:watch
pnpm test --watch
# Run specific file
pnpm test path/to/file.spec.tsx
# Generate coverage report
pnpm test:coverage
pnpm test --coverage
# Analyze component complexity
pnpm analyze-component <path>
@ -228,7 +220,10 @@ Every test should clearly separate:
### 2. Black-Box Testing
- Test observable behavior, not implementation details
- Use semantic queries (getByRole, getByLabelText)
- Use semantic queries (`getByRole` with accessible `name`, `getByLabelText`, `getByPlaceholderText`, `getByText`, and scoped `within(...)`)
- Treat `getByTestId` as a last resort. If a control cannot be found by role/name, label, landmark, or dialog scope, fix the component accessibility first instead of adding or relying on `data-testid`.
- Remove production `data-testid` attributes when semantic selectors can cover the behavior. Keep them only for non-visual mocked boundaries, editor/browser shims such as Monaco, canvas/chart output, or third-party widgets with no accessible DOM in the test environment.
- Do not assert decorative icons by test id. Assert the named control that contains them, or mark decorative icons `aria-hidden`.
- Avoid testing internal state directly
- **Prefer pattern matching over hardcoded strings** in assertions:

View File

@ -56,7 +56,7 @@ See [Zustand Store Testing](#zustand-store-testing) section for full details.
| Location | Purpose |
|----------|---------|
| `web/vitest.setup.ts` | Global mocks shared by all tests (`react-i18next`, `next/image`, `zustand`) |
| `web/vitest.setup.ts` | Global mocks shared by all tests (`react-i18next`, `zustand`, clipboard, FloatingPortal, Monaco, `localStorage`) |
| `web/__mocks__/zustand.ts` | Zustand mock implementation (auto-resets stores after each test) |
| `web/__mocks__/` | Reusable mock factories shared across multiple test files |
| Test file | Test-specific mocks, inline with `vi.mock()` |
@ -216,28 +216,21 @@ describe('Component', () => {
})
```
### 5. HTTP Mocking with Nock
### 5. HTTP and `fetch` Mocking
```typescript
import nock from 'nock'
const GITHUB_HOST = 'https://api.github.com'
const GITHUB_PATH = '/repos/owner/repo'
const mockGithubApi = (status: number, body: Record<string, unknown>, delayMs = 0) => {
return nock(GITHUB_HOST)
.get(GITHUB_PATH)
.delay(delayMs)
.reply(status, body)
}
describe('GithubComponent', () => {
afterEach(() => {
nock.cleanAll()
beforeEach(() => {
vi.clearAllMocks()
})
it('should display repo info', async () => {
mockGithubApi(200, { name: 'dify', stars: 1000 })
vi.mocked(globalThis.fetch).mockResolvedValueOnce(
new Response(JSON.stringify({ name: 'dify', stars: 1000 }), {
status: 200,
headers: { 'Content-Type': 'application/json' },
}),
)
render(<GithubComponent />)
@ -247,7 +240,12 @@ describe('GithubComponent', () => {
})
it('should handle API error', async () => {
mockGithubApi(500, { message: 'Server error' })
vi.mocked(globalThis.fetch).mockResolvedValueOnce(
new Response(JSON.stringify({ message: 'Server error' }), {
status: 500,
headers: { 'Content-Type': 'application/json' },
}),
)
render(<GithubComponent />)
@ -258,6 +256,8 @@ describe('GithubComponent', () => {
})
```
Prefer mocking `@/service/*` modules or spying on `global.fetch` / `ky` clients with deterministic responses. Do not introduce an HTTP interception dependency such as `nock` or MSW unless it is already declared in the workspace or adding it is part of the task.
### 6. Context Providers
```typescript
@ -332,7 +332,7 @@ const renderWithQueryClient = (ui: React.ReactElement) => {
1. **Don't mock Zustand store modules** - Use real stores with `setState()`
1. Don't mock components you can import directly
1. Don't create overly simplified mocks that miss conditional logic
1. Don't forget to clean up nock after each test
1. Don't leave HTTP mocks or service mock state leaking between tests
1. Don't use `any` types in mocks without necessity
### Mock Decision Tree

View File

@ -227,12 +227,12 @@ Failing tests compound:
**Fix failures immediately before proceeding.**
## Integration with Claude's Todo Feature
## Integration with Codex's Todo Feature
When using Claude for multi-file testing:
When using Codex for multi-file testing:
1. **Ask Claude to create a todo list** before starting
1. **Request one file at a time** or ensure Claude processes incrementally
1. **Create a todo list** before starting
1. **Process one file at a time**
1. **Verify each test passes** before asking for the next
1. **Mark todos complete** as you progress

View File

@ -45,23 +45,20 @@ description: Tailwind CSS v4.1+ rules and best practices. Use when writing, revi
| `decoration-slice` | `box-decoration-slice` |
| `decoration-clone` | `box-decoration-clone` |
### Renamed Utilities (ALWAYS use the v4 name)
### Renamed Utilities
| ❌ v3 | ✅ v4 |
| ------------------ | ------------------ |
| `bg-gradient-*` | `bg-linear-*` |
| `shadow-sm` | `shadow-xs` |
| `shadow` | `shadow-sm` |
| `drop-shadow-sm` | `drop-shadow-xs` |
| `drop-shadow` | `drop-shadow-sm` |
| `blur-sm` | `blur-xs` |
| `blur` | `blur-sm` |
| `backdrop-blur-sm` | `backdrop-blur-xs` |
| `backdrop-blur` | `backdrop-blur-sm` |
| `rounded-sm` | `rounded-xs` |
| `rounded` | `rounded-sm` |
| `outline-none` | `outline-hidden` |
| `ring` | `ring-3` |
Use the v4 name when migrating code that still carries Tailwind v3 semantics. Do not blanket-replace existing v4 classes: classes such as `rounded-sm`, `shadow-sm`, `ring-1`, and `ring-2` are valid in this codebase when they intentionally represent the current design scale.
| ❌ v3 pattern | ✅ v4 pattern |
| ------------------- | -------------------------------------------------- |
| `bg-gradient-*` | `bg-linear-*` |
| old shadow scale | verify against the current Tailwind/design scale |
| old blur scale | verify against the current Tailwind/design scale |
| old radius scale | use the Dify radius token mapping when applicable |
| `outline-none` | `outline-hidden` |
| bare `ring` utility | use an explicit ring width such as `ring-1`/`ring-2`/`ring-3` |
For Figma radius tokens, follow `packages/dify-ui/AGENTS.md`. For example, `--radius/xs` maps to `rounded-sm`; do not rewrite it to `rounded-xs`.
## Layout and Spacing Rules

View File

@ -9,6 +9,6 @@ jobs:
pull-requests: write
runs-on: depot-ubuntu-24.04
steps:
- uses: actions/labeler@634933edcd8ababfe52f92936142cc22ac488b1b # v6.0.1
- uses: actions/labeler@f27b608878404679385c85cfa523b85ccb86e213 # v6.1.0
with:
sync-labels: true

View File

@ -158,7 +158,7 @@ jobs:
- name: Run Claude Code for Translation Sync
if: steps.context.outputs.CHANGED_FILES != ''
uses: anthropics/claude-code-action@fefa07e9c665b7320f08c3b525980457f22f58aa # v1.0.111
uses: anthropics/claude-code-action@476e359e6203e73dad705c8b322e333fabbd7416 # v1.0.119
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
github_token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -3,6 +3,10 @@ DOCKER_REGISTRY=langgenius
WEB_IMAGE=$(DOCKER_REGISTRY)/dify-web
API_IMAGE=$(DOCKER_REGISTRY)/dify-api
VERSION=latest
DOCKER_DIR=docker
DOCKER_MIDDLEWARE_ENV=$(DOCKER_DIR)/middleware.env
DOCKER_MIDDLEWARE_ENV_EXAMPLE=$(DOCKER_DIR)/envs/middleware.env.example
DOCKER_MIDDLEWARE_PROJECT=dify-middlewares-dev
# Default target - show help
.DEFAULT_GOAL := help
@ -17,8 +21,13 @@ dev-setup: prepare-docker prepare-web prepare-api
# Step 1: Prepare Docker middleware
prepare-docker:
@echo "🐳 Setting up Docker middleware..."
@cp -n docker/middleware.env.example docker/middleware.env 2>/dev/null || echo "Docker middleware.env already exists"
@cd docker && docker compose -f docker-compose.middleware.yaml --env-file middleware.env -p dify-middlewares-dev up -d
@if [ ! -f "$(DOCKER_MIDDLEWARE_ENV)" ]; then \
cp "$(DOCKER_MIDDLEWARE_ENV_EXAMPLE)" "$(DOCKER_MIDDLEWARE_ENV)"; \
echo "Docker middleware.env created"; \
else \
echo "Docker middleware.env already exists"; \
fi
@cd $(DOCKER_DIR) && docker compose -f docker-compose.middleware.yaml --env-file middleware.env -p $(DOCKER_MIDDLEWARE_PROJECT) up -d
@echo "✅ Docker middleware started"
# Step 2: Prepare web environment
@ -39,12 +48,18 @@ prepare-api:
# Clean dev environment
dev-clean:
@echo "⚠️ Stopping Docker containers..."
@cd docker && docker compose -f docker-compose.middleware.yaml --env-file middleware.env -p dify-middlewares-dev down
@if [ -f "$(DOCKER_MIDDLEWARE_ENV)" ]; then \
cd $(DOCKER_DIR) && docker compose -f docker-compose.middleware.yaml --env-file middleware.env -p $(DOCKER_MIDDLEWARE_PROJECT) down; \
else \
echo "Docker middleware.env does not exist, skipping compose down"; \
fi
@echo "🗑️ Removing volumes..."
@rm -rf docker/volumes/db
@rm -rf docker/volumes/mysql
@rm -rf docker/volumes/redis
@rm -rf docker/volumes/plugin_daemon
@rm -rf docker/volumes/weaviate
@rm -rf docker/volumes/sandbox/dependencies
@rm -rf api/storage
@echo "✅ Cleanup complete"
@ -132,7 +147,7 @@ help:
@echo " make prepare-docker - Set up Docker middleware"
@echo " make prepare-web - Set up web environment"
@echo " make prepare-api - Set up API environment"
@echo " make dev-clean - Stop Docker middleware containers"
@echo " make dev-clean - Stop Docker middleware containers and remove dev data"
@echo ""
@echo "Backend Code Quality:"
@echo " make format - Format code with ruff"

View File

@ -34,7 +34,7 @@ TRIGGER_URL=http://localhost:5001
FILES_ACCESS_TIMEOUT=300
# Collaboration mode toggle
ENABLE_COLLABORATION_MODE=false
ENABLE_COLLABORATION_MODE=true
# Access token expiration time in minutes
ACCESS_TOKEN_EXPIRE_MINUTES=60

View File

@ -1298,7 +1298,7 @@ class PositionConfig(BaseSettings):
class CollaborationConfig(BaseSettings):
ENABLE_COLLABORATION_MODE: bool = Field(
description="Whether to enable collaboration mode features across the workspace",
default=False,
default=True,
)

View File

@ -39,7 +39,7 @@ from libs.login import current_account_with_tenant, login_required
from models import App, DatasetPermissionEnum, Workflow
from models.model import IconType
from services.app_dsl_service import AppDslService
from services.app_service import AppService
from services.app_service import AppListParams, AppService, CreateAppParams
from services.enterprise.enterprise_service import EnterpriseService
from services.entities.dsl_entities import ImportMode, ImportStatus
from services.entities.knowledge_entities.knowledge_entities import (
@ -478,11 +478,18 @@ class AppListApi(Resource):
current_user, current_tenant_id = current_account_with_tenant()
args = AppListQuery.model_validate(_normalize_app_list_query_args(request.args))
args_dict = args.model_dump()
params = AppListParams(
page=args.page,
limit=args.limit,
mode=args.mode,
name=args.name,
tag_ids=args.tag_ids,
is_created_by_me=args.is_created_by_me,
)
# get app list
app_service = AppService()
app_pagination = app_service.get_paginate_apps(current_user.id, current_tenant_id, args_dict)
app_pagination = app_service.get_paginate_apps(current_user.id, current_tenant_id, params)
if not app_pagination:
empty = AppPagination(page=args.page, limit=args.limit, total=0, has_more=False, data=[])
return empty.model_dump(mode="json"), 200
@ -546,9 +553,17 @@ class AppListApi(Resource):
"""Create app"""
current_user, current_tenant_id = current_account_with_tenant()
args = CreateAppPayload.model_validate(console_ns.payload)
params = CreateAppParams(
name=args.name,
description=args.description,
mode=args.mode,
icon_type=args.icon_type,
icon=args.icon,
icon_background=args.icon_background,
)
app_service = AppService()
app = app_service.create_app(current_tenant_id, args.model_dump(), current_user)
app = app_service.create_app(current_tenant_id, params, current_user)
app_detail = AppDetail.model_validate(app, from_attributes=True)
return app_detail.model_dump(mode="json"), 201

View File

@ -606,63 +606,63 @@ class DatasetIndexingEstimateApi(Resource):
# validate args
DocumentService.estimate_args_validate(args)
extract_settings = []
if args["info_list"]["data_source_type"] == "upload_file":
file_ids = args["info_list"]["file_info_list"]["file_ids"]
file_details = db.session.scalars(
select(UploadFile).where(UploadFile.tenant_id == current_tenant_id, UploadFile.id.in_(file_ids))
).all()
match args["info_list"]["data_source_type"]:
case "upload_file":
file_ids = args["info_list"]["file_info_list"]["file_ids"]
file_details = db.session.scalars(
select(UploadFile).where(UploadFile.tenant_id == current_tenant_id, UploadFile.id.in_(file_ids))
).all()
if file_details is None:
raise NotFound("File not found.")
if file_details is None:
raise NotFound("File not found.")
if file_details:
for file_detail in file_details:
if file_details:
for file_detail in file_details:
extract_setting = ExtractSetting(
datasource_type=DatasourceType.FILE,
upload_file=file_detail,
document_model=args["doc_form"],
)
extract_settings.append(extract_setting)
case "notion_import":
notion_info_list = args["info_list"]["notion_info_list"]
for notion_info in notion_info_list:
workspace_id = notion_info["workspace_id"]
credential_id = notion_info.get("credential_id")
for page in notion_info["pages"]:
extract_setting = ExtractSetting(
datasource_type=DatasourceType.NOTION,
notion_info=NotionInfo.model_validate(
{
"credential_id": credential_id,
"notion_workspace_id": workspace_id,
"notion_obj_id": page["page_id"],
"notion_page_type": page["type"],
"tenant_id": current_tenant_id,
}
),
document_model=args["doc_form"],
)
extract_settings.append(extract_setting)
case "website_crawl":
website_info_list = args["info_list"]["website_info_list"]
for url in website_info_list["urls"]:
extract_setting = ExtractSetting(
datasource_type=DatasourceType.FILE,
upload_file=file_detail,
document_model=args["doc_form"],
)
extract_settings.append(extract_setting)
elif args["info_list"]["data_source_type"] == "notion_import":
notion_info_list = args["info_list"]["notion_info_list"]
for notion_info in notion_info_list:
workspace_id = notion_info["workspace_id"]
credential_id = notion_info.get("credential_id")
for page in notion_info["pages"]:
extract_setting = ExtractSetting(
datasource_type=DatasourceType.NOTION,
notion_info=NotionInfo.model_validate(
datasource_type=DatasourceType.WEBSITE,
website_info=WebsiteInfo.model_validate(
{
"credential_id": credential_id,
"notion_workspace_id": workspace_id,
"notion_obj_id": page["page_id"],
"notion_page_type": page["type"],
"provider": website_info_list["provider"],
"job_id": website_info_list["job_id"],
"url": url,
"tenant_id": current_tenant_id,
"mode": "crawl",
"only_main_content": website_info_list["only_main_content"],
}
),
document_model=args["doc_form"],
)
extract_settings.append(extract_setting)
elif args["info_list"]["data_source_type"] == "website_crawl":
website_info_list = args["info_list"]["website_info_list"]
for url in website_info_list["urls"]:
extract_setting = ExtractSetting(
datasource_type=DatasourceType.WEBSITE,
website_info=WebsiteInfo.model_validate(
{
"provider": website_info_list["provider"],
"job_id": website_info_list["job_id"],
"url": url,
"tenant_id": current_tenant_id,
"mode": "crawl",
"only_main_content": website_info_list["only_main_content"],
}
),
document_model=args["doc_form"],
)
extract_settings.append(extract_setting)
else:
raise ValueError("Data source type not support")
case _:
raise ValueError("Data source type not support")
indexing_runner = IndexingRunner()
try:
response = indexing_runner.indexing_estimate(

View File

@ -369,28 +369,31 @@ class DatasetDocumentListApi(Resource):
else:
sort_logic = asc
if sort == "hit_count":
sub_query = (
sa.select(DocumentSegment.document_id, sa.func.sum(DocumentSegment.hit_count).label("total_hit_count"))
.where(DocumentSegment.dataset_id == str(dataset_id))
.group_by(DocumentSegment.document_id)
.subquery()
)
match sort:
case "hit_count":
sub_query = (
sa.select(
DocumentSegment.document_id, sa.func.sum(DocumentSegment.hit_count).label("total_hit_count")
)
.where(DocumentSegment.dataset_id == str(dataset_id))
.group_by(DocumentSegment.document_id)
.subquery()
)
query = query.outerjoin(sub_query, sub_query.c.document_id == Document.id).order_by(
sort_logic(sa.func.coalesce(sub_query.c.total_hit_count, 0)),
sort_logic(Document.position),
)
elif sort == "created_at":
query = query.order_by(
sort_logic(Document.created_at),
sort_logic(Document.position),
)
else:
query = query.order_by(
desc(Document.created_at),
desc(Document.position),
)
query = query.outerjoin(sub_query, sub_query.c.document_id == Document.id).order_by(
sort_logic(sa.func.coalesce(sub_query.c.total_hit_count, 0)),
sort_logic(Document.position),
)
case "created_at":
query = query.order_by(
sort_logic(Document.created_at),
sort_logic(Document.position),
)
case _:
query = query.order_by(
desc(Document.created_at),
desc(Document.position),
)
paginated_documents = db.paginate(select=query, page=page, per_page=limit, max_per_page=100, error_out=False)
documents = paginated_documents.items

View File

@ -532,7 +532,6 @@ class BaseAgentRunner(AppRunner):
file_objs = file_factory.build_from_message_files(
message_files=files,
tenant_id=self.tenant_id,
config=file_extra_config,
access_controller=_file_access_controller,
)
if not file_objs:

View File

@ -324,9 +324,10 @@ class IndexingRunner:
# one extract_setting is one source document
for extract_setting in extract_settings:
# extract
processing_rule = DatasetProcessRule(
mode=tmp_processing_rule["mode"], rules=json.dumps(tmp_processing_rule["rules"])
)
processing_rule = {
"mode": tmp_processing_rule["mode"],
"rules": tmp_processing_rule.get("rules"),
}
# Extract document content
text_docs = index_processor.extract(extract_setting, process_rule_mode=tmp_processing_rule["mode"])
# Cleaning and segmentation
@ -334,7 +335,7 @@ class IndexingRunner:
text_docs,
current_user=None,
embedding_model_instance=embedding_model_instance,
process_rule=processing_rule.to_dict(),
process_rule=processing_rule,
tenant_id=tenant_id,
doc_language=doc_language,
preview=True,

View File

@ -86,12 +86,10 @@ class TokenBufferMemory:
detail = ImagePromptMessageContent.DETAIL.HIGH
if file_extra_config and app_record:
# Build files directly without filtering by belongs_to
file_objs = [
file_factory.build_from_message_file(
message_file=message_file,
tenant_id=app_record.tenant_id,
config=file_extra_config,
access_controller=_file_access_controller,
)
for message_file in message_files

View File

@ -123,12 +123,15 @@ class SimplePromptTransform(PromptTransform):
for v in special_variable_keys:
# support #context#, #query# and #histories#
if v == "#context#":
variables["#context#"] = context or ""
elif v == "#query#":
variables["#query#"] = query or ""
elif v == "#histories#":
variables["#histories#"] = histories or ""
match v:
case "#context#":
variables["#context#"] = context or ""
case "#query#":
variables["#query#"] = query or ""
case "#histories#":
variables["#histories#"] = histories or ""
case _:
pass
prompt_template = prompt_template_config["prompt_template"]
if not isinstance(prompt_template, PromptTemplateParser):

View File

@ -29,6 +29,7 @@ from libs import helper
from models import Account
from models.dataset import ChildChunk, Dataset, DatasetProcessRule, DocumentSegment
from models.dataset import Document as DatasetDocument
from models.enums import ProcessRuleMode
from services.account_service import AccountService
from services.summary_index_service import SummaryIndexService
@ -325,7 +326,7 @@ class ParentChildIndexProcessor(BaseIndexProcessor):
# update document parent mode
dataset_process_rule = DatasetProcessRule(
dataset_id=dataset.id,
mode="hierarchical",
mode=ProcessRuleMode.HIERARCHICAL,
rules=json.dumps(
{
"parent_mode": parent_childs.parent_mode,

View File

@ -378,6 +378,7 @@ class DifyToolNodeRuntime(ToolNodeRuntimeProtocol):
node_id: str,
node_data: ToolNodeData,
variable_pool,
node_execution_id: str | None = None,
) -> ToolRuntimeHandle:
try:
tool_runtime = ToolManager.get_workflow_tool_runtime(

View File

@ -1,11 +1,18 @@
"""Adapters from persisted message files to graph-layer file values."""
"""Adapters from persisted message files to graph-layer file values.
Replay paths only: files in conversation history were validated at upload time,
so these helpers deliberately do not accept (or forward) a ``FileUploadConfig`` —
re-validation here would break replays whenever workflow ``file_upload`` config
drifts between rounds. Mirrors ``build_file_from_stored_mapping`` in
``models/utils/file_input_compat.py``.
"""
from __future__ import annotations
from collections.abc import Sequence
from core.app.file_access import FileAccessControllerProtocol
from graphon.file import File, FileBelongsTo, FileTransferMethod, FileUploadConfig
from graphon.file import File, FileBelongsTo, FileTransferMethod
from models import MessageFile
from .builders import build_from_mapping
@ -15,14 +22,12 @@ def build_from_message_files(
*,
message_files: Sequence[MessageFile],
tenant_id: str,
config: FileUploadConfig | None = None,
access_controller: FileAccessControllerProtocol,
) -> Sequence[File]:
return [
build_from_message_file(
message_file=message_file,
tenant_id=tenant_id,
config=config,
access_controller=access_controller,
)
for message_file in message_files
@ -34,7 +39,6 @@ def build_from_message_file(
*,
message_file: MessageFile,
tenant_id: str,
config: FileUploadConfig | None,
access_controller: FileAccessControllerProtocol,
) -> File:
mapping = {
@ -54,6 +58,5 @@ def build_from_message_file(
return build_from_mapping(
mapping=mapping,
tenant_id=tenant_id,
config=config,
access_controller=access_controller,
)

View File

@ -2,9 +2,25 @@
from __future__ import annotations
from collections.abc import Iterable
from graphon.file import FileTransferMethod, FileType, FileUploadConfig
def _normalize_extension(extension: str) -> str:
s = extension.strip().lower()
if not s:
return ""
return s if s.startswith(".") else "." + s
def _extension_matches(extension: str, whitelist: Iterable[str]) -> bool:
normalized = _normalize_extension(extension)
if not normalized:
return False
return normalized in {_normalize_extension(e) for e in whitelist}
def is_file_valid_with_config(
*,
input_file_type: str,
@ -12,22 +28,31 @@ def is_file_valid_with_config(
file_transfer_method: FileTransferMethod,
config: FileUploadConfig,
) -> bool:
# FIXME(QIN2DIM): Always allow tool files (files generated by the assistant/model)
# These are internally generated and should bypass user upload restrictions
"""Return whether the file is allowed by the upload config.
``allowed_file_types`` lists the buckets a file may fall into; ``CUSTOM`` is
a fallback bucket gated by ``allowed_file_extensions`` (case- and
dot-insensitive). Tool-generated files bypass user-facing config.
"""
if file_transfer_method == FileTransferMethod.TOOL_FILE:
return True
if (
config.allowed_file_types
and input_file_type not in config.allowed_file_types
and input_file_type != FileType.CUSTOM
):
allowed_types = config.allowed_file_types or []
custom_allowed = FileType.CUSTOM in allowed_types
type_allowed = not allowed_types or input_file_type in allowed_types
if not type_allowed and not custom_allowed:
return False
# When the file is in the CUSTOM bucket, the extension whitelist is authoritative.
# An explicitly set whitelist (including the empty list) is enforced; empty == deny —
# the UI never submits an empty list, so this guards against DSL/API paths that
# bypass the UI from accidentally widening the allowlist.
in_custom_bucket = input_file_type == FileType.CUSTOM or not type_allowed
if (
input_file_type == FileType.CUSTOM
in_custom_bucket
and config.allowed_file_extensions is not None
and file_extension not in config.allowed_file_extensions
and not _extension_matches(file_extension, config.allowed_file_extensions)
):
return False

View File

@ -11,7 +11,7 @@ import time
from collections.abc import Sequence
from datetime import datetime
from json import JSONDecodeError
from typing import Any, TypedDict, cast
from typing import Any, ClassVar, TypedDict, cast
from uuid import uuid4
import sqlalchemy as sa
@ -441,23 +441,27 @@ class Dataset(Base):
return f"{dify_config.VECTOR_INDEX_NAME_PREFIX}_{normalized_dataset_id}_Node"
class DatasetProcessRule(Base): # bug
class DatasetProcessRule(TypeBase):
__tablename__ = "dataset_process_rules"
__table_args__ = (
sa.PrimaryKeyConstraint("id", name="dataset_process_rule_pkey"),
sa.Index("dataset_process_rule_dataset_id_idx", "dataset_id"),
)
id = mapped_column(StringUUID, nullable=False, default=lambda: str(uuid4()))
dataset_id = mapped_column(StringUUID, nullable=False)
mode = mapped_column(EnumText(ProcessRuleMode, length=255), nullable=False, server_default=sa.text("'automatic'"))
rules = mapped_column(LongText, nullable=True)
created_by = mapped_column(StringUUID, nullable=False)
created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp())
id: Mapped[str] = mapped_column(StringUUID, nullable=False, default_factory=lambda: str(uuid4()), init=False)
dataset_id: Mapped[str] = mapped_column(StringUUID, nullable=False)
mode: Mapped[ProcessRuleMode] = mapped_column(
EnumText(ProcessRuleMode, length=255), nullable=False, server_default=sa.text("'automatic'")
)
rules: Mapped[str | None] = mapped_column(LongText, nullable=True)
created_by: Mapped[str] = mapped_column(StringUUID, nullable=False)
created_at: Mapped[datetime] = mapped_column(
DateTime, nullable=False, server_default=func.current_timestamp(), init=False
)
MODES = ["automatic", "custom", "hierarchical"]
PRE_PROCESSING_RULES = ["remove_stopwords", "remove_extra_spaces", "remove_urls_emails"]
AUTOMATIC_RULES: AutomaticRulesConfig = {
AUTOMATIC_RULES: ClassVar[AutomaticRulesConfig] = {
"pre_processing_rules": [
{"id": "remove_extra_spaces", "enabled": True},
{"id": "remove_urls_emails", "enabled": False},

View File

@ -65,35 +65,18 @@ class LangSmithRunModel(LangSmithTokenUsage, LangSmithMultiModel):
}
file_list = values.get("file_list", [])
if isinstance(v, str):
if field_name == "inputs":
return {
"messages": {
"role": "user",
"content": v,
"usage_metadata": usage_metadata,
"file_list": file_list,
},
}
elif field_name == "outputs":
return {
"choices": {
"role": "ai",
"content": v,
"usage_metadata": usage_metadata,
"file_list": file_list,
},
}
elif isinstance(v, list):
data = {}
if len(v) > 0 and isinstance(v[0], dict):
# rename text to content
v = replace_text_with_content(data=v)
if field_name == "inputs":
data = {
"messages": v,
match field_name:
case "inputs":
return {
"messages": {
"role": "user",
"content": v,
"usage_metadata": usage_metadata,
"file_list": file_list,
},
}
elif field_name == "outputs":
data = {
case "outputs":
return {
"choices": {
"role": "ai",
"content": v,
@ -101,6 +84,29 @@ class LangSmithRunModel(LangSmithTokenUsage, LangSmithMultiModel):
"file_list": file_list,
},
}
case _:
pass
elif isinstance(v, list):
data = {}
if len(v) > 0 and isinstance(v[0], dict):
# rename text to content
v = replace_text_with_content(data=v)
match field_name:
case "inputs":
data = {
"messages": v,
}
case "outputs":
data = {
"choices": {
"role": "ai",
"content": v,
"usage_metadata": usage_metadata,
"file_list": file_list,
},
}
case _:
pass
return data
else:
return {

View File

@ -64,7 +64,9 @@ class LangSmithDataTrace(BaseTraceInstance):
self.generate_name_trace(trace_info)
def workflow_trace(self, trace_info: WorkflowTraceInfo):
trace_id = trace_info.trace_id or trace_info.message_id or trace_info.workflow_run_id
# trace_id must equal the root run's run_id (LangSmith protocol); external trace_id
# cannot be used here as it would cause HTTP 400.
trace_id = trace_info.message_id or trace_info.workflow_run_id
if trace_info.start_time is None:
trace_info.start_time = datetime.now()
message_dotted_order = (
@ -77,6 +79,8 @@ class LangSmithDataTrace(BaseTraceInstance):
)
metadata = trace_info.metadata
metadata["workflow_app_log_id"] = trace_info.workflow_app_log_id
if trace_info.trace_id:
metadata["external_trace_id"] = trace_info.trace_id
if trace_info.message_id:
message_run = LangSmithRunModel(

View File

@ -208,13 +208,17 @@ def test_workflow_trace(trace_instance, monkeypatch: pytest.MonkeyPatch):
assert call_args[0].id == "msg-1"
assert call_args[0].name == TraceTaskName.MESSAGE_TRACE
# trace_id must equal root run's id (message_id), not the external trace_id "trace-1"
assert call_args[0].trace_id == "msg-1"
assert call_args[1].id == "run-1"
assert call_args[1].name == TraceTaskName.WORKFLOW_TRACE
assert call_args[1].parent_run_id == "msg-1"
assert call_args[1].trace_id == "msg-1"
assert call_args[2].id == "node-llm"
assert call_args[2].run_type == LangSmithRunType.llm
assert call_args[2].trace_id == "msg-1"
assert call_args[3].id == "node-other"
assert call_args[3].run_type == LangSmithRunType.tool
@ -604,3 +608,83 @@ def test_get_project_url_error(trace_instance):
trace_instance.langsmith_client.get_run_url.side_effect = Exception("error")
with pytest.raises(ValueError, match="LangSmith get run url failed: error"):
trace_instance.get_project_url()
def _make_workflow_trace_info(
*, message_id: str | None, workflow_run_id: str, trace_id: str | None
) -> WorkflowTraceInfo:
workflow_data = MagicMock()
workflow_data.created_at = _dt()
workflow_data.finished_at = _dt() + timedelta(seconds=1)
return WorkflowTraceInfo(
tenant_id="tenant-1",
workflow_id="wf-1",
workflow_run_id=workflow_run_id,
workflow_run_inputs={},
workflow_run_outputs={},
workflow_run_status="succeeded",
workflow_run_version="1.0",
workflow_run_elapsed_time=1.0,
total_tokens=0,
file_list=[],
query="q",
message_id=message_id,
conversation_id="conv-1" if message_id else None,
start_time=_dt(),
end_time=_dt() + timedelta(seconds=1),
trace_id=trace_id,
metadata={"app_id": "app-1"},
workflow_app_log_id=None,
error=None,
workflow_data=workflow_data,
)
def _patch_workflow_trace_deps(monkeypatch, trace_instance):
monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.sessionmaker", lambda bind: lambda: MagicMock())
monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.db", MagicMock(engine="engine"))
repo = MagicMock()
repo.get_by_workflow_execution.return_value = []
factory = MagicMock()
factory.create_workflow_node_execution_repository.return_value = repo
monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.DifyCoreRepositoryFactory", factory)
monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock())
trace_instance.add_run = MagicMock()
def test_workflow_trace_id_uses_message_id_not_external(trace_instance, monkeypatch):
"""Chatflow with external trace_id: LangSmith trace_id must be message_id, not external."""
trace_info = _make_workflow_trace_info(
message_id="msg-abc",
workflow_run_id="run-xyz",
trace_id="external-999",
)
_patch_workflow_trace_deps(monkeypatch, trace_instance)
trace_instance.workflow_trace(trace_info)
calls = [c[0][0] for c in trace_instance.add_run.call_args_list]
# message run (root) and workflow run (child) must both use message_id as trace_id
assert calls[0].id == "msg-abc"
assert calls[0].trace_id == "msg-abc"
assert calls[1].id == "run-xyz"
assert calls[1].trace_id == "msg-abc"
# external_trace_id preserved in metadata
assert trace_info.metadata.get("external_trace_id") == "external-999"
def test_workflow_trace_id_pure_workflow_uses_run_id(trace_instance, monkeypatch):
"""Pure workflow (no message_id) with external trace_id: trace_id must be workflow_run_id."""
trace_info = _make_workflow_trace_info(
message_id=None,
workflow_run_id="run-xyz",
trace_id="external-999",
)
_patch_workflow_trace_deps(monkeypatch, trace_instance)
trace_instance.workflow_trace(trace_info)
calls = [c[0][0] for c in trace_instance.add_run.call_args_list]
# workflow run is the root; trace_id must equal its run_id
assert calls[0].id == "run-xyz"
assert calls[0].trace_id == "run-xyz"

View File

@ -81,14 +81,15 @@ class OpenSearchConfig(BaseModel):
pool_maxsize=20,
)
if self.auth_method == "basic":
logger.info("Using basic authentication for OpenSearch Vector DB")
match self.auth_method:
case AuthMethod.BASIC:
logger.info("Using basic authentication for OpenSearch Vector DB")
params["http_auth"] = (self.user, self.password)
elif self.auth_method == "aws_managed_iam":
logger.info("Using AWS managed IAM role for OpenSearch Vector DB")
params["http_auth"] = (self.user, self.password)
case AuthMethod.AWS_MANAGED_IAM:
logger.info("Using AWS managed IAM role for OpenSearch Vector DB")
params["http_auth"] = self.create_aws_managed_iam_auth()
params["http_auth"] = self.create_aws_managed_iam_auth()
return params

View File

@ -6,7 +6,7 @@ requires-python = "~=3.12.0"
dependencies = [
# Legacy: mature and widely deployed
"bleach>=6.3.0",
"boto3>=1.43.3",
"boto3>=1.43.6",
"celery>=5.6.3",
"croniter>=6.2.2",
"flask>=3.1.3,<4.0.0",
@ -14,8 +14,8 @@ dependencies = [
"gevent>=26.4.0",
"gevent-websocket>=0.10.1",
"gmpy2>=2.3.0",
"google-api-python-client>=2.195.0",
"gunicorn>=25.3.0",
"google-api-python-client>=2.196.0",
"gunicorn>=26.0.0",
"psycogreen>=1.0.2",
"psycopg2-binary>=2.9.12",
"python-socketio>=5.13.0",
@ -31,7 +31,7 @@ dependencies = [
"flask-migrate>=4.1.0,<5.0.0",
"flask-orjson>=2.0.0,<3.0.0",
"flask-restx>=1.3.2,<2.0.0",
"google-cloud-aiplatform>=1.149.0,<2.0.0",
"google-cloud-aiplatform>=1.151.0,<2.0.0",
"httpx[socks]>=0.28.1,<1.0.0",
"opentelemetry-distro>=0.62b1,<1.0.0",
"opentelemetry-instrumentation-celery>=0.62b0,<1.0.0",
@ -45,7 +45,7 @@ dependencies = [
# Emerging: newer and fast-moving, use compatible pins
"fastopenapi[flask]~=0.7.0",
"graphon~=0.3.0",
"graphon~=0.3.1",
"httpx-sse~=0.4.0",
"json-repair~=0.59.4",
]
@ -191,7 +191,7 @@ storage = [
"google-cloud-storage>=3.10.1",
"opendal>=0.46.0",
"oss2>=2.19.1",
"supabase>=2.29.0",
"supabase>=2.30.0",
"tos>=2.9.0",
]

View File

@ -1,9 +1,10 @@
import json
import logging
from typing import Any, TypedDict, cast
from typing import Any, Literal, TypedDict, cast
import sqlalchemy as sa
from flask_sqlalchemy.pagination import Pagination
from pydantic import BaseModel, Field
from sqlalchemy import select
from configs import dify_config
@ -31,39 +32,59 @@ from tasks.remove_app_and_related_data_task import remove_app_and_related_data_t
logger = logging.getLogger(__name__)
class AppListParams(BaseModel):
page: int = Field(default=1, ge=1)
limit: int = Field(default=20, ge=1, le=100)
mode: Literal["completion", "chat", "advanced-chat", "workflow", "agent-chat", "channel", "all"] = "all"
name: str | None = None
tag_ids: list[str] | None = None
is_created_by_me: bool | None = None
class CreateAppParams(BaseModel):
name: str = Field(min_length=1)
description: str | None = None
mode: Literal["chat", "agent-chat", "advanced-chat", "workflow", "completion"]
icon_type: str | None = None
icon: str | None = None
icon_background: str | None = None
api_rph: int = 0
api_rpm: int = 0
max_active_requests: int | None = None
class AppService:
def get_paginate_apps(self, user_id: str, tenant_id: str, args: dict[str, Any]) -> Pagination | None:
def get_paginate_apps(self, user_id: str, tenant_id: str, params: AppListParams) -> Pagination | None:
"""
Get app list with pagination
:param user_id: user id
:param tenant_id: tenant id
:param args: request args
:param params: query parameters
:return:
"""
filters = [App.tenant_id == tenant_id, App.is_universal == False]
if args["mode"] == "workflow":
if params.mode == "workflow":
filters.append(App.mode == AppMode.WORKFLOW)
elif args["mode"] == "completion":
elif params.mode == "completion":
filters.append(App.mode == AppMode.COMPLETION)
elif args["mode"] == "chat":
elif params.mode == "chat":
filters.append(App.mode == AppMode.CHAT)
elif args["mode"] == "advanced-chat":
elif params.mode == "advanced-chat":
filters.append(App.mode == AppMode.ADVANCED_CHAT)
elif args["mode"] == "agent-chat":
elif params.mode == "agent-chat":
filters.append(App.mode == AppMode.AGENT_CHAT)
if args.get("is_created_by_me", False):
if params.is_created_by_me:
filters.append(App.created_by == user_id)
if args.get("name"):
if params.name:
from libs.helper import escape_like_pattern
name = args["name"][:30]
name = params.name[:30]
escaped_name = escape_like_pattern(name)
filters.append(App.name.ilike(f"%{escaped_name}%", escape="\\"))
# Check if tag_ids is not empty to avoid WHERE false condition
if args.get("tag_ids") and len(args["tag_ids"]) > 0:
target_ids = TagService.get_target_ids_by_tag_ids("app", tenant_id, args["tag_ids"])
if params.tag_ids and len(params.tag_ids) > 0:
target_ids = TagService.get_target_ids_by_tag_ids("app", tenant_id, params.tag_ids)
if target_ids and len(target_ids) > 0:
filters.append(App.id.in_(target_ids))
else:
@ -71,21 +92,21 @@ class AppService:
app_models = db.paginate(
sa.select(App).where(*filters).order_by(App.created_at.desc()),
page=args["page"],
per_page=args["limit"],
page=params.page,
per_page=params.limit,
error_out=False,
)
return app_models
def create_app(self, tenant_id: str, args: dict[str, Any], account: Account) -> App:
def create_app(self, tenant_id: str, params: CreateAppParams, account: Account) -> App:
"""
Create app
:param tenant_id: tenant id
:param args: request args
:param params: app creation parameters
:param account: Account instance
"""
app_mode = AppMode.value_of(args["mode"])
app_mode = AppMode.value_of(params.mode)
app_template = default_app_templates[app_mode]
# get model config
@ -143,15 +164,16 @@ class AppService:
default_model_config["model"] = json.dumps(default_model_dict)
app = App(**app_template["app"])
app.name = args["name"]
app.description = args.get("description", "")
app.mode = args["mode"]
app.icon_type = args.get("icon_type", "emoji")
app.icon = args["icon"]
app.icon_background = args["icon_background"]
app.name = params.name
app.description = params.description or ""
app.mode = app_mode
app.icon_type = IconType(params.icon_type) if params.icon_type else IconType.EMOJI
app.icon = params.icon
app.icon_background = params.icon_background
app.tenant_id = tenant_id
app.api_rph = args.get("api_rph", 0)
app.api_rpm = args.get("api_rpm", 0)
app.api_rph = params.api_rph
app.api_rpm = params.api_rpm
app.max_active_requests = params.max_active_requests
app.created_by = account.id
app.updated_by = account.id

View File

@ -108,7 +108,7 @@ logger = logging.getLogger(__name__)
class ProcessRulesDict(TypedDict):
mode: str
mode: ProcessRuleMode
rules: dict[str, Any]
@ -204,7 +204,7 @@ class DatasetService:
mode = dataset_process_rule.mode
rules = dataset_process_rule.rules_dict or {}
else:
mode = str(DocumentService.DEFAULT_RULES["mode"])
mode = ProcessRuleMode(DocumentService.DEFAULT_RULES["mode"])
rules = dict(DocumentService.DEFAULT_RULES.get("rules") or {})
return {"mode": mode, "rules": rules}
@ -1984,7 +1984,7 @@ class DocumentService:
if process_rule.rules:
dataset_process_rule = DatasetProcessRule(
dataset_id=dataset.id,
mode=process_rule.mode,
mode=ProcessRuleMode(process_rule.mode),
rules=process_rule.rules.model_dump_json() if process_rule.rules else None,
created_by=account.id,
)
@ -1995,7 +1995,7 @@ class DocumentService:
elif process_rule.mode == ProcessRuleMode.AUTOMATIC:
dataset_process_rule = DatasetProcessRule(
dataset_id=dataset.id,
mode=process_rule.mode,
mode=ProcessRuleMode.AUTOMATIC,
rules=json.dumps(DatasetProcessRule.AUTOMATIC_RULES),
created_by=account.id,
)
@ -2572,14 +2572,14 @@ class DocumentService:
if process_rule.mode in {ProcessRuleMode.CUSTOM, ProcessRuleMode.HIERARCHICAL}:
dataset_process_rule = DatasetProcessRule(
dataset_id=dataset.id,
mode=process_rule.mode,
mode=ProcessRuleMode(process_rule.mode),
rules=process_rule.rules.model_dump_json() if process_rule.rules else None,
created_by=account.id,
)
elif process_rule.mode == ProcessRuleMode.AUTOMATIC:
dataset_process_rule = DatasetProcessRule(
dataset_id=dataset.id,
mode=process_rule.mode,
mode=ProcessRuleMode.AUTOMATIC,
rules=json.dumps(DatasetProcessRule.AUTOMATIC_RULES),
created_by=account.id,
)

View File

@ -166,7 +166,7 @@ class SystemFeatureModel(BaseModel):
enable_email_code_login: bool = False
enable_email_password_login: bool = True
enable_social_oauth_login: bool = False
enable_collaboration_mode: bool = False
enable_collaboration_mode: bool = True
is_allow_register: bool = False
is_allow_create_workspace: bool = False
is_email_setup: bool = False

View File

@ -1066,8 +1066,13 @@ class WorkflowService:
)
rendered_content = node.render_form_content_before_submission()
selected_action = next(
(user_action for user_action in node_data.user_actions if user_action.id == action),
None,
)
outputs: dict[str, Any] = dict(form_inputs)
outputs["__action_id"] = action
outputs["__action_value"] = selected_action.title if selected_action else ""
outputs["__rendered_content"] = node.render_form_content_with_outputs(
rendered_content, outputs, node_data.outputs_field_names()
)

View File

@ -91,7 +91,11 @@ def init_llm_node(config: dict) -> LLMNode:
return node
def test_execute_llm():
def _mock_db_session_close(monkeypatch) -> None:
monkeypatch.setattr(db.session, "close", MagicMock())
def test_execute_llm(monkeypatch):
node = init_llm_node(
config={
"id": "llm",
@ -118,7 +122,7 @@ def test_execute_llm():
},
)
db.session.close = MagicMock()
_mock_db_session_close(monkeypatch)
def build_mock_model_instance() -> MagicMock:
from decimal import Decimal
@ -195,7 +199,7 @@ def test_execute_llm():
assert item.node_run_result.outputs.get("usage", {})["total_tokens"] > 0
def test_execute_llm_with_jinja2():
def test_execute_llm_with_jinja2(monkeypatch):
"""
Test execute LLM node with jinja2
"""
@ -233,8 +237,7 @@ def test_execute_llm_with_jinja2():
},
)
# Mock db.session.close()
db.session.close = MagicMock()
_mock_db_session_close(monkeypatch)
def build_mock_model_instance() -> MagicMock:
from decimal import Decimal

View File

@ -83,7 +83,11 @@ def init_parameter_extractor_node(config: dict, memory=None):
return node
def test_function_calling_parameter_extractor(setup_model_mock):
def _mock_db_session_close(monkeypatch) -> None:
monkeypatch.setattr(db.session, "close", MagicMock())
def test_function_calling_parameter_extractor(setup_model_mock, monkeypatch):
"""
Test function calling for parameter extractor.
"""
@ -114,7 +118,7 @@ def test_function_calling_parameter_extractor(setup_model_mock):
mode="chat",
credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
)()
db.session.close = MagicMock()
_mock_db_session_close(monkeypatch)
result = node._run()
@ -124,7 +128,7 @@ def test_function_calling_parameter_extractor(setup_model_mock):
assert result.outputs.get("__reason") == None
def test_instructions(setup_model_mock):
def test_instructions(setup_model_mock, monkeypatch):
"""
Test chat parameter extractor.
"""
@ -155,7 +159,7 @@ def test_instructions(setup_model_mock):
mode="chat",
credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
)()
db.session.close = MagicMock()
_mock_db_session_close(monkeypatch)
result = node._run()
@ -174,7 +178,7 @@ def test_instructions(setup_model_mock):
assert "what's the weather in SF" in prompt.get("text")
def test_chat_parameter_extractor(setup_model_mock):
def test_chat_parameter_extractor(setup_model_mock, monkeypatch):
"""
Test chat parameter extractor.
"""
@ -205,7 +209,7 @@ def test_chat_parameter_extractor(setup_model_mock):
mode="chat",
credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
)()
db.session.close = MagicMock()
_mock_db_session_close(monkeypatch)
result = node._run()
@ -225,7 +229,7 @@ def test_chat_parameter_extractor(setup_model_mock):
assert '<structure>\n{"type": "object"' in prompt.get("text")
def test_completion_parameter_extractor(setup_model_mock):
def test_completion_parameter_extractor(setup_model_mock, monkeypatch):
"""
Test completion parameter extractor.
"""
@ -256,7 +260,7 @@ def test_completion_parameter_extractor(setup_model_mock):
mode="completion",
credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
)()
db.session.close = MagicMock()
_mock_db_session_close(monkeypatch)
result = node._run()
@ -350,7 +354,7 @@ def test_extract_json_from_tool_call():
assert result["location"] == "kawaii"
def test_chat_parameter_extractor_with_memory(setup_model_mock):
def test_chat_parameter_extractor_with_memory(setup_model_mock, monkeypatch):
"""
Test chat parameter extractor with memory.
"""
@ -382,7 +386,7 @@ def test_chat_parameter_extractor_with_memory(setup_model_mock):
mode="chat",
credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
)()
db.session.close = MagicMock()
_mock_db_session_close(monkeypatch)
result = node._run()

View File

@ -168,6 +168,7 @@ def test_node_variable_collection_get_success(
account, tenant = create_console_account_and_tenant(db_session_with_containers)
app = create_console_app(db_session_with_containers, tenant.id, account.id, AppMode.WORKFLOW)
node_variable = _create_node_variable(db_session_with_containers, app.id, account.id, node_id="node_123")
node_variable_id = node_variable.id
_create_node_variable(db_session_with_containers, app.id, account.id, node_id="node_456", name="other")
response = test_client_with_containers.get(
@ -178,7 +179,7 @@ def test_node_variable_collection_get_success(
assert response.status_code == 200
payload = response.get_json()
assert payload is not None
assert [item["id"] for item in payload["items"]] == [node_variable.id]
assert [item["id"] for item in payload["items"]] == [node_variable_id]
def test_node_variable_collection_get_invalid_node_id(
@ -377,6 +378,7 @@ def test_system_variable_collection_get(
account, tenant = create_console_account_and_tenant(db_session_with_containers)
app = create_console_app(db_session_with_containers, tenant.id, account.id, AppMode.WORKFLOW)
variable = _create_system_variable(db_session_with_containers, app.id, account.id)
variable_id = variable.id
response = test_client_with_containers.get(
f"/console/api/apps/{app.id}/workflows/draft/system-variables",
@ -386,7 +388,7 @@ def test_system_variable_collection_get(
assert response.status_code == 200
payload = response.get_json()
assert payload is not None
assert [item["id"] for item in payload["items"]] == [variable.id]
assert [item["id"] for item in payload["items"]] == [variable_id]
def test_environment_variable_collection_get(

View File

@ -17,6 +17,8 @@ def test_get_oauth_url_successful(
test_client_with_containers: FlaskClient,
) -> None:
account, tenant = create_console_account_and_tenant(db_session_with_containers)
tenant_id = tenant.id
current_tenant_id = account.current_tenant_id
provider = MagicMock()
provider.get_authorization_url.return_value = "http://oauth.provider/auth"
@ -29,7 +31,7 @@ def test_get_oauth_url_successful(
headers=authenticate_console_client(test_client_with_containers, account),
)
assert tenant.id == account.current_tenant_id
assert tenant_id == current_tenant_id
assert response.status_code == 200
assert response.get_json() == {"data": "http://oauth.provider/auth"}
provider.get_authorization_url.assert_called_once()

View File

@ -6,6 +6,7 @@ from unittest.mock import MagicMock, patch
import pytest
from flask import Flask
from sqlalchemy.orm import Session
from controllers.console.auth.error import (
EmailCodeError,
@ -20,13 +21,15 @@ from controllers.console.auth.forgot_password import (
ForgotPasswordSendEmailApi,
)
from controllers.console.error import AccountNotFound, EmailSendIpLimitError
from tests.test_containers_integration_tests.controllers.console.helpers import ensure_dify_setup
class TestForgotPasswordSendEmailApi:
"""Test cases for sending password reset emails."""
@pytest.fixture
def app(self, flask_app_with_containers: Flask):
def app(self, flask_app_with_containers: Flask, db_session_with_containers: Session):
ensure_dify_setup(db_session_with_containers)
return flask_app_with_containers
@pytest.fixture
@ -139,7 +142,8 @@ class TestForgotPasswordCheckApi:
"""Test cases for verifying password reset codes."""
@pytest.fixture
def app(self, flask_app_with_containers: Flask):
def app(self, flask_app_with_containers: Flask, db_session_with_containers: Session):
ensure_dify_setup(db_session_with_containers)
return flask_app_with_containers
@patch("controllers.console.auth.forgot_password.AccountService.is_forgot_password_error_rate_limit")
@ -322,7 +326,8 @@ class TestForgotPasswordResetApi:
"""Test cases for resetting password with verified token."""
@pytest.fixture
def app(self, flask_app_with_containers: Flask):
def app(self, flask_app_with_containers: Flask, db_session_with_containers: Session):
ensure_dify_setup(db_session_with_containers)
return flask_app_with_containers
@pytest.fixture

View File

@ -11,7 +11,7 @@ from models.enums import ConversationFromSource, MessageFileBelongsTo
from models.model import AppModelConfig, Conversation, EndUser, Message, MessageAgentThought
from services.account_service import AccountService, TenantService
from services.agent_service import AgentService
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
from tests.test_containers_integration_tests.helpers import generate_valid_password
@ -119,16 +119,16 @@ class TestAgentService:
tenant = account.current_tenant
# Create app with realistic data
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "agent-chat",
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
}
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="agent-chat",
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)

View File

@ -9,7 +9,7 @@ from models import Account
from models.enums import ConversationFromSource, InvokeFrom
from models.model import MessageAnnotation
from services.annotation_service import AppAnnotationService
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
from tests.test_containers_integration_tests.helpers import generate_valid_password
@ -86,16 +86,16 @@ class TestAnnotationService:
tenant = account.current_tenant
# Setup app creation arguments
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
}
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
)
# Create app
app_service = AppService()

View File

@ -37,7 +37,7 @@ from services.app_dsl_service import (
PendingData,
_check_version_compatibility,
)
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
from tests.test_containers_integration_tests.helpers import generate_valid_password
_DEFAULT_TENANT_ID = "00000000-0000-0000-0000-000000000001"
@ -147,16 +147,16 @@ class TestAppDslService:
)
TenantService.create_owner_tenant_if_not_exist(account, name=fake.company())
tenant = account.current_tenant
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
}
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
return app, account

View File

@ -1,4 +1,5 @@
import uuid
from typing import Literal
from unittest.mock import ANY, MagicMock, patch
import pytest
@ -133,7 +134,10 @@ class TestAppGenerateService:
}
def _create_test_app_and_account(
self, db_session_with_containers: Session, mock_external_service_dependencies, mode="chat"
self,
db_session_with_containers: Session,
mock_external_service_dependencies,
mode: Literal["chat", "agent-chat", "advanced-chat", "workflow", "completion"] = "chat",
):
"""
Helper method to create a test app and account for testing.
@ -165,20 +169,20 @@ class TestAppGenerateService:
TenantService.create_owner_tenant_if_not_exist(account, name=fake.company())
tenant = account.current_tenant
# Create app with realistic data
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": mode,
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
"max_active_requests": 5,
}
from services.app_service import AppService, CreateAppParams
from services.app_service import AppService
# Create app with realistic data
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode=mode,
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
max_active_requests=5,
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)

View File

@ -2,6 +2,7 @@ from unittest.mock import create_autospec, patch
import pytest
from faker import Faker
from pydantic import ValidationError
from sqlalchemy.orm import Session
from constants.model_template import default_app_templates
@ -12,7 +13,7 @@ from services.account_service import AccountService, TenantService
from tests.test_containers_integration_tests.helpers import generate_valid_password
# Delay import of AppService to avoid circular dependency
# from services.app_service import AppService
# from services.app_service import AppService, AppListParams, CreateAppParams
class TestAppService:
@ -64,34 +65,34 @@ class TestAppService:
tenant = account.current_tenant
# Setup app creation arguments
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
}
# Import here to avoid circular dependency
from services.app_service import AppService, CreateAppParams
app_params = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
)
# Create app
# Import here to avoid circular dependency
from services.app_service import AppService
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
app = app_service.create_app(tenant.id, app_params, account)
# Verify app was created correctly
assert app.name == app_args["name"]
assert app.description == app_args["description"]
assert app.mode == app_args["mode"]
assert app.icon_type == app_args["icon_type"]
assert app.icon == app_args["icon"]
assert app.icon_background == app_args["icon_background"]
assert app.name == app_params.name
assert app.description == app_params.description
assert app.mode == app_params.mode
assert app.icon_type == app_params.icon_type
assert app.icon == app_params.icon
assert app.icon_background == app_params.icon_background
assert app.tenant_id == tenant.id
assert app.api_rph == app_args["api_rph"]
assert app.api_rpm == app_args["api_rpm"]
assert app.api_rph == app_params.api_rph
assert app.api_rpm == app_params.api_rpm
assert app.created_by == account.id
assert app.updated_by == account.id
assert app.status == "normal"
@ -120,7 +121,7 @@ class TestAppService:
tenant = account.current_tenant
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
app_service = AppService()
@ -129,20 +130,20 @@ class TestAppService:
app_modes = [v.value for v in default_app_templates]
for mode in app_modes:
app_args = {
"name": f"{fake.company()} {mode}",
"description": f"Test app for {mode} mode",
"mode": mode,
"icon_type": "emoji",
"icon": "🚀",
"icon_background": "#4ECDC4",
}
app_params = CreateAppParams(
name=f"{fake.company()} {mode}",
description=f"Test app for {mode} mode",
mode=mode,
icon_type="emoji",
icon="🚀",
icon_background="#4ECDC4",
)
app = app_service.create_app(tenant.id, app_args, account)
app = app_service.create_app(tenant.id, app_params, account)
# Verify app mode was set correctly
assert app.mode == mode
assert app.name == app_args["name"]
assert app.name == app_params.name
assert app.tenant_id == tenant.id
assert app.created_by == account.id
@ -163,20 +164,20 @@ class TestAppService:
tenant = account.current_tenant
# Create app first
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🎯",
"icon_background": "#45B7D1",
}
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
app_params = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🎯",
icon_background="#45B7D1",
)
app_service = AppService()
created_app = app_service.create_app(tenant.id, app_args, account)
created_app = app_service.create_app(tenant.id, app_params, account)
# Get app using the service - needs current_user mock
mock_current_user = create_autospec(Account, instance=True)
@ -211,31 +212,27 @@ class TestAppService:
tenant = account.current_tenant
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppListParams, AppService, CreateAppParams
app_service = AppService()
# Create multiple apps
app_names = [fake.company() for _ in range(5)]
for name in app_names:
app_args = {
"name": name,
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "📱",
"icon_background": "#96CEB4",
}
app_service.create_app(tenant.id, app_args, account)
app_params = CreateAppParams(
name=name,
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="📱",
icon_background="#96CEB4",
)
app_service.create_app(tenant.id, app_params, account)
# Get paginated apps
args = {
"page": 1,
"limit": 10,
"mode": "chat",
}
params = AppListParams(page=1, limit=10, mode="chat")
paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args)
paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, params)
# Verify pagination results
assert paginated_apps is not None
@ -267,60 +264,47 @@ class TestAppService:
tenant = account.current_tenant
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppListParams, AppService, CreateAppParams
app_service = AppService()
# Create apps with different modes
chat_app_args = {
"name": "Chat App",
"description": "A chat application",
"mode": "chat",
"icon_type": "emoji",
"icon": "💬",
"icon_background": "#FF6B6B",
}
completion_app_args = {
"name": "Completion App",
"description": "A completion application",
"mode": "completion",
"icon_type": "emoji",
"icon": "✍️",
"icon_background": "#4ECDC4",
}
chat_app_params = CreateAppParams(
name="Chat App",
description="A chat application",
mode="chat",
icon_type="emoji",
icon="💬",
icon_background="#FF6B6B",
)
completion_app_params = CreateAppParams(
name="Completion App",
description="A completion application",
mode="completion",
icon_type="emoji",
icon="✍️",
icon_background="#4ECDC4",
)
chat_app = app_service.create_app(tenant.id, chat_app_args, account)
completion_app = app_service.create_app(tenant.id, completion_app_args, account)
chat_app = app_service.create_app(tenant.id, chat_app_params, account)
completion_app = app_service.create_app(tenant.id, completion_app_params, account)
# Test filter by mode
chat_args = {
"page": 1,
"limit": 10,
"mode": "chat",
}
chat_apps = app_service.get_paginate_apps(account.id, tenant.id, chat_args)
chat_apps = app_service.get_paginate_apps(account.id, tenant.id, AppListParams(page=1, limit=10, mode="chat"))
assert len(chat_apps.items) == 1
assert chat_apps.items[0].mode == "chat"
# Test filter by name
name_args = {
"page": 1,
"limit": 10,
"mode": "chat",
"name": "Chat",
}
filtered_apps = app_service.get_paginate_apps(account.id, tenant.id, name_args)
filtered_apps = app_service.get_paginate_apps(
account.id, tenant.id, AppListParams(page=1, limit=10, mode="chat", name="Chat")
)
assert len(filtered_apps.items) == 1
assert "Chat" in filtered_apps.items[0].name
# Test filter by created_by_me
created_by_me_args = {
"page": 1,
"limit": 10,
"mode": "completion",
"is_created_by_me": True,
}
my_apps = app_service.get_paginate_apps(account.id, tenant.id, created_by_me_args)
my_apps = app_service.get_paginate_apps(
account.id, tenant.id, AppListParams(page=1, limit=10, mode="completion", is_created_by_me=True)
)
assert len(my_apps.items) == 1
def test_get_paginate_apps_with_tag_filters(
@ -342,34 +326,29 @@ class TestAppService:
tenant = account.current_tenant
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppListParams, AppService, CreateAppParams
app_service = AppService()
# Create an app
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🏷️",
"icon_background": "#FFEAA7",
}
app = app_service.create_app(tenant.id, app_args, account)
app_params = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🏷️",
icon_background="#FFEAA7",
)
app = app_service.create_app(tenant.id, app_params, account)
# Mock TagService to return the app ID for tag filtering
with patch("services.app_service.TagService.get_target_ids_by_tag_ids") as mock_tag_service:
mock_tag_service.return_value = [app.id]
# Test with tag filter
args = {
"page": 1,
"limit": 10,
"mode": "chat",
"tag_ids": ["tag1", "tag2"],
}
params = AppListParams(page=1, limit=10, mode="chat", tag_ids=["tag1", "tag2"])
paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args)
paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, params)
# Verify tag service was called
mock_tag_service.assert_called_once_with("app", tenant.id, ["tag1", "tag2"])
@ -383,14 +362,9 @@ class TestAppService:
with patch("services.app_service.TagService.get_target_ids_by_tag_ids") as mock_tag_service:
mock_tag_service.return_value = []
args = {
"page": 1,
"limit": 10,
"mode": "chat",
"tag_ids": ["nonexistent_tag"],
}
params = AppListParams(page=1, limit=10, mode="chat", tag_ids=["nonexistent_tag"])
paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args)
paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, params)
# Should return None when no apps match tag filter
assert paginated_apps is None
@ -412,20 +386,20 @@ class TestAppService:
tenant = account.current_tenant
# Create app first
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🎯",
"icon_background": "#45B7D1",
}
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
app_params = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🎯",
icon_background="#45B7D1",
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
app = app_service.create_app(tenant.id, app_params, account)
# Store original values
original_name = app.name
@ -481,19 +455,19 @@ class TestAppService:
TenantService.create_owner_tenant_if_not_exist(account, name=fake.company())
tenant = account.current_tenant
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
app_service = AppService()
app = app_service.create_app(
tenant.id,
{
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🎯",
"icon_background": "#45B7D1",
},
CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🎯",
icon_background="#45B7D1",
),
account,
)
@ -533,19 +507,19 @@ class TestAppService:
TenantService.create_owner_tenant_if_not_exist(account, name=fake.company())
tenant = account.current_tenant
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
app_service = AppService()
app = app_service.create_app(
tenant.id,
{
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🎯",
"icon_background": "#45B7D1",
},
CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🎯",
icon_background="#45B7D1",
),
account,
)
@ -584,20 +558,20 @@ class TestAppService:
tenant = account.current_tenant
# Create app first
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🎯",
"icon_background": "#45B7D1",
}
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
app_params = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🎯",
icon_background="#45B7D1",
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
app = app_service.create_app(tenant.id, app_params, account)
# Store original name
original_name = app.name
@ -637,20 +611,20 @@ class TestAppService:
tenant = account.current_tenant
# Create app first
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🎯",
"icon_background": "#45B7D1",
}
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
app_params = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🎯",
icon_background="#45B7D1",
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
app = app_service.create_app(tenant.id, app_params, account)
# Store original values
original_icon = app.icon
@ -698,18 +672,17 @@ class TestAppService:
tenant = account.current_tenant
# Create app first
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🌐",
"icon_background": "#74B9FF",
}
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🌐",
icon_background="#74B9FF",
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
@ -758,18 +731,17 @@ class TestAppService:
tenant = account.current_tenant
# Create app first
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🔌",
"icon_background": "#A29BFE",
}
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🔌",
icon_background="#A29BFE",
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
@ -818,18 +790,17 @@ class TestAppService:
tenant = account.current_tenant
# Create app first
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🔄",
"icon_background": "#FD79A8",
}
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🔄",
icon_background="#FD79A8",
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
@ -869,18 +840,17 @@ class TestAppService:
tenant = account.current_tenant
# Create app first
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🗑️",
"icon_background": "#E17055",
}
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🗑️",
icon_background="#E17055",
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
@ -921,18 +891,17 @@ class TestAppService:
tenant = account.current_tenant
# Create app first
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🧹",
"icon_background": "#00B894",
}
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🧹",
icon_background="#00B894",
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
@ -981,18 +950,17 @@ class TestAppService:
tenant = account.current_tenant
# Create app first
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "📊",
"icon_background": "#6C5CE7",
}
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="📊",
icon_background="#6C5CE7",
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
@ -1020,18 +988,17 @@ class TestAppService:
tenant = account.current_tenant
# Create app first
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🔗",
"icon_background": "#FDCB6E",
}
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🔗",
icon_background="#FDCB6E",
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
@ -1060,18 +1027,17 @@ class TestAppService:
tenant = account.current_tenant
# Create app first
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🆔",
"icon_background": "#E84393",
}
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🆔",
icon_background="#E84393",
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
@ -1107,26 +1073,20 @@ class TestAppService:
password=generate_valid_password(fake),
)
TenantService.create_owner_tenant_if_not_exist(account, name=fake.company())
tenant = account.current_tenant
# Setup app creation arguments with invalid mode
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "invalid_mode", # Invalid mode
"icon_type": "emoji",
"icon": "",
"icon_background": "#D63031",
}
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import CreateAppParams
app_service = AppService()
# Attempt to create app with invalid mode
with pytest.raises(ValueError, match="invalid mode value"):
app_service.create_app(tenant.id, app_args, account)
# Attempt to create app with invalid mode - Pydantic will reject invalid literal
with pytest.raises(ValidationError):
CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="invalid_mode", # type: ignore[arg-type]
icon_type="emoji",
icon="",
icon_background="#D63031",
)
def test_get_apps_with_special_characters_in_name(
self, db_session_with_containers: Session, mock_external_service_dependencies
@ -1152,99 +1112,103 @@ class TestAppService:
tenant = account.current_tenant
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppListParams, AppService, CreateAppParams
app_service = AppService()
# Create apps with special characters in names
app_with_percent = app_service.create_app(
tenant.id,
{
"name": "App with 50% discount",
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
},
CreateAppParams(
name="App with 50% discount",
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
),
account,
)
app_with_underscore = app_service.create_app(
tenant.id,
{
"name": "test_data_app",
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
},
CreateAppParams(
name="test_data_app",
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
),
account,
)
app_with_backslash = app_service.create_app(
tenant.id,
{
"name": "path\\to\\app",
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
},
CreateAppParams(
name="path\\to\\app",
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
),
account,
)
# Create app that should NOT match
app_no_match = app_service.create_app(
tenant.id,
{
"name": "100% different",
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
},
CreateAppParams(
name="100% different",
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
),
account,
)
# Test 1: Search with % character
args = {"name": "50%", "mode": "chat", "page": 1, "limit": 10}
paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args)
paginated_apps = app_service.get_paginate_apps(
account.id, tenant.id, AppListParams(name="50%", mode="chat", page=1, limit=10)
)
assert paginated_apps is not None
assert paginated_apps.total == 1
assert len(paginated_apps.items) == 1
assert paginated_apps.items[0].name == "App with 50% discount"
# Test 2: Search with _ character
args = {"name": "test_data", "mode": "chat", "page": 1, "limit": 10}
paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args)
paginated_apps = app_service.get_paginate_apps(
account.id, tenant.id, AppListParams(name="test_data", mode="chat", page=1, limit=10)
)
assert paginated_apps is not None
assert paginated_apps.total == 1
assert len(paginated_apps.items) == 1
assert paginated_apps.items[0].name == "test_data_app"
# Test 3: Search with \ character
args = {"name": "path\\to\\app", "mode": "chat", "page": 1, "limit": 10}
paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args)
paginated_apps = app_service.get_paginate_apps(
account.id, tenant.id, AppListParams(name="path\\to\\app", mode="chat", page=1, limit=10)
)
assert paginated_apps is not None
assert paginated_apps.total == 1
assert len(paginated_apps.items) == 1
assert paginated_apps.items[0].name == "path\\to\\app"
# Test 4: Search with % should NOT match 100% (verifies escaping works)
args = {"name": "50%", "mode": "chat", "page": 1, "limit": 10}
paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args)
paginated_apps = app_service.get_paginate_apps(
account.id, tenant.id, AppListParams(name="50%", mode="chat", page=1, limit=10)
)
assert paginated_apps is not None
assert paginated_apps.total == 1
assert all("50%" in app.name for app in paginated_apps.items)

View File

@ -16,6 +16,7 @@ from uuid import uuid4
from sqlalchemy.orm import Session
from core.rag.index_processor.constant.index_type import IndexTechniqueType
from models import AccountStatus, CreatorUserRole, TenantStatus
from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole
from models.dataset import (
AppDatasetJoin,
@ -25,7 +26,7 @@ from models.dataset import (
DatasetProcessRule,
DatasetQuery,
)
from models.enums import DatasetQuerySource, DataSourceType, ProcessRuleMode
from models.enums import DatasetQuerySource, DataSourceType, ProcessRuleMode, TagType
from models.model import Tag, TagBinding
from services.dataset_service import DatasetService, DocumentService
@ -42,11 +43,11 @@ class DatasetRetrievalTestDataFactory:
email=f"{uuid4()}@example.com",
name=f"user-{uuid4()}",
interface_language="en-US",
status="active",
status=AccountStatus.ACTIVE,
)
tenant = Tenant(
name=f"tenant-{uuid4()}",
status="normal",
status=TenantStatus.NORMAL,
)
db_session_with_containers.add_all([account, tenant])
db_session_with_containers.flush()
@ -72,7 +73,7 @@ class DatasetRetrievalTestDataFactory:
email=f"{uuid4()}@example.com",
name=f"user-{uuid4()}",
interface_language="en-US",
status="active",
status=AccountStatus.ACTIVE,
)
db_session_with_containers.add(account)
db_session_with_containers.flush()
@ -130,7 +131,7 @@ class DatasetRetrievalTestDataFactory:
@staticmethod
def create_process_rule(
db_session_with_containers: Session, dataset_id: str, created_by: str, mode: str, rules: dict
db_session_with_containers: Session, dataset_id: str, created_by: str, mode: ProcessRuleMode, rules: dict
) -> DatasetProcessRule:
"""Create a dataset process rule."""
process_rule = DatasetProcessRule(
@ -153,7 +154,7 @@ class DatasetRetrievalTestDataFactory:
content=content,
source=DatasetQuerySource.APP,
source_app_id=None,
created_by_role="account",
created_by_role=CreatorUserRole.ACCOUNT,
created_by=created_by,
)
db_session_with_containers.add(dataset_query)
@ -176,7 +177,7 @@ class DatasetRetrievalTestDataFactory:
"""Create a knowledge tag and bind it to the target dataset."""
tag = Tag(
tenant_id=tenant_id,
type="knowledge",
type=TagType.KNOWLEDGE,
name=f"tag-{uuid4()}",
created_by=created_by,
)

View File

@ -6,7 +6,7 @@ from sqlalchemy.orm import Session
from models.enums import ConversationFromSource, FeedbackRating, InvokeFrom
from models.model import MessageFeedback
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
from services.errors.message import (
FirstMessageNotExistsError,
LastMessageNotExistsError,
@ -103,16 +103,16 @@ class TestMessageService:
tenant = account.current_tenant
# Setup app creation arguments
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "advanced-chat", # Use advanced-chat mode to use mocked workflow
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
}
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="advanced-chat", # Use advanced-chat mode to use mocked workflow,
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
)
# Create app
app_service = AppService()

View File

@ -11,7 +11,7 @@ from sqlalchemy.orm import Session
from core.ops.entities.config_entity import TracingProviderEnum
from models.model import TraceAppConfig
from services.account_service import AccountService, TenantService
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
from services.ops_service import OpsService
from tests.test_containers_integration_tests.helpers import generate_valid_password
@ -57,14 +57,14 @@ class TestOpsService:
app_service = AppService()
app = app_service.create_app(
tenant.id,
{
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
},
CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
),
account,
)
return app, account

View File

@ -8,7 +8,7 @@ from models import App, CreatorUserRole
from models.enums import ConversationFromSource
from models.model import EndUser, Message
from models.web import SavedMessage
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
from services.saved_message_service import SavedMessageService
from tests.test_containers_integration_tests.helpers import generate_valid_password
@ -73,16 +73,16 @@ class TestSavedMessageService:
tenant = account.current_tenant
# Create app with realistic data
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
}
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)

View File

@ -11,7 +11,7 @@ from models.enums import ConversationFromSource
from models.model import Conversation, EndUser
from models.web import PinnedConversation
from services.account_service import AccountService, TenantService
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
from services.web_conversation_service import WebConversationService
from tests.test_containers_integration_tests.helpers import generate_valid_password
@ -77,16 +77,16 @@ class TestWebConversationService:
tenant = account.current_tenant
# Create app with realistic data
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
}
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)

View File

@ -17,7 +17,7 @@ from models.workflow import WorkflowAppLogCreatedFrom
from services.account_service import AccountService, TenantService
# Delay import of AppService to avoid circular dependency
# from services.app_service import AppService
# from services.app_service import AppService, CreateAppParams
from services.workflow_app_service import LogView, WorkflowAppService
from tests.test_containers_integration_tests.helpers import generate_valid_password
@ -82,20 +82,20 @@ class TestWorkflowAppService:
TenantService.create_owner_tenant_if_not_exist(account, name=fake.company())
tenant = account.current_tenant
# Create app with realistic data
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "workflow",
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
}
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
# Create app with realistic data
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="workflow",
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
@ -146,20 +146,20 @@ class TestWorkflowAppService:
"""
fake = Faker()
# Create app with realistic data
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "workflow",
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
}
# Import here to avoid circular dependency
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
# Create app with realistic data
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="workflow",
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)

View File

@ -13,7 +13,7 @@ from models.model import (
)
from models.workflow import WorkflowRun
from services.account_service import AccountService, TenantService
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
from services.workflow_run_service import WorkflowRunService
from tests.test_containers_integration_tests.helpers import generate_valid_password
@ -79,16 +79,16 @@ class TestWorkflowRunService:
tenant = account.current_tenant
# Create app with realistic data
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "chat",
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
}
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="chat",
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)
@ -535,13 +535,13 @@ class TestWorkflowRunService:
tenant = account.current_tenant
# Create app
app_args = {
"name": "Test App",
"mode": "chat",
"icon_type": "emoji",
"icon": "🚀",
"icon_background": "#4ECDC4",
}
app_args = CreateAppParams(
name="Test App",
mode="chat",
icon_type="emoji",
icon="🚀",
icon_background="#4ECDC4",
)
app = app_service.create_app(tenant.id, app_args, account)
# Create workflow run without node executions
@ -586,13 +586,13 @@ class TestWorkflowRunService:
tenant = account.current_tenant
# Create app
app_args = {
"name": "Test App",
"mode": "chat",
"icon_type": "emoji",
"icon": "🚀",
"icon_background": "#4ECDC4",
}
app_args = CreateAppParams(
name="Test App",
mode="chat",
icon_type="emoji",
icon="🚀",
icon_background="#4ECDC4",
)
app = app_service.create_app(tenant.id, app_args, account)
# Use invalid workflow run ID
@ -637,13 +637,13 @@ class TestWorkflowRunService:
tenant = account.current_tenant
# Create app
app_args = {
"name": "Test App",
"mode": "chat",
"icon_type": "emoji",
"icon": "🚀",
"icon_background": "#4ECDC4",
}
app_args = CreateAppParams(
name="Test App",
mode="chat",
icon_type="emoji",
icon="🚀",
icon_background="#4ECDC4",
)
app = app_service.create_app(tenant.id, app_args, account)
# Create workflow run

View File

@ -11,7 +11,7 @@ from core.tools.errors import WorkflowToolHumanInputNotSupportedError
from models.tools import WorkflowToolProvider
from models.workflow import Workflow as WorkflowModel
from services.account_service import AccountService, TenantService
from services.app_service import AppService
from services.app_service import AppService, CreateAppParams
from services.tools.workflow_tools_manage_service import WorkflowToolManageService
from tests.test_containers_integration_tests.helpers import generate_valid_password
@ -94,16 +94,16 @@ class TestWorkflowToolManageService:
tenant = account.current_tenant
# Create app with realistic data
app_args = {
"name": fake.company(),
"description": fake.text(max_nb_chars=100),
"mode": "workflow",
"icon_type": "emoji",
"icon": "🤖",
"icon_background": "#FF6B6B",
"api_rph": 100,
"api_rpm": 10,
}
app_args = CreateAppParams(
name=fake.company(),
description=fake.text(max_nb_chars=100),
mode="workflow",
icon_type="emoji",
icon="🤖",
icon_background="#FF6B6B",
api_rph=100,
api_rpm=10,
)
app_service = AppService()
app = app_service.create_app(tenant.id, app_args, account)

View File

@ -7,6 +7,7 @@ The task is responsible for removing document segments from the search index whe
"""
from unittest.mock import MagicMock, patch
from uuid import uuid4
from faker import Faker
from sqlalchemy import select
@ -82,7 +83,7 @@ class TestDisableSegmentsFromIndexTask:
return account
def _create_test_dataset(self, db_session_with_containers: Session, account, fake: Faker | None = None):
def _create_test_dataset(self, db_session_with_containers: Session, account: Account, fake: Faker | None = None):
"""
Helper method to create a test dataset with realistic data.
@ -117,7 +118,7 @@ class TestDisableSegmentsFromIndexTask:
return dataset
def _create_test_document(
self, db_session_with_containers: Session, dataset, account: Account, fake: Faker | None = None
self, db_session_with_containers: Session, dataset: Dataset, account: Account, fake: Faker | None = None
):
"""
Helper method to create a test document with realistic data.
@ -164,7 +165,7 @@ class TestDisableSegmentsFromIndexTask:
return document
def _create_test_segments(
self, db_session_with_containers: Session, document, dataset, account, count=3, fake=None
self, db_session_with_containers: Session, document, dataset: Dataset, account: Account, count=3, fake=None
):
"""
Helper method to create test document segments with realistic data.
@ -217,7 +218,9 @@ class TestDisableSegmentsFromIndexTask:
return segments
def _create_dataset_process_rule(self, db_session_with_containers: Session, dataset, fake: Faker | None = None):
def _create_dataset_process_rule(
self, db_session_with_containers: Session, dataset: Dataset, fake: Faker | None = None
):
"""
Helper method to create a dataset process rule.
@ -230,21 +233,19 @@ class TestDisableSegmentsFromIndexTask:
DatasetProcessRule: Created process rule instance
"""
fake = fake or Faker()
process_rule = DatasetProcessRule()
process_rule.id = fake.uuid4()
process_rule.tenant_id = dataset.tenant_id
process_rule.dataset_id = dataset.id
process_rule.mode = ProcessRuleMode.AUTOMATIC
process_rule.rules = (
"{"
'"mode": "automatic", '
'"rules": {'
'"pre_processing_rules": [], "segmentation": '
'{"separator": "\\n\\n", "max_tokens": 1000, "chunk_overlap": 50}}'
"}"
process_rule = DatasetProcessRule(
dataset_id=dataset.id,
mode=ProcessRuleMode.AUTOMATIC,
rules=(
"{"
'"mode": "automatic", '
'"rules": {'
'"pre_processing_rules": [], "segmentation": '
'{"separator": "\\n\\n", "max_tokens": 1000, "chunk_overlap": 50}}'
"}"
),
created_by=str(uuid4()),
)
process_rule.created_by = dataset.created_by
process_rule.updated_by = dataset.updated_by
db_session_with_containers.add(process_rule)
db_session_with_containers.commit()

View File

@ -198,6 +198,48 @@ class TestBuildPromptMessageWithFiles:
assert isinstance(result.content[-1], TextPromptMessageContent)
assert result.content[-1].data == "user text"
def test_replay_does_not_pass_config_to_file_factory(self):
    """History replay must not forward a FileUploadConfig to the file
    factory: replayed files were already validated on upload, and the
    factory's signature no longer accepts ``config``. This pins that
    contract against a future regression that re-introduces the kwarg."""
    conversation = _make_conversation(AppMode.CHAT)
    memory = TokenBufferMemory(conversation=conversation, model_instance=_make_model_instance())

    upload_config = MagicMock()
    upload_config.image_config = None

    image_content = ImagePromptMessageContent(
        url="http://example.com/img.png", format="png", mime_type="image/png"
    )

    app_record = MagicMock()
    app_record.tenant_id = "tenant-1"

    with (
        patch(
            "core.memory.token_buffer_memory.FileUploadConfigManager.convert",
            return_value=upload_config,
        ),
        patch(
            "core.memory.token_buffer_memory.file_factory.build_from_message_file",
            return_value=MagicMock(),
        ) as build_mock,
        patch(
            "core.memory.token_buffer_memory.file_manager.to_prompt_message_content",
            return_value=image_content,
        ),
    ):
        memory._build_prompt_message_with_files(
            message_files=[MagicMock()],
            text_content="user text",
            message=_make_message(),
            app_record=app_record,
            is_user_message=True,
        )

    # Exactly one factory call, and never with a `config` keyword.
    build_mock.assert_called_once()
    assert "config" not in build_mock.call_args.kwargs
@pytest.mark.parametrize("mode", [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.COMPLETION])
def test_chat_mode_with_files_assistant_message(self, mode):
"""When files are present, returns AssistantPromptMessage with list content."""

View File

@ -233,8 +233,6 @@ class TestSegmentTypeAdditionalMethods:
assert SegmentType.GROUP.is_valid([StringSegment(value="b")]) is True
assert SegmentType.GROUP.is_valid(["not-segment"]) is False
def test_unreachable_assertion_branch(self, monkeypatch: pytest.MonkeyPatch):
monkeypatch.setattr(SegmentType, "is_array_type", lambda self: False)
with pytest.raises(AssertionError, match="unreachable"):
SegmentType.ARRAY_STRING.is_valid(["a"])
def test_unreachable_assertion_branch(self):
    # Feeding a value that is not a SegmentType member should trip the
    # assert_never-style guard inside SegmentType.is_valid, whose message
    # is "Expected code to be unreachable" — TODO confirm against the
    # guard helper used by SegmentType.is_valid.
    with pytest.raises(AssertionError, match="Expected code to be unreachable"):
        SegmentType.is_valid("not-a-segment-type", None)  # type: ignore[arg-type]

View File

@ -613,7 +613,7 @@ def test_combine_message_content_with_role_handles_all_supported_roles():
SystemPromptMessage(content=contents)
)
with pytest.raises(NotImplementedError, match="Role custom is not supported"):
with pytest.raises(AssertionError, match="Expected code to be unreachable"):
llm_utils.combine_message_content_with_role(contents=contents, role="custom") # type: ignore[arg-type]

View File

@ -24,7 +24,14 @@ if TYPE_CHECKING: # pragma: no cover - imported for type checking only
class _StubToolRuntime:
def get_runtime(self, *, node_id: str, node_data: Any, variable_pool: Any) -> ToolRuntimeHandle:
def get_runtime(
    self,
    *,
    node_id: str,
    node_data: Any,
    variable_pool: Any,
    node_execution_id: str | None = None,
) -> ToolRuntimeHandle:
    # Stub implementation: mirrors the production keyword-only signature
    # (including the new optional node_execution_id) but is never meant
    # to be invoked — tests exercising this path should patch it out.
    raise NotImplementedError
def get_runtime_parameters(self, *, tool_runtime: ToolRuntimeHandle) -> list[Any]:

View File

@ -7,6 +7,17 @@ from pathlib import Path
def test_moved_core_nodes_resolve_after_importing_production_entrypoints():
api_root = Path(__file__).resolve().parents[4]
# `PYTHONSAFEPATH=1` enables Python's safe-path mode, which suppresses the
# usual implicit insertion of the working directory into `sys.path`.
# Set `PYTHONPATH` explicitly so this subprocess test stays deterministic in
# both CI and local shells that may export `PYTHONSAFEPATH`.
env = os.environ.copy()
existing_pythonpath = env.get("PYTHONPATH")
env["PYTHONPATH"] = (
str(api_root) if not existing_pythonpath else os.pathsep.join([str(api_root), existing_pythonpath])
)
env["PYTHONSAFEPATH"] = "1"
script = textwrap.dedent(
"""
from core.app.apps import workflow_app_runner
@ -34,7 +45,7 @@ def test_moved_core_nodes_resolve_after_importing_production_entrypoints():
completed = subprocess.run(
[sys.executable, "-c", script],
cwd=api_root,
env=os.environ.copy(),
env=env,
capture_output=True,
text=True,
check=False,

View File

@ -0,0 +1,159 @@
"""Unit tests for is_file_valid_with_config."""
from __future__ import annotations
import pytest
from factories.file_factory.validation import is_file_valid_with_config
from graphon.file import FileTransferMethod, FileType, FileUploadConfig
def _validate(
    *,
    input_file_type: str,
    file_extension: str = ".png",
    file_transfer_method: FileTransferMethod = FileTransferMethod.LOCAL_FILE,
    config: FileUploadConfig,
) -> bool:
    """Keyword-only convenience wrapper around is_file_valid_with_config,
    defaulting the extension and transfer method for terser test bodies."""
    call_kwargs = {
        "input_file_type": input_file_type,
        "file_extension": file_extension,
        "file_transfer_method": file_transfer_method,
        "config": config,
    }
    return is_file_valid_with_config(**call_kwargs)
@pytest.mark.parametrize(
    ("input_file_type", "file_extension", "allowed_file_types", "allowed_file_extensions", "expected"),
    [
        # round-1 happy path: literal "custom" mapping, ext whitelisted
        ("custom", ".png", [FileType.CUSTOM], [".png"], True),
        # round-2 replay: MessageFile.type is the resolved type, but config still allows CUSTOM
        ("image", ".png", [FileType.CUSTOM], [".png"], True),
        ("document", ".pdf", [FileType.CUSTOM], [".pdf"], True),
        # mixed bucket [IMAGE, CUSTOM]: document falls into CUSTOM bucket via extension
        ("document", ".pdf", [FileType.IMAGE, FileType.CUSTOM], [".pdf"], True),
        ("document", ".exe", [FileType.IMAGE, FileType.CUSTOM], [".pdf"], False),
        ("image", ".jpg", [FileType.IMAGE], [], True),
        ("video", ".mp4", [FileType.IMAGE, FileType.DOCUMENT], [], False),
        ("custom", ".exe", [FileType.CUSTOM], [".png"], False),
        # empty allowed_file_types == no type restriction
        ("video", ".mp4", [], [], True),
    ],
)
def test_bucket_semantics(input_file_type, file_extension, allowed_file_types, allowed_file_extensions, expected):
    """Each row pins which allowed-types bucket a file resolves into and
    whether the configured extension whitelist accepts it."""
    upload_config = FileUploadConfig(
        allowed_file_types=allowed_file_types,
        allowed_file_extensions=allowed_file_extensions,
    )
    outcome = _validate(
        input_file_type=input_file_type,
        file_extension=file_extension,
        config=upload_config,
    )
    assert outcome is expected
@pytest.mark.parametrize("whitelist_entry", [".png", ".PNG", "png", "PNG", " .Png ", "PnG"])
def test_extension_match_is_case_and_dot_insensitive(whitelist_entry):
    """Every casing / leading-dot / surrounding-whitespace variant of
    'png' in the whitelist must match a '.png' file."""
    upload_config = FileUploadConfig(
        allowed_file_types=[FileType.CUSTOM],
        allowed_file_extensions=[whitelist_entry],
    )
    assert _validate(input_file_type="custom", file_extension=".png", config=upload_config) is True
def test_extension_mismatch_still_rejected_after_normalization():
    """Normalization must not make unrelated extensions match the whitelist."""
    upload_config = FileUploadConfig(
        allowed_file_types=[FileType.CUSTOM],
        allowed_file_extensions=[".png", ".jpg"],
    )
    result = _validate(input_file_type="custom", file_extension=".pdf", config=upload_config)
    assert result is False
def test_mixed_case_whitelist_replicating_real_user_config():
    """A messy real-world whitelist (mixed case, with and without leading
    dots) still accepts every extension it names once normalized."""
    upload_config = FileUploadConfig(
        allowed_file_types=[FileType.CUSTOM],
        allowed_file_extensions=[".PNG", "png", "JPG", ".WEBP", "SVG", "GIF"],
    )
    accepted_extensions = (".png", ".jpg", ".webp", ".svg", ".gif")
    for extension in accepted_extensions:
        assert _validate(input_file_type="custom", file_extension=extension, config=upload_config) is True
def test_tool_file_always_passes():
    """TOOL_FILE transfers bypass type/extension validation entirely, even
    when the config would otherwise reject the file."""
    upload_config = FileUploadConfig(allowed_file_types=[FileType.CUSTOM], allowed_file_extensions=[".pdf"])
    result = _validate(
        input_file_type="image",
        file_extension=".png",
        file_transfer_method=FileTransferMethod.TOOL_FILE,
        config=upload_config,
    )
    assert result is True
def test_transfer_method_gate_for_non_image():
    """Non-image files must also pass the allowed_file_upload_methods gate:
    an otherwise-valid document is rejected when its transfer method is
    not whitelisted."""
    upload_config = FileUploadConfig(
        allowed_file_types=[FileType.DOCUMENT],
        allowed_file_upload_methods=[FileTransferMethod.LOCAL_FILE],
    )
    accepted = _validate(
        input_file_type="document",
        file_extension=".pdf",
        file_transfer_method=FileTransferMethod.LOCAL_FILE,
        config=upload_config,
    )
    rejected = _validate(
        input_file_type="document",
        file_extension=".pdf",
        file_transfer_method=FileTransferMethod.REMOTE_URL,
        config=upload_config,
    )
    assert accepted is True
    assert rejected is False
def test_history_replay_matches_round_1_outcome_under_unchanged_config():
    """A file that passes round 1 must pass history replay when config is unchanged."""
    upload_config = FileUploadConfig(
        allowed_file_types=[FileType.CUSTOM],
        allowed_file_extensions=[".png"],
    )
    # round 1 submits the literal "custom" type; replay submits the resolved type.
    for file_type in ("custom", "image"):
        assert _validate(input_file_type=file_type, file_extension=".png", config=upload_config) is True
def test_empty_whitelist_in_custom_bucket_denies_by_default():
    """Defensive: a file landing in the CUSTOM bucket is rejected when
    allowed_file_extensions is empty. The UI never submits an empty list;
    this guards DSL / API paths that bypass the UI from accidentally
    widening what's accepted."""
    upload_config = FileUploadConfig(
        allowed_file_types=[FileType.CUSTOM],
        allowed_file_extensions=[],
    )
    # Both the literal "custom" type and a resolved concrete type are denied.
    for file_type in ("custom", "image"):
        assert _validate(input_file_type=file_type, file_extension=".png", config=upload_config) is False
def test_normalize_handles_whitespace_and_empty_consistently():
    """Whitespace-only or empty whitelist entries must never match a real
    extension (regression guard for _normalize_extension edge cases)."""
    for noisy_entry in ("", " ", "\t"):
        upload_config = FileUploadConfig(
            allowed_file_types=[FileType.CUSTOM],
            allowed_file_extensions=[noisy_entry],
        )
        assert _validate(input_file_type="custom", file_extension=".png", config=upload_config) is False
def test_empty_extension_does_not_spuriously_match_empty_whitelist_entry():
    """Defensive: even if the whitelist contains an empty / whitespace entry
    (e.g., a stray comma in DSL), an extensionless file must not pass via
    a both-sides-empty match. Real entries in the same whitelist still match."""
    upload_config = FileUploadConfig(
        allowed_file_types=[FileType.CUSTOM],
        allowed_file_extensions=["", ".png"],
    )
    real_extension_ok = _validate(input_file_type="custom", file_extension=".png", config=upload_config)
    empty_extension_ok = _validate(input_file_type="custom", file_extension="", config=upload_config)
    assert real_extension_ok is True
    assert empty_extension_ok is False

View File

@ -847,9 +847,7 @@ class TestDatasetProcessRule:
# Act
process_rule = DatasetProcessRule(
dataset_id=dataset_id,
mode=ProcessRuleMode.AUTOMATIC,
created_by=created_by,
dataset_id=dataset_id, mode=ProcessRuleMode.AUTOMATIC, created_by=created_by, rules=None
)
# Assert

View File

@ -11,6 +11,7 @@ This test suite covers:
import json
import uuid
from types import SimpleNamespace
from typing import Any, cast
from unittest.mock import ANY, MagicMock, Mock, patch, sentinel
@ -2649,7 +2650,12 @@ class TestWorkflowServiceHumanInputOperations:
mock_node = MagicMock()
mock_node.node_data = MagicMock()
mock_node.node_data.user_actions = [
SimpleNamespace(id="submit", title="card_visa_enterprise_001"),
]
mock_node.node_data.outputs_field_names.return_value = ["field1"]
mock_node.render_form_content_before_submission.return_value = "Ticket: {{#$output.field1#}}"
mock_node.render_form_content_with_outputs.return_value = "Ticket: val1"
with (
patch("services.workflow_service.db"),
@ -2665,6 +2671,8 @@ class TestWorkflowServiceHumanInputOperations:
app_model=app_model, account=account, node_id="node-1", form_inputs={"field1": "val1"}, action="submit"
)
assert result["__action_id"] == "submit"
assert result["__action_value"] == "card_visa_enterprise_001"
assert result["__rendered_content"] == "Ticket: val1"
mock_saver_cls.return_value.save.assert_called_once()
def test_test_human_input_delivery_success(self, service: WorkflowService) -> None:

114
api/uv.lock generated
View File

@ -607,16 +607,16 @@ wheels = [
[[package]]
name = "boto3"
version = "1.43.3"
version = "1.43.6"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "botocore" },
{ name = "jmespath" },
{ name = "s3transfer" },
]
sdist = { url = "https://files.pythonhosted.org/packages/f2/50/ea184e159c4ac64fef816a72094fb8656eb071361a39ed22c0e3b15a35b4/boto3-1.43.3.tar.gz", hash = "sha256:7c7777862ffc898f05efa566032bbabfe226dbb810e35ec11125817f128bc5c5", size = 113111, upload-time = "2026-05-04T19:34:09.731Z" }
sdist = { url = "https://files.pythonhosted.org/packages/0a/37/78c630d1308964aa9abf44951d9c4df776546ff37251ec2434944e205c4e/boto3-1.43.6.tar.gz", hash = "sha256:e6315effaf12b890b99956e6f8e2c3000a3f64e4ee91943cec3895ce9a836afb", size = 113153, upload-time = "2026-05-07T20:49:59.694Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c8/ad/8a6946a329f0127322108e537dc1c0d9f8eea4f1d1231702c073d2e85f46/boto3-1.43.3-py3-none-any.whl", hash = "sha256:fb9fe51849ef2a78198d582756fc06f14f7de27f73e0fa90275d6aa4171eb4d0", size = 140501, upload-time = "2026-05-04T19:34:07.991Z" },
{ url = "https://files.pythonhosted.org/packages/c8/e2/3c2eef44f55eafab256836d1d9479bd6a74f70c26cbfdc0639a0e23e4327/boto3-1.43.6-py3-none-any.whl", hash = "sha256:179601ec2992726a718053bf41e43c223ceba397d31ceab11f64d9c910d9fc3a", size = 140502, upload-time = "2026-05-07T20:49:57.8Z" },
]
[[package]]
@ -639,16 +639,16 @@ bedrock-runtime = [
[[package]]
name = "botocore"
version = "1.43.3"
version = "1.43.6"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "jmespath" },
{ name = "python-dateutil" },
{ name = "urllib3" },
]
sdist = { url = "https://files.pythonhosted.org/packages/74/ac/cd55f886e17b6b952dbc95b792d3645a73d58586a1400ababe54406073bd/botocore-1.43.3.tar.gz", hash = "sha256:eac6da0fffccf87888ebf4d89f0b2378218a707efa748cd955b838995e944695", size = 15308705, upload-time = "2026-05-04T19:33:56.28Z" }
sdist = { url = "https://files.pythonhosted.org/packages/79/a7/23d0f5028011455096a1eeac0ddf3cbe147b3e855e127342f8202552194d/botocore-1.43.6.tar.gz", hash = "sha256:b1e395b347356860398da42e61c808cf1e34b6fa7180cf2b9d87d986e1a06ba0", size = 15336070, upload-time = "2026-05-07T20:49:48.14Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/be/99/1d9e296edf244f47e0508032f20999f8fd40704dd3c5b601fed099424eb6/botocore-1.43.3-py3-none-any.whl", hash = "sha256:ec0769eb0f7c5034856bb406a92698dbc02a3d4be0f78a384747106b161d8ea3", size = 14989027, upload-time = "2026-05-04T19:33:50.81Z" },
{ url = "https://files.pythonhosted.org/packages/e5/c8/6f47223840e8d8cfa8c9f7c0ec1b77970417f257fc885169ff4f6326ce09/botocore-1.43.6-py3-none-any.whl", hash = "sha256:b6d1fdbc6f65a5fe0b7e947823aa37535d3f39f3ba4d21110fab1f55bbbcc04b", size = 15017094, upload-time = "2026-05-07T20:49:44.964Z" },
]
[[package]]
@ -1581,7 +1581,7 @@ requires-dist = [
{ name = "aliyun-log-python-sdk", specifier = ">=0.9.44,<1.0.0" },
{ name = "azure-identity", specifier = ">=1.25.3,<2.0.0" },
{ name = "bleach", specifier = ">=6.3.0" },
{ name = "boto3", specifier = ">=1.43.3" },
{ name = "boto3", specifier = ">=1.43.6" },
{ name = "celery", specifier = ">=5.6.3" },
{ name = "croniter", specifier = ">=6.2.2" },
{ name = "fastopenapi", extras = ["flask"], specifier = "~=0.7.0" },
@ -1595,10 +1595,10 @@ requires-dist = [
{ name = "gevent", specifier = ">=26.4.0" },
{ name = "gevent-websocket", specifier = ">=0.10.1" },
{ name = "gmpy2", specifier = ">=2.3.0" },
{ name = "google-api-python-client", specifier = ">=2.195.0" },
{ name = "google-cloud-aiplatform", specifier = ">=1.149.0,<2.0.0" },
{ name = "graphon", specifier = "~=0.3.0" },
{ name = "gunicorn", specifier = ">=25.3.0" },
{ name = "google-api-python-client", specifier = ">=2.196.0" },
{ name = "google-cloud-aiplatform", specifier = ">=1.151.0,<2.0.0" },
{ name = "graphon", specifier = "~=0.3.1" },
{ name = "gunicorn", specifier = ">=26.0.0" },
{ name = "httpx", extras = ["socks"], specifier = ">=0.28.1,<1.0.0" },
{ name = "httpx-sse", specifier = "~=0.4.0" },
{ name = "json-repair", specifier = "~=0.59.4" },
@ -1692,7 +1692,7 @@ storage = [
{ name = "google-cloud-storage", specifier = ">=3.10.1" },
{ name = "opendal", specifier = ">=0.46.0" },
{ name = "oss2", specifier = ">=2.19.1" },
{ name = "supabase", specifier = ">=2.29.0" },
{ name = "supabase", specifier = ">=2.30.0" },
{ name = "tos", specifier = ">=2.9.0" },
]
tools = [
@ -2722,7 +2722,7 @@ grpc = [
[[package]]
name = "google-api-python-client"
version = "2.195.0"
version = "2.196.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "google-api-core" },
@ -2731,9 +2731,9 @@ dependencies = [
{ name = "httplib2" },
{ name = "uritemplate" },
]
sdist = { url = "https://files.pythonhosted.org/packages/69/07/08d759b9cb10f48af14b25262dd0d6685ca8cda6c1f9e8a8109f57457205/google_api_python_client-2.195.0.tar.gz", hash = "sha256:c72cf2661c3addf01c880ce60541e83e1df354644b874f7f9d8d5ed2070446ae", size = 14584819, upload-time = "2026-04-30T21:51:50.638Z" }
sdist = { url = "https://files.pythonhosted.org/packages/6d/f3/34ef8aca7909675fe327f96c1ed927f0520e7acf68af19157e96acc05e76/google_api_python_client-2.196.0.tar.gz", hash = "sha256:9f335d38f6caaa2747bcf64335ed1a9a19047d53e86538eda6a1b17d37f1743d", size = 14628129, upload-time = "2026-05-06T23:47:35.655Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/21/b9/2c71095e31fff57668fec7c07ac897df065f15521d070e63229e13689590/google_api_python_client-2.195.0-py3-none-any.whl", hash = "sha256:753e62057f23049a89534bea0162b60fe391b85fb86d80bcdf884d05ec91c5bf", size = 15162418, upload-time = "2026-04-30T21:51:47.444Z" },
{ url = "https://files.pythonhosted.org/packages/99/c7/1817b4edf966d5afcac1c0781ca36d621bc0cb58104c4e7c2a475ab185f7/google_api_python_client-2.196.0-py3-none-any.whl", hash = "sha256:2591e9b47dcb17e4e62a09370aaee3bcf323af8f28ccecdabcd0a42a23ca4db5", size = 15206663, upload-time = "2026-05-06T23:47:32.886Z" },
]
[[package]]
@ -2769,7 +2769,7 @@ wheels = [
[[package]]
name = "google-cloud-aiplatform"
version = "1.149.0"
version = "1.151.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "docstring-parser" },
@ -2785,9 +2785,9 @@ dependencies = [
{ name = "pydantic" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/42/2c/fba4adc56f74c0ee0fbd91a39d414ca2c3588dd8b71f9be8a507015ca886/google_cloud_aiplatform-1.149.0.tar.gz", hash = "sha256:a4d73485bf1d727a9e1bbbd13d08d7031490686bbf7d125eb905c1a6c1559a35", size = 10451466, upload-time = "2026-04-27T23:11:54.513Z" }
sdist = { url = "https://files.pythonhosted.org/packages/ec/f6/e2fbe175a011f5080da8c1f7d9169a6875a00ea2c7bee4193d952b097400/google_cloud_aiplatform-1.151.0.tar.gz", hash = "sha256:2f29b1853f790a7371a746c747bf1f664380b534254682441acd4b5ee26fafd2", size = 10617421, upload-time = "2026-05-07T21:56:52.91Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/bf/a0/27719ba23967ef62e52a1d54e013e0fc174bdab8dd84fb300bab9bf0d4a3/google_cloud_aiplatform-1.149.0-py2.py3-none-any.whl", hash = "sha256:e6b5299fa5d303e971cb29a19f03fdbb7b1e3b9d2faa3a788ca933341fba2f2e", size = 8570410, upload-time = "2026-04-27T23:11:50.495Z" },
{ url = "https://files.pythonhosted.org/packages/f6/4a/cd35f8ba622d563b1335222284d2838aa789b953b40516b1b997e50fe5b6/google_cloud_aiplatform-1.151.0-py2.py3-none-any.whl", hash = "sha256:61372bb0923b14b8027f45b83393452df3a85bf4ea86fa48e08844fb5ec50049", size = 8732627, upload-time = "2026-05-07T21:56:49.014Z" },
]
[[package]]
@ -2940,7 +2940,7 @@ httpx = [
[[package]]
name = "graphon"
version = "0.3.0"
version = "0.3.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "charset-normalizer" },
@ -2961,9 +2961,9 @@ dependencies = [
{ name = "unstructured", extra = ["docx", "epub", "md", "ppt", "pptx"] },
{ name = "webvtt-py" },
]
sdist = { url = "https://files.pythonhosted.org/packages/bf/62/83593d6e7a139ff124711ea05882cadca7065c11a38763aa9360d7e76804/graphon-0.3.0.tar.gz", hash = "sha256:cd38f842ae3dcfa956428b952efbe2a3ea9c1581446647142accbbdeb638b876", size = 241176, upload-time = "2026-04-21T15:18:48.291Z" }
sdist = { url = "https://files.pythonhosted.org/packages/5a/ef/43217842e84160acca64a95858f1689389a50e04a53fc94f2aa836b4eaf7/graphon-0.3.1.tar.gz", hash = "sha256:49971baed1eb16c8e1983f755e659902e4f117a68dc62fad19e91472950b937d", size = 242210, upload-time = "2026-05-07T06:58:21.879Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b3/f7/81ee8f0368aa6a2d47f97fecc5d4a12865c987906798cbddd0e3b8387f33/graphon-0.3.0-py3-none-any.whl", hash = "sha256:9cca45ebab2a79fd4d04432f55b5b962e9e4f34fa037cc20fee7f18ec80eaa5d", size = 348486, upload-time = "2026-04-21T15:18:46.737Z" },
{ url = "https://files.pythonhosted.org/packages/62/37/bef16ed3d6da7446b36769fa388f4dc79f95337ffa16d6dfc3177152507e/graphon-0.3.1-py3-none-any.whl", hash = "sha256:e6422c7e3f1ce7d2185979c17e08201816ca25d46d400ebdd035c95d501c04fe", size = 349368, upload-time = "2026-05-07T06:58:20.217Z" },
]
[[package]]
@ -3102,14 +3102,14 @@ wheels = [
[[package]]
name = "gunicorn"
version = "25.3.0"
version = "26.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "packaging" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c4/f4/e78fa054248fab913e2eab0332c6c2cb07421fca1ce56d8fe43b6aef57a4/gunicorn-25.3.0.tar.gz", hash = "sha256:f74e1b2f9f76f6cd1ca01198968bd2dd65830edc24b6e8e4d78de8320e2fe889", size = 634883, upload-time = "2026-03-27T00:00:26.092Z" }
sdist = { url = "https://files.pythonhosted.org/packages/6d/b7/a4a3f632f823e432ce6bc65f62961b7980c898c77f075a2f7118cb3846fe/gunicorn-26.0.0.tar.gz", hash = "sha256:ca9346f85e3a4aeeb64d491045c16b9a35647abd37ea15efe53080eb8b090baf", size = 727286, upload-time = "2026-05-05T06:38:25.529Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/43/c8/8aaf447698c4d59aa853fd318eed300b5c9e44459f242ab8ead6c9c09792/gunicorn-25.3.0-py3-none-any.whl", hash = "sha256:cacea387dab08cd6776501621c295a904fe8e3b7aae9a1a3cbb26f4e7ed54660", size = 208403, upload-time = "2026-03-27T00:00:27.386Z" },
{ url = "https://files.pythonhosted.org/packages/e6/40/9c2384fc2be4ad25dd4a49decd5ad9ea5a3639814c11bd40ab77cb9f0a14/gunicorn-26.0.0-py3-none-any.whl", hash = "sha256:40233d26a5f0d1872916188c276e21641155111c2853f0c2cd55260aec0d24fc", size = 212009, upload-time = "2026-05-05T06:38:23.007Z" },
]
[[package]]
@ -4265,32 +4265,32 @@ wheels = [
[[package]]
name = "opentelemetry-exporter-otlp"
version = "1.41.0"
version = "1.41.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "opentelemetry-exporter-otlp-proto-grpc" },
{ name = "opentelemetry-exporter-otlp-proto-http" },
]
sdist = { url = "https://files.pythonhosted.org/packages/65/b7/845565a2ab5d22c1486bc7729a06b05cd0964c61539d766e1f107c9eea0c/opentelemetry_exporter_otlp-1.41.0.tar.gz", hash = "sha256:97ff847321f8d4c919032a67d20d3137fb7b34eac0c47f13f71112858927fc5b", size = 6152, upload-time = "2026-04-09T14:38:35.895Z" }
sdist = { url = "https://files.pythonhosted.org/packages/42/84/d55baf8e1a222f40282956083e67de9fa92d5fa451108df4839505fa2a24/opentelemetry_exporter_otlp-1.41.1.tar.gz", hash = "sha256:299a2f0541ca175df186f5ac58fd5db177ba1e9b72b0826049062f750d55b47f", size = 6152, upload-time = "2026-04-24T13:15:40.006Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e0/f2/f1076fff152858773f22cda146713f9ae3661795af6bacd411a76f2151ac/opentelemetry_exporter_otlp-1.41.0-py3-none-any.whl", hash = "sha256:443b6a45c990ae4c55e147f97049a86c5f5b704f3d78b48b44a073a886ec4d6e", size = 7022, upload-time = "2026-04-09T14:38:13.934Z" },
{ url = "https://files.pythonhosted.org/packages/6d/d5/ea4aa7dfc458fd537bd9519ea0e7226eef2a6212dfe952694984167daaba/opentelemetry_exporter_otlp-1.41.1-py3-none-any.whl", hash = "sha256:db276c5a80c02b063994e80950d00ca1bfddcf6520f608335b7dc2db0c0eb9c6", size = 7025, upload-time = "2026-04-24T13:15:17.839Z" },
]
[[package]]
name = "opentelemetry-exporter-otlp-proto-common"
version = "1.41.0"
version = "1.41.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "opentelemetry-proto" },
]
sdist = { url = "https://files.pythonhosted.org/packages/8c/28/e8eca94966fe9a1465f6094dc5ddc5398473682180279c94020bc23b4906/opentelemetry_exporter_otlp_proto_common-1.41.0.tar.gz", hash = "sha256:966bbce537e9edb166154779a7c4f8ab6b8654a03a28024aeaf1a3eacb07d6ee", size = 20411, upload-time = "2026-04-09T14:38:36.572Z" }
sdist = { url = "https://files.pythonhosted.org/packages/ae/fa/f9e3bd3c4d692b3ce9a2880a167d1f79681a1bea11f00d5bf76adc03e6ea/opentelemetry_exporter_otlp_proto_common-1.41.1.tar.gz", hash = "sha256:0e253156ea9c36b0bd3d2440c5c9ba7dd1f3fb64ba7a08fc85fbac536b56e1fb", size = 20409, upload-time = "2026-04-24T13:15:40.924Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/26/c4/78b9bf2d9c1d5e494f44932988d9d91c51a66b9a7b48adf99b62f7c65318/opentelemetry_exporter_otlp_proto_common-1.41.0-py3-none-any.whl", hash = "sha256:7a99177bf61f85f4f9ed2072f54d676364719c066f6d11f515acc6c745c7acf0", size = 18366, upload-time = "2026-04-09T14:38:15.135Z" },
{ url = "https://files.pythonhosted.org/packages/29/48/bce76d3ea772b609757e9bc844e02ab408a6446609bf74fb562062ba6b71/opentelemetry_exporter_otlp_proto_common-1.41.1-py3-none-any.whl", hash = "sha256:10da74dad6a49344b9b7b21b6182e3060373a235fde1528616d5f01f92e66aa9", size = 18366, upload-time = "2026-04-24T13:15:18.917Z" },
]
[[package]]
name = "opentelemetry-exporter-otlp-proto-grpc"
version = "1.41.0"
version = "1.41.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "googleapis-common-protos" },
@ -4301,14 +4301,14 @@ dependencies = [
{ name = "opentelemetry-sdk" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/42/46/d75a3f8c91915f2e58f61d0a2e4ada63891e7c7a37a20ff7949ba184a6b2/opentelemetry_exporter_otlp_proto_grpc-1.41.0.tar.gz", hash = "sha256:f704201251c6f65772b11bddea1c948000554459101bdbb0116e0a01b70592f6", size = 25754, upload-time = "2026-04-09T14:38:37.423Z" }
sdist = { url = "https://files.pythonhosted.org/packages/1e/9b/e4503060b8695579dbaad187dc8cef4554188de68748c88060599b77489e/opentelemetry_exporter_otlp_proto_grpc-1.41.1.tar.gz", hash = "sha256:b05df8fa1333dc9a3fda36b676b96b5095ab6016d3f0c3296d430d629ba1443b", size = 25755, upload-time = "2026-04-24T13:15:41.93Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/81/f6/b09e2e0c9f0b5750cebc6eaf31527b910821453cef40a5a0fe93550422b2/opentelemetry_exporter_otlp_proto_grpc-1.41.0-py3-none-any.whl", hash = "sha256:3a1a86bd24806ccf136ec9737dbfa4c09b069f9130ff66b0acb014f9c5255fd1", size = 20299, upload-time = "2026-04-09T14:38:17.01Z" },
{ url = "https://files.pythonhosted.org/packages/ac/f2/c54f33c92443d087703e57e52e55f22f111373a5c4c4aa349ea60efe512e/opentelemetry_exporter_otlp_proto_grpc-1.41.1-py3-none-any.whl", hash = "sha256:537926dcef951136992479af1d9cd88f25e33d56c530e9f020ed57774dca2f94", size = 20297, upload-time = "2026-04-24T13:15:20.212Z" },
]
[[package]]
name = "opentelemetry-exporter-otlp-proto-http"
version = "1.41.0"
version = "1.41.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "googleapis-common-protos" },
@ -4319,9 +4319,9 @@ dependencies = [
{ name = "requests" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/19/63/d9f43cd75f3fabb7e01148c89cfa9491fc18f6580a6764c554ff7c953c46/opentelemetry_exporter_otlp_proto_http-1.41.0.tar.gz", hash = "sha256:dcd6e0686f56277db4eecbadd5262124e8f2cc739cadbc3fae3d08a12c976cf5", size = 24139, upload-time = "2026-04-09T14:38:38.128Z" }
sdist = { url = "https://files.pythonhosted.org/packages/33/5b/9d3c7f70cca10136ba82a81e738dee626c8e7fc61c6887ea9a58bf34c606/opentelemetry_exporter_otlp_proto_http-1.41.1.tar.gz", hash = "sha256:4747a9604c8550ab38c6fd6180e2fcb80de3267060bef2c306bad3cb443302bc", size = 24139, upload-time = "2026-04-24T13:15:42.977Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/64/b5/a214cd907eedc17699d1c2d602288ae17cb775526df04db3a3b3585329d2/opentelemetry_exporter_otlp_proto_http-1.41.0-py3-none-any.whl", hash = "sha256:a9c4ee69cce9c3f4d7ee736ad1b44e3c9654002c0816900abbafd9f3cf289751", size = 22673, upload-time = "2026-04-09T14:38:18.349Z" },
{ url = "https://files.pythonhosted.org/packages/ba/4d/ef07ff2fc630849f2080ae0ae73a61f67257905b7ac79066640bfa0c5739/opentelemetry_exporter_otlp_proto_http-1.41.1-py3-none-any.whl", hash = "sha256:1a21e8f49c7a946d935551e90947d6c3eb39236723c6624401da0f33d68edcb4", size = 22673, upload-time = "2026-04-24T13:15:21.313Z" },
]
[[package]]
@ -4479,14 +4479,14 @@ wheels = [
[[package]]
name = "opentelemetry-proto"
version = "1.41.0"
version = "1.41.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "protobuf" },
]
sdist = { url = "https://files.pythonhosted.org/packages/e0/d9/08e3dc6156878713e8c811682bc76151f5fe1a3cb7f3abda3966fd56e71e/opentelemetry_proto-1.41.0.tar.gz", hash = "sha256:95d2e576f9fb1800473a3e4cfcca054295d06bdb869fda4dc9f4f779dc68f7b6", size = 45669, upload-time = "2026-04-09T14:38:45.978Z" }
sdist = { url = "https://files.pythonhosted.org/packages/99/e8/633c6d8a9c8840338b105907e55c32d3da1983abab5e52f899f72a82c3d1/opentelemetry_proto-1.41.1.tar.gz", hash = "sha256:4b9d2eb631237ea43b80e16c073af438554e32bc7e9e3f8ca4a9582f900020e5", size = 45670, upload-time = "2026-04-24T13:15:49.768Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/49/8c/65ef7a9383a363864772022e822b5d5c6988e6f9dabeebb9278f5b86ebc3/opentelemetry_proto-1.41.0-py3-none-any.whl", hash = "sha256:b970ab537309f9eed296be482c3e7cca05d8aca8165346e929f658dbe153b247", size = 72074, upload-time = "2026-04-09T14:38:29.38Z" },
{ url = "https://files.pythonhosted.org/packages/e4/1e/5cd77035e3e82070e2265a63a760f715aacd3cb16dddc7efee913f297fcc/opentelemetry_proto-1.41.1-py3-none-any.whl", hash = "sha256:0496713b804d127a4147e32849fbaf5683fac8ee98550e8e7679cd706c289720", size = 72076, upload-time = "2026-04-24T13:15:32.542Z" },
]
[[package]]
@ -4813,7 +4813,7 @@ wheels = [
[[package]]
name = "postgrest"
version = "2.29.0"
version = "2.30.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "deprecation" },
@ -4821,9 +4821,9 @@ dependencies = [
{ name = "pydantic" },
{ name = "yarl" },
]
sdist = { url = "https://files.pythonhosted.org/packages/52/98/f216b8b5c4d116ab6a2fb21339b5821da279ee773e163612418e1c56c012/postgrest-2.29.0.tar.gz", hash = "sha256:a87081858f627fcd57e8e7137004a1ef0adbdf0dbdfed1384e9ea1d7a9c525ec", size = 14217, upload-time = "2026-04-24T13:13:00.281Z" }
sdist = { url = "https://files.pythonhosted.org/packages/56/7c/54e7be05adc9fd6fd98dc572ddfc8982d45bec314a55711e37277d440698/postgrest-2.30.0.tar.gz", hash = "sha256:4f89eec56ce605ab6fbddd9b96d526a9bb44962796d44a5d85cb77640eb766c3", size = 14430, upload-time = "2026-05-06T17:35:21.559Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/2c/0b/08b670a93a90d625c557b9e64b8a5fdeec80c3542d2d0265f0b4d6b16646/postgrest-2.29.0-py3-none-any.whl", hash = "sha256:3ee48e146f726272733d20e2b12de354cdb6cb9dd9cc3a61ed97ce69047aeb96", size = 22735, upload-time = "2026-04-24T13:12:58.405Z" },
{ url = "https://files.pythonhosted.org/packages/22/aa/ff2e09f99f95ea96fddeb373646bf907dd89a24fc00b5d38e5674ca7c9ca/postgrest-2.30.0-py3-none-any.whl", hash = "sha256:30631e7993da542419f4217cf3b60aa641084731ea15e66a18526a3a52e40a7d", size = 23108, upload-time = "2026-05-06T17:35:20.531Z" },
]
[[package]]
@ -5726,16 +5726,16 @@ wheels = [
[[package]]
name = "realtime"
version = "2.29.0"
version = "2.30.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pydantic" },
{ name = "typing-extensions" },
{ name = "websockets" },
]
sdist = { url = "https://files.pythonhosted.org/packages/6e/f1/08c42a42653942fadfbef495d5b0239356140e7186cc528704956c5f06d4/realtime-2.29.0.tar.gz", hash = "sha256:8efe4a1b3a548a5fda09de701bd041fa0970c5a2fe7d13db0b9861ce11828be2", size = 18715, upload-time = "2026-04-24T13:13:02.315Z" }
sdist = { url = "https://files.pythonhosted.org/packages/b2/a2/0328d49d3b5fb427068e9200e7de5b0d708d021a1ad98d004bc685d2529e/realtime-2.30.0.tar.gz", hash = "sha256:7aa593da52ed5f92c34ec4e50e32043afa62f219c94f717ad64a66ab0ef9f1ba", size = 18718, upload-time = "2026-05-06T17:35:23.925Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/77/48/f6375c0a24923beb988f0c71c052604c96641cf43c2d22b91ec1df86afa0/realtime-2.29.0-py3-none-any.whl", hash = "sha256:1a4891e6c82e88ac9d96ac715e435e086f6f8c7665212a8717346de829cbb509", size = 22374, upload-time = "2026-04-24T13:13:01.103Z" },
{ url = "https://files.pythonhosted.org/packages/b4/75/1b2cfc949595e22d8c05a2aa2cfc222921f7f94177d7e8a90542f3f73b33/realtime-2.30.0-py3-none-any.whl", hash = "sha256:7c93b63d2cf99aa1da4fa8826b03b00cd32f7b38abb27ff47b19eb5dcb5707c6", size = 22376, upload-time = "2026-05-06T17:35:22.568Z" },
]
[[package]]
@ -6217,7 +6217,7 @@ wheels = [
[[package]]
name = "storage3"
version = "2.29.0"
version = "2.30.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "deprecation" },
@ -6226,9 +6226,9 @@ dependencies = [
{ name = "pyiceberg" },
{ name = "yarl" },
]
sdist = { url = "https://files.pythonhosted.org/packages/d7/be/771246434b5caf3c6187bfdc932eaede00bf5f2937b47475ab25209ede3e/storage3-2.29.0.tar.gz", hash = "sha256:b0cc2f6714655d725c998d2c5ae8c6fb4f56a513bd31e4f85770df557fe021e3", size = 20160, upload-time = "2026-04-24T13:13:04.626Z" }
sdist = { url = "https://files.pythonhosted.org/packages/9b/b2/6df208d64630744704d00f2c07197170390d6b4d0098617740f6a7a4fa98/storage3-2.30.0.tar.gz", hash = "sha256:b74e3cac149f2c0553dcb5f4d55d8c35d420d88183a1a2df77727d482665972b", size = 20162, upload-time = "2026-05-06T17:35:25.71Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/fc/c3/790c31866f52c13b26f108b45759bf50dafae3a0bafb4511fadc98ba7c33/storage3-2.29.0-py3-none-any.whl", hash = "sha256:043ef7ff27cc8b9da12be403cf78ee4586180edfcf62b227ff61e1bd79594b06", size = 28284, upload-time = "2026-04-24T13:13:03.338Z" },
{ url = "https://files.pythonhosted.org/packages/91/5c/bb8c8cc448cfae671c4ffee67f3651892ea59b341f27bed54666190eb8ef/storage3-2.30.0-py3-none-any.whl", hash = "sha256:2bd23a34011c018bd9c130d8a70a09ebd060ae80d946c6204a6fc08161ad728d", size = 28284, upload-time = "2026-05-06T17:35:24.659Z" },
]
[[package]]
@ -6254,7 +6254,7 @@ wheels = [
[[package]]
name = "supabase"
version = "2.29.0"
version = "2.30.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "httpx" },
@ -6265,37 +6265,37 @@ dependencies = [
{ name = "supabase-functions" },
{ name = "yarl" },
]
sdist = { url = "https://files.pythonhosted.org/packages/51/a0/2407d616fdf68e8632bbbfb063d1685c38377ac0199e8ca11deaea1f3bf0/supabase-2.29.0.tar.gz", hash = "sha256:a88c4a4eb50fbb903e2e962fbc7c27733b00589140139f9e837bc9fe30dd3615", size = 9689, upload-time = "2026-04-24T13:13:06.728Z" }
sdist = { url = "https://files.pythonhosted.org/packages/5c/a6/d2b17021c2db1a9d219c383e0762ac03a62b25468e61ab126b6b561c2f21/supabase-2.30.0.tar.gz", hash = "sha256:efdba41d474038ed220736ba4e64946df56043057ad785c4c3499d27e459975c", size = 9689, upload-time = "2026-05-06T17:35:27.781Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/22/52/232f6bbf5326e04ae12e2ef04a24f011a0d7cab379a8b9698652bc8ff78f/supabase-2.29.0-py3-none-any.whl", hash = "sha256:16c3ec4b7094f6b92efc5cd3bb3f96826d3b6dd5d24fe15c89c81166efce88fe", size = 16633, upload-time = "2026-04-24T13:13:05.722Z" },
{ url = "https://files.pythonhosted.org/packages/f0/82/d213be7d0ce0bb18018744c0ee38ba0d6648d41dbc46ac8558cffe80541f/supabase-2.30.0-py3-none-any.whl", hash = "sha256:f9b259194554f7bfd2dca6c23261f2df588016ca18b18e774f4d85bc941edb03", size = 16634, upload-time = "2026-05-06T17:35:26.696Z" },
]
[[package]]
name = "supabase-auth"
version = "2.29.0"
version = "2.30.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "httpx", extra = ["http2"] },
{ name = "pydantic" },
{ name = "pyjwt", extra = ["crypto"] },
]
sdist = { url = "https://files.pythonhosted.org/packages/51/7f/7ceeb4c7a2caa188062e934897f0e08e1af0a0e47e376c7645c26b4c39d8/supabase_auth-2.29.0.tar.gz", hash = "sha256:46efc6a3455a23957b846dc974303a844ba0413718cfa899425477ac977f95b3", size = 39154, upload-time = "2026-04-24T13:13:08.509Z" }
sdist = { url = "https://files.pythonhosted.org/packages/8e/8a/48bbbe0b6703d0670b67e45b90d6a791fd01aace67443d286f760bf48895/supabase_auth-2.30.0.tar.gz", hash = "sha256:6138a53a306a95ed59c03d4e4975469dfc3343a0ade33cc4b37e4ef967ad83f8", size = 39135, upload-time = "2026-05-06T17:35:30.371Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/f1/ac/3c35cf52281f940b9497cf17abfc5c2050ca49f342d60cfafe22dac3482b/supabase_auth-2.29.0-py3-none-any.whl", hash = "sha256:64de6ef8cae80f97d3aa8d5ca507d5427dda5c89885c0bcfe9f8b0263b6fb9a4", size = 48379, upload-time = "2026-04-24T13:13:07.417Z" },
{ url = "https://files.pythonhosted.org/packages/db/40/a99cb4373353bcbf302d962e51da9eac78b3b0f257eb0362c0852b1667f4/supabase_auth-2.30.0-py3-none-any.whl", hash = "sha256:e85e1f51ec0de2172c3a2a8514205f71731a9914f9a770ed199ac0cf054bc82c", size = 48352, upload-time = "2026-05-06T17:35:28.936Z" },
]
[[package]]
name = "supabase-functions"
version = "2.29.0"
version = "2.30.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "httpx", extra = ["http2"] },
{ name = "strenum" },
{ name = "yarl" },
]
sdist = { url = "https://files.pythonhosted.org/packages/e5/19/1a1d22749f38f2a6cbca93a6f5a35c9f816c2c3c06bfaa077fa336e90537/supabase_functions-2.29.0.tar.gz", hash = "sha256:0f8a14a2ea9f12b1c208f61dc6f55e2f4b1121f81bf01c08f9b487d22888744d", size = 4683, upload-time = "2026-04-24T13:13:10.432Z" }
sdist = { url = "https://files.pythonhosted.org/packages/f0/e6/5cd8559ec2bb332e6027840c1be292f9989c2fc7b47bf40800aec5586791/supabase_functions-2.30.0.tar.gz", hash = "sha256:025acfd25f1c000ba43d0f7b8e366b0d2e9dfc784b842528e21973eb33006113", size = 4683, upload-time = "2026-05-06T17:35:32.246Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e0/10/6f8ef0b408ade76b5a439afab588ce5849e9604a23040ca73cfe0b90cb9e/supabase_functions-2.29.0-py3-none-any.whl", hash = "sha256:6f08de52eec5820eae53616868b85e849e181beffaa5d05b8ea1708ceae5e48e", size = 8799, upload-time = "2026-04-24T13:13:09.214Z" },
{ url = "https://files.pythonhosted.org/packages/53/da/9dedab32775df04cc22ca72f194b78e895d940f195bed3e02882a65daa9b/supabase_functions-2.30.0-py3-none-any.whl", hash = "sha256:92419459f102767b954cd034856e4ded8e34c78660b32442d66c8b2899c68011", size = 8803, upload-time = "2026-05-06T17:35:31.342Z" },
]
[[package]]

View File

@ -15,7 +15,7 @@ mkdir -p "${OPENDAL_FS_ROOT}"
# Prepare env files like CI
cp -n docker/.env.example docker/.env || true
cp -n docker/middleware.env.example docker/middleware.env || true
cp -n docker/envs/middleware.env.example docker/middleware.env || true
cp -n api/tests/integration_tests/.env.example api/tests/integration_tests/.env || true
# Expose service ports (same as CI) without leaving the repo dirty

View File

@ -8,7 +8,7 @@ API_ENV_EXAMPLE="$ROOT/api/.env.example"
API_ENV="$ROOT/api/.env"
WEB_ENV_EXAMPLE="$ROOT/web/.env.example"
WEB_ENV="$ROOT/web/.env.local"
MIDDLEWARE_ENV_EXAMPLE="$ROOT/docker/middleware.env.example"
MIDDLEWARE_ENV_EXAMPLE="$ROOT/docker/envs/middleware.env.example"
MIDDLEWARE_ENV="$ROOT/docker/middleware.env"
# 1) Copy api/.env.example -> api/.env
@ -17,7 +17,7 @@ cp "$API_ENV_EXAMPLE" "$API_ENV"
# 2) Copy web/.env.example -> web/.env.local
cp "$WEB_ENV_EXAMPLE" "$WEB_ENV"
# 3) Copy docker/middleware.env.example -> docker/middleware.env
# 3) Copy docker/envs/middleware.env.example -> docker/middleware.env
cp "$MIDDLEWARE_ENV_EXAMPLE" "$MIDDLEWARE_ENV"
# 4) Install deps

View File

@ -34,7 +34,8 @@ CHECK_UPDATE_URL=https://updates.dify.ai
OPENAI_API_BASE=https://api.openai.com/v1
MIGRATION_ENABLED=true
FILES_ACCESS_TIMEOUT=300
ENABLE_COLLABORATION_MODE=false
# Remove `collaboration` from COMPOSE_PROFILES to stop the dedicated websocket service.
ENABLE_COLLABORATION_MODE=true
# Logging and server workers
LOG_LEVEL=INFO
@ -52,6 +53,9 @@ DIFY_PORT=5001
SERVER_WORKER_AMOUNT=1
SERVER_WORKER_CLASS=gevent
SERVER_WORKER_CONNECTIONS=10
API_WEBSOCKET_WORKER_CLASS=geventwebsocket.gunicorn.workers.GeventWebSocketWorker
API_WEBSOCKET_WORKER_CONNECTIONS=1000
API_WEBSOCKET_GUNICORN_TIMEOUT=360
GUNICORN_TIMEOUT=360
CELERY_WORKER_CLASS=
CELERY_WORKER_AMOUNT=4
@ -246,6 +250,7 @@ NGINX_KEEPALIVE_TIMEOUT=65
NGINX_PROXY_READ_TIMEOUT=3600s
NGINX_PROXY_SEND_TIMEOUT=3600s
NGINX_ENABLE_CERTBOT_CHALLENGE=false
NGINX_SOCKET_IO_UPSTREAM=api_websocket:5001
EXPOSE_NGINX_PORT=80
EXPOSE_NGINX_SSL_PORT=443
COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql}
COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql},collaboration

View File

@ -261,6 +261,31 @@ services:
- ssrf_proxy_network
- default
# WebSocket service for workflow collaboration.
api_websocket:
<<: *shared-api-worker-config
image: langgenius/dify-api:1.14.0
profiles:
- collaboration
environment:
MODE: api
SERVER_WORKER_AMOUNT: 1
SERVER_WORKER_CLASS: ${API_WEBSOCKET_WORKER_CLASS:-geventwebsocket.gunicorn.workers.GeventWebSocketWorker}
SERVER_WORKER_CONNECTIONS: ${API_WEBSOCKET_WORKER_CONNECTIONS:-1000}
GUNICORN_TIMEOUT: ${API_WEBSOCKET_GUNICORN_TIMEOUT:-360}
depends_on:
db_postgres:
condition: service_healthy
required: false
db_mysql:
condition: service_healthy
required: false
redis:
condition: service_started
networks:
- ssrf_proxy_network
- default
# worker service
# The Celery worker for processing all queues (dataset, workflow, mail, etc.)
worker:
@ -661,6 +686,7 @@ services:
NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
NGINX_SOCKET_IO_UPSTREAM: ${NGINX_SOCKET_IO_UPSTREAM:-api_websocket:5001}
CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
depends_on:
- api

View File

@ -267,6 +267,31 @@ services:
- ssrf_proxy_network
- default
# WebSocket service for workflow collaboration.
api_websocket:
<<: *shared-api-worker-config
image: langgenius/dify-api:1.14.0
profiles:
- collaboration
environment:
MODE: api
SERVER_WORKER_AMOUNT: 1
SERVER_WORKER_CLASS: ${API_WEBSOCKET_WORKER_CLASS:-geventwebsocket.gunicorn.workers.GeventWebSocketWorker}
SERVER_WORKER_CONNECTIONS: ${API_WEBSOCKET_WORKER_CONNECTIONS:-1000}
GUNICORN_TIMEOUT: ${API_WEBSOCKET_GUNICORN_TIMEOUT:-360}
depends_on:
db_postgres:
condition: service_healthy
required: false
db_mysql:
condition: service_healthy
required: false
redis:
condition: service_started
networks:
- ssrf_proxy_network
- default
# worker service
# The Celery worker for processing all queues (dataset, workflow, mail, etc.)
worker:
@ -667,6 +692,7 @@ services:
NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
NGINX_SOCKET_IO_UPSTREAM: ${NGINX_SOCKET_IO_UPSTREAM:-api_websocket:5001}
CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
depends_on:
- api

View File

@ -16,7 +16,8 @@ CHECK_UPDATE_URL=https://updates.dify.ai
OPENAI_API_BASE=https://api.openai.com/v1
MIGRATION_ENABLED=true
FILES_ACCESS_TIMEOUT=300
ENABLE_COLLABORATION_MODE=false
# Remove `collaboration` from COMPOSE_PROFILES to stop the dedicated websocket service.
ENABLE_COLLABORATION_MODE=true
CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1
CELERY_TASK_ANNOTATIONS=null
AZURE_BLOB_ACCOUNT_URL=https://<your_account_name>.blob.core.windows.net
@ -87,6 +88,9 @@ DIFY_PORT=5001
SERVER_WORKER_AMOUNT=1
SERVER_WORKER_CLASS=gevent
SERVER_WORKER_CONNECTIONS=10
API_WEBSOCKET_WORKER_CLASS=geventwebsocket.gunicorn.workers.GeventWebSocketWorker
API_WEBSOCKET_WORKER_CONNECTIONS=1000
API_WEBSOCKET_GUNICORN_TIMEOUT=360
CELERY_SENTINEL_PASSWORD=
S3_ACCESS_KEY=
S3_SECRET_KEY=
@ -399,7 +403,7 @@ TABLESTORE_ENDPOINT=https://instance-name.cn-hangzhou.ots.aliyuncs.com
TABLESTORE_INSTANCE_NAME=instance-name
CLICKZETTA_USERNAME=
CLICKZETTA_VECTOR_DISTANCE_FUNCTION=cosine_distance
COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql}
COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql},collaboration
EXPOSE_NGINX_PORT=80
EXPOSE_NGINX_SSL_PORT=443
POSITION_TOOL_PINS=

View File

@ -15,3 +15,4 @@ NGINX_KEEPALIVE_TIMEOUT=65
NGINX_PROXY_READ_TIMEOUT=3600s
NGINX_PROXY_SEND_TIMEOUT=3600s
NGINX_ENABLE_CERTBOT_CHALLENGE=false
NGINX_SOCKET_IO_UPSTREAM=api_websocket:5001

View File

@ -15,7 +15,9 @@ server {
}
location /socket.io/ {
proxy_pass http://api:5001;
resolver 127.0.0.11 valid=30s ipv6=off;
set $socket_io_upstream ${NGINX_SOCKET_IO_UPSTREAM};
proxy_pass http://$socket_io_upstream;
include proxy.conf;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";

View File

@ -31,7 +31,7 @@ pnpm -C e2e check
`pnpm install` is resolved through the repository workspace and uses the shared root lockfile plus `pnpm-workspace.yaml`.
Use `pnpm check` as the default local verification step after editing E2E TypeScript, Cucumber support code, or feature glue. It runs formatting, linting, and type checks for this package.
Use `pnpm -C e2e check` as the default local verification step after editing E2E TypeScript, Cucumber support code, or feature glue. It runs formatting, linting, and type checks for this package.
Common commands:
@ -68,8 +68,8 @@ flowchart TD
C --> D["Cucumber loads config, steps, and support modules"]
D --> E["BeforeAll bootstraps shared auth state via /install"]
E --> F{"Which command is running?"}
F -->|`pnpm e2e`| G["Run config default tags: not @fresh and not @skip"]
F -->|`pnpm e2e:full*`| H["Override tags to not @skip"]
F -->|`pnpm -C e2e e2e`| G["Run config default tags: not @fresh and not @skip"]
F -->|`pnpm -C e2e e2e:full*`| H["Override tags to not @skip"]
G --> I["Per-scenario BrowserContext from shared browser"]
H --> I
I --> J["Failure artifacts written to cucumber-report/artifacts"]
@ -99,7 +99,7 @@ Behavior depends on instance state:
- uninitialized instance: completes install and stores authenticated state
- initialized instance: signs in and reuses authenticated state
Because of that, the `@fresh` install scenario only runs in the `pnpm e2e:full*` flows. The default `pnpm e2e*` flows exclude `@fresh` via Cucumber config tags so they can be re-run against an already initialized instance.
Because of that, the `@fresh` install scenario only runs in the `pnpm -C e2e e2e:full*` flows. The default `pnpm -C e2e e2e*` flows exclude `@fresh` via Cucumber config tags so they can be re-run against an already initialized instance.
Reset all persisted E2E state:
@ -126,7 +126,7 @@ pnpm -C e2e e2e:middleware:up
Stop the full middleware stack:
```bash
pnpm e2e:middleware:down
pnpm -C e2e e2e:middleware:down
```
The middleware stack includes:
@ -141,15 +141,15 @@ The middleware stack includes:
Fresh install verification:
```bash
pnpm e2e:full
pnpm -C e2e e2e:full
```
Run the Cucumber suite against an already running middleware stack:
```bash
pnpm e2e:middleware:up
pnpm e2e
pnpm e2e:middleware:down
pnpm -C e2e e2e:middleware:up
pnpm -C e2e e2e
pnpm -C e2e e2e:middleware:down
```
Artifacts and diagnostics:

View File

@ -40,7 +40,7 @@ Then('the shared app page should be accessible', async function (this: DifyWorld
When('I run the shared workflow app', async function (this: DifyWorld) {
const page = this.getPage()
const runButton = page.getByTestId('run-button')
const runButton = page.getByRole('button', { name: 'Execute' })
await expect(runButton).toBeEnabled({ timeout: 15_000 })
await runButton.click()

View File

@ -1334,11 +1334,6 @@
"count": 9
}
},
"web/app/components/base/markdown-blocks/form.tsx": {
"erasable-syntax-only/enums": {
"count": 3
}
},
"web/app/components/base/markdown-blocks/index.ts": {
"no-barrel-files/no-barrel-files": {
"count": 10
@ -1746,11 +1741,6 @@
"count": 4
}
},
"web/app/components/billing/upgrade-btn/index.tsx": {
"ts/no-explicit-any": {
"count": 3
}
},
"web/app/components/datasets/common/image-previewer/index.tsx": {
"no-irregular-whitespace": {
"count": 1
@ -2532,11 +2522,6 @@
"count": 1
}
},
"web/app/components/plugins/plugin-detail-panel/multiple-tool-selector/index.tsx": {
"ts/no-explicit-any": {
"count": 1
}
},
"web/app/components/plugins/plugin-detail-panel/strategy-detail.tsx": {
"ts/no-explicit-any": {
"count": 2
@ -4435,11 +4420,6 @@
"count": 1
}
},
"web/app/signin/one-more-step.tsx": {
"ts/no-explicit-any": {
"count": 1
}
},
"web/app/signup/layout.tsx": {
"ts/no-explicit-any": {
"count": 1

View File

@ -85,6 +85,11 @@ export type AckDeploymentReq = {
lastError?: LastError
}
export type AddGroupAppsRequest = {
id?: string
app_ids?: Array<string>
}
export type AppInstanceBasicInfo = {
id?: string
name?: string
@ -346,6 +351,11 @@ export type CreateReleaseReq = {
description?: string
}
export type CreateResourceGroupRequest = {
name?: string
description?: string
}
export type CreateSecretKeyReply = {
id?: string
name?: string
@ -660,6 +670,17 @@ export type GetWorkspaceReply = {
workspace?: Workspace
}
export type GroupAppItem = {
app_id?: string
app_name?: string
workspace_id?: string
workspace_name?: string
app_status?: number
token_usage?: string
rpm?: string
concurrency?: string
}
export type HealthzReply = {
message?: string
status?: string
@ -685,6 +706,11 @@ export type InfoConfigReply = {
PluginInstallationPermission?: PluginInstallationPermissionInfo
}
export type InnerAdmission = {
marker?: string
concurrencyGroupIds?: Array<string>
}
export type InnerBatchGetWebAppAccessModesByIdReq = {
appIds?: Array<string>
}
@ -750,10 +776,34 @@ export type InnerGetWebAppAccessModeByIdRes = {
accessMode?: string
}
export type InnerGroupConfig = {
id?: string
enabled?: boolean
membershipId?: string
limits?: Array<LimitConfig>
}
export type InnerIsUserAllowedToAccessWebAppRes = {
result?: boolean
}
export type InnerReleaseAdmissionRequest = {
admission?: InnerAdmission
}
export type InnerReleaseAdmissionResponse = {
[key: string]: unknown
}
export type InnerResolveResponse = {
appId?: string
groups?: Array<InnerGroupConfig>
blocked?: boolean
blockGroupId?: string
blockReason?: string
admission?: InnerAdmission
}
export type InnerTryAddAccountToDefaultWorkspaceReply = {
workspaceId?: string
joined?: boolean
@ -806,6 +856,13 @@ export type LicenseStatus = {
workspaces?: ResourceQuota
}
export type LimitConfig = {
type?: number
threshold?: string
action?: number
reached?: boolean
}
export type LimitFields = {
workspaceMembers?: number
workspaces?: ResourceQuota
@ -830,6 +887,11 @@ export type ListEnvironmentsReply = {
pagination?: Pagination
}
export type ListGroupAppsResponse = {
items?: Array<GroupAppItem>
total?: string
}
export type ListMembersReply = {
data?: Array<AccountDetail>
pagination?: Pagination
@ -840,6 +902,11 @@ export type ListReleasesReply = {
pagination?: Pagination
}
export type ListResourceGroupsResponse = {
items?: Array<ResourceGroupItem>
total?: string
}
export type ListRuntimeInstancesReply = {
data?: Array<RuntimeInstanceRow>
}
@ -1059,6 +1126,37 @@ export type ResolvedCredential = {
value?: string
}
export type ResourceGroupDetail = {
id?: string
name?: string
description?: string
enabled?: boolean
rpm_limit?: number
rpm_action?: number
concurrency_limit?: number
concurrency_action?: number
token_quota?: string
token_action?: number
created_at?: string
updated_at?: string
}
export type ResourceGroupItem = {
id?: string
name?: string
description?: string
enabled?: boolean
rpm_limit?: number
concurrency_limit?: number
token_quota?: string
token_usage?: string
app_count?: string
rpm_status?: number
conc_status?: number
created_at?: string
updated_at?: string
}
export type ResourceQuota = {
used?: number
limit?: number
@ -1135,6 +1233,23 @@ export type SearchAccessSubjectsReply = {
data?: Array<AccessSubjectDisplay>
}
export type SearchAppItem = {
app_id?: string
app_name?: string
workspace_id?: string
workspace_name?: string
app_status?: number
icon?: string
icon_type?: string
icon_background?: string
created_by_name?: string
}
export type SearchAppsResponse = {
items?: Array<SearchAppItem>
total?: string
}
export type SearchForWhilteListCandidatesRes = {
subjects?: Array<Subject>
currPage?: number
@ -1370,6 +1485,19 @@ export type UpdatePluginInstallationSettingsRequest = {
restrictToMarketplaceOnly?: boolean
}
export type UpdateResourceGroupRequest = {
id?: string
name?: string
description?: string
enabled?: boolean
rpm_limit?: number
rpm_action?: number
concurrency_limit?: number
concurrency_action?: number
token_quota?: string
token_action?: number
}
export type UpdateUserReply = {
account?: AccountDetail
}

View File

@ -81,6 +81,11 @@ export const zAckDeploymentReply = z.object({
newVersion: z.string().optional(),
})
export const zAddGroupAppsRequest = z.object({
id: z.string().optional(),
app_ids: z.array(z.string()).optional(),
})
export const zAppInstanceBasicInfo = z.object({
id: z.string().optional(),
name: z.string().optional(),
@ -289,6 +294,11 @@ export const zCreateReleaseReq = z.object({
description: z.string().optional(),
})
export const zCreateResourceGroupRequest = z.object({
name: z.string().optional(),
description: z.string().optional(),
})
export const zCreateSecretKeyReply = z.object({
id: z.string().optional(),
name: z.string().optional(),
@ -570,6 +580,17 @@ export const zGetWebAppWhitelistSubjectsResMember = z.object({
avatar: z.string().optional(),
})
export const zGroupAppItem = z.object({
app_id: z.string().optional(),
app_name: z.string().optional(),
workspace_id: z.string().optional(),
workspace_name: z.string().optional(),
app_status: z.int().optional(),
token_usage: z.string().optional(),
rpm: z.string().optional(),
concurrency: z.string().optional(),
})
export const zHealthzReply = z.object({
message: z.string().optional(),
status: z.string().optional(),
@ -580,6 +601,11 @@ export const zHostEnvironmentConfig = z.object({
joinTokenHash: z.string().optional(),
})
export const zInnerAdmission = z.object({
marker: z.string().optional(),
concurrencyGroupIds: z.array(z.string()).optional(),
})
export const zInnerBatchGetWebAppAccessModesByIdReq = z.object({
appIds: z.array(z.string()).optional(),
})
@ -653,6 +679,12 @@ export const zInnerIsUserAllowedToAccessWebAppRes = z.object({
result: z.boolean().optional(),
})
export const zInnerReleaseAdmissionRequest = z.object({
admission: zInnerAdmission.optional(),
})
export const zInnerReleaseAdmissionResponse = z.record(z.string(), z.unknown())
export const zInnerTryAddAccountToDefaultWorkspaceReply = z.object({
workspaceId: z.string().optional(),
joined: z.boolean().optional(),
@ -719,6 +751,29 @@ export const zAckDeploymentReq = z.object({
lastError: zLastError.optional(),
})
export const zLimitConfig = z.object({
type: z.int().optional(),
threshold: z.string().optional(),
action: z.int().optional(),
reached: z.boolean().optional(),
})
export const zInnerGroupConfig = z.object({
id: z.string().optional(),
enabled: z.boolean().optional(),
membershipId: z.string().optional(),
limits: z.array(zLimitConfig).optional(),
})
export const zInnerResolveResponse = z.object({
appId: z.string().optional(),
groups: z.array(zInnerGroupConfig).optional(),
blocked: z.boolean().optional(),
blockGroupId: z.string().optional(),
blockReason: z.string().optional(),
admission: zInnerAdmission.optional(),
})
export const zListDeploymentBindingOptionsReply = z.object({
slots: z.array(zDeploymentBindingOptionSlot).optional(),
})
@ -727,6 +782,11 @@ export const zListDeploymentEnvironmentOptionsReply = z.object({
environments: z.array(zDeploymentEnvironmentOption).optional(),
})
export const zListGroupAppsResponse = z.object({
items: z.array(zGroupAppItem).optional(),
total: z.string().optional(),
})
export const zLoginTypesReply = z.object({
enabledEmailCodeLogin: z.boolean().optional(),
enableEmailPasswordLogin: z.boolean().optional(),
@ -957,6 +1017,58 @@ export const zResolveCredentialsReply = z.object({
resolved: z.array(zResolvedCredential).optional(),
})
export const zResourceGroupDetail = z.object({
id: z.string().optional(),
name: z.string().optional(),
description: z.string().optional(),
enabled: z.boolean().optional(),
rpm_limit: z
.int()
.min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' })
.max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' })
.optional(),
rpm_action: z.int().optional(),
concurrency_limit: z
.int()
.min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' })
.max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' })
.optional(),
concurrency_action: z.int().optional(),
token_quota: z.string().optional(),
token_action: z.int().optional(),
created_at: z.string().optional(),
updated_at: z.string().optional(),
})
export const zResourceGroupItem = z.object({
id: z.string().optional(),
name: z.string().optional(),
description: z.string().optional(),
enabled: z.boolean().optional(),
rpm_limit: z
.int()
.min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' })
.max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' })
.optional(),
concurrency_limit: z
.int()
.min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' })
.max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' })
.optional(),
token_quota: z.string().optional(),
token_usage: z.string().optional(),
app_count: z.string().optional(),
rpm_status: z.int().optional(),
conc_status: z.int().optional(),
created_at: z.string().optional(),
updated_at: z.string().optional(),
})
export const zListResourceGroupsResponse = z.object({
items: z.array(zResourceGroupItem).optional(),
total: z.string().optional(),
})
/**
* ResourceQuota represents usage quota for a resource
*/
@ -1115,6 +1227,23 @@ export const zSearchAccessSubjectsReply = z.object({
data: z.array(zAccessSubjectDisplay).optional(),
})
export const zSearchAppItem = z.object({
app_id: z.string().optional(),
app_name: z.string().optional(),
workspace_id: z.string().optional(),
workspace_name: z.string().optional(),
app_status: z.int().optional(),
icon: z.string().optional(),
icon_type: z.string().optional(),
icon_background: z.string().optional(),
created_by_name: z.string().optional(),
})
export const zSearchAppsResponse = z.object({
items: z.array(zSearchAppItem).optional(),
total: z.string().optional(),
})
export const zSecretKey = z.object({
id: z.string().optional(),
name: z.string().optional(),
@ -1398,6 +1527,27 @@ export const zUpdatePluginInstallationSettingsRequest = z.object({
restrictToMarketplaceOnly: z.boolean().optional(),
})
export const zUpdateResourceGroupRequest = z.object({
id: z.string().optional(),
name: z.string().optional(),
description: z.string().optional(),
enabled: z.boolean().optional(),
rpm_limit: z
.int()
.min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' })
.max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' })
.optional(),
rpm_action: z.int().optional(),
concurrency_limit: z
.int()
.min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' })
.max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' })
.optional(),
concurrency_action: z.int().optional(),
token_quota: z.string().optional(),
token_action: z.int().optional(),
})
export const zUpdateUserReply = z.object({
account: zAccountDetail.optional(),
})

View File

@ -111,7 +111,7 @@ describe('Base Notion Page Selector Flow', () => {
await user.type(screen.getByTestId('notion-search-input'), 'missing-page')
expect(screen.getByText('common.dataSource.notion.selector.noSearchResult')).toBeInTheDocument()
await user.click(screen.getByTestId('notion-search-input-clear'))
await user.click(screen.getByRole('button', { name: 'common.operation.clear' }))
expect(screen.getByTestId('notion-page-name-root-1')).toBeInTheDocument()
await user.click(screen.getByTestId('notion-page-preview-root-1'))
@ -134,7 +134,7 @@ describe('Base Notion Page Selector Flow', () => {
expect(onSelectCredential).toHaveBeenCalledWith('c1')
await user.click(screen.getByTestId('notion-credential-selector-btn'))
await user.click(screen.getByRole('combobox', { name: /Workspace 1/ }))
await user.click(screen.getByTestId('notion-credential-item-c2'))
expect(mockInvalidPreImportNotionPages).toHaveBeenCalledWith({ datasetId: 'dataset-1', credentialId: 'c2' })

View File

@ -119,7 +119,7 @@ describe('RunOnce integration flow', () => {
fireEvent.change(screen.getByPlaceholderText('Bio'), { target: { value: 'Hello' } })
// Phase 3 submit
fireEvent.click(screen.getByTestId('run-button'))
fireEvent.click(screen.getByRole('button', { name: 'share.generation.run' }))
expect(onSend).toHaveBeenCalledTimes(1)
// Phase 4 simulate "running" state
@ -132,7 +132,7 @@ describe('RunOnce integration flow', () => {
/>,
)
const stopBtn = screen.getByTestId('stop-button')
const stopBtn = screen.getByRole('button', { name: 'share.generation.stopRun:{"defaultValue":"Stop Run"}' })
expect(stopBtn).toBeInTheDocument()
fireEvent.click(stopBtn)
expect(onStop).toHaveBeenCalledTimes(1)
@ -145,7 +145,7 @@ describe('RunOnce integration flow', () => {
runControl={{ onStop, isStopping: true }}
/>,
)
expect(screen.getByTestId('stop-button')).toBeDisabled()
expect(screen.getByRole('button', { name: 'share.generation.stopRun:{"defaultValue":"Stop Run"}' })).toBeDisabled()
})
it('clear resets all field types and allows re-submit', async () => {
@ -174,7 +174,7 @@ describe('RunOnce integration flow', () => {
// Re-fill and submit
fireEvent.change(screen.getByPlaceholderText('Question'), { target: { value: 'New' } })
fireEvent.click(screen.getByTestId('run-button'))
fireEvent.click(screen.getByRole('button', { name: 'share.generation.run' }))
expect(onSend).toHaveBeenCalledTimes(1)
})
@ -212,7 +212,7 @@ describe('RunOnce integration flow', () => {
fireEvent.change(screen.getByPlaceholderText('Text'), { target: { value: 'hello' } })
fireEvent.change(screen.getByTestId('code-editor'), { target: { value: '{"a":1}' } })
fireEvent.click(screen.getByTestId('run-button'))
fireEvent.click(screen.getByRole('button', { name: 'share.generation.run' }))
expect(onSend).toHaveBeenCalledTimes(1)
})
})

View File

@ -63,12 +63,12 @@ describe('DatasetsLayout', () => {
render((
<DatasetsLayout>
<div data-testid="datasets-content">datasets</div>
<div>datasets</div>
</DatasetsLayout>
))
expect(screen.getByRole('status')).toBeInTheDocument()
expect(screen.queryByTestId('datasets-content')).not.toBeInTheDocument()
expect(screen.queryByText('datasets')).not.toBeInTheDocument()
expect(mockReplace).not.toHaveBeenCalled()
})
@ -80,11 +80,11 @@ describe('DatasetsLayout', () => {
render((
<DatasetsLayout>
<div data-testid="datasets-content">datasets</div>
<div>datasets</div>
</DatasetsLayout>
))
expect(screen.queryByTestId('datasets-content')).not.toBeInTheDocument()
expect(screen.queryByText('datasets')).not.toBeInTheDocument()
await waitFor(() => {
expect(mockReplace).toHaveBeenCalledWith('/apps')
})
@ -98,11 +98,11 @@ describe('DatasetsLayout', () => {
render((
<DatasetsLayout>
<div data-testid="datasets-content">datasets</div>
<div>datasets</div>
</DatasetsLayout>
))
expect(screen.getByTestId('datasets-content')).toBeInTheDocument()
expect(screen.getByText('datasets')).toBeInTheDocument()
expect(mockReplace).not.toHaveBeenCalled()
})
})

View File

@ -48,12 +48,12 @@ describe('RoleRouteGuard', () => {
render((
<RoleRouteGuard>
<div data-testid="guarded-content">content</div>
<div>content</div>
</RoleRouteGuard>
))
expect(screen.getByRole('status')).toBeInTheDocument()
expect(screen.queryByTestId('guarded-content')).not.toBeInTheDocument()
expect(screen.queryByText('content')).not.toBeInTheDocument()
expect(mockReplace).not.toHaveBeenCalled()
})
@ -64,11 +64,11 @@ describe('RoleRouteGuard', () => {
render((
<RoleRouteGuard>
<div data-testid="guarded-content">content</div>
<div>content</div>
</RoleRouteGuard>
))
expect(screen.queryByTestId('guarded-content')).not.toBeInTheDocument()
expect(screen.queryByText('content')).not.toBeInTheDocument()
await waitFor(() => {
expect(mockReplace).toHaveBeenCalledWith('/datasets')
})
@ -82,11 +82,11 @@ describe('RoleRouteGuard', () => {
render((
<RoleRouteGuard>
<div data-testid="guarded-content">content</div>
<div>content</div>
</RoleRouteGuard>
))
expect(screen.getByTestId('guarded-content')).toBeInTheDocument()
expect(screen.getByText('content')).toBeInTheDocument()
expect(mockReplace).not.toHaveBeenCalled()
})
@ -98,11 +98,11 @@ describe('RoleRouteGuard', () => {
render((
<RoleRouteGuard>
<div data-testid="guarded-content">content</div>
<div>content</div>
</RoleRouteGuard>
))
expect(screen.getByTestId('guarded-content')).toBeInTheDocument()
expect(screen.getByText('content')).toBeInTheDocument()
expect(screen.queryByRole('status')).not.toBeInTheDocument()
expect(mockReplace).not.toHaveBeenCalled()
})

View File

@ -166,7 +166,7 @@ export default function AccountPage() {
{userProfile.name}
{isEducationAccount && (
<PremiumBadge size="s" color="blue" className="ml-1 !px-2">
<RiGraduationCapFill className="mr-1 h-3 w-3" />
<RiGraduationCapFill aria-hidden="true" className="mr-1 h-3 w-3" />
<span className="system-2xs-medium">EDU</span>
</PremiumBadge>
)}

View File

@ -62,7 +62,7 @@ export default function AppSelector() {
{userProfile.name}
{isEducationAccount && (
<PremiumBadge size="s" color="blue" className="ml-1 px-2!">
<span className="mr-1 i-ri-graduation-cap-fill h-3 w-3" />
<span aria-hidden="true" className="mr-1 i-ri-graduation-cap-fill h-3 w-3" />
<span className="system-2xs-medium">EDU</span>
</PremiumBadge>
)}

View File

@ -243,10 +243,7 @@ describe('Filter', () => {
)
// Act
const input = screen.getByPlaceholderText('common.operation.search')
const clearButton = input.parentElement?.querySelector('div.cursor-pointer')
if (clearButton)
fireEvent.click(clearButton)
fireEvent.click(screen.getByRole('button', { name: 'common.operation.clear' }))
// Assert
expect(setQueryParams).toHaveBeenCalledWith({ ...queryParams, keyword: '' })

View File

@ -55,15 +55,23 @@ const BatchAction: FC<IBatchActionProps> = ({
<span className="text-[13px] leading-[16px] font-semibold text-text-accent">{t(`${i18nPrefix}.selected`, { ns: 'appAnnotation' })}</span>
</div>
<Divider type="vertical" className="mx-0.5 h-3.5 bg-divider-regular" />
<div className="flex cursor-pointer items-center gap-x-0.5 px-3 py-2" onClick={showDeleteConfirm}>
<RiDeleteBinLine className="h-4 w-4 text-components-button-destructive-ghost-text" />
<button type="button" className="px-0.5 text-[13px] leading-[16px] font-medium text-components-button-destructive-ghost-text">
<button
type="button"
className="flex cursor-pointer items-center gap-x-0.5 border-none bg-transparent px-3 py-2 text-left text-components-button-destructive-ghost-text focus-visible:ring-1 focus-visible:ring-state-destructive-border focus-visible:outline-hidden"
onClick={showDeleteConfirm}
>
<RiDeleteBinLine className="h-4 w-4" aria-hidden="true" />
<span className="px-0.5 text-[13px] leading-[16px] font-medium">
{t('operation.delete', { ns: 'common' })}
</button>
</div>
</span>
</button>
<Divider type="vertical" className="mx-0.5 h-3.5 bg-divider-regular" />
<button type="button" className="px-3.5 py-2 text-[13px] leading-[16px] font-medium text-components-button-ghost-text" onClick={onCancel}>
<button
type="button"
className="border-none bg-transparent px-3.5 py-2 text-left text-[13px] leading-[16px] font-medium text-components-button-ghost-text focus-visible:ring-1 focus-visible:ring-components-input-border-active focus-visible:outline-hidden"
onClick={onCancel}
>
{t('operation.cancel', { ns: 'common' })}
</button>
</div>

View File

@ -54,7 +54,7 @@ describe('CSVUploader', () => {
const clickSpy = vi.spyOn(HTMLInputElement.prototype, 'click')
renderComponent()
fireEvent.click(screen.getByText('appAnnotation.batchModal.browse'))
fireEvent.click(screen.getByRole('button', { name: 'appAnnotation.batchModal.browse' }))
expect(clickSpy).toHaveBeenCalledTimes(1)
clickSpy.mockRestore()
@ -137,7 +137,7 @@ describe('CSVUploader', () => {
clickSpy.mockRestore()
const valueSetter = vi.spyOn(fileInput, 'value', 'set')
const removeTrigger = screen.getByTestId('remove-file-button')
const removeTrigger = screen.getByRole('button', { name: /operation\.delete$/ })
fireEvent.click(removeTrigger)
expect(updateFile).toHaveBeenCalledWith()

View File

@ -115,6 +115,14 @@ describe('BatchModal', () => {
expect(props.onCancel).toHaveBeenCalledTimes(1)
})
it('should call onCancel when close button is clicked', () => {
const { props } = renderComponent()
fireEvent.click(screen.getByRole('button', { name: /operation\.close$/ }))
expect(props.onCancel).toHaveBeenCalledTimes(1)
})
it('should submit the csv file, poll status, and notify when import completes', async () => {
vi.useFakeTimers({ shouldAdvanceTime: true })
const { props } = renderComponent()

View File

@ -97,7 +97,13 @@ const CSVUploader: FC<Props> = ({
<CSVIcon className="shrink-0" />
<div className="text-text-tertiary">
{t('batchModal.csvUploadTitle', { ns: 'appAnnotation' })}
<span className="cursor-pointer text-text-accent" onClick={selectHandle}>{t('batchModal.browse', { ns: 'appAnnotation' })}</span>
<button
type="button"
className="inline cursor-pointer border-none bg-transparent p-0 text-left text-text-accent focus-visible:ring-1 focus-visible:ring-components-input-border-active focus-visible:outline-hidden"
onClick={selectHandle}
>
{t('batchModal.browse', { ns: 'appAnnotation' })}
</button>
</div>
</div>
{dragging && <div ref={dragRef} className="absolute top-0 left-0 h-full w-full" />}
@ -113,9 +119,14 @@ const CSVUploader: FC<Props> = ({
<div className="hidden items-center group-hover:flex">
<Button variant="secondary" onClick={selectHandle}>{t('stepOne.uploader.change', { ns: 'datasetCreation' })}</Button>
<div className="mx-2 h-4 w-px bg-divider-regular" />
<div className="cursor-pointer p-2" onClick={removeFile} data-testid="remove-file-button">
<RiDeleteBinLine className="h-4 w-4 text-text-tertiary" />
</div>
<button
type="button"
className="cursor-pointer border-none bg-transparent p-2 focus-visible:ring-1 focus-visible:ring-components-input-border-active focus-visible:outline-hidden"
aria-label={t('operation.delete', { ns: 'common' })}
onClick={removeFile}
>
<RiDeleteBinLine className="h-4 w-4 text-text-tertiary" aria-hidden="true" />
</button>
</div>
</div>
)}

View File

@ -91,9 +91,14 @@ const BatchModal: FC<IBatchModalProps> = ({
<DialogContent className="w-full max-w-[520px]! overflow-hidden! rounded-xl! border-none px-8 py-6 text-left align-middle">
<div className="relative pb-1 system-xl-medium text-text-primary">{t('batchModal.title', { ns: 'appAnnotation' })}</div>
<div className="absolute top-4 right-4 cursor-pointer p-2" onClick={onCancel}>
<RiCloseLine className="h-4 w-4 text-text-tertiary" />
</div>
<button
type="button"
className="absolute top-4 right-4 cursor-pointer border-none bg-transparent p-2 focus-visible:ring-1 focus-visible:ring-components-input-border-active focus-visible:outline-hidden"
aria-label={t('operation.close', { ns: 'common' })}
onClick={onCancel}
>
<RiCloseLine className="h-4 w-4 text-text-tertiary" aria-hidden="true" />
</button>
<CSVUploader
file={currentCSV}
updateFile={handleFile}

View File

@ -212,16 +212,16 @@ describe('SpecificGroupsOrMembers', () => {
expect(screen.getByText(baseMember.name)).toBeInTheDocument()
})
const groupItem = screen.getByText(baseGroup.name).closest('div')
const groupRemove = groupItem?.querySelector('.h-4.w-4.cursor-pointer') as HTMLElement
const groupRemove = screen.getAllByRole('button', { name: /operation\.remove$/ })[0]!
fireEvent.click(groupRemove)
await waitFor(() => {
expect(screen.queryByText(baseGroup.name)).not.toBeInTheDocument()
})
const memberItem = screen.getByText(baseMember.name).closest('div')
const memberRemove = memberItem?.querySelector('.h-4.w-4.cursor-pointer') as HTMLElement
const memberRemove = screen.getAllByRole('button', { name: /operation\.remove$/ })[0]!
fireEvent.click(memberRemove)
await waitFor(() => {

View File

@ -86,11 +86,13 @@ describe('SpecificGroupsOrMembers', () => {
expect(screen.getByText(baseMember.name)).toBeInTheDocument()
})
const groupRemove = screen.getByText(baseGroup.name).closest('div')?.querySelector('.h-4.w-4.cursor-pointer') as HTMLElement
const removeButtons = screen.getAllByRole('button', { name: /operation\.remove$/ })
const groupRemove = removeButtons[0]!
const memberRemove = removeButtons[1]!
fireEvent.click(groupRemove)
expect(useAccessControlStore.getState().specificGroups).toEqual([])
const memberRemove = screen.getByText(baseMember.name).closest('div')?.querySelector('.h-4.w-4.cursor-pointer') as HTMLElement
fireEvent.click(memberRemove)
expect(useAccessControlStore.getState().specificMembers).toEqual([])
})

View File

@ -119,14 +119,40 @@ function SelectedGroupsBreadCrumb() {
const handleReset = useCallback(() => {
setSelectedGroupsForBreadcrumb([])
}, [setSelectedGroupsForBreadcrumb])
const hasBreadcrumb = selectedGroupsForBreadcrumb.length > 0
return (
<div className="flex h-7 items-center gap-x-0.5 px-2 py-0.5">
<span className={cn('system-xs-regular text-text-tertiary', selectedGroupsForBreadcrumb.length > 0 && 'cursor-pointer text-text-accent')} onClick={handleReset}>{t('accessControlDialog.operateGroupAndMember.allMembers', { ns: 'app' })}</span>
{hasBreadcrumb
? (
<button
type="button"
className="cursor-pointer border-none bg-transparent p-0 text-left system-xs-regular text-text-accent focus-visible:ring-1 focus-visible:ring-components-input-border-active focus-visible:outline-hidden"
onClick={handleReset}
>
{t('accessControlDialog.operateGroupAndMember.allMembers', { ns: 'app' })}
</button>
)
: (
<span className="system-xs-regular text-text-tertiary">{t('accessControlDialog.operateGroupAndMember.allMembers', { ns: 'app' })}</span>
)}
{selectedGroupsForBreadcrumb.map((group, index) => {
const isLastGroup = index === selectedGroupsForBreadcrumb.length - 1
return (
<div key={index} className="flex items-center gap-x-0.5 system-xs-regular text-text-tertiary">
<span>/</span>
<span className={index === selectedGroupsForBreadcrumb.length - 1 ? '' : 'cursor-pointer text-text-accent'} onClick={() => handleBreadCrumbClick(index)}>{group.name}</span>
{isLastGroup
? <span>{group.name}</span>
: (
<button
type="button"
className="cursor-pointer border-none bg-transparent p-0 text-left system-xs-regular text-text-accent focus-visible:ring-1 focus-visible:ring-components-input-border-active focus-visible:outline-hidden"
onClick={() => handleBreadCrumbClick(index)}
>
{group.name}
</button>
)}
</div>
)
})}

View File

@ -120,6 +120,8 @@ type BaseItemProps = {
onRemove?: () => void
}
function BaseItem({ icon, onRemove, children }: BaseItemProps) {
const { t } = useTranslation()
return (
<div className="group flex flex-row items-center gap-x-1 rounded-full border-[0.5px] border-components-panel-border-subtle bg-components-badge-white-to-dark p-1 pr-1.5 shadow-xs">
<div className="h-5 w-5 overflow-hidden rounded-full bg-components-icon-bg-blue-solid">
@ -128,9 +130,14 @@ function BaseItem({ icon, onRemove, children }: BaseItemProps) {
</div>
</div>
{children}
<div className="flex h-4 w-4 cursor-pointer items-center justify-center" onClick={onRemove}>
<RiCloseCircleFill className="h-[14px] w-[14px] text-text-quaternary" />
</div>
<button
type="button"
className="flex h-4 w-4 cursor-pointer items-center justify-center border-none bg-transparent p-0 focus-visible:ring-1 focus-visible:ring-components-input-border-active focus-visible:outline-hidden"
aria-label={t('operation.remove', { ns: 'common' })}
onClick={onRemove}
>
<RiCloseCircleFill className="h-[14px] w-[14px] text-text-quaternary" aria-hidden="true" />
</button>
</div>
)
}

View File

@ -103,6 +103,22 @@ describe('VersionInfoModal', () => {
expect(handleClose).toHaveBeenCalledTimes(1)
})
it('should close when the close button is clicked', () => {
const handleClose = vi.fn()
render(
<VersionInfoModal
isOpen
onClose={handleClose}
onPublish={vi.fn()}
/>,
)
fireEvent.click(screen.getByRole('button', { name: 'operation.close' }))
expect(handleClose).toHaveBeenCalledTimes(1)
})
it('should validate release note length and clear previous errors before publishing', () => {
const handlePublish = vi.fn()
const handleClose = vi.fn()

View File

@ -79,9 +79,14 @@ const VersionInfoModal: FC<VersionInfoModalProps> = ({
<div className="title-2xl-semi-bold text-text-primary first-letter:capitalize">
{versionInfo?.marked_name ? t('versionHistory.editVersionInfo', { ns: 'workflow' }) : t('versionHistory.nameThisVersion', { ns: 'workflow' })}
</div>
<div className="absolute top-5 right-5 flex h-8 w-8 cursor-pointer items-center justify-center p-1.5" onClick={onClose}>
<RiCloseLine className="h-[18px] w-[18px] text-text-tertiary" />
</div>
<button
type="button"
className="absolute top-5 right-5 flex h-8 w-8 cursor-pointer items-center justify-center border-none bg-transparent p-1.5 focus-visible:ring-1 focus-visible:ring-components-input-border-active focus-visible:outline-hidden"
aria-label={t('operation.close', { ns: 'common' })}
onClick={onClose}
>
<RiCloseLine className="h-[18px] w-[18px] text-text-tertiary" aria-hidden="true" />
</button>
</div>
<div className="flex flex-col gap-y-4 px-6 py-3">
<div className="flex flex-col gap-y-1">

View File

@ -5,11 +5,6 @@ import type { PromptRole, PromptVariable } from '@/models/debug'
import { Button } from '@langgenius/dify-ui/button'
import { cn } from '@langgenius/dify-ui/cn'
import { toast } from '@langgenius/dify-ui/toast'
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from '@langgenius/dify-ui/tooltip'
import {
RiDeleteBinLine,
RiErrorWarningFill,
@ -25,6 +20,7 @@ import {
Copy,
CopyCheck,
} from '@/app/components/base/icons/src/vender/line/files'
import { Infotip } from '@/app/components/base/infotip'
import PromptEditor from '@/app/components/base/prompt-editor'
import { INSERT_VARIABLE_VALUE_BLOCK_COMMAND } from '@/app/components/base/prompt-editor/plugins/variable-block'
import ConfigContext from '@/context/debug-configuration'
@ -183,18 +179,13 @@ const AdvancedPromptInput: FC<Props> = ({
<div className="text-sm font-semibold text-indigo-800 uppercase">
{t('pageTitle.line1', { ns: 'appDebug' })}
</div>
<Tooltip>
<TooltipTrigger
render={(
<span className="ml-1 i-ri-question-line h-4 w-4 shrink-0 text-text-quaternary" />
)}
/>
<TooltipContent>
<div className="w-[180px]">
{t('promptTip', { ns: 'appDebug' })}
</div>
</TooltipContent>
</Tooltip>
<Infotip
aria-label={t('promptTip', { ns: 'appDebug' })}
className="ml-1"
popupClassName="w-[180px]"
>
{t('promptTip', { ns: 'appDebug' })}
</Infotip>
</div>
)}
<div className={cn(s.optionWrap, 'items-center space-x-1')}>

View File

@ -5,11 +5,6 @@ import type { PromptVariable } from '@/models/debug'
import type { GenRes } from '@/service/debug'
import { cn } from '@langgenius/dify-ui/cn'
import { toast } from '@langgenius/dify-ui/toast'
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from '@langgenius/dify-ui/tooltip'
import { useBoolean } from 'ahooks'
import { noop } from 'es-toolkit/function'
import { produce } from 'immer'
@ -21,6 +16,7 @@ import { ADD_EXTERNAL_DATA_TOOL } from '@/app/components/app/configuration/confi
import AutomaticBtn from '@/app/components/app/configuration/config/automatic/automatic-btn'
import GetAutomaticResModal from '@/app/components/app/configuration/config/automatic/get-automatic-res'
import { useFeaturesStore } from '@/app/components/base/features/hooks'
import { Infotip } from '@/app/components/base/infotip'
import PromptEditor from '@/app/components/base/prompt-editor'
import { PROMPT_EDITOR_UPDATE_VALUE_BY_EVENT_EMITTER } from '@/app/components/base/prompt-editor/plugins/update-block'
import { INSERT_VARIABLE_VALUE_BLOCK_COMMAND } from '@/app/components/base/prompt-editor/plugins/variable-block'
@ -183,18 +179,13 @@ const Prompt: FC<ISimplePromptInput> = ({
<div className="flex items-center space-x-1">
<div className="system-sm-semibold-uppercase text-text-secondary">{mode !== AppModeEnum.COMPLETION ? t('chatSubTitle', { ns: 'appDebug' }) : t('completionSubTitle', { ns: 'appDebug' })}</div>
{!readonly && (
<Tooltip>
<TooltipTrigger
render={(
<span className="ml-1 i-ri-question-line h-4 w-4 shrink-0 text-text-quaternary" />
)}
/>
<TooltipContent>
<div className="w-[180px]">
{t('promptTip', { ns: 'appDebug' })}
</div>
</TooltipContent>
</Tooltip>
<Infotip
aria-label={t('promptTip', { ns: 'appDebug' })}
className="ml-1"
popupClassName="w-[180px]"
>
{t('promptTip', { ns: 'appDebug' })}
</Infotip>
)}
</div>
<div className="flex items-center">

View File

@ -233,9 +233,7 @@ describe('ConfigVar', () => {
const item = screen.getByTitle('name · Name')
const itemContainer = item.closest('div.group')
expect(itemContainer).not.toBeNull()
const actionButtons = itemContainer!.querySelectorAll('div.h-6.w-6')
expect(actionButtons).toHaveLength(2)
fireEvent.click(actionButtons[0]!)
fireEvent.click(within(itemContainer as HTMLElement).getByRole('button', { name: 'common.operation.edit' }))
const editDialog = await screen.findByRole('dialog')
const saveButton = within(editDialog).getByRole('button', { name: 'common.operation.save' })
@ -259,9 +257,7 @@ describe('ConfigVar', () => {
const item = screen.getByTitle('first · First')
const itemContainer = item.closest('div.group')
expect(itemContainer).not.toBeNull()
const actionButtons = itemContainer!.querySelectorAll('div.h-6.w-6')
expect(actionButtons).toHaveLength(2)
fireEvent.click(actionButtons[0]!)
fireEvent.click(within(itemContainer as HTMLElement).getByRole('button', { name: 'common.operation.edit' }))
const inputs = await screen.findAllByPlaceholderText('appDebug.variableConfig.inputPlaceholder')
fireEvent.change(inputs[0]!, { target: { value: 'second' } })
@ -285,9 +281,7 @@ describe('ConfigVar', () => {
const item = screen.getByTitle('first · First')
const itemContainer = item.closest('div.group')
expect(itemContainer).not.toBeNull()
const actionButtons = itemContainer!.querySelectorAll('div.h-6.w-6')
expect(actionButtons).toHaveLength(2)
fireEvent.click(actionButtons[0]!)
fireEvent.click(within(itemContainer as HTMLElement).getByRole('button', { name: 'common.operation.edit' }))
const inputs = await screen.findAllByPlaceholderText('appDebug.variableConfig.inputPlaceholder')
fireEvent.change(inputs[1]!, { target: { value: 'Second' } })
@ -318,7 +312,7 @@ describe('ConfigVar', () => {
onPromptVariablesChange,
})
const removeBtn = screen.getByTestId('var-item-delete-btn')
const removeBtn = screen.getByRole('button', { name: 'common.operation.delete' })
fireEvent.click(removeBtn)
expect(onPromptVariablesChange).toHaveBeenCalledWith([])
@ -343,7 +337,7 @@ describe('ConfigVar', () => {
},
)
const deleteBtn = screen.getByTestId('var-item-delete-btn')
const deleteBtn = screen.getByRole('button', { name: 'common.operation.delete' })
fireEvent.click(deleteBtn)
// confirmation modal should show up
fireEvent.click(screen.getByRole('button', { name: 'common.operation.confirm' }))
@ -411,8 +405,7 @@ describe('ConfigVar', () => {
const itemContainer = item.closest('div.group')
expect(itemContainer).not.toBeNull()
const actionButtons = itemContainer!.querySelectorAll('div.h-6.w-6')
fireEvent.click(actionButtons[0]!)
fireEvent.click(within(itemContainer as HTMLElement).getByRole('button', { name: 'common.operation.edit' }))
const modalState = setShowExternalDataToolModal.mock.calls.at(-1)?.[0]
@ -460,8 +453,7 @@ describe('ConfigVar', () => {
const itemContainer = item.closest('div.group')
expect(itemContainer).not.toBeNull()
const actionButtons = itemContainer!.querySelectorAll('div.h-6.w-6')
fireEvent.click(actionButtons[0]!)
fireEvent.click(within(itemContainer as HTMLElement).getByRole('button', { name: 'common.operation.edit' }))
const modalState = setShowExternalDataToolModal.mock.calls.at(-1)?.[0]

View File

@ -39,7 +39,7 @@ describe('VarItem', () => {
/>,
)
fireEvent.click(screen.getByTestId('var-item-delete-btn'))
fireEvent.click(screen.getByRole('button', { name: 'common.operation.delete' }))
expect(onRemove).toHaveBeenCalledTimes(1)
})

View File

@ -71,6 +71,25 @@ describe('ConfigModal', () => {
}), undefined)
})
it('should keep scrolling inside the form body so scrollbars do not cover dialog corners', () => {
render(
<ConfigModal
isCreate
isShow
payload={createPayload({ label: 'Question' })}
onClose={vi.fn()}
onConfirm={vi.fn()}
/>,
)
const dialog = screen.getByRole('dialog')
const scrollArea = screen.getByTestId('config-modal-scroll-area')
expect(dialog).toHaveClass('overflow-hidden!')
expect(scrollArea).toHaveClass('overflow-y-auto')
expect(scrollArea).toHaveClass('overflow-x-hidden')
})
it('should block save when the label is missing', () => {
render(
<ConfigModal

Some files were not shown because too many files have changed in this diff Show More