diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml index 717413937f..bd47abc710 100644 --- a/.github/workflows/api-tests.yml +++ b/.github/workflows/api-tests.yml @@ -16,7 +16,7 @@ concurrency: jobs: api-unit: name: API Unit Tests - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 env: COVERAGE_FILE: coverage-unit defaults: @@ -62,7 +62,7 @@ jobs: api-integration: name: API Integration Tests - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 env: COVERAGE_FILE: coverage-integration STORAGE_TYPE: opendal @@ -137,7 +137,7 @@ jobs: api-coverage: name: API Coverage - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 needs: - api-unit - api-integration diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml index 35683b112f..8a1719da3c 100644 --- a/.github/workflows/autofix.yml +++ b/.github/workflows/autofix.yml @@ -13,7 +13,7 @@ permissions: jobs: autofix: if: github.repository == 'langgenius/dify' - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Complete merge group check if: github.event_name == 'merge_group' diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 5f16fc6927..2d8bde8080 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -26,6 +26,9 @@ jobs: build: runs-on: ${{ matrix.runs_on }} if: github.repository == 'langgenius/dify' + permissions: + contents: read + id-token: write strategy: matrix: include: @@ -35,28 +38,28 @@ jobs: build_context: "{{defaultContext}}:api" file: "Dockerfile" platform: linux/amd64 - runs_on: ubuntu-latest + runs_on: depot-ubuntu-24.04-4 - service_name: "build-api-arm64" image_name_env: "DIFY_API_IMAGE_NAME" artifact_context: "api" build_context: "{{defaultContext}}:api" file: "Dockerfile" platform: linux/arm64 - runs_on: ubuntu-24.04-arm + runs_on: depot-ubuntu-24.04-4 - service_name: "build-web-amd64" image_name_env: "DIFY_WEB_IMAGE_NAME" artifact_context: "web" build_context: "{{defaultContext}}" file: "web/Dockerfile" platform: linux/amd64 - runs_on: ubuntu-latest + runs_on: depot-ubuntu-24.04-4 - service_name: "build-web-arm64" image_name_env: "DIFY_WEB_IMAGE_NAME" artifact_context: "web" build_context: "{{defaultContext}}" file: "web/Dockerfile" platform: linux/arm64 - runs_on: ubuntu-24.04-arm + runs_on: depot-ubuntu-24.04-4 steps: - name: Prepare @@ -70,8 +73,8 @@ jobs: username: ${{ env.DOCKERHUB_USER }} password: ${{ env.DOCKERHUB_TOKEN }} - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0 + - name: Set up Depot CLI + uses: depot/setup-action@v1 - name: Extract metadata for Docker id: meta @@ -81,16 +84,15 @@ jobs: - name: Build Docker image id: build - uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0 + uses: depot/build-push-action@v1 with: + project: ${{ vars.DEPOT_PROJECT_ID }} context: ${{ matrix.build_context }} file: ${{ matrix.file }} platforms: ${{ matrix.platform }} build-args: COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }} labels: ${{ steps.meta.outputs.labels }} outputs: type=image,name=${{ env[matrix.image_name_env] }},push-by-digest=true,name-canonical=true,push=true - cache-from: type=gha,scope=${{ matrix.service_name }} - cache-to: type=gha,mode=max,scope=${{ matrix.service_name }} - name: Export digest env: @@ -108,9 +110,33 @@ jobs: if-no-files-found: error retention-days: 1 + fork-build-validate: + if: github.repository != 'langgenius/dify' 
+ runs-on: ubuntu-24.04 + strategy: + matrix: + include: + - service_name: "validate-api-amd64" + build_context: "{{defaultContext}}:api" + file: "Dockerfile" + - service_name: "validate-web-amd64" + build_context: "{{defaultContext}}" + file: "web/Dockerfile" + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@98e3b2c9eab4f4f98a95c0c0a3ea5e5e672fd2a8 # v3.10.0 + + - name: Validate Docker image + uses: docker/build-push-action@5cd29d66b4a8d8e6f4d5dfe2e9329f0b1d446289 # v6.18.0 + with: + push: false + context: ${{ matrix.build_context }} + file: ${{ matrix.file }} + platforms: linux/amd64 + create-manifest: needs: build - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 if: github.repository == 'langgenius/dify' strategy: matrix: diff --git a/.github/workflows/db-migration-test.yml b/.github/workflows/db-migration-test.yml index 17b867dd6d..b1ccf496df 100644 --- a/.github/workflows/db-migration-test.yml +++ b/.github/workflows/db-migration-test.yml @@ -9,7 +9,7 @@ concurrency: jobs: db-migration-test-postgres: - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Checkout code @@ -59,7 +59,7 @@ jobs: run: uv run --directory api flask upgrade-db db-migration-test-mysql: - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Checkout code diff --git a/.github/workflows/deploy-agent-dev.yml b/.github/workflows/deploy-agent-dev.yml index cd5fe9242e..9b9b77e0a2 100644 --- a/.github/workflows/deploy-agent-dev.yml +++ b/.github/workflows/deploy-agent-dev.yml @@ -13,7 +13,7 @@ on: jobs: deploy: - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 if: | github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.head_branch == 'deploy/agent-dev' diff --git a/.github/workflows/deploy-dev.yml b/.github/workflows/deploy-dev.yml index 954537663a..c2ff8c6332 100644 --- a/.github/workflows/deploy-dev.yml +++ b/.github/workflows/deploy-dev.yml @@ -10,7 +10,7 @@ on: jobs: deploy: - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 if: | github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.head_branch == 'deploy/dev' diff --git a/.github/workflows/deploy-enterprise.yml b/.github/workflows/deploy-enterprise.yml index 9cff3a3482..2740541f0f 100644 --- a/.github/workflows/deploy-enterprise.yml +++ b/.github/workflows/deploy-enterprise.yml @@ -13,7 +13,7 @@ on: jobs: deploy: - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 if: | github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.head_branch == 'deploy/enterprise' diff --git a/.github/workflows/deploy-hitl.yml b/.github/workflows/deploy-hitl.yml index c6f1cc7e6f..0da241cf95 100644 --- a/.github/workflows/deploy-hitl.yml +++ b/.github/workflows/deploy-hitl.yml @@ -10,7 +10,7 @@ on: jobs: deploy: - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 if: | github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.head_branch == 'build/feat/hitl' diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml index 5752076c36..b0022b863b 100644 --- a/.github/workflows/docker-build.yml +++ b/.github/workflows/docker-build.yml @@ -14,40 +14,69 @@ concurrency: jobs: build-docker: + if: github.event.pull_request.head.repo.full_name == github.repository runs-on: ${{ matrix.runs_on }} + permissions: + contents: read + id-token: write strategy: matrix: include: - service_name: "api-amd64" platform: linux/amd64 - runs_on: ubuntu-latest + runs_on: depot-ubuntu-24.04-4 context: 
"{{defaultContext}}:api" file: "Dockerfile" - service_name: "api-arm64" platform: linux/arm64 - runs_on: ubuntu-24.04-arm + runs_on: depot-ubuntu-24.04-4 context: "{{defaultContext}}:api" file: "Dockerfile" - service_name: "web-amd64" platform: linux/amd64 - runs_on: ubuntu-latest + runs_on: depot-ubuntu-24.04-4 context: "{{defaultContext}}" file: "web/Dockerfile" - service_name: "web-arm64" platform: linux/arm64 - runs_on: ubuntu-24.04-arm + runs_on: depot-ubuntu-24.04-4 context: "{{defaultContext}}" file: "web/Dockerfile" steps: - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0 + - name: Set up Depot CLI + uses: depot/setup-action@v1 - name: Build Docker Image - uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0 + uses: depot/build-push-action@v1 with: + project: ${{ vars.DEPOT_PROJECT_ID }} push: false context: ${{ matrix.context }} file: ${{ matrix.file }} platforms: ${{ matrix.platform }} - cache-from: type=gha - cache-to: type=gha,mode=max + + build-docker-fork: + if: github.event.pull_request.head.repo.full_name != github.repository + runs-on: ubuntu-24.04 + permissions: + contents: read + strategy: + matrix: + include: + - service_name: "api-amd64" + context: "{{defaultContext}}:api" + file: "Dockerfile" + - service_name: "web-amd64" + context: "{{defaultContext}}" + file: "web/Dockerfile" + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@98e3b2c9eab4f4f98a95c0c0a3ea5e5e672fd2a8 # v3.10.0 + + - name: Build Docker Image + uses: docker/build-push-action@5cd29d66b4a8d8e6f4d5dfe2e9329f0b1d446289 # v6.18.0 + with: + push: false + context: ${{ matrix.context }} + file: ${{ matrix.file }} + platforms: linux/amd64 diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 278e10bc04..f59cc6be48 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -7,7 +7,7 @@ jobs: permissions: contents: read pull-requests: write - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - uses: actions/labeler@634933edcd8ababfe52f92936142cc22ac488b1b # v6.0.1 with: diff --git a/.github/workflows/main-ci.yml b/.github/workflows/main-ci.yml index ba36b5c07a..278f2ed8d1 100644 --- a/.github/workflows/main-ci.yml +++ b/.github/workflows/main-ci.yml @@ -23,7 +23,7 @@ concurrency: jobs: pre_job: name: Skip Duplicate Checks - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 outputs: should_skip: ${{ steps.skip_check.outputs.should_skip || 'false' }} steps: @@ -39,7 +39,7 @@ jobs: name: Check Changed Files needs: pre_job if: needs.pre_job.outputs.should_skip != 'true' - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 outputs: api-changed: ${{ steps.changes.outputs.api }} e2e-changed: ${{ steps.changes.outputs.e2e }} @@ -141,7 +141,7 @@ jobs: - pre_job - check-changes if: needs.pre_job.outputs.should_skip != 'true' && needs.check-changes.outputs.api-changed != 'true' - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Report skipped API tests run: echo "No API-related changes detected; skipping API tests." 
@@ -154,7 +154,7 @@ jobs: - check-changes - api-tests-run - api-tests-skip - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Finalize API Tests status env: @@ -201,7 +201,7 @@ jobs: - pre_job - check-changes if: needs.pre_job.outputs.should_skip != 'true' && needs.check-changes.outputs.web-changed != 'true' - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Report skipped web tests run: echo "No web-related changes detected; skipping web tests." @@ -214,7 +214,7 @@ jobs: - check-changes - web-tests-run - web-tests-skip - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Finalize Web Tests status env: @@ -260,7 +260,7 @@ jobs: - pre_job - check-changes if: needs.pre_job.outputs.should_skip != 'true' && needs.check-changes.outputs.e2e-changed != 'true' - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Report skipped web full-stack e2e run: echo "No E2E-related changes detected; skipping web full-stack E2E." @@ -273,7 +273,7 @@ jobs: - check-changes - web-e2e-run - web-e2e-skip - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Finalize Web Full-Stack E2E status env: @@ -325,7 +325,7 @@ jobs: - pre_job - check-changes if: needs.pre_job.outputs.should_skip != 'true' && needs.check-changes.outputs.vdb-changed != 'true' - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Report skipped VDB tests run: echo "No VDB-related changes detected; skipping VDB tests." @@ -338,7 +338,7 @@ jobs: - check-changes - vdb-tests-run - vdb-tests-skip - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Finalize VDB Tests status env: @@ -384,7 +384,7 @@ jobs: - pre_job - check-changes if: needs.pre_job.outputs.should_skip != 'true' && needs.check-changes.outputs.migration-changed != 'true' - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Report skipped DB migration tests run: echo "No migration-related changes detected; skipping DB migration tests." 
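Each suite in main-ci.yml pairs a real test job with a trivially passing skip job gated on path filters, then folds both into one finalize job that branch protection can require. A reduced sketch of that gate, with hypothetical job and output names:

```yaml
jobs:
  suite-run:
    needs: check-changes
    if: needs.check-changes.outputs.suite-changed == 'true'
    runs-on: depot-ubuntu-24.04
    steps:
      - run: echo "run the real tests"
  suite-skip:
    needs: check-changes
    if: needs.check-changes.outputs.suite-changed != 'true'
    runs-on: depot-ubuntu-24.04
    steps:
      - run: echo "no relevant changes detected"
  suite-status:
    # Runs even when one branch was skipped; fails unless one succeeded.
    needs: [suite-run, suite-skip]
    if: always()
    runs-on: depot-ubuntu-24.04
    steps:
      - run: |
          [ "${{ needs.suite-run.result }}" = "success" ] || \
            [ "${{ needs.suite-skip.result }}" = "success" ]
```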
@@ -397,7 +397,7 @@ jobs: - check-changes - db-migration-test-run - db-migration-test-skip - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Finalize DB Migration Test status env: diff --git a/.github/workflows/pyrefly-diff-comment.yml b/.github/workflows/pyrefly-diff-comment.yml index c55b013dbe..7f82942e7e 100644 --- a/.github/workflows/pyrefly-diff-comment.yml +++ b/.github/workflows/pyrefly-diff-comment.yml @@ -12,7 +12,7 @@ permissions: {} jobs: comment: name: Comment PR with pyrefly diff - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 permissions: actions: read contents: read diff --git a/.github/workflows/pyrefly-diff.yml b/.github/workflows/pyrefly-diff.yml index eb15cd6f75..0cf54e3585 100644 --- a/.github/workflows/pyrefly-diff.yml +++ b/.github/workflows/pyrefly-diff.yml @@ -10,7 +10,7 @@ permissions: jobs: pyrefly-diff: - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 permissions: contents: read issues: write diff --git a/.github/workflows/pyrefly-type-coverage-comment.yml b/.github/workflows/pyrefly-type-coverage-comment.yml index 3c6c96a664..52c16f3153 100644 --- a/.github/workflows/pyrefly-type-coverage-comment.yml +++ b/.github/workflows/pyrefly-type-coverage-comment.yml @@ -12,7 +12,7 @@ permissions: {} jobs: comment: name: Comment PR with type coverage - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 permissions: actions: read contents: read diff --git a/.github/workflows/pyrefly-type-coverage.yml b/.github/workflows/pyrefly-type-coverage.yml index 0599c94eef..eae8debf1a 100644 --- a/.github/workflows/pyrefly-type-coverage.yml +++ b/.github/workflows/pyrefly-type-coverage.yml @@ -10,7 +10,7 @@ permissions: jobs: pyrefly-type-coverage: - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 permissions: contents: read issues: write diff --git a/.github/workflows/semantic-pull-request.yml b/.github/workflows/semantic-pull-request.yml index 49d2e94695..6f3193bbf5 100644 --- a/.github/workflows/semantic-pull-request.yml +++ b/.github/workflows/semantic-pull-request.yml @@ -16,7 +16,7 @@ jobs: name: Validate PR title permissions: pull-requests: read - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Complete merge group check if: github.event_name == 'merge_group' diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index c74f4a670a..b23648c7c6 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -12,7 +12,7 @@ on: jobs: stale: - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 permissions: issues: write pull-requests: write diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index fe11e7134f..b2d515c6b3 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -15,7 +15,7 @@ permissions: jobs: python-style: name: Python Style - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Checkout code @@ -57,7 +57,7 @@ jobs: web-style: name: Web Style - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 defaults: run: working-directory: ./web @@ -108,6 +108,8 @@ jobs: - name: Web tsslint if: steps.changed-files.outputs.any_changed == 'true' working-directory: ./web + env: + NODE_OPTIONS: --max-old-space-size=4096 run: vp run lint:tss - name: Web type check @@ -129,7 +131,7 @@ jobs: superlinter: name: SuperLinter - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - name: Checkout code diff --git a/.github/workflows/tool-test-sdks.yaml b/.github/workflows/tool-test-sdks.yaml index bf33207a14..79fddb1853 100644 --- 
a/.github/workflows/tool-test-sdks.yaml +++ b/.github/workflows/tool-test-sdks.yaml @@ -18,7 +18,7 @@ concurrency: jobs: build: name: unit test for Node.js SDK - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 defaults: run: diff --git a/.github/workflows/translate-i18n-claude.yml b/.github/workflows/translate-i18n-claude.yml index eecbbb1a56..5f48c22c56 100644 --- a/.github/workflows/translate-i18n-claude.yml +++ b/.github/workflows/translate-i18n-claude.yml @@ -35,7 +35,7 @@ concurrency: jobs: translate: if: github.repository == 'langgenius/dify' - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 timeout-minutes: 120 steps: @@ -158,7 +158,7 @@ jobs: - name: Run Claude Code for Translation Sync if: steps.context.outputs.CHANGED_FILES != '' - uses: anthropics/claude-code-action@38ec876110f9fbf8b950c79f534430740c3ac009 # v1.0.101 + uses: anthropics/claude-code-action@567fe954a4527e81f132d87d1bdbcc94f7737434 # v1.0.107 with: anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/trigger-i18n-sync.yml b/.github/workflows/trigger-i18n-sync.yml index 790ea9126d..87c88e2023 100644 --- a/.github/workflows/trigger-i18n-sync.yml +++ b/.github/workflows/trigger-i18n-sync.yml @@ -16,7 +16,7 @@ concurrency: jobs: trigger: if: github.repository == 'langgenius/dify' - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 timeout-minutes: 5 steps: diff --git a/.github/workflows/vdb-tests-full.yml b/.github/workflows/vdb-tests-full.yml index b79e8927d7..5c241af5c5 100644 --- a/.github/workflows/vdb-tests-full.yml +++ b/.github/workflows/vdb-tests-full.yml @@ -16,7 +16,7 @@ jobs: test: name: Full VDB Tests if: github.repository == 'langgenius/dify' - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 strategy: matrix: python-version: diff --git a/.github/workflows/vdb-tests.yml b/.github/workflows/vdb-tests.yml index bd13d662c3..38ec96f00f 100644 --- a/.github/workflows/vdb-tests.yml +++ b/.github/workflows/vdb-tests.yml @@ -13,7 +13,7 @@ concurrency: jobs: test: name: VDB Smoke Tests - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 strategy: matrix: python-version: diff --git a/.github/workflows/web-e2e.yml b/.github/workflows/web-e2e.yml index 6bd4d4f406..a634830fef 100644 --- a/.github/workflows/web-e2e.yml +++ b/.github/workflows/web-e2e.yml @@ -13,7 +13,7 @@ concurrency: jobs: test: name: Web Full-Stack E2E - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 defaults: run: shell: bash diff --git a/.github/workflows/web-tests.yml b/.github/workflows/web-tests.yml index 2a5cf19645..db6a797c15 100644 --- a/.github/workflows/web-tests.yml +++ b/.github/workflows/web-tests.yml @@ -16,7 +16,7 @@ concurrency: jobs: test: name: Web Tests (${{ matrix.shardIndex }}/${{ matrix.shardTotal }}) - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 env: VITEST_COVERAGE_SCOPE: app-components strategy: @@ -54,7 +54,7 @@ jobs: name: Merge Test Reports if: ${{ !cancelled() }} needs: [test] - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} defaults: @@ -92,7 +92,7 @@ jobs: dify-ui-test: name: dify-ui Tests - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} defaults: diff --git a/README.md b/README.md index c87472ace3..778028fc76 100644 --- a/README.md +++ b/README.md @@ -147,7 +147,7 @@ Import the dashboard to Grafana, using Dify's PostgreSQL database as data source ### Deployment with Kubernetes -If you'd like to configure a 
highly-available setup, there are community-contributed [Helm Charts](https://helm.sh/) and YAML files which allow Dify to be deployed on Kubernetes.
+If you'd like to configure a highly available setup, there are community-contributed [Helm Charts](https://helm.sh/) and YAML files which allow Dify to be deployed on Kubernetes.
 
 - [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
 - [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
diff --git a/api/controllers/console/tag/tags.py b/api/controllers/console/tag/tags.py
index 614bf03ea5..f73e2da54e 100644
--- a/api/controllers/console/tag/tags.py
+++ b/api/controllers/console/tag/tags.py
@@ -37,6 +37,11 @@ class TagBindingRemovePayload(BaseModel):
     type: TagType = Field(description="Tag type")
 
 
+class TagBindingItemDeletePayload(BaseModel):
+    target_id: str = Field(description="Target ID to unbind tag from")
+    type: TagType = Field(description="Tag type")
+
+
 class TagListQueryParam(BaseModel):
     type: Literal["knowledge", "app", ""] = Field("", description="Tag type filter")
     keyword: str | None = Field(None, description="Search keyword")
@@ -70,6 +75,7 @@ register_schema_models(
     TagBasePayload,
     TagBindingPayload,
     TagBindingRemovePayload,
+    TagBindingItemDeletePayload,
     TagListQueryParam,
     TagResponse,
 )
@@ -152,41 +158,107 @@ class TagUpdateDeleteApi(Resource):
         return "", 204
 
 
-@console_ns.route("/tag-bindings/create")
-class TagBindingCreateApi(Resource):
+def _require_tag_binding_edit_permission() -> None:
+    """
+    Ensure the current account can edit tag bindings.
+
+    Tag binding operations are allowed for users who can edit resources (app/dataset) within the current tenant.
+    """
+    current_user, _ = current_account_with_tenant()
+    # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
+    if not (current_user.has_edit_permission or current_user.is_dataset_editor):
+        raise Forbidden()
+
+
+def _create_tag_bindings() -> tuple[dict[str, str], int]:
+    _require_tag_binding_edit_permission()
+
+    payload = TagBindingPayload.model_validate(console_ns.payload or {})
+    TagService.save_tag_binding(
+        TagBindingCreatePayload(
+            tag_ids=payload.tag_ids,
+            target_id=payload.target_id,
+            type=payload.type,
+        )
+    )
+    return {"result": "success"}, 200
+
+
+def _remove_tag_binding() -> tuple[dict[str, str], int]:
+    _require_tag_binding_edit_permission()
+
+    payload = TagBindingRemovePayload.model_validate(console_ns.payload or {})
+    TagService.delete_tag_binding(
+        TagBindingDeletePayload(
+            tag_id=payload.tag_id,
+            target_id=payload.target_id,
+            type=payload.type,
+        )
+    )
+    return {"result": "success"}, 200
+
+
+@console_ns.route("/tag-bindings")
+class TagBindingCollectionApi(Resource):
+    """Canonical collection resource for tag binding creation."""
+
+    @console_ns.doc("create_tag_binding")
     @console_ns.expect(console_ns.models[TagBindingPayload.__name__])
     @setup_required
     @login_required
     @account_initialization_required
     def post(self):
-        current_user, _ = current_account_with_tenant()
-        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
-        if not (current_user.has_edit_permission or current_user.is_dataset_editor):
-            raise Forbidden()
+        return _create_tag_bindings()
 
-        payload = TagBindingPayload.model_validate(console_ns.payload or {})
-        TagService.save_tag_binding(
-            TagBindingCreatePayload(tag_ids=payload.tag_ids, target_id=payload.target_id, type=payload.type)
+
+@console_ns.route("/tag-bindings/<uuid:id>")
+class TagBindingItemApi(Resource):
+    """Canonical item resource for tag binding deletion."""
+
+    @console_ns.doc("delete_tag_binding")
+    @console_ns.doc(params={"id": "Tag ID"})
+    @console_ns.expect(console_ns.models[TagBindingItemDeletePayload.__name__])
+    @setup_required
+    @login_required
+    @account_initialization_required
+    def delete(self, id):
+        _require_tag_binding_edit_permission()
+        payload = TagBindingItemDeletePayload.model_validate(console_ns.payload or {})
+        TagService.delete_tag_binding(
+            TagBindingDeletePayload(
+                tag_id=str(id),
+                target_id=payload.target_id,
+                type=payload.type,
+            )
         )
         return {"result": "success"}, 200
 
 
+@console_ns.route("/tag-bindings/create")
+class DeprecatedTagBindingCreateApi(Resource):
+    """Deprecated verb-based alias for tag binding creation."""
+
+    @console_ns.doc("create_tag_binding_deprecated")
+    @console_ns.doc(deprecated=True)
+    @console_ns.doc(description="Deprecated legacy alias. Use POST /tag-bindings instead.")
+    @console_ns.expect(console_ns.models[TagBindingPayload.__name__])
+    @setup_required
+    @login_required
+    @account_initialization_required
+    def post(self):
+        return _create_tag_bindings()
+
+
 @console_ns.route("/tag-bindings/remove")
-class TagBindingDeleteApi(Resource):
+class DeprecatedTagBindingRemoveApi(Resource):
+    """Deprecated verb-based alias for tag binding deletion."""
+
+    @console_ns.doc("delete_tag_binding_deprecated")
+    @console_ns.doc(deprecated=True)
+    @console_ns.doc(description="Deprecated legacy alias. Use DELETE /tag-bindings/{id} instead.")
     @console_ns.expect(console_ns.models[TagBindingRemovePayload.__name__])
     @setup_required
     @login_required
     @account_initialization_required
     def post(self):
-        current_user, _ = current_account_with_tenant()
-        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
-        if not (current_user.has_edit_permission or current_user.is_dataset_editor):
-            raise Forbidden()
-
-        payload = TagBindingRemovePayload.model_validate(console_ns.payload or {})
-        TagService.delete_tag_binding(
-            TagBindingDeletePayload(tag_id=payload.tag_id, target_id=payload.target_id, type=payload.type)
-        )
-
-        return {"result": "success"}, 200
+        return _remove_tag_binding()
diff --git a/api/core/rag/datasource/retrieval_service.py b/api/core/rag/datasource/retrieval_service.py
index f17f7cd330..4ab42743f6 100644
--- a/api/core/rag/datasource/retrieval_service.py
+++ b/api/core/rag/datasource/retrieval_service.py
@@ -527,6 +527,7 @@ class RetrievalService:
                 child_index_nodes = session.execute(child_chunk_stmt).scalars().all()
 
                 for i in child_index_nodes:
+                    assert i.index_node_id
                     segment_ids.append(i.segment_id)
                     if i.segment_id in child_chunk_map:
                         child_chunk_map[i.segment_id].append(i)
diff --git a/api/core/rag/docstore/dataset_docstore.py b/api/core/rag/docstore/dataset_docstore.py
index 8e9ebdd17a..69aaefa764 100644
--- a/api/core/rag/docstore/dataset_docstore.py
+++ b/api/core/rag/docstore/dataset_docstore.py
@@ -11,6 +11,7 @@ from core.rag.index_processor.constant.index_type import IndexTechniqueType
 from core.rag.models.document import AttachmentDocument, Document
 from extensions.ext_database import db
 from models.dataset import ChildChunk, Dataset, DocumentSegment, SegmentAttachmentBinding
+from models.enums import SegmentType
 
 
 class DatasetDocumentStore:
@@ -127,6 +128,7 @@ class DatasetDocumentStore:
         if save_child:
             if doc.children:
                 for position, child in enumerate(doc.children, start=1):
+                    assert self._document_id
                     child_segment = ChildChunk(
                         tenant_id=self._dataset.tenant_id,
dataset_id=self._dataset.id, @@ -137,7 +139,7 @@ class DatasetDocumentStore: index_node_hash=child.metadata.get("doc_hash"), content=child.page_content, word_count=len(child.page_content), - type="automatic", + type=SegmentType.AUTOMATIC, created_by=self._user_id, ) db.session.add(child_segment) @@ -163,6 +165,7 @@ class DatasetDocumentStore: ) # add new child chunks for position, child in enumerate(doc.children, start=1): + assert self._document_id child_segment = ChildChunk( tenant_id=self._dataset.tenant_id, dataset_id=self._dataset.id, @@ -173,7 +176,7 @@ class DatasetDocumentStore: index_node_hash=child.metadata.get("doc_hash"), content=child.page_content, word_count=len(child.page_content), - type="automatic", + type=SegmentType.AUTOMATIC, created_by=self._user_id, ) db.session.add(child_segment) diff --git a/api/models/dataset.py b/api/models/dataset.py index 50301dd2d7..2b1a8ab228 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -1036,7 +1036,7 @@ class DocumentSegment(Base): return attachment_list -class ChildChunk(Base): +class ChildChunk(TypeBase): __tablename__ = "child_chunks" __table_args__ = ( sa.PrimaryKeyConstraint("id", name="child_chunk_pkey"), @@ -1046,29 +1046,42 @@ class ChildChunk(Base): ) # initial fields - id = mapped_column(StringUUID, nullable=False, default=lambda: str(uuid4())) - tenant_id = mapped_column(StringUUID, nullable=False) - dataset_id = mapped_column(StringUUID, nullable=False) - document_id = mapped_column(StringUUID, nullable=False) - segment_id = mapped_column(StringUUID, nullable=False) + id: Mapped[str] = mapped_column(StringUUID, nullable=False, default_factory=lambda: str(uuid4()), init=False) + tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + dataset_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + document_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + segment_id: Mapped[str] = mapped_column(StringUUID, nullable=False) position: Mapped[int] = mapped_column(sa.Integer, nullable=False) - content = mapped_column(LongText, nullable=False) + content: Mapped[str] = mapped_column(LongText, nullable=False) word_count: Mapped[int] = mapped_column(sa.Integer, nullable=False) # indexing fields - index_node_id = mapped_column(String(255), nullable=True) - index_node_hash = mapped_column(String(255), nullable=True) - type: Mapped[SegmentType] = mapped_column( - EnumText(SegmentType, length=255), nullable=False, server_default=sa.text("'automatic'") + created_by: Mapped[str] = mapped_column(StringUUID, nullable=False) + created_at: Mapped[datetime] = mapped_column( + DateTime, nullable=False, server_default=sa.func.current_timestamp(), init=False ) - created_by = mapped_column(StringUUID, nullable=False) - created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=sa.func.current_timestamp()) - updated_by = mapped_column(StringUUID, nullable=True) + updated_by: Mapped[str | None] = mapped_column(StringUUID, nullable=True, init=False) updated_at: Mapped[datetime] = mapped_column( - DateTime, nullable=False, server_default=sa.func.current_timestamp(), onupdate=func.current_timestamp() + DateTime, + nullable=False, + server_default=sa.func.current_timestamp(), + onupdate=func.current_timestamp(), + init=False, ) - indexing_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True) - completed_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True) - error = mapped_column(LongText, nullable=True) + indexing_at: Mapped[datetime | None] = 
mapped_column( + DateTime, nullable=True, insert_default=None, server_default=None, init=False + ) + completed_at: Mapped[datetime | None] = mapped_column( + DateTime, nullable=True, insert_default=None, server_default=None, init=False + ) + index_node_id: Mapped[str | None] = mapped_column(String(255), nullable=True, default=None) + index_node_hash: Mapped[str | None] = mapped_column(String(255), nullable=True, default=None) + type: Mapped[SegmentType] = mapped_column( + EnumText(SegmentType, length=255), + nullable=False, + server_default=sa.text("'automatic'"), + default=SegmentType.AUTOMATIC, + ) + error: Mapped[str | None] = mapped_column(LongText, nullable=True, init=False) @property def dataset(self): diff --git a/api/models/model.py b/api/models/model.py index 15139cbe93..f0f8d60cdc 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -1867,15 +1867,18 @@ class MessageAnnotation(TypeBase): ) id: Mapped[str] = mapped_column( - StringUUID, insert_default=lambda: str(uuid4()), default_factory=lambda: str(uuid4()), init=False + StringUUID, + insert_default=lambda: str(uuid4()), + default_factory=lambda: str(uuid4()), + init=False, ) app_id: Mapped[str] = mapped_column(StringUUID) question: Mapped[str] = mapped_column(LongText, nullable=False) content: Mapped[str] = mapped_column(LongText, nullable=False) + hit_count: Mapped[int] = mapped_column(sa.Integer, nullable=False, server_default=sa.text("0"), init=False) account_id: Mapped[str] = mapped_column(StringUUID, nullable=False) conversation_id: Mapped[str | None] = mapped_column(StringUUID, sa.ForeignKey("conversations.id"), default=None) message_id: Mapped[str | None] = mapped_column(StringUUID, default=None) - hit_count: Mapped[int] = mapped_column(sa.Integer, nullable=False, server_default=sa.text("0"), default=0) created_at: Mapped[datetime] = mapped_column( sa.DateTime, nullable=False, server_default=func.current_timestamp(), init=False ) diff --git a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/data_exporter/test_traceclient.py b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/data_exporter/test_traceclient.py index 286dda419c..ac09060e9d 100644 --- a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/data_exporter/test_traceclient.py +++ b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/data_exporter/test_traceclient.py @@ -225,8 +225,10 @@ class TestSpanBuilder: span = builder.build_span(span_data) assert isinstance(span, ReadableSpan) assert span.name == "test-span" + assert span.context is not None assert span.context.trace_id == 123 assert span.context.span_id == 456 + assert span.parent is not None assert span.parent.span_id == 789 assert span.resource == resource assert span.attributes == {"attr1": "val1"} diff --git a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/entities/test_aliyun_trace_entity.py b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/entities/test_aliyun_trace_entity.py index 38d33dd21b..a6808fec0a 100644 --- a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/entities/test_aliyun_trace_entity.py +++ b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/entities/test_aliyun_trace_entity.py @@ -64,12 +64,13 @@ class TestSpanData: def test_span_data_missing_required_fields(self): with pytest.raises(ValidationError): - SpanData( - trace_id=123, - # span_id missing - name="test_span", - start_time=1000, - end_time=2000, + SpanData.model_validate( + { + "trace_id": 123, + "name": 
"test_span", + "start_time": 1000, + "end_time": 2000, + } ) def test_span_data_arbitrary_types_allowed(self): diff --git a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace.py b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace.py index c1b11c9186..fa00829653 100644 --- a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace.py +++ b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace.py @@ -2,12 +2,14 @@ from __future__ import annotations from datetime import UTC, datetime from types import SimpleNamespace +from typing import cast from unittest.mock import MagicMock import dify_trace_aliyun.aliyun_trace as aliyun_trace_module import pytest from dify_trace_aliyun.aliyun_trace import AliyunDataTrace from dify_trace_aliyun.config import AliyunConfig +from dify_trace_aliyun.entities.aliyun_trace_entity import SpanData, TraceMetadata from dify_trace_aliyun.entities.semconv import ( GEN_AI_COMPLETION, GEN_AI_INPUT_MESSAGE, @@ -44,7 +46,7 @@ class RecordingTraceClient: self.endpoint = endpoint self.added_spans: list[object] = [] - def add_span(self, span) -> None: + def add_span(self, span: object) -> None: self.added_spans.append(span) def api_check(self) -> bool: @@ -63,11 +65,35 @@ def _make_link(trace_id: int = 1, span_id: int = 2) -> Link: trace_id=trace_id, span_id=span_id, is_remote=False, - trace_flags=TraceFlags.SAMPLED, + trace_flags=TraceFlags(TraceFlags.SAMPLED), ) return Link(context) +def _make_trace_metadata( + trace_id: int = 1, + workflow_span_id: int = 2, + session_id: str = "s", + user_id: str = "u", + links: list[Link] | None = None, +) -> TraceMetadata: + return TraceMetadata( + trace_id=trace_id, + workflow_span_id=workflow_span_id, + session_id=session_id, + user_id=user_id, + links=[] if links is None else links, + ) + + +def _recording_trace_client(trace_instance: AliyunDataTrace) -> RecordingTraceClient: + return cast(RecordingTraceClient, trace_instance.trace_client) + + +def _recorded_span_data(trace_instance: AliyunDataTrace) -> list[SpanData]: + return cast(list[SpanData], _recording_trace_client(trace_instance).added_spans) + + def _make_workflow_trace_info(**overrides) -> WorkflowTraceInfo: defaults = { "workflow_id": "workflow-id", @@ -263,20 +289,20 @@ def test_workflow_trace_adds_workflow_and_node_spans(trace_instance: AliyunDataT trace_instance.workflow_trace(trace_info) add_workflow_span.assert_called_once() - passed_trace_metadata = add_workflow_span.call_args.args[1] + passed_trace_metadata = cast(TraceMetadata, add_workflow_span.call_args.args[1]) assert passed_trace_metadata.trace_id == 111 assert passed_trace_metadata.workflow_span_id == 222 assert passed_trace_metadata.session_id == "c" assert passed_trace_metadata.user_id == "u" assert passed_trace_metadata.links == [] - assert trace_instance.trace_client.added_spans == ["span-1", "span-2"] + assert _recording_trace_client(trace_instance).added_spans == ["span-1", "span-2"] def test_message_trace_returns_early_if_no_message_data(trace_instance: AliyunDataTrace): trace_info = _make_message_trace_info(message_data=None) trace_instance.message_trace(trace_info) - assert trace_instance.trace_client.added_spans == [] + assert _recording_trace_client(trace_instance).added_spans == [] def test_message_trace_creates_message_and_llm_spans(trace_instance: AliyunDataTrace, monkeypatch: pytest.MonkeyPatch): @@ -302,8 +328,9 @@ def test_message_trace_creates_message_and_llm_spans(trace_instance: 
AliyunDataT ) trace_instance.message_trace(trace_info) - assert len(trace_instance.trace_client.added_spans) == 2 - message_span, llm_span = trace_instance.trace_client.added_spans + spans = _recorded_span_data(trace_instance) + assert len(spans) == 2 + message_span, llm_span = spans assert message_span.name == "message" assert message_span.trace_id == 10 @@ -324,7 +351,7 @@ def test_message_trace_creates_message_and_llm_spans(trace_instance: AliyunDataT def test_dataset_retrieval_trace_returns_early_if_no_message_data(trace_instance: AliyunDataTrace): trace_info = _make_dataset_retrieval_trace_info(message_data=None) trace_instance.dataset_retrieval_trace(trace_info) - assert trace_instance.trace_client.added_spans == [] + assert _recording_trace_client(trace_instance).added_spans == [] def test_dataset_retrieval_trace_creates_span(trace_instance: AliyunDataTrace, monkeypatch: pytest.MonkeyPatch): @@ -338,8 +365,9 @@ def test_dataset_retrieval_trace_creates_span(trace_instance: AliyunDataTrace, m monkeypatch.setattr(aliyun_trace_module, "extract_retrieval_documents", lambda _: [{"doc": "d"}]) trace_instance.dataset_retrieval_trace(_make_dataset_retrieval_trace_info(inputs="query")) - assert len(trace_instance.trace_client.added_spans) == 1 - span = trace_instance.trace_client.added_spans[0] + spans = _recorded_span_data(trace_instance) + assert len(spans) == 1 + span = spans[0] assert span.name == "dataset_retrieval" assert span.attributes[RETRIEVAL_QUERY] == "query" assert span.attributes[RETRIEVAL_DOCUMENT] == '[{"doc": "d"}]' @@ -348,7 +376,7 @@ def test_dataset_retrieval_trace_creates_span(trace_instance: AliyunDataTrace, m def test_tool_trace_returns_early_if_no_message_data(trace_instance: AliyunDataTrace): trace_info = _make_tool_trace_info(message_data=None) trace_instance.tool_trace(trace_info) - assert trace_instance.trace_client.added_spans == [] + assert _recording_trace_client(trace_instance).added_spans == [] def test_tool_trace_creates_span(trace_instance: AliyunDataTrace, monkeypatch: pytest.MonkeyPatch): @@ -371,8 +399,9 @@ def test_tool_trace_creates_span(trace_instance: AliyunDataTrace, monkeypatch: p ) ) - assert len(trace_instance.trace_client.added_spans) == 1 - span = trace_instance.trace_client.added_spans[0] + spans = _recorded_span_data(trace_instance) + assert len(spans) == 1 + span = spans[0] assert span.name == "my-tool" assert span.status == status assert span.attributes[TOOL_NAME] == "my-tool" @@ -409,7 +438,7 @@ def test_get_workflow_node_executions_builds_repo_and_fetches( def test_build_workflow_node_span_routes_llm_type(trace_instance: AliyunDataTrace, monkeypatch: pytest.MonkeyPatch): node_execution = MagicMock(spec=WorkflowNodeExecution) trace_info = _make_workflow_trace_info() - trace_metadata = MagicMock() + trace_metadata = _make_trace_metadata() monkeypatch.setattr(trace_instance, "build_workflow_llm_span", MagicMock(return_value="llm")) @@ -422,7 +451,7 @@ def test_build_workflow_node_span_routes_knowledge_retrieval_type( ): node_execution = MagicMock(spec=WorkflowNodeExecution) trace_info = _make_workflow_trace_info() - trace_metadata = MagicMock() + trace_metadata = _make_trace_metadata() monkeypatch.setattr(trace_instance, "build_workflow_retrieval_span", MagicMock(return_value="retrieval")) @@ -433,7 +462,7 @@ def test_build_workflow_node_span_routes_knowledge_retrieval_type( def test_build_workflow_node_span_routes_tool_type(trace_instance: AliyunDataTrace, monkeypatch: pytest.MonkeyPatch): node_execution = 
MagicMock(spec=WorkflowNodeExecution) trace_info = _make_workflow_trace_info() - trace_metadata = MagicMock() + trace_metadata = _make_trace_metadata() monkeypatch.setattr(trace_instance, "build_workflow_tool_span", MagicMock(return_value="tool")) @@ -444,7 +473,7 @@ def test_build_workflow_node_span_routes_tool_type(trace_instance: AliyunDataTra def test_build_workflow_node_span_routes_code_type(trace_instance: AliyunDataTrace, monkeypatch: pytest.MonkeyPatch): node_execution = MagicMock(spec=WorkflowNodeExecution) trace_info = _make_workflow_trace_info() - trace_metadata = MagicMock() + trace_metadata = _make_trace_metadata() monkeypatch.setattr(trace_instance, "build_workflow_task_span", MagicMock(return_value="task")) @@ -457,7 +486,7 @@ def test_build_workflow_node_span_handles_errors( ): node_execution = MagicMock(spec=WorkflowNodeExecution) trace_info = _make_workflow_trace_info() - trace_metadata = MagicMock() + trace_metadata = _make_trace_metadata() monkeypatch.setattr(trace_instance, "build_workflow_task_span", MagicMock(side_effect=RuntimeError("boom"))) node_execution.node_type = BuiltinNodeTypes.CODE @@ -472,7 +501,7 @@ def test_build_workflow_task_span(trace_instance: AliyunDataTrace, monkeypatch: status = Status(StatusCode.OK) monkeypatch.setattr(aliyun_trace_module, "get_workflow_node_status", lambda _: status) - trace_metadata = SimpleNamespace(trace_id=1, workflow_span_id=2, session_id="s", user_id="u", links=[]) + trace_metadata = _make_trace_metadata() node_execution = MagicMock(spec=WorkflowNodeExecution) node_execution.id = "node-id" node_execution.title = "title" @@ -494,7 +523,7 @@ def test_build_workflow_tool_span(trace_instance: AliyunDataTrace, monkeypatch: status = Status(StatusCode.OK) monkeypatch.setattr(aliyun_trace_module, "get_workflow_node_status", lambda _: status) - trace_metadata = SimpleNamespace(trace_id=1, workflow_span_id=2, session_id="s", user_id="u", links=[_make_link()]) + trace_metadata = _make_trace_metadata(links=[_make_link()]) node_execution = MagicMock(spec=WorkflowNodeExecution) node_execution.id = "node-id" node_execution.title = "my-tool" @@ -527,7 +556,7 @@ def test_build_workflow_retrieval_span(trace_instance: AliyunDataTrace, monkeypa aliyun_trace_module, "format_retrieval_documents", lambda docs: [{"formatted": True}] if docs else [] ) - trace_metadata = SimpleNamespace(trace_id=1, workflow_span_id=2, session_id="s", user_id="u", links=[]) + trace_metadata = _make_trace_metadata() node_execution = MagicMock(spec=WorkflowNodeExecution) node_execution.id = "node-id" node_execution.title = "retrieval" @@ -556,7 +585,7 @@ def test_build_workflow_llm_span(trace_instance: AliyunDataTrace, monkeypatch: p monkeypatch.setattr(aliyun_trace_module, "format_input_messages", lambda _: "in") monkeypatch.setattr(aliyun_trace_module, "format_output_messages", lambda _: "out") - trace_metadata = SimpleNamespace(trace_id=1, workflow_span_id=2, session_id="s", user_id="u", links=[]) + trace_metadata = _make_trace_metadata() node_execution = MagicMock(spec=WorkflowNodeExecution) node_execution.id = "node-id" node_execution.title = "llm" @@ -594,7 +623,7 @@ def test_add_workflow_span(trace_instance: AliyunDataTrace, monkeypatch: pytest. 
status = Status(StatusCode.OK) monkeypatch.setattr(aliyun_trace_module, "create_status_from_error", lambda _: status) - trace_metadata = SimpleNamespace(trace_id=1, workflow_span_id=2, session_id="s", user_id="u", links=[]) + trace_metadata = _make_trace_metadata() # CASE 1: With message_id trace_info = _make_workflow_trace_info( @@ -602,9 +631,11 @@ def test_add_workflow_span(trace_instance: AliyunDataTrace, monkeypatch: pytest. ) trace_instance.add_workflow_span(trace_info, trace_metadata) - assert len(trace_instance.trace_client.added_spans) == 2 - message_span = trace_instance.trace_client.added_spans[0] - workflow_span = trace_instance.trace_client.added_spans[1] + client = _recording_trace_client(trace_instance) + spans = _recorded_span_data(trace_instance) + assert len(spans) == 2 + message_span = spans[0] + workflow_span = spans[1] assert message_span.name == "message" assert message_span.span_kind == SpanKind.SERVER @@ -614,13 +645,14 @@ def test_add_workflow_span(trace_instance: AliyunDataTrace, monkeypatch: pytest. assert workflow_span.span_kind == SpanKind.INTERNAL assert workflow_span.parent_span_id == 20 - trace_instance.trace_client.added_spans.clear() + client.added_spans.clear() # CASE 2: Without message_id trace_info_no_msg = _make_workflow_trace_info(message_id=None) trace_instance.add_workflow_span(trace_info_no_msg, trace_metadata) - assert len(trace_instance.trace_client.added_spans) == 1 - span = trace_instance.trace_client.added_spans[0] + spans = _recorded_span_data(trace_instance) + assert len(spans) == 1 + span = spans[0] assert span.name == "workflow" assert span.span_kind == SpanKind.SERVER assert span.parent_span_id is None @@ -641,7 +673,8 @@ def test_suggested_question_trace(trace_instance: AliyunDataTrace, monkeypatch: trace_info = _make_suggested_question_trace_info(suggested_question=["how?"]) trace_instance.suggested_question_trace(trace_info) - assert len(trace_instance.trace_client.added_spans) == 1 - span = trace_instance.trace_client.added_spans[0] + spans = _recorded_span_data(trace_instance) + assert len(spans) == 1 + span = spans[0] assert span.name == "suggested_question" assert span.attributes[GEN_AI_COMPLETION] == '["how?"]' diff --git a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py index a9e7b80c2a..1b97746dea 100644 --- a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py +++ b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py @@ -1,4 +1,6 @@ import json +from collections.abc import Mapping +from typing import Any, cast from unittest.mock import MagicMock from dify_trace_aliyun.entities.semconv import ( @@ -170,7 +172,7 @@ def test_create_common_span_attributes(): def test_format_retrieval_documents(): # Not a list - assert format_retrieval_documents("not a list") == [] + assert format_retrieval_documents(cast(list[object], "not a list")) == [] # Valid list docs = [ @@ -211,7 +213,7 @@ def test_format_retrieval_documents(): def test_format_input_messages(): # Not a dict - assert format_input_messages(None) == serialize_json_data([]) + assert format_input_messages(cast(Mapping[str, Any], None)) == serialize_json_data([]) # No prompts assert format_input_messages({}) == serialize_json_data([]) @@ -244,7 +246,7 @@ def test_format_input_messages(): def test_format_output_messages(): # Not a dict - assert format_output_messages(None) == 
serialize_json_data([]) + assert format_output_messages(cast(Mapping[str, Any], None)) == serialize_json_data([]) # No text assert format_output_messages({"finish_reason": "stop"}) == serialize_json_data([]) diff --git a/api/providers/trace/trace-aliyun/tests/unit_tests/test_config_entity.py b/api/providers/trace/trace-aliyun/tests/unit_tests/test_config_entity.py index 1b24ee7421..8068ee1328 100644 --- a/api/providers/trace/trace-aliyun/tests/unit_tests/test_config_entity.py +++ b/api/providers/trace/trace-aliyun/tests/unit_tests/test_config_entity.py @@ -25,13 +25,13 @@ class TestAliyunConfig: def test_missing_required_fields(self): """Test that required fields are enforced""" with pytest.raises(ValidationError): - AliyunConfig() + AliyunConfig.model_validate({}) with pytest.raises(ValidationError): - AliyunConfig(license_key="test_license") + AliyunConfig.model_validate({"license_key": "test_license"}) with pytest.raises(ValidationError): - AliyunConfig(endpoint="https://tracing-analysis-dc-hz.aliyuncs.com") + AliyunConfig.model_validate({"endpoint": "https://tracing-analysis-dc-hz.aliyuncs.com"}) def test_app_name_validation_empty(self): """Test app_name validation with empty value""" diff --git a/api/providers/trace/trace-arize-phoenix/tests/unit_tests/arize_phoenix_trace/test_arize_phoenix_trace.py b/api/providers/trace/trace-arize-phoenix/tests/unit_tests/arize_phoenix_trace/test_arize_phoenix_trace.py index b0691a87ea..e9ecc2e083 100644 --- a/api/providers/trace/trace-arize-phoenix/tests/unit_tests/arize_phoenix_trace/test_arize_phoenix_trace.py +++ b/api/providers/trace/trace-arize-phoenix/tests/unit_tests/arize_phoenix_trace/test_arize_phoenix_trace.py @@ -1,4 +1,5 @@ from datetime import UTC, datetime, timedelta +from typing import cast from unittest.mock import MagicMock, patch import pytest @@ -129,7 +130,7 @@ def test_set_span_status(): return "SilentErrorRepr" span.reset_mock() - set_span_status(span, SilentError()) + set_span_status(span, cast(Exception | str | None, SilentError())) assert span.add_event.call_args[1]["attributes"][OTELSpanAttributes.EXCEPTION_MESSAGE] == "SilentErrorRepr" diff --git a/api/providers/trace/trace-langfuse/tests/unit_tests/test_config_entity.py b/api/providers/trace/trace-langfuse/tests/unit_tests/test_config_entity.py index 103d888eef..0c3c3fc81e 100644 --- a/api/providers/trace/trace-langfuse/tests/unit_tests/test_config_entity.py +++ b/api/providers/trace/trace-langfuse/tests/unit_tests/test_config_entity.py @@ -28,13 +28,13 @@ class TestLangfuseConfig: def test_missing_required_fields(self): """Test that required fields are enforced""" with pytest.raises(ValidationError): - LangfuseConfig() + LangfuseConfig.model_validate({}) with pytest.raises(ValidationError): - LangfuseConfig(public_key="public") + LangfuseConfig.model_validate({"public_key": "public"}) with pytest.raises(ValidationError): - LangfuseConfig(secret_key="secret") + LangfuseConfig.model_validate({"secret_key": "secret"}) def test_host_validation_empty(self): """Test host validation with empty value""" diff --git a/api/providers/trace/trace-langfuse/tests/unit_tests/test_langfuse_trace.py b/api/providers/trace/trace-langfuse/tests/unit_tests/test_langfuse_trace.py index 0340ffb669..82d69b6180 100644 --- a/api/providers/trace/trace-langfuse/tests/unit_tests/test_langfuse_trace.py +++ b/api/providers/trace/trace-langfuse/tests/unit_tests/test_langfuse_trace.py @@ -2,6 +2,7 @@ from datetime import datetime, timedelta from types import SimpleNamespace +from typing import cast 
from unittest.mock import MagicMock, patch from dify_trace_langfuse.config import LangfuseConfig @@ -134,4 +135,4 @@ class TestLangFuseDataTraceCompletionStartTime: assert trace._get_completion_start_time(start_time, None) is None assert trace._get_completion_start_time(start_time, -1) is None - assert trace._get_completion_start_time(start_time, "invalid") is None + assert trace._get_completion_start_time(start_time, cast(float | int | None, "invalid")) is None diff --git a/api/providers/trace/trace-langsmith/tests/unit_tests/test_config_entity.py b/api/providers/trace/trace-langsmith/tests/unit_tests/test_config_entity.py index 37efaf69cf..bd226c9f1a 100644 --- a/api/providers/trace/trace-langsmith/tests/unit_tests/test_config_entity.py +++ b/api/providers/trace/trace-langsmith/tests/unit_tests/test_config_entity.py @@ -21,13 +21,13 @@ class TestLangSmithConfig: def test_missing_required_fields(self): """Test that required fields are enforced""" with pytest.raises(ValidationError): - LangSmithConfig() + LangSmithConfig.model_validate({}) with pytest.raises(ValidationError): - LangSmithConfig(api_key="key") + LangSmithConfig.model_validate({"api_key": "key"}) with pytest.raises(ValidationError): - LangSmithConfig(project="project") + LangSmithConfig.model_validate({"project": "project"}) def test_endpoint_validation_https_only(self): """Test endpoint validation only allows HTTPS""" diff --git a/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py b/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py index 20211456e3..46c9750a5d 100644 --- a/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py +++ b/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py @@ -599,7 +599,6 @@ class TestMessageTrace: span = MagicMock() mock_tracing["start"].return_value = span mock_tracing["set"].return_value = "token" - mock_db.session.query.return_value.where.return_value.first.return_value = None trace_instance.message_trace(_make_message_trace_info()) mock_tracing["start"].assert_called_once() @@ -609,7 +608,6 @@ class TestMessageTrace: span = MagicMock() mock_tracing["start"].return_value = span mock_tracing["set"].return_value = "token" - mock_db.session.query.return_value.where.return_value.first.return_value = None trace_info = _make_message_trace_info(error="something broke") trace_instance.message_trace(trace_info) @@ -620,7 +618,6 @@ class TestMessageTrace: span = MagicMock() mock_tracing["start"].return_value = span mock_tracing["set"].return_value = "token" - mock_db.session.query.return_value.where.return_value.first.return_value = None monkeypatch.setenv("FILES_URL", "http://files.test") file_data = SimpleNamespace(url="path/to/file.png") @@ -638,7 +635,6 @@ class TestMessageTrace: span = MagicMock() mock_tracing["start"].return_value = span mock_tracing["set"].return_value = "token" - mock_db.session.query.return_value.where.return_value.first.return_value = None trace_info = _make_message_trace_info(file_list=None, message_file_data=None) trace_instance.message_trace(trace_info) @@ -651,7 +647,6 @@ class TestMessageTrace: end_user = MagicMock() end_user.session_id = "session-xyz" - mock_db.session.query.return_value.where.return_value.first.return_value = end_user trace_info = _make_message_trace_info( metadata={"from_end_user_id": "eu-1", "conversation_id": "c1"}, @@ -664,7 +659,6 @@ class TestMessageTrace: span = MagicMock() mock_tracing["start"].return_value = span 
mock_tracing["set"].return_value = "token" - mock_db.session.query.return_value.where.return_value.first.return_value = None trace_info = _make_message_trace_info( metadata={"from_account_id": "acc-1"}, diff --git a/api/providers/trace/trace-opik/tests/unit_tests/test_opik_trace.py b/api/providers/trace/trace-opik/tests/unit_tests/test_opik_trace.py index fba290f5b8..2e0796c291 100644 --- a/api/providers/trace/trace-opik/tests/unit_tests/test_opik_trace.py +++ b/api/providers/trace/trace-opik/tests/unit_tests/test_opik_trace.py @@ -12,6 +12,7 @@ from __future__ import annotations import uuid from datetime import datetime +from typing import cast from unittest.mock import MagicMock, patch from dify_trace_opik.opik_trace import OpikDataTrace, _seed_to_uuid4, prepare_opik_uuid @@ -69,6 +70,14 @@ def _make_opik_trace_instance() -> OpikDataTrace: return instance +def _add_trace_mock(instance: OpikDataTrace) -> MagicMock: + return cast(MagicMock, instance.add_trace) + + +def _add_span_mock(instance: OpikDataTrace) -> MagicMock: + return cast(MagicMock, instance.add_span) + + # --------------------------------------------------------------------------- # _seed_to_uuid4 # --------------------------------------------------------------------------- @@ -155,21 +164,21 @@ class TestWorkflowTraceWithoutMessageId: def test_root_span_is_created(self): trace_info = _make_workflow_trace_info(message_id=None) instance = self._run(trace_info) - assert instance.add_span.called + assert _add_span_mock(instance).called def test_root_span_id_matches_expected(self): trace_info = _make_workflow_trace_info(message_id=None) instance = self._run(trace_info) expected = self._expected_root_span_id(trace_info) - root_span_kwargs = instance.add_span.call_args_list[0][0][0] + root_span_kwargs = _add_span_mock(instance).call_args_list[0][0][0] assert root_span_kwargs["id"] == expected def test_root_span_has_no_parent(self): trace_info = _make_workflow_trace_info(message_id=None) instance = self._run(trace_info) - root_span_kwargs = instance.add_span.call_args_list[0][0][0] + root_span_kwargs = _add_span_mock(instance).call_args_list[0][0][0] assert root_span_kwargs["parent_span_id"] is None def test_trace_name_is_workflow_trace(self): @@ -177,21 +186,21 @@ class TestWorkflowTraceWithoutMessageId: trace_info = _make_workflow_trace_info(message_id=None) instance = self._run(trace_info) - trace_kwargs = instance.add_trace.call_args_list[0][0][0] + trace_kwargs = _add_trace_mock(instance).call_args_list[0][0][0] assert trace_kwargs["name"] == TraceTaskName.WORKFLOW_TRACE def test_root_span_name_is_workflow_trace(self): trace_info = _make_workflow_trace_info(message_id=None) instance = self._run(trace_info) - root_span_kwargs = instance.add_span.call_args_list[0][0][0] + root_span_kwargs = _add_span_mock(instance).call_args_list[0][0][0] assert root_span_kwargs["name"] == TraceTaskName.WORKFLOW_TRACE def test_root_span_has_workflow_tag(self): trace_info = _make_workflow_trace_info(message_id=None) instance = self._run(trace_info) - root_span_kwargs = instance.add_span.call_args_list[0][0][0] + root_span_kwargs = _add_span_mock(instance).call_args_list[0][0][0] assert "workflow" in root_span_kwargs["tags"] def test_node_execution_spans_are_parented_to_root(self): @@ -214,8 +223,9 @@ class TestWorkflowTraceWithoutMessageId: instance = self._run(trace_info, node_executions=[node_exec]) # call_args_list[0] = root span, [1] = node execution span - assert instance.add_span.call_count == 2 - node_span_kwargs = 
instance.add_span.call_args_list[1][0][0] + add_span = _add_span_mock(instance) + assert add_span.call_count == 2 + node_span_kwargs = add_span.call_args_list[1][0][0] assert node_span_kwargs["parent_span_id"] == expected_root_span_id def test_node_span_not_parented_to_workflow_app_log_id(self): @@ -240,7 +250,7 @@ class TestWorkflowTraceWithoutMessageId: instance = self._run(trace_info, node_executions=[node_exec]) old_parent_id = prepare_opik_uuid(trace_info.start_time, trace_info.workflow_app_log_id) - node_span_kwargs = instance.add_span.call_args_list[1][0][0] + node_span_kwargs = _add_span_mock(instance).call_args_list[1][0][0] assert node_span_kwargs["parent_span_id"] != old_parent_id def test_root_span_id_differs_from_trace_id(self): @@ -283,7 +293,7 @@ class TestWorkflowTraceWithMessageId: trace_info = _make_workflow_trace_info(message_id=self._MESSAGE_ID) instance = self._run(trace_info) - trace_kwargs = instance.add_trace.call_args_list[0][0][0] + trace_kwargs = _add_trace_mock(instance).call_args_list[0][0][0] assert trace_kwargs["name"] == TraceTaskName.MESSAGE_TRACE def test_root_span_uses_workflow_run_id_directly(self): @@ -292,7 +302,7 @@ class TestWorkflowTraceWithMessageId: instance = self._run(trace_info) expected_root_span_id = prepare_opik_uuid(trace_info.start_time, trace_info.workflow_run_id) - root_span_kwargs = instance.add_span.call_args_list[0][0][0] + root_span_kwargs = _add_span_mock(instance).call_args_list[0][0][0] assert root_span_kwargs["id"] == expected_root_span_id def test_root_span_id_differs_from_no_message_id_case(self): @@ -326,5 +336,5 @@ class TestWorkflowTraceWithMessageId: instance = self._run(trace_info, node_executions=[node_exec]) - node_span_kwargs = instance.add_span.call_args_list[1][0][0] + node_span_kwargs = _add_span_mock(instance).call_args_list[1][0][0] assert node_span_kwargs["parent_span_id"] == expected_root_span_id diff --git a/api/providers/trace/trace-tencent/tests/unit_tests/tencent_trace/test_client.py b/api/providers/trace/trace-tencent/tests/unit_tests/tencent_trace/test_client.py index 1e656e2462..3cd918f408 100644 --- a/api/providers/trace/trace-tencent/tests/unit_tests/tencent_trace/test_client.py +++ b/api/providers/trace/trace-tencent/tests/unit_tests/tencent_trace/test_client.py @@ -5,6 +5,7 @@ from __future__ import annotations import sys import types from types import SimpleNamespace +from typing import Any, TypedDict, cast from unittest.mock import MagicMock import pytest @@ -12,7 +13,7 @@ from dify_trace_tencent import client as client_module from dify_trace_tencent.client import TencentTraceClient, _get_opentelemetry_sdk_version from dify_trace_tencent.entities.tencent_trace_entity import SpanData from opentelemetry.sdk.trace import Event -from opentelemetry.trace import Status, StatusCode +from opentelemetry.trace import SpanContext, Status, StatusCode, TraceFlags metric_reader_instances: list[DummyMetricReader] = [] meter_provider_instances: list[DummyMeterProvider] = [] @@ -80,6 +81,16 @@ class DummyJsonMetricExporterNoTemporality: self.kwargs = kwargs +class PatchedCoreComponents(TypedDict): + span_exporter: MagicMock + span_processor: MagicMock + tracer: MagicMock + span: MagicMock + tracer_provider: MagicMock + logger: MagicMock + trace_api: Any + + def _add_stub_modules(monkeypatch: pytest.MonkeyPatch) -> None: """Drop fake metric modules into sys.modules so the client imports resolve.""" @@ -118,7 +129,7 @@ def stub_metric_modules(monkeypatch: pytest.MonkeyPatch) -> None: @pytest.fixture(autouse=True) -def 
patch_core_components(monkeypatch: pytest.MonkeyPatch) -> dict[str, object]: +def patch_core_components(monkeypatch: pytest.MonkeyPatch) -> PatchedCoreComponents: span_exporter = MagicMock(name="span_exporter") monkeypatch.setattr(client_module, "OTLPSpanExporter", MagicMock(return_value=span_exporter)) @@ -168,6 +179,15 @@ def patch_core_components(monkeypatch: pytest.MonkeyPatch) -> dict[str, object]: } +def _make_span_context(trace_id: int = 1, span_id: int = 2) -> SpanContext: + return SpanContext( + trace_id=trace_id, + span_id=span_id, + is_remote=False, + trace_flags=TraceFlags(TraceFlags.SAMPLED), + ) + + def _build_client() -> TencentTraceClient: return TencentTraceClient( service_name="service", @@ -208,7 +228,7 @@ def test_resolve_grpc_target_parsable_variants(endpoint: str, expected: tuple[st def test_resolve_grpc_target_handles_errors() -> None: - assert TencentTraceClient._resolve_grpc_target(123) == ("localhost:4317", True, "localhost", 4317) + assert TencentTraceClient._resolve_grpc_target(cast(str, 123)) == ("localhost:4317", True, "localhost", 4317) @pytest.mark.parametrize( @@ -248,7 +268,7 @@ def test_record_methods_skip_when_histogram_missing() -> None: client.record_trace_duration(0.5) -def test_record_llm_duration_handles_exceptions(patch_core_components: dict[str, object]) -> None: +def test_record_llm_duration_handles_exceptions(patch_core_components: PatchedCoreComponents) -> None: client = _build_client() client.hist_llm_duration = MagicMock(name="hist_llm_duration") client.hist_llm_duration.record.side_effect = RuntimeError("boom") @@ -258,10 +278,11 @@ def test_record_llm_duration_handles_exceptions(patch_core_components: dict[str, logger.debug.assert_called() -def test_create_and_export_span_sets_attributes(patch_core_components: dict[str, object]) -> None: +def test_create_and_export_span_sets_attributes(patch_core_components: PatchedCoreComponents) -> None: client = _build_client() span = patch_core_components["span"] - span.get_span_context.return_value = "ctx" + ctx = _make_span_context(span_id=2) + span.get_span_context.return_value = ctx data = SpanData( trace_id=1, @@ -280,14 +301,15 @@ def test_create_and_export_span_sets_attributes(patch_core_components: dict[str, span.add_event.assert_called_once() span.set_status.assert_called_once() span.end.assert_called_once_with(end_time=20) - assert client.span_contexts[2] == "ctx" + assert client.span_contexts[2] == ctx -def test_create_and_export_span_uses_parent_context(patch_core_components: dict[str, object]) -> None: +def test_create_and_export_span_uses_parent_context(patch_core_components: PatchedCoreComponents) -> None: client = _build_client() - client.span_contexts[10] = "existing" + existing_context = _make_span_context(span_id=10) + client.span_contexts[10] = existing_context span = patch_core_components["span"] - span.get_span_context.return_value = "child" + span.get_span_context.return_value = _make_span_context(span_id=11) data = SpanData( trace_id=1, @@ -302,14 +324,14 @@ def test_create_and_export_span_uses_parent_context(patch_core_components: dict[ client._create_and_export_span(data) trace_api = patch_core_components["trace_api"] - trace_api.NonRecordingSpan.assert_called_once_with("existing") + trace_api.NonRecordingSpan.assert_called_once_with(existing_context) trace_api.set_span_in_context.assert_called_once() -def test_create_and_export_span_exception_logs_error(patch_core_components: dict[str, object]) -> None: +def 
test_create_and_export_span_exception_logs_error(patch_core_components: PatchedCoreComponents) -> None: client = _build_client() span = patch_core_components["span"] - span.get_span_context.return_value = "ctx" + span.get_span_context.return_value = _make_span_context(span_id=2) client.tracer.start_span.side_effect = RuntimeError("boom") client._create_and_export_span( @@ -385,7 +407,7 @@ def test_get_project_url() -> None: assert client.get_project_url() == "https://console.cloud.tencent.com/apm" -def test_shutdown_flushes_all_components(patch_core_components: dict[str, object]) -> None: +def test_shutdown_flushes_all_components(patch_core_components: PatchedCoreComponents) -> None: client = _build_client() span_processor = patch_core_components["span_processor"] tracer_provider = patch_core_components["tracer_provider"] @@ -401,10 +423,11 @@ def test_shutdown_flushes_all_components(patch_core_components: dict[str, object metric_reader.shutdown.assert_called_once() -def test_shutdown_logs_when_meter_provider_fails(patch_core_components: dict[str, object]) -> None: +def test_shutdown_logs_when_meter_provider_fails(patch_core_components: PatchedCoreComponents) -> None: client = _build_client() meter_provider = meter_provider_instances[-1] meter_provider.shutdown.side_effect = RuntimeError("boom") + assert client.metric_reader is not None client.metric_reader.shutdown.side_effect = RuntimeError("boom") client.shutdown() @@ -433,7 +456,7 @@ def test_metrics_initialization_failure_sets_histogram_attributes(monkeypatch: p assert client.metric_reader is None -def test_add_span_logs_exception(monkeypatch: pytest.MonkeyPatch, patch_core_components: dict[str, object]) -> None: +def test_add_span_logs_exception(monkeypatch: pytest.MonkeyPatch, patch_core_components: PatchedCoreComponents) -> None: client = _build_client() monkeypatch.setattr(client, "_create_and_export_span", MagicMock(side_effect=RuntimeError("boom"))) @@ -454,10 +477,10 @@ def test_add_span_logs_exception(monkeypatch: pytest.MonkeyPatch, patch_core_com logger.exception.assert_called_once() -def test_create_and_export_span_converts_attribute_types(patch_core_components: dict[str, object]) -> None: +def test_create_and_export_span_converts_attribute_types(patch_core_components: PatchedCoreComponents) -> None: client = _build_client() span = patch_core_components["span"] - span.get_span_context.return_value = "ctx" + span.get_span_context.return_value = _make_span_context(span_id=2) data = SpanData.model_construct( trace_id=1, @@ -485,7 +508,7 @@ def test_record_llm_duration_converts_attributes() -> None: hist_mock = MagicMock(name="hist_llm_duration") client.hist_llm_duration = hist_mock - client.record_llm_duration(0.3, {"foo": object(), "bar": 2}) + client.record_llm_duration(0.3, cast(dict[str, str], {"foo": object(), "bar": 2})) _, attrs = hist_mock.record.call_args.args assert isinstance(attrs["foo"], str) assert attrs["bar"] == 2 @@ -496,7 +519,7 @@ def test_record_trace_duration_converts_attributes() -> None: hist_mock = MagicMock(name="hist_trace_duration") client.hist_trace_duration = hist_mock - client.record_trace_duration(1.0, {"meta": object(), "ok": True}) + client.record_trace_duration(1.0, cast(dict[str, str], {"meta": object(), "ok": True})) _, attrs = hist_mock.record.call_args.args assert isinstance(attrs["meta"], str) assert attrs["ok"] is True @@ -512,7 +535,7 @@ def test_record_trace_duration_converts_attributes() -> None: ], ) def test_record_methods_handle_exceptions( - method: str, attr_name: str, args: 
tuple[object, ...], patch_core_components: dict[str, object] + method: str, attr_name: str, args: tuple[object, ...], patch_core_components: PatchedCoreComponents ) -> None: client = _build_client() hist_mock = MagicMock(name=attr_name) @@ -527,35 +550,38 @@ def test_record_methods_handle_exceptions( def test_metrics_initializes_grpc_metric_exporter() -> None: client = _build_client() metric_reader = metric_reader_instances[-1] + exporter = cast(DummyGrpcMetricExporter, metric_reader.exporter) - assert isinstance(metric_reader.exporter, DummyGrpcMetricExporter) + assert isinstance(exporter, DummyGrpcMetricExporter) assert metric_reader.export_interval_millis == client.metrics_export_interval_sec * 1000 - assert metric_reader.exporter.kwargs["endpoint"] == "trace.example.com:4317" - assert metric_reader.exporter.kwargs["insecure"] is False - assert metric_reader.exporter.kwargs["headers"]["authorization"] == "Bearer token" + assert exporter.kwargs["endpoint"] == "trace.example.com:4317" + assert exporter.kwargs["insecure"] is False + assert cast(dict[str, dict[str, str]], exporter.kwargs)["headers"]["authorization"] == "Bearer token" def test_metrics_initializes_http_protobuf_metric_exporter(monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setenv("OTEL_EXPORTER_OTLP_PROTOCOL", "http/protobuf") client = _build_client() metric_reader = metric_reader_instances[-1] + exporter = cast(DummyHttpMetricExporter, metric_reader.exporter) - assert isinstance(metric_reader.exporter, DummyHttpMetricExporter) + assert isinstance(exporter, DummyHttpMetricExporter) assert metric_reader.export_interval_millis == client.metrics_export_interval_sec * 1000 - assert metric_reader.exporter.kwargs["endpoint"] == client.endpoint - assert metric_reader.exporter.kwargs["headers"]["authorization"] == "Bearer token" + assert exporter.kwargs["endpoint"] == client.endpoint + assert cast(dict[str, dict[str, str]], exporter.kwargs)["headers"]["authorization"] == "Bearer token" def test_metrics_initializes_http_json_metric_exporter(monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setenv("OTEL_EXPORTER_OTLP_PROTOCOL", "http/json") client = _build_client() metric_reader = metric_reader_instances[-1] + exporter = cast(DummyJsonMetricExporter, metric_reader.exporter) - assert isinstance(metric_reader.exporter, DummyJsonMetricExporter) + assert isinstance(exporter, DummyJsonMetricExporter) assert metric_reader.export_interval_millis == client.metrics_export_interval_sec * 1000 - assert metric_reader.exporter.kwargs["endpoint"] == client.endpoint - assert metric_reader.exporter.kwargs["headers"]["authorization"] == "Bearer token" - assert "preferred_temporality" in metric_reader.exporter.kwargs + assert exporter.kwargs["endpoint"] == client.endpoint + assert cast(dict[str, dict[str, str]], exporter.kwargs)["headers"]["authorization"] == "Bearer token" + assert "preferred_temporality" in exporter.kwargs def test_metrics_http_json_metric_exporter_falls_back_without_temporality(monkeypatch: pytest.MonkeyPatch) -> None: @@ -564,9 +590,10 @@ def test_metrics_http_json_metric_exporter_falls_back_without_temporality(monkey monkeypatch.setattr(exporter_module, "OTLPMetricExporter", DummyJsonMetricExporterNoTemporality) _ = _build_client() metric_reader = metric_reader_instances[-1] + exporter = cast(DummyJsonMetricExporterNoTemporality, metric_reader.exporter) - assert isinstance(metric_reader.exporter, DummyJsonMetricExporterNoTemporality) - assert "preferred_temporality" not in metric_reader.exporter.kwargs + assert 
isinstance(exporter, DummyJsonMetricExporterNoTemporality) + assert "preferred_temporality" not in exporter.kwargs def test_metrics_http_json_uses_http_fallback_when_no_json_exporter(monkeypatch: pytest.MonkeyPatch) -> None: diff --git a/api/providers/trace/trace-weave/tests/unit_tests/test_config_entity.py b/api/providers/trace/trace-weave/tests/unit_tests/test_config_entity.py index eeb1fe1d87..377c768198 100644 --- a/api/providers/trace/trace-weave/tests/unit_tests/test_config_entity.py +++ b/api/providers/trace/trace-weave/tests/unit_tests/test_config_entity.py @@ -31,13 +31,13 @@ class TestWeaveConfig: def test_missing_required_fields(self): """Test that required fields are enforced""" with pytest.raises(ValidationError): - WeaveConfig() + WeaveConfig.model_validate({}) with pytest.raises(ValidationError): - WeaveConfig(api_key="key") + WeaveConfig.model_validate({"api_key": "key"}) with pytest.raises(ValidationError): - WeaveConfig(project="project") + WeaveConfig.model_validate({"project": "project"}) def test_endpoint_validation_https_only(self): """Test endpoint validation only allows HTTPS""" diff --git a/api/pyproject.toml b/api/pyproject.toml index aa41093826..4c59f8424c 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -6,9 +6,10 @@ requires-python = "~=3.12.0" dependencies = [ # Legacy: mature and widely deployed "bleach>=6.3.0", - "boto3>=1.42.91", + "boto3>=1.42.96", "celery>=5.6.3", "croniter>=6.2.2", + "flask>=3.1.3,<4.0.0", "flask-cors>=6.0.2", "gevent>=26.4.0", "gevent-websocket>=0.10.1", @@ -16,7 +17,7 @@ dependencies = [ "google-api-python-client>=2.194.0", "gunicorn>=25.3.0", "psycogreen>=1.0.2", - "psycopg2-binary>=2.9.11", + "psycopg2-binary>=2.9.12", "python-socketio>=5.13.0", "redis[hiredis]>=7.4.0", "sendgrid>=6.12.5", @@ -32,13 +33,13 @@ dependencies = [ "flask-restx>=1.3.2,<2.0.0", "google-cloud-aiplatform>=1.148.1,<2.0.0", "httpx[socks]>=0.28.1,<1.0.0", - "opentelemetry-distro>=0.62b0,<1.0.0", + "opentelemetry-distro>=0.62b1,<1.0.0", "opentelemetry-instrumentation-celery>=0.62b0,<1.0.0", "opentelemetry-instrumentation-flask>=0.62b0,<1.0.0", "opentelemetry-instrumentation-httpx>=0.62b0,<1.0.0", "opentelemetry-instrumentation-redis>=0.62b0,<1.0.0", "opentelemetry-instrumentation-sqlalchemy>=0.62b0,<1.0.0", - "opentelemetry-propagator-b3>=1.41.0,<2.0.0", + "opentelemetry-propagator-b3>=1.41.1,<2.0.0", "readabilipy>=0.3.0,<1.0.0", "resend>=2.27.0,<3.0.0", @@ -117,7 +118,7 @@ dev = [ "faker>=40.15.0", "lxml-stubs>=0.5.1", "basedpyright>=1.39.3", - "ruff>=0.15.11", + "ruff>=0.15.12", "pytest>=9.0.3", "pytest-benchmark>=5.2.3", "pytest-cov>=7.1.0", @@ -144,7 +145,7 @@ dev = [ "types-pexpect>=4.9.0", "types-protobuf>=7.34.1", "types-psutil>=7.2.2", - "types-psycopg2>=2.9.21", + "types-psycopg2>=2.9.21.20260422", "types-pygments>=2.20.0", "types-pymysql>=1.1.0", "types-python-dateutil>=2.9.0", @@ -157,9 +158,9 @@ dev = [ "types-tensorflow>=2.18.0.20260408", "types-tqdm>=4.67.3.20260408", "types-ujson>=5.10.0", - "boto3-stubs>=1.42.92", + "boto3-stubs>=1.42.96", "types-jmespath>=1.1.0.20260408", - "hypothesis>=6.152.1", + "hypothesis>=6.152.3", "types_pyOpenSSL>=24.1.0", "types_cffi>=2.0.0.20260408", "types_setuptools>=82.0.0.20260408", @@ -169,12 +170,12 @@ dev = [ "import-linter>=2.3", "types-redis>=4.6.0.20241004", "celery-types>=0.23.0", - "mypy>=1.20.1", + "mypy>=1.20.2", # "locust>=2.40.4", # Temporarily removed due to compatibility issues. Uncomment when resolved. 
"pytest-timeout>=2.4.0", "pytest-xdist>=3.8.0", "pyrefly>=0.62.0", - "xinference-client>=2.5.0", + "xinference-client>=2.7.0", ] ############################################################ @@ -184,12 +185,12 @@ dev = [ storage = [ "azure-storage-blob>=12.28.0", "bce-python-sdk>=0.9.70", - "cos-python-sdk-v5>=1.9.41", + "cos-python-sdk-v5>=1.9.42", "esdk-obs-python>=3.22.2", "google-cloud-storage>=3.10.1", "opendal>=0.46.0", "oss2>=2.19.1", - "supabase>=2.28.3", + "supabase>=2.29.0", "tos>=2.9.0", ] @@ -272,7 +273,7 @@ vdb-vastbase = ["dify-vdb-vastbase"] vdb-vikingdb = ["dify-vdb-vikingdb"] vdb-weaviate = ["dify-vdb-weaviate"] # Optional client used by some tests / integrations (not a vector backend plugin) -vdb-xinference = ["xinference-client>=2.5.0"] +vdb-xinference = ["xinference-client>=2.7.0"] trace-all = [ "dify-trace-aliyun", diff --git a/api/services/annotation_service.py b/api/services/annotation_service.py index ff0882ad5c..0229a1f43a 100644 --- a/api/services/annotation_service.py +++ b/api/services/annotation_service.py @@ -133,7 +133,14 @@ class AppAnnotationService: raise ValueError("'question' is required when 'message_id' is not provided") question = maybe_question - annotation = MessageAnnotation(app_id=app.id, content=answer, question=question, account_id=current_user.id) + annotation = MessageAnnotation( + app_id=app.id, + conversation_id=None, + message_id=None, + content=answer, + question=question, + account_id=current_user.id, + ) db.session.add(annotation) db.session.commit() diff --git a/api/services/async_workflow_service.py b/api/services/async_workflow_service.py index 8b39d63385..ceda30e950 100644 --- a/api/services/async_workflow_service.py +++ b/api/services/async_workflow_service.py @@ -89,7 +89,10 @@ class AsyncWorkflowService: raise WorkflowNotFoundError(f"App not found: {trigger_data.app_id}") # 2. Get workflow - workflow = cls._get_workflow(workflow_service, app_model, trigger_data.workflow_id) + workflow = cls._get_workflow(workflow_service, app_model, trigger_data.workflow_id, session=session) + + # commit read only session before starting the billig rpc call + session.commit() # 3. Get dispatcher based on tenant subscription dispatcher = dispatcher_manager.get_dispatcher(trigger_data.tenant_id) @@ -302,13 +305,21 @@ class AsyncWorkflowService: return [log.to_dict() for log in logs] @staticmethod - def _get_workflow(workflow_service: WorkflowService, app_model: App, workflow_id: str | None = None) -> Workflow: + def _get_workflow( + workflow_service: WorkflowService, + app_model: App, + workflow_id: str | None = None, + session: Session | None = None, + ) -> Workflow: """ Get workflow for the app Args: app_model: App model instance workflow_id: Optional specific workflow ID + session: Reuse this SQLAlchemy session for the lookup when provided, + so the caller's explicit session bears the connection cost + instead of Flask's request-scoped ``db.session``. 
Returns: Workflow instance @@ -318,12 +329,12 @@ class AsyncWorkflowService: """ if workflow_id: # Get specific published workflow - workflow = workflow_service.get_published_workflow_by_id(app_model, workflow_id) + workflow = workflow_service.get_published_workflow_by_id(app_model, workflow_id, session=session) if not workflow: raise WorkflowNotFoundError(f"Published workflow not found: {workflow_id}") else: # Get default published workflow - workflow = workflow_service.get_published_workflow(app_model) + workflow = workflow_service.get_published_workflow(app_model, session=session) if not workflow: raise WorkflowNotFoundError(f"No published workflow found for app: {app_model.id}") diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 894cb05687..eef38f1ce2 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -3748,6 +3748,7 @@ class SegmentService: ChildChunk.segment_id == segment.id, ) ) + assert current_user.current_tenant_id child_chunk = ChildChunk( tenant_id=current_user.current_tenant_id, dataset_id=dataset.id, @@ -3758,7 +3759,7 @@ class SegmentService: index_node_hash=index_node_hash, content=content, word_count=len(content), - type="customized", + type=SegmentType.CUSTOMIZED, created_by=current_user.id, ) db.session.add(child_chunk) @@ -3818,6 +3819,7 @@ class SegmentService: if new_child_chunks_args: child_chunk_count = len(child_chunks) for position, args in enumerate(new_child_chunks_args, start=child_chunk_count + 1): + assert current_user.current_tenant_id index_node_id = str(uuid.uuid4()) index_node_hash = helper.generate_text_hash(args.content) child_chunk = ChildChunk( @@ -3830,7 +3832,7 @@ class SegmentService: index_node_hash=index_node_hash, content=args.content, word_count=len(args.content), - type="customized", + type=SegmentType.CUSTOMIZED, created_by=current_user.id, ) diff --git a/api/services/trigger/webhook_service.py b/api/services/trigger/webhook_service.py index d562220fa7..5d99900a04 100644 --- a/api/services/trigger/webhook_service.py +++ b/api/services/trigger/webhook_service.py @@ -799,50 +799,47 @@ class WebhookService: Exception: If workflow execution fails """ try: - with Session(db.engine) as session: - # Prepare inputs for the webhook node - # The webhook node expects webhook_data in the inputs - workflow_inputs = cls.build_workflow_inputs(webhook_data) + workflow_inputs = cls.build_workflow_inputs(webhook_data) - # Create trigger data - trigger_data = WebhookTriggerData( - app_id=webhook_trigger.app_id, - workflow_id=workflow.id, - root_node_id=webhook_trigger.node_id, # Start from the webhook node - inputs=workflow_inputs, - tenant_id=webhook_trigger.tenant_id, + trigger_data = WebhookTriggerData( + app_id=webhook_trigger.app_id, + workflow_id=workflow.id, + root_node_id=webhook_trigger.node_id, + inputs=workflow_inputs, + tenant_id=webhook_trigger.tenant_id, + ) + + end_user = EndUserService.get_or_create_end_user_by_type( + type=InvokeFrom.TRIGGER, + tenant_id=webhook_trigger.tenant_id, + app_id=webhook_trigger.app_id, + user_id=None, + ) + + try: + quota_charge = QuotaService.reserve(QuotaType.TRIGGER, webhook_trigger.tenant_id) + except QuotaExceededError: + AppTriggerService.mark_tenant_triggers_rate_limited(webhook_trigger.tenant_id) + logger.info( + "Tenant %s rate limited, skipping webhook trigger %s", + webhook_trigger.tenant_id, + webhook_trigger.webhook_id, ) + raise - end_user = EndUserService.get_or_create_end_user_by_type( - type=InvokeFrom.TRIGGER, - 
tenant_id=webhook_trigger.tenant_id, - app_id=webhook_trigger.app_id, - user_id=None, - ) - - # reserve quota before triggering workflow execution - try: - quota_charge = QuotaService.reserve(QuotaType.TRIGGER, webhook_trigger.tenant_id) - except QuotaExceededError: - AppTriggerService.mark_tenant_triggers_rate_limited(webhook_trigger.tenant_id) - logger.info( - "Tenant %s rate limited, skipping webhook trigger %s", - webhook_trigger.tenant_id, - webhook_trigger.webhook_id, - ) - raise - - # Trigger workflow execution asynchronously - try: + try: + # NOTE: do not use `with sessionmaker(bind=db.engine, expire_on_commit=False).begin()` + # trigger_workflow_async needs to handle multiple session commits internally + with Session(db.engine, expire_on_commit=False) as session: AsyncWorkflowService.trigger_workflow_async( session, end_user, trigger_data, ) - quota_charge.commit() - except Exception: - quota_charge.refund() - raise + quota_charge.commit() + except Exception: + quota_charge.refund() + raise except Exception: logger.exception("Failed to trigger workflow for webhook %s", webhook_trigger.webhook_id) diff --git a/api/services/vector_service.py b/api/services/vector_service.py index 9827c8dfbc..4838e68b8a 100644 --- a/api/services/vector_service.py +++ b/api/services/vector_service.py @@ -16,6 +16,7 @@ from extensions.ext_database import db from models import UploadFile from models.dataset import ChildChunk, Dataset, DatasetProcessRule, DocumentSegment, SegmentAttachmentBinding from models.dataset import Document as DatasetDocument +from models.enums import SegmentType logger = logging.getLogger(__name__) @@ -178,7 +179,7 @@ class VectorService: index_node_hash=child_chunk.metadata["doc_hash"], content=child_chunk.page_content, word_count=len(child_chunk.page_content), - type="automatic", + type=SegmentType.AUTOMATIC, created_by=dataset_document.created_by, ) db.session.add(child_segment) @@ -222,6 +223,7 @@ class VectorService: ) documents.append(new_child_document) for update_child_chunk in update_child_chunks: + assert update_child_chunk.index_node_id child_document = Document( page_content=update_child_chunk.content, metadata={ @@ -234,6 +236,7 @@ class VectorService: documents.append(child_document) delete_node_ids.append(update_child_chunk.index_node_id) for delete_child_chunk in delete_child_chunks: + assert delete_child_chunk.index_node_id delete_node_ids.append(delete_child_chunk.index_node_id) if dataset.indexing_technique == IndexTechniqueType.HIGH_QUALITY: # update vector index @@ -246,6 +249,7 @@ class VectorService: @classmethod def delete_child_chunk_vector(cls, child_chunk: ChildChunk, dataset: Dataset): vector = Vector(dataset=dataset) + assert child_chunk.index_node_id vector.delete_by_ids([child_chunk.index_node_id]) @classmethod diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index 6c0719d6f7..5cf81915d6 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -173,11 +173,18 @@ class WorkflowService: # return draft workflow return workflow - def get_published_workflow_by_id(self, app_model: App, workflow_id: str) -> Workflow | None: + def get_published_workflow_by_id( + self, app_model: App, workflow_id: str, session: Session | None = None + ) -> Workflow | None: """ fetch published workflow by workflow_id + + When ``session`` is provided, reuse it so callers that already hold a + Session avoid checking out an extra request-scoped ``db.session`` + connection.
Falls back to ``db.session`` for backward compatibility. """ - workflow = db.session.scalar( + bind = session if session is not None else db.session + workflow = bind.scalar( select(Workflow) .where( Workflow.tenant_id == app_model.tenant_id, @@ -195,16 +202,20 @@ class WorkflowService: ) return workflow - def get_published_workflow(self, app_model: App) -> Workflow | None: + def get_published_workflow(self, app_model: App, session: Session | None = None) -> Workflow | None: """ Get published workflow + + When ``session`` is provided, reuse it so callers that already hold a + Session avoid checking out an extra request-scoped ``db.session`` + connection. Falls back to ``db.session`` for backward compatibility. """ if not app_model.workflow_id: return None - # fetch published workflow by workflow_id - workflow = db.session.scalar( + bind = session if session is not None else db.session + workflow = bind.scalar( select(Workflow) .where( Workflow.tenant_id == app_model.tenant_id, diff --git a/api/tasks/trigger_processing_tasks.py b/api/tasks/trigger_processing_tasks.py index b0cbc54db3..8505375b6a 100644 --- a/api/tasks/trigger_processing_tasks.py +++ b/api/tasks/trigger_processing_tasks.py @@ -259,59 +259,58 @@ def dispatch_triggered_workflow( tenant_id=subscription.tenant_id, provider_id=TriggerProviderID(subscription.provider_id) ) trigger_entity: TriggerProviderEntity = provider_controller.entity + + # Ensure expire_on_commit is set to False so workflows remain available after commit with session_factory.create_session() as session: workflows: Mapping[str, Workflow] = _get_latest_workflows_by_app_ids(session, subscribers) - end_users: Mapping[str, EndUser] = EndUserService.create_end_user_batch( - type=InvokeFrom.TRIGGER, - tenant_id=subscription.tenant_id, - app_ids=[plugin_trigger.app_id for plugin_trigger in subscribers], - user_id=user_id, - ) - for plugin_trigger in subscribers: - # Get workflow from mapping - workflow: Workflow | None = workflows.get(plugin_trigger.app_id) - if not workflow: - logger.error( - "Workflow not found for app %s", - plugin_trigger.app_id, - ) - continue + end_users: Mapping[str, EndUser] = EndUserService.create_end_user_batch( + type=InvokeFrom.TRIGGER, + tenant_id=subscription.tenant_id, + app_ids=[plugin_trigger.app_id for plugin_trigger in subscribers], + user_id=user_id, + ) - # Find the trigger node in the workflow - event_node = None - for node_id, node_config in workflow.walk_nodes(TRIGGER_PLUGIN_NODE_TYPE): - if node_id == plugin_trigger.node_id: - event_node = node_config - break - - if not event_node: - logger.error("Trigger event node not found for app %s", plugin_trigger.app_id) - continue - - # invoke trigger - trigger_metadata = PluginTriggerMetadata( - plugin_unique_identifier=provider_controller.plugin_unique_identifier or "", - endpoint_id=subscription.endpoint_id, - provider_id=subscription.provider_id, - event_name=event_name, - icon_filename=trigger_entity.identity.icon or "", - icon_dark_filename=trigger_entity.identity.icon_dark or "", + for plugin_trigger in subscribers: + workflow: Workflow | None = workflows.get(plugin_trigger.app_id) + if not workflow: + logger.error( + "Workflow not found for app %s", + plugin_trigger.app_id, ) + continue - # reserve quota before invoking trigger - quota_charge = unlimited() - try: - quota_charge = QuotaService.reserve(QuotaType.TRIGGER, subscription.tenant_id) - except QuotaExceededError: - AppTriggerService.mark_tenant_triggers_rate_limited(subscription.tenant_id) - logger.info( - "Tenant %s rate limited,
skipping plugin trigger %s", subscription.tenant_id, plugin_trigger.id - ) - return 0 + event_node = None + for node_id, node_config in workflow.walk_nodes(TRIGGER_PLUGIN_NODE_TYPE): + if node_id == plugin_trigger.node_id: + event_node = node_config + break - node_data: TriggerEventNodeData = TriggerEventNodeData.model_validate(event_node) - invoke_response: TriggerInvokeEventResponse | None = None + if not event_node: + logger.error("Trigger event node not found for app %s", plugin_trigger.app_id) + continue + + trigger_metadata = PluginTriggerMetadata( + plugin_unique_identifier=provider_controller.plugin_unique_identifier or "", + endpoint_id=subscription.endpoint_id, + provider_id=subscription.provider_id, + event_name=event_name, + icon_filename=trigger_entity.identity.icon or "", + icon_dark_filename=trigger_entity.identity.icon_dark or "", + ) + + quota_charge = unlimited() + try: + quota_charge = QuotaService.reserve(QuotaType.TRIGGER, subscription.tenant_id) + except QuotaExceededError: + AppTriggerService.mark_tenant_triggers_rate_limited(subscription.tenant_id) + logger.info("Tenant %s rate limited, skipping plugin trigger %s", subscription.tenant_id, plugin_trigger.id) + return dispatched_count + + node_data: TriggerEventNodeData = TriggerEventNodeData.model_validate(event_node) + invoke_response: TriggerInvokeEventResponse | None = None + + with session_factory.create_session() as session: try: invoke_response = TriggerManager.invoke_trigger_event( tenant_id=subscription.tenant_id, @@ -403,7 +402,7 @@ def dispatch_triggered_workflow( plugin_trigger.app_id, ) - return dispatched_count + return dispatched_count def dispatch_triggered_workflows( diff --git a/api/tasks/workflow_schedule_tasks.py b/api/tasks/workflow_schedule_tasks.py index dfb2fb3391..7638652000 100644 --- a/api/tasks/workflow_schedule_tasks.py +++ b/api/tasks/workflow_schedule_tasks.py @@ -33,6 +33,7 @@ def run_schedule_trigger(schedule_id: str) -> None: TenantOwnerNotFoundError: If no owner/admin for tenant ScheduleExecutionError: If workflow trigger fails """ + # Ensure expire_on_commit is set to False to remain schedule/tenant_owner available with session_factory.create_session() as session: schedule = session.get(WorkflowSchedulePlan, schedule_id) if not schedule: @@ -42,16 +43,16 @@ def run_schedule_trigger(schedule_id: str) -> None: if not tenant_owner: raise TenantOwnerNotFoundError(f"No owner or admin found for tenant {schedule.tenant_id}") - quota_charge = unlimited() - try: - quota_charge = QuotaService.reserve(QuotaType.TRIGGER, schedule.tenant_id) - except QuotaExceededError: - AppTriggerService.mark_tenant_triggers_rate_limited(schedule.tenant_id) - logger.info("Tenant %s rate limited, skipping schedule trigger %s", schedule.tenant_id, schedule_id) - return + quota_charge = unlimited() + try: + quota_charge = QuotaService.reserve(QuotaType.TRIGGER, schedule.tenant_id) + except QuotaExceededError: + AppTriggerService.mark_tenant_triggers_rate_limited(schedule.tenant_id) + logger.info("Tenant %s rate limited, skipping schedule trigger %s", schedule.tenant_id, schedule_id) + return - try: - # Production dispatch: Trigger the workflow normally + try: + with session_factory.create_session() as session: response = AsyncWorkflowService.trigger_workflow_async( session=session, user=tenant_owner, @@ -62,10 +63,10 @@ def run_schedule_trigger(schedule_id: str) -> None: tenant_id=schedule.tenant_id, ), ) - quota_charge.commit() - logger.info("Schedule %s triggered workflow: %s", schedule_id, 
response.workflow_trigger_log_id) - except Exception as e: - quota_charge.refund() - raise ScheduleExecutionError( - f"Failed to trigger workflow for schedule {schedule_id}, app {schedule.app_id}" - ) from e + quota_charge.commit() + logger.info("Schedule %s triggered workflow: %s", schedule_id, response.workflow_trigger_log_id) + except Exception as e: + quota_charge.refund() + raise ScheduleExecutionError( + f"Failed to trigger workflow for schedule {schedule_id}, app {schedule.app_id}" + ) from e diff --git a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py index d10e5ed13c..3b5e822b90 100644 --- a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py @@ -171,35 +171,13 @@ class TestChatMessageApiPermissions: parent_message_id=None, ) - class MockQuery: - def __init__(self, model): - self.model = model - - def where(self, *args, **kwargs): - return self - - def first(self): - if getattr(self.model, "__name__", "") == "Conversation": - return mock_conversation - return None - - def order_by(self, *args, **kwargs): - return self - - def limit(self, *_): - return self - - def all(self): - if getattr(self.model, "__name__", "") == "Message": - return [mock_message] - return [] - mock_session = mock.Mock() - mock_session.query.side_effect = MockQuery - mock_session.scalar.return_value = False + mock_session.scalar.return_value = mock_conversation + mock_session.scalars.return_value.all.return_value = [mock_message] monkeypatch.setattr(message_api, "db", SimpleNamespace(session=mock_session)) monkeypatch.setattr(message_api, "current_user", mock_account) + monkeypatch.setattr(message_api, "attach_message_extra_contents", mock.Mock()) class DummyPagination: def __init__(self, data, limit, has_more): diff --git a/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py b/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py index f14b2c0ae5..635cfee2da 100644 --- a/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py +++ b/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py @@ -24,7 +24,6 @@ def _patch_wraps(): patch("controllers.console.wraps.dify_config", dify_settings), patch("controllers.console.wraps.FeatureService.get_system_features", return_value=wraps_features), ): - mock_db.session.query.return_value.first.return_value = MagicMock() yield diff --git a/api/tests/test_containers_integration_tests/services/test_feedback_service.py b/api/tests/test_containers_integration_tests/services/test_feedback_service.py index d82933ccb9..3dcd6586e2 100644 --- a/api/tests/test_containers_integration_tests/services/test_feedback_service.py +++ b/api/tests/test_containers_integration_tests/services/test_feedback_service.py @@ -13,6 +13,12 @@ from models.model import App, Conversation, Message from services.feedback_service import FeedbackService +def _execute_result(rows): + result = mock.Mock() + result.all.return_value = rows + return result + + class TestFeedbackService: """Test FeedbackService methods.""" @@ -81,25 +87,17 @@ class TestFeedbackService: def test_export_feedbacks_csv_format(self, mock_db_session, sample_data): """Test exporting feedback data in CSV format.""" - - # Setup mock query result - mock_query = 
mock.Mock() - mock_query.join.return_value = mock_query - mock_query.outerjoin.return_value = mock_query - mock_query.where.return_value = mock_query - mock_query.filter.return_value = mock_query - mock_query.order_by.return_value = mock_query - mock_query.all.return_value = [ - ( - sample_data["user_feedback"], - sample_data["message"], - sample_data["conversation"], - sample_data["app"], - sample_data["user_feedback"].from_account, - ) - ] - - mock_db_session.execute.return_value = mock_query + mock_db_session.execute.return_value = _execute_result( + [ + ( + sample_data["user_feedback"], + sample_data["message"], + sample_data["conversation"], + sample_data["app"], + sample_data["user_feedback"].from_account, + ) + ] + ) # Test CSV export result = FeedbackService.export_feedbacks(app_id=sample_data["app"].id, format_type="csv") @@ -120,25 +118,17 @@ class TestFeedbackService: def test_export_feedbacks_json_format(self, mock_db_session, sample_data): """Test exporting feedback data in JSON format.""" - - # Setup mock query result - mock_query = mock.Mock() - mock_query.join.return_value = mock_query - mock_query.outerjoin.return_value = mock_query - mock_query.where.return_value = mock_query - mock_query.filter.return_value = mock_query - mock_query.order_by.return_value = mock_query - mock_query.all.return_value = [ - ( - sample_data["admin_feedback"], - sample_data["message"], - sample_data["conversation"], - sample_data["app"], - sample_data["admin_feedback"].from_account, - ) - ] - - mock_db_session.execute.return_value = mock_query + mock_db_session.execute.return_value = _execute_result( + [ + ( + sample_data["admin_feedback"], + sample_data["message"], + sample_data["conversation"], + sample_data["app"], + sample_data["admin_feedback"].from_account, + ) + ] + ) # Test JSON export result = FeedbackService.export_feedbacks(app_id=sample_data["app"].id, format_type="json") @@ -157,25 +147,17 @@ class TestFeedbackService: def test_export_feedbacks_with_filters(self, mock_db_session, sample_data): """Test exporting feedback with various filters.""" - - # Setup mock query result - mock_query = mock.Mock() - mock_query.join.return_value = mock_query - mock_query.outerjoin.return_value = mock_query - mock_query.where.return_value = mock_query - mock_query.filter.return_value = mock_query - mock_query.order_by.return_value = mock_query - mock_query.all.return_value = [ - ( - sample_data["admin_feedback"], - sample_data["message"], - sample_data["conversation"], - sample_data["app"], - sample_data["admin_feedback"].from_account, - ) - ] - - mock_db_session.execute.return_value = mock_query + mock_db_session.execute.return_value = _execute_result( + [ + ( + sample_data["admin_feedback"], + sample_data["message"], + sample_data["conversation"], + sample_data["app"], + sample_data["admin_feedback"].from_account, + ) + ] + ) # Test with filters result = FeedbackService.export_feedbacks( @@ -193,17 +175,7 @@ class TestFeedbackService: def test_export_feedbacks_no_data(self, mock_db_session, sample_data): """Test exporting feedback when no data exists.""" - - # Setup mock query result with no data - mock_query = mock.Mock() - mock_query.join.return_value = mock_query - mock_query.outerjoin.return_value = mock_query - mock_query.where.return_value = mock_query - mock_query.filter.return_value = mock_query - mock_query.order_by.return_value = mock_query - mock_query.all.return_value = [] - - mock_db_session.execute.return_value = mock_query + mock_db_session.execute.return_value = 
_execute_result([]) result = FeedbackService.export_feedbacks(app_id=sample_data["app"].id, format_type="csv") @@ -251,24 +223,17 @@ class TestFeedbackService: created_at=datetime(2024, 1, 1, 10, 0, 0), ) - # Setup mock query result - mock_query = mock.Mock() - mock_query.join.return_value = mock_query - mock_query.outerjoin.return_value = mock_query - mock_query.where.return_value = mock_query - mock_query.filter.return_value = mock_query - mock_query.order_by.return_value = mock_query - mock_query.all.return_value = [ - ( - sample_data["user_feedback"], - long_message, - sample_data["conversation"], - sample_data["app"], - sample_data["user_feedback"].from_account, - ) - ] - - mock_db_session.execute.return_value = mock_query + mock_db_session.execute.return_value = _execute_result( + [ + ( + sample_data["user_feedback"], + long_message, + sample_data["conversation"], + sample_data["app"], + sample_data["user_feedback"].from_account, + ) + ] + ) # Test export result = FeedbackService.export_feedbacks(app_id=sample_data["app"].id, format_type="json") @@ -309,24 +274,17 @@ class TestFeedbackService: created_at=datetime(2024, 1, 1, 10, 0, 0), ) - # Setup mock query result - mock_query = mock.Mock() - mock_query.join.return_value = mock_query - mock_query.outerjoin.return_value = mock_query - mock_query.where.return_value = mock_query - mock_query.filter.return_value = mock_query - mock_query.order_by.return_value = mock_query - mock_query.all.return_value = [ - ( - chinese_feedback, - chinese_message, - sample_data["conversation"], - sample_data["app"], - None, # No account for user feedback - ) - ] - - mock_db_session.execute.return_value = mock_query + mock_db_session.execute.return_value = _execute_result( + [ + ( + chinese_feedback, + chinese_message, + sample_data["conversation"], + sample_data["app"], + None, + ) + ] + ) # Test export result = FeedbackService.export_feedbacks(app_id=sample_data["app"].id, format_type="csv") @@ -339,32 +297,24 @@ class TestFeedbackService: def test_export_feedbacks_emoji_ratings(self, mock_db_session, sample_data): """Test that rating emojis are properly formatted in export.""" - - # Setup mock query result with both like and dislike feedback - mock_query = mock.Mock() - mock_query.join.return_value = mock_query - mock_query.outerjoin.return_value = mock_query - mock_query.where.return_value = mock_query - mock_query.filter.return_value = mock_query - mock_query.order_by.return_value = mock_query - mock_query.all.return_value = [ - ( - sample_data["user_feedback"], - sample_data["message"], - sample_data["conversation"], - sample_data["app"], - sample_data["user_feedback"].from_account, - ), - ( - sample_data["admin_feedback"], - sample_data["message"], - sample_data["conversation"], - sample_data["app"], - sample_data["admin_feedback"].from_account, - ), - ] - - mock_db_session.execute.return_value = mock_query + mock_db_session.execute.return_value = _execute_result( + [ + ( + sample_data["user_feedback"], + sample_data["message"], + sample_data["conversation"], + sample_data["app"], + sample_data["user_feedback"].from_account, + ), + ( + sample_data["admin_feedback"], + sample_data["message"], + sample_data["conversation"], + sample_data["app"], + sample_data["admin_feedback"].from_account, + ), + ] + ) # Test export result = FeedbackService.export_feedbacks(app_id=sample_data["app"].id, format_type="json") diff --git a/api/tests/test_containers_integration_tests/services/test_webhook_service_relationships.py 
b/api/tests/test_containers_integration_tests/services/test_webhook_service_relationships.py index ec10c51e04..85ce3a6ba6 100644 --- a/api/tests/test_containers_integration_tests/services/test_webhook_service_relationships.py +++ b/api/tests/test_containers_integration_tests/services/test_webhook_service_relationships.py @@ -10,6 +10,7 @@ from sqlalchemy import select from sqlalchemy.orm import Session from core.trigger.constants import TRIGGER_WEBHOOK_NODE_TYPE +from enums.quota_type import QuotaType from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole from models.enums import AppTriggerStatus, AppTriggerType from models.model import App @@ -290,17 +291,26 @@ class TestWebhookServiceTriggerExecutionWithContainers: end_user = SimpleNamespace(id=str(uuid4())) webhook_data = {"body": {"value": 1}, "headers": {}, "query_params": {}, "files": {}, "method": "POST"} + quota_charge = MagicMock() + with ( patch( "services.trigger.webhook_service.EndUserService.get_or_create_end_user_by_type", return_value=end_user, ), - patch("services.trigger.webhook_service.QuotaType.TRIGGER.consume") as mock_consume, + patch( + "services.trigger.webhook_service.QuotaService.reserve", + return_value=quota_charge, + ) as mock_reserve, patch("services.trigger.webhook_service.AsyncWorkflowService.trigger_workflow_async") as mock_trigger, ): WebhookService.trigger_workflow_execution(webhook_trigger, webhook_data, workflow) - mock_consume.assert_called_once_with(webhook_trigger.tenant_id) + mock_reserve.assert_called_once() + reserve_args = mock_reserve.call_args.args + assert reserve_args[0] == QuotaType.TRIGGER + assert reserve_args[1] == webhook_trigger.tenant_id + quota_charge.commit.assert_called_once() mock_trigger.assert_called_once() trigger_args = mock_trigger.call_args.args assert trigger_args[1] is end_user @@ -327,7 +337,7 @@ class TestWebhookServiceTriggerExecutionWithContainers: return_value=SimpleNamespace(id=str(uuid4())), ), patch( - "services.trigger.webhook_service.QuotaType.TRIGGER.consume", + "services.trigger.webhook_service.QuotaService.reserve", side_effect=QuotaExceededError(feature="trigger", tenant_id=tenant.id, required=1), ), patch( diff --git a/api/tests/unit_tests/conftest.py b/api/tests/unit_tests/conftest.py index 55873b06a8..7174530e97 100644 --- a/api/tests/unit_tests/conftest.py +++ b/api/tests/unit_tests/conftest.py @@ -121,33 +121,32 @@ def _configure_session_factory(_unit_test_engine): configure_session_factory(_unit_test_engine, expire_on_commit=False) -def setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account): +def setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_owner): """ - Helper to set up the mock DB execute chain for tenant/account authentication. + Helper to stub the tenant-owner execute result for service API app authentication. - This configures the mock to return (tenant, account) for the - db.session.execute(select(...).join().join().where()).one_or_none() - query used by validate_app_token decorator. + The validate_app_token decorator currently resolves the active tenant owner + via db.session.execute(select(Tenant, Account)...).one_or_none(). 
Args: mock_db: The mocked db object mock_tenant: Mock tenant object to return - mock_account: Mock account object to return + mock_owner: Mock owner object to return from the execute result """ - mock_db.session.execute.return_value.one_or_none.return_value = (mock_tenant, mock_account) + mock_db.session.execute.return_value.one_or_none.return_value = (mock_tenant, mock_owner) -def setup_mock_dataset_tenant_query(mock_db, mock_tenant, mock_ta): +def setup_mock_dataset_owner_execute_result(mock_db, mock_tenant, mock_tenant_account_join): """ - Helper to set up the mock DB execute chain for dataset tenant authentication. + Helper to stub the tenant-owner execute result for dataset token authentication. - This configures the mock to return (tenant, tenant_account) for the - db.session.execute(select(...).where().where().where().where()).one_or_none() - query used by validate_dataset_token decorator. + The validate_dataset_token decorator currently resolves the owner mapping via + db.session.execute(select(Tenant, TenantAccountJoin)...).one_or_none(), and + then loads the Account separately via db.session.get(...). Args: mock_db: The mocked db object mock_tenant: Mock tenant object to return - mock_ta: Mock tenant account object to return + mock_tenant_account_join: Mock tenant-account join object to return """ - mock_db.session.execute.return_value.one_or_none.return_value = (mock_tenant, mock_ta) + mock_db.session.execute.return_value.one_or_none.return_value = (mock_tenant, mock_tenant_account_join) diff --git a/api/tests/unit_tests/controllers/console/app/test_annotation_security.py b/api/tests/unit_tests/controllers/console/app/test_annotation_security.py index 9f1ff9b40f..bfa4048191 100644 --- a/api/tests/unit_tests/controllers/console/app/test_annotation_security.py +++ b/api/tests/unit_tests/controllers/console/app/test_annotation_security.py @@ -208,8 +208,6 @@ class TestAnnotationImportServiceValidation: file = FileStorage(stream=io.BytesIO(csv_content.encode()), filename="test.csv", content_type="text/csv") - mock_db_session.query.return_value.where.return_value.first.return_value = mock_app - with patch("services.annotation_service.current_account_with_tenant") as mock_auth: mock_auth.return_value = (MagicMock(id="user_id"), "tenant_id") @@ -230,8 +228,6 @@ class TestAnnotationImportServiceValidation: file = FileStorage(stream=io.BytesIO(csv_content.encode()), filename="test.csv", content_type="text/csv") - mock_db_session.query.return_value.where.return_value.first.return_value = mock_app - with patch("services.annotation_service.current_account_with_tenant") as mock_auth: mock_auth.return_value = (MagicMock(id="user_id"), "tenant_id") @@ -248,8 +244,6 @@ class TestAnnotationImportServiceValidation: csv_content = 'invalid,csv,format\nwith,unbalanced,quotes,and"stuff' file = FileStorage(stream=io.BytesIO(csv_content.encode()), filename="test.csv", content_type="text/csv") - mock_db_session.query.return_value.where.return_value.first.return_value = mock_app - with ( patch("services.annotation_service.current_account_with_tenant") as mock_auth, patch("services.annotation_service.pd.read_csv", side_effect=ParserError("malformed CSV")), @@ -269,8 +263,6 @@ class TestAnnotationImportServiceValidation: file = FileStorage(stream=io.BytesIO(csv_content.encode()), filename="test.csv", content_type="text/csv") - mock_db_session.query.return_value.where.return_value.first.return_value = mock_app - with patch("services.annotation_service.current_account_with_tenant") as mock_auth: 
mock_auth.return_value = (MagicMock(id="user_id"), "tenant_id") diff --git a/api/tests/unit_tests/controllers/console/auth/test_authentication_security.py b/api/tests/unit_tests/controllers/console/auth/test_authentication_security.py index cb4fe40944..17bee94c52 100644 --- a/api/tests/unit_tests/controllers/console/auth/test_authentication_security.py +++ b/api/tests/unit_tests/controllers/console/auth/test_authentication_security.py @@ -43,7 +43,6 @@ class TestAuthenticationSecurity: mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.side_effect = services.errors.account.AccountPasswordError("Invalid email or password.") - mock_db.session.query.return_value.first.return_value = MagicMock() # Mock setup exists mock_features.return_value.is_allow_register = True # Act @@ -76,7 +75,6 @@ class TestAuthenticationSecurity: mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.side_effect = services.errors.account.AccountPasswordError("Wrong password") - mock_db.session.query.return_value.first.return_value = MagicMock() # Mock setup exists # Act with self.app.test_request_context( @@ -109,7 +107,6 @@ class TestAuthenticationSecurity: mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.side_effect = services.errors.account.AccountPasswordError("Invalid email or password.") - mock_db.session.query.return_value.first.return_value = MagicMock() # Mock setup exists mock_features.return_value.is_allow_register = False # Act @@ -135,7 +132,6 @@ class TestAuthenticationSecurity: def test_reset_password_with_existing_account(self, mock_send_email, mock_get_user, mock_features, mock_db): """Test that reset password returns success with token for existing accounts.""" # Mock the setup check - mock_db.session.query.return_value.first.return_value = MagicMock() # Mock setup exists # Test with existing account mock_get_user.return_value = MagicMock(email="existing@example.com") diff --git a/api/tests/unit_tests/controllers/console/auth/test_email_verification.py b/api/tests/unit_tests/controllers/console/auth/test_email_verification.py index 9929a71120..b7bc73da5f 100644 --- a/api/tests/unit_tests/controllers/console/auth/test_email_verification.py +++ b/api/tests/unit_tests/controllers/console/auth/test_email_verification.py @@ -65,7 +65,6 @@ class TestEmailCodeLoginSendEmailApi: - IP rate limiting is checked """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_ip_limit.return_value = False mock_get_user.return_value = mock_account mock_send_email.return_value = "email_token_123" @@ -98,7 +97,6 @@ class TestEmailCodeLoginSendEmailApi: - Registration is allowed by system features """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_ip_limit.return_value = False mock_get_user.return_value = None mock_get_features.return_value.is_allow_register = True @@ -130,7 +128,6 @@ class TestEmailCodeLoginSendEmailApi: - Registration is blocked by system features """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_ip_limit.return_value = False mock_get_user.return_value = None mock_get_features.return_value.is_allow_register = False @@ -152,7 +149,6 @@ class TestEmailCodeLoginSendEmailApi: - Prevents spam and abuse """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_ip_limit.return_value = True # Act & Assert @@ -172,7 +168,6 @@ 
class TestEmailCodeLoginSendEmailApi: - AccountInFreezeError is raised for frozen accounts """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_ip_limit.return_value = False mock_get_user.side_effect = AccountRegisterError("Account frozen") @@ -213,7 +208,6 @@ class TestEmailCodeLoginSendEmailApi: - Defaults to en-US when not specified """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_ip_limit.return_value = False mock_get_user.return_value = mock_account mock_send_email.return_value = "token" @@ -286,7 +280,6 @@ class TestEmailCodeLoginApi: - User is logged in with token pair """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = {"email": "test@example.com", "code": "123456"} mock_get_user.return_value = mock_account mock_get_tenants.return_value = [MagicMock()] @@ -335,7 +328,6 @@ class TestEmailCodeLoginApi: - User is logged in after account creation """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = {"email": "newuser@example.com", "code": "123456"} mock_get_user.return_value = None mock_create_account.return_value = mock_account @@ -369,7 +361,6 @@ class TestEmailCodeLoginApi: - InvalidTokenError is raised for invalid/expired tokens """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = None # Act & Assert @@ -392,7 +383,6 @@ class TestEmailCodeLoginApi: - InvalidEmailError is raised when email doesn't match token """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = {"email": "original@example.com", "code": "123456"} # Act & Assert @@ -415,7 +405,6 @@ class TestEmailCodeLoginApi: - EmailCodeError is raised for wrong verification code """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = {"email": "test@example.com", "code": "123456"} # Act & Assert @@ -453,7 +442,6 @@ class TestEmailCodeLoginApi: - User is added as owner of new workspace """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = {"email": "test@example.com", "code": "123456"} mock_get_user.return_value = mock_account mock_get_tenants.return_value = [] @@ -496,7 +484,6 @@ class TestEmailCodeLoginApi: - WorkspacesLimitExceeded is raised when limit reached """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = {"email": "test@example.com", "code": "123456"} mock_get_user.return_value = mock_account mock_get_tenants.return_value = [] @@ -538,7 +525,6 @@ class TestEmailCodeLoginApi: - NotAllowedCreateWorkspace is raised when creation disabled """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = {"email": "test@example.com", "code": "123456"} mock_get_user.return_value = mock_account mock_get_tenants.return_value = [] diff --git a/api/tests/unit_tests/controllers/console/auth/test_login_logout.py b/api/tests/unit_tests/controllers/console/auth/test_login_logout.py index 0cf97da878..d089be8905 100644 --- a/api/tests/unit_tests/controllers/console/auth/test_login_logout.py +++ b/api/tests/unit_tests/controllers/console/auth/test_login_logout.py @@ -110,7 +110,6 @@ class TestLoginApi: - Rate limit is reset after successful login """ # Arrange - 
mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.return_value = mock_account @@ -162,7 +161,6 @@ class TestLoginApi: - Authentication proceeds with invitation token """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_rate_limit.return_value = False mock_get_invitation.return_value = {"data": {"email": "test@example.com"}} mock_authenticate.return_value = mock_account @@ -199,7 +197,6 @@ class TestLoginApi: - EmailPasswordLoginLimitError is raised when limit exceeded """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_rate_limit.return_value = True mock_get_invitation.return_value = None @@ -228,7 +225,6 @@ class TestLoginApi: - AccountInFreezeError is raised for frozen accounts """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_frozen.return_value = True # Act & Assert @@ -268,7 +264,6 @@ class TestLoginApi: - Generic error message prevents user enumeration """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.side_effect = AccountPasswordError("Invalid password") @@ -305,7 +300,6 @@ class TestLoginApi: - Login is prevented even with valid credentials """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.side_effect = AccountLoginError("Account is banned") @@ -351,7 +345,6 @@ class TestLoginApi: - User cannot login without an assigned workspace """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.return_value = mock_account @@ -383,7 +376,6 @@ class TestLoginApi: - Security check prevents invitation token abuse """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_rate_limit.return_value = False mock_get_invitation.return_value = {"data": {"email": "invited@example.com"}} @@ -425,7 +417,6 @@ class TestLoginApi: mock_token_pair, ): """Test that login retries with lowercase email when uppercase lookup fails.""" - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.side_effect = [AccountPasswordError("Invalid"), mock_account] @@ -459,7 +450,6 @@ class TestLoginApi: mock_db, app, ): - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_token_data.return_value = {"email": "User@Example.com", "code": "123456"} mock_get_account.side_effect = Unauthorized("Account is banned.") @@ -513,7 +503,6 @@ class TestLogoutApi: - Success response is returned """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_current_account.return_value = (mock_account, MagicMock()) # Act @@ -539,7 +528,6 @@ class TestLogoutApi: - Success response is returned """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() # Create a mock anonymous user that will pass isinstance check anonymous_user = MagicMock() mock_flask_login.AnonymousUserMixin = type("AnonymousUserMixin", (), {}) diff --git a/api/tests/unit_tests/controllers/console/billing/test_billing.py 
b/api/tests/unit_tests/controllers/console/billing/test_billing.py index c80758c857..810f1b94fc 100644 --- a/api/tests/unit_tests/controllers/console/billing/test_billing.py +++ b/api/tests/unit_tests/controllers/console/billing/test_billing.py @@ -46,7 +46,6 @@ class TestPartnerTenants: patch("libs.login.dify_config.LOGIN_DISABLED", False), patch("libs.login.check_csrf_token") as mock_csrf, ): - mock_db.session.query.return_value.first.return_value = MagicMock() # Mock setup exists mock_csrf.return_value = None yield {"db": mock_db, "csrf": mock_csrf} diff --git a/api/tests/unit_tests/controllers/console/tag/test_tags.py b/api/tests/unit_tests/controllers/console/tag/test_tags.py index 2be5a21f28..6405558bb4 100644 --- a/api/tests/unit_tests/controllers/console/tag/test_tags.py +++ b/api/tests/unit_tests/controllers/console/tag/test_tags.py @@ -8,8 +8,10 @@ from werkzeug.exceptions import Forbidden import controllers.console.tag.tags as module from controllers.console import console_ns from controllers.console.tag.tags import ( - TagBindingCreateApi, - TagBindingDeleteApi, + DeprecatedTagBindingCreateApi, + DeprecatedTagBindingRemoveApi, + TagBindingCollectionApi, + TagBindingItemApi, TagListApi, TagUpdateDeleteApi, ) @@ -205,9 +207,9 @@ class TestTagUpdateDeleteApi: assert status == 204 -class TestTagBindingCreateApi: +class TestTagBindingCollectionApi: def test_create_success(self, app, admin_user, payload_patch): - api = TagBindingCreateApi() + api = TagBindingCollectionApi() method = unwrap(api.post) payload = { @@ -232,7 +234,7 @@ class TestTagBindingCreateApi: assert result["result"] == "success" def test_create_forbidden(self, app, readonly_user, payload_patch): - api = TagBindingCreateApi() + api = TagBindingCollectionApi() method = unwrap(api.post) with app.test_request_context("/", json={}): @@ -247,9 +249,78 @@ class TestTagBindingCreateApi: method(api) -class TestTagBindingDeleteApi: +class TestDeprecatedTagBindingCreateApi: + def test_create_success(self, app, admin_user, payload_patch): + api = DeprecatedTagBindingCreateApi() + method = unwrap(api.post) + + payload = { + "tag_ids": ["tag-1"], + "target_id": "target-1", + "type": "knowledge", + } + + with app.test_request_context("/", json=payload): + with ( + patch( + "controllers.console.tag.tags.current_account_with_tenant", + return_value=(admin_user, None), + ), + payload_patch(payload), + patch("controllers.console.tag.tags.TagService.save_tag_binding") as save_mock, + ): + result, status = method(api) + + save_mock.assert_called_once() + assert status == 200 + assert result["result"] == "success" + + +class TestTagBindingItemApi: + def test_delete_success(self, app, admin_user, payload_patch): + api = TagBindingItemApi() + method = unwrap(api.delete) + + payload = { + "target_id": "target-1", + "type": "knowledge", + } + + with app.test_request_context("/", json=payload): + with ( + patch( + "controllers.console.tag.tags.current_account_with_tenant", + return_value=(admin_user, None), + ), + payload_patch(payload), + patch("controllers.console.tag.tags.TagService.delete_tag_binding") as delete_mock, + ): + result, status = method(api, "tag-1") + + delete_mock.assert_called_once() + delete_payload = delete_mock.call_args.args[0] + assert delete_payload.tag_id == "tag-1" + assert delete_payload.target_id == "target-1" + assert delete_payload.type == TagType.KNOWLEDGE + assert status == 200 + assert result["result"] == "success" + + def test_delete_forbidden(self, app, readonly_user): + api = TagBindingItemApi() + method 
= unwrap(api.delete) + + with app.test_request_context("/"): + with patch( + "controllers.console.tag.tags.current_account_with_tenant", + return_value=(readonly_user, None), + ): + with pytest.raises(Forbidden): + method(api, "tag-1") + + +class TestDeprecatedTagBindingRemoveApi: def test_remove_success(self, app, admin_user, payload_patch): - api = TagBindingDeleteApi() + api = DeprecatedTagBindingRemoveApi() method = unwrap(api.post) payload = { @@ -274,7 +345,7 @@ class TestTagBindingDeleteApi: assert result["result"] == "success" def test_remove_forbidden(self, app, readonly_user, payload_patch): - api = TagBindingDeleteApi() + api = DeprecatedTagBindingRemoveApi() method = unwrap(api.post) with app.test_request_context("/", json={}): @@ -297,3 +368,35 @@ class TestTagResponseModel: assert payload["type"] == "knowledge" assert payload["binding_count"] == "1" + + +class TestTagBindingRouteMetadata: + def test_legacy_write_routes_are_marked_deprecated(self): + assert DeprecatedTagBindingCreateApi.post.__apidoc__["deprecated"] is True + assert DeprecatedTagBindingRemoveApi.post.__apidoc__["deprecated"] is True + assert TagBindingCollectionApi.post.__apidoc__.get("deprecated") is not True + assert TagBindingItemApi.delete.__apidoc__.get("deprecated") is not True + + def test_write_routes_have_stable_operation_ids(self): + assert TagBindingCollectionApi.post.__apidoc__["id"] == "create_tag_binding" + assert TagBindingItemApi.delete.__apidoc__["id"] == "delete_tag_binding" + assert DeprecatedTagBindingCreateApi.post.__apidoc__["id"] == "create_tag_binding_deprecated" + assert DeprecatedTagBindingRemoveApi.post.__apidoc__["id"] == "delete_tag_binding_deprecated" + + def test_canonical_and_legacy_write_routes_are_registered(self): + route_map = { + resource.__name__: urls + for resource, urls, _route_doc, _kwargs in console_ns.resources + if resource.__name__ + in { + "TagBindingCollectionApi", + "TagBindingItemApi", + "DeprecatedTagBindingCreateApi", + "DeprecatedTagBindingRemoveApi", + } + } + + assert route_map["TagBindingCollectionApi"] == ("/tag-bindings",) + assert route_map["TagBindingItemApi"] == ("/tag-bindings/",) + assert route_map["DeprecatedTagBindingCreateApi"] == ("/tag-bindings/create",) + assert route_map["DeprecatedTagBindingRemoveApi"] == ("/tag-bindings/remove",) diff --git a/api/tests/unit_tests/controllers/console/test_workspace_account.py b/api/tests/unit_tests/controllers/console/test_workspace_account.py index 26ff264f18..0b1a32581a 100644 --- a/api/tests/unit_tests/controllers/console/test_workspace_account.py +++ b/api/tests/unit_tests/controllers/console/test_workspace_account.py @@ -24,10 +24,6 @@ def app(): return app -def _mock_wraps_db(mock_db): - mock_db.session.query.return_value.first.return_value = MagicMock() - - def _build_account(email: str, account_id: str = "acc", tenant: object | None = None) -> Account: tenant_obj = tenant if tenant is not None else SimpleNamespace(id="tenant-id") account = Account(name=account_id, email=email) @@ -64,7 +60,6 @@ class TestChangeEmailSend: mock_db, app, ): - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_account = _build_account("current@example.com", "acc1") mock_current_account.return_value = (mock_account, None) @@ -117,7 +112,6 @@ class TestChangeEmailSend: """GHSA-4q3w-q5mc-45rq: a phase-1 token must not unlock the new-email send step.""" from controllers.console.auth.error import InvalidTokenError - _mock_wraps_db(mock_db) mock_features.return_value = 
SimpleNamespace(enable_change_email=True) mock_account = _build_account("current@example.com", "acc1") mock_current_account.return_value = (mock_account, None) @@ -163,7 +157,6 @@ class TestChangeEmailValidity: mock_db, app, ): - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_account = _build_account("user@example.com", "acc2") mock_current_account.return_value = (mock_account, None) @@ -223,7 +216,6 @@ class TestChangeEmailValidity: mock_db, app, ): - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_current_account.return_value = (_build_account("old@example.com", "acc"), None) mock_is_rate_limit.return_value = False @@ -280,7 +272,6 @@ class TestChangeEmailValidity: """A token whose phase marker is a string but not a known transition must be rejected.""" from controllers.console.auth.error import InvalidTokenError - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_current_account.return_value = (_build_account("old@example.com", "acc"), None) mock_is_rate_limit.return_value = False @@ -330,7 +321,6 @@ class TestChangeEmailValidity: """A token minted without a phase marker (e.g. a hand-crafted token) must not validate.""" from controllers.console.auth.error import InvalidTokenError - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_current_account.return_value = (_build_account("old@example.com", "acc"), None) mock_is_rate_limit.return_value = False @@ -378,7 +368,6 @@ class TestChangeEmailReset: mock_db, app, ): - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) current_user = _build_account("old@example.com", "acc3") mock_current_account.return_value = (current_user, None) @@ -434,7 +423,6 @@ class TestChangeEmailReset: """GHSA-4q3w-q5mc-45rq PoC: phase-1 token must not be usable against /reset.""" from controllers.console.auth.error import InvalidTokenError - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) current_user = _build_account("old@example.com", "acc3") mock_current_account.return_value = (current_user, None) @@ -488,7 +476,6 @@ class TestChangeEmailReset: """A verified token for address A must not be replayed to change to address B.""" from controllers.console.auth.error import InvalidTokenError - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) current_user = _build_account("old@example.com", "acc3") mock_current_account.return_value = (current_user, None) @@ -561,7 +548,6 @@ class TestAccountDeletionFeedback: @patch("controllers.console.wraps.db") @patch("controllers.console.workspace.account.BillingService.update_account_deletion_feedback") def test_should_normalize_feedback_email(self, mock_update, mock_db, app): - _mock_wraps_db(mock_db) with app.test_request_context( "/account/delete/feedback", method="POST", @@ -578,7 +564,6 @@ class TestCheckEmailUnique: @patch("controllers.console.workspace.account.AccountService.check_email_unique") @patch("controllers.console.workspace.account.AccountService.is_account_in_freeze") def test_should_normalize_email(self, mock_is_freeze, mock_check_unique, mock_db, app): - _mock_wraps_db(mock_db) mock_is_freeze.return_value = False mock_check_unique.return_value = True diff --git a/api/tests/unit_tests/controllers/console/test_workspace_members.py 
b/api/tests/unit_tests/controllers/console/test_workspace_members.py index 239fec8430..811bf5b1e7 100644 --- a/api/tests/unit_tests/controllers/console/test_workspace_members.py +++ b/api/tests/unit_tests/controllers/console/test_workspace_members.py @@ -1,5 +1,5 @@ from types import SimpleNamespace -from unittest.mock import MagicMock, patch +from unittest.mock import patch import pytest from flask import Flask, g @@ -16,10 +16,6 @@ def app(): return flask_app -def _mock_wraps_db(mock_db): - mock_db.session.query.return_value.first.return_value = MagicMock() - - def _build_feature_flags(): placeholder_quota = SimpleNamespace(limit=0, size=0) workspace_members = SimpleNamespace(is_available=lambda count: True) @@ -49,7 +45,6 @@ class TestMemberInviteEmailApi: mock_get_features, app, ): - _mock_wraps_db(mock_db) mock_get_features.return_value = _build_feature_flags() mock_invite_member.return_value = "token-abc" diff --git a/api/tests/unit_tests/controllers/console/test_wraps.py b/api/tests/unit_tests/controllers/console/test_wraps.py index f6e096a97b..aa4973851a 100644 --- a/api/tests/unit_tests/controllers/console/test_wraps.py +++ b/api/tests/unit_tests/controllers/console/test_wraps.py @@ -310,7 +310,6 @@ class TestSystemSetup: def test_should_allow_when_setup_complete(self, mock_db): """Test that requests are allowed when setup is complete""" # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() # Setup exists @setup_required def admin_view(): diff --git a/api/tests/unit_tests/controllers/console/workspace/test_tool_providers.py b/api/tests/unit_tests/controllers/console/workspace/test_tool_providers.py index 44feacf2ad..1422f29849 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_tool_providers.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_tool_providers.py @@ -22,7 +22,7 @@ _WRAPS_MODULE: ModuleType | None = None @contextmanager def _mock_db(): - mock_session = SimpleNamespace(query=lambda *args, **kwargs: SimpleNamespace(first=lambda: True)) + mock_session = SimpleNamespace(scalar=lambda *args, **kwargs: True) with patch("extensions.ext_database.db.session", mock_session): yield diff --git a/api/tests/unit_tests/controllers/service_api/app/test_app.py b/api/tests/unit_tests/controllers/service_api/app/test_app.py index f48ace427d..f5d93b5ac3 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_app.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_app.py @@ -12,7 +12,7 @@ from controllers.service_api.app.app import AppInfoApi, AppMetaApi, AppParameter from controllers.service_api.app.error import AppUnavailableError from models.account import TenantStatus from models.model import App, AppMode -from tests.unit_tests.conftest import setup_mock_tenant_account_query +from tests.unit_tests.conftest import setup_mock_tenant_owner_execute_result class TestAppParameterApi: @@ -74,7 +74,7 @@ class TestAppParameterApi: # Mock tenant owner info for login mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act with app.test_request_context("/parameters", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -120,7 +120,7 @@ class TestAppParameterApi: mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, 
mock_tenant, mock_account) # Act with app.test_request_context("/parameters", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -161,7 +161,7 @@ class TestAppParameterApi: mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act & Assert with app.test_request_context("/parameters", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -200,7 +200,7 @@ class TestAppParameterApi: mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act & Assert with app.test_request_context("/parameters", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -263,7 +263,7 @@ class TestAppMetaApi: mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act with app.test_request_context("/meta", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -331,7 +331,7 @@ class TestAppInfoApi: mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act with app.test_request_context("/info", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -388,7 +388,7 @@ class TestAppInfoApi: mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act with app.test_request_context("/info", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -434,7 +434,7 @@ class TestAppInfoApi: mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act with app.test_request_context("/info", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -486,7 +486,7 @@ class TestAppInfoApi: mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act with app.test_request_context("/info", method="GET", headers={"Authorization": "Bearer test_token"}): diff --git a/api/tests/unit_tests/controllers/service_api/conftest.py b/api/tests/unit_tests/controllers/service_api/conftest.py index eddba5a517..8c89812cb4 100644 --- a/api/tests/unit_tests/controllers/service_api/conftest.py +++ b/api/tests/unit_tests/controllers/service_api/conftest.py @@ -15,7 +15,10 @@ from flask import Flask from core.rag.index_processor.constant.index_type import IndexStructureType from models.account import TenantStatus from models.model import App, AppMode, EndUser -from tests.unit_tests.conftest import setup_mock_tenant_account_query +from tests.unit_tests.conftest import ( + setup_mock_dataset_owner_execute_result, + setup_mock_tenant_owner_execute_result, +) @pytest.fixture @@ -123,9 +126,7 @@ class AuthenticationMocker: mock_db.session.get.side_effect = [mock_app, mock_tenant] if mock_account: - mock_ta 
= Mock() - mock_ta.account_id = mock_account.id - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_ta) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) @staticmethod def setup_dataset_auth(mock_db, mock_tenant, mock_account): @@ -133,8 +134,7 @@ class AuthenticationMocker: mock_ta = Mock() mock_ta.account_id = mock_account.id - mock_db.session.execute.return_value.one_or_none.return_value = (mock_tenant, mock_ta) - + setup_mock_dataset_owner_execute_result(mock_db, mock_tenant, mock_ta) mock_db.session.get.return_value = mock_account diff --git a/api/tests/unit_tests/controllers/service_api/dataset/test_document.py b/api/tests/unit_tests/controllers/service_api/dataset/test_document.py index 288659b192..1b391e67ec 100644 --- a/api/tests/unit_tests/controllers/service_api/dataset/test_document.py +++ b/api/tests/unit_tests/controllers/service_api/dataset/test_document.py @@ -701,8 +701,8 @@ class TestDocumentApiDelete: ``delete`` is wrapped by ``@cloud_edition_billing_rate_limit_check`` which internally calls ``validate_and_get_api_token``. To bypass the decorator we call the original function via ``__wrapped__`` (preserved by - ``functools.wraps``). ``delete`` queries the dataset via - ``db.session.query(Dataset)`` directly, so we patch ``db`` at the + ``functools.wraps``). ``delete`` loads the dataset via + ``db.session.scalar(select(Dataset)...)``, so we patch ``db`` at the controller module. """ diff --git a/api/tests/unit_tests/controllers/service_api/test_wraps.py b/api/tests/unit_tests/controllers/service_api/test_wraps.py index a2008e024b..6dfbdcf98e 100644 --- a/api/tests/unit_tests/controllers/service_api/test_wraps.py +++ b/api/tests/unit_tests/controllers/service_api/test_wraps.py @@ -24,8 +24,8 @@ from enums.cloud_plan import CloudPlan from models.account import TenantStatus from models.model import ApiToken from tests.unit_tests.conftest import ( - setup_mock_dataset_tenant_query, - setup_mock_tenant_account_query, + setup_mock_dataset_owner_execute_result, + setup_mock_tenant_owner_execute_result, ) @@ -141,14 +141,11 @@ class TestValidateAppToken: mock_account = Mock() mock_account.id = str(uuid.uuid4()) - mock_ta = Mock() - mock_ta.account_id = mock_account.id - # Use side_effect to return app first, then tenant via session.get() mock_db.session.get.side_effect = [mock_app, mock_tenant] - # Mock the tenant owner query (execute(select(...)).one_or_none()) - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_ta) + # Mock the tenant owner execute result (execute(select(...)).one_or_none()) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) @validate_app_token def protected_view(app_model): @@ -471,7 +468,7 @@ class TestValidateDatasetToken: mock_account.current_tenant = mock_tenant # Mock the tenant account join query (execute(select(...)).one_or_none()) - setup_mock_dataset_tenant_query(mock_db, mock_tenant, mock_ta) + setup_mock_dataset_owner_execute_result(mock_db, mock_tenant, mock_ta) # Mock the account lookup via session.get() mock_db.session.get.return_value = mock_account diff --git a/api/tests/unit_tests/controllers/web/conftest.py b/api/tests/unit_tests/controllers/web/conftest.py index 274d78c9cf..b7f3244c6c 100644 --- a/api/tests/unit_tests/controllers/web/conftest.py +++ b/api/tests/unit_tests/controllers/web/conftest.py @@ -22,18 +22,16 @@ class FakeSession: def __init__(self, mapping: dict[str, Any] | None = None): self._mapping: dict[str, Any] = mapping or {} - self._model_name: str | None 
= None - def query(self, model: type) -> FakeSession: - self._model_name = model.__name__ - return self + def get(self, model: type, _ident: object) -> Any: + return self._mapping.get(model.__name__) - def where(self, *_args: object, **_kwargs: object) -> FakeSession: - return self - - def first(self) -> Any: - assert self._model_name is not None - return self._mapping.get(self._model_name) + def scalar(self, stmt: Any) -> Any: + try: + model = stmt.column_descriptions[0]["entity"] + except (AttributeError, IndexError, KeyError, TypeError): + return None + return self._mapping.get(model.__name__) class FakeDB: diff --git a/api/tests/unit_tests/controllers/web/test_human_input_form.py b/api/tests/unit_tests/controllers/web/test_human_input_form.py index a1dbc80b20..5f2dc19aab 100644 --- a/api/tests/unit_tests/controllers/web/test_human_input_form.py +++ b/api/tests/unit_tests/controllers/web/test_human_input_form.py @@ -36,18 +36,6 @@ class _FakeSession: def __init__(self, mapping: dict[str, Any]): self._mapping = mapping - self._model_name: str | None = None - - def query(self, model): - self._model_name = model.__name__ - return self - - def where(self, *args, **kwargs): - return self - - def first(self): - assert self._model_name is not None - return self._mapping.get(self._model_name) def get(self, model, ident): return self._mapping.get(model.__name__) diff --git a/api/tests/unit_tests/controllers/web/test_web_login.py b/api/tests/unit_tests/controllers/web/test_web_login.py index a01587d64a..13b953c04d 100644 --- a/api/tests/unit_tests/controllers/web/test_web_login.py +++ b/api/tests/unit_tests/controllers/web/test_web_login.py @@ -34,7 +34,6 @@ def _patch_wraps(): patch("controllers.console.wraps.FeatureService.get_system_features", return_value=wraps_features), patch("controllers.web.login.dify_config", web_dify), ): - mock_db.session.query.return_value.first.return_value = MagicMock() yield diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_runner_conversation_variables.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_runner_conversation_variables.py index 1fb0dc6cf1..e053d0779e 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_runner_conversation_variables.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_runner_conversation_variables.py @@ -154,7 +154,6 @@ class TestAdvancedChatAppRunnerConversationVariables: mock_sessionmaker.return_value.begin.return_value.__enter__.return_value = mock_session mock_sessionmaker.return_value.begin.return_value.__exit__ = MagicMock(return_value=False) mock_session_class.return_value.__enter__.return_value = MagicMock() - mock_db.session.query.return_value.where.return_value.first.return_value = MagicMock() # App exists mock_db.engine = MagicMock() # Mock GraphRuntimeState to accept the variable pool @@ -301,7 +300,6 @@ class TestAdvancedChatAppRunnerConversationVariables: mock_sessionmaker.return_value.begin.return_value.__enter__.return_value = mock_session mock_sessionmaker.return_value.begin.return_value.__exit__ = MagicMock(return_value=False) mock_session_class.return_value.__enter__.return_value = MagicMock() - mock_db.session.query.return_value.where.return_value.first.return_value = MagicMock() # App exists mock_db.engine = MagicMock() # Mock ConversationVariable.from_variable to return mock objects @@ -453,7 +451,6 @@ class TestAdvancedChatAppRunnerConversationVariables: mock_sessionmaker.return_value.begin.return_value.__enter__.return_value = mock_session 
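Note on the FakeSession change above: the old query/where/first chain is replaced by a scalar() that resolves the mapped class from the statement itself via SQLAlchemy's Select.column_descriptions, which is also why the setup mocks (`db.session.query(...).first()`) could be dropped across the auth, wraps, and web-login tests. A minimal, self-contained sketch of that API, using a locally defined stand-in model (the model here is illustrative, not one of Dify's):

    # How FakeSession.scalar recovers the entity from a Select statement.
    from sqlalchemy import select
    from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column

    class Base(DeclarativeBase):
        pass

    class Setup(Base):  # stand-in model, defined only for this sketch
        __tablename__ = "setups"
        id: Mapped[int] = mapped_column(primary_key=True)

    stmt = select(Setup).where(Setup.id == 1)
    # For an ORM-entity select, the first column description carries the entity.
    assert stmt.column_descriptions[0]["entity"] is Setup
    # FakeSession.scalar then returns self._mapping.get(entity.__name__)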
mock_sessionmaker.return_value.begin.return_value.__exit__ = MagicMock(return_value=False) mock_session_class.return_value.__enter__.return_value = MagicMock() - mock_db.session.query.return_value.where.return_value.first.return_value = MagicMock() # App exists mock_db.engine = MagicMock() # Mock GraphRuntimeState to accept the variable pool diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py index 9a2dc38f74..c36edf48fc 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py @@ -375,7 +375,7 @@ def test_generate_success_returns_converted(generator, mocker): workflow = MagicMock(id="wf", tenant_id="tenant", app_id="pipe", graph_dict={}) session = MagicMock() - session.query.return_value.where.return_value.first.return_value = workflow + session.get.return_value = workflow mocker.patch.object(module.db, "session", session) queue_manager = MagicMock() diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py index 618c8fd76f..603062a51c 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py @@ -132,11 +132,8 @@ def test_run_pipeline_not_found(mocker): app_generate_entity.single_iteration_run = None app_generate_entity.single_loop_run = None - query = MagicMock() - query.where.return_value.first.return_value = None - session = MagicMock() - session.query.return_value = query + session.get.side_effect = [None, None] mocker.patch.object(module.db, "session", session) runner = PipelineRunner( @@ -157,11 +154,9 @@ def test_run_workflow_not_initialized(mocker): app_generate_entity = _build_app_generate_entity() pipeline = MagicMock(id="pipe") - query_pipeline = MagicMock() - query_pipeline.where.return_value.first.return_value = pipeline session = MagicMock() - session.query.return_value = query_pipeline + session.get.side_effect = [None, pipeline] mocker.patch.object(module.db, "session", session) runner = PipelineRunner( diff --git a/api/tests/unit_tests/core/datasource/test_notion_provider.py b/api/tests/unit_tests/core/datasource/test_notion_provider.py index e4bd7d3bdf..d21b9e471b 100644 --- a/api/tests/unit_tests/core/datasource/test_notion_provider.py +++ b/api/tests/unit_tests/core/datasource/test_notion_provider.py @@ -775,9 +775,6 @@ class TestNotionExtractorLastEditedTime: "last_edited_time": "2024-11-27T18:00:00.000Z", } mock_request.return_value = mock_response - mock_query = Mock() - mock_db.session.query.return_value = mock_query - mock_query.filter_by.return_value = mock_query # Act extractor_page.update_last_edited_time(mock_document_model) @@ -863,9 +860,6 @@ class TestNotionExtractorIntegration: } mock_request.side_effect = [last_edited_response, block_response] - mock_query = Mock() - mock_db.session.query.return_value = mock_query - mock_query.filter_by.return_value = mock_query # Act documents = extractor.extract() @@ -919,10 +913,6 @@ class TestNotionExtractorIntegration: } mock_post.return_value = database_response - mock_query = Mock() - mock_db.session.query.return_value = mock_query - mock_query.filter_by.return_value = mock_query - # Act documents = extractor.extract() diff --git a/api/tests/unit_tests/core/helper/test_encrypter.py 
b/api/tests/unit_tests/core/helper/test_encrypter.py index f3ef7fccd0..73e081a570 100644 --- a/api/tests/unit_tests/core/helper/test_encrypter.py +++ b/api/tests/unit_tests/core/helper/test_encrypter.py @@ -40,11 +40,11 @@ class TestObfuscatedToken: class TestEncryptToken: @patch("extensions.ext_database.db.session.get") @patch("libs.rsa.encrypt") - def test_successful_encryption(self, mock_encrypt, mock_query): + def test_successful_encryption(self, mock_encrypt, mock_get): """Test successful token encryption""" mock_tenant = MagicMock() mock_tenant.encrypt_public_key = "mock_public_key" - mock_query.return_value = mock_tenant + mock_get.return_value = mock_tenant mock_encrypt.return_value = b"encrypted_data" result = encrypt_token("tenant-123", "test_token") @@ -53,9 +53,9 @@ class TestEncryptToken: mock_encrypt.assert_called_with("test_token", "mock_public_key") @patch("extensions.ext_database.db.session.get") - def test_tenant_not_found(self, mock_query): + def test_tenant_not_found(self, mock_get): """Test error when tenant doesn't exist""" - mock_query.return_value = None + mock_get.return_value = None with pytest.raises(ValueError) as exc_info: encrypt_token("invalid-tenant", "test_token") @@ -122,12 +122,12 @@ class TestEncryptDecryptIntegration: @patch("extensions.ext_database.db.session.get") @patch("libs.rsa.encrypt") @patch("libs.rsa.decrypt") - def test_should_encrypt_and_decrypt_consistently(self, mock_decrypt, mock_encrypt, mock_query): + def test_should_encrypt_and_decrypt_consistently(self, mock_decrypt, mock_encrypt, mock_get): """Test that encryption and decryption are consistent""" # Setup mock tenant mock_tenant = MagicMock() mock_tenant.encrypt_public_key = "mock_public_key" - mock_query.return_value = mock_tenant + mock_get.return_value = mock_tenant # Setup mock encryption/decryption original_token = "test_token_123" @@ -148,12 +148,12 @@ class TestSecurity: @patch("extensions.ext_database.db.session.get") @patch("libs.rsa.encrypt") - def test_cross_tenant_isolation(self, mock_encrypt, mock_query): + def test_cross_tenant_isolation(self, mock_encrypt, mock_get): """Ensure tokens encrypted for one tenant cannot be used by another""" # Setup mock tenant mock_tenant = MagicMock() mock_tenant.encrypt_public_key = "tenant1_public_key" - mock_query.return_value = mock_tenant + mock_get.return_value = mock_tenant mock_encrypt.return_value = b"encrypted_for_tenant1" # Encrypt token for tenant1 @@ -183,10 +183,10 @@ class TestSecurity: @patch("extensions.ext_database.db.session.get") @patch("libs.rsa.encrypt") - def test_encryption_randomness(self, mock_encrypt, mock_query): + def test_encryption_randomness(self, mock_encrypt, mock_get): """Ensure same plaintext produces different ciphertext""" mock_tenant = MagicMock(encrypt_public_key="key") - mock_query.return_value = mock_tenant + mock_get.return_value = mock_tenant # Different outputs for same input mock_encrypt.side_effect = [b"enc1", b"enc2", b"enc3"] @@ -207,11 +207,11 @@ class TestEdgeCases: @patch("extensions.ext_database.db.session.get") @patch("libs.rsa.encrypt") - def test_should_handle_empty_token_encryption(self, mock_encrypt, mock_query): + def test_should_handle_empty_token_encryption(self, mock_encrypt, mock_get): """Test encryption of empty token""" mock_tenant = MagicMock() mock_tenant.encrypt_public_key = "mock_public_key" - mock_query.return_value = mock_tenant + mock_get.return_value = mock_tenant mock_encrypt.return_value = b"encrypted_empty" result = encrypt_token("tenant-123", "") @@ -221,11 +221,11 
@@ class TestEdgeCases: @patch("extensions.ext_database.db.session.get") @patch("libs.rsa.encrypt") - def test_should_handle_special_characters_in_token(self, mock_encrypt, mock_query): + def test_should_handle_special_characters_in_token(self, mock_encrypt, mock_get): """Test tokens containing special/unicode characters""" mock_tenant = MagicMock() mock_tenant.encrypt_public_key = "mock_public_key" - mock_query.return_value = mock_tenant + mock_get.return_value = mock_tenant mock_encrypt.return_value = b"encrypted_special" # Test various special characters @@ -244,11 +244,11 @@ class TestEdgeCases: @patch("extensions.ext_database.db.session.get") @patch("libs.rsa.encrypt") - def test_should_handle_rsa_size_limits(self, mock_encrypt, mock_query): + def test_should_handle_rsa_size_limits(self, mock_encrypt, mock_get): """Test behavior when token exceeds RSA encryption limits""" mock_tenant = MagicMock() mock_tenant.encrypt_public_key = "mock_public_key" - mock_query.return_value = mock_tenant + mock_get.return_value = mock_tenant # RSA 2048-bit can only encrypt ~245 bytes # The actual limit depends on padding scheme diff --git a/api/tests/unit_tests/core/llm_generator/test_llm_generator.py b/api/tests/unit_tests/core/llm_generator/test_llm_generator.py index 3b64ce6b5c..048f114951 100644 --- a/api/tests/unit_tests/core/llm_generator/test_llm_generator.py +++ b/api/tests/unit_tests/core/llm_generator/test_llm_generator.py @@ -495,7 +495,7 @@ class TestLLMGenerator: def test_instruction_modify_workflow_no_last_run_fallback(self, mock_model_instance, model_config_entity): with patch("extensions.ext_database.db.session") as mock_session: - mock_session.return_value.query.return_value.where.return_value.first.return_value = MagicMock() + mock_session.return_value.scalar.return_value = MagicMock() workflow = MagicMock() workflow.graph_dict = {"graph": {"nodes": [{"id": "node_id", "data": {"type": "code"}}]}} @@ -521,7 +521,7 @@ class TestLLMGenerator: def test_instruction_modify_workflow_node_type_fallback(self, mock_model_instance, model_config_entity): with patch("extensions.ext_database.db.session") as mock_session: - mock_session.return_value.query.return_value.where.return_value.first.return_value = MagicMock() + mock_session.return_value.scalar.return_value = MagicMock() workflow = MagicMock() # Cause exception in node_type logic workflow.graph_dict = {"graph": {"nodes": []}} @@ -548,7 +548,7 @@ class TestLLMGenerator: def test_instruction_modify_workflow_empty_agent_log(self, mock_model_instance, model_config_entity): with patch("extensions.ext_database.db.session") as mock_session: - mock_session.return_value.query.return_value.where.return_value.first.return_value = MagicMock() + mock_session.return_value.scalar.return_value = MagicMock() workflow = MagicMock() workflow.graph_dict = {"graph": {"nodes": [{"id": "node_id", "data": {"type": "llm"}}]}} @@ -636,7 +636,7 @@ class TestLLMGenerator: instance.invoke_llm.return_value = mock_response with patch("extensions.ext_database.db.session") as mock_session: - mock_session.return_value.query.return_value.where.return_value.first.return_value = MagicMock() + mock_session.return_value.scalar.return_value = MagicMock() workflow = MagicMock() workflow.graph_dict = {"graph": {"nodes": [{"id": "node_id", "data": {"type": "other"}}]}} diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py index 136ac0c72a..1e91c2dd88 100644 --- 
a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py +++ b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py @@ -29,15 +29,6 @@ class _Field: return ("in", self._name, tuple(values)) -class _FakeQuery: - def __init__(self): - self.where_calls: list[tuple] = [] - - def where(self, *conditions): - self.where_calls.append(conditions) - return self - - class _FakeExecuteResult: def __init__(self, segments: list[SimpleNamespace]): self._segments = segments diff --git a/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py b/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py index 0baf85c314..b0ecad4d0c 100644 --- a/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py +++ b/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py @@ -109,17 +109,6 @@ class _FakeExecuteResult: return _FakeExecuteScalarResult(self._data) -class _FakeSummaryQuery: - def __init__(self, summaries: list) -> None: - self._summaries = summaries - - def filter(self, *args, **kwargs): - return self - - def all(self) -> list: - return self._summaries - - class _FakeScalarsResult: def __init__(self, data: list) -> None: self._data = data diff --git a/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py b/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py index dc21d378a2..9de04c80ba 100644 --- a/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py +++ b/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py @@ -372,19 +372,11 @@ def test_vector_delegation_methods(vector_factory_module): def test_search_by_file_handles_missing_and_existing_upload(vector_factory_module, monkeypatch): - class _Field: - def __eq__(self, value): - return value - - upload_query = MagicMock() - upload_query.where.return_value = upload_query - vector = vector_factory_module.Vector.__new__(vector_factory_module.Vector) vector._embeddings = MagicMock() vector._vector_processor = MagicMock() mock_session = SimpleNamespace(get=lambda _model, _id: None) - monkeypatch.setattr(vector_factory_module, "UploadFile", SimpleNamespace(id=_Field())) monkeypatch.setattr(vector_factory_module, "db", SimpleNamespace(session=mock_session)) assert vector.search_by_file("file-1") == [] diff --git a/api/tests/unit_tests/core/rag/indexing/test_indexing_runner.py b/api/tests/unit_tests/core/rag/indexing/test_indexing_runner.py index 641c5d9ba0..fb14c6283c 100644 --- a/api/tests/unit_tests/core/rag/indexing/test_indexing_runner.py +++ b/api/tests/unit_tests/core/rag/indexing/test_indexing_runner.py @@ -1484,11 +1484,8 @@ class TestIndexingRunnerProcessChunk: mock_dependencies["redis"].get.return_value = None - # Mock database query for segment updates - mock_query = MagicMock() - mock_dependencies["db"].session.query.return_value = mock_query - mock_query.where.return_value = mock_query - mock_query.update.return_value = None + # Mock database update for segment status + mock_dependencies["db"].session.execute.return_value = None # Create a proper context manager mock mock_context = MagicMock() diff --git a/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval.py b/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval.py index 89830f7517..fd607210f1 100644 --- a/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval.py +++ b/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval.py @@ -2417,12 +2417,11 @@ class TestDatasetRetrievalKnowledgeRetrieval: 
mock_document.data_source_type = "upload_file" mock_document.doc_metadata = {} - mock_session.query.return_value.filter.return_value.all.return_value = [ - mock_dataset_from_db - ] - mock_session.query.return_value.filter.return_value.all.__iter__ = lambda self: iter( - [mock_dataset_from_db, mock_document] - ) + mock_datasets = MagicMock() + mock_datasets.all.return_value = [mock_dataset_from_db] + mock_documents = MagicMock() + mock_documents.all.return_value = [mock_document] + mock_session.scalars.side_effect = [mock_datasets, mock_documents] # Act result = dataset_retrieval.knowledge_retrieval(request) diff --git a/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_methods.py b/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_methods.py index 90feb4cf01..aace419d15 100644 --- a/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_methods.py +++ b/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_methods.py @@ -451,12 +451,11 @@ class TestDatasetRetrievalKnowledgeRetrieval: mock_document.data_source_type = "upload_file" mock_document.doc_metadata = {} - mock_session.query.return_value.filter.return_value.all.return_value = [ - mock_dataset_from_db - ] - mock_session.query.return_value.filter.return_value.all.__iter__ = lambda self: iter( - [mock_dataset_from_db, mock_document] - ) + mock_datasets = MagicMock() + mock_datasets.all.return_value = [mock_dataset_from_db] + mock_documents = MagicMock() + mock_documents.all.return_value = [mock_document] + mock_session.scalars.side_effect = [mock_datasets, mock_documents] # Act result = dataset_retrieval.knowledge_retrieval(request) diff --git a/api/tests/unit_tests/models/test_app_models.py b/api/tests/unit_tests/models/test_app_models.py index 4e46cf9654..e3b8269e15 100644 --- a/api/tests/unit_tests/models/test_app_models.py +++ b/api/tests/unit_tests/models/test_app_models.py @@ -711,6 +711,8 @@ class TestMessageAnnotation: annotation = MessageAnnotation( app_id=app_id, question="What is AI?", + conversation_id=None, + message_id=None, content="AI stands for Artificial Intelligence.", account_id=account_id, ) @@ -728,6 +730,8 @@ class TestMessageAnnotation: annotation = MessageAnnotation( app_id=str(uuid4()), question="Test question", + conversation_id=None, + message_id=None, content="Test content", account_id=str(uuid4()), ) @@ -1068,6 +1072,8 @@ class TestModelIntegration: app_id=app_id, question="What is AI?", content="AI stands for Artificial Intelligence.", + conversation_id=None, + message_id=message_id, account_id=account_id, ) annotation.id = annotation_id diff --git a/api/tests/unit_tests/services/dataset_service_test_helpers.py b/api/tests/unit_tests/services/dataset_service_test_helpers.py index 3349c1fd8c..806f1e8d91 100644 --- a/api/tests/unit_tests/services/dataset_service_test_helpers.py +++ b/api/tests/unit_tests/services/dataset_service_test_helpers.py @@ -365,7 +365,6 @@ def _make_segment( def _make_child_chunk() -> ChildChunk: return ChildChunk( - id="child-a", tenant_id="tenant-1", dataset_id="dataset-1", document_id="doc-1", diff --git a/api/tests/unit_tests/services/document_indexing_task_proxy.py b/api/tests/unit_tests/services/document_indexing_task_proxy.py deleted file mode 100644 index ff243b8dc3..0000000000 --- a/api/tests/unit_tests/services/document_indexing_task_proxy.py +++ /dev/null @@ -1,1291 +0,0 @@ -""" -Comprehensive unit tests for DocumentIndexingTaskProxy service. 
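The dataset-retrieval hunks above swap the brittle query().filter().all() mock chain for two canned session.scalars results driven by side_effect. A minimal sketch of that pattern (the statement strings and returned values below are made up for illustration):

    from unittest.mock import MagicMock

    session = MagicMock()
    datasets_result = MagicMock()
    datasets_result.all.return_value = ["dataset-1"]
    documents_result = MagicMock()
    documents_result.all.return_value = ["document-1"]
    # Each successive session.scalars(...) call consumes the next canned result.
    session.scalars.side_effect = [datasets_result, documents_result]

    assert session.scalars("select datasets").all() == ["dataset-1"]
    assert session.scalars("select documents").all() == ["document-1"]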
- -This module contains extensive unit tests for the DocumentIndexingTaskProxy class, -which is responsible for routing document indexing tasks to appropriate Celery queues -based on tenant billing configuration and managing tenant-isolated task queues. - -The DocumentIndexingTaskProxy handles: -- Task scheduling and queuing (direct vs tenant-isolated queues) -- Priority vs normal task routing based on billing plans -- Tenant isolation using TenantIsolatedTaskQueue -- Batch indexing operations with multiple document IDs -- Error handling and retry logic through queue management - -This test suite ensures: -- Correct task routing based on billing configuration -- Proper tenant isolation queue management -- Accurate batch operation handling -- Comprehensive error condition coverage -- Edge cases are properly handled - -================================================================================ -ARCHITECTURE OVERVIEW -================================================================================ - -The DocumentIndexingTaskProxy is a critical component in the document indexing -workflow. It acts as a proxy/router that determines which Celery queue to use -for document indexing tasks based on tenant billing configuration. - -1. Task Queue Routing: - - Direct Queue: Bypasses tenant isolation, used for self-hosted/enterprise - - Tenant Queue: Uses tenant isolation, queues tasks when another task is running - - Default Queue: Normal priority with tenant isolation (SANDBOX plan) - - Priority Queue: High priority with tenant isolation (TEAM/PRO plans) - - Priority Direct Queue: High priority without tenant isolation (billing disabled) - -2. Tenant Isolation: - - Uses TenantIsolatedTaskQueue to ensure only one indexing task runs per tenant - - When a task is running, new tasks are queued in Redis - - When a task completes, it pulls the next task from the queue - - Prevents resource contention and ensures fair task distribution - -3. Billing Configuration: - - SANDBOX plan: Uses default tenant queue (normal priority, tenant isolated) - - TEAM/PRO plans: Uses priority tenant queue (high priority, tenant isolated) - - Billing disabled: Uses priority direct queue (high priority, no isolation) - -4. Batch Operations: - - Supports indexing multiple documents in a single task - - DocumentTask entity serializes task information - - Tasks are queued with all document IDs for batch processing - -================================================================================ -TESTING STRATEGY -================================================================================ - -This test suite follows a comprehensive testing strategy that covers: - -1. Initialization and Configuration: - - Proxy initialization with various parameters - - TenantIsolatedTaskQueue initialization - - Features property caching - - Edge cases (empty document_ids, single document, large batches) - -2. Task Queue Routing: - - Direct queue routing (bypasses tenant isolation) - - Tenant queue routing with existing task key (pushes to waiting queue) - - Tenant queue routing without task key (sets flag and executes immediately) - - DocumentTask serialization and deserialization - - Task function delay() call with correct parameters - -3. Queue Type Selection: - - Default tenant queue routing (normal_document_indexing_task) - - Priority tenant queue routing (priority_document_indexing_task with isolation) - - Priority direct queue routing (priority_document_indexing_task without isolation) - -4. 
Dispatch Logic: - - Billing enabled + SANDBOX plan → default tenant queue - - Billing enabled + non-SANDBOX plan (TEAM, PRO, etc.) → priority tenant queue - - Billing disabled (self-hosted/enterprise) → priority direct queue - - All CloudPlan enum values handling - - Edge cases: None plan, empty plan string - -5. Tenant Isolation and Queue Management: - - Task key existence checking (get_task_key) - - Task waiting time setting (set_task_waiting_time) - - Task pushing to queue (push_tasks) - - Queue state transitions (idle → active → idle) - - Multiple concurrent task handling - -6. Batch Operations: - - Single document indexing - - Multiple document batch indexing - - Large batch handling - - Empty batch handling (edge case) - -7. Error Handling and Retry Logic: - - Task function delay() failure handling - - Queue operation failures (Redis errors) - - Feature service failures - - Invalid task data handling - - Retry mechanism through queue pull operations - -8. Integration Points: - - FeatureService integration (billing features, subscription plans) - - TenantIsolatedTaskQueue integration (Redis operations) - - Celery task integration (normal_document_indexing_task, priority_document_indexing_task) - - DocumentTask entity serialization - -================================================================================ -""" - -from unittest.mock import Mock, patch - -import pytest - -from core.entities.document_task import DocumentTask -from core.rag.pipeline.queue import TenantIsolatedTaskQueue -from enums.cloud_plan import CloudPlan -from services.document_indexing_proxy.document_indexing_task_proxy import DocumentIndexingTaskProxy - -# ============================================================================ -# Test Data Factory -# ============================================================================ - - -class DocumentIndexingTaskProxyTestDataFactory: - """ - Factory class for creating test data and mock objects for DocumentIndexingTaskProxy tests. - - This factory provides static methods to create mock objects for: - - FeatureService features with billing configuration - - TenantIsolatedTaskQueue mocks with various states - - DocumentIndexingTaskProxy instances with different configurations - - DocumentTask entities for testing serialization - - The factory methods help maintain consistency across tests and reduce - code duplication when setting up test scenarios. - """ - - @staticmethod - def create_mock_features(billing_enabled: bool = False, plan: CloudPlan = CloudPlan.SANDBOX) -> Mock: - """ - Create mock features with billing configuration. - - This method creates a mock FeatureService features object with - billing configuration that can be used to test different billing - scenarios in the DocumentIndexingTaskProxy. - - Args: - billing_enabled: Whether billing is enabled for the tenant - plan: The CloudPlan enum value for the subscription plan - - Returns: - Mock object configured as FeatureService features with billing info - """ - features = Mock() - - features.billing = Mock() - - features.billing.enabled = billing_enabled - - features.billing.subscription = Mock() - - features.billing.subscription.plan = plan - - return features - - @staticmethod - def create_mock_tenant_queue(has_task_key: bool = False) -> Mock: - """ - Create mock TenantIsolatedTaskQueue. - - This method creates a mock TenantIsolatedTaskQueue that can simulate - different queue states for testing tenant isolation logic. 
- - Args: - has_task_key: Whether the queue has an active task key (task running) - - Returns: - Mock object configured as TenantIsolatedTaskQueue - """ - queue = Mock(spec=TenantIsolatedTaskQueue) - - queue.get_task_key.return_value = "task_key" if has_task_key else None - - queue.push_tasks = Mock() - - queue.set_task_waiting_time = Mock() - - queue.delete_task_key = Mock() - - return queue - - @staticmethod - def create_document_task_proxy( - tenant_id: str = "tenant-123", dataset_id: str = "dataset-456", document_ids: list[str] | None = None - ) -> DocumentIndexingTaskProxy: - """ - Create DocumentIndexingTaskProxy instance for testing. - - This method creates a DocumentIndexingTaskProxy instance with default - or specified parameters for use in test cases. - - Args: - tenant_id: Tenant identifier for the proxy - dataset_id: Dataset identifier for the proxy - document_ids: List of document IDs to index (defaults to 3 documents) - - Returns: - DocumentIndexingTaskProxy instance configured for testing - """ - if document_ids is None: - document_ids = ["doc-1", "doc-2", "doc-3"] - - return DocumentIndexingTaskProxy(tenant_id, dataset_id, document_ids) - - @staticmethod - def create_document_task( - tenant_id: str = "tenant-123", dataset_id: str = "dataset-456", document_ids: list[str] | None = None - ) -> DocumentTask: - """ - Create DocumentTask entity for testing. - - This method creates a DocumentTask entity that can be used to test - task serialization and deserialization logic. - - Args: - tenant_id: Tenant identifier for the task - dataset_id: Dataset identifier for the task - document_ids: List of document IDs to index (defaults to 3 documents) - - Returns: - DocumentTask entity configured for testing - """ - if document_ids is None: - document_ids = ["doc-1", "doc-2", "doc-3"] - - return DocumentTask(tenant_id=tenant_id, dataset_id=dataset_id, document_ids=document_ids) - - -# ============================================================================ -# Test Classes -# ============================================================================ - - -class TestDocumentIndexingTaskProxy: - """ - Comprehensive unit tests for DocumentIndexingTaskProxy class. - - This test class covers all methods and scenarios of the DocumentIndexingTaskProxy, - including initialization, task routing, queue management, dispatch logic, and - error handling. - """ - - # ======================================================================== - # Initialization Tests - # ======================================================================== - - def test_initialization(self): - """ - Test DocumentIndexingTaskProxy initialization. - - This test verifies that the proxy is correctly initialized with - the provided tenant_id, dataset_id, and document_ids, and that - the TenantIsolatedTaskQueue is properly configured. - """ - # Arrange - tenant_id = "tenant-123" - - dataset_id = "dataset-456" - - document_ids = ["doc-1", "doc-2", "doc-3"] - - # Act - proxy = DocumentIndexingTaskProxy(tenant_id, dataset_id, document_ids) - - # Assert - assert proxy._tenant_id == tenant_id - - assert proxy._dataset_id == dataset_id - - assert proxy._document_ids == document_ids - - assert isinstance(proxy._tenant_isolated_task_queue, TenantIsolatedTaskQueue) - - assert proxy._tenant_isolated_task_queue._tenant_id == tenant_id - - assert proxy._tenant_isolated_task_queue._unique_key == "document_indexing" - - def test_initialization_with_empty_document_ids(self): - """ - Test initialization with empty document_ids list. 
- - This test verifies that the proxy can be initialized with an empty - document_ids list, which may occur in edge cases or error scenarios. - """ - # Arrange - tenant_id = "tenant-123" - - dataset_id = "dataset-456" - - document_ids = [] - - # Act - proxy = DocumentIndexingTaskProxy(tenant_id, dataset_id, document_ids) - - # Assert - assert proxy._tenant_id == tenant_id - - assert proxy._dataset_id == dataset_id - - assert proxy._document_ids == document_ids - - assert len(proxy._document_ids) == 0 - - def test_initialization_with_single_document_id(self): - """ - Test initialization with single document_id. - - This test verifies that the proxy can be initialized with a single - document ID, which is a common use case for single document indexing. - """ - # Arrange - tenant_id = "tenant-123" - - dataset_id = "dataset-456" - - document_ids = ["doc-1"] - - # Act - proxy = DocumentIndexingTaskProxy(tenant_id, dataset_id, document_ids) - - # Assert - assert proxy._tenant_id == tenant_id - - assert proxy._dataset_id == dataset_id - - assert proxy._document_ids == document_ids - - assert len(proxy._document_ids) == 1 - - def test_initialization_with_large_batch(self): - """ - Test initialization with large batch of document IDs. - - This test verifies that the proxy can handle large batches of - document IDs, which may occur in bulk indexing scenarios. - """ - # Arrange - tenant_id = "tenant-123" - - dataset_id = "dataset-456" - - document_ids = [f"doc-{i}" for i in range(100)] - - # Act - proxy = DocumentIndexingTaskProxy(tenant_id, dataset_id, document_ids) - - # Assert - assert proxy._tenant_id == tenant_id - - assert proxy._dataset_id == dataset_id - - assert proxy._document_ids == document_ids - - assert len(proxy._document_ids) == 100 - - # ======================================================================== - # Features Property Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_features_property(self, mock_feature_service): - """ - Test cached_property features. - - This test verifies that the features property is correctly cached - and that FeatureService.get_features is called only once, even when - the property is accessed multiple times. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features() - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - # Act - features1 = proxy.features - - features2 = proxy.features # Second call should use cached property - - # Assert - assert features1 == mock_features - - assert features2 == mock_features - - assert features1 is features2 # Should be the same instance due to caching - - mock_feature_service.get_features.assert_called_once_with("tenant-123") - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_features_property_with_different_tenants(self, mock_feature_service): - """ - Test features property with different tenant IDs. - - This test verifies that the features property correctly calls - FeatureService.get_features with the correct tenant_id for each - proxy instance. 
- """ - # Arrange - mock_features1 = DocumentIndexingTaskProxyTestDataFactory.create_mock_features() - - mock_features2 = DocumentIndexingTaskProxyTestDataFactory.create_mock_features() - - mock_feature_service.get_features.side_effect = [mock_features1, mock_features2] - - proxy1 = DocumentIndexingTaskProxy("tenant-1", "dataset-1", ["doc-1"]) - - proxy2 = DocumentIndexingTaskProxy("tenant-2", "dataset-2", ["doc-2"]) - - # Act - features1 = proxy1.features - - features2 = proxy2.features - - # Assert - assert features1 == mock_features1 - - assert features2 == mock_features2 - - mock_feature_service.get_features.assert_any_call("tenant-1") - - mock_feature_service.get_features.assert_any_call("tenant-2") - - # ======================================================================== - # Direct Queue Routing Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_direct_queue(self, mock_task): - """ - Test _send_to_direct_queue method. - - This test verifies that _send_to_direct_queue correctly calls - task_func.delay() with the correct parameters, bypassing tenant - isolation queue management. - """ - # Arrange - tenant_id = "tenant-direct-queue" - dataset_id = "dataset-direct-queue" - document_ids = ["doc-direct-1", "doc-direct-2"] - proxy = DocumentIndexingTaskProxy(tenant_id, dataset_id, document_ids) - mock_task.delay = Mock() - - # Act - proxy._send_to_direct_queue(mock_task) - - # Assert - mock_task.delay.assert_called_once_with(tenant_id=tenant_id, dataset_id=dataset_id, document_ids=document_ids) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.priority_document_indexing_task") - def test_send_to_direct_queue_with_priority_task(self, mock_task): - """ - Test _send_to_direct_queue with priority task function. - - This test verifies that _send_to_direct_queue works correctly - with priority_document_indexing_task as the task function. - """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - mock_task.delay = Mock() - - # Act - proxy._send_to_direct_queue(mock_task) - - # Assert - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=["doc-1", "doc-2", "doc-3"] - ) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_direct_queue_with_single_document(self, mock_task): - """ - Test _send_to_direct_queue with single document ID. - - This test verifies that _send_to_direct_queue correctly handles - a single document ID in the document_ids list. - """ - # Arrange - proxy = DocumentIndexingTaskProxy("tenant-123", "dataset-456", ["doc-1"]) - - mock_task.delay = Mock() - - # Act - proxy._send_to_direct_queue(mock_task) - - # Assert - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=["doc-1"] - ) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_direct_queue_with_empty_documents(self, mock_task): - """ - Test _send_to_direct_queue with empty document_ids list. - - This test verifies that _send_to_direct_queue correctly handles - an empty document_ids list, which may occur in edge cases. 
- """ - # Arrange - proxy = DocumentIndexingTaskProxy("tenant-123", "dataset-456", []) - - mock_task.delay = Mock() - - # Act - proxy._send_to_direct_queue(mock_task) - - # Assert - mock_task.delay.assert_called_once_with(tenant_id="tenant-123", dataset_id="dataset-456", document_ids=[]) - - # ======================================================================== - # Tenant Queue Routing Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_tenant_queue_with_existing_task_key(self, mock_task): - """ - Test _send_to_tenant_queue when task key exists. - - This test verifies that when a task key exists (indicating another - task is running), the new task is pushed to the waiting queue instead - of being executed immediately. - """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._tenant_isolated_task_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue( - has_task_key=True - ) - - mock_task.delay = Mock() - - # Act - proxy._send_to_tenant_queue(mock_task) - - # Assert - proxy._tenant_isolated_task_queue.push_tasks.assert_called_once() - - pushed_tasks = proxy._tenant_isolated_task_queue.push_tasks.call_args[0][0] - - assert len(pushed_tasks) == 1 - - expected_task_data = { - "tenant_id": "tenant-123", - "dataset_id": "dataset-456", - "document_ids": ["doc-1", "doc-2", "doc-3"], - } - assert pushed_tasks[0] == expected_task_data - - assert pushed_tasks[0]["document_ids"] == ["doc-1", "doc-2", "doc-3"] - - mock_task.delay.assert_not_called() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_tenant_queue_without_task_key(self, mock_task): - """ - Test _send_to_tenant_queue when no task key exists. - - This test verifies that when no task key exists (indicating no task - is currently running), the task is executed immediately and the - task waiting time flag is set. - """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._tenant_isolated_task_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue( - has_task_key=False - ) - - mock_task.delay = Mock() - - # Act - proxy._send_to_tenant_queue(mock_task) - - # Assert - proxy._tenant_isolated_task_queue.set_task_waiting_time.assert_called_once() - - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=["doc-1", "doc-2", "doc-3"] - ) - - proxy._tenant_isolated_task_queue.push_tasks.assert_not_called() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.priority_document_indexing_task") - def test_send_to_tenant_queue_with_priority_task(self, mock_task): - """ - Test _send_to_tenant_queue with priority task function. - - This test verifies that _send_to_tenant_queue works correctly - with priority_document_indexing_task as the task function. 
- """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._tenant_isolated_task_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue( - has_task_key=False - ) - - mock_task.delay = Mock() - - # Act - proxy._send_to_tenant_queue(mock_task) - - # Assert - proxy._tenant_isolated_task_queue.set_task_waiting_time.assert_called_once() - - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=["doc-1", "doc-2", "doc-3"] - ) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_tenant_queue_document_task_serialization(self, mock_task): - """ - Test DocumentTask serialization in _send_to_tenant_queue. - - This test verifies that DocumentTask entities are correctly - serialized to dictionaries when pushing to the waiting queue. - """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._tenant_isolated_task_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue( - has_task_key=True - ) - - mock_task.delay = Mock() - - # Act - proxy._send_to_tenant_queue(mock_task) - - # Assert - pushed_tasks = proxy._tenant_isolated_task_queue.push_tasks.call_args[0][0] - - task_dict = pushed_tasks[0] - - # Verify the task can be deserialized back to DocumentTask - document_task = DocumentTask(**task_dict) - - assert document_task.tenant_id == "tenant-123" - - assert document_task.dataset_id == "dataset-456" - - assert document_task.document_ids == ["doc-1", "doc-2", "doc-3"] - - # ======================================================================== - # Queue Type Selection Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_default_tenant_queue(self, mock_task): - """ - Test _send_to_default_tenant_queue method. - - This test verifies that _send_to_default_tenant_queue correctly - calls _send_to_tenant_queue with normal_document_indexing_task. - """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_tenant_queue = Mock() - - # Act - proxy._send_to_default_tenant_queue() - - # Assert - proxy._send_to_tenant_queue.assert_called_once_with(mock_task) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.priority_document_indexing_task") - def test_send_to_priority_tenant_queue(self, mock_task): - """ - Test _send_to_priority_tenant_queue method. - - This test verifies that _send_to_priority_tenant_queue correctly - calls _send_to_tenant_queue with priority_document_indexing_task. - """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_tenant_queue = Mock() - - # Act - proxy._send_to_priority_tenant_queue() - - # Assert - proxy._send_to_tenant_queue.assert_called_once_with(mock_task) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.priority_document_indexing_task") - def test_send_to_priority_direct_queue(self, mock_task): - """ - Test _send_to_priority_direct_queue method. - - This test verifies that _send_to_priority_direct_queue correctly - calls _send_to_direct_queue with priority_document_indexing_task. 
- """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_direct_queue = Mock() - - # Act - proxy._send_to_priority_direct_queue() - - # Assert - proxy._send_to_direct_queue.assert_called_once_with(mock_task) - - # ======================================================================== - # Dispatch Logic Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_dispatch_with_billing_enabled_sandbox_plan(self, mock_feature_service): - """ - Test _dispatch method when billing is enabled with SANDBOX plan. - - This test verifies that when billing is enabled and the subscription - plan is SANDBOX, the dispatch method routes to the default tenant queue. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.SANDBOX - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_default_tenant_queue = Mock() - - # Act - proxy._dispatch() - - # Assert - proxy._send_to_default_tenant_queue.assert_called_once() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_dispatch_with_billing_enabled_team_plan(self, mock_feature_service): - """ - Test _dispatch method when billing is enabled with TEAM plan. - - This test verifies that when billing is enabled and the subscription - plan is TEAM, the dispatch method routes to the priority tenant queue. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.TEAM - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_priority_tenant_queue = Mock() - - # Act - proxy._dispatch() - - # Assert - proxy._send_to_priority_tenant_queue.assert_called_once() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_dispatch_with_billing_enabled_professional_plan(self, mock_feature_service): - """ - Test _dispatch method when billing is enabled with PROFESSIONAL plan. - - This test verifies that when billing is enabled and the subscription - plan is PROFESSIONAL, the dispatch method routes to the priority tenant queue. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.PROFESSIONAL - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_priority_tenant_queue = Mock() - - # Act - proxy._dispatch() - - # Assert - proxy._send_to_priority_tenant_queue.assert_called_once() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_dispatch_with_billing_disabled(self, mock_feature_service): - """ - Test _dispatch method when billing is disabled. - - This test verifies that when billing is disabled (e.g., self-hosted - or enterprise), the dispatch method routes to the priority direct queue. 
- """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features(billing_enabled=False) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_priority_direct_queue = Mock() - - # Act - proxy._dispatch() - - # Assert - proxy._send_to_priority_direct_queue.assert_called_once() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_dispatch_edge_case_empty_plan(self, mock_feature_service): - """ - Test _dispatch method with empty plan string. - - This test verifies that when billing is enabled but the plan is an - empty string, the dispatch method routes to the priority tenant queue - (treats it as a non-SANDBOX plan). - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features(billing_enabled=True, plan="") - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_priority_tenant_queue = Mock() - - # Act - proxy._dispatch() - - # Assert - proxy._send_to_priority_tenant_queue.assert_called_once() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_dispatch_edge_case_none_plan(self, mock_feature_service): - """ - Test _dispatch method with None plan. - - This test verifies that when billing is enabled but the plan is None, - the dispatch method routes to the priority tenant queue (treats it as - a non-SANDBOX plan). - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features(billing_enabled=True, plan=None) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_priority_tenant_queue = Mock() - - # Act - proxy._dispatch() - - # Assert - proxy._send_to_priority_tenant_queue.assert_called_once() - - # ======================================================================== - # Delay Method Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_delay_method(self, mock_feature_service): - """ - Test delay method integration. - - This test verifies that the delay method correctly calls _dispatch, - which is the public interface for scheduling document indexing tasks. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.SANDBOX - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_default_tenant_queue = Mock() - - # Act - proxy.delay() - - # Assert - proxy._send_to_default_tenant_queue.assert_called_once() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_delay_method_with_team_plan(self, mock_feature_service): - """ - Test delay method with TEAM plan. - - This test verifies that the delay method correctly routes to the - priority tenant queue when the subscription plan is TEAM. 
- """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.TEAM - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_priority_tenant_queue = Mock() - - # Act - proxy.delay() - - # Assert - proxy._send_to_priority_tenant_queue.assert_called_once() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_delay_method_with_billing_disabled(self, mock_feature_service): - """ - Test delay method with billing disabled. - - This test verifies that the delay method correctly routes to the - priority direct queue when billing is disabled. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features(billing_enabled=False) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_priority_direct_queue = Mock() - - # Act - proxy.delay() - - # Assert - proxy._send_to_priority_direct_queue.assert_called_once() - - # ======================================================================== - # DocumentTask Entity Tests - # ======================================================================== - - def test_document_task_dataclass(self): - """ - Test DocumentTask dataclass. - - This test verifies that DocumentTask entities can be created and - accessed correctly, which is important for task serialization. - """ - # Arrange - tenant_id = "tenant-123" - - dataset_id = "dataset-456" - - document_ids = ["doc-1", "doc-2"] - - # Act - task = DocumentTask(tenant_id=tenant_id, dataset_id=dataset_id, document_ids=document_ids) - - # Assert - assert task.tenant_id == tenant_id - - assert task.dataset_id == dataset_id - - assert task.document_ids == document_ids - - def test_document_task_serialization(self): - """ - Test DocumentTask serialization to dictionary. - - This test verifies that DocumentTask entities can be correctly - serialized to dictionaries using asdict() for queue storage. - """ - # Arrange - from dataclasses import asdict - - task = DocumentIndexingTaskProxyTestDataFactory.create_document_task() - - # Act - task_dict = asdict(task) - - # Assert - assert task_dict["tenant_id"] == "tenant-123" - - assert task_dict["dataset_id"] == "dataset-456" - - assert task_dict["document_ids"] == ["doc-1", "doc-2", "doc-3"] - - def test_document_task_deserialization(self): - """ - Test DocumentTask deserialization from dictionary. - - This test verifies that DocumentTask entities can be correctly - deserialized from dictionaries when pulled from the queue. - """ - # Arrange - task_dict = { - "tenant_id": "tenant-123", - "dataset_id": "dataset-456", - "document_ids": ["doc-1", "doc-2", "doc-3"], - } - - # Act - task = DocumentTask(**task_dict) - - # Assert - assert task.tenant_id == "tenant-123" - - assert task.dataset_id == "dataset-456" - - assert task.document_ids == ["doc-1", "doc-2", "doc-3"] - - # ======================================================================== - # Batch Operations Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_batch_operation_with_multiple_documents(self, mock_task): - """ - Test batch operation with multiple documents. 
- - This test verifies that the proxy correctly handles batch operations - with multiple document IDs in a single task. - """ - # Arrange - document_ids = [f"doc-{i}" for i in range(10)] - - proxy = DocumentIndexingTaskProxy("tenant-123", "dataset-456", document_ids) - - mock_task.delay = Mock() - - # Act - proxy._send_to_direct_queue(mock_task) - - # Assert - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=document_ids - ) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_batch_operation_with_large_batch(self, mock_task): - """ - Test batch operation with large batch of documents. - - This test verifies that the proxy correctly handles large batches - of document IDs, which may occur in bulk indexing scenarios. - """ - # Arrange - document_ids = [f"doc-{i}" for i in range(100)] - - proxy = DocumentIndexingTaskProxy("tenant-123", "dataset-456", document_ids) - - mock_task.delay = Mock() - - # Act - proxy._send_to_direct_queue(mock_task) - - # Assert - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=document_ids - ) - - assert len(mock_task.delay.call_args[1]["document_ids"]) == 100 - - # ======================================================================== - # Error Handling Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_direct_queue_task_delay_failure(self, mock_task): - """ - Test _send_to_direct_queue when task.delay() raises an exception. - - This test verifies that exceptions raised by task.delay() are - propagated correctly and not swallowed. - """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - mock_task.delay.side_effect = Exception("Task delay failed") - - # Act & Assert - with pytest.raises(Exception, match="Task delay failed"): - proxy._send_to_direct_queue(mock_task) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_tenant_queue_push_tasks_failure(self, mock_task): - """ - Test _send_to_tenant_queue when push_tasks raises an exception. - - This test verifies that exceptions raised by push_tasks are - propagated correctly when a task key exists. - """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - mock_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue(has_task_key=True) - - mock_queue.push_tasks.side_effect = Exception("Push tasks failed") - - proxy._tenant_isolated_task_queue = mock_queue - - # Act & Assert - with pytest.raises(Exception, match="Push tasks failed"): - proxy._send_to_tenant_queue(mock_task) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_tenant_queue_set_waiting_time_failure(self, mock_task): - """ - Test _send_to_tenant_queue when set_task_waiting_time raises an exception. - - This test verifies that exceptions raised by set_task_waiting_time are - propagated correctly when no task key exists. 
- """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - mock_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue(has_task_key=False) - - mock_queue.set_task_waiting_time.side_effect = Exception("Set waiting time failed") - - proxy._tenant_isolated_task_queue = mock_queue - - # Act & Assert - with pytest.raises(Exception, match="Set waiting time failed"): - proxy._send_to_tenant_queue(mock_task) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_dispatch_feature_service_failure(self, mock_feature_service): - """ - Test _dispatch when FeatureService.get_features raises an exception. - - This test verifies that exceptions raised by FeatureService.get_features - are propagated correctly during dispatch. - """ - # Arrange - mock_feature_service.get_features.side_effect = Exception("Feature service failed") - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - # Act & Assert - with pytest.raises(Exception, match="Feature service failed"): - proxy._dispatch() - - # ======================================================================== - # Integration Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_full_flow_sandbox_plan(self, mock_task, mock_feature_service): - """ - Test full flow for SANDBOX plan with tenant queue. - - This test verifies the complete flow from delay() call to task - scheduling for a SANDBOX plan tenant, including tenant isolation. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.SANDBOX - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._tenant_isolated_task_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue( - has_task_key=False - ) - - mock_task.delay = Mock() - - # Act - proxy.delay() - - # Assert - proxy._tenant_isolated_task_queue.set_task_waiting_time.assert_called_once() - - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=["doc-1", "doc-2", "doc-3"] - ) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - @patch("services.document_indexing_proxy.document_indexing_task_proxy.priority_document_indexing_task") - def test_full_flow_team_plan(self, mock_task, mock_feature_service): - """ - Test full flow for TEAM plan with priority tenant queue. - - This test verifies the complete flow from delay() call to task - scheduling for a TEAM plan tenant, including priority routing. 
- """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.TEAM - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._tenant_isolated_task_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue( - has_task_key=False - ) - - mock_task.delay = Mock() - - # Act - proxy.delay() - - # Assert - proxy._tenant_isolated_task_queue.set_task_waiting_time.assert_called_once() - - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=["doc-1", "doc-2", "doc-3"] - ) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - @patch("services.document_indexing_proxy.document_indexing_task_proxy.priority_document_indexing_task") - def test_full_flow_billing_disabled(self, mock_task, mock_feature_service): - """ - Test full flow for billing disabled (self-hosted/enterprise). - - This test verifies the complete flow from delay() call to task - scheduling when billing is disabled, using priority direct queue. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features(billing_enabled=False) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - mock_task.delay = Mock() - - # Act - proxy.delay() - - # Assert - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=["doc-1", "doc-2", "doc-3"] - ) - - @patch("services.document_indexing_task_proxy.FeatureService") - @patch("services.document_indexing_task_proxy.normal_document_indexing_task") - def test_full_flow_with_existing_task_key(self, mock_task, mock_feature_service): - """ - Test full flow when task key exists (task queuing). - - This test verifies the complete flow when another task is already - running, ensuring the new task is queued correctly. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.SANDBOX - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._tenant_isolated_task_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue( - has_task_key=True - ) - - mock_task.delay = Mock() - - # Act - proxy.delay() - - # Assert - proxy._tenant_isolated_task_queue.push_tasks.assert_called_once() - - pushed_tasks = proxy._tenant_isolated_task_queue.push_tasks.call_args[0][0] - - expected_task_data = { - "tenant_id": "tenant-123", - "dataset_id": "dataset-456", - "document_ids": ["doc-1", "doc-2", "doc-3"], - } - assert pushed_tasks[0] == expected_task_data - - assert pushed_tasks[0]["document_ids"] == ["doc-1", "doc-2", "doc-3"] - - mock_task.delay.assert_not_called() diff --git a/api/tests/unit_tests/services/external_dataset_service.py b/api/tests/unit_tests/services/external_dataset_service.py deleted file mode 100644 index 83bae370eb..0000000000 --- a/api/tests/unit_tests/services/external_dataset_service.py +++ /dev/null @@ -1,925 +0,0 @@ -""" -Extensive unit tests for ``ExternalDatasetService``. - -This module focuses on the *external dataset service* surface area, which is responsible -for integrating with **external knowledge APIs** and wiring them into Dify datasets. 
- -The goal of this test suite is twofold: - -- Provide **high‑confidence regression coverage** for all public helpers on - ``ExternalDatasetService``. -- Serve as **executable documentation** for how external API integration is expected - to behave in different scenarios (happy paths, validation failures, and error codes). - -The file intentionally contains **rich comments and generous spacing** in order to make -each scenario easy to scan during reviews. -""" - -from __future__ import annotations - -import json -from types import SimpleNamespace -from typing import Any -from unittest.mock import MagicMock, Mock, patch - -import httpx -import pytest - -from constants import HIDDEN_VALUE -from models.dataset import Dataset, ExternalKnowledgeApis, ExternalKnowledgeBindings -from services.entities.external_knowledge_entities.external_knowledge_entities import ( - Authorization, - AuthorizationConfig, - ExternalKnowledgeApiSetting, -) -from services.errors.dataset import DatasetNameDuplicateError -from services.external_knowledge_service import ExternalDatasetService - - -class ExternalDatasetTestDataFactory: - """ - Factory helpers for building *lightweight* mocks for external knowledge tests. - - These helpers are intentionally small and explicit: - - - They avoid pulling in unnecessary fixtures. - - They reflect the minimal contract that the service under test cares about. - """ - - @staticmethod - def create_external_api( - api_id: str = "api-123", - tenant_id: str = "tenant-1", - name: str = "Test API", - description: str = "Description", - settings: dict[str, Any] | None = None, - ) -> ExternalKnowledgeApis: - """ - Create a concrete ``ExternalKnowledgeApis`` instance with minimal fields. - - Using the real SQLAlchemy model (instead of a pure Mock) makes it easier to - exercise ``settings_dict`` and other convenience properties if needed. - """ - - instance = ExternalKnowledgeApis( - tenant_id=tenant_id, - name=name, - description=description, - settings=None if settings is None else json.dumps(settings), - ) - - # Overwrite generated id for determinism in assertions. - instance.id = api_id - return instance - - @staticmethod - def create_dataset( - dataset_id: str = "ds-1", - tenant_id: str = "tenant-1", - name: str = "External Dataset", - provider: str = "external", - ) -> Dataset: - """ - Build a small ``Dataset`` instance representing an external dataset. - """ - - dataset = Dataset( - tenant_id=tenant_id, - name=name, - description="", - provider=provider, - created_by="user-1", - ) - dataset.id = dataset_id - return dataset - - @staticmethod - def create_external_binding( - tenant_id: str = "tenant-1", - dataset_id: str = "ds-1", - api_id: str = "api-1", - external_knowledge_id: str = "knowledge-1", - ) -> ExternalKnowledgeBindings: - """ - Small helper for a binding between dataset and external knowledge API. - """ - - binding = ExternalKnowledgeBindings( - tenant_id=tenant_id, - dataset_id=dataset_id, - external_knowledge_api_id=api_id, - external_knowledge_id=external_knowledge_id, - created_by="user-1", - ) - return binding - - -# --------------------------------------------------------------------------- -# get_external_knowledge_apis -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceGetExternalKnowledgeApis: - """ - Tests for ``ExternalDatasetService.get_external_knowledge_apis``. - - These tests focus on: - - - Basic pagination wiring via ``db.paginate``.
- - Optional search keyword behaviour. - """ - - @pytest.fixture - def mock_db_paginate(self): - """ - Patch ``db.paginate`` so we do not touch the real database layer. - """ - - with ( - patch("services.external_knowledge_service.db.paginate", autospec=True) as mock_paginate, - patch("services.external_knowledge_service.select", autospec=True), - ): - yield mock_paginate - - def test_get_external_knowledge_apis_basic_pagination(self, mock_db_paginate: MagicMock): - """ - It should return ``items`` and ``total`` coming from the paginate object. - """ - - # Arrange - tenant_id = "tenant-1" - page = 1 - per_page = 20 - - mock_items = [Mock(spec=ExternalKnowledgeApis), Mock(spec=ExternalKnowledgeApis)] - mock_pagination = SimpleNamespace(items=mock_items, total=42) - mock_db_paginate.return_value = mock_pagination - - # Act - items, total = ExternalDatasetService.get_external_knowledge_apis(page, per_page, tenant_id) - - # Assert - assert items is mock_items - assert total == 42 - - mock_db_paginate.assert_called_once() - call_kwargs = mock_db_paginate.call_args.kwargs - assert call_kwargs["page"] == page - assert call_kwargs["per_page"] == per_page - assert call_kwargs["max_per_page"] == 100 - assert call_kwargs["error_out"] is False - - def test_get_external_knowledge_apis_with_search_keyword(self, mock_db_paginate: MagicMock): - """ - When a search keyword is provided, the query should be adjusted - (we simply assert that paginate is still called and does not explode). - """ - - # Arrange - tenant_id = "tenant-1" - page = 2 - per_page = 10 - search = "foo" - - mock_pagination = SimpleNamespace(items=[], total=0) - mock_db_paginate.return_value = mock_pagination - - # Act - items, total = ExternalDatasetService.get_external_knowledge_apis(page, per_page, tenant_id, search=search) - - # Assert - assert items == [] - assert total == 0 - mock_db_paginate.assert_called_once() - - -# --------------------------------------------------------------------------- -# validate_api_list -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceValidateApiList: - """ - Lightweight validation tests for ``validate_api_list``. - """ - - def test_validate_api_list_success(self): - """ - A minimal valid configuration (endpoint + api_key) should pass. - """ - - config = {"endpoint": "https://example.com", "api_key": "secret"} - - # Act & Assert – no exception expected - ExternalDatasetService.validate_api_list(config) - - @pytest.mark.parametrize( - ("config", "expected_message"), - [ - ({}, "api list is empty"), - ({"api_key": "k"}, "endpoint is required"), - ({"endpoint": "https://example.com"}, "api_key is required"), - ], - ) - def test_validate_api_list_failures(self, config: dict[str, Any], expected_message: str): - """ - Invalid configs should raise ``ValueError`` with a clear message. - """ - - with pytest.raises(ValueError, match=expected_message): - ExternalDatasetService.validate_api_list(config) - - -# --------------------------------------------------------------------------- -# create_external_knowledge_api & get/update/delete -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceCrudExternalKnowledgeApi: - """ - CRUD tests for external knowledge API templates. - """ - - @pytest.fixture - def mock_db_session(self): - """ - Patch ``db.session`` for all CRUD tests in this class. 
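# ----------------------------------------------------------------------------
# Illustrative aside: a minimal sketch of the validation contract pinned down
# by the parametrized ``validate_api_list`` test above. The error messages are
# taken from the test; the function body itself is an assumption, not the
# service's actual implementation.
from typing import Any


def validate_api_list_sketch(settings: dict[str, Any]) -> None:
    if not settings:
        raise ValueError("api list is empty")
    if "endpoint" not in settings:
        raise ValueError("endpoint is required")
    if "api_key" not in settings:
        raise ValueError("api_key is required")


# Usage: a minimal valid config passes silently.
validate_api_list_sketch({"endpoint": "https://example.com", "api_key": "secret"})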
- """ - - with patch("services.external_knowledge_service.db.session", autospec=True) as mock_session: - yield mock_session - - def test_create_external_knowledge_api_success(self, mock_db_session: MagicMock): - """ - ``create_external_knowledge_api`` should persist a new record - when settings are present and valid. - """ - - tenant_id = "tenant-1" - user_id = "user-1" - args = { - "name": "API", - "description": "desc", - "settings": {"endpoint": "https://api.example.com", "api_key": "secret"}, - } - - # We do not want to actually call the remote endpoint here, so we patch the validator. - with patch.object(ExternalDatasetService, "check_endpoint_and_api_key", autospec=True) as mock_check: - result = ExternalDatasetService.create_external_knowledge_api(tenant_id, user_id, args) - - assert isinstance(result, ExternalKnowledgeApis) - mock_check.assert_called_once_with(args["settings"]) - mock_db_session.add.assert_called_once() - mock_db_session.commit.assert_called_once() - - def test_create_external_knowledge_api_missing_settings_raises(self, mock_db_session: MagicMock): - """ - Missing ``settings`` should result in a ``ValueError``. - """ - - tenant_id = "tenant-1" - user_id = "user-1" - args = {"name": "API", "description": "desc"} - - with pytest.raises(ValueError, match="settings is required"): - ExternalDatasetService.create_external_knowledge_api(tenant_id, user_id, args) - - mock_db_session.add.assert_not_called() - mock_db_session.commit.assert_not_called() - - def test_get_external_knowledge_api_found(self, mock_db_session: MagicMock): - """ - ``get_external_knowledge_api`` should return the first matching record. - """ - - api = Mock(spec=ExternalKnowledgeApis) - mock_db_session.scalar.return_value = api - - result = ExternalDatasetService.get_external_knowledge_api("api-id", "tenant-id") - assert result is api - - def test_get_external_knowledge_api_not_found_raises(self, mock_db_session: MagicMock): - """ - When the record is absent, a ``ValueError`` is raised. - """ - - mock_db_session.scalar.return_value = None - - with pytest.raises(ValueError, match="api template not found"): - ExternalDatasetService.get_external_knowledge_api("missing-id", "tenant-id") - - def test_update_external_knowledge_api_success_with_hidden_api_key(self, mock_db_session: MagicMock): - """ - Updating an API should keep the existing API key when the special hidden - value placeholder is sent from the UI. - """ - - tenant_id = "tenant-1" - user_id = "user-1" - api_id = "api-1" - - existing_api = Mock(spec=ExternalKnowledgeApis) - existing_api.settings_dict = {"api_key": "stored-key"} - existing_api.settings = '{"api_key":"stored-key"}' - mock_db_session.scalar.return_value = existing_api - - args = { - "name": "New Name", - "description": "New Desc", - "settings": {"endpoint": "https://api.example.com", "api_key": HIDDEN_VALUE}, - } - - result = ExternalDatasetService.update_external_knowledge_api(tenant_id, user_id, api_id, args) - - assert result is existing_api - # The placeholder should be replaced with stored key. - assert args["settings"]["api_key"] == "stored-key" - mock_db_session.commit.assert_called_once() - - def test_update_external_knowledge_api_not_found_raises(self, mock_db_session: MagicMock): - """ - Updating a non‑existent API template should raise ``ValueError``. 
- """ - - mock_db_session.scalar.return_value = None - - with pytest.raises(ValueError, match="api template not found"): - ExternalDatasetService.update_external_knowledge_api( - tenant_id="tenant-1", - user_id="user-1", - external_knowledge_api_id="missing-id", - args={"name": "n", "description": "d", "settings": {}}, - ) - - def test_delete_external_knowledge_api_success(self, mock_db_session: MagicMock): - """ - ``delete_external_knowledge_api`` should delete and commit when found. - """ - - api = Mock(spec=ExternalKnowledgeApis) - mock_db_session.scalar.return_value = api - - ExternalDatasetService.delete_external_knowledge_api("tenant-1", "api-1") - - mock_db_session.delete.assert_called_once_with(api) - mock_db_session.commit.assert_called_once() - - def test_delete_external_knowledge_api_not_found_raises(self, mock_db_session: MagicMock): - """ - Deletion of a missing template should raise ``ValueError``. - """ - - mock_db_session.scalar.return_value = None - - with pytest.raises(ValueError, match="api template not found"): - ExternalDatasetService.delete_external_knowledge_api("tenant-1", "missing") - - -# --------------------------------------------------------------------------- -# external_knowledge_api_use_check & binding lookups -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceUsageAndBindings: - """ - Tests for usage checks and dataset binding retrieval. - """ - - @pytest.fixture - def mock_db_session(self): - with patch("services.external_knowledge_service.db.session", autospec=True) as mock_session: - yield mock_session - - def test_external_knowledge_api_use_check_in_use(self, mock_db_session: MagicMock): - """ - When there are bindings, ``external_knowledge_api_use_check`` returns True and count. - """ - - mock_db_session.scalar.return_value = 3 - - in_use, count = ExternalDatasetService.external_knowledge_api_use_check("api-1", "tenant-1") - - assert in_use is True - assert count == 3 - assert "tenant_id" in str(mock_db_session.scalar.call_args.args[0]) - - def test_external_knowledge_api_use_check_not_in_use(self, mock_db_session: MagicMock): - """ - Zero bindings should return ``(False, 0)``. - """ - - mock_db_session.scalar.return_value = 0 - - in_use, count = ExternalDatasetService.external_knowledge_api_use_check("api-1", "tenant-1") - - assert in_use is False - assert count == 0 - - def test_get_external_knowledge_binding_with_dataset_id_found(self, mock_db_session: MagicMock): - """ - Binding lookup should return the first record when present. - """ - - binding = Mock(spec=ExternalKnowledgeBindings) - mock_db_session.scalar.return_value = binding - - result = ExternalDatasetService.get_external_knowledge_binding_with_dataset_id("tenant-1", "ds-1") - assert result is binding - - def test_get_external_knowledge_binding_with_dataset_id_not_found_raises(self, mock_db_session: MagicMock): - """ - Missing binding should result in a ``ValueError``. - """ - - mock_db_session.scalar.return_value = None - - with pytest.raises(ValueError, match="external knowledge binding not found"): - ExternalDatasetService.get_external_knowledge_binding_with_dataset_id("tenant-1", "ds-1") - - -# --------------------------------------------------------------------------- -# document_create_args_validate -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceDocumentCreateArgsValidate: - """ - Tests for ``document_create_args_validate``. 
- """ - - @pytest.fixture - def mock_db_session(self): - with patch("services.external_knowledge_service.db.session", autospec=True) as mock_session: - yield mock_session - - def test_document_create_args_validate_success(self, mock_db_session: MagicMock): - """ - All required custom parameters present – validation should pass. - """ - - external_api = Mock(spec=ExternalKnowledgeApis) - external_api.settings = json_settings = ( - '[{"document_process_setting":[{"name":"foo","required":true},{"name":"bar","required":false}]}]' - ) - # Raw string; the service itself calls json.loads on it - mock_db_session.scalar.return_value = external_api - - process_parameter = {"foo": "value", "bar": "optional"} - - # Act & Assert – no exception - ExternalDatasetService.document_create_args_validate("tenant-1", "api-1", process_parameter) - - assert json_settings in external_api.settings # simple sanity check on our test data - - def test_document_create_args_validate_missing_template_raises(self, mock_db_session: MagicMock): - """ - When the referenced API template is missing, a ``ValueError`` is raised. - """ - - mock_db_session.scalar.return_value = None - - with pytest.raises(ValueError, match="api template not found"): - ExternalDatasetService.document_create_args_validate("tenant-1", "missing", {}) - - def test_document_create_args_validate_missing_required_parameter_raises(self, mock_db_session: MagicMock): - """ - Required document process parameters must be supplied. - """ - - external_api = Mock(spec=ExternalKnowledgeApis) - external_api.settings = ( - '[{"document_process_setting":[{"name":"foo","required":true},{"name":"bar","required":false}]}]' - ) - mock_db_session.scalar.return_value = external_api - - process_parameter = {"bar": "present"} # missing "foo" - - with pytest.raises(ValueError, match="foo is required"): - ExternalDatasetService.document_create_args_validate("tenant-1", "api-1", process_parameter) - - -# --------------------------------------------------------------------------- -# process_external_api -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceProcessExternalApi: - """ - Tests focused on the HTTP request assembly and method mapping behaviour. - """ - - def test_process_external_api_valid_method_post(self): - """ - For a supported HTTP verb we should delegate to the correct ``ssrf_proxy`` function. - """ - - settings = ExternalKnowledgeApiSetting( - url="https://example.com/path", - request_method="POST", - headers={"X-Test": "1"}, - params={"foo": "bar"}, - ) - - fake_response = httpx.Response(200) - - with patch("services.external_knowledge_service.ssrf_proxy.post", autospec=True) as mock_post: - mock_post.return_value = fake_response - - result = ExternalDatasetService.process_external_api(settings, files=None) - - assert result is fake_response - mock_post.assert_called_once() - kwargs = mock_post.call_args.kwargs - assert kwargs["url"] == settings.url - assert kwargs["headers"] == settings.headers - assert kwargs["follow_redirects"] is True - assert "data" in kwargs - - def test_process_external_api_invalid_method_raises(self): - """ - An unsupported HTTP verb should raise ``InvalidHttpMethodError``. 
- """ - - settings = ExternalKnowledgeApiSetting( - url="https://example.com", - request_method="INVALID", - headers=None, - params={}, - ) - - from graphon.nodes.http_request.exc import InvalidHttpMethodError - - with pytest.raises(InvalidHttpMethodError): - ExternalDatasetService.process_external_api(settings, files=None) - - -# --------------------------------------------------------------------------- -# assembling_headers -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceAssemblingHeaders: - """ - Tests for header assembly based on different authentication flavours. - """ - - def test_assembling_headers_bearer_token(self): - """ - For bearer auth we expect ``Authorization: Bearer `` by default. - """ - - auth = Authorization( - type="api-key", - config=AuthorizationConfig(type="bearer", api_key="secret", header=None), - ) - - headers = ExternalDatasetService.assembling_headers(auth) - - assert headers["Authorization"] == "Bearer secret" - - def test_assembling_headers_basic_token_with_custom_header(self): - """ - For basic auth we honour the configured header name. - """ - - auth = Authorization( - type="api-key", - config=AuthorizationConfig(type="basic", api_key="abc123", header="X-Auth"), - ) - - headers = ExternalDatasetService.assembling_headers(auth, headers={"Existing": "1"}) - - assert headers["Existing"] == "1" - assert headers["X-Auth"] == "Basic abc123" - - def test_assembling_headers_custom_type(self): - """ - Custom auth type should inject the raw API key. - """ - - auth = Authorization( - type="api-key", - config=AuthorizationConfig(type="custom", api_key="raw-key", header="X-API-KEY"), - ) - - headers = ExternalDatasetService.assembling_headers(auth, headers=None) - - assert headers["X-API-KEY"] == "raw-key" - - def test_assembling_headers_missing_config_raises(self): - """ - Missing config object should be rejected. - """ - - auth = Authorization(type="api-key", config=None) - - with pytest.raises(ValueError, match="authorization config is required"): - ExternalDatasetService.assembling_headers(auth) - - def test_assembling_headers_missing_api_key_raises(self): - """ - ``api_key`` is required when type is ``api-key``. - """ - - auth = Authorization( - type="api-key", - config=AuthorizationConfig(type="bearer", api_key=None, header="Authorization"), - ) - - with pytest.raises(ValueError, match="api_key is required"): - ExternalDatasetService.assembling_headers(auth) - - def test_assembling_headers_no_auth_type_leaves_headers_unchanged(self): - """ - For ``no-auth`` we should not modify the headers mapping. - """ - - auth = Authorization(type="no-auth", config=None) - - base_headers = {"X": "1"} - result = ExternalDatasetService.assembling_headers(auth, headers=base_headers) - - # A copy is returned, original is not mutated. - assert result == base_headers - assert result is not base_headers - - -# --------------------------------------------------------------------------- -# get_external_knowledge_api_settings -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceGetExternalKnowledgeApiSettings: - """ - Simple shape test for ``get_external_knowledge_api_settings``. 
- """ - - def test_get_external_knowledge_api_settings(self): - settings_dict: dict[str, Any] = { - "url": "https://example.com/retrieval", - "request_method": "post", - "headers": {"Content-Type": "application/json"}, - "params": {"foo": "bar"}, - } - - result = ExternalDatasetService.get_external_knowledge_api_settings(settings_dict) - - assert isinstance(result, ExternalKnowledgeApiSetting) - assert result.url == settings_dict["url"] - assert result.request_method == settings_dict["request_method"] - assert result.headers == settings_dict["headers"] - assert result.params == settings_dict["params"] - - -# --------------------------------------------------------------------------- -# create_external_dataset -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceCreateExternalDataset: - """ - Tests around creating the external dataset and its binding row. - """ - - @pytest.fixture - def mock_db_session(self): - with patch("services.external_knowledge_service.db.session", autospec=True) as mock_session: - yield mock_session - - def test_create_external_dataset_success(self, mock_db_session: MagicMock): - """ - A brand new dataset name with valid external knowledge references - should create both the dataset and its binding. - """ - - tenant_id = "tenant-1" - user_id = "user-1" - - args = { - "name": "My Dataset", - "description": "desc", - "external_knowledge_api_id": "api-1", - "external_knowledge_id": "knowledge-1", - "external_retrieval_model": {"top_k": 3}, - } - - # No existing dataset with same name. - mock_db_session.scalar.side_effect = [ - None, # duplicate‑name check - Mock(spec=ExternalKnowledgeApis), # external knowledge api - ] - - dataset = ExternalDatasetService.create_external_dataset(tenant_id, user_id, args) - - assert isinstance(dataset, Dataset) - assert dataset.provider == "external" - assert dataset.retrieval_model == args["external_retrieval_model"] - - assert mock_db_session.add.call_count >= 2 # dataset + binding - mock_db_session.flush.assert_called_once() - mock_db_session.commit.assert_called_once() - - def test_create_external_dataset_duplicate_name_raises(self, mock_db_session: MagicMock): - """ - When a dataset with the same name already exists, - ``DatasetNameDuplicateError`` is raised. - """ - - existing_dataset = Mock(spec=Dataset) - mock_db_session.scalar.return_value = existing_dataset - - args = { - "name": "Existing", - "external_knowledge_api_id": "api-1", - "external_knowledge_id": "knowledge-1", - } - - with pytest.raises(DatasetNameDuplicateError): - ExternalDatasetService.create_external_dataset("tenant-1", "user-1", args) - - mock_db_session.add.assert_not_called() - mock_db_session.commit.assert_not_called() - - def test_create_external_dataset_missing_api_template_raises(self, mock_db_session: MagicMock): - """ - If the referenced external knowledge API does not exist, a ``ValueError`` is raised. - """ - - # First call: duplicate name check – not found. - mock_db_session.scalar.side_effect = [ - None, - None, # external knowledge api lookup - ] - - args = { - "name": "Dataset", - "external_knowledge_api_id": "missing", - "external_knowledge_id": "knowledge-1", - } - - with pytest.raises(ValueError, match="api template not found"): - ExternalDatasetService.create_external_dataset("tenant-1", "user-1", args) - - def test_create_external_dataset_missing_required_ids_raise(self, mock_db_session: MagicMock): - """ - ``external_knowledge_id`` and ``external_knowledge_api_id`` are mandatory. 
- """ - - # duplicate name check — two calls to create_external_dataset, each does 2 scalar calls - mock_db_session.scalar.side_effect = [ - None, - Mock(spec=ExternalKnowledgeApis), - None, - Mock(spec=ExternalKnowledgeApis), - ] - - args_missing_knowledge_id = { - "name": "Dataset", - "external_knowledge_api_id": "api-1", - "external_knowledge_id": None, - } - - with pytest.raises(ValueError, match="external_knowledge_id is required"): - ExternalDatasetService.create_external_dataset("tenant-1", "user-1", args_missing_knowledge_id) - - args_missing_api_id = { - "name": "Dataset", - "external_knowledge_api_id": None, - "external_knowledge_id": "k-1", - } - - with pytest.raises(ValueError, match="external_knowledge_api_id is required"): - ExternalDatasetService.create_external_dataset("tenant-1", "user-1", args_missing_api_id) - - -# --------------------------------------------------------------------------- -# fetch_external_knowledge_retrieval -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceFetchExternalKnowledgeRetrieval: - """ - Tests for ``fetch_external_knowledge_retrieval`` which orchestrates - external retrieval requests and normalises the response payload. - """ - - @pytest.fixture - def mock_db_session(self): - with patch("services.external_knowledge_service.db.session", autospec=True) as mock_session: - yield mock_session - - def test_fetch_external_knowledge_retrieval_success(self, mock_db_session: MagicMock): - """ - With a valid binding and API template, records from the external - service should be returned when the HTTP response is 200. - """ - - tenant_id = "tenant-1" - dataset_id = "ds-1" - query = "test query" - external_retrieval_parameters = {"top_k": 3, "score_threshold_enabled": True, "score_threshold": 0.5} - - binding = ExternalDatasetTestDataFactory.create_external_binding( - tenant_id=tenant_id, - dataset_id=dataset_id, - api_id="api-1", - external_knowledge_id="knowledge-1", - ) - - api = Mock(spec=ExternalKnowledgeApis) - api.settings = '{"endpoint":"https://example.com","api_key":"secret"}' - - # First query: binding; second query: api. - mock_db_session.scalar.side_effect = [ - binding, - api, - ] - - fake_records = [{"content": "doc", "score": 0.9}] - fake_response = Mock(spec=httpx.Response) - fake_response.status_code = 200 - fake_response.json.return_value = {"records": fake_records} - - metadata_condition = SimpleNamespace(model_dump=lambda: {"field": "value"}) - - with patch.object( - ExternalDatasetService, "process_external_api", return_value=fake_response, autospec=True - ) as mock_process: - result = ExternalDatasetService.fetch_external_knowledge_retrieval( - tenant_id=tenant_id, - dataset_id=dataset_id, - query=query, - external_retrieval_parameters=external_retrieval_parameters, - metadata_condition=metadata_condition, - ) - - assert result == fake_records - - mock_process.assert_called_once() - setting_arg = mock_process.call_args.args[0] - assert isinstance(setting_arg, ExternalKnowledgeApiSetting) - assert setting_arg.url.endswith("/retrieval") - - def test_fetch_external_knowledge_retrieval_binding_not_found_raises(self, mock_db_session: MagicMock): - """ - Missing binding should raise ``ValueError``. 
- """ - - mock_db_session.scalar.return_value = None - - with pytest.raises(ValueError, match="external knowledge binding not found"): - ExternalDatasetService.fetch_external_knowledge_retrieval( - tenant_id="tenant-1", - dataset_id="missing", - query="q", - external_retrieval_parameters={}, - metadata_condition=None, - ) - - def test_fetch_external_knowledge_retrieval_missing_api_template_raises(self, mock_db_session: MagicMock): - """ - When the API template is missing or has no settings, a ``ValueError`` is raised. - """ - - binding = ExternalDatasetTestDataFactory.create_external_binding() - mock_db_session.scalar.side_effect = [ - binding, - None, - ] - - with pytest.raises(ValueError, match="external api template not found"): - ExternalDatasetService.fetch_external_knowledge_retrieval( - tenant_id="tenant-1", - dataset_id="ds-1", - query="q", - external_retrieval_parameters={}, - metadata_condition=None, - ) - - def test_fetch_external_knowledge_retrieval_non_200_status_returns_empty_list(self, mock_db_session: MagicMock): - """ - Non‑200 responses should be treated as an empty result set. - """ - - binding = ExternalDatasetTestDataFactory.create_external_binding() - api = Mock(spec=ExternalKnowledgeApis) - api.settings = '{"endpoint":"https://example.com","api_key":"secret"}' - - mock_db_session.scalar.side_effect = [ - binding, - api, - ] - - fake_response = Mock(spec=httpx.Response) - fake_response.status_code = 500 - fake_response.json.return_value = {} - - with patch.object(ExternalDatasetService, "process_external_api", return_value=fake_response, autospec=True): - result = ExternalDatasetService.fetch_external_knowledge_retrieval( - tenant_id="tenant-1", - dataset_id="ds-1", - query="q", - external_retrieval_parameters={}, - metadata_condition=None, - ) - - assert result == [] diff --git a/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_service.py b/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_service.py index 327281d07f..efb79aadde 100644 --- a/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_service.py +++ b/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_service.py @@ -374,24 +374,14 @@ def test_publish_workflow_success(mocker, rag_pipeline_service) -> None: mock_db = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db", mock_db) mock_dataset_service_class = mocker.patch("services.dataset_service.DatasetService") - mock_dataset_service = mock_dataset_service_class.return_value - # 6. Mock session and its scalar/query methods + # 6. Mock session and dataset lookup mock_session = mocker.Mock() mock_session.scalar.return_value = draft_wf - # Mock dataset update query (needed even if service is mocked, as rag_pipeline fetches it first) dataset = mocker.Mock() dataset.retrieval_model_dict = {} - dataset_query = mocker.Mock() - dataset_query.where.return_value.first.return_value = dataset - - # Mock node execution copy - node_exec_query = mocker.Mock() - node_exec_query.where.return_value.all.return_value = [] - - # Mocked session query side effects - mock_session.query.side_effect = [node_exec_query, dataset_query] + pipeline.retrieve_dataset.return_value = dataset # 7. 
Run test result = rag_pipeline_service.publish_workflow(session=mock_session, pipeline=pipeline, account=account) @@ -1524,7 +1514,6 @@ def test_handle_node_run_result_marks_document_error_for_published_invoke(mocker ) document = SimpleNamespace(indexing_status="waiting", error=None) - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.get", return_value=document) add_mock = mocker.patch("services.rag_pipeline.rag_pipeline.db.session.add") commit_mock = mocker.patch("services.rag_pipeline.rag_pipeline.db.session.commit") @@ -1595,7 +1584,6 @@ def test_publish_customized_pipeline_template_raises_for_missing_workflow_id(moc def test_get_pipeline_raises_when_dataset_missing(mocker, rag_pipeline_service) -> None: - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", return_value=None) with pytest.raises(ValueError, match="Dataset not found"): @@ -1604,7 +1592,6 @@ def test_get_pipeline_raises_when_dataset_missing(mocker, rag_pipeline_service) def test_get_pipeline_raises_when_pipeline_missing(mocker, rag_pipeline_service) -> None: dataset = SimpleNamespace(pipeline_id="p1") - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", side_effect=[dataset, None]) with pytest.raises(ValueError, match="Pipeline not found"): @@ -1644,7 +1631,6 @@ def test_get_pipeline_templates_builtin_en_us_no_fallback(mocker) -> None: def test_update_customized_pipeline_template_commits_when_name_empty(mocker) -> None: template = SimpleNamespace(name="old", description="old", icon={}, updated_by=None) - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", return_value=template) commit = mocker.patch("services.rag_pipeline.rag_pipeline.db.session.commit") mocker.patch("services.rag_pipeline.rag_pipeline.current_user", SimpleNamespace(id="u1", current_tenant_id="t1")) @@ -1871,7 +1857,6 @@ def test_run_free_workflow_node_delegates_to_handle_result(mocker, rag_pipeline_ def test_publish_customized_pipeline_template_raises_when_workflow_missing(mocker, rag_pipeline_service) -> None: pipeline = SimpleNamespace(id="p1", tenant_id="t1", workflow_id="wf-1") - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.get", side_effect=[pipeline, None]) with pytest.raises(ValueError, match="Workflow not found"): @@ -1910,7 +1895,6 @@ def test_get_recommended_plugins_skips_manifest_when_missing(mocker, rag_pipelin def test_retry_error_document_raises_when_pipeline_missing(mocker, rag_pipeline_service) -> None: exec_log = SimpleNamespace(pipeline_id="p1") - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", return_value=exec_log) mocker.patch("services.rag_pipeline.rag_pipeline.db.session.get", return_value=None) @@ -1923,7 +1907,6 @@ def test_retry_error_document_raises_when_pipeline_missing(mocker, rag_pipeline_ def test_retry_error_document_raises_when_workflow_missing(mocker, rag_pipeline_service) -> None: exec_log = SimpleNamespace(pipeline_id="p1") pipeline = SimpleNamespace(id="p1") - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", return_value=exec_log) mocker.patch("services.rag_pipeline.rag_pipeline.db.session.get", return_value=pipeline) mocker.patch.object(rag_pipeline_service, "get_published_workflow", return_value=None) @@ -1940,7 +1923,6 @@ def test_get_datasource_plugins_returns_empty_for_non_datasource_nodes(mocker, r workflow = SimpleNamespace( 
graph_dict={"nodes": [{"id": "n1", "data": {"type": "start"}}]}, rag_pipeline_variables=[] ) - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", side_effect=[dataset, pipeline]) mocker.patch.object(rag_pipeline_service, "get_published_workflow", return_value=workflow) @@ -2103,7 +2085,6 @@ def test_get_datasource_plugins_handles_empty_datasource_data_and_non_published( graph_dict={"nodes": [{"id": "n1", "data": {"type": "datasource", "datasource_parameters": {}}}]}, rag_pipeline_variables=[{"variable": "v1", "belong_to_node_id": "shared"}], ) - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", side_effect=[dataset, pipeline]) mocker.patch.object(rag_pipeline_service, "get_draft_workflow", return_value=workflow) mocker.patch( @@ -2143,7 +2124,6 @@ def test_get_datasource_plugins_extracts_user_inputs_and_credentials(mocker, rag {"variable": "v3", "belong_to_node_id": "shared"}, ], ) - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", side_effect=[dataset, pipeline]) mocker.patch.object(rag_pipeline_service, "get_published_workflow", return_value=workflow) mocker.patch( @@ -2161,7 +2141,6 @@ def test_get_datasource_plugins_extracts_user_inputs_and_credentials(mocker, rag def test_get_pipeline_returns_pipeline_when_found(mocker, rag_pipeline_service) -> None: dataset = SimpleNamespace(pipeline_id="p1") pipeline = SimpleNamespace(id="p1") - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", side_effect=[dataset, pipeline]) result = rag_pipeline_service.get_pipeline("t1", "d1") diff --git a/api/tests/unit_tests/services/segment_service.py b/api/tests/unit_tests/services/segment_service.py deleted file mode 100644 index f0a66a00d4..0000000000 --- a/api/tests/unit_tests/services/segment_service.py +++ /dev/null @@ -1,1115 +0,0 @@ -from unittest.mock import MagicMock, Mock, patch - -import pytest - -from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType -from models.account import Account -from models.dataset import ChildChunk, Dataset, Document, DocumentSegment -from models.enums import SegmentType -from services.dataset_service import SegmentService -from services.entities.knowledge_entities.knowledge_entities import SegmentUpdateArgs -from services.errors.chunk import ChildChunkDeleteIndexError, ChildChunkIndexingError - - -class SegmentTestDataFactory: - """Factory class for creating test data and mock objects for segment service tests.""" - - @staticmethod - def create_segment_mock( - segment_id: str = "segment-123", - document_id: str = "doc-123", - dataset_id: str = "dataset-123", - tenant_id: str = "tenant-123", - content: str = "Test segment content", - position: int = 1, - enabled: bool = True, - status: str = "completed", - word_count: int = 3, - tokens: int = 5, - **kwargs, - ) -> Mock: - """Create a mock segment with specified attributes.""" - segment = Mock(spec=DocumentSegment) - segment.id = segment_id - segment.document_id = document_id - segment.dataset_id = dataset_id - segment.tenant_id = tenant_id - segment.content = content - segment.position = position - segment.enabled = enabled - segment.status = status - segment.word_count = word_count - segment.tokens = tokens - segment.index_node_id = f"node-{segment_id}" - segment.index_node_hash = "hash-123" - segment.keywords = [] - segment.answer = None - segment.disabled_at = None - segment.disabled_by = None - segment.updated_by = 
None - segment.updated_at = None - segment.indexing_at = None - segment.completed_at = None - segment.error = None - for key, value in kwargs.items(): - setattr(segment, key, value) - return segment - - @staticmethod - def create_child_chunk_mock( - chunk_id: str = "chunk-123", - segment_id: str = "segment-123", - document_id: str = "doc-123", - dataset_id: str = "dataset-123", - tenant_id: str = "tenant-123", - content: str = "Test child chunk content", - position: int = 1, - word_count: int = 3, - **kwargs, - ) -> Mock: - """Create a mock child chunk with specified attributes.""" - chunk = Mock(spec=ChildChunk) - chunk.id = chunk_id - chunk.segment_id = segment_id - chunk.document_id = document_id - chunk.dataset_id = dataset_id - chunk.tenant_id = tenant_id - chunk.content = content - chunk.position = position - chunk.word_count = word_count - chunk.index_node_id = f"node-{chunk_id}" - chunk.index_node_hash = "hash-123" - chunk.type = SegmentType.AUTOMATIC - chunk.created_by = "user-123" - chunk.updated_by = None - chunk.updated_at = None - for key, value in kwargs.items(): - setattr(chunk, key, value) - return chunk - - @staticmethod - def create_document_mock( - document_id: str = "doc-123", - dataset_id: str = "dataset-123", - tenant_id: str = "tenant-123", - doc_form: str = IndexStructureType.PARAGRAPH_INDEX, - word_count: int = 100, - **kwargs, - ) -> Mock: - """Create a mock document with specified attributes.""" - document = Mock(spec=Document) - document.id = document_id - document.dataset_id = dataset_id - document.tenant_id = tenant_id - document.doc_form = doc_form - document.word_count = word_count - for key, value in kwargs.items(): - setattr(document, key, value) - return document - - @staticmethod - def create_dataset_mock( - dataset_id: str = "dataset-123", - tenant_id: str = "tenant-123", - indexing_technique: str = IndexTechniqueType.HIGH_QUALITY, - embedding_model: str = "text-embedding-ada-002", - embedding_model_provider: str = "openai", - **kwargs, - ) -> Mock: - """Create a mock dataset with specified attributes.""" - dataset = Mock(spec=Dataset) - dataset.id = dataset_id - dataset.tenant_id = tenant_id - dataset.indexing_technique = indexing_technique - dataset.embedding_model = embedding_model - dataset.embedding_model_provider = embedding_model_provider - for key, value in kwargs.items(): - setattr(dataset, key, value) - return dataset - - @staticmethod - def create_user_mock( - user_id: str = "user-789", - tenant_id: str = "tenant-123", - **kwargs, - ) -> Mock: - """Create a mock user with specified attributes.""" - user = Mock(spec=Account) - user.id = user_id - user.current_tenant_id = tenant_id - user.name = "Test User" - for key, value in kwargs.items(): - setattr(user, key, value) - return user - - -class TestSegmentServiceCreateSegment: - """Tests for SegmentService.create_segment method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def test_create_segment_success(self, mock_db_session, mock_current_user): - """Test successful creation of a segment.""" - # Arrange - document = SegmentTestDataFactory.create_document_mock(word_count=100) - dataset = 
SegmentTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - args = {"content": "New segment content", "keywords": ["test", "segment"]} - - mock_query = MagicMock() - mock_query.where.return_value.scalar.return_value = None # No existing segments - mock_db_session.query.return_value = mock_query - - mock_segment = SegmentTestDataFactory.create_segment_mock() - mock_db_session.query.return_value.where.return_value.first.return_value = mock_segment - - with ( - patch("services.dataset_service.redis_client.lock", autospec=True) as mock_lock, - patch( - "services.dataset_service.VectorService.create_segments_vector", autospec=True - ) as mock_vector_service, - patch("services.dataset_service.helper.generate_text_hash", autospec=True) as mock_hash, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_lock.return_value.__enter__ = Mock() - mock_lock.return_value.__exit__ = Mock(return_value=None) - mock_hash.return_value = "hash-123" - mock_now.return_value = "2024-01-01T00:00:00" - - # Act - result = SegmentService.create_segment(args, document, dataset) - - # Assert - assert mock_db_session.add.call_count == 2 - - created_segment = mock_db_session.add.call_args_list[0].args[0] - assert isinstance(created_segment, DocumentSegment) - assert created_segment.content == args["content"] - assert created_segment.word_count == len(args["content"]) - - mock_db_session.commit.assert_called_once() - - mock_vector_service.assert_called_once() - vector_call_args = mock_vector_service.call_args[0] - assert vector_call_args[0] == [args["keywords"]] - assert vector_call_args[1][0] == created_segment - assert vector_call_args[2] == dataset - assert vector_call_args[3] == document.doc_form - - assert result == mock_segment - - def test_create_segment_with_qa_model(self, mock_db_session, mock_current_user): - """Test creation of segment with QA model (requires answer).""" - # Arrange - document = SegmentTestDataFactory.create_document_mock(doc_form=IndexStructureType.QA_INDEX, word_count=100) - dataset = SegmentTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - args = {"content": "What is AI?", "answer": "AI is Artificial Intelligence", "keywords": ["ai"]} - - mock_query = MagicMock() - mock_query.where.return_value.scalar.return_value = None - mock_db_session.query.return_value = mock_query - - mock_segment = SegmentTestDataFactory.create_segment_mock() - mock_db_session.query.return_value.where.return_value.first.return_value = mock_segment - - with ( - patch("services.dataset_service.redis_client.lock", autospec=True) as mock_lock, - patch( - "services.dataset_service.VectorService.create_segments_vector", autospec=True - ) as mock_vector_service, - patch("services.dataset_service.helper.generate_text_hash", autospec=True) as mock_hash, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_lock.return_value.__enter__ = Mock() - mock_lock.return_value.__exit__ = Mock(return_value=None) - mock_hash.return_value = "hash-123" - mock_now.return_value = "2024-01-01T00:00:00" - - # Act - result = SegmentService.create_segment(args, document, dataset) - - # Assert - assert result == mock_segment - mock_db_session.add.assert_called() - mock_db_session.commit.assert_called() - - def test_create_segment_with_high_quality_indexing(self, mock_db_session, mock_current_user): - """Test creation of segment with high quality indexing technique.""" - # Arrange - document = 
SegmentTestDataFactory.create_document_mock(word_count=100) - dataset = SegmentTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.HIGH_QUALITY) - args = {"content": "New segment content", "keywords": ["test"]} - - mock_query = MagicMock() - mock_query.where.return_value.scalar.return_value = None - mock_db_session.query.return_value = mock_query - - mock_embedding_model = MagicMock() - mock_embedding_model.get_text_embedding_num_tokens.return_value = [10] - mock_model_manager = MagicMock() - mock_model_manager.get_model_instance.return_value = mock_embedding_model - - mock_segment = SegmentTestDataFactory.create_segment_mock() - mock_db_session.query.return_value.where.return_value.first.return_value = mock_segment - - with ( - patch("services.dataset_service.redis_client.lock", autospec=True) as mock_lock, - patch( - "services.dataset_service.VectorService.create_segments_vector", autospec=True - ) as mock_vector_service, - patch("services.dataset_service.ModelManager.for_tenant", autospec=True) as mock_model_manager_class, - patch("services.dataset_service.helper.generate_text_hash", autospec=True) as mock_hash, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_lock.return_value.__enter__ = Mock() - mock_lock.return_value.__exit__ = Mock(return_value=None) - mock_model_manager_class.return_value = mock_model_manager - mock_hash.return_value = "hash-123" - mock_now.return_value = "2024-01-01T00:00:00" - - # Act - result = SegmentService.create_segment(args, document, dataset) - - # Assert - assert result == mock_segment - mock_model_manager.get_model_instance.assert_called_once() - mock_embedding_model.get_text_embedding_num_tokens.assert_called_once() - - def test_create_segment_vector_index_failure(self, mock_db_session, mock_current_user): - """Test segment creation when vector indexing fails.""" - # Arrange - document = SegmentTestDataFactory.create_document_mock(word_count=100) - dataset = SegmentTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - args = {"content": "New segment content", "keywords": ["test"]} - - mock_query = MagicMock() - mock_query.where.return_value.scalar.return_value = None - mock_db_session.query.return_value = mock_query - - mock_segment = SegmentTestDataFactory.create_segment_mock(enabled=False, status="error") - mock_db_session.query.return_value.where.return_value.first.return_value = mock_segment - - with ( - patch("services.dataset_service.redis_client.lock", autospec=True) as mock_lock, - patch( - "services.dataset_service.VectorService.create_segments_vector", autospec=True - ) as mock_vector_service, - patch("services.dataset_service.helper.generate_text_hash", autospec=True) as mock_hash, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_lock.return_value.__enter__ = Mock() - mock_lock.return_value.__exit__ = Mock(return_value=None) - mock_vector_service.side_effect = Exception("Vector indexing failed") - mock_hash.return_value = "hash-123" - mock_now.return_value = "2024-01-01T00:00:00" - - # Act - result = SegmentService.create_segment(args, document, dataset) - - # Assert - assert result == mock_segment - assert mock_db_session.commit.call_count == 2 # Once for creation, once for error update - - -class TestSegmentServiceUpdateSegment: - """Tests for SegmentService.update_segment method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with 
patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def test_update_segment_content_success(self, mock_db_session, mock_current_user): - """Test successful update of segment content.""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=True, word_count=10) - document = SegmentTestDataFactory.create_document_mock(word_count=100) - dataset = SegmentTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - args = SegmentUpdateArgs(content="Updated content", keywords=["updated"]) - - mock_db_session.query.return_value.where.return_value.first.return_value = segment - - with ( - patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get, - patch("services.dataset_service.VectorService.update_segment_vector", autospec=True) as mock_vector_service, - patch("services.dataset_service.helper.generate_text_hash", autospec=True) as mock_hash, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_redis_get.return_value = None # Not indexing - mock_hash.return_value = "new-hash" - mock_now.return_value = "2024-01-01T00:00:00" - - # Act - result = SegmentService.update_segment(args, segment, document, dataset) - - # Assert - assert result == segment - assert segment.content == "Updated content" - assert segment.keywords == ["updated"] - assert segment.word_count == len("Updated content") - assert document.word_count == 100 + (len("Updated content") - 10) - mock_db_session.add.assert_called() - mock_db_session.commit.assert_called() - - def test_update_segment_disable(self, mock_db_session, mock_current_user): - """Test disabling a segment.""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=True) - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - args = SegmentUpdateArgs(enabled=False) - - with ( - patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get, - patch("services.dataset_service.redis_client.setex", autospec=True) as mock_redis_setex, - patch("services.dataset_service.disable_segment_from_index_task", autospec=True) as mock_task, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_redis_get.return_value = None - mock_now.return_value = "2024-01-01T00:00:00" - - # Act - result = SegmentService.update_segment(args, segment, document, dataset) - - # Assert - assert result == segment - assert segment.enabled is False - mock_db_session.add.assert_called() - mock_db_session.commit.assert_called() - mock_task.delay.assert_called_once() - - def test_update_segment_indexing_in_progress(self, mock_db_session, mock_current_user): - """Test update fails when segment is currently indexing.""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=True) - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - args = SegmentUpdateArgs(content="Updated content") - - with patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get: - mock_redis_get.return_value = "1" # Indexing in progress - - # Act & Assert - with pytest.raises(ValueError, match="Segment is indexing"): - 
SegmentService.update_segment(args, segment, document, dataset) - - def test_update_segment_disabled_segment(self, mock_db_session, mock_current_user): - """Test update fails when segment is disabled.""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=False) - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - args = SegmentUpdateArgs(content="Updated content") - - with patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get: - mock_redis_get.return_value = None - - # Act & Assert - with pytest.raises(ValueError, match="Can't update disabled segment"): - SegmentService.update_segment(args, segment, document, dataset) - - def test_update_segment_with_qa_model(self, mock_db_session, mock_current_user): - """Test update segment with QA model (includes answer).""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=True, word_count=10) - document = SegmentTestDataFactory.create_document_mock(doc_form=IndexStructureType.QA_INDEX, word_count=100) - dataset = SegmentTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - args = SegmentUpdateArgs(content="Updated question", answer="Updated answer", keywords=["qa"]) - - mock_db_session.query.return_value.where.return_value.first.return_value = segment - - with ( - patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get, - patch("services.dataset_service.VectorService.update_segment_vector", autospec=True) as mock_vector_service, - patch("services.dataset_service.helper.generate_text_hash", autospec=True) as mock_hash, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_redis_get.return_value = None - mock_hash.return_value = "new-hash" - mock_now.return_value = "2024-01-01T00:00:00" - - # Act - result = SegmentService.update_segment(args, segment, document, dataset) - - # Assert - assert result == segment - assert segment.content == "Updated question" - assert segment.answer == "Updated answer" - assert segment.keywords == ["qa"] - new_word_count = len("Updated question") + len("Updated answer") - assert segment.word_count == new_word_count - assert document.word_count == 100 + (new_word_count - 10) - mock_db_session.commit.assert_called() - - -class TestSegmentServiceDeleteSegment: - """Tests for SegmentService.delete_segment method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - def test_delete_segment_success(self, mock_db_session): - """Test successful deletion of a segment.""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=True, word_count=50) - document = SegmentTestDataFactory.create_document_mock(word_count=100) - dataset = SegmentTestDataFactory.create_dataset_mock() - - mock_scalars = MagicMock() - mock_scalars.all.return_value = [] - mock_db_session.scalars.return_value = mock_scalars - - with ( - patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get, - patch("services.dataset_service.redis_client.setex", autospec=True) as mock_redis_setex, - patch("services.dataset_service.delete_segment_from_index_task", autospec=True) as mock_task, - patch("services.dataset_service.select", autospec=True) as mock_select, - ): - mock_redis_get.return_value = None - mock_select.return_value.where.return_value = mock_select - - # 
Act - SegmentService.delete_segment(segment, document, dataset) - - # Assert - mock_db_session.delete.assert_called_once_with(segment) - mock_db_session.commit.assert_called_once() - mock_task.delay.assert_called_once() - - def test_delete_segment_disabled(self, mock_db_session): - """Test deletion of disabled segment (no index deletion).""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=False, word_count=50) - document = SegmentTestDataFactory.create_document_mock(word_count=100) - dataset = SegmentTestDataFactory.create_dataset_mock() - - with ( - patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get, - patch("services.dataset_service.delete_segment_from_index_task", autospec=True) as mock_task, - ): - mock_redis_get.return_value = None - - # Act - SegmentService.delete_segment(segment, document, dataset) - - # Assert - mock_db_session.delete.assert_called_once_with(segment) - mock_db_session.commit.assert_called_once() - mock_task.delay.assert_not_called() - - def test_delete_segment_indexing_in_progress(self, mock_db_session): - """Test deletion fails when segment is currently being deleted.""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=True) - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - with patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get: - mock_redis_get.return_value = "1" # Deletion in progress - - # Act & Assert - with pytest.raises(ValueError, match="Segment is deleting"): - SegmentService.delete_segment(segment, document, dataset) - - -class TestSegmentServiceDeleteSegments: - """Tests for SegmentService.delete_segments method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def test_delete_segments_success(self, mock_db_session, mock_current_user): - """Test successful deletion of multiple segments.""" - # Arrange - segment_ids = ["segment-1", "segment-2"] - document = SegmentTestDataFactory.create_document_mock(word_count=200) - dataset = SegmentTestDataFactory.create_dataset_mock() - - segments_info = [ - ("node-1", "segment-1", 50), - ("node-2", "segment-2", 30), - ] - - mock_query = MagicMock() - mock_query.with_entities.return_value.where.return_value.all.return_value = segments_info - mock_db_session.query.return_value = mock_query - - mock_scalars = MagicMock() - mock_scalars.all.return_value = [] - mock_select = MagicMock() - mock_select.where.return_value = mock_select - mock_db_session.scalars.return_value = mock_scalars - - with ( - patch("services.dataset_service.delete_segment_from_index_task", autospec=True) as mock_task, - patch("services.dataset_service.select", autospec=True) as mock_select_func, - ): - mock_select_func.return_value = mock_select - - # Act - SegmentService.delete_segments(segment_ids, document, dataset) - - # Assert - mock_db_session.query.return_value.where.return_value.delete.assert_called_once() - mock_db_session.commit.assert_called_once() - mock_task.delay.assert_called_once() - - def test_delete_segments_empty_list(self, mock_db_session, mock_current_user): - """Test deletion with empty list (should return 
early).""" - # Arrange - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - # Act - SegmentService.delete_segments([], document, dataset) - - # Assert - mock_db_session.query.assert_not_called() - - -class TestSegmentServiceUpdateSegmentsStatus: - """Tests for SegmentService.update_segments_status method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def test_update_segments_status_enable(self, mock_db_session, mock_current_user): - """Test enabling multiple segments.""" - # Arrange - segment_ids = ["segment-1", "segment-2"] - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - segments = [ - SegmentTestDataFactory.create_segment_mock(segment_id="segment-1", enabled=False), - SegmentTestDataFactory.create_segment_mock(segment_id="segment-2", enabled=False), - ] - - mock_scalars = MagicMock() - mock_scalars.all.return_value = segments - mock_select = MagicMock() - mock_select.where.return_value = mock_select - mock_db_session.scalars.return_value = mock_scalars - - with ( - patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get, - patch("services.dataset_service.enable_segments_to_index_task", autospec=True) as mock_task, - patch("services.dataset_service.select", autospec=True) as mock_select_func, - ): - mock_redis_get.return_value = None - mock_select_func.return_value = mock_select - - # Act - SegmentService.update_segments_status(segment_ids, "enable", dataset, document) - - # Assert - assert all(seg.enabled is True for seg in segments) - mock_db_session.commit.assert_called_once() - mock_task.delay.assert_called_once() - - def test_update_segments_status_disable(self, mock_db_session, mock_current_user): - """Test disabling multiple segments.""" - # Arrange - segment_ids = ["segment-1", "segment-2"] - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - segments = [ - SegmentTestDataFactory.create_segment_mock(segment_id="segment-1", enabled=True), - SegmentTestDataFactory.create_segment_mock(segment_id="segment-2", enabled=True), - ] - - mock_scalars = MagicMock() - mock_scalars.all.return_value = segments - mock_select = MagicMock() - mock_select.where.return_value = mock_select - mock_db_session.scalars.return_value = mock_scalars - - with ( - patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get, - patch("services.dataset_service.disable_segments_from_index_task", autospec=True) as mock_task, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - patch("services.dataset_service.select", autospec=True) as mock_select_func, - ): - mock_redis_get.return_value = None - mock_now.return_value = "2024-01-01T00:00:00" - mock_select_func.return_value = mock_select - - # Act - SegmentService.update_segments_status(segment_ids, "disable", dataset, document) - - # Assert - assert all(seg.enabled is False for seg in segments) - mock_db_session.commit.assert_called_once() - mock_task.delay.assert_called_once() - - def test_update_segments_status_empty_list(self, mock_db_session, 
mock_current_user): - """Test update with empty list (should return early).""" - # Arrange - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - # Act - SegmentService.update_segments_status([], "enable", dataset, document) - - # Assert - mock_db_session.scalars.assert_not_called() - - -class TestSegmentServiceGetSegments: - """Tests for SegmentService.get_segments method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def test_get_segments_success(self, mock_db_session, mock_current_user): - """Test successful retrieval of segments.""" - # Arrange - document_id = "doc-123" - tenant_id = "tenant-123" - segments = [ - SegmentTestDataFactory.create_segment_mock(segment_id="segment-1"), - SegmentTestDataFactory.create_segment_mock(segment_id="segment-2"), - ] - - mock_paginate = MagicMock() - mock_paginate.items = segments - mock_paginate.total = 2 - mock_db_session.paginate.return_value = mock_paginate - - # Act - items, total = SegmentService.get_segments(document_id, tenant_id) - - # Assert - assert len(items) == 2 - assert total == 2 - mock_db_session.paginate.assert_called_once() - - def test_get_segments_with_status_filter(self, mock_db_session, mock_current_user): - """Test retrieval with status filter.""" - # Arrange - document_id = "doc-123" - tenant_id = "tenant-123" - status_list = ["completed", "error"] - - mock_paginate = MagicMock() - mock_paginate.items = [] - mock_paginate.total = 0 - mock_db_session.paginate.return_value = mock_paginate - - # Act - items, total = SegmentService.get_segments(document_id, tenant_id, status_list=status_list) - - # Assert - assert len(items) == 0 - assert total == 0 - - def test_get_segments_with_keyword(self, mock_db_session, mock_current_user): - """Test retrieval with keyword search.""" - # Arrange - document_id = "doc-123" - tenant_id = "tenant-123" - keyword = "test" - - mock_paginate = MagicMock() - mock_paginate.items = [SegmentTestDataFactory.create_segment_mock()] - mock_paginate.total = 1 - mock_db_session.paginate.return_value = mock_paginate - - # Act - items, total = SegmentService.get_segments(document_id, tenant_id, keyword=keyword) - - # Assert - assert len(items) == 1 - assert total == 1 - - -class TestSegmentServiceGetSegmentById: - """Tests for SegmentService.get_segment_by_id method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - def test_get_segment_by_id_success(self, mock_db_session): - """Test successful retrieval of segment by ID.""" - # Arrange - segment_id = "segment-123" - tenant_id = "tenant-123" - segment = SegmentTestDataFactory.create_segment_mock(segment_id=segment_id) - - mock_query = MagicMock() - mock_query.where.return_value.first.return_value = segment - mock_db_session.query.return_value = mock_query - - # Act - result = SegmentService.get_segment_by_id(segment_id, tenant_id) - - # Assert - assert result == segment - - def test_get_segment_by_id_not_found(self, mock_db_session): - """Test retrieval when segment is not found.""" - # Arrange - segment_id = "non-existent" - 
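
These lookups are all configured through a single chained assignment on the mock. That works because every attribute access or call on a MagicMock returns another MagicMock, so one line wires the whole query(...).where(...).first() chain:

from unittest.mock import MagicMock

session = MagicMock()
found = object()
session.query.return_value.where.return_value.first.return_value = found

# The chain matches structurally, regardless of the arguments passed.
assert session.query("DocumentSegment").where("id = 1").first() is found
assert session.query("anything").where("else").first() is found
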
tenant_id = "tenant-123" - - mock_query = MagicMock() - mock_query.where.return_value.first.return_value = None - mock_db_session.query.return_value = mock_query - - # Act - result = SegmentService.get_segment_by_id(segment_id, tenant_id) - - # Assert - assert result is None - - -class TestSegmentServiceGetChildChunks: - """Tests for SegmentService.get_child_chunks method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def test_get_child_chunks_success(self, mock_db_session, mock_current_user): - """Test successful retrieval of child chunks.""" - # Arrange - segment_id = "segment-123" - document_id = "doc-123" - dataset_id = "dataset-123" - page = 1 - limit = 20 - - mock_paginate = MagicMock() - mock_paginate.items = [ - SegmentTestDataFactory.create_child_chunk_mock(chunk_id="chunk-1"), - SegmentTestDataFactory.create_child_chunk_mock(chunk_id="chunk-2"), - ] - mock_paginate.total = 2 - mock_db_session.paginate.return_value = mock_paginate - - # Act - result = SegmentService.get_child_chunks(segment_id, document_id, dataset_id, page, limit) - - # Assert - assert result == mock_paginate - mock_db_session.paginate.assert_called_once() - - def test_get_child_chunks_with_keyword(self, mock_db_session, mock_current_user): - """Test retrieval with keyword search.""" - # Arrange - segment_id = "segment-123" - document_id = "doc-123" - dataset_id = "dataset-123" - page = 1 - limit = 20 - keyword = "test" - - mock_paginate = MagicMock() - mock_paginate.items = [] - mock_paginate.total = 0 - mock_db_session.paginate.return_value = mock_paginate - - # Act - result = SegmentService.get_child_chunks(segment_id, document_id, dataset_id, page, limit, keyword=keyword) - - # Assert - assert result == mock_paginate - - -class TestSegmentServiceGetChildChunkById: - """Tests for SegmentService.get_child_chunk_by_id method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - def test_get_child_chunk_by_id_success(self, mock_db_session): - """Test successful retrieval of child chunk by ID.""" - # Arrange - chunk_id = "chunk-123" - tenant_id = "tenant-123" - chunk = SegmentTestDataFactory.create_child_chunk_mock(chunk_id=chunk_id) - - mock_query = MagicMock() - mock_query.where.return_value.first.return_value = chunk - mock_db_session.query.return_value = mock_query - - # Act - result = SegmentService.get_child_chunk_by_id(chunk_id, tenant_id) - - # Assert - assert result == chunk - - def test_get_child_chunk_by_id_not_found(self, mock_db_session): - """Test retrieval when child chunk is not found.""" - # Arrange - chunk_id = "non-existent" - tenant_id = "tenant-123" - - mock_query = MagicMock() - mock_query.where.return_value.first.return_value = None - mock_db_session.query.return_value = mock_query - - # Act - result = SegmentService.get_child_chunk_by_id(chunk_id, tenant_id) - - # Assert - assert result is None - - -class TestSegmentServiceCreateChildChunk: - """Tests for SegmentService.create_child_chunk method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", 
autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def test_create_child_chunk_success(self, mock_db_session, mock_current_user): - """Test successful creation of a child chunk.""" - # Arrange - content = "New child chunk content" - segment = SegmentTestDataFactory.create_segment_mock() - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - mock_query = MagicMock() - mock_query.where.return_value.scalar.return_value = None - mock_db_session.query.return_value = mock_query - - with ( - patch("services.dataset_service.redis_client.lock", autospec=True) as mock_lock, - patch( - "services.dataset_service.VectorService.create_child_chunk_vector", autospec=True - ) as mock_vector_service, - patch("services.dataset_service.helper.generate_text_hash", autospec=True) as mock_hash, - ): - mock_lock.return_value.__enter__ = Mock() - mock_lock.return_value.__exit__ = Mock(return_value=None) - mock_hash.return_value = "hash-123" - - # Act - result = SegmentService.create_child_chunk(content, segment, document, dataset) - - # Assert - assert result is not None - mock_db_session.add.assert_called_once() - mock_db_session.commit.assert_called_once() - mock_vector_service.assert_called_once() - - def test_create_child_chunk_vector_index_failure(self, mock_db_session, mock_current_user): - """Test child chunk creation when vector indexing fails.""" - # Arrange - content = "New child chunk content" - segment = SegmentTestDataFactory.create_segment_mock() - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - mock_query = MagicMock() - mock_query.where.return_value.scalar.return_value = None - mock_db_session.query.return_value = mock_query - - with ( - patch("services.dataset_service.redis_client.lock", autospec=True) as mock_lock, - patch( - "services.dataset_service.VectorService.create_child_chunk_vector", autospec=True - ) as mock_vector_service, - patch("services.dataset_service.helper.generate_text_hash", autospec=True) as mock_hash, - ): - mock_lock.return_value.__enter__ = Mock() - mock_lock.return_value.__exit__ = Mock(return_value=None) - mock_vector_service.side_effect = Exception("Vector indexing failed") - mock_hash.return_value = "hash-123" - - # Act & Assert - with pytest.raises(ChildChunkIndexingError): - SegmentService.create_child_chunk(content, segment, document, dataset) - - mock_db_session.rollback.assert_called_once() - - -class TestSegmentServiceUpdateChildChunk: - """Tests for SegmentService.update_child_chunk method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def test_update_child_chunk_success(self, mock_db_session, mock_current_user): - """Test successful update of a child chunk.""" - # Arrange - content = "Updated child chunk content" - chunk = SegmentTestDataFactory.create_child_chunk_mock() - segment = SegmentTestDataFactory.create_segment_mock() - document = SegmentTestDataFactory.create_document_mock() - dataset = 
SegmentTestDataFactory.create_dataset_mock() - - with ( - patch( - "services.dataset_service.VectorService.update_child_chunk_vector", autospec=True - ) as mock_vector_service, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_now.return_value = "2024-01-01T00:00:00" - - # Act - result = SegmentService.update_child_chunk(content, chunk, segment, document, dataset) - - # Assert - assert result == chunk - assert chunk.content == content - assert chunk.word_count == len(content) - mock_db_session.add.assert_called_once_with(chunk) - mock_db_session.commit.assert_called_once() - mock_vector_service.assert_called_once() - - def test_update_child_chunk_vector_index_failure(self, mock_db_session, mock_current_user): - """Test child chunk update when vector indexing fails.""" - # Arrange - content = "Updated content" - chunk = SegmentTestDataFactory.create_child_chunk_mock() - segment = SegmentTestDataFactory.create_segment_mock() - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - with ( - patch( - "services.dataset_service.VectorService.update_child_chunk_vector", autospec=True - ) as mock_vector_service, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_vector_service.side_effect = Exception("Vector indexing failed") - mock_now.return_value = "2024-01-01T00:00:00" - - # Act & Assert - with pytest.raises(ChildChunkIndexingError): - SegmentService.update_child_chunk(content, chunk, segment, document, dataset) - - mock_db_session.rollback.assert_called_once() - - -class TestSegmentServiceDeleteChildChunk: - """Tests for SegmentService.delete_child_chunk method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - def test_delete_child_chunk_success(self, mock_db_session): - """Test successful deletion of a child chunk.""" - # Arrange - chunk = SegmentTestDataFactory.create_child_chunk_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - with patch( - "services.dataset_service.VectorService.delete_child_chunk_vector", autospec=True - ) as mock_vector_service: - # Act - SegmentService.delete_child_chunk(chunk, dataset) - - # Assert - mock_db_session.delete.assert_called_once_with(chunk) - mock_db_session.commit.assert_called_once() - mock_vector_service.assert_called_once_with(chunk, dataset) - - def test_delete_child_chunk_vector_index_failure(self, mock_db_session): - """Test child chunk deletion when vector indexing fails.""" - # Arrange - chunk = SegmentTestDataFactory.create_child_chunk_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - with patch( - "services.dataset_service.VectorService.delete_child_chunk_vector", autospec=True - ) as mock_vector_service: - mock_vector_service.side_effect = Exception("Vector deletion failed") - - # Act & Assert - with pytest.raises(ChildChunkDeleteIndexError): - SegmentService.delete_child_chunk(chunk, dataset) - - mock_db_session.rollback.assert_called_once() diff --git a/api/tests/unit_tests/services/services_test_help.py b/api/tests/unit_tests/services/services_test_help.py deleted file mode 100644 index c6b962f7fc..0000000000 --- a/api/tests/unit_tests/services/services_test_help.py +++ /dev/null @@ -1,59 +0,0 @@ -from unittest.mock import MagicMock - - -class ServiceDbTestHelper: - """ - Helper class for service database query tests. 
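
A usage sketch for the helper being removed below, based on its documented mapping of (model_name, filter_key, filter_value) tuples to .first() results; the Account stand-in class is only for illustration:

from unittest.mock import MagicMock

from tests.unit_tests.services.services_test_help import ServiceDbTestHelper

class Account:  # illustrative stand-in model
    pass

mock_db = MagicMock()
found_account = MagicMock()
ServiceDbTestHelper.setup_db_query_filter_by_mock(
    mock_db,
    {("Account", "email", "test@example.com"): found_account},
)
# Matching model name and filter kwargs returns the mapped object...
assert mock_db.session.query(Account).filter_by(email="test@example.com").first() is found_account
# ...anything else falls through to None.
assert mock_db.session.query(Account).filter_by(email="other@example.com").first() is None
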
- """ - - @staticmethod - def setup_db_query_filter_by_mock(mock_db, query_results): - """ - Smart database query mock that responds based on model type and query parameters. - - Args: - mock_db: Mock database session - query_results: Dict mapping (model_name, filter_key, filter_value) to return value - Example: {('Account', 'email', 'test@example.com'): mock_account} - """ - - def query_side_effect(model): - mock_query = MagicMock() - - def filter_by_side_effect(**kwargs): - mock_filter_result = MagicMock() - - def first_side_effect(): - # Find matching result based on model and filter parameters - for (model_name, filter_key, filter_value), result in query_results.items(): - if model.__name__ == model_name and filter_key in kwargs and kwargs[filter_key] == filter_value: - return result - return None - - mock_filter_result.first.side_effect = first_side_effect - - # Handle order_by calls for complex queries - def order_by_side_effect(*args, **kwargs): - mock_order_result = MagicMock() - - def order_first_side_effect(): - # Look for order_by results in the same query_results dict - for (model_name, filter_key, filter_value), result in query_results.items(): - if ( - model.__name__ == model_name - and filter_key == "order_by" - and filter_value == "first_available" - ): - return result - return None - - mock_order_result.first.side_effect = order_first_side_effect - return mock_order_result - - mock_filter_result.order_by.side_effect = order_by_side_effect - return mock_filter_result - - mock_query.filter_by.side_effect = filter_by_side_effect - return mock_query - - mock_db.session.query.side_effect = query_side_effect diff --git a/api/tests/unit_tests/services/test_account_service.py b/api/tests/unit_tests/services/test_account_service.py index c4f5f57153..e9d2f1481e 100644 --- a/api/tests/unit_tests/services/test_account_service.py +++ b/api/tests/unit_tests/services/test_account_service.py @@ -14,7 +14,6 @@ from services.errors.account import ( AccountRegisterError, CurrentPasswordIncorrectError, ) -from tests.unit_tests.services.services_test_help import ServiceDbTestHelper class TestAccountAssociatedDataFactory: @@ -149,7 +148,6 @@ class TestAccountService: # Setup basic session methods mock_session.add = MagicMock() mock_session.commit = MagicMock() - mock_session.query = MagicMock() yield mock_db @@ -1572,15 +1570,9 @@ class TestRegisterService: account_id="existing-user-456", email="existing@example.com", status="active" ) - # Mock database queries - query_results = { - ( - "TenantAccountJoin", - "tenant_id", - "tenant-456", - ): TestAccountAssociatedDataFactory.create_tenant_join_mock(), - } - ServiceDbTestHelper.setup_db_query_filter_by_mock(mock_db_dependencies["db"], query_results) + mock_db_dependencies[ + "db" + ].session.scalar.return_value = TestAccountAssociatedDataFactory.create_tenant_join_mock() # Mock TenantService methods with ( diff --git a/api/tests/unit_tests/services/test_annotation_service.py b/api/tests/unit_tests/services/test_annotation_service.py index 4295315f48..5054010e89 100644 --- a/api/tests/unit_tests/services/test_annotation_service.py +++ b/api/tests/unit_tests/services/test_annotation_service.py @@ -238,6 +238,8 @@ class TestAppAnnotationServiceUpInsert: assert result == annotation_instance mock_cls.assert_called_once_with( app_id=app.id, + conversation_id=None, + message_id=None, content="hello", question="q1", account_id=current_user.id, diff --git a/api/tests/unit_tests/services/test_async_workflow_service.py 
b/api/tests/unit_tests/services/test_async_workflow_service.py index 73fc399ac3..1b9cc8a2ff 100644 --- a/api/tests/unit_tests/services/test_async_workflow_service.py +++ b/api/tests/unit_tests/services/test_async_workflow_service.py @@ -163,7 +163,7 @@ class TestAsyncWorkflowService: mocks["quota_service"].reserve.assert_called_once() quota_charge_mock.commit.assert_called_once() - assert session.commit.call_count == 2 + assert session.commit.call_count == 3 created_log = mocks["repo"].create.call_args[0][0] assert created_log.status == WorkflowTriggerStatus.QUEUED @@ -266,7 +266,7 @@ class TestAsyncWorkflowService: trigger_data=trigger_data, ) - assert session.commit.call_count == 2 + assert session.commit.call_count == 3 updated_log = mocks["repo"].update.call_args[0][0] assert updated_log.status == WorkflowTriggerStatus.RATE_LIMITED assert "Quota limit reached" in updated_log.error @@ -469,7 +469,7 @@ class TestAsyncWorkflowServiceGetWorkflow: # Assert assert result == workflow - workflow_service.get_published_workflow_by_id.assert_called_once_with(app_model, "workflow-123") + workflow_service.get_published_workflow_by_id.assert_called_once_with(app_model, "workflow-123", session=None) workflow_service.get_published_workflow.assert_not_called() def test_should_raise_when_specific_workflow_id_not_found(self): @@ -497,7 +497,7 @@ class TestAsyncWorkflowServiceGetWorkflow: # Assert assert result == workflow - workflow_service.get_published_workflow.assert_called_once_with(app_model) + workflow_service.get_published_workflow.assert_called_once_with(app_model, session=None) workflow_service.get_published_workflow_by_id.assert_not_called() def test_should_raise_when_default_published_workflow_not_found(self): diff --git a/api/tests/unit_tests/services/test_dataset_service_segment.py b/api/tests/unit_tests/services/test_dataset_service_segment.py index d6c104708c..6330e53765 100644 --- a/api/tests/unit_tests/services/test_dataset_service_segment.py +++ b/api/tests/unit_tests/services/test_dataset_service_segment.py @@ -89,7 +89,6 @@ class TestSegmentServiceChildChunks: document = _make_document() segment = _make_segment() existing_a = ChildChunk( - id="child-a", tenant_id="tenant-1", dataset_id="dataset-1", document_id="doc-1", @@ -100,7 +99,6 @@ class TestSegmentServiceChildChunks: created_by="user-1", ) existing_b = ChildChunk( - id="child-b", tenant_id="tenant-1", dataset_id="dataset-1", document_id="doc-1", @@ -110,7 +108,8 @@ class TestSegmentServiceChildChunks: word_count=9, created_by="user-1", ) - + existing_a.id = "child-a" + existing_b.id = "child-b" with ( patch("services.dataset_service.db") as mock_db, patch("services.dataset_service.uuid.uuid4", return_value="node-new"), @@ -714,7 +713,6 @@ class TestSegmentServiceMutations: patch("services.dataset_service.db") as mock_db, patch("services.dataset_service.delete_segment_from_index_task") as delete_task, ): - segments_query = MagicMock() # execute().all() for segments_info (multi-column) execute_result = MagicMock() execute_result.all.return_value = [ diff --git a/api/tests/unit_tests/services/test_datasource_provider_service.py b/api/tests/unit_tests/services/test_datasource_provider_service.py index d304e0ec44..c389c4a635 100644 --- a/api/tests/unit_tests/services/test_datasource_provider_service.py +++ b/api/tests/unit_tests/services/test_datasource_provider_service.py @@ -36,9 +36,7 @@ class TestDatasourceProviderService: @pytest.fixture def mock_db_session(self): """ - Robust, chainable query mock. 
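
The docstring being removed here describes the pattern in question. As a self-contained illustration, a self-returning chain lets any .filter_by()/.order_by()/.where() sequence resolve to one configurable mock:

from unittest.mock import MagicMock

q = MagicMock()
# Every chaining method returns q itself, so arbitrary chains terminate
# at the same object and one set of terminal defaults covers them all.
q.filter_by.return_value = q
q.order_by.return_value = q
q.where.return_value = q
q.first.return_value = None
q.all.return_value = []

assert q.filter_by(tenant_id="t1").order_by("created_at").first() is None
assert q.where(True).all() == []
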
- q returns itself for .filter_by(), .order_by(), .where() so any - SQLAlchemy chaining pattern works without multiple brittle sub-mocks. + Mock session with scalar/scalars defaults for current SQLAlchemy access paths. """ with ( patch("services.datasource_provider_service.Session") as mock_cls, @@ -46,20 +44,6 @@ class TestDatasourceProviderService: ): sess = MagicMock(spec=Session) - q = MagicMock() - sess.query.return_value = q - - # Self-returning chain — any method called on q returns q - q.filter_by.return_value = q - q.order_by.return_value = q - q.where.return_value = q - - # Default terminal values (tests override per-case) - q.first.return_value = None - q.all.return_value = [] - q.count.return_value = 0 - q.delete.return_value = 1 - # Default values for select()-style calls (tests override per-case) sess.scalar.return_value = None sess.scalars.return_value.all.return_value = [] diff --git a/api/tests/unit_tests/services/test_webhook_service_additional.py b/api/tests/unit_tests/services/test_webhook_service_additional.py index 776cb5dc3f..491dd94842 100644 --- a/api/tests/unit_tests/services/test_webhook_service_additional.py +++ b/api/tests/unit_tests/services/test_webhook_service_additional.py @@ -17,23 +17,6 @@ from services.trigger import webhook_service as service_module from services.trigger.webhook_service import WebhookService -class _FakeQuery: - def __init__(self, result: Any) -> None: - self._result = result - - def where(self, *args: Any, **kwargs: Any) -> "_FakeQuery": - return self - - def filter(self, *args: Any, **kwargs: Any) -> "_FakeQuery": - return self - - def order_by(self, *args: Any, **kwargs: Any) -> "_FakeQuery": - return self - - def first(self) -> Any: - return self._result - - @pytest.fixture def flask_app() -> Flask: return Flask(__name__) diff --git a/api/tests/unit_tests/services/test_workflow_service.py b/api/tests/unit_tests/services/test_workflow_service.py index 0015e8b908..feafada59a 100644 --- a/api/tests/unit_tests/services/test_workflow_service.py +++ b/api/tests/unit_tests/services/test_workflow_service.py @@ -1649,8 +1649,6 @@ class TestWorkflowServiceCredentialValidation: """Missing BuiltinToolProvider → plugin requires no credentials → no error.""" # Arrange with patch("services.workflow_service.db") as mock_db: - mock_db.session.query.return_value.where.return_value.order_by.return_value.first.return_value = None - # Act + Assert (should NOT raise) service._check_default_tool_credential("tenant-1", "some-provider") @@ -1662,10 +1660,6 @@ class TestWorkflowServiceCredentialValidation: patch("services.workflow_service.db") as mock_db, patch("core.helper.credential_utils.check_credential_policy_compliance", side_effect=Exception("denied")), ): - mock_db.session.query.return_value.where.return_value.order_by.return_value.first.return_value = ( - mock_provider - ) - # Act + Assert with pytest.raises(ValueError, match="Failed to validate default credential"): service._check_default_tool_credential("tenant-1", "some-provider") diff --git a/api/tests/unit_tests/services/vector_service.py b/api/tests/unit_tests/services/vector_service.py deleted file mode 100644 index ad80beb4e3..0000000000 --- a/api/tests/unit_tests/services/vector_service.py +++ /dev/null @@ -1,1793 +0,0 @@ -""" -Comprehensive unit tests for VectorService and Vector classes. 
- -This module contains extensive unit tests for the VectorService and Vector -classes, which are critical components in the RAG (Retrieval-Augmented Generation) -pipeline that handle vector database operations, collection management, embedding -storage and retrieval, and metadata filtering. - -The VectorService provides methods for: -- Creating vector embeddings for document segments -- Updating segment vector embeddings -- Generating child chunks for hierarchical indexing -- Managing child chunk vectors (create, update, delete) - -The Vector class provides methods for: -- Vector database operations (create, add, delete, search) -- Collection creation and management with Redis locking -- Embedding storage and retrieval -- Vector index operations (HNSW, L2 distance, etc.) -- Metadata filtering in vector space -- Support for multiple vector database backends - -This test suite ensures: -- Correct vector database operations -- Proper collection creation and management -- Accurate embedding storage and retrieval -- Comprehensive vector search functionality -- Metadata filtering and querying -- Error conditions are handled correctly -- Edge cases are properly validated - -================================================================================ -ARCHITECTURE OVERVIEW -================================================================================ - -The Vector service system is a critical component that bridges document -segments and vector databases, enabling semantic search and retrieval. - -1. VectorService: - - High-level service for managing vector operations on document segments - - Handles both regular segments and hierarchical (parent-child) indexing - - Integrates with IndexProcessor for document transformation - - Manages embedding model instances via ModelManager - -2. Vector Class: - - Wrapper around BaseVector implementations - - Handles embedding generation via ModelManager - - Supports multiple vector database backends (Chroma, Milvus, Qdrant, etc.) - - Manages collection creation with Redis locking for concurrency control - - Provides batch processing for large document sets - -3. BaseVector Abstract Class: - - Defines interface for vector database operations - - Implemented by various vector database backends - - Provides methods for CRUD operations on vectors - - Supports both vector similarity search and full-text search - -4. Collection Management: - - Uses Redis locks to prevent concurrent collection creation - - Caches collection existence status in Redis - - Supports collection deletion with cache invalidation - -5. Embedding Generation: - - Uses ModelManager to get embedding model instances - - Supports cached embeddings for performance - - Handles batch processing for large document sets - - Generates embeddings for both documents and queries - -================================================================================ -TESTING STRATEGY -================================================================================ - -This test suite follows a comprehensive testing strategy that covers: - -1. VectorService Methods: - - create_segments_vector: Regular and hierarchical indexing - - update_segment_vector: Vector and keyword index updates - - generate_child_chunks: Child chunk generation with full doc mode - - create_child_chunk_vector: Child chunk vector creation - - update_child_chunk_vector: Batch child chunk updates - - delete_child_chunk_vector: Child chunk deletion - -2. 
Vector Class Methods: - - Initialization with dataset and attributes - - Collection creation with Redis locking - - Embedding generation and batch processing - - Vector operations (create, add_texts, delete_by_ids, etc.) - - Search operations (by vector, by full text) - - Metadata filtering and querying - - Duplicate checking logic - - Vector factory selection - -3. Integration Points: - - ModelManager integration for embedding models - - IndexProcessor integration for document transformation - - Redis integration for locking and caching - - Database session management - - Vector database backend abstraction - -4. Error Handling: - - Invalid vector store configuration - - Missing embedding models - - Collection creation failures - - Search operation errors - - Metadata filtering errors - -5. Edge Cases: - - Empty document lists - - Missing metadata fields - - Duplicate document IDs - - Large batch processing - - Concurrent collection creation - -================================================================================ -""" - -from typing import Any -from unittest.mock import Mock, patch - -import pytest - -from core.rag.datasource.vdb.vector_base import BaseVector -from core.rag.datasource.vdb.vector_factory import Vector -from core.rag.datasource.vdb.vector_type import VectorType -from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType -from core.rag.models.document import Document -from models.dataset import ChildChunk, Dataset, DatasetDocument, DatasetProcessRule, DocumentSegment -from services.vector_service import VectorService - -# ============================================================================ -# Test Data Factory -# ============================================================================ - - -class VectorServiceTestDataFactory: - """ - Factory class for creating test data and mock objects for Vector service tests. - - This factory provides static methods to create mock objects for: - - Dataset instances with various configurations - - DocumentSegment instances - - ChildChunk instances - - Document instances (RAG documents) - - Embedding model instances - - Vector processor mocks - - Index processor mocks - - The factory methods help maintain consistency across tests and reduce - code duplication when setting up test scenarios. - """ - - @staticmethod - def create_dataset_mock( - dataset_id: str = "dataset-123", - tenant_id: str = "tenant-123", - doc_form: str = IndexStructureType.PARAGRAPH_INDEX, - indexing_technique: str = IndexTechniqueType.HIGH_QUALITY, - embedding_model_provider: str = "openai", - embedding_model: str = "text-embedding-ada-002", - index_struct_dict: dict[str, Any] | None = None, - **kwargs, - ) -> Mock: - """ - Create a mock Dataset with specified attributes. 
- - Args: - dataset_id: Unique identifier for the dataset - tenant_id: Tenant identifier - doc_form: Document form type - indexing_technique: Indexing technique (high_quality or economy) - embedding_model_provider: Embedding model provider - embedding_model: Embedding model name - index_struct_dict: Index structure dictionary - **kwargs: Additional attributes to set on the mock - - Returns: - Mock object configured as a Dataset instance - """ - dataset = Mock(spec=Dataset) - - dataset.id = dataset_id - - dataset.tenant_id = tenant_id - - dataset.doc_form = doc_form - - dataset.indexing_technique = indexing_technique - - dataset.embedding_model_provider = embedding_model_provider - - dataset.embedding_model = embedding_model - - dataset.index_struct_dict = index_struct_dict - - for key, value in kwargs.items(): - setattr(dataset, key, value) - - return dataset - - @staticmethod - def create_document_segment_mock( - segment_id: str = "segment-123", - document_id: str = "doc-123", - dataset_id: str = "dataset-123", - content: str = "Test segment content", - index_node_id: str = "node-123", - index_node_hash: str = "hash-123", - **kwargs, - ) -> Mock: - """ - Create a mock DocumentSegment with specified attributes. - - Args: - segment_id: Unique identifier for the segment - document_id: Parent document identifier - dataset_id: Dataset identifier - content: Segment content text - index_node_id: Index node identifier - index_node_hash: Index node hash - **kwargs: Additional attributes to set on the mock - - Returns: - Mock object configured as a DocumentSegment instance - """ - segment = Mock(spec=DocumentSegment) - - segment.id = segment_id - - segment.document_id = document_id - - segment.dataset_id = dataset_id - - segment.content = content - - segment.index_node_id = index_node_id - - segment.index_node_hash = index_node_hash - - for key, value in kwargs.items(): - setattr(segment, key, value) - - return segment - - @staticmethod - def create_child_chunk_mock( - chunk_id: str = "chunk-123", - segment_id: str = "segment-123", - document_id: str = "doc-123", - dataset_id: str = "dataset-123", - tenant_id: str = "tenant-123", - content: str = "Test child chunk content", - index_node_id: str = "node-chunk-123", - index_node_hash: str = "hash-chunk-123", - position: int = 1, - **kwargs, - ) -> Mock: - """ - Create a mock ChildChunk with specified attributes. 
- - Args: - chunk_id: Unique identifier for the child chunk - segment_id: Parent segment identifier - document_id: Parent document identifier - dataset_id: Dataset identifier - tenant_id: Tenant identifier - content: Child chunk content text - index_node_id: Index node identifier - index_node_hash: Index node hash - position: Position in parent segment - **kwargs: Additional attributes to set on the mock - - Returns: - Mock object configured as a ChildChunk instance - """ - chunk = Mock(spec=ChildChunk) - - chunk.id = chunk_id - - chunk.segment_id = segment_id - - chunk.document_id = document_id - - chunk.dataset_id = dataset_id - - chunk.tenant_id = tenant_id - - chunk.content = content - - chunk.index_node_id = index_node_id - - chunk.index_node_hash = index_node_hash - - chunk.position = position - - for key, value in kwargs.items(): - setattr(chunk, key, value) - - return chunk - - @staticmethod - def create_dataset_document_mock( - document_id: str = "doc-123", - dataset_id: str = "dataset-123", - tenant_id: str = "tenant-123", - dataset_process_rule_id: str = "rule-123", - doc_language: str = "en", - created_by: str = "user-123", - **kwargs, - ) -> Mock: - """ - Create a mock DatasetDocument with specified attributes. - - Args: - document_id: Unique identifier for the document - dataset_id: Dataset identifier - tenant_id: Tenant identifier - dataset_process_rule_id: Process rule identifier - doc_language: Document language - created_by: Creator user ID - **kwargs: Additional attributes to set on the mock - - Returns: - Mock object configured as a DatasetDocument instance - """ - document = Mock(spec=DatasetDocument) - - document.id = document_id - - document.dataset_id = dataset_id - - document.tenant_id = tenant_id - - document.dataset_process_rule_id = dataset_process_rule_id - - document.doc_language = doc_language - - document.created_by = created_by - - for key, value in kwargs.items(): - setattr(document, key, value) - - return document - - @staticmethod - def create_dataset_process_rule_mock( - rule_id: str = "rule-123", - **kwargs, - ) -> Mock: - """ - Create a mock DatasetProcessRule with specified attributes. - - Args: - rule_id: Unique identifier for the process rule - **kwargs: Additional attributes to set on the mock - - Returns: - Mock object configured as a DatasetProcessRule instance - """ - rule = Mock(spec=DatasetProcessRule) - - rule.id = rule_id - - rule.to_dict = Mock(return_value={"rules": {"parent_mode": "chunk"}}) - - for key, value in kwargs.items(): - setattr(rule, key, value) - - return rule - - @staticmethod - def create_rag_document_mock( - page_content: str = "Test document content", - doc_id: str = "doc-123", - doc_hash: str = "hash-123", - document_id: str = "doc-123", - dataset_id: str = "dataset-123", - **kwargs, - ) -> Document: - """ - Create a RAG Document with specified attributes. - - Args: - page_content: Document content text - doc_id: Document identifier in metadata - doc_hash: Document hash in metadata - document_id: Parent document ID in metadata - dataset_id: Dataset ID in metadata - **kwargs: Additional metadata fields - - Returns: - Document instance configured for testing - """ - metadata = { - "doc_id": doc_id, - "doc_hash": doc_hash, - "document_id": document_id, - "dataset_id": dataset_id, - } - - metadata.update(kwargs) - - return Document(page_content=page_content, metadata=metadata) - - @staticmethod - def create_embedding_model_instance_mock() -> Mock: - """ - Create a mock embedding model instance. 
- - Returns: - Mock object configured as an embedding model instance - """ - model_instance = Mock() - - model_instance.embed_documents = Mock(return_value=[[0.1] * 1536]) - - model_instance.embed_query = Mock(return_value=[0.1] * 1536) - - return model_instance - - @staticmethod - def create_vector_processor_mock() -> Mock: - """ - Create a mock vector processor (BaseVector implementation). - - Returns: - Mock object configured as a BaseVector instance - """ - processor = Mock(spec=BaseVector) - - processor.collection_name = "test_collection" - - processor.create = Mock() - - processor.add_texts = Mock() - - processor.text_exists = Mock(return_value=False) - - processor.delete_by_ids = Mock() - - processor.delete_by_metadata_field = Mock() - - processor.search_by_vector = Mock(return_value=[]) - - processor.search_by_full_text = Mock(return_value=[]) - - processor.delete = Mock() - - return processor - - @staticmethod - def create_index_processor_mock() -> Mock: - """ - Create a mock index processor. - - Returns: - Mock object configured as an index processor instance - """ - processor = Mock() - - processor.load = Mock() - - processor.clean = Mock() - - processor.transform = Mock(return_value=[]) - - return processor - - -# ============================================================================ -# Tests for VectorService -# ============================================================================ - - -class TestVectorService: - """ - Comprehensive unit tests for VectorService class. - - This test class covers all methods of the VectorService class, including - segment vector operations, child chunk operations, and integration with - various components like IndexProcessor and ModelManager. - """ - - # ======================================================================== - # Tests for create_segments_vector - # ======================================================================== - - @patch("services.vector_service.IndexProcessorFactory") - @patch("services.vector_service.db") - def test_create_segments_vector_regular_indexing(self, mock_db, mock_index_processor_factory): - """ - Test create_segments_vector with regular indexing (non-hierarchical). - - This test verifies that segments are correctly converted to RAG documents - and loaded into the index processor for regular indexing scenarios. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock( - doc_form=IndexStructureType.PARAGRAPH_INDEX, indexing_technique=IndexTechniqueType.HIGH_QUALITY - ) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - keywords_list = [["keyword1", "keyword2"]] - - mock_index_processor = VectorServiceTestDataFactory.create_index_processor_mock() - - mock_index_processor_factory.return_value.init_index_processor.return_value = mock_index_processor - - # Act - VectorService.create_segments_vector(keywords_list, [segment], dataset, IndexStructureType.PARAGRAPH_INDEX) - - # Assert - mock_index_processor.load.assert_called_once() - - call_args = mock_index_processor.load.call_args - - assert call_args[0][0] == dataset - - assert len(call_args[0][1]) == 1 - - assert call_args[1]["with_keywords"] is True - - assert call_args[1]["keywords_list"] == keywords_list - - @patch("services.vector_service.VectorService.generate_child_chunks") - @patch("services.vector_service.ModelManager.for_tenant") - @patch("services.vector_service.db") - def test_create_segments_vector_parent_child_indexing( - self, mock_db, mock_model_manager, mock_generate_child_chunks - ): - """ - Test create_segments_vector with parent-child indexing. - - This test verifies that for hierarchical indexing, child chunks are - generated instead of regular segment indexing. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock( - doc_form="parent_child_model", indexing_technique=IndexTechniqueType.HIGH_QUALITY - ) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - dataset_document = VectorServiceTestDataFactory.create_dataset_document_mock() - - processing_rule = VectorServiceTestDataFactory.create_dataset_process_rule_mock() - - mock_db.session.query.return_value.filter_by.return_value.first.return_value = dataset_document - - mock_db.session.query.return_value.where.return_value.first.return_value = processing_rule - - mock_embedding_model = VectorServiceTestDataFactory.create_embedding_model_instance_mock() - - mock_model_manager.return_value.get_model_instance.return_value = mock_embedding_model - - # Act - VectorService.create_segments_vector(None, [segment], dataset, "parent_child_model") - - # Assert - mock_generate_child_chunks.assert_called_once() - - @patch("services.vector_service.db") - def test_create_segments_vector_missing_document(self, mock_db): - """ - Test create_segments_vector when document is missing. - - This test verifies that when a document is not found, the segment - is skipped with a warning log. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock( - doc_form="parent_child_model", indexing_technique=IndexTechniqueType.HIGH_QUALITY - ) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - mock_db.session.query.return_value.filter_by.return_value.first.return_value = None - - # Act - VectorService.create_segments_vector(None, [segment], dataset, "parent_child_model") - - # Assert - # Should not raise an error, just skip the segment - - @patch("services.vector_service.db") - def test_create_segments_vector_missing_processing_rule(self, mock_db): - """ - Test create_segments_vector when processing rule is missing. - - This test verifies that when a processing rule is not found, a - ValueError is raised. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock( - doc_form="parent_child_model", indexing_technique=IndexTechniqueType.HIGH_QUALITY - ) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - dataset_document = VectorServiceTestDataFactory.create_dataset_document_mock() - - mock_db.session.query.return_value.filter_by.return_value.first.return_value = dataset_document - - mock_db.session.query.return_value.where.return_value.first.return_value = None - - # Act & Assert - with pytest.raises(ValueError, match="No processing rule found"): - VectorService.create_segments_vector(None, [segment], dataset, "parent_child_model") - - @patch("services.vector_service.db") - def test_create_segments_vector_economy_indexing_technique(self, mock_db): - """ - Test create_segments_vector with economy indexing technique. - - This test verifies that when indexing_technique is not high_quality, - a ValueError is raised for parent-child indexing. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock( - doc_form="parent_child_model", indexing_technique=IndexTechniqueType.ECONOMY - ) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - dataset_document = VectorServiceTestDataFactory.create_dataset_document_mock() - - processing_rule = VectorServiceTestDataFactory.create_dataset_process_rule_mock() - - mock_db.session.query.return_value.filter_by.return_value.first.return_value = dataset_document - - mock_db.session.query.return_value.where.return_value.first.return_value = processing_rule - - # Act & Assert - with pytest.raises(ValueError, match="The knowledge base index technique is not high quality"): - VectorService.create_segments_vector(None, [segment], dataset, "parent_child_model") - - @patch("services.vector_service.IndexProcessorFactory") - @patch("services.vector_service.db") - def test_create_segments_vector_empty_documents(self, mock_db, mock_index_processor_factory): - """ - Test create_segments_vector with empty documents list. - - This test verifies that when no documents are created, the index - processor is not called. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_index_processor = VectorServiceTestDataFactory.create_index_processor_mock() - - mock_index_processor_factory.return_value.init_index_processor.return_value = mock_index_processor - - # Act - VectorService.create_segments_vector(None, [], dataset, IndexStructureType.PARAGRAPH_INDEX) - - # Assert - mock_index_processor.load.assert_not_called() - - # ======================================================================== - # Tests for update_segment_vector - # ======================================================================== - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_update_segment_vector_high_quality(self, mock_db, mock_vector_class): - """ - Test update_segment_vector with high_quality indexing technique. - - This test verifies that segments are correctly updated in the vector - store when using high_quality indexing. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.HIGH_QUALITY) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.update_segment_vector(None, segment, dataset) - - # Assert - mock_vector.delete_by_ids.assert_called_once_with([segment.index_node_id]) - - mock_vector.add_texts.assert_called_once() - - @patch("services.vector_service.Keyword") - @patch("services.vector_service.db") - def test_update_segment_vector_economy_with_keywords(self, mock_db, mock_keyword_class): - """ - Test update_segment_vector with economy indexing and keywords. - - This test verifies that segments are correctly updated in the keyword - index when using economy indexing with keywords. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - keywords = ["keyword1", "keyword2"] - - mock_keyword = Mock() - - mock_keyword.delete_by_ids = Mock() - - mock_keyword.add_texts = Mock() - - mock_keyword_class.return_value = mock_keyword - - # Act - VectorService.update_segment_vector(keywords, segment, dataset) - - # Assert - mock_keyword.delete_by_ids.assert_called_once_with([segment.index_node_id]) - - mock_keyword.add_texts.assert_called_once() - - call_args = mock_keyword.add_texts.call_args - - assert call_args[1]["keywords_list"] == [keywords] - - @patch("services.vector_service.Keyword") - @patch("services.vector_service.db") - def test_update_segment_vector_economy_without_keywords(self, mock_db, mock_keyword_class): - """ - Test update_segment_vector with economy indexing without keywords. - - This test verifies that segments are correctly updated in the keyword - index when using economy indexing without keywords. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - mock_keyword = Mock() - - mock_keyword.delete_by_ids = Mock() - - mock_keyword.add_texts = Mock() - - mock_keyword_class.return_value = mock_keyword - - # Act - VectorService.update_segment_vector(None, segment, dataset) - - # Assert - mock_keyword.delete_by_ids.assert_called_once_with([segment.index_node_id]) - - mock_keyword.add_texts.assert_called_once() - - call_args = mock_keyword.add_texts.call_args - - assert "keywords_list" not in call_args[1] or call_args[1].get("keywords_list") is None - - # ======================================================================== - # Tests for generate_child_chunks - # ======================================================================== - - @patch("services.vector_service.IndexProcessorFactory") - @patch("services.vector_service.db") - def test_generate_child_chunks_with_children(self, mock_db, mock_index_processor_factory): - """ - Test generate_child_chunks when children are generated. - - This test verifies that child chunks are correctly generated and - saved to the database when the index processor returns children. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - dataset_document = VectorServiceTestDataFactory.create_dataset_document_mock() - - processing_rule = VectorServiceTestDataFactory.create_dataset_process_rule_mock() - - embedding_model = VectorServiceTestDataFactory.create_embedding_model_instance_mock() - - child_document = VectorServiceTestDataFactory.create_rag_document_mock( - page_content="Child content", doc_id="child-node-123" - ) - - child_document.children = [child_document] - - mock_index_processor = VectorServiceTestDataFactory.create_index_processor_mock() - - mock_index_processor.transform.return_value = [child_document] - - mock_index_processor_factory.return_value.init_index_processor.return_value = mock_index_processor - - # Act - VectorService.generate_child_chunks(segment, dataset_document, dataset, embedding_model, processing_rule, False) - - # Assert - mock_index_processor.transform.assert_called_once() - - mock_index_processor.load.assert_called_once() - - mock_db.session.add.assert_called() - - mock_db.session.commit.assert_called_once() - - @patch("services.vector_service.IndexProcessorFactory") - @patch("services.vector_service.db") - def test_generate_child_chunks_regenerate(self, mock_db, mock_index_processor_factory): - """ - Test generate_child_chunks with regenerate=True. - - This test verifies that when regenerate is True, existing child chunks - are cleaned before generating new ones. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - dataset_document = VectorServiceTestDataFactory.create_dataset_document_mock() - - processing_rule = VectorServiceTestDataFactory.create_dataset_process_rule_mock() - - embedding_model = VectorServiceTestDataFactory.create_embedding_model_instance_mock() - - mock_index_processor = VectorServiceTestDataFactory.create_index_processor_mock() - - mock_index_processor.transform.return_value = [] - - mock_index_processor_factory.return_value.init_index_processor.return_value = mock_index_processor - - # Act - VectorService.generate_child_chunks(segment, dataset_document, dataset, embedding_model, processing_rule, True) - - # Assert - mock_index_processor.clean.assert_called_once() - - call_args = mock_index_processor.clean.call_args - - assert call_args[0][0] == dataset - - assert call_args[0][1] == [segment.index_node_id] - - assert call_args[1]["with_keywords"] is True - - assert call_args[1]["delete_child_chunks"] is True - - @patch("services.vector_service.IndexProcessorFactory") - @patch("services.vector_service.db") - def test_generate_child_chunks_no_children(self, mock_db, mock_index_processor_factory): - """ - Test generate_child_chunks when no children are generated. - - This test verifies that when the index processor returns no children, - no child chunks are saved to the database. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - dataset_document = VectorServiceTestDataFactory.create_dataset_document_mock() - - processing_rule = VectorServiceTestDataFactory.create_dataset_process_rule_mock() - - embedding_model = VectorServiceTestDataFactory.create_embedding_model_instance_mock() - - mock_index_processor = VectorServiceTestDataFactory.create_index_processor_mock() - - mock_index_processor.transform.return_value = [] - - mock_index_processor_factory.return_value.init_index_processor.return_value = mock_index_processor - - # Act - VectorService.generate_child_chunks(segment, dataset_document, dataset, embedding_model, processing_rule, False) - - # Assert - mock_index_processor.transform.assert_called_once() - - mock_index_processor.load.assert_not_called() - - mock_db.session.add.assert_not_called() - - # ======================================================================== - # Tests for create_child_chunk_vector - # ======================================================================== - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_create_child_chunk_vector_high_quality(self, mock_db, mock_vector_class): - """ - Test create_child_chunk_vector with high_quality indexing. - - This test verifies that child chunk vectors are correctly created - when using high_quality indexing. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.HIGH_QUALITY) - - child_chunk = VectorServiceTestDataFactory.create_child_chunk_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.create_child_chunk_vector(child_chunk, dataset) - - # Assert - mock_vector.add_texts.assert_called_once() - - call_args = mock_vector.add_texts.call_args - - assert call_args[1]["duplicate_check"] is True - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_create_child_chunk_vector_economy(self, mock_db, mock_vector_class): - """ - Test create_child_chunk_vector with economy indexing. - - This test verifies that child chunk vectors are not created when - using economy indexing. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - - child_chunk = VectorServiceTestDataFactory.create_child_chunk_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.create_child_chunk_vector(child_chunk, dataset) - - # Assert - mock_vector.add_texts.assert_not_called() - - # ======================================================================== - # Tests for update_child_chunk_vector - # ======================================================================== - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_update_child_chunk_vector_with_all_operations(self, mock_db, mock_vector_class): - """ - Test update_child_chunk_vector with new, update, and delete operations. - - This test verifies that child chunk vectors are correctly updated - when there are new chunks, updated chunks, and deleted chunks. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.HIGH_QUALITY) - - new_chunk = VectorServiceTestDataFactory.create_child_chunk_mock(chunk_id="new-chunk-1") - - update_chunk = VectorServiceTestDataFactory.create_child_chunk_mock(chunk_id="update-chunk-1") - - delete_chunk = VectorServiceTestDataFactory.create_child_chunk_mock(chunk_id="delete-chunk-1") - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.update_child_chunk_vector([new_chunk], [update_chunk], [delete_chunk], dataset) - - # Assert - mock_vector.delete_by_ids.assert_called_once() - - delete_ids = mock_vector.delete_by_ids.call_args[0][0] - - assert update_chunk.index_node_id in delete_ids - - assert delete_chunk.index_node_id in delete_ids - - mock_vector.add_texts.assert_called_once() - - call_args = mock_vector.add_texts.call_args - - assert len(call_args[0][0]) == 2 # new_chunk + update_chunk - - assert call_args[1]["duplicate_check"] is True - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_update_child_chunk_vector_only_new(self, mock_db, mock_vector_class): - """ - Test update_child_chunk_vector with only new chunks. - - This test verifies that when only new chunks are provided, only - add_texts is called, not delete_by_ids. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.HIGH_QUALITY) - - new_chunk = VectorServiceTestDataFactory.create_child_chunk_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.update_child_chunk_vector([new_chunk], [], [], dataset) - - # Assert - mock_vector.delete_by_ids.assert_not_called() - - mock_vector.add_texts.assert_called_once() - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_update_child_chunk_vector_only_delete(self, mock_db, mock_vector_class): - """ - Test update_child_chunk_vector with only deleted chunks. - - This test verifies that when only deleted chunks are provided, only - delete_by_ids is called, not add_texts. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.HIGH_QUALITY) - - delete_chunk = VectorServiceTestDataFactory.create_child_chunk_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.update_child_chunk_vector([], [], [delete_chunk], dataset) - - # Assert - mock_vector.delete_by_ids.assert_called_once_with([delete_chunk.index_node_id]) - - mock_vector.add_texts.assert_not_called() - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_update_child_chunk_vector_economy(self, mock_db, mock_vector_class): - """ - Test update_child_chunk_vector with economy indexing. - - This test verifies that child chunk vectors are not updated when - using economy indexing. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - - new_chunk = VectorServiceTestDataFactory.create_child_chunk_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.update_child_chunk_vector([new_chunk], [], [], dataset) - - # Assert - mock_vector.delete_by_ids.assert_not_called() - - mock_vector.add_texts.assert_not_called() - - # ======================================================================== - # Tests for delete_child_chunk_vector - # ======================================================================== - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_delete_child_chunk_vector_high_quality(self, mock_db, mock_vector_class): - """ - Test delete_child_chunk_vector with high_quality indexing. - - This test verifies that child chunk vectors are correctly deleted - when using high_quality indexing. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.HIGH_QUALITY) - - child_chunk = VectorServiceTestDataFactory.create_child_chunk_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.delete_child_chunk_vector(child_chunk, dataset) - - # Assert - mock_vector.delete_by_ids.assert_called_once_with([child_chunk.index_node_id]) - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_delete_child_chunk_vector_economy(self, mock_db, mock_vector_class): - """ - Test delete_child_chunk_vector with economy indexing. - - This test verifies that child chunk vectors are not deleted when - using economy indexing. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - - child_chunk = VectorServiceTestDataFactory.create_child_chunk_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.delete_child_chunk_vector(child_chunk, dataset) - - # Assert - mock_vector.delete_by_ids.assert_not_called() - - -# ============================================================================ -# Tests for Vector Class -# ============================================================================ - - -class TestVector: - """ - Comprehensive unit tests for Vector class. - - This test class covers all methods of the Vector class, including - initialization, collection management, embedding operations, vector - database operations, and search functionality. - """ - - # ======================================================================== - # Tests for Vector Initialization - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_initialization_default_attributes(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector initialization with default attributes. - - This test verifies that Vector is correctly initialized with default - attributes when none are provided. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - # Act - vector = Vector(dataset=dataset) - - # Assert - assert vector._dataset == dataset - - assert vector._attributes == ["doc_id", "dataset_id", "document_id", "doc_hash"] - - mock_get_embeddings.assert_called_once() - - mock_init_vector.assert_called_once() - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_initialization_custom_attributes(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector initialization with custom attributes. - - This test verifies that Vector is correctly initialized with custom - attributes when provided. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - custom_attributes = ["custom_attr1", "custom_attr2"] - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - # Act - vector = Vector(dataset=dataset, attributes=custom_attributes) - - # Assert - assert vector._dataset == dataset - - assert vector._attributes == custom_attributes - - # ======================================================================== - # Tests for Vector.create - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_create_with_texts(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.create with texts list. - - This test verifies that documents are correctly embedded and created - in the vector store with batch processing. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - documents = [ - VectorServiceTestDataFactory.create_rag_document_mock(page_content=f"Content {i}") for i in range(5) - ] - - mock_embeddings = Mock() - - mock_embeddings.embed_documents = Mock(return_value=[[0.1] * 1536] * 5) - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - vector.create(texts=documents) - - # Assert - mock_embeddings.embed_documents.assert_called() - - mock_vector_processor.create.assert_called() - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_create_empty_texts(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.create with empty texts list. - - This test verifies that when texts is None or empty, no operations - are performed. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - vector.create(texts=None) - - # Assert - mock_embeddings.embed_documents.assert_not_called() - - mock_vector_processor.create.assert_not_called() - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_create_large_batch(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.create with large batch of documents. - - This test verifies that large batches are correctly processed in - chunks of 1000 documents. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - documents = [ - VectorServiceTestDataFactory.create_rag_document_mock(page_content=f"Content {i}") for i in range(2500) - ] - - mock_embeddings = Mock() - - mock_embeddings.embed_documents = Mock(return_value=[[0.1] * 1536] * 1000) - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - vector.create(texts=documents) - - # Assert - # Should be called 3 times (1000, 1000, 500) - assert mock_embeddings.embed_documents.call_count == 3 - - assert mock_vector_processor.create.call_count == 3 - - # ======================================================================== - # Tests for Vector.add_texts - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_add_texts_without_duplicate_check(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.add_texts without duplicate check. - - This test verifies that documents are added without checking for - duplicates when duplicate_check is False. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - documents = [VectorServiceTestDataFactory.create_rag_document_mock()] - - mock_embeddings = Mock() - - mock_embeddings.embed_documents = Mock(return_value=[[0.1] * 1536]) - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - vector.add_texts(documents, duplicate_check=False) - - # Assert - mock_embeddings.embed_documents.assert_called_once() - - mock_vector_processor.create.assert_called_once() - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_add_texts_with_duplicate_check(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.add_texts with duplicate check. - - This test verifies that duplicate documents are filtered out when - duplicate_check is True. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - documents = [VectorServiceTestDataFactory.create_rag_document_mock(doc_id="doc-123")] - - mock_embeddings = Mock() - - mock_embeddings.embed_documents = Mock(return_value=[[0.1] * 1536]) - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_processor.text_exists = Mock(return_value=True) # Document exists - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - vector.add_texts(documents, duplicate_check=True) - - # Assert - mock_vector_processor.text_exists.assert_called_once_with("doc-123") - - mock_embeddings.embed_documents.assert_not_called() - - mock_vector_processor.create.assert_not_called() - - # ======================================================================== - # Tests for Vector.text_exists - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_text_exists_true(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.text_exists when text exists. - - This test verifies that text_exists correctly returns True when - a document exists in the vector store. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_processor.text_exists = Mock(return_value=True) - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - result = vector.text_exists("doc-123") - - # Assert - assert result is True - - mock_vector_processor.text_exists.assert_called_once_with("doc-123") - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_text_exists_false(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.text_exists when text does not exist. - - This test verifies that text_exists correctly returns False when - a document does not exist in the vector store. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_processor.text_exists = Mock(return_value=False) - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - result = vector.text_exists("doc-123") - - # Assert - assert result is False - - mock_vector_processor.text_exists.assert_called_once_with("doc-123") - - # ======================================================================== - # Tests for Vector.delete_by_ids - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_delete_by_ids(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.delete_by_ids. - - This test verifies that documents are correctly deleted by their IDs. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - ids = ["doc-1", "doc-2", "doc-3"] - - # Act - vector.delete_by_ids(ids) - - # Assert - mock_vector_processor.delete_by_ids.assert_called_once_with(ids) - - # ======================================================================== - # Tests for Vector.delete_by_metadata_field - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_delete_by_metadata_field(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.delete_by_metadata_field. - - This test verifies that documents are correctly deleted by metadata - field value. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - vector.delete_by_metadata_field("dataset_id", "dataset-123") - - # Assert - mock_vector_processor.delete_by_metadata_field.assert_called_once_with("dataset_id", "dataset-123") - - # ======================================================================== - # Tests for Vector.search_by_vector - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_search_by_vector(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.search_by_vector. - - This test verifies that vector search correctly embeds the query - and searches the vector store. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - query = "test query" - - query_vector = [0.1] * 1536 - - mock_embeddings = Mock() - - mock_embeddings.embed_query = Mock(return_value=query_vector) - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_processor.search_by_vector = Mock(return_value=[]) - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - result = vector.search_by_vector(query) - - # Assert - mock_embeddings.embed_query.assert_called_once_with(query) - - mock_vector_processor.search_by_vector.assert_called_once_with(query_vector) - - assert result == [] - - # ======================================================================== - # Tests for Vector.search_by_full_text - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_search_by_full_text(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.search_by_full_text. - - This test verifies that full-text search correctly searches the - vector store without embedding the query. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - query = "test query" - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_processor.search_by_full_text = Mock(return_value=[]) - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - result = vector.search_by_full_text(query) - - # Assert - mock_vector_processor.search_by_full_text.assert_called_once_with(query) - - assert result == [] - - # ======================================================================== - # Tests for Vector.delete - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.redis_client") - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_delete(self, mock_get_embeddings, mock_init_vector, mock_redis_client): - """ - Test Vector.delete. - - This test verifies that the collection is deleted and Redis cache - is cleared. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_processor.collection_name = "test_collection" - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - vector.delete() - - # Assert - mock_vector_processor.delete.assert_called_once() - - mock_redis_client.delete.assert_called_once_with("vector_indexing_test_collection") - - # ======================================================================== - # Tests for Vector.get_vector_factory - # ======================================================================== - - def test_vector_get_vector_factory_chroma(self): - """ - Test Vector.get_vector_factory for Chroma. - - This test verifies that the correct factory class is returned for - Chroma vector type. - """ - # Act - factory_class = Vector.get_vector_factory(VectorType.CHROMA) - - # Assert - assert factory_class is not None - - # Verify it's the correct factory by checking the module name - assert "chroma" in factory_class.__module__.lower() - - def test_vector_get_vector_factory_milvus(self): - """ - Test Vector.get_vector_factory for Milvus. - - This test verifies that the correct factory class is returned for - Milvus vector type. - """ - # Act - factory_class = Vector.get_vector_factory(VectorType.MILVUS) - - # Assert - assert factory_class is not None - - assert "milvus" in factory_class.__module__.lower() - - def test_vector_get_vector_factory_invalid_type(self): - """ - Test Vector.get_vector_factory with invalid vector type. - - This test verifies that a ValueError is raised when an invalid - vector type is provided. 
- """ - # Act & Assert - with pytest.raises(ValueError, match="Vector store .* is not supported"): - Vector.get_vector_factory("invalid_type") - - # ======================================================================== - # Tests for Vector._filter_duplicate_texts - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_filter_duplicate_texts(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector._filter_duplicate_texts. - - This test verifies that duplicate documents are correctly filtered - based on doc_id in metadata. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_processor.text_exists = Mock(side_effect=[True, False]) # First exists, second doesn't - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - doc1 = VectorServiceTestDataFactory.create_rag_document_mock(doc_id="doc-1") - - doc2 = VectorServiceTestDataFactory.create_rag_document_mock(doc_id="doc-2") - - documents = [doc1, doc2] - - # Act - filtered = vector._filter_duplicate_texts(documents) - - # Assert - assert len(filtered) == 1 - - assert filtered[0].metadata["doc_id"] == "doc-2" - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_filter_duplicate_texts_no_metadata(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector._filter_duplicate_texts with documents without metadata. - - This test verifies that documents without metadata are not filtered. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - doc1 = Document(page_content="Content 1", metadata=None) - - doc2 = Document(page_content="Content 2", metadata={}) - - documents = [doc1, doc2] - - # Act - filtered = vector._filter_duplicate_texts(documents) - - # Assert - assert len(filtered) == 2 - - # ======================================================================== - # Tests for Vector._get_embeddings - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.CacheEmbedding") - @patch("core.rag.datasource.vdb.vector_factory.ModelManager.for_tenant") - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - def test_vector_get_embeddings(self, mock_init_vector, mock_model_manager, mock_cache_embedding): - """ - Test Vector._get_embeddings. - - This test verifies that embeddings are correctly retrieved from - ModelManager and wrapped in CacheEmbedding. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock( - embedding_model_provider="openai", embedding_model="text-embedding-ada-002" - ) - - mock_embedding_model = VectorServiceTestDataFactory.create_embedding_model_instance_mock() - - mock_model_manager.return_value.get_model_instance.return_value = mock_embedding_model - - mock_cache_embedding_instance = Mock() - - mock_cache_embedding.return_value = mock_cache_embedding_instance - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - # Act - vector = Vector(dataset=dataset) - - # Assert - mock_model_manager.return_value.get_model_instance.assert_called_once() - - mock_cache_embedding.assert_called_once_with(mock_embedding_model) - - assert vector._embeddings == mock_cache_embedding_instance diff --git a/api/tests/unit_tests/tasks/test_dataset_indexing_task.py b/api/tests/unit_tests/tasks/test_dataset_indexing_task.py index 5dad58b8f1..b74079bd69 100644 --- a/api/tests/unit_tests/tasks/test_dataset_indexing_task.py +++ b/api/tests/unit_tests/tasks/test_dataset_indexing_task.py @@ -89,9 +89,6 @@ def mock_db_session(): session = MagicMock() session._shared_data = {"dataset": None, "documents": []} - # Keep a pointer so repeated Document.first() calls iterate across provided docs - session._doc_first_idx = 0 - def _get_entity(stmt) -> type | None: """Extract the mapped entity class from a SQLAlchemy select statement.""" try: @@ -1591,18 +1588,7 @@ class TestDocumentIndexingTaskSummaryFlow: need_summary=True, ) - dataset_query = MagicMock() - dataset_query.where.return_value = dataset_query - dataset_query.first.return_value = dataset - phase1_docs = [SimpleNamespace(id="doc-1"), SimpleNamespace(id="doc-2"), SimpleNamespace(id="doc-3")] - phase1_document_query = MagicMock() - phase1_document_query.where.return_value = phase1_document_query - phase1_document_query.all.return_value = phase1_docs - - summary_document_query = MagicMock() - summary_document_query.where.return_value = summary_document_query - summary_document_query.all.return_value = [doc_eligible, doc_skip_form, doc_skip_status] session1 = MagicMock() session2 = MagicMock() @@ -1657,18 +1643,6 @@ class TestDocumentIndexingTaskSummaryFlow: need_summary=True, ) - dataset_query = MagicMock() - dataset_query.where.return_value = dataset_query - dataset_query.first.return_value = dataset - - phase1_query = MagicMock() - phase1_query.where.return_value = phase1_query - phase1_query.all.return_value = [SimpleNamespace(id="doc-1")] - - summary_query = MagicMock() - summary_query.where.return_value = summary_query - summary_query.all.return_value = [doc_eligible] - session1 = MagicMock() session2 = MagicMock() session2.begin.return_value = nullcontext() diff --git a/api/tests/unit_tests/tasks/test_trigger_processing_tasks.py b/api/tests/unit_tests/tasks/test_trigger_processing_tasks.py new file mode 100644 index 0000000000..59da5cc7a2 --- /dev/null +++ b/api/tests/unit_tests/tasks/test_trigger_processing_tasks.py @@ -0,0 +1,204 @@ +from unittest.mock import MagicMock, patch + +import pytest + +import tasks.trigger_processing_tasks as trigger_processing_tasks_module +from services.errors.app import QuotaExceededError +from tasks.trigger_processing_tasks import dispatch_triggered_workflow + + +class TestDispatchTriggeredWorkflow: + """Unit tests covering branch behaviours of ``dispatch_triggered_workflow``. 
+
+    The covered branches are:
+    - workflow missing for ``plugin_trigger.app_id`` → log + ``continue``
+    - ``QuotaService.reserve`` raising ``QuotaExceededError`` →
+      ``mark_tenant_triggers_rate_limited`` + early ``return``
+    - ``trigger_workflow_async`` succeeds →
+      ``quota_charge.commit()`` + ``dispatched_count`` increments
+    """
+
+    @pytest.fixture
+    def subscription(self):
+        sub = MagicMock()
+        sub.id = "subscription-123"
+        sub.tenant_id = "tenant-123"
+        sub.provider_id = "langgenius/test_plugin/test_plugin"
+        sub.endpoint_id = "endpoint-123"
+        sub.credentials = {}
+        sub.credential_type = "api_key"
+        return sub
+
+    @pytest.fixture
+    def plugin_trigger(self):
+        trigger = MagicMock()
+        trigger.id = "plugin-trigger-123"
+        trigger.app_id = "app-123"
+        trigger.node_id = "node-123"
+        return trigger
+
+    @pytest.fixture
+    def provider_controller(self):
+        controller = MagicMock()
+        controller.plugin_unique_identifier = "langgenius/test_plugin:0.0.1"
+        controller.entity.identity.name = "Test Plugin"
+        controller.entity.identity.icon = "icon.svg"
+        controller.entity.identity.icon_dark = "icon_dark.svg"
+        return controller
+
+    @pytest.fixture
+    def dispatch_mocks(self, subscription, plugin_trigger, provider_controller):
+        """Patch all external dependencies reached by ``dispatch_triggered_workflow``.
+
+        Defaults are configured so the code flow can reach the final async
+        trigger block (line ~385); each test overrides specific handles
+        (``get_workflows``, ``reserve``, ``create_end_user_batch``, ...) to
+        drive the path it targets.
+        """
+        session_cm = MagicMock()
+        session_cm.__enter__.return_value = MagicMock()
+        session_cm.__exit__.return_value = False
+
+        invoke_response = MagicMock()
+        invoke_response.cancelled = False
+        invoke_response.variables = {}
+
+        quota_charge = MagicMock()
+
+        with (
+            patch.object(
+                trigger_processing_tasks_module.TriggerHttpRequestCachingService,
+                "get_request",
+                return_value=MagicMock(),
+            ),
+            patch.object(
+                trigger_processing_tasks_module.TriggerHttpRequestCachingService,
+                "get_payload",
+                return_value=MagicMock(),
+            ),
+            patch.object(
+                trigger_processing_tasks_module.TriggerSubscriptionOperatorService,
+                "get_subscriber_triggers",
+                return_value=[plugin_trigger],
+            ),
+            patch.object(
+                trigger_processing_tasks_module.TriggerManager,
+                "get_trigger_provider",
+                return_value=provider_controller,
+            ),
+            patch.object(
+                trigger_processing_tasks_module.TriggerManager,
+                "invoke_trigger_event",
+                return_value=invoke_response,
+            ) as invoke_trigger_event,
+            patch.object(
+                trigger_processing_tasks_module.TriggerEventNodeData,
+                "model_validate",
+                return_value=MagicMock(),
+            ),
+            patch.object(
+                trigger_processing_tasks_module,
+                "_get_latest_workflows_by_app_ids",
+            ) as get_workflows,
+            patch.object(
+                trigger_processing_tasks_module.EndUserService,
+                "create_end_user_batch",
+                return_value={},
+            ) as create_end_user_batch,
+            patch.object(
+                trigger_processing_tasks_module.session_factory,
+                "create_session",
+                return_value=session_cm,
+            ),
+            patch.object(
+                trigger_processing_tasks_module.QuotaService,
+                "reserve",
+                return_value=quota_charge,
+            ) as reserve,
+            patch.object(
+                trigger_processing_tasks_module.AppTriggerService,
+                "mark_tenant_triggers_rate_limited",
+            ) as mark_rate_limited,
+            patch.object(
+                trigger_processing_tasks_module.AsyncWorkflowService,
+                "trigger_workflow_async",
+            ) as trigger_workflow_async,
+        ):
+            yield {
+                "get_workflows": get_workflows,
+                "reserve": reserve,
+                "quota_charge": quota_charge,
+                "mark_rate_limited": mark_rate_limited,
+ "invoke_trigger_event": invoke_trigger_event, + "invoke_response": invoke_response, + "create_end_user_batch": create_end_user_batch, + "trigger_workflow_async": trigger_workflow_async, + } + + def test_dispatch_skips_when_workflow_missing(self, subscription, dispatch_mocks): + """Covers missing workflow → log + ``continue``.""" + dispatch_mocks["get_workflows"].return_value = {} + + dispatched = dispatch_triggered_workflow( + user_id="user-123", + subscription=subscription, + event_name="test_event", + request_id="request-123", + ) + + assert dispatched == 0 + dispatch_mocks["reserve"].assert_not_called() + dispatch_mocks["invoke_trigger_event"].assert_not_called() + dispatch_mocks["mark_rate_limited"].assert_not_called() + + def test_dispatch_marks_rate_limited_when_quota_exceeded(self, subscription, plugin_trigger, dispatch_mocks): + """Covers QuotaExceededError → mark rate-limited + early return.""" + workflow_mock = MagicMock() + workflow_mock.walk_nodes.return_value = iter( + [(plugin_trigger.node_id, {"type": trigger_processing_tasks_module.TRIGGER_PLUGIN_NODE_TYPE})] + ) + dispatch_mocks["get_workflows"].return_value = {plugin_trigger.app_id: workflow_mock} + dispatch_mocks["reserve"].side_effect = QuotaExceededError( + feature="trigger", tenant_id=subscription.tenant_id, required=1 + ) + + dispatched = dispatch_triggered_workflow( + user_id="user-123", + subscription=subscription, + event_name="test_event", + request_id="request-123", + ) + + assert dispatched == 0 + dispatch_mocks["reserve"].assert_called_once() + dispatch_mocks["mark_rate_limited"].assert_called_once_with(subscription.tenant_id) + dispatch_mocks["invoke_trigger_event"].assert_not_called() + + def test_dispatch_commits_quota_and_counts_when_workflow_triggered( + self, subscription, plugin_trigger, dispatch_mocks + ): + """Happy path: end user exists and async trigger succeeds.""" + workflow_mock = MagicMock() + workflow_mock.id = "workflow-123" + workflow_mock.walk_nodes.return_value = iter( + [(plugin_trigger.node_id, {"type": trigger_processing_tasks_module.TRIGGER_PLUGIN_NODE_TYPE})] + ) + dispatch_mocks["get_workflows"].return_value = {plugin_trigger.app_id: workflow_mock} + + end_user_mock = MagicMock() + dispatch_mocks["create_end_user_batch"].return_value = {plugin_trigger.app_id: end_user_mock} + + dispatched = dispatch_triggered_workflow( + user_id="user-123", + subscription=subscription, + event_name="test_event", + request_id="request-123", + ) + + assert dispatched == 1 + dispatch_mocks["trigger_workflow_async"].assert_called_once() + _, kwargs = dispatch_mocks["trigger_workflow_async"].call_args + assert kwargs["user"] is end_user_mock + dispatch_mocks["quota_charge"].commit.assert_called_once() + dispatch_mocks["quota_charge"].refund.assert_not_called() + dispatch_mocks["mark_rate_limited"].assert_not_called() diff --git a/api/uv.lock b/api/uv.lock index 239dbf5ac8..6b2da24994 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -613,29 +613,29 @@ wheels = [ [[package]] name = "boto3" -version = "1.42.91" +version = "1.42.96" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore" }, { name = "jmespath" }, { name = "s3transfer" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a7/c0/98b8cec7ca22dde776df48c58940ae1abc425593959b7226e270760d726f/boto3-1.42.91.tar.gz", hash = "sha256:03d70532b17f7f84df37ca7e8c21553280454dea53ae12b15d1cfef9b16fcb8a", size = 113181, upload-time = "2026-04-17T19:31:06.251Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/a6/2d/69fb3acd50bab83fb295c167d33c4b653faeb5fb0f42bfca4d9b69d6fb68/boto3-1.42.96.tar.gz", hash = "sha256:b38a9e4a3fbbee9017252576f1379780d0a5814768676c08df2f539d31fcdd68", size = 113203, upload-time = "2026-04-24T19:47:18.677Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/02/29/faba6521257c34085cc9b439ef98235b581772580f417fa3629728007270/boto3-1.42.91-py3-none-any.whl", hash = "sha256:04e72071cde022951ce7f81bd9933c90095ab8923e8ced61c8dacfe9edac0f5c", size = 140553, upload-time = "2026-04-17T19:31:02.57Z" }, + { url = "https://files.pythonhosted.org/packages/2b/9d/b3f617d011c42eb804d993103b8fa9acdce153e181a3042f58bfe33d7cb4/boto3-1.42.96-py3-none-any.whl", hash = "sha256:2f4566da2c209a98bdbfc874d813ef231c84ad24e4f815e9bc91de5f63351a24", size = 140557, upload-time = "2026-04-24T19:47:15.824Z" }, ] [[package]] name = "boto3-stubs" -version = "1.42.92" +version = "1.42.96" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore-stubs" }, { name = "types-s3transfer" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fa/b4/7f472d64a89f6aa6b8e8eeadc876667b7e4edfb526c6118efe2b2c98ba17/boto3_stubs-1.42.92.tar.gz", hash = "sha256:4bc934069c5e8c7b3cdd2442569dae14e8272fe207d445bd38aa578b8463638f", size = 102696, upload-time = "2026-04-20T19:55:19.858Z" } +sdist = { url = "https://files.pythonhosted.org/packages/77/86/65f45f84621cccc2471871088bab8fe515b4346ba9e48d9001484ec440d6/boto3_stubs-1.42.96.tar.gz", hash = "sha256:1e7819c34d1eae8e5e3cfaf9d144fdcad65aad184b380488871de1d0b2851879", size = 102691, upload-time = "2026-04-24T20:25:13.984Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6c/ce/2fe2c6456f8dc0b8bb8d80e05e154c7975ec058991bedf54f3aeed634b79/boto3_stubs-1.42.92-py3-none-any.whl", hash = "sha256:b3994e60f0133b2dd3d9a88ceaeef48fa6367d9a9429426e919575768a1ad9c6", size = 70666, upload-time = "2026-04-20T19:55:16.398Z" }, + { url = "https://files.pythonhosted.org/packages/a7/51/bdac1ff9fd4321091183776c5adffce5fc7b4d0fec7e38af9064e24a2497/boto3_stubs-1.42.96-py3-none-any.whl", hash = "sha256:2c112e257f40006147a53f6f62075804689154271973b2807f5656feaa804216", size = 70668, upload-time = "2026-04-24T20:25:09.736Z" }, ] [package.optional-dependencies] @@ -645,16 +645,16 @@ bedrock-runtime = [ [[package]] name = "botocore" -version = "1.42.91" +version = "1.42.96" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jmespath" }, { name = "python-dateutil" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/21/bc/a4b7c46471c2e789ad8c4c7acfd7f302fdb481d93ff870f441249b924ae6/botocore-1.42.91.tar.gz", hash = "sha256:d252e27bc454afdbf5ed3dc617aa423f2c855c081e98b7963093399483ecc698", size = 15213010, upload-time = "2026-04-17T19:30:50.793Z" } +sdist = { url = "https://files.pythonhosted.org/packages/61/77/2c333622a1d47cf5bf73cdcab0cb6c92addafbef2ec05f81b9f75687d9e5/botocore-1.42.96.tar.gz", hash = "sha256:75b3b841ffacaa944f645196655a21ca777591dd8911e732bfb6614545af0250", size = 15263344, upload-time = "2026-04-24T19:47:05.283Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b1/fc/24cc0a47c824f13933e210e9ad034b4fba22f7185b8d904c0fbf5a3b2be8/botocore-1.42.91-py3-none-any.whl", hash = "sha256:7a28c3cc6bfab5724ad18899d52402b776a0de7d87fa20c3c5270bcaaf199ce8", size = 14897344, upload-time = "2026-04-17T19:30:44.245Z" }, + { url = 
"https://files.pythonhosted.org/packages/45/56/152c3a859ca1b9d77ed16deac3cf81682013677c68cf5715698781fc81bd/botocore-1.42.96-py3-none-any.whl", hash = "sha256:db2c3e2006628be6fde81a24124a6563c363d6982fb92728837cf174bad9d98a", size = 14945920, upload-time = "2026-04-24T19:47:00.323Z" }, ] [[package]] @@ -1067,7 +1067,7 @@ wheels = [ [[package]] name = "cos-python-sdk-v5" -version = "1.9.41" +version = "1.9.42" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "crcmod" }, @@ -1076,9 +1076,9 @@ dependencies = [ { name = "six" }, { name = "xmltodict" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/0e/38/c0029f413f51238aa2319715f45d74bcae931768e36c7e4604b02f407c6c/cos_python_sdk_v5-1.9.41.tar.gz", hash = "sha256:68f4be7d8fe27a1d186b3159b93c622816e398effdc236eddd442b86db592b82", size = 102625, upload-time = "2026-01-06T07:00:11.692Z" } +sdist = { url = "https://files.pythonhosted.org/packages/40/e3/b903b4acde334510f481d126a686bc4013710c00e2af34bff369511329ac/cos_python_sdk_v5-1.9.42.tar.gz", hash = "sha256:2a01d1868f50c5a70771f2b67da868f1dc6c6f3890f8009715313834404decc4", size = 102670, upload-time = "2026-04-23T11:08:27.949Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/aa/2f/ead3fb551509fdc94e4a42093b770e3de2827ff7227570165df5e35c2a3e/cos_python_sdk_v5-1.9.41-py3-none-any.whl", hash = "sha256:f465aae43a4ba3f1caa8caeaca838d0395932f6848e89d6dde2807725e3c88a0", size = 98285, upload-time = "2026-01-06T06:43:02.754Z" }, + { url = "https://files.pythonhosted.org/packages/ee/bf/4ea660bb79d91fd41ba394605eccffd3d0943ed547b3fe2bdc6c7a52d2d1/cos_python_sdk_v5-1.9.42-py3-none-any.whl", hash = "sha256:02e583a1094e1794e6c0f56618d5190eb9eb7bfe75909f1dfac41bbee46e46c5", size = 98375, upload-time = "2026-04-23T11:05:14.519Z" }, ] [[package]] @@ -1386,6 +1386,7 @@ dependencies = [ { name = "celery" }, { name = "croniter" }, { name = "fastopenapi", extra = ["flask"] }, + { name = "flask" }, { name = "flask-compress" }, { name = "flask-cors" }, { name = "flask-login" }, @@ -1668,10 +1669,11 @@ requires-dist = [ { name = "aliyun-log-python-sdk", specifier = ">=0.9.44,<1.0.0" }, { name = "azure-identity", specifier = ">=1.25.3,<2.0.0" }, { name = "bleach", specifier = ">=6.3.0" }, - { name = "boto3", specifier = ">=1.42.91" }, + { name = "boto3", specifier = ">=1.42.96" }, { name = "celery", specifier = ">=5.6.3" }, { name = "croniter", specifier = ">=6.2.2" }, { name = "fastopenapi", extras = ["flask"], specifier = "~=0.7.0" }, + { name = "flask", specifier = ">=3.1.3,<4.0.0" }, { name = "flask-compress", specifier = ">=1.24,<2.0.0" }, { name = "flask-cors", specifier = ">=6.0.2" }, { name = "flask-login", specifier = ">=0.6.3,<1.0.0" }, @@ -1688,15 +1690,15 @@ requires-dist = [ { name = "httpx", extras = ["socks"], specifier = ">=0.28.1,<1.0.0" }, { name = "httpx-sse", specifier = "~=0.4.0" }, { name = "json-repair", specifier = "~=0.59.4" }, - { name = "opentelemetry-distro", specifier = ">=0.62b0,<1.0.0" }, + { name = "opentelemetry-distro", specifier = ">=0.62b1,<1.0.0" }, { name = "opentelemetry-instrumentation-celery", specifier = ">=0.62b0,<1.0.0" }, { name = "opentelemetry-instrumentation-flask", specifier = ">=0.62b0,<1.0.0" }, { name = "opentelemetry-instrumentation-httpx", specifier = ">=0.62b0,<1.0.0" }, { name = "opentelemetry-instrumentation-redis", specifier = ">=0.62b0,<1.0.0" }, { name = "opentelemetry-instrumentation-sqlalchemy", specifier = ">=0.62b0,<1.0.0" }, - { name = "opentelemetry-propagator-b3", specifier = ">=1.41.0,<2.0.0" }, + 
{ name = "opentelemetry-propagator-b3", specifier = ">=1.41.1,<2.0.0" }, { name = "psycogreen", specifier = ">=1.0.2" }, - { name = "psycopg2-binary", specifier = ">=2.9.11" }, + { name = "psycopg2-binary", specifier = ">=2.9.12" }, { name = "python-socketio", specifier = ">=5.13.0" }, { name = "readabilipy", specifier = ">=0.3.0,<1.0.0" }, { name = "redis", extras = ["hiredis"], specifier = ">=7.4.0" }, @@ -1708,15 +1710,15 @@ requires-dist = [ [package.metadata.requires-dev] dev = [ { name = "basedpyright", specifier = ">=1.39.3" }, - { name = "boto3-stubs", specifier = ">=1.42.92" }, + { name = "boto3-stubs", specifier = ">=1.42.96" }, { name = "celery-types", specifier = ">=0.23.0" }, { name = "coverage", specifier = ">=7.13.4" }, { name = "dotenv-linter", specifier = ">=0.7.0" }, { name = "faker", specifier = ">=40.15.0" }, - { name = "hypothesis", specifier = ">=6.152.1" }, + { name = "hypothesis", specifier = ">=6.152.3" }, { name = "import-linter", specifier = ">=2.3" }, { name = "lxml-stubs", specifier = ">=0.5.1" }, - { name = "mypy", specifier = ">=1.20.1" }, + { name = "mypy", specifier = ">=1.20.2" }, { name = "pandas-stubs", specifier = ">=3.0.0" }, { name = "pyrefly", specifier = ">=0.62.0" }, { name = "pytest", specifier = ">=9.0.3" }, @@ -1726,7 +1728,7 @@ dev = [ { name = "pytest-mock", specifier = ">=3.15.1" }, { name = "pytest-timeout", specifier = ">=2.4.0" }, { name = "pytest-xdist", specifier = ">=3.8.0" }, - { name = "ruff", specifier = ">=0.15.11" }, + { name = "ruff", specifier = ">=0.15.12" }, { name = "scipy-stubs", specifier = ">=1.17.1.4" }, { name = "testcontainers", specifier = ">=4.14.2" }, { name = "types-aiofiles", specifier = ">=25.1.0" }, @@ -1751,7 +1753,7 @@ dev = [ { name = "types-pexpect", specifier = ">=4.9.0" }, { name = "types-protobuf", specifier = ">=7.34.1" }, { name = "types-psutil", specifier = ">=7.2.2" }, - { name = "types-psycopg2", specifier = ">=2.9.21" }, + { name = "types-psycopg2", specifier = ">=2.9.21.20260422" }, { name = "types-pygments", specifier = ">=2.20.0" }, { name = "types-pymysql", specifier = ">=1.1.0" }, { name = "types-pyopenssl", specifier = ">=24.1.0" }, @@ -1768,7 +1770,7 @@ dev = [ { name = "types-tensorflow", specifier = ">=2.18.0.20260408" }, { name = "types-tqdm", specifier = ">=4.67.3.20260408" }, { name = "types-ujson", specifier = ">=5.10.0" }, - { name = "xinference-client", specifier = ">=2.5.0" }, + { name = "xinference-client", specifier = ">=2.7.0" }, ] evaluation = [ { name = "deepeval", specifier = ">=2.0.0" }, @@ -1777,12 +1779,12 @@ evaluation = [ storage = [ { name = "azure-storage-blob", specifier = ">=12.28.0" }, { name = "bce-python-sdk", specifier = ">=0.9.70" }, - { name = "cos-python-sdk-v5", specifier = ">=1.9.41" }, + { name = "cos-python-sdk-v5", specifier = ">=1.9.42" }, { name = "esdk-obs-python", specifier = ">=3.22.2" }, { name = "google-cloud-storage", specifier = ">=3.10.1" }, { name = "opendal", specifier = ">=0.46.0" }, { name = "oss2", specifier = ">=2.19.1" }, - { name = "supabase", specifier = ">=2.28.3" }, + { name = "supabase", specifier = ">=2.29.0" }, { name = "tos", specifier = ">=2.9.0" }, ] tools = [ @@ -1869,7 +1871,7 @@ vdb-upstash = [{ name = "dify-vdb-upstash", editable = "providers/vdb/vdb-upstas vdb-vastbase = [{ name = "dify-vdb-vastbase", editable = "providers/vdb/vdb-vastbase" }] vdb-vikingdb = [{ name = "dify-vdb-vikingdb", editable = "providers/vdb/vdb-vikingdb" }] vdb-weaviate = [{ name = "dify-vdb-weaviate", editable = "providers/vdb/vdb-weaviate" }] 
-vdb-xinference = [{ name = "xinference-client", specifier = ">=2.5.0" }] +vdb-xinference = [{ name = "xinference-client", specifier = ">=2.7.0" }] [[package]] name = "dify-trace-aliyun" @@ -2773,14 +2775,14 @@ wheels = [ [[package]] name = "gitpython" -version = "3.1.45" +version = "3.1.47" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "gitdb" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9a/c8/dd58967d119baab745caec2f9d853297cec1989ec1d63f677d3880632b88/gitpython-3.1.45.tar.gz", hash = "sha256:85b0ee964ceddf211c41b9f27a49086010a190fd8132a24e21f362a4b36a791c", size = 215076, upload-time = "2025-07-24T03:45:54.871Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c1/bd/50db468e9b1310529a19fce651b3b0e753b5c07954d486cba31bbee9a5d5/gitpython-3.1.47.tar.gz", hash = "sha256:dba27f922bd2b42cb54c87a8ab3cb6beb6bf07f3d564e21ac848913a05a8a3cd", size = 216978, upload-time = "2026-04-22T02:44:44.059Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/01/61/d4b89fec821f72385526e1b9d9a3a0385dda4a72b206d28049e2c7cd39b8/gitpython-3.1.45-py3-none-any.whl", hash = "sha256:8908cb2e02fb3b93b7eb0f2827125cb699869470432cc885f019b8fd0fccff77", size = 208168, upload-time = "2025-07-24T03:45:52.517Z" }, + { url = "https://files.pythonhosted.org/packages/f2/c5/a1bc0996af85757903cf2bf444a7824e68e0035ce63fb41d6f76f9def68b/gitpython-3.1.47-py3-none-any.whl", hash = "sha256:489f590edfd6d20571b2c0e72c6a6ac6915ee8b8cd04572330e3842207a78905", size = 209547, upload-time = "2026-04-22T02:44:41.271Z" }, ] [[package]] @@ -3435,14 +3437,14 @@ wheels = [ [[package]] name = "hypothesis" -version = "6.152.1" +version = "6.152.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "sortedcontainers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/64/b1/c32bcddb9aab9e3abc700f1f56faf14e7655c64a16ca47701a57362276ea/hypothesis-6.152.1.tar.gz", hash = "sha256:4f4ed934eee295dd84ee97592477d23e8dc03e9f12ae0ee30a4e7c9ef3fca3b0", size = 465029, upload-time = "2026-04-14T22:29:24.062Z" } +sdist = { url = "https://files.pythonhosted.org/packages/70/90/fc0b263b6f2622e5f8d2aa93f2e95ba79718a5faa7d2a74bfab10d6b0905/hypothesis-6.152.3.tar.gz", hash = "sha256:c4e5300d3755b6c8a270a28fe5abff40153e927328e89d2bb0229c1384618998", size = 466478, upload-time = "2026-04-26T17:31:07.657Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5d/83/860fb3075e00b0fc19a22a2301bc3c96f00437558c3911bdd0a3573a4a53/hypothesis-6.152.1-py3-none-any.whl", hash = "sha256:40a3619d9e0cb97b018857c7986f75cf5de2e5ec0fa8a0b172d00747758f749e", size = 530752, upload-time = "2026-04-14T22:29:20.893Z" }, + { url = "https://files.pythonhosted.org/packages/90/38/15475b91a4c12721d2be3349e9d6cf8649c76ed9bc1287e2de7c8d06c261/hypothesis-6.152.3-py3-none-any.whl", hash = "sha256:4b47f00916c858ed49cf870a2f08b04e5fff5afae0bb78f3b4a6d9c74fd6c7bc", size = 532154, upload-time = "2026-04-26T17:31:04.42Z" }, ] [[package]] @@ -4290,7 +4292,7 @@ wheels = [ [[package]] name = "mypy" -version = "1.20.1" +version = "1.20.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "librt", marker = "platform_python_implementation != 'PyPy'" }, @@ -4298,16 +4300,16 @@ dependencies = [ { name = "pathspec" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/0b/3d/5b373635b3146264eb7a68d09e5ca11c305bbb058dfffbb47c47daf4f632/mypy-1.20.1.tar.gz", hash = 
"sha256:6fc3f4ecd52de81648fed1945498bf42fa2993ddfad67c9056df36ae5757f804", size = 3815892, upload-time = "2026-04-13T02:46:51.474Z" } +sdist = { url = "https://files.pythonhosted.org/packages/04/af/e3d4b3e9ec91a0ff9aabfdb38692952acf49bbb899c2e4c29acb3a6da3ae/mypy-1.20.2.tar.gz", hash = "sha256:e8222c26daaafd9e8626dec58ae36029f82585890589576f769a650dd20fd665", size = 3817349, upload-time = "2026-04-21T17:12:28.473Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/69/1b/75a7c825a02781ca10bc2f2f12fba2af5202f6d6005aad8d2d1f264d8d78/mypy-1.20.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:36ee2b9c6599c230fea89bbd79f401f9f9f8e9fcf0c777827789b19b7da90f51", size = 14494077, upload-time = "2026-04-13T02:45:55.085Z" }, - { url = "https://files.pythonhosted.org/packages/b0/54/5e5a569ea5c2b4d48b729fb32aa936eeb4246e4fc3e6f5b3d36a2dfbefb9/mypy-1.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fba3fb0968a7b48806b0c90f38d39296f10766885a94c83bd21399de1e14eb28", size = 13319495, upload-time = "2026-04-13T02:45:29.674Z" }, - { url = "https://files.pythonhosted.org/packages/6f/a4/a1945b19f33e91721b59deee3abb484f2fa5922adc33bb166daf5325d76d/mypy-1.20.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef1415a637cd3627d6304dfbeddbadd21079dafc2a8a753c477ce4fc0c2af54f", size = 13696948, upload-time = "2026-04-13T02:46:15.006Z" }, - { url = "https://files.pythonhosted.org/packages/b2/c6/75e969781c2359b2f9c15b061f28ec6d67c8b61865ceda176e85c8e7f2de/mypy-1.20.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ef3461b1ad5cd446e540016e90b5984657edda39f982f4cc45ca317b628f5a37", size = 14706744, upload-time = "2026-04-13T02:46:00.482Z" }, - { url = "https://files.pythonhosted.org/packages/a8/6e/b221b1de981fc4262fe3e0bf9ec272d292dfe42394a689c2d49765c144c4/mypy-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:542dd63c9e1339b6092eb25bd515f3a32a1453aee8c9521d2ddb17dacd840237", size = 14949035, upload-time = "2026-04-13T02:45:06.021Z" }, - { url = "https://files.pythonhosted.org/packages/ca/4b/298ba2de0aafc0da3ff2288da06884aae7ba6489bc247c933f87847c41b3/mypy-1.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:1d55c7cd8ca22e31f93af2a01160a9e95465b5878de23dba7e48116052f20a8d", size = 10883216, upload-time = "2026-04-13T02:45:47.232Z" }, - { url = "https://files.pythonhosted.org/packages/c7/f9/5e25b8f0b8cb92f080bfed9c21d3279b2a0b6a601cdca369a039ba84789d/mypy-1.20.1-cp312-cp312-win_arm64.whl", hash = "sha256:f5b84a79070586e0d353ee07b719d9d0a4aa7c8ee90c0ea97747e98cbe193019", size = 9814299, upload-time = "2026-04-13T02:45:21.934Z" }, - { url = "https://files.pythonhosted.org/packages/d8/28/926bd972388e65a39ee98e188ccf67e81beb3aacfd5d6b310051772d974b/mypy-1.20.1-py3-none-any.whl", hash = "sha256:1aae28507f253fe82d883790d1c0a0d35798a810117c88184097fe8881052f06", size = 2636553, upload-time = "2026-04-13T02:46:30.45Z" }, + { url = "https://files.pythonhosted.org/packages/71/4e/7560e4528db9e9b147e4c0f22660466bf30a0a1fe3d63d1b9d3b0fd354ee/mypy-1.20.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4dbfcf869f6b0517f70cf0030ba6ea1d6645e132337a7d5204a18d8d5636c02b", size = 14539393, upload-time = "2026-04-21T17:07:12.52Z" }, + { url = "https://files.pythonhosted.org/packages/32/d9/34a5efed8124f5a9234f55ac6a4ced4201e2c5b81e1109c49ad23190ec8c/mypy-1.20.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4b6481b228d072315b053210b01ac320e1be243dc17f9e5887ef167f23f5fae4", size = 13361642, 
upload-time = "2026-04-21T17:06:53.742Z" }, + { url = "https://files.pythonhosted.org/packages/d1/14/eb377acf78c03c92d566a1510cda8137348215b5335085ef662ab82ecd3a/mypy-1.20.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:34397cdced6b90b836e38182076049fdb41424322e0b0728c946b0939ebdf9f6", size = 13740347, upload-time = "2026-04-21T17:12:04.73Z" }, + { url = "https://files.pythonhosted.org/packages/b9/94/7e4634a32b641aa1c112422eed1bbece61ee16205f674190e8b536f884de/mypy-1.20.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a5da6976f20cae27059ea8d0c86e7cef3de720e04c4bb9ee18e3690fdb792066", size = 14734042, upload-time = "2026-04-21T17:07:43.16Z" }, + { url = "https://files.pythonhosted.org/packages/7a/f3/f7e62395cb7f434541b4491a01149a4439e28ace4c0c632bbf5431e92d1f/mypy-1.20.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:56908d7e08318d39f85b1f0c6cfd47b0cac1a130da677630dac0de3e0623e102", size = 14964958, upload-time = "2026-04-21T17:11:00.665Z" }, + { url = "https://files.pythonhosted.org/packages/3e/0d/47e3c3a0ec2a876e35aeac365df3cac7776c36bbd4ed18cc521e1b9d255b/mypy-1.20.2-cp312-cp312-win_amd64.whl", hash = "sha256:d52ad8d78522da1d308789df651ee5379088e77c76cb1994858d40a426b343b9", size = 10911340, upload-time = "2026-04-21T17:10:49.179Z" }, + { url = "https://files.pythonhosted.org/packages/d6/b2/6c852d72e0ea8b01f49da817fb52539993cde327e7d010e0103dc12d0dac/mypy-1.20.2-cp312-cp312-win_arm64.whl", hash = "sha256:785b08db19c9f214dc37d65f7c165d19a30fcecb48abfa30f31b01b5acaabb58", size = 9833947, upload-time = "2026-04-21T17:09:05.267Z" }, + { url = "https://files.pythonhosted.org/packages/28/9a/f23c163e25b11074188251b0b5a0342625fc1cdb6af604757174fa9acc9b/mypy-1.20.2-py3-none-any.whl", hash = "sha256:a94c5a76ab46c5e6257c7972b6c8cff0574201ca7dc05647e33e795d78680563", size = 2637314, upload-time = "2026-04-21T17:05:54.5Z" }, ] [[package]] @@ -4587,29 +4589,29 @@ wheels = [ [[package]] name = "opentelemetry-api" -version = "1.41.0" +version = "1.41.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "importlib-metadata" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/47/8e/3778a7e87801d994869a9396b9fc2a289e5f9be91ff54a27d41eace494b0/opentelemetry_api-1.41.0.tar.gz", hash = "sha256:9421d911326ec12dee8bc933f7839090cad7a3f13fcfb0f9e82f8174dc003c09", size = 71416, upload-time = "2026-04-09T14:38:34.544Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fa/fc/b7564cbef36601aef0d6c9bc01f7badb64be8e862c2e1c3c5c3b43b53e4f/opentelemetry_api-1.41.1.tar.gz", hash = "sha256:0ad1814d73b875f84494387dae86ce0b12c68556331ce6ce8fe789197c949621", size = 71416, upload-time = "2026-04-24T13:15:38.262Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/58/ee/99ab786653b3bda9c37ade7e24a7b607a1b1f696063172768417539d876d/opentelemetry_api-1.41.0-py3-none-any.whl", hash = "sha256:0e77c806e6a89c9e4f8d372034622f3e1418a11bdbe1c80a50b3d3397ad0fa4f", size = 69007, upload-time = "2026-04-09T14:38:11.833Z" }, + { url = "https://files.pythonhosted.org/packages/29/59/3e7118ed140f76b0982ba4321bdaed1997a0473f9720de2d10788a577033/opentelemetry_api-1.41.1-py3-none-any.whl", hash = "sha256:a22df900e75c76dc08440710e51f52f1aa6b451b429298896023e60db5b3139f", size = 69007, upload-time = "2026-04-24T13:15:15.662Z" }, ] [[package]] name = "opentelemetry-distro" -version = "0.62b0" +version = "0.62b1" source = { registry = 
"https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, { name = "opentelemetry-instrumentation" }, { name = "opentelemetry-sdk" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/72/c6/52b0dbcc8fbdecf179047921940516cbb8aaf05f6b737faa526ad76fec51/opentelemetry_distro-0.62b0.tar.gz", hash = "sha256:aa0308fbe50ad8f17d4446982dbf26870e20b8031ba38d8e1224ecf7aedd3184", size = 2611, upload-time = "2026-04-09T14:40:20.404Z" } +sdist = { url = "https://files.pythonhosted.org/packages/45/f1/314e5015e353a001948e03f48a6935ca7ef00e99107b8e3e63871426b0f6/opentelemetry_distro-0.62b1.tar.gz", hash = "sha256:0169b128b9d6d5cab809ae4c4fb3d576bfc5d3f30b32d8a43b770b587f04f253", size = 2606, upload-time = "2026-04-24T13:22:29.403Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b3/7e/5858bba1c7ed880c7b0fe7d9a1ea40ab8affd18c9ebc1e16c2d69c501da1/opentelemetry_distro-0.62b0-py3-none-any.whl", hash = "sha256:23e9065a35cef12868ad5efb18ce9c88a9103800256b318dec4c9c850c6c78c1", size = 3348, upload-time = "2026-04-09T14:39:17.406Z" }, + { url = "https://files.pythonhosted.org/packages/b9/19/c58c119a299298f03d0797fcb780f221880e8d725959c71bcfb4ae034738/opentelemetry_distro-0.62b1-py3-none-any.whl", hash = "sha256:fd938de6ca1d047ffd15a65fa09d89f4b4ca7dd97ef25601a12d6d10efd693a0", size = 3348, upload-time = "2026-04-24T13:21:27.389Z" }, ] [[package]] @@ -4675,7 +4677,7 @@ wheels = [ [[package]] name = "opentelemetry-instrumentation" -version = "0.62b0" +version = "0.62b1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, @@ -4683,14 +4685,14 @@ dependencies = [ { name = "packaging" }, { name = "wrapt" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f9/fd/b8e90bb340957f059084376f94cff336b0e871a42feba7d3f7342365e987/opentelemetry_instrumentation-0.62b0.tar.gz", hash = "sha256:aa1b0b9ab2e1722c2a8a5384fb016fc28d30bba51826676c8036074790d2861e", size = 34042, upload-time = "2026-04-09T14:40:22.843Z" } +sdist = { url = "https://files.pythonhosted.org/packages/52/cb/0523b92c112a6cc70be43724343dc45225d3af134419844d7879a07755d4/opentelemetry_instrumentation-0.62b1.tar.gz", hash = "sha256:90e92a905ba4f84db06ac3aec96701df6c079b2d66e9379f8739f0a1bdcc7f45", size = 34043, upload-time = "2026-04-24T13:22:31.997Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/00/b6/3356d2e335e3c449c5183e9b023f30f04f1b7073a6583c68745ea2e704b1/opentelemetry_instrumentation-0.62b0-py3-none-any.whl", hash = "sha256:30d4e76486eae64fb095264a70c2c809c4bed17b73373e53091470661f7d477c", size = 34158, upload-time = "2026-04-09T14:39:21.428Z" }, + { url = "https://files.pythonhosted.org/packages/4d/0f/45adbaea1f81b847cffdcee4f4b5f89297e42facf7fac78c7aaac4c38e75/opentelemetry_instrumentation-0.62b1-py3-none-any.whl", hash = "sha256:976fc6e640f2006599e97429c949e622c108d0c17c2059347d1e6c93c707f257", size = 34163, upload-time = "2026-04-24T13:21:31.722Z" }, ] [[package]] name = "opentelemetry-instrumentation-asgi" -version = "0.62b0" +version = "0.62b1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "asgiref" }, @@ -4699,28 +4701,28 @@ dependencies = [ { name = "opentelemetry-semantic-conventions" }, { name = "opentelemetry-util-http" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f1/38/999bf777774878971c2716de4b7a03cd57a7decb4af25090e703b79fa0e5/opentelemetry_instrumentation_asgi-0.62b0.tar.gz", hash = "sha256:93cde8c62e5918a3c1ff9ba020518127300e5e0816b7e8b14baf46a26ba619fc", size = 26779, 
upload-time = "2026-04-09T14:40:26.566Z" } +sdist = { url = "https://files.pythonhosted.org/packages/54/43/b2f0703ff46718ff7b17d7fbf8e9d7f20e26a23c7c325092dd762d09cf9d/opentelemetry_instrumentation_asgi-0.62b1.tar.gz", hash = "sha256:7cf5f5d5c493bbb1edd2bd6d51fa879d964e94048904017258a32ffa47329310", size = 26781, upload-time = "2026-04-24T13:22:37.158Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/25/cf/29df82f5870178143bdb5c9a7be044b9f78c71e1c5dcf995242e86d80158/opentelemetry_instrumentation_asgi-0.62b0-py3-none-any.whl", hash = "sha256:89b62a6f996b260b162f515c25e6d78e39286e4cbe2f935899e51b32f31027e2", size = 17011, upload-time = "2026-04-09T14:39:27.305Z" }, + { url = "https://files.pythonhosted.org/packages/d0/41/968c1fe12fb90abffca6620e65d4af91451c02ecca8f74a17a62cac490de/opentelemetry_instrumentation_asgi-0.62b1-py3-none-any.whl", hash = "sha256:b7f89be48528512619bd54fa2459f72afb1695ba71d7024d382ad96d467e7fa8", size = 17011, upload-time = "2026-04-24T13:21:38.006Z" }, ] [[package]] name = "opentelemetry-instrumentation-celery" -version = "0.62b0" +version = "0.62b1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, { name = "opentelemetry-instrumentation" }, { name = "opentelemetry-semantic-conventions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/01/b4/20a3c8c669dc45aa3703c0370041d67e8be613f1829523cdaf634a5f9626/opentelemetry_instrumentation_celery-0.62b0.tar.gz", hash = "sha256:55e8fa48e5b886bcca448fa32e28a6cc2165157745e8328de479a826d3903095", size = 14808, upload-time = "2026-04-09T14:40:31.603Z" } +sdist = { url = "https://files.pythonhosted.org/packages/35/86/9e78c174b2f6ea92af3f99aa7488807b74290a5cd44a8e05bfbfd7b109be/opentelemetry_instrumentation_celery-0.62b1.tar.gz", hash = "sha256:f0035abd464a2989414a9c5ecdd79a25c87bd8c43f96c7f39e07000c6f25dfef", size = 14809, upload-time = "2026-04-24T13:22:45.656Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f6/60/cf951e6bd6ec62ec55bd2384e0ba9841ea38f2d128c773d85dc60da97172/opentelemetry_instrumentation_celery-0.62b0-py3-none-any.whl", hash = "sha256:cadfd3e65287a36099dce5ba7e05d98e4c5f9479a455241e01d140ecc5c10935", size = 13864, upload-time = "2026-04-09T14:39:35.009Z" }, + { url = "https://files.pythonhosted.org/packages/24/51/f38a31ac8f8e3bd365f301f697661679addaf548d52a05cfdde4448a5493/opentelemetry_instrumentation_celery-0.62b1-py3-none-any.whl", hash = "sha256:50567a47b7adc4ea552d09709de4d73fea7b4ff24ab0e9d38739d03fcd3f95ef", size = 13864, upload-time = "2026-04-24T13:21:46.557Z" }, ] [[package]] name = "opentelemetry-instrumentation-fastapi" -version = "0.62b0" +version = "0.62b1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, @@ -4729,14 +4731,14 @@ dependencies = [ { name = "opentelemetry-semantic-conventions" }, { name = "opentelemetry-util-http" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/37/09/92740c6d114d1bef392557a03ae6de64065c83c1b331dae9b57fe718497c/opentelemetry_instrumentation_fastapi-0.62b0.tar.gz", hash = "sha256:e4748e4e575077e08beaf2c5d2f369da63dd90882d89d73c4192a97356637dec", size = 25056, upload-time = "2026-04-09T14:40:36.438Z" } +sdist = { url = "https://files.pythonhosted.org/packages/77/38/91780475a25370b6d483afbaed3e1e170459d6351c5f7c08d66b65e2172e/opentelemetry_instrumentation_fastapi-0.62b1.tar.gz", hash = "sha256:b377d4ba32868fb1ff0f64da3fcdd3aa154d698fc83d65f5d380ea21bf31ee19", size = 25054, upload-time = "2026-04-24T13:22:50.222Z" } 
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/64/bb/186ffe0fde0ad33ceb50e1d3596cc849b732d3b825592a6a507a40c8c49b/opentelemetry_instrumentation_fastapi-0.62b0-py3-none-any.whl", hash = "sha256:06d3272ad15f9daea5a0a27c32831aff376110a4b0394197120256ef6d610e6e", size = 13482, upload-time = "2026-04-09T14:39:43.446Z" },
+    { url = "https://files.pythonhosted.org/packages/8c/6f/602e4081d3fe82731aff7e3e9c2f1662d85701841d6dc25f16a1874e11cd/opentelemetry_instrumentation_fastapi-0.62b1-py3-none-any.whl", hash = "sha256:93fa9cc4f315819aee5f4fceb6196c1e5b0fbd789c5520c631de228bd3e5285b", size = 13484, upload-time = "2026-04-24T13:21:54.538Z" },
 ]
 
 [[package]]
 name = "opentelemetry-instrumentation-flask"
-version = "0.62b0"
+version = "0.62b1"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "opentelemetry-api" },
@@ -4746,14 +4748,14 @@ dependencies = [
     { name = "opentelemetry-util-http" },
     { name = "packaging" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/8e/86/522294f6a80d59560d8f722da59513d2ed2d53c6178fa109789dacc5dd50/opentelemetry_instrumentation_flask-0.62b0.tar.gz", hash = "sha256:330e903c0e92b06aae32f9eb7b8a923599d7a29440f50841a59dbba34ec6dd9f", size = 24100, upload-time = "2026-04-09T14:40:37.111Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/d3/08/e52e6eab550db1736c5657a7e38484c22a101009e77fc67eb00b272a96c1/opentelemetry_instrumentation_flask-0.62b1.tar.gz", hash = "sha256:37662ad159570dab1e3017a2a415193c014a5798fc32d33f3bdd254469e8c69a", size = 24100, upload-time = "2026-04-24T13:22:50.845Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/bc/c8/9f3bb38281bcb50c93c3d2358b303645f6917bf972c167484c09f9a97ff1/opentelemetry_instrumentation_flask-0.62b0-py3-none-any.whl", hash = "sha256:8c1f8986ec3887d08899d2eb654625252c929105174911b3b50dcf12b1001807", size = 16006, upload-time = "2026-04-09T14:39:44.401Z" },
+    { url = "https://files.pythonhosted.org/packages/2b/58/d0e5e82d225365987bd192576095b1125f6b172decc4db79963373c92b74/opentelemetry_instrumentation_flask-0.62b1-py3-none-any.whl", hash = "sha256:6df32684a7dd5dab5feb499c0748a4628b3fd139bffd8171326fb479aa525367", size = 16007, upload-time = "2026-04-24T13:21:55.462Z" },
 ]
 
 [[package]]
 name = "opentelemetry-instrumentation-httpx"
-version = "0.62b0"
+version = "0.62b1"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "opentelemetry-api" },
@@ -4762,14 +4764,14 @@ dependencies = [
     { name = "opentelemetry-util-http" },
     { name = "wrapt" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/77/a7/63e2c6325c8e99cd9b8e0229a8b61c37520ee537214a2c8d514e84486a94/opentelemetry_instrumentation_httpx-0.62b0.tar.gz", hash = "sha256:d865398db3f3c289ba226e355bf4d94460a4301c0c8916e3136caea55ae18000", size = 24182, upload-time = "2026-04-09T14:40:38.719Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/33/cb/7a418e69c7dad281803529cb4f6de1b747d802cca44c38032668690b4836/opentelemetry_instrumentation_httpx-0.62b1.tar.gz", hash = "sha256:a1fac9bcc3a6ef5996a7990563f1af0798468b2c146de535fd598369383fba7e", size = 24181, upload-time = "2026-04-24T13:22:52.124Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/c0/5e/7d5fc28487637871b015128cd5dbb3c36f6d343a9098b893bd803d5a9cca/opentelemetry_instrumentation_httpx-0.62b0-py3-none-any.whl", hash = "sha256:c7660b939c12608fec67743126e9b4dc23dceef0ed631c415924966b0d1579e3", size = 17200, upload-time = "2026-04-09T14:39:46.618Z" },
+    { url = "https://files.pythonhosted.org/packages/c7/e0/eca824e9492ccec00e055bdd243aeda8eb7c5eda746d98af4d7a2d97ecf3/opentelemetry_instrumentation_httpx-0.62b1-py3-none-any.whl", hash = "sha256:88614015df451d61bc7e73f22524e6f223611f80b6caad2f6bdcbe05fa0df653", size = 17201, upload-time = "2026-04-24T13:21:58.072Z" },
 ]
 
 [[package]]
 name = "opentelemetry-instrumentation-redis"
-version = "0.62b0"
+version = "0.62b1"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "opentelemetry-api" },
@@ -4777,14 +4779,14 @@ dependencies = [
     { name = "opentelemetry-semantic-conventions" },
     { name = "wrapt" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/55/7d/5acdb4e4e36c522f9393cfa91f7a431ee089663c77855e524bc97f993020/opentelemetry_instrumentation_redis-0.62b0.tar.gz", hash = "sha256:513bc6679ee251436f0aff7be7ddab6186637dde09a795a8dc9659103f103bef", size = 14796, upload-time = "2026-04-09T14:40:48.391Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/f5/ff/35414ad80409bd9e472c7959832524c5f2c8f63965af08c41c2b42d3a6a6/opentelemetry_instrumentation_redis-0.62b1.tar.gz", hash = "sha256:2d3c421d95e05ade075bee5becbe34e743b1cdf5bdee2085cb524f88c4f13dcb", size = 14796, upload-time = "2026-04-24T13:23:01.138Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/de/42/a13a7da074c972a51c14277e7f747e90037b9d815515c73b802e95897690/opentelemetry_instrumentation_redis-0.62b0-py3-none-any.whl", hash = "sha256:92ada3d7bdf395785f660549b0e6e8e5bac7cab80e7f1369a7d02228b27684c3", size = 15501, upload-time = "2026-04-09T14:40:00.69Z" },
+    { url = "https://files.pythonhosted.org/packages/31/37/bc2271f3472e3041eeade8b8da1cfd3b06badae76fe5d0ff135b6285e70c/opentelemetry_instrumentation_redis-0.62b1-py3-none-any.whl", hash = "sha256:9aedd02c1acf631251d1d676634db47da9da04e0a626cd0c7d83fe0eb791d165", size = 15501, upload-time = "2026-04-24T13:22:11.705Z" },
 ]
 
 [[package]]
 name = "opentelemetry-instrumentation-sqlalchemy"
-version = "0.62b0"
+version = "0.62b1"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "opentelemetry-api" },
@@ -4793,14 +4795,14 @@ dependencies = [
     { name = "packaging" },
     { name = "wrapt" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/2a/3d/40adc8c38e5be017ceb230a28ca57ca81981d4dc0c4b902cc930c77fd14f/opentelemetry_instrumentation_sqlalchemy-0.62b0.tar.gz", hash = "sha256:d02f85b83f349e9ef70a34cb3f4c3a3481fa15b11747f09209818663e161cac4", size = 18539, upload-time = "2026-04-09T14:40:50.251Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/1a/53/fa511ab998dd66b4eb66a36d8c262d0604cc5bad7a9c82e923be038dda97/opentelemetry_instrumentation_sqlalchemy-0.62b1.tar.gz", hash = "sha256:bdeac015351a1de057e8ea39f1fe26c9e60ea6bedbf1d5ad6a8262a516b3dc7d", size = 18539, upload-time = "2026-04-24T13:23:03.169Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/e7/e0/77954ac593f34740dc32e28a15fe7170e90f6ba6398eaaa5c88b34c05ed1/opentelemetry_instrumentation_sqlalchemy-0.62b0-py3-none-any.whl", hash = "sha256:ec576e0660080d9d15ce4fa44d2a07fff8cb4b796a84344cb0f2c9e5d6e26f79", size = 15534, upload-time = "2026-04-09T14:40:03.957Z" },
+    { url = "https://files.pythonhosted.org/packages/2d/c5/aa2abcf8752a435536901636c5d540ba7a2c0ba2c4e98c7d119482e04262/opentelemetry_instrumentation_sqlalchemy-0.62b1-py3-none-any.whl", hash = "sha256:613542ecd52aabeec83d8813b5c287a3fb6c9ac3cd660694c94c0571f066e972", size = 15536, upload-time = "2026-04-24T13:22:14.767Z" },
 ]
 
 [[package]]
 name = "opentelemetry-instrumentation-wsgi"
-version = "0.62b0" +version = "0.62b1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, @@ -4808,22 +4810,22 @@ dependencies = [ { name = "opentelemetry-semantic-conventions" }, { name = "opentelemetry-util-http" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b7/5c/ed45ff053d76c94c59173f2bcde3d61052adb10214f70f028f760aa56625/opentelemetry_instrumentation_wsgi-0.62b0.tar.gz", hash = "sha256:d179f969ecce0c29a15ffd4d982580dfae57c8ff2fd4d9366e299a6d4815e668", size = 19922, upload-time = "2026-04-09T14:40:56.227Z" } +sdist = { url = "https://files.pythonhosted.org/packages/36/db/19f1d66cead56e52291fccaa235b07ad45a5c24be1c740301a840c68235a/opentelemetry_instrumentation_wsgi-0.62b1.tar.gz", hash = "sha256:02a364fd9c940a46b19c825c5bfe386b007d5292ef91573894164836953fe831", size = 19919, upload-time = "2026-04-24T13:23:09.796Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f6/cb/753dbbe624df88594fa35a3ff26302fea22623385ed64462f6c8ee7c81eb/opentelemetry_instrumentation_wsgi-0.62b0-py3-none-any.whl", hash = "sha256:2714ab5ab2f35e67dc181ffa3a43fa15313c85c09b4d024c36d72cf1efa29c9a", size = 14628, upload-time = "2026-04-09T14:40:13.529Z" }, + { url = "https://files.pythonhosted.org/packages/f7/0e/60fec0780e16929c821df7c55c4f0bea45d6ef562e662c5f27f47d0ff195/opentelemetry_instrumentation_wsgi-0.62b1-py3-none-any.whl", hash = "sha256:a2df11de0113f504043e2b0fa0288238a93ee49ff607bd5100cb2d3a75bc771f", size = 14629, upload-time = "2026-04-24T13:22:23.951Z" }, ] [[package]] name = "opentelemetry-propagator-b3" -version = "1.41.0" +version = "1.41.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ef/43/cea77e171c014324876104cf2a17c78f5e931408b977b9e64979f950912c/opentelemetry_propagator_b3-1.41.0.tar.gz", hash = "sha256:ef98b715b3a05e8b0b03ebaea1bf295b4ad61a0e306e2d1da81d32af7395e6ad", size = 9588, upload-time = "2026-04-09T14:38:43.328Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/ef/e2c1093e21fb9b5f8e44fa6cebacf2cbb60b47b4646d652805dcce48f3b8/opentelemetry_propagator_b3-1.41.1.tar.gz", hash = "sha256:e8563b588aa5f1f90740dcd678f04d5634de2d4e0077b7ca4a177c71a02f745d", size = 9587, upload-time = "2026-04-24T13:15:48.349Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/50/c1/11345c06774ec6ed6d89e3994dd1f62ad2ab41dfeb312eacd6b2a2323280/opentelemetry_propagator_b3-1.41.0-py3-none-any.whl", hash = "sha256:0b085c26ba59fcb66771226f967e91886bdeef998b3b5f2e9da6a604918c6f90", size = 8923, upload-time = "2026-04-09T14:38:26.865Z" }, + { url = "https://files.pythonhosted.org/packages/c8/78/388ea1ae84fd3d2858c782f0410d73d936ffbd1a54711e45874490c576e7/opentelemetry_propagator_b3-1.41.1-py3-none-any.whl", hash = "sha256:f4b045d0aa4b5c17ac25a371bf3d08173a2f4b8f19a94357e57ae690c15415dc", size = 8921, upload-time = "2026-04-24T13:15:30.408Z" }, ] [[package]] @@ -4840,38 +4842,38 @@ wheels = [ [[package]] name = "opentelemetry-sdk" -version = "1.41.0" +version = "1.41.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, { name = "opentelemetry-semantic-conventions" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f8/0e/a586df1186f9f56b5a0879d52653effc40357b8e88fc50fe300038c3c08b/opentelemetry_sdk-1.41.0.tar.gz", hash = 
"sha256:7bddf3961131b318fc2d158947971a8e37e38b1cd23470cfb72b624e7cc108bd", size = 230181, upload-time = "2026-04-09T14:38:47.225Z" } +sdist = { url = "https://files.pythonhosted.org/packages/58/d0/54ee30dab82fb0acda23d144502771ff76ef8728459c83c3e89ef9fb1825/opentelemetry_sdk-1.41.1.tar.gz", hash = "sha256:724b615e1215b5aeacda0abb8a6a8922c9a1853068948bd0bd225a56d0c792e6", size = 230180, upload-time = "2026-04-24T13:15:50.991Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2c/13/a7825118208cb32e6a4edcd0a99f925cbef81e77b3b0aedfd9125583c543/opentelemetry_sdk-1.41.0-py3-none-any.whl", hash = "sha256:a596f5687964a3e0d7f8edfdcf5b79cbca9c93c7025ebf5fb00f398a9443b0bd", size = 180214, upload-time = "2026-04-09T14:38:30.657Z" }, + { url = "https://files.pythonhosted.org/packages/b4/e7/a1420b698aad018e1cf60fdbaaccbe49021fb415e2a0d81c242f4c518f54/opentelemetry_sdk-1.41.1-py3-none-any.whl", hash = "sha256:edee379c126c1bce952b0c812b48fe8ff35b30df0eecf17e98afa4d598b7d85d", size = 180213, upload-time = "2026-04-24T13:15:33.767Z" }, ] [[package]] name = "opentelemetry-semantic-conventions" -version = "0.62b0" +version = "0.62b1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a3/b0/c14f723e86c049b7bf8ff431160d982519b97a7be2857ed2247377397a24/opentelemetry_semantic_conventions-0.62b0.tar.gz", hash = "sha256:cbfb3c8fc259575cf68a6e1b94083cc35adc4a6b06e8cf431efa0d62606c0097", size = 145753, upload-time = "2026-04-09T14:38:48.274Z" } +sdist = { url = "https://files.pythonhosted.org/packages/9e/de/911ac9e309052aca1b20b2d5549d3db45d1011e1a610e552c6ccdd1b64f8/opentelemetry_semantic_conventions-0.62b1.tar.gz", hash = "sha256:c5cc6e04a7f8c7cdd30be2ed81499fa4e75bfbd52c9cb70d40af1f9cd3619802", size = 145750, upload-time = "2026-04-24T13:15:52.236Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/58/6c/5e86fa1759a525ef91c2d8b79d668574760ff3f900d114297765eb8786cb/opentelemetry_semantic_conventions-0.62b0-py3-none-any.whl", hash = "sha256:0ddac1ce59eaf1a827d9987ab60d9315fb27aea23304144242d1fcad9e16b489", size = 231619, upload-time = "2026-04-09T14:38:32.394Z" }, + { url = "https://files.pythonhosted.org/packages/eb/a6/83dc2ab6fa397ee66fba04fe2e74bdf7be3b3870005359ceb7689103c058/opentelemetry_semantic_conventions-0.62b1-py3-none-any.whl", hash = "sha256:cf506938103d331fbb78eded0d9788095f7fd59016f2bda813c3324e5a74a93c", size = 231620, upload-time = "2026-04-24T13:15:35.454Z" }, ] [[package]] name = "opentelemetry-util-http" -version = "0.62b0" +version = "0.62b1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/9b/e7/830f7c57135158eb8a8efd3f94ab191a89e3b8a49bed314a35ee501da3f2/opentelemetry_util_http-0.62b0.tar.gz", hash = "sha256:a62e4b19b8a432c0de657f167dee3455516136bb9c6ed463ca8063019970d835", size = 11393, upload-time = "2026-04-09T14:40:59.442Z" } +sdist = { url = "https://files.pythonhosted.org/packages/24/1b/aa71b63e18d30a8384036b9937f40f7618f8030a7aa213155fb54f6f2b47/opentelemetry_util_http-0.62b1.tar.gz", hash = "sha256:adf6facbb89aef8f8bc566e2f04624942ba08a7b678b3479a91051a8f4dc70a3", size = 11393, upload-time = "2026-04-24T13:23:12.994Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3d/7f/5c1b7d4385852b9e5eacd4e7f9d8b565d3d351d17463b24916ad098adf1a/opentelemetry_util_http-0.62b0-py3-none-any.whl", hash = 
"sha256:c20462808d8cc95b69b0dc4a3e02a9d36beb663347e96c931f51ffd78bd318ad", size = 9294, upload-time = "2026-04-09T14:40:19.014Z" }, + { url = "https://files.pythonhosted.org/packages/5d/85/a9d9d32161c1ced61346267db4c9702da54f81ec5dc88214bc65c23f4e9d/opentelemetry_util_http-0.62b1-py3-none-any.whl", hash = "sha256:c57e8a6c19fc422c288e6074e882f506f85030b69b7376182f74f9257b9261f0", size = 9295, upload-time = "2026-04-24T13:22:28.078Z" }, ] [[package]] @@ -5179,7 +5181,7 @@ wheels = [ [[package]] name = "postgrest" -version = "2.28.3" +version = "2.29.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "deprecation" }, @@ -5187,9 +5189,9 @@ dependencies = [ { name = "pydantic" }, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/40/60/9378ddd6e21b6005b34aeb42dc7a9ed9985c673c97c9b6a1858f9c52ebbd/postgrest-2.28.3.tar.gz", hash = "sha256:56336e9304950a78315ec7d6c8eb307cdb964d0878a7bec6111392ddb6c16a45", size = 13758, upload-time = "2026-03-20T14:38:06.542Z" } +sdist = { url = "https://files.pythonhosted.org/packages/52/98/f216b8b5c4d116ab6a2fb21339b5821da279ee773e163612418e1c56c012/postgrest-2.29.0.tar.gz", hash = "sha256:a87081858f627fcd57e8e7137004a1ef0adbdf0dbdfed1384e9ea1d7a9c525ec", size = 14217, upload-time = "2026-04-24T13:13:00.281Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7f/5e/6eeb1d53d010d80e800204c1eee6b3d5419a6a2b985c364f56f36cf48cca/postgrest-2.28.3-py3-none-any.whl", hash = "sha256:5a44d6c6d509abdbe0f928c86f0dc31ef26bda36e0357129836ec54dfb50b083", size = 21865, upload-time = "2026-03-20T14:38:05.55Z" }, + { url = "https://files.pythonhosted.org/packages/2c/0b/08b670a93a90d625c557b9e64b8a5fdeec80c3542d2d0265f0b4d6b16646/postgrest-2.29.0-py3-none-any.whl", hash = "sha256:3ee48e146f726272733d20e2b12de354cdb6cb9dd9cc3a61ed97ce69047aeb96", size = 22735, upload-time = "2026-04-24T13:12:58.405Z" }, ] [[package]] @@ -5350,21 +5352,21 @@ wheels = [ [[package]] name = "psycopg2-binary" -version = "2.9.11" +version = "2.9.12" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ac/6c/8767aaa597ba424643dc87348c6f1754dd9f48e80fdc1b9f7ca5c3a7c213/psycopg2-binary-2.9.11.tar.gz", hash = "sha256:b6aed9e096bf63f9e75edf2581aa9a7e7186d97ab5c177aa6c87797cd591236c", size = 379620, upload-time = "2025-10-10T11:14:48.041Z" } +sdist = { url = "https://files.pythonhosted.org/packages/2a/60/a3624f79acea344c16fbef3a94d28b89a8042ddfb8f3e4ca83f538671409/psycopg2_binary-2.9.12.tar.gz", hash = "sha256:5ac9444edc768c02a6b6a591f070b8aae28ff3a99be57560ac996001580f294c", size = 379686, upload-time = "2026-04-21T09:40:34.304Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d8/91/f870a02f51be4a65987b45a7de4c2e1897dd0d01051e2b559a38fa634e3e/psycopg2_binary-2.9.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:be9b840ac0525a283a96b556616f5b4820e0526addb8dcf6525a0fa162730be4", size = 3756603, upload-time = "2025-10-10T11:11:52.213Z" }, - { url = "https://files.pythonhosted.org/packages/27/fa/cae40e06849b6c9a95eb5c04d419942f00d9eaac8d81626107461e268821/psycopg2_binary-2.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f090b7ddd13ca842ebfe301cd587a76a4cf0913b1e429eb92c1be5dbeb1a19bc", size = 3864509, upload-time = "2025-10-10T11:11:56.452Z" }, - { url = "https://files.pythonhosted.org/packages/2d/75/364847b879eb630b3ac8293798e380e441a957c53657995053c5ec39a316/psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:ab8905b5dcb05bf3fb22e0cf90e10f469563486ffb6a96569e51f897c750a76a", size = 4411159, upload-time = "2025-10-10T11:12:00.49Z" }, - { url = "https://files.pythonhosted.org/packages/6f/a0/567f7ea38b6e1c62aafd58375665a547c00c608a471620c0edc364733e13/psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:bf940cd7e7fec19181fdbc29d76911741153d51cab52e5c21165f3262125685e", size = 4468234, upload-time = "2025-10-10T11:12:04.892Z" }, - { url = "https://files.pythonhosted.org/packages/30/da/4e42788fb811bbbfd7b7f045570c062f49e350e1d1f3df056c3fb5763353/psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fa0f693d3c68ae925966f0b14b8edda71696608039f4ed61b1fe9ffa468d16db", size = 4166236, upload-time = "2025-10-10T11:12:11.674Z" }, - { url = "https://files.pythonhosted.org/packages/3c/94/c1777c355bc560992af848d98216148be5f1be001af06e06fc49cbded578/psycopg2_binary-2.9.11-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a1cf393f1cdaf6a9b57c0a719a1068ba1069f022a59b8b1fe44b006745b59757", size = 3983083, upload-time = "2025-10-30T02:55:15.73Z" }, - { url = "https://files.pythonhosted.org/packages/bd/42/c9a21edf0e3daa7825ed04a4a8588686c6c14904344344a039556d78aa58/psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ef7a6beb4beaa62f88592ccc65df20328029d721db309cb3250b0aae0fa146c3", size = 3652281, upload-time = "2025-10-10T11:12:17.713Z" }, - { url = "https://files.pythonhosted.org/packages/12/22/dedfbcfa97917982301496b6b5e5e6c5531d1f35dd2b488b08d1ebc52482/psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:31b32c457a6025e74d233957cc9736742ac5a6cb196c6b68499f6bb51390bd6a", size = 3298010, upload-time = "2025-10-10T11:12:22.671Z" }, - { url = "https://files.pythonhosted.org/packages/66/ea/d3390e6696276078bd01b2ece417deac954dfdd552d2edc3d03204416c0c/psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:edcb3aeb11cb4bf13a2af3c53a15b3d612edeb6409047ea0b5d6a21a9d744b34", size = 3044641, upload-time = "2025-10-30T02:55:19.929Z" }, - { url = "https://files.pythonhosted.org/packages/12/9a/0402ded6cbd321da0c0ba7d34dc12b29b14f5764c2fc10750daa38e825fc/psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b6d93d7c0b61a1dd6197d208ab613eb7dcfdcca0a49c42ceb082257991de9d", size = 3347940, upload-time = "2025-10-10T11:12:26.529Z" }, - { url = "https://files.pythonhosted.org/packages/b1/d2/99b55e85832ccde77b211738ff3925a5d73ad183c0b37bcbbe5a8ff04978/psycopg2_binary-2.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:b33fabeb1fde21180479b2d4667e994de7bbf0eec22832ba5d9b5e4cf65b6c6d", size = 2714147, upload-time = "2025-10-10T11:12:29.535Z" }, + { url = "https://files.pythonhosted.org/packages/e2/9f/ef4ef3c8e15083df90ca35265cfd1a081a2f0cc07bb229c6314c6af817f4/psycopg2_binary-2.9.12-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5cdc05117180c5fa9c40eea8ea559ce64d73824c39d928b7da9fb5f6a9392433", size = 3712459, upload-time = "2026-04-20T23:34:30.549Z" }, + { url = "https://files.pythonhosted.org/packages/b5/01/3dd14e46ba48c1e1a6ec58ee599fa1b5efa00c246d5046cd903d0eeb1af1/psycopg2_binary-2.9.12-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d3227a3bc228c10d21011a99245edca923e4e8bf461857e869a507d9a41fe9f6", size = 3822936, upload-time = "2026-04-20T23:34:32.77Z" }, + { url = 
"https://files.pythonhosted.org/packages/a6/f7/0640e4901119d8a9f7a1784b927f494e2198e213ceb593753d1f2c8b1b30/psycopg2_binary-2.9.12-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:995ce929eede89db6254b50827e2b7fd61e50d11f0b116b29fffe4a2e53c4580", size = 4578676, upload-time = "2026-04-20T23:34:35.18Z" }, + { url = "https://files.pythonhosted.org/packages/b0/55/44df3965b5f297c50cc0b1b594a31c67d6127a9d133045b8a66611b14dfb/psycopg2_binary-2.9.12-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9fe06d93e72f1c048e731a2e3e7854a5bfaa58fc736068df90b352cefe66f03f", size = 4274917, upload-time = "2026-04-20T23:34:37.982Z" }, + { url = "https://files.pythonhosted.org/packages/b0/4b/74535248b1eac0c9336862e8617c765ac94dac76f9e25d7c4a79588c8907/psycopg2_binary-2.9.12-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:40e7b28b63aaf737cb3a1edc3a9bbc9a9f4ad3dcb7152e8c1130e4050eddcb7d", size = 5894843, upload-time = "2026-04-20T23:34:40.856Z" }, + { url = "https://files.pythonhosted.org/packages/f2/ba/f1bf8d2ae71868ad800b661099086ee52bc0f8d9f05be1acd8ebb06757cc/psycopg2_binary-2.9.12-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:89d19a9f7899e8eb0656a2b3a08e0da04c720a06db6e0033eab5928aabe60fa9", size = 4110556, upload-time = "2026-04-20T23:34:44.016Z" }, + { url = "https://files.pythonhosted.org/packages/45/46/c15706c338403b7c420bcc0c2905aad116cc064545686d8bf85f1999ea00/psycopg2_binary-2.9.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:612b965daee295ae2da8f8218ce1d274645dc76ef3f1abf6a0a94fd57eff876d", size = 3655714, upload-time = "2026-04-20T23:34:46.233Z" }, + { url = "https://files.pythonhosted.org/packages/b3/7c/a2d5dc09b64a4564db242a0fe418fde7d33f6f8259dd2c5b9d7def00fb5a/psycopg2_binary-2.9.12-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:b9a339b79d37c1b45f3235265f07cdeb0cb5ad7acd2ac7720a5920989c17c24e", size = 3301154, upload-time = "2026-04-20T23:34:49.528Z" }, + { url = "https://files.pythonhosted.org/packages/c0/e8/cc8c9a4ce71461f9ec548d38cadc41dc184b34c73e6455450775a9334ccd/psycopg2_binary-2.9.12-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:3471336e1acfd9c7fe507b8bad5af9317b6a89294f9eb37bd9a030bb7bebcdc6", size = 3048882, upload-time = "2026-04-20T23:34:51.86Z" }, + { url = "https://files.pythonhosted.org/packages/19/6a/31e2296bc0787c5ab75d3d118e40b239db8151b5192b90b77c72bc9256e9/psycopg2_binary-2.9.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7af18183109e23502c8b2ae7f6926c0882766f35b5175a4cd737ad825e4d7a1b", size = 3351298, upload-time = "2026-04-20T23:34:54.124Z" }, + { url = "https://files.pythonhosted.org/packages/5f/a8/75f4e3e11203b590150abed2cf7794b9c9c9f7eceddae955191138b44dde/psycopg2_binary-2.9.12-cp312-cp312-win_amd64.whl", hash = "sha256:398fcd4db988c7d7d3713e2b8e18939776fd3fb447052daae4f24fa39daede4c", size = 2757230, upload-time = "2026-04-20T23:34:56.242Z" }, ] [[package]] @@ -6176,16 +6178,16 @@ wheels = [ [[package]] name = "realtime" -version = "2.28.3" +version = "2.29.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pydantic" }, { name = "typing-extensions" }, { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9c/3d/ef6ed9221f98766f3a503e6e3ac68fa7ca25c117b383f1efc448294232ac/realtime-2.28.3.tar.gz", hash = "sha256:5cc83a6217874426799d8bf74e96d904ac6fa77c39fa8982fa99287947eb2cbf", size = 18723, upload-time = "2026-03-20T14:38:08.424Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/6e/f1/08c42a42653942fadfbef495d5b0239356140e7186cc528704956c5f06d4/realtime-2.29.0.tar.gz", hash = "sha256:8efe4a1b3a548a5fda09de701bd041fa0970c5a2fe7d13db0b9861ce11828be2", size = 18715, upload-time = "2026-04-24T13:13:02.315Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5d/d5/659405f9d4c9b022b7ac02bd52986ccc081f211db081051440f46bf4f358/realtime-2.28.3-py3-none-any.whl", hash = "sha256:efe484d6d39024c7e00ef70f70be600142e9407e5d802de8c96e86e014ce3b36", size = 22378, upload-time = "2026-03-20T14:38:07.144Z" }, + { url = "https://files.pythonhosted.org/packages/77/48/f6375c0a24923beb988f0c71c052604c96641cf43c2d22b91ec1df86afa0/realtime-2.29.0-py3-none-any.whl", hash = "sha256:1a4891e6c82e88ac9d96ac715e435e086f6f8c7665212a8717346de829cbb509", size = 22374, upload-time = "2026-04-24T13:13:01.103Z" }, ] [[package]] @@ -6342,27 +6344,27 @@ wheels = [ [[package]] name = "ruff" -version = "0.15.11" +version = "0.15.12" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e4/8d/192f3d7103816158dfd5ea50d098ef2aec19194e6cbccd4b3485bdb2eb2d/ruff-0.15.11.tar.gz", hash = "sha256:f092b21708bf0e7437ce9ada249dfe688ff9a0954fc94abab05dcea7dcd29c33", size = 4637264, upload-time = "2026-04-16T18:46:26.58Z" } +sdist = { url = "https://files.pythonhosted.org/packages/99/43/3291f1cc9106f4c63bdce7a8d0df5047fe8422a75b091c16b5e9355e0b11/ruff-0.15.12.tar.gz", hash = "sha256:ecea26adb26b4232c0c2ca19ccbc0083a68344180bba2a600605538ce51a40a6", size = 4643852, upload-time = "2026-04-24T18:17:14.305Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/02/1e/6aca3427f751295ab011828e15e9bf452200ac74484f1db4be0197b8170b/ruff-0.15.11-py3-none-linux_armv6l.whl", hash = "sha256:e927cfff503135c558eb581a0c9792264aae9507904eb27809cdcff2f2c847b7", size = 10607943, upload-time = "2026-04-16T18:46:05.967Z" }, - { url = "https://files.pythonhosted.org/packages/e7/26/1341c262e74f36d4e84f3d6f4df0ac68cd53331a66bfc5080daa17c84c0b/ruff-0.15.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:7a1b5b2938d8f890b76084d4fa843604d787a912541eae85fd7e233398bbb73e", size = 10988592, upload-time = "2026-04-16T18:46:00.742Z" }, - { url = "https://files.pythonhosted.org/packages/03/71/850b1d6ffa9564fbb6740429bad53df1094082fe515c8c1e74b6d8d05f18/ruff-0.15.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d4176f3d194afbdaee6e41b9ccb1a2c287dba8700047df474abfbe773825d1cb", size = 10338501, upload-time = "2026-04-16T18:46:03.723Z" }, - { url = "https://files.pythonhosted.org/packages/f2/11/cc1284d3e298c45a817a6aadb6c3e1d70b45c9b36d8d9cce3387b495a03a/ruff-0.15.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b17c886fb88203ced3afe7f14e8d5ae96e9d2f4ccc0ee66aa19f2c2675a27e4", size = 10670693, upload-time = "2026-04-16T18:46:41.941Z" }, - { url = "https://files.pythonhosted.org/packages/ce/9e/f8288b034ab72b371513c13f9a41d9ba3effac54e24bfb467b007daee2ca/ruff-0.15.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:49fafa220220afe7758a487b048de4c8f9f767f37dfefad46b9dd06759d003eb", size = 10416177, upload-time = "2026-04-16T18:46:21.717Z" }, - { url = "https://files.pythonhosted.org/packages/85/71/504d79abfd3d92532ba6bbe3d1c19fada03e494332a59e37c7c2dabae427/ruff-0.15.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2ab8427e74a00d93b8bda1307b1e60970d40f304af38bccb218e056c220120d", size = 11221886, upload-time = "2026-04-16T18:46:15.086Z" }, - { url = 
"https://files.pythonhosted.org/packages/43/5a/947e6ab7a5ad603d65b474be15a4cbc6d29832db5d762cd142e4e3a74164/ruff-0.15.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:195072c0c8e1fc8f940652073df082e37a5d9cb43b4ab1e4d0566ab8977a13b7", size = 12075183, upload-time = "2026-04-16T18:46:07.944Z" }, - { url = "https://files.pythonhosted.org/packages/9f/a1/0b7bb6268775fdd3a0818aee8efd8f5b4e231d24dd4d528ced2534023182/ruff-0.15.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a3a0996d486af3920dec930a2e7daed4847dfc12649b537a9335585ada163e9e", size = 11516575, upload-time = "2026-04-16T18:46:31.687Z" }, - { url = "https://files.pythonhosted.org/packages/30/c3/bb5168fc4d233cc06e95f482770d0f3c87945a0cd9f614b90ea8dc2f2833/ruff-0.15.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bef2cb556d509259f1fe440bb9cd33c756222cf0a7afe90d15edf0866702431", size = 11306537, upload-time = "2026-04-16T18:46:36.988Z" }, - { url = "https://files.pythonhosted.org/packages/e4/92/4cfae6441f3967317946f3b788136eecf093729b94d6561f963ed810c82e/ruff-0.15.11-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:030d921a836d7d4a12cf6e8d984a88b66094ccb0e0f17ddd55067c331191bf19", size = 11296813, upload-time = "2026-04-16T18:46:24.182Z" }, - { url = "https://files.pythonhosted.org/packages/43/26/972784c5dde8313acde8ac71ba8ac65475b85db4a2352a76c9934361f9bc/ruff-0.15.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0e783b599b4577788dbbb66b9addcef87e9a8832f4ce0c19e34bf55543a2f890", size = 10633136, upload-time = "2026-04-16T18:46:39.802Z" }, - { url = "https://files.pythonhosted.org/packages/5b/53/3985a4f185020c2f367f2e08a103032e12564829742a1b417980ce1514a0/ruff-0.15.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ae90592246625ba4a34349d68ec28d4400d75182b71baa196ddb9f82db025ef5", size = 10424701, upload-time = "2026-04-16T18:46:10.381Z" }, - { url = "https://files.pythonhosted.org/packages/d3/57/bf0dfb32241b56c83bb663a826133da4bf17f682ba8c096973065f6e6a68/ruff-0.15.11-py3-none-musllinux_1_2_i686.whl", hash = "sha256:1f111d62e3c983ed20e0ca2e800f8d77433a5b1161947df99a5c2a3fb60514f0", size = 10873887, upload-time = "2026-04-16T18:46:29.157Z" }, - { url = "https://files.pythonhosted.org/packages/02/05/e48076b2a57dc33ee8c7a957296f97c744ca891a8ffb4ffb1aaa3b3f517d/ruff-0.15.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:06f483d6646f59eaffba9ae30956370d3a886625f511a3108994000480621d1c", size = 11404316, upload-time = "2026-04-16T18:46:19.462Z" }, - { url = "https://files.pythonhosted.org/packages/88/27/0195d15fe7a897cbcba0904792c4b7c9fdd958456c3a17d2ea6093716a9a/ruff-0.15.11-py3-none-win32.whl", hash = "sha256:476a2aa56b7da0b73a3ee80b6b2f0e19cce544245479adde7baa65466664d5f3", size = 10655535, upload-time = "2026-04-16T18:46:12.47Z" }, - { url = "https://files.pythonhosted.org/packages/3a/5e/c927b325bd4c1d3620211a4b96f47864633199feed60fa936025ab27e090/ruff-0.15.11-py3-none-win_amd64.whl", hash = "sha256:8b6756d88d7e234fb0c98c91511aae3cd519d5e3ed271cae31b20f39cb2a12a3", size = 11779692, upload-time = "2026-04-16T18:46:17.268Z" }, - { url = "https://files.pythonhosted.org/packages/63/b6/aeadee5443e49baa2facd51131159fd6301cc4ccfc1541e4df7b021c37dd/ruff-0.15.11-py3-none-win_arm64.whl", hash = "sha256:063fed18cc1bbe0ee7393957284a6fe8b588c6a406a285af3ee3f46da2391ee4", size = 11032614, upload-time = "2026-04-16T18:46:34.487Z" }, + { url = 
"https://files.pythonhosted.org/packages/c3/6e/e78ffb61d4686f3d96ba3df2c801161843746dcbcbb17a1e927d4829312b/ruff-0.15.12-py3-none-linux_armv6l.whl", hash = "sha256:f86f176e188e94d6bdbc09f09bfd9dc729059ad93d0e7390b5a73efe19f8861c", size = 10640713, upload-time = "2026-04-24T18:17:22.841Z" }, + { url = "https://files.pythonhosted.org/packages/ae/08/a317bc231fb9e7b93e4ef3089501e51922ff88d6936ce5cf870c4fe55419/ruff-0.15.12-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:e3bcd123364c3770b8e1b7baaf343cc99a35f197c5c6e8af79015c666c423a6c", size = 11069267, upload-time = "2026-04-24T18:17:30.105Z" }, + { url = "https://files.pythonhosted.org/packages/aa/a4/f828e9718d3dce1f5f11c39c4f65afd32783c8b2aebb2e3d259e492c47bd/ruff-0.15.12-py3-none-macosx_11_0_arm64.whl", hash = "sha256:fe87510d000220aa1ed530d4448a7c696a0cae1213e5ec30e5874287b66557b5", size = 10397182, upload-time = "2026-04-24T18:17:07.177Z" }, + { url = "https://files.pythonhosted.org/packages/71/e0/3310fc6d1b5e1fdea22bf3b1b807c7e187b581021b0d7d4514cccdb5fb71/ruff-0.15.12-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84a1630093121375a3e2a95b4a6dc7b59e2b4ee76216e32d81aae550a832d002", size = 10758012, upload-time = "2026-04-24T18:16:55.759Z" }, + { url = "https://files.pythonhosted.org/packages/11/c1/a606911aee04c324ddaa883ae418f3569792fd3c4a10c50e0dd0a2311e1e/ruff-0.15.12-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fb129f40f114f089ebe0ca56c0d251cf2061b17651d464bb6478dc01e69f11f5", size = 10447479, upload-time = "2026-04-24T18:16:51.677Z" }, + { url = "https://files.pythonhosted.org/packages/9d/68/4201e8444f0894f21ab4aeeaee68aa4f10b51613514a20d80bd628d57e88/ruff-0.15.12-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0c862b172d695db7598426b8af465e7e9ac00a3ea2a3630ee67eb82e366aaa6", size = 11234040, upload-time = "2026-04-24T18:17:16.529Z" }, + { url = "https://files.pythonhosted.org/packages/34/ff/8a6d6cf4ccc23fd67060874e832c18919d1557a0611ebef03fdb01fff11e/ruff-0.15.12-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2849ea9f3484c3aca43a82f484210370319e7170df4dfe4843395ddf6c57bc33", size = 12087377, upload-time = "2026-04-24T18:17:04.944Z" }, + { url = "https://files.pythonhosted.org/packages/85/f6/c669cf73f5152f623d34e69866a46d5e6185816b19fcd5b6dd8a2d299922/ruff-0.15.12-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9e77c7e51c07fe396826d5969a5b846d9cd4c402535835fb6e21ce8b28fef847", size = 11367784, upload-time = "2026-04-24T18:17:25.409Z" }, + { url = "https://files.pythonhosted.org/packages/e8/39/c61d193b8a1daaa8977f7dea9e8d8ba866e02ea7b65d32f6861693aa4c12/ruff-0.15.12-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83b2f4f2f3b1026b5fb449b467d9264bf22067b600f7b6f41fc5958909f449d0", size = 11344088, upload-time = "2026-04-24T18:17:12.258Z" }, + { url = "https://files.pythonhosted.org/packages/c2/8d/49afab3645e31e12c590acb6d3b5b69d7aab5b81926dbaf7461f9441f37a/ruff-0.15.12-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:9ba3b8f1afd7e2e43d8943e55f249e13f9682fde09711644a6e7290eb4f3e339", size = 11271770, upload-time = "2026-04-24T18:17:02.457Z" }, + { url = "https://files.pythonhosted.org/packages/46/06/33f41fe94403e2b755481cdfb9b7ef3e4e0ed031c4581124658d935d52b4/ruff-0.15.12-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e852ba9fdc890655e1d78f2df1499efbe0e54126bd405362154a75e2bde159c5", size = 10719355, upload-time = "2026-04-24T18:17:27.648Z" }, + { url = 
"https://files.pythonhosted.org/packages/0d/59/18aa4e014debbf559670e4048e39260a85c7fcee84acfd761ac01e7b8d35/ruff-0.15.12-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:dd8aed930da53780d22fc70bdf84452c843cf64f8cb4eb38984319c24c5cd5fd", size = 10462758, upload-time = "2026-04-24T18:17:32.347Z" }, + { url = "https://files.pythonhosted.org/packages/25/e7/cc9f16fd0f3b5fddcbd7ec3d6ae30c8f3fde1047f32a4093a98d633c6570/ruff-0.15.12-py3-none-musllinux_1_2_i686.whl", hash = "sha256:01da3988d225628b709493d7dc67c3b9b12c0210016b08690ef9bd27970b262b", size = 10953498, upload-time = "2026-04-24T18:17:20.674Z" }, + { url = "https://files.pythonhosted.org/packages/72/7a/a9ba7f98c7a575978698f4230c5e8cc54bbc761af34f560818f933dafa0c/ruff-0.15.12-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:9cae0f92bd5700d1213188b31cd3bdd2b315361296d10b96b8e2337d3d11f53e", size = 11447765, upload-time = "2026-04-24T18:17:09.755Z" }, + { url = "https://files.pythonhosted.org/packages/ea/f9/0ae446942c846b8266059ad8a30702a35afae55f5cdc54c5adf8d7afdc27/ruff-0.15.12-py3-none-win32.whl", hash = "sha256:d0185894e038d7043ba8fd6aee7499ece6462dc0ea9f1e260c7451807c714c20", size = 10657277, upload-time = "2026-04-24T18:17:18.591Z" }, + { url = "https://files.pythonhosted.org/packages/33/f1/9614e03e1cdcbf9437570b5400ced8a720b5db22b28d8e0f1bda429f660d/ruff-0.15.12-py3-none-win_amd64.whl", hash = "sha256:c87a162d61ab3adca47c03f7f717c68672edec7d1b5499e652331780fe74950d", size = 11837758, upload-time = "2026-04-24T18:17:00.113Z" }, + { url = "https://files.pythonhosted.org/packages/c0/98/6beb4b351e472e5f4c4613f7c35a5290b8be2497e183825310c4c3a3984b/ruff-0.15.12-py3-none-win_arm64.whl", hash = "sha256:a538f7a82d061cee7be55542aca1d86d1393d55d81d4fcc314370f4340930d4f", size = 11120821, upload-time = "2026-04-24T18:16:57.979Z" }, ] [[package]] @@ -6667,7 +6669,7 @@ wheels = [ [[package]] name = "storage3" -version = "2.28.3" +version = "2.29.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "deprecation" }, @@ -6676,9 +6678,9 @@ dependencies = [ { name = "pyiceberg" }, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/eb/b5/18df59ba92951d74774eb0265072bf236ead5e3cbc4b802d8bf1cf3581a0/storage3-2.28.3.tar.gz", hash = "sha256:2b3f843cbd44c4a3b483ec076a12c27de88c0ad5358a43067ed44ef08292353f", size = 20109, upload-time = "2026-03-20T14:38:11.467Z" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/be/771246434b5caf3c6187bfdc932eaede00bf5f2937b47475ab25209ede3e/storage3-2.29.0.tar.gz", hash = "sha256:b0cc2f6714655d725c998d2c5ae8c6fb4f56a513bd31e4f85770df557fe021e3", size = 20160, upload-time = "2026-04-24T13:13:04.626Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ad/a5/2dbe216954e026a8c2e2dc7dfa5fd7b1a1ae0824d10972e62462f4f15aca/storage3-2.28.3-py3-none-any.whl", hash = "sha256:bac35c5087619174448fdef6a337db4e3dfebf3de69f685bd706de93ddcdad69", size = 28239, upload-time = "2026-03-20T14:38:10.423Z" }, + { url = "https://files.pythonhosted.org/packages/fc/c3/790c31866f52c13b26f108b45759bf50dafae3a0bafb4511fadc98ba7c33/storage3-2.29.0-py3-none-any.whl", hash = "sha256:043ef7ff27cc8b9da12be403cf78ee4586180edfcf62b227ff61e1bd79594b06", size = 28284, upload-time = "2026-04-24T13:13:03.338Z" }, ] [[package]] @@ -6704,7 +6706,7 @@ wheels = [ [[package]] name = "supabase" -version = "2.28.3" +version = "2.29.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx" }, @@ -6715,37 +6717,37 @@ dependencies = [ { name = "supabase-functions" 
}, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5a/98/2f1c95a2269ce995a34f275760b1c2ee71ee7a75649238ca0470afdfc2ef/supabase-2.28.3.tar.gz", hash = "sha256:1200961e46cdec17c7c280a1e09a159544643eada2759591ea69835303a2e1a4", size = 9687, upload-time = "2026-03-20T14:38:13.272Z" } +sdist = { url = "https://files.pythonhosted.org/packages/51/a0/2407d616fdf68e8632bbbfb063d1685c38377ac0199e8ca11deaea1f3bf0/supabase-2.29.0.tar.gz", hash = "sha256:a88c4a4eb50fbb903e2e962fbc7c27733b00589140139f9e837bc9fe30dd3615", size = 9689, upload-time = "2026-04-24T13:13:06.728Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/de/96/1b48eb664153401c22087bbf77f6a428965e830cc8e0d0c6d68324a28342/supabase-2.28.3-py3-none-any.whl", hash = "sha256:52a7ce4a1d2d55fa6d657bf4760672935058143a5bedc64165851be25ce01dbd", size = 16634, upload-time = "2026-03-20T14:38:12.319Z" }, + { url = "https://files.pythonhosted.org/packages/22/52/232f6bbf5326e04ae12e2ef04a24f011a0d7cab379a8b9698652bc8ff78f/supabase-2.29.0-py3-none-any.whl", hash = "sha256:16c3ec4b7094f6b92efc5cd3bb3f96826d3b6dd5d24fe15c89c81166efce88fe", size = 16633, upload-time = "2026-04-24T13:13:05.722Z" }, ] [[package]] name = "supabase-auth" -version = "2.28.3" +version = "2.29.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx", extra = ["http2"] }, { name = "pydantic" }, { name = "pyjwt", extra = ["crypto"] }, ] -sdist = { url = "https://files.pythonhosted.org/packages/cc/6f/1bf81293374ba71183b321bf5dfd7151c3db0c2e24715f35783bc1c56385/supabase_auth-2.28.3.tar.gz", hash = "sha256:41c049da82f9d7fc2f111808e57e984015f128d033f58caa67fd76f428472807", size = 39160, upload-time = "2026-03-20T14:38:15.128Z" } +sdist = { url = "https://files.pythonhosted.org/packages/51/7f/7ceeb4c7a2caa188062e934897f0e08e1af0a0e47e376c7645c26b4c39d8/supabase_auth-2.29.0.tar.gz", hash = "sha256:46efc6a3455a23957b846dc974303a844ba0413718cfa899425477ac977f95b3", size = 39154, upload-time = "2026-04-24T13:13:08.509Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c3/d3/e012315aa895b434fa77bc475e2dfeb87119e67918ecca4d88a25f96814d/supabase_auth-2.28.3-py3-none-any.whl", hash = "sha256:e47c5caec7bbf3c258964d027fbbe99f3cc4a956d3a635f898c962b4d22832dd", size = 48378, upload-time = "2026-03-20T14:38:14.169Z" }, + { url = "https://files.pythonhosted.org/packages/f1/ac/3c35cf52281f940b9497cf17abfc5c2050ca49f342d60cfafe22dac3482b/supabase_auth-2.29.0-py3-none-any.whl", hash = "sha256:64de6ef8cae80f97d3aa8d5ca507d5427dda5c89885c0bcfe9f8b0263b6fb9a4", size = 48379, upload-time = "2026-04-24T13:13:07.417Z" }, ] [[package]] name = "supabase-functions" -version = "2.28.3" +version = "2.29.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx", extra = ["http2"] }, { name = "strenum" }, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/19/ea/59bf327960e5384fcc9e69afbdf97260a2cf2684a25c0731968a8a393b9c/supabase_functions-2.28.3.tar.gz", hash = "sha256:5a6255d60a263d44251c5ca250fcdde2408a8483a8bf31f4ac80255de8f3fcae", size = 4679, upload-time = "2026-03-20T14:38:16.742Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e5/19/1a1d22749f38f2a6cbca93a6f5a35c9f816c2c3c06bfaa077fa336e90537/supabase_functions-2.29.0.tar.gz", hash = "sha256:0f8a14a2ea9f12b1c208f61dc6f55e2f4b1121f81bf01c08f9b487d22888744d", size = 4683, upload-time = "2026-04-24T13:13:10.432Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/a5/ca/1e720f1347a88519e3d52b6d801cd031c3a7a5df66640c5dc6e81d925057/supabase_functions-2.28.3-py3-none-any.whl", hash = "sha256:eb30578866103fed9322c54e95dd68c2f1a4b6b177e129d9369edd364637904e", size = 8801, upload-time = "2026-03-20T14:38:15.883Z" }, + { url = "https://files.pythonhosted.org/packages/e0/10/6f8ef0b408ade76b5a439afab588ce5849e9604a23040ca73cfe0b90cb9e/supabase_functions-2.29.0-py3-none-any.whl", hash = "sha256:6f08de52eec5820eae53616868b85e849e181beffaa5d05b8ea1708ceae5e48e", size = 8799, upload-time = "2026-04-24T13:13:09.214Z" }, ] [[package]] @@ -7244,11 +7246,11 @@ wheels = [ [[package]] name = "types-psycopg2" -version = "2.9.21.20260408" +version = "2.9.21.20260422" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cd/24/d8ae11a0c056535557aaabeb7d7838423abdfdcf1e5f8dfb2c04d316c65d/types_psycopg2-2.9.21.20260408.tar.gz", hash = "sha256:bb65cd12f53b6633077fd782607a33065e1f3bf585219c9f786b61ad2b72211c", size = 27078, upload-time = "2026-04-08T04:26:15.848Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b9/a2/ecb04604074a7f2e82231ab1f2d3b5a792589aa3c21a597cb3232a38ece3/types_psycopg2-2.9.21.20260422.tar.gz", hash = "sha256:ad7574fa8e25d9aa96ab96cd280c4dee20872725cd1fe6a6d3facc354f2644d4", size = 27123, upload-time = "2026-04-22T04:36:33.263Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1a/fe/9aab9239640107b6e46afddcee578a916b8b98bfee36e03da5b0d2c95124/types_psycopg2-2.9.21.20260408-py3-none-any.whl", hash = "sha256:49b086bfc9e0ce901c6537403ead1c19c75275571040b037af0248a8e48c322f", size = 24921, upload-time = "2026-04-08T04:26:14.715Z" }, + { url = "https://files.pythonhosted.org/packages/61/08/82f86c2d0a7ae4d335c6fe3c4ad193c4a57f0d6bfe1a676289cf63667275/types_psycopg2-2.9.21.20260422-py3-none-any.whl", hash = "sha256:e240684ac37946c5a2a058b04ea1f2fd0e4ee2655719b8c3ec9abf37f96da5ba", size = 24918, upload-time = "2026-04-22T04:36:32.108Z" }, ] [[package]] @@ -7968,7 +7970,7 @@ wheels = [ [[package]] name = "xinference-client" -version = "2.5.0" +version = "2.7.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -7976,9 +7978,9 @@ dependencies = [ { name = "requests" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d0/8a/4d7c72510f3c462195c2e7aa63559cafcf20f7d1901132d533b7498bab1c/xinference_client-2.5.0.tar.gz", hash = "sha256:0680324e2f438b8b208ca80e8a7e1c22e9152fce54f8c024c75e2ce57bfa5639", size = 58430, upload-time = "2026-04-13T07:21:40.145Z" } +sdist = { url = "https://files.pythonhosted.org/packages/99/86/89723d8a4f862bac49581ef99c9e52c014acf42355710335470062efabf1/xinference_client-2.7.0.tar.gz", hash = "sha256:51c174bc1704a505512550097d4b2025480a840d97bed8097dfbfaec2172ca9e", size = 58577, upload-time = "2026-04-25T14:37:37.345Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5b/dd/4fd501b8092c01f0775142850e3b601d743edf733077b756defe4a01cc37/xinference_client-2.5.0-py3-none-any.whl", hash = "sha256:bb90f069a2c30ac6ea7453ab37a0fadd34c28b655afa51fe20c18e67a361c269", size = 40006, upload-time = "2026-04-13T07:21:38.851Z" }, + { url = "https://files.pythonhosted.org/packages/1c/22/f9b92941be1cba5b2347211bb04c354a6ba2bad0e7b2da41510f77959327/xinference_client-2.7.0-py3-none-any.whl", hash = "sha256:76377804eb7fd2ece8a7d1e5c517d8aed8b5a511834066e43414ad74bcb34c09", size = 40154, upload-time = "2026-04-25T14:37:35.959Z" }, ] [[package]] 
diff --git a/depot.json b/depot.json new file mode 100644 index 0000000000..1c8a32f130 --- /dev/null +++ b/depot.json @@ -0,0 +1 @@ +{"id":"smkxz53ddb"} diff --git a/e2e/features/apps/share-app.feature b/e2e/features/apps/share-app.feature new file mode 100644 index 0000000000..22f89f7ebb --- /dev/null +++ b/e2e/features/apps/share-app.feature @@ -0,0 +1,19 @@ +@apps @authenticated @core +Feature: Share app publicly + + Scenario: Enable public share for a published workflow app + Given I am signed in as the default E2E admin + And a "workflow" app has been created via API + And a minimal runnable workflow draft has been synced + When I open the app from the app list + And I open the publish panel + And I publish the app + And I navigate to the app overview page + And I enable the Web App share + Then the Web App should be in service + + @unauthenticated + Scenario: Access a shared workflow app without authentication + Given a workflow app has been published and shared via API + When I open the shared app URL + Then the shared app page should be accessible diff --git a/e2e/features/apps/workflow-run-publish.feature b/e2e/features/apps/workflow-run-publish.feature new file mode 100644 index 0000000000..8640a7490b --- /dev/null +++ b/e2e/features/apps/workflow-run-publish.feature @@ -0,0 +1,13 @@ +@apps @authenticated @core @mode-matrix +Feature: Workflow run and publish + + Scenario: Run and publish a minimal workflow app + Given I am signed in as the default E2E admin + And a "workflow" app has been created via API + And a minimal runnable workflow draft has been synced + When I open the app from the app list + And I run the workflow + Then the workflow run should succeed + When I open the publish panel + And I publish the app + Then the app should be marked as published diff --git a/e2e/features/step-definitions/apps/share-app.steps.ts b/e2e/features/step-definitions/apps/share-app.steps.ts new file mode 100644 index 0000000000..24da05baab --- /dev/null +++ b/e2e/features/step-definitions/apps/share-app.steps.ts @@ -0,0 +1,39 @@ +import type { DifyWorld } from '../../support/world' +import { Given, Then, When } from '@cucumber/cucumber' +import { expect } from '@playwright/test' +import { createTestApp, enableAppSiteAndGetURL, publishWorkflowApp, syncRunnableWorkflowDraft } from '../../../support/api' + +When('I enable the Web App share', async function (this: DifyWorld) { + const page = this.getPage() + const appName = this.lastCreatedAppName + if (!appName) + throw new Error('No app name available. Run "a \\"workflow\\" app has been created via API" first.') + + await page.locator('button').filter({ hasText: appName }).filter({ hasText: 'Workflow' }).click() + await expect(page.getByRole('switch').first()).toBeEnabled({ timeout: 15_000 }) + await page.getByRole('switch').first().click() +}) + +Then('the Web App should be in service', async function (this: DifyWorld) { + await expect(this.getPage().getByText('In Service').first()).toBeVisible({ timeout: 10_000 }) +}) + +Given('a workflow app has been published and shared via API', async function (this: DifyWorld) { + const app = await createTestApp(`E2E Share ${Date.now()}`, 'workflow') + this.createdAppIds.push(app.id) + this.lastCreatedAppName = app.name + await syncRunnableWorkflowDraft(app.id) + await publishWorkflowApp(app.id) + this.shareURL = await enableAppSiteAndGetURL(app.id) +}) + +When('I open the shared app URL', async function (this: DifyWorld) { + if (!this.shareURL) + throw new Error('No share URL available. 
Run "a workflow app has been published and shared via API" first.') + await this.getPage().goto(this.shareURL, { timeout: 20_000 }) +}) + +Then('the shared app page should be accessible', async function (this: DifyWorld) { + await expect(this.getPage()).toHaveURL(/\/(workflow|chat)\/[a-zA-Z0-9]+/, { timeout: 15_000 }) + await expect(this.getPage().locator('body')).toBeVisible({ timeout: 10_000 }) +}) diff --git a/e2e/features/step-definitions/apps/workflow-run.steps.ts b/e2e/features/step-definitions/apps/workflow-run.steps.ts new file mode 100644 index 0000000000..584a33e774 --- /dev/null +++ b/e2e/features/step-definitions/apps/workflow-run.steps.ts @@ -0,0 +1,23 @@ +import type { DifyWorld } from '../../support/world' +import { Given, Then, When } from '@cucumber/cucumber' +import { expect } from '@playwright/test' +import { syncRunnableWorkflowDraft } from '../../../support/api' + +Given('a minimal runnable workflow draft has been synced', async function (this: DifyWorld) { + const appId = this.createdAppIds.at(-1) + if (!appId) + throw new Error('No app ID found. Run "a \\"workflow\\" app has been created via API" first.') + await syncRunnableWorkflowDraft(appId) +}) + +When('I run the workflow', async function (this: DifyWorld) { + const page = this.getPage() + await page.getByText('Test Run').click() + await expect(page.getByText('Running').first()).toBeVisible({ timeout: 15_000 }) +}) + +Then('the workflow run should succeed', async function (this: DifyWorld) { + const page = this.getPage() + await page.getByText('DETAIL').click() + await expect(page.getByText('SUCCESS').first()).toBeVisible({ timeout: 55_000 }) +}) diff --git a/e2e/features/support/world.ts b/e2e/features/support/world.ts index 986f79c8f9..b53087171f 100644 --- a/e2e/features/support/world.ts +++ b/e2e/features/support/world.ts @@ -15,6 +15,7 @@ export class DifyWorld extends World { lastCreatedAppName: string | undefined createdAppIds: string[] = [] capturedDownloads: Download[] = [] + shareURL: string | undefined constructor(options: IWorldOptions) { super(options) @@ -27,6 +28,7 @@ export class DifyWorld extends World { this.lastCreatedAppName = undefined this.createdAppIds = [] this.capturedDownloads = [] + this.shareURL = undefined } async startSession(browser: Browser, authenticated: boolean) { diff --git a/e2e/scripts/run-cucumber.ts b/e2e/scripts/run-cucumber.ts index d7778e65e2..3c8e895e90 100644 --- a/e2e/scripts/run-cucumber.ts +++ b/e2e/scripts/run-cucumber.ts @@ -67,11 +67,20 @@ const main = async () => { logFilePath: path.join(logDir, 'cucumber-api.log'), }) + const celeryProcess = await startLoggedProcess({ + command: 'npx', + args: ['tsx', './scripts/setup.ts', 'celery'], + cwd: e2eDir, + label: 'celery worker', + logFilePath: path.join(logDir, 'cucumber-celery.log'), + }) + let cleanupPromise: Promise | undefined const cleanup = async () => { if (!cleanupPromise) { cleanupPromise = (async () => { await stopWebServer() + await stopManagedProcess(celeryProcess) await stopManagedProcess(apiProcess) if (startMiddlewareForRun) { diff --git a/e2e/scripts/setup.ts b/e2e/scripts/setup.ts index ba4c011b04..3f77a3f72a 100644 --- a/e2e/scripts/setup.ts +++ b/e2e/scripts/setup.ts @@ -202,6 +202,32 @@ export const startApi = async () => { }) } +export const startCelery = async () => { + const env = await getApiEnvironment() + + await runForegroundProcess({ + command: 'uv', + args: [ + 'run', + '--project', + '.', + '--no-sync', + 'celery', + '-A', + 'app.celery', + 'worker', + '--pool', + 'solo', + 
'--loglevel', + 'INFO', + '-Q', + 'workflow_based_app_execution', + ], + cwd: apiDir, + env, + }) +} + export const stopMiddleware = async () => { await runCommandOrThrow({ command: 'docker', @@ -308,7 +334,7 @@ export const startMiddleware = async () => { } const printUsage = () => { - console.log('Usage: tsx ./scripts/setup.ts <api|middleware|middleware-down>') + console.log('Usage: tsx ./scripts/setup.ts <api|celery|middleware|middleware-down>') } const main = async () => { @@ -318,6 +344,9 @@ const main = async () => { case 'api': await startApi() return + case 'celery': + await startCelery() + return case 'middleware-down': await stopMiddleware() return diff --git a/e2e/support/api.ts b/e2e/support/api.ts index 7d9fd0264f..74c42d3e73 100644 --- a/e2e/support/api.ts +++ b/e2e/support/api.ts @@ -80,3 +80,83 @@ export async function deleteTestApp(id: string): Promise<void> { await ctx.dispose() } } + +export async function syncRunnableWorkflowDraft(appId: string): Promise<void> { + const ctx = await createApiContext() + try { + await ctx.post(`/console/api/apps/${appId}/workflows/draft`, { + data: { + graph: { + nodes: [ + { + id: 'start', + type: 'custom', + position: { x: 80, y: 282 }, + data: { id: 'start', type: 'start', title: 'Start', variables: [] }, + }, + { + id: 'end', + type: 'custom', + position: { x: 480, y: 282 }, + data: { + id: 'end', + type: 'end', + title: 'End', + outputs: [{ variable: 'result', value_selector: ['sys', 'workflow_run_id'] }], + }, + }, + ], + edges: [ + { + id: 'start-end', + type: 'custom', + source: 'start', + target: 'end', + sourceHandle: 'source', + targetHandle: 'target', + }, + ], + viewport: { x: 0, y: 0, zoom: 1 }, + }, + features: {}, + environment_variables: [], + conversation_variables: [], + }, + }) + } + finally { + await ctx.dispose() + } +} + +export async function publishWorkflowApp(appId: string): Promise<void> { + const ctx = await createApiContext() + try { + await ctx.post(`/console/api/apps/${appId}/workflows/publish`, { + data: { marked_name: '', marked_comment: '' }, + }) + } + finally { + await ctx.dispose() + } +} + +type AppDetailWithSite = { + site: { access_token: string, app_base_url: string, enable_site: boolean } +} + +export async function enableAppSiteAndGetURL(appId: string): Promise<string> { + const ctx = await createApiContext() + try { + await ctx.post(`/console/api/apps/${appId}/site-enable`, { + data: { enable_site: true }, + }) + const res = await ctx.get(`/console/api/apps/${appId}`) + const body = (await res.json()) as AppDetailWithSite + const { app_base_url, access_token } = body.site + return `${app_base_url}/workflow/${access_token}` + } + finally { + await ctx.dispose() + } +} diff --git a/eslint-suppressions.json b/eslint-suppressions.json index 187ab29ac6..d3c2824b0b 100644 --- a/eslint-suppressions.json +++ b/eslint-suppressions.json @@ -124,11 +124,6 @@ "count": 1 } }, - "web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-config-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-panel.tsx": { "ts/no-explicit-any": { "count": 1 } }, @@ -2427,21 +2422,11 @@ "count": 1 } }, - "web/app/components/datasets/documents/create-from-pipeline/data-source/base/header.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/create-from-pipeline/data-source/online-documents/index.tsx": { "ts/no-explicit-any": { "count": 1 } }, - "web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/header/breadcrumbs/bucket.tsx": {
- "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/list/item.tsx": { "no-restricted-imports": { "count": 1 @@ -2530,11 +2515,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/detail/completed/common/summary-status.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/detail/completed/components/index.ts": { "no-barrel-files/no-barrel-files": { "count": 3 @@ -2794,11 +2774,6 @@ "count": 2 } }, - "web/app/components/develop/secret-key/input-copy.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/develop/secret-key/secret-key-generate.tsx": { "no-restricted-imports": { "count": 1 @@ -3164,16 +3139,6 @@ "count": 1 } }, - "web/app/components/plugins/base/badges/icon-with-tooltip.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/plugins/base/key-value-item.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/card/index.tsx": { "ts/no-non-null-asserted-optional-chain": { "count": 1 @@ -3248,14 +3213,6 @@ "count": 2 } }, - "web/app/components/plugins/plugin-auth/authorize/api-key-modal.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 2 - } - }, "web/app/components/plugins/plugin-auth/authorize/index.tsx": { "no-restricted-imports": { "count": 1 @@ -3341,24 +3298,11 @@ "count": 2 } }, - "web/app/components/plugins/plugin-detail-panel/detail-header/components/plugin-source-badge.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-detail-panel/detail-header/hooks/index.ts": { "no-barrel-files/no-barrel-files": { "count": 3 } }, - "web/app/components/plugins/plugin-detail-panel/endpoint-card.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 2 - } - }, "web/app/components/plugins/plugin-detail-panel/endpoint-list.tsx": { "no-restricted-imports": { "count": 1 @@ -3557,11 +3501,6 @@ "count": 1 } }, - "web/app/components/plugins/plugin-page/plugin-tasks/components/task-status-indicator.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/readme-panel/index.tsx": { "react/unsupported-syntax": { "count": 1 @@ -3835,14 +3774,6 @@ "count": 1 } }, - "web/app/components/tools/mcp/detail/content.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 3 - } - }, "web/app/components/tools/mcp/detail/tool-item.tsx": { "no-restricted-imports": { "count": 1 @@ -5417,14 +5348,6 @@ "count": 2 } }, - "web/app/components/workflow/panel/chat-variable-panel/components/variable-type-select.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 4 - } - }, "web/app/components/workflow/panel/chat-variable-panel/type.ts": { "erasable-syntax-only/enums": { "count": 1 diff --git a/packages/dify-ui/README.md b/packages/dify-ui/README.md index e9c762073d..cd9485c400 100644 --- a/packages/dify-ui/README.md +++ b/packages/dify-ui/README.md @@ -90,6 +90,22 @@ See `[web/docs/overlay-migration.md](../../web/docs/overlay-migration.md)` for t - `pnpm -C packages/dify-ui storybook` — Storybook on the default port. Each primitive has `index.stories.tsx`. - `pnpm -C packages/dify-ui type-check` — `tsgo --noEmit` for this package only. 
+### Disabling Animations In Tests + +Base UI can wait for `element.getAnimations()` to finish before it unmounts overlays, panels, and transition-driven components. Browser-based test runners can make that timing unstable, especially when tests assert final DOM state rather than animation behavior. + +Set the Base UI test flag in a Vitest setup file to skip those waits: + +```ts +( + globalThis as typeof globalThis & { + BASE_UI_ANIMATIONS_DISABLED: boolean + } +).BASE_UI_ANIMATIONS_DISABLED = true +``` + +`packages/dify-ui/vitest.setup.ts` already applies this for primitive tests. + See `[AGENTS.md](./AGENTS.md)` for: - Component authoring rules (one-component-per-folder, `cva` + `cn`, relative imports inside the package, subpath imports from consumers). diff --git a/packages/dify-ui/src/select/__tests__/index.spec.tsx b/packages/dify-ui/src/select/__tests__/index.spec.tsx index eab980a607..9e3e945de0 100644 --- a/packages/dify-ui/src/select/__tests__/index.spec.tsx +++ b/packages/dify-ui/src/select/__tests__/index.spec.tsx @@ -231,10 +231,8 @@ describe('Select wrappers', () => { , ) - screen.getByRole('group', { name: 'select positioner' }).element().dispatchEvent(new MouseEvent('mouseover', { - bubbles: true, - })) - asHTMLElement(screen.getByRole('dialog', { name: 'select popup' }).element()).click() + await screen.getByRole('group', { name: 'select positioner' }).hover() + await screen.getByRole('dialog', { name: 'select popup' }).click() screen.getByRole('listbox', { name: 'select list' }).element().dispatchEvent(new FocusEvent('focusin', { bubbles: true, })) diff --git a/packages/dify-ui/src/toast/__tests__/index.spec.tsx b/packages/dify-ui/src/toast/__tests__/index.spec.tsx index edbdacd203..51fccf70d8 100644 --- a/packages/dify-ui/src/toast/__tests__/index.spec.tsx +++ b/packages/dify-ui/src/toast/__tests__/index.spec.tsx @@ -3,19 +3,20 @@ import { toast, ToastHost } from '../index' const asHTMLElement = (element: HTMLElement | SVGElement) => element as HTMLElement -declare global { - // eslint-disable-next-line vars-on-top - var BASE_UI_ANIMATIONS_DISABLED: boolean | undefined +const dispatchToastMouseOver = (element: HTMLElement | SVGElement) => { + element.dispatchEvent(new MouseEvent('mouseover', { + bubbles: true, + })) +} + +const dispatchToastMouseOut = (element: HTMLElement | SVGElement) => { + element.dispatchEvent(new MouseEvent('mouseout', { + bubbles: true, + relatedTarget: document.body, + })) } describe('@langgenius/dify-ui/toast', () => { - beforeAll(() => { - // Base UI waits for `requestAnimationFrame` + `getAnimations().finished` - // before unmounting a toast. Fake timers can't reliably drive that path, - // so short-circuit it to keep auto-dismiss assertions deterministic in CI. 
- globalThis.BASE_UI_ANIMATIONS_DISABLED = true - }) - beforeEach(() => { vi.clearAllMocks() vi.useFakeTimers() @@ -28,10 +29,6 @@ describe('@langgenius/dify-ui/toast', () => { vi.useRealTimers() }) - afterAll(() => { - globalThis.BASE_UI_ANIMATIONS_DISABLED = undefined - }) - it('should render a success toast when called through the typed shortcut', async () => { const screen = await render() @@ -62,13 +59,13 @@ describe('@langgenius/dify-ui/toast', () => { expect(document.body.querySelectorAll('[role="dialog"]')).toHaveLength(3) expect(document.body.querySelectorAll('button[aria-label="Close notification"][aria-hidden="true"]')).toHaveLength(3) - screen.getByRole('region', { name: 'Notifications' }).element().dispatchEvent(new MouseEvent('mouseover', { - bubbles: true, - })) + const viewport = screen.getByRole('region', { name: 'Notifications' }).element() + dispatchToastMouseOver(viewport) await vi.waitFor(() => { expect(document.body.querySelector('button[aria-label="Close notification"][aria-hidden="true"]')).not.toBeInTheDocument() }) + dispatchToastMouseOut(viewport) }) it('should render a neutral toast when called directly', async () => { @@ -115,11 +112,11 @@ describe('@langgenius/dify-ui/toast', () => { onClose, }) - screen.getByRole('region', { name: 'Notifications' }).element().dispatchEvent(new MouseEvent('mouseover', { - bubbles: true, - })) + const viewport = screen.getByRole('region', { name: 'Notifications' }).element() + dispatchToastMouseOver(viewport) await expect.element(screen.getByRole('button', { name: 'Close notification' })).toBeInTheDocument() + dispatchToastMouseOut(viewport) asHTMLElement(screen.getByRole('button', { name: 'Close notification' }).element()).click() await vi.waitFor(() => { @@ -128,21 +125,6 @@ describe('@langgenius/dify-ui/toast', () => { expect(onClose).toHaveBeenCalledTimes(1) }) - it('should auto dismiss toasts with the Base UI default timeout', async () => { - const screen = await render() - - toast('Default timeout') - await expect.element(screen.getByText('Default timeout')).toBeInTheDocument() - - await vi.advanceTimersByTimeAsync(4999) - expect(document.body).toHaveTextContent('Default timeout') - - await vi.advanceTimersByTimeAsync(1) - await vi.waitFor(() => { - expect(document.body).not.toHaveTextContent('Default timeout') - }) - }) - it('should respect the host timeout configuration', async () => { const screen = await render() diff --git a/packages/dify-ui/vite.config.ts b/packages/dify-ui/vite.config.ts index 5f3533c706..f2a2d24e57 100644 --- a/packages/dify-ui/vite.config.ts +++ b/packages/dify-ui/vite.config.ts @@ -11,6 +11,7 @@ export default defineConfig({ }, test: { globals: true, + setupFiles: ['./vitest.setup.ts'], browser: { enabled: true, provider: playwright(), diff --git a/packages/dify-ui/vitest.setup.ts b/packages/dify-ui/vitest.setup.ts new file mode 100644 index 0000000000..285d6e7760 --- /dev/null +++ b/packages/dify-ui/vitest.setup.ts @@ -0,0 +1,5 @@ +( + globalThis as typeof globalThis & { + BASE_UI_ANIMATIONS_DISABLED: boolean + } +).BASE_UI_ANIMATIONS_DISABLED = true diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/__tests__/provider-config-modal.spec.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/__tests__/provider-config-modal.spec.tsx new file mode 100644 index 0000000000..f9e5ea28ee --- /dev/null +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/__tests__/provider-config-modal.spec.tsx @@ -0,0 +1,346 @@ +import type { 
AliyunConfig, ArizeConfig, DatabricksConfig, LangFuseConfig, LangSmithConfig, MLflowConfig, OpikConfig, PhoenixConfig, TencentConfig, WeaveConfig } from '../type' +import { toast } from '@langgenius/dify-ui/toast' +import { render, screen, waitFor } from '@testing-library/react' +import userEvent from '@testing-library/user-event' +import { addTracingConfig, removeTracingConfig, updateTracingConfig } from '@/service/apps' +import ConfigBtn from '../config-button' +import ProviderConfigModal from '../provider-config-modal' +import { TracingProvider } from '../type' + +vi.mock('@/service/apps', () => ({ + addTracingConfig: vi.fn(), + removeTracingConfig: vi.fn(), + updateTracingConfig: vi.fn(), +})) + +vi.mock('@langgenius/dify-ui/toast', () => ({ + toast: vi.fn(), +})) + +type ProviderPayload = AliyunConfig | ArizeConfig | DatabricksConfig | LangFuseConfig | LangSmithConfig | MLflowConfig | OpikConfig | PhoenixConfig | TencentConfig | WeaveConfig + +const validConfigs = { + [TracingProvider.arize]: { + api_key: 'arize-api-key', + space_id: 'space-id', + project: 'arize-project', + endpoint: 'https://otlp.arize.com', + }, + [TracingProvider.phoenix]: { + api_key: 'phoenix-api-key', + project: 'phoenix-project', + endpoint: 'https://app.phoenix.arize.com', + }, + [TracingProvider.langSmith]: { + api_key: 'langsmith-api-key', + project: 'langsmith-project', + endpoint: 'https://api.smith.langchain.com', + }, + [TracingProvider.langfuse]: { + public_key: 'public-key', + secret_key: 'secret-key', + host: 'https://cloud.langfuse.com', + }, + [TracingProvider.opik]: { + api_key: 'opik-api-key', + project: 'opik-project', + workspace: 'default', + url: 'https://www.comet.com/opik/api/', + }, + [TracingProvider.weave]: { + api_key: 'weave-api-key', + entity: 'wandb-entity', + project: 'weave-project', + endpoint: 'https://trace.wandb.ai/', + host: 'https://api.wandb.ai', + }, + [TracingProvider.aliyun]: { + app_name: 'aliyun-app', + license_key: 'license-key', + endpoint: 'https://tracing.arms.aliyuncs.com', + }, + [TracingProvider.mlflow]: { + tracking_uri: 'http://localhost:5000', + experiment_id: 'experiment-id', + username: 'mlflow-user', + password: 'mlflow-password', + }, + [TracingProvider.databricks]: { + experiment_id: 'experiment-id', + host: 'https://workspace.cloud.databricks.com', + client_id: 'client-id', + client_secret: 'client-secret', + personal_access_token: 'personal-access-token', + }, + [TracingProvider.tencent]: { + token: 'tencent-token', + endpoint: 'https://your-region.cls.tencentcs.com', + service_name: 'dify_app', + }, +} satisfies Record<TracingProvider, ProviderPayload> + +const providerFieldLabels = [ + [TracingProvider.arize, ['API Key', 'Space ID', 'app.tracing.configProvider.project', 'Endpoint']], + [TracingProvider.phoenix, ['API Key', 'app.tracing.configProvider.project', 'Endpoint']], + [TracingProvider.langSmith, ['API Key', 'app.tracing.configProvider.project', 'Endpoint']], + [TracingProvider.langfuse, ['app.tracing.configProvider.secretKey', 'app.tracing.configProvider.publicKey', 'Host']], + [TracingProvider.opik, ['API Key', 'app.tracing.configProvider.project', 'Workspace', 'Url']], + [TracingProvider.weave, ['API Key', 'app.tracing.configProvider.project', 'Entity', 'Endpoint', 'Host']], + [TracingProvider.aliyun, ['License Key', 'Endpoint', 'App Name']], + [TracingProvider.mlflow, ['app.tracing.configProvider.trackingUri', 'app.tracing.configProvider.experimentId', 'app.tracing.configProvider.username', 'app.tracing.configProvider.password']], + [TracingProvider.databricks,
['app.tracing.configProvider.experimentId', 'app.tracing.configProvider.databricksHost', 'app.tracing.configProvider.clientId', 'app.tracing.configProvider.clientSecret', 'app.tracing.configProvider.personalAccessToken']], + [TracingProvider.tencent, ['Token', 'Endpoint', 'Service Name']], +] as const + +const invalidConfigCases: Array<{ + provider: TracingProvider + payload: ProviderPayload + missingField: string +}> = [ + { provider: TracingProvider.arize, payload: { ...validConfigs[TracingProvider.arize], api_key: '' }, missingField: 'API Key' }, + { provider: TracingProvider.arize, payload: { ...validConfigs[TracingProvider.arize], space_id: '' }, missingField: 'Space ID' }, + { provider: TracingProvider.arize, payload: { ...validConfigs[TracingProvider.arize], project: '' }, missingField: 'app.tracing.configProvider.project' }, + { provider: TracingProvider.phoenix, payload: { ...validConfigs[TracingProvider.phoenix], api_key: '' }, missingField: 'API Key' }, + { provider: TracingProvider.phoenix, payload: { ...validConfigs[TracingProvider.phoenix], project: '' }, missingField: 'app.tracing.configProvider.project' }, + { provider: TracingProvider.langSmith, payload: { ...validConfigs[TracingProvider.langSmith], api_key: '' }, missingField: 'API Key' }, + { provider: TracingProvider.langSmith, payload: { ...validConfigs[TracingProvider.langSmith], project: '' }, missingField: 'app.tracing.configProvider.project' }, + { provider: TracingProvider.langfuse, payload: { ...validConfigs[TracingProvider.langfuse], secret_key: '' }, missingField: 'app.tracing.configProvider.secretKey' }, + { provider: TracingProvider.langfuse, payload: { ...validConfigs[TracingProvider.langfuse], public_key: '' }, missingField: 'app.tracing.configProvider.publicKey' }, + { provider: TracingProvider.langfuse, payload: { ...validConfigs[TracingProvider.langfuse], host: '' }, missingField: 'Host' }, + { provider: TracingProvider.weave, payload: { ...validConfigs[TracingProvider.weave], api_key: '' }, missingField: 'API Key' }, + { provider: TracingProvider.weave, payload: { ...validConfigs[TracingProvider.weave], project: '' }, missingField: 'app.tracing.configProvider.project' }, + { provider: TracingProvider.aliyun, payload: { ...validConfigs[TracingProvider.aliyun], app_name: '' }, missingField: 'App Name' }, + { provider: TracingProvider.aliyun, payload: { ...validConfigs[TracingProvider.aliyun], license_key: '' }, missingField: 'License Key' }, + { provider: TracingProvider.aliyun, payload: { ...validConfigs[TracingProvider.aliyun], endpoint: '' }, missingField: 'Endpoint' }, + { provider: TracingProvider.mlflow, payload: { ...validConfigs[TracingProvider.mlflow], tracking_uri: '' }, missingField: 'Tracking URI' }, + { provider: TracingProvider.databricks, payload: { ...validConfigs[TracingProvider.databricks], experiment_id: '' }, missingField: 'Experiment ID' }, + { provider: TracingProvider.databricks, payload: { ...validConfigs[TracingProvider.databricks], host: '' }, missingField: 'Host' }, + { provider: TracingProvider.tencent, payload: { ...validConfigs[TracingProvider.tencent], token: '' }, missingField: 'Token' }, + { provider: TracingProvider.tencent, payload: { ...validConfigs[TracingProvider.tencent], endpoint: '' }, missingField: 'Endpoint' }, + { provider: TracingProvider.tencent, payload: { ...validConfigs[TracingProvider.tencent], service_name: '' }, missingField: 'Service Name' }, +] + +const renderConfigButton = () => { + return render( + + + , + ) +} + +const renderProviderConfigModal = ({ 
+ type = TracingProvider.langfuse, + payload, +}: { + type?: TracingProvider + payload?: ProviderPayload | null +} = {}) => { + const callbacks = { + onCancel: vi.fn(), + onSaved: vi.fn(), + onChosen: vi.fn(), + onRemoved: vi.fn(), + } + + render( + , + ) + + return callbacks +} + +describe('ProviderConfigModal', () => { + beforeEach(() => { + vi.clearAllMocks() + vi.mocked(addTracingConfig).mockResolvedValue({ result: 'success' }) + vi.mocked(updateTracingConfig).mockResolvedValue({ result: 'success' }) + vi.mocked(removeTracingConfig).mockResolvedValue({ result: 'success' }) + }) + + describe('Nested Overlay Behavior', () => { + it('should keep the provider config modal open when clicking inside it', async () => { + const user = userEvent.setup() + renderConfigButton() + + await user.click(screen.getByRole('button', { name: 'Open tracing' })) + await waitFor(() => { + expect(screen.getByText('app.tracing.tracing')).toBeInTheDocument() + }) + + const configActions = screen.getAllByText('app.tracing.config') + expect(configActions.length).toBeGreaterThan(0) + await user.click(configActions[0]!) + await waitFor(() => { + expect(screen.getByText('app.tracing.configProvider.titleapp.tracing.langfuse.title')).toBeInTheDocument() + }) + expect(screen.getByRole('dialog')).toBeInTheDocument() + + await user.click(screen.getByPlaceholderText('https://cloud.langfuse.com')) + + expect(screen.getByText('app.tracing.tracing')).toBeInTheDocument() + expect(screen.getByText('app.tracing.configProvider.titleapp.tracing.langfuse.title')).toBeInTheDocument() + }) + }) + + describe('Rendering', () => { + it.each(providerFieldLabels)('should render %s fields when adding a provider', (provider, expectedLabels) => { + renderProviderConfigModal({ type: provider }) + + expect(screen.getByText(`app.tracing.configProvider.titleapp.tracing.${provider}.title`)).toBeInTheDocument() + expectedLabels.forEach((label) => { + expect(screen.getByText(label)).toBeInTheDocument() + }) + expect(screen.getByRole('button', { name: 'common.operation.saveAndEnable' })).toBeInTheDocument() + }) + }) + + describe('Saving', () => { + it('should add and choose the provider when saving a new config', async () => { + const user = userEvent.setup() + const callbacks = renderProviderConfigModal({ type: TracingProvider.langfuse }) + const textboxes = screen.getAllByRole('textbox') + + await user.type(textboxes[0]!, 'secret-key') + await user.type(textboxes[1]!, 'public-key') + await user.type(textboxes[2]!, 'https://cloud.langfuse.com') + await user.click(screen.getByRole('button', { name: 'common.operation.saveAndEnable' })) + + await waitFor(() => { + expect(addTracingConfig).toHaveBeenCalledWith({ + appId: 'app-id', + body: { + tracing_provider: TracingProvider.langfuse, + tracing_config: validConfigs[TracingProvider.langfuse], + }, + }) + }) + expect(callbacks.onSaved).toHaveBeenCalledWith(validConfigs[TracingProvider.langfuse]) + expect(callbacks.onChosen).toHaveBeenCalledWith(TracingProvider.langfuse) + expect(toast).toHaveBeenCalledWith('common.api.success', { type: 'success' }) + }) + + it.each(Object.values(TracingProvider))('should update valid %s config in edit mode', async (provider) => { + const user = userEvent.setup() + const callbacks = renderProviderConfigModal({ + type: provider, + payload: validConfigs[provider], + }) + + await user.click(screen.getByRole('button', { name: 'common.operation.save' })) + + await waitFor(() => { + expect(updateTracingConfig).toHaveBeenCalledWith({ + appId: 'app-id', + body: { + 
tracing_provider: provider, + tracing_config: validConfigs[provider], + }, + }) + }) + expect(callbacks.onSaved).toHaveBeenCalledWith(validConfigs[provider]) + expect(callbacks.onChosen).not.toHaveBeenCalled() + }) + + it.each(invalidConfigCases)('should reject $provider config when $missingField is missing', async ({ provider, payload, missingField }) => { + const user = userEvent.setup() + renderProviderConfigModal({ + type: provider, + payload, + }) + + await user.click(screen.getByRole('button', { name: 'common.operation.save' })) + + expect(updateTracingConfig).not.toHaveBeenCalled() + expect(toast).toHaveBeenCalledWith( + expect.stringContaining(missingField), + { type: 'error' }, + ) + }) + }) + + describe('Closing And Removing', () => { + it('should cancel when the cancel button is clicked', async () => { + const user = userEvent.setup() + const callbacks = renderProviderConfigModal() + + await user.click(screen.getByRole('button', { name: 'common.operation.cancel' })) + + expect(callbacks.onCancel).toHaveBeenCalledTimes(1) + }) + + it('should cancel when the dialog is closed with Escape', async () => { + const user = userEvent.setup() + const callbacks = renderProviderConfigModal() + + await user.keyboard('{Escape}') + + await waitFor(() => { + expect(callbacks.onCancel).toHaveBeenCalledTimes(1) + }) + }) + + it('should remove an existing provider after confirmation', async () => { + const user = userEvent.setup() + const callbacks = renderProviderConfigModal({ + type: TracingProvider.langfuse, + payload: validConfigs[TracingProvider.langfuse], + }) + + await user.click(screen.getByRole('button', { name: 'common.operation.remove' })) + expect(screen.getByText('app.tracing.configProvider.removeConfirmTitle:{"key":"app.tracing.langfuse.title"}')).toBeInTheDocument() + + await user.click(screen.getByRole('button', { name: 'common.operation.confirm' })) + + await waitFor(() => { + expect(removeTracingConfig).toHaveBeenCalledWith({ + appId: 'app-id', + provider: TracingProvider.langfuse, + }) + }) + expect(callbacks.onRemoved).toHaveBeenCalledTimes(1) + expect(toast).toHaveBeenCalledWith('common.api.remove', { type: 'success' }) + }) + + it('should return to the edit dialog when remove confirmation is canceled', async () => { + const user = userEvent.setup() + renderProviderConfigModal({ + type: TracingProvider.langfuse, + payload: validConfigs[TracingProvider.langfuse], + }) + + await user.click(screen.getByRole('button', { name: 'common.operation.remove' })) + await user.click(screen.getByRole('button', { name: 'common.operation.cancel' })) + + expect(removeTracingConfig).not.toHaveBeenCalled() + expect(screen.getByText('app.tracing.configProvider.titleapp.tracing.langfuse.title')).toBeInTheDocument() + }) + }) +}) diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-config-modal.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-config-modal.tsx index 4f2497ad71..734b39bd41 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-config-modal.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-config-modal.tsx @@ -11,6 +11,10 @@ import { AlertDialogTitle, } from '@langgenius/dify-ui/alert-dialog' import { Button } from '@langgenius/dify-ui/button' +import { + Dialog, + DialogContent, +} from '@langgenius/dify-ui/dialog' import { toast } from '@langgenius/dify-ui/toast' import { useBoolean } from 'ahooks' import * as React from 'react' @@ 
-19,10 +23,6 @@ import { useTranslation } from 'react-i18next'
 import Divider from '@/app/components/base/divider'
 import { LinkExternal02 } from '@/app/components/base/icons/src/vender/line/general'
 import { Lock01 } from '@/app/components/base/icons/src/vender/solid/security'
-import {
-  PortalToFollowElem,
-  PortalToFollowElemContent,
-} from '@/app/components/base/portal-to-follow-elem'
 import { addTracingConfig, removeTracingConfig, updateTracingConfig } from '@/service/apps'
 import { docURL } from './config'
 import Field from './field'
@@ -153,7 +153,11 @@ const ProviderConfigModal: FC = ({
     return weaveConfigTemplate
   })())
-  const [isShowRemoveConfirm, {
+  const [isConfigDialogOpen, {
+    set: setIsConfigDialogOpen,
+  }] = useBoolean(true)
+  const [isRemoveDialogOpen, {
+    set: setIsRemoveDialogOpen,
     setTrue: showRemoveConfirm,
     setFalse: hideRemoveConfirm,
   }] = useBoolean(false)
@@ -291,13 +295,24 @@ const ProviderConfigModal: FC = ({
     }
   }, [appId, checkValid, config, isAdd, isEdit, isSaving, onChosen, onSaved, t, type])

+  // Defer onCancel to onOpenChangeComplete so the dialog's exit animation
+  // (scale/opacity transition) can finish before the parent unmounts this modal.
+  const handleConfigDialogOpenChangeComplete = useCallback((open: boolean) => {
+    if (!open)
+      onCancel()
+  }, [onCancel])
+
   return (
     <>
-      {!isShowRemoveConfirm
+      {!isRemoveDialogOpen
         ? (
-
-
-
+
+
@@ -650,7 +665,7 @@ const ProviderConfigModal: FC = ({
           )}
@@ -683,11 +698,11 @@
-
-
+
+
         ) : (
-          !open && hideRemoveConfirm()}>
+
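// A minimal sketch of the close-animation pattern introduced in
// provider-config-modal.tsx above: keep the Dialog mounted and controlled,
// flip `open` immediately so the exit transition can start, and only notify
// the parent (which unmounts the modal) from the open-change-complete
// callback. `Dialog` here is an assumed stand-in for the dify-ui component;
// the callback names mirror the hunk above, everything else is illustrative.
import * as React from 'react'

type SketchDialogProps = {
  open: boolean
  onOpenChange: (open: boolean) => void
  onOpenChangeComplete?: (open: boolean) => void
  children?: React.ReactNode
}
// Assumed host component with base-ui-style open/animation callbacks.
declare const Dialog: React.FC<SketchDialogProps>

const AnimatedModalSketch = ({ onCancel }: { onCancel: () => void }) => {
  const [open, setOpen] = React.useState(true)

  // Runs as soon as the open state flips; the exit animation begins here.
  const handleOpenChange = React.useCallback((next: boolean) => {
    setOpen(next)
  }, [])

  // Runs after the scale/opacity transition settles; safe to unmount now.
  const handleOpenChangeComplete = React.useCallback((next: boolean) => {
    if (!next)
      onCancel()
  }, [onCancel])

  return (
    <Dialog
      open={open}
      onOpenChange={handleOpenChange}
      onOpenChangeComplete={handleOpenChangeComplete}
    >
      <p>modal body</p>
    </Dialog>
  )
}

export default AnimatedModalSketch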
diff --git a/web/app/components/app/app-publisher/index.tsx b/web/app/components/app/app-publisher/index.tsx index 3a1fcdf868..3a38c1b496 100644 --- a/web/app/components/app/app-publisher/index.tsx +++ b/web/app/components/app/app-publisher/index.tsx @@ -9,7 +9,6 @@ import type { PublishWorkflowParams, WorkflowTypeConversionTarget } from '@/type import { Button } from '@langgenius/dify-ui/button' import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import { toast } from '@langgenius/dify-ui/toast' -import { RiStoreLine } from '@remixicon/react' import { useSuspenseQuery } from '@tanstack/react-query' import { useKeyPress } from 'ahooks' import { @@ -292,7 +291,7 @@ const AppPublisher = ({ throw new Error('App not found') const { installed_apps } = await fetchInstalledAppList(appDetail.id) if (installed_apps?.length > 0) - return `${basePath}/explore/installed/${installed_apps[0].id}` + return `${basePath}/explore/installed/${installed_apps[0]!.id}` throw new Error('No app found in Explore') }, { onError: (err) => { @@ -546,64 +545,60 @@ const AppPublisher = ({ workflowTypeSwitchDisabledReason={workflowTypeSwitchDisabledReason} onWorkflowTypeSwitch={handleWorkflowTypeSwitch} /> - { - !isEvaluationWorkflowType && ( - <> - { - handleOpenChange(false) - setShowAppAccessControl(true) - }} - /> - { - setEmbeddingModalOpen(true) - handleOpenChange(false) - }} - handleOpenInExplore={() => { - handleOpenChange(false) - handleOpenInExplore() - }} - handleOpenRunConfig={handleOpenWorkflowLaunchDialog} - handlePublish={handlePublish} - hasHumanInputNode={hasHumanInputNode} - hasTriggerNode={hasTriggerNode} - inputs={inputs} - missingStartNode={missingStartNode} - onRefreshData={onRefreshData} - outputs={outputs} - published={published} - publishedAt={publishedAt} - showBatchRunConfig={hiddenLaunchVariables.length > 0 && (appDetail?.mode === AppModeEnum.WORKFLOW || appDetail?.mode === AppModeEnum.COMPLETION)} - showRunConfig={hiddenLaunchVariables.length > 0} - toolPublished={toolPublished} - workflowToolAvailable={workflowToolAvailable} - workflowToolMessage={workflowToolMessage} - /> - {systemFeatures.enable_creators_platform && ( -
- } - disabled={!publishedAt || publishingToMarketplace} - onClick={handlePublishToMarketplace} - > - {publishingToMarketplace - ? t('common.publishingToMarketplace', { ns: 'workflow' }) - : t('common.publishToMarketplace', { ns: 'workflow' })} - -
- )} - - ) - } + {!isEvaluationWorkflowType && ( + <> + { + setShowAppAccessControl(true) + handleOpenChange(false) + }} + /> + { + setEmbeddingModalOpen(true) + handleOpenChange(false) + }} + handleOpenInExplore={() => { + handleOpenChange(false) + handleOpenInExplore() + }} + handleOpenRunConfig={handleOpenWorkflowLaunchDialog} + handlePublish={handlePublish} + hasHumanInputNode={hasHumanInputNode} + hasTriggerNode={hasTriggerNode} + inputs={inputs} + missingStartNode={missingStartNode} + onRefreshData={onRefreshData} + outputs={outputs} + published={published} + publishedAt={publishedAt} + toolPublished={toolPublished} + workflowToolAvailable={workflowToolAvailable} + workflowToolMessage={workflowToolMessage} + /> + {systemFeatures.enable_creators_platform && ( +
+ } + disabled={!publishedAt || publishingToMarketplace} + onClick={handlePublishToMarketplace} + > + {publishingToMarketplace + ? t('common.publishingToMarketplace', { ns: 'workflow' }) + : t('common.publishToMarketplace', { ns: 'workflow' })} + +
+ )} + + )}
({ - Button: ({ children }: { children: React.ReactNode }) => , -})) - vi.mock('@/app/components/base/divider', () => ({ default: () => , })) -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ children }: { children: React.ReactNode }) =>
{children}
, -})) - vi.mock('../credential-selector', () => ({ default: () =>
, })) diff --git a/web/app/components/datasets/documents/create-from-pipeline/data-source/base/header.tsx b/web/app/components/datasets/documents/create-from-pipeline/data-source/base/header.tsx index a285946272..c91012bf4a 100644 --- a/web/app/components/datasets/documents/create-from-pipeline/data-source/base/header.tsx +++ b/web/app/components/datasets/documents/create-from-pipeline/data-source/base/header.tsx @@ -1,10 +1,9 @@ import type { CredentialSelectorProps } from './credential-selector' import { Button } from '@langgenius/dify-ui/button' -import { RiBookOpenLine, RiEqualizer2Line } from '@remixicon/react' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import * as React from 'react' import { useTranslation } from 'react-i18next' import Divider from '@/app/components/base/divider' -import Tooltip from '@/app/components/base/tooltip' import CredentialSelector from './credential-selector' type HeaderProps = { @@ -22,6 +21,7 @@ const Header = ({ ...rest }: HeaderProps) => { const { t } = useTranslation() + const configurationTip = t('configurationTip', { ns: 'datasetPipeline', pluginName }) return (
@@ -30,20 +30,23 @@ const Header = ({ {...rest} /> - - + + + + + )} + /> + + {configurationTip} +
- + {docTitle}
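// The tooltip migration in this and the following files repeats one pattern:
// replace the old single-component Tooltip (popupContent prop) with the
// composable Tooltip/TooltipTrigger/TooltipContent trio, and use the trigger's
// render prop to spread the trigger props onto the real button, so no wrapper
// element is added and the tooltip text doubles as the accessible name that
// the updated specs query by role/label. A minimal sketch, assuming the
// dify-ui render-prop API shown in the hunks above; labels are illustrative.
import { Button } from '@langgenius/dify-ui/button'
import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip'
import * as React from 'react'

const ConfigureButtonSketch = ({ label, onClick }: { label: string, onClick: () => void }) => (
  <Tooltip>
    <TooltipTrigger
      render={props => (
        // `props` carries the trigger's event handlers and aria wiring;
        // aria-label makes the icon button queryable by role + name in tests.
        <Button {...props} aria-label={label} onClick={onClick}>
          configure
        </Button>
      )}
    />
    <TooltipContent>{label}</TooltipContent>
  </Tooltip>
)

export default ConfigureButtonSketch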
diff --git a/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/header/breadcrumbs/__tests__/bucket.spec.tsx b/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/header/breadcrumbs/__tests__/bucket.spec.tsx index 83e17e6e04..b0a49eee0d 100644 --- a/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/header/breadcrumbs/__tests__/bucket.spec.tsx +++ b/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/header/breadcrumbs/__tests__/bucket.spec.tsx @@ -5,9 +5,6 @@ import Bucket from '../bucket' vi.mock('@/app/components/base/icons/src/public/knowledge/online-drive', () => ({ BucketsGray: (props: React.SVGProps) => , })) -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ children }: { children?: React.ReactNode }) =>
{children}
, -})) describe('Bucket', () => { const defaultProps = { @@ -32,8 +29,7 @@ describe('Bucket', () => { it('should call handleBackToBucketList on icon button click', () => { render() - const buttons = screen.getAllByRole('button') - fireEvent.click(buttons[0]!) + fireEvent.click(screen.getByRole('button', { name: 'datasetPipeline.onlineDrive.breadcrumbs.allBuckets' })) expect(defaultProps.handleBackToBucketList).toHaveBeenCalledOnce() }) diff --git a/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/header/breadcrumbs/bucket.tsx b/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/header/breadcrumbs/bucket.tsx index 003aee6542..384188502b 100644 --- a/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/header/breadcrumbs/bucket.tsx +++ b/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/header/breadcrumbs/bucket.tsx @@ -1,9 +1,10 @@ +import { Button } from '@langgenius/dify-ui/button' import { cn } from '@langgenius/dify-ui/cn' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import * as React from 'react' import { useCallback } from 'react' import { useTranslation } from 'react-i18next' import { BucketsGray } from '@/app/components/base/icons/src/public/knowledge/online-drive' -import Tooltip from '@/app/components/base/tooltip' type BucketProps = { bucketName: string @@ -27,19 +28,28 @@ const Bucket = ({ if (!disabled) handleClickBucketName() }, [disabled, handleClickBucketName]) + const allBucketsLabel = t('onlineDrive.breadcrumbs.allBuckets', { ns: 'datasetPipeline' }) return ( <> - - + + + + + )} + /> + + {allBucketsLabel} + / + default: ({ + children, + onClick, + ...props + }: React.ButtonHTMLAttributes) => ( + ), })) @@ -54,6 +52,6 @@ describe('KeyValueItem', () => { it('renders copy tooltip', () => { render() - expect(screen.getByTestId('tooltip')).toHaveAttribute('data-content', 'common.operation.copy') + expect(screen.getByRole('button', { name: 'common.operation.copy' })).toBeInTheDocument() }) }) diff --git a/web/app/components/plugins/base/badges/__tests__/icon-with-tooltip.spec.tsx b/web/app/components/plugins/base/badges/__tests__/icon-with-tooltip.spec.tsx index e24aa5a873..d4a87fa8a5 100644 --- a/web/app/components/plugins/base/badges/__tests__/icon-with-tooltip.spec.tsx +++ b/web/app/components/plugins/base/badges/__tests__/icon-with-tooltip.spec.tsx @@ -3,24 +3,6 @@ import { beforeEach, describe, expect, it, vi } from 'vitest' import { Theme } from '@/types/app' import IconWithTooltip from '../icon-with-tooltip' -// Mock Tooltip component -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ - children, - popupContent, - popupClassName, - }: { - children: React.ReactNode - popupContent?: string - popupClassName?: string - }) => ( -
- {children} -
- ), -})) - -// Mock icon components const MockLightIcon = ({ className }: { className?: string }) => (
Light Icon
) @@ -44,10 +26,10 @@ describe('IconWithTooltip', () => { />, ) - expect(screen.getByTestId('tooltip')).toBeInTheDocument() + expect(screen.getByTestId('light-icon')).toBeInTheDocument() }) - it('should render Tooltip wrapper', () => { + it('should render tooltip trigger with accessible label when popupContent is provided', () => { render( { />, ) - expect(screen.getByTestId('tooltip')).toHaveAttribute('data-popup-content', 'Test tooltip') - }) - - it('should apply correct popupClassName to Tooltip', () => { - render( - , - ) - - const tooltip = screen.getByTestId('tooltip') - expect(tooltip).toHaveAttribute('data-popup-classname') - expect(tooltip.getAttribute('data-popup-classname')).toContain('border-components-panel-border') + expect(screen.getByLabelText('Test tooltip')).toBeInTheDocument() }) }) @@ -171,10 +139,7 @@ describe('IconWithTooltip', () => { />, ) - expect(screen.getByTestId('tooltip')).toHaveAttribute( - 'data-popup-content', - 'Custom tooltip content', - ) + expect(screen.getByLabelText('Custom tooltip content')).toBeInTheDocument() }) it('should handle undefined popupContent', () => { @@ -186,7 +151,7 @@ describe('IconWithTooltip', () => { />, ) - expect(screen.getByTestId('tooltip')).toBeInTheDocument() + expect(screen.getByTestId('light-icon')).toBeInTheDocument() }) }) @@ -239,7 +204,7 @@ describe('IconWithTooltip', () => { />, ) - expect(screen.getByTestId('tooltip')).toHaveAttribute('data-popup-content', longContent) + expect(screen.getByLabelText(longContent)).toBeInTheDocument() }) it('should handle special characters in popupContent', () => { @@ -253,7 +218,7 @@ describe('IconWithTooltip', () => { />, ) - expect(screen.getByTestId('tooltip')).toHaveAttribute('data-popup-content', specialContent) + expect(screen.getByLabelText(specialContent)).toBeInTheDocument() }) }) }) diff --git a/web/app/components/plugins/base/badges/icon-with-tooltip.tsx b/web/app/components/plugins/base/badges/icon-with-tooltip.tsx index faabd545fd..2cb40adf0a 100644 --- a/web/app/components/plugins/base/badges/icon-with-tooltip.tsx +++ b/web/app/components/plugins/base/badges/icon-with-tooltip.tsx @@ -1,7 +1,7 @@ import type { FC } from 'react' import { cn } from '@langgenius/dify-ui/cn' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import * as React from 'react' -import Tooltip from '@/app/components/base/tooltip' import { Theme } from '@/types/app' type IconWithTooltipProps = { @@ -22,15 +22,24 @@ const IconWithTooltip: FC = ({ const isDark = theme === Theme.dark const iconClassName = cn('h-5 w-5', className) const Icon = isDark ? BadgeIconDark : BadgeIconLight + const icon = ( + + + + ) + + if (!popupContent) + return icon return ( - -
- -
+ + + + {popupContent} + ) } diff --git a/web/app/components/plugins/base/key-value-item.tsx b/web/app/components/plugins/base/key-value-item.tsx index 1ba8e8caf9..a2a3459b5d 100644 --- a/web/app/components/plugins/base/key-value-item.tsx +++ b/web/app/components/plugins/base/key-value-item.tsx @@ -1,16 +1,13 @@ 'use client' import type { FC } from 'react' import { cn } from '@langgenius/dify-ui/cn' -import { - RiClipboardLine, -} from '@remixicon/react' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import copy from 'copy-to-clipboard' import * as React from 'react' import { useCallback, useEffect, useState } from 'react' import { useTranslation } from 'react-i18next' import ActionButton from '@/app/components/base/action-button' import { CopyCheck } from '../../base/icons/src/vender/line/files' -import Tooltip from '../../base/tooltip' type Props = { label: string @@ -45,7 +42,7 @@ const KeyValueItem: FC = ({ } }, [isCopied]) - const CopyIcon = isCopied ? CopyCheck : RiClipboardLine + const copyLabel = t(`operation.${isCopied ? 'copied' : 'copy'}`, { ns: 'common' }) return (
@@ -54,10 +51,19 @@ const KeyValueItem: FC = ({ {maskedValue || value} - - - - + + + {isCopied + ? + : } + + )} + /> + + {copyLabel} +
diff --git a/web/app/components/plugins/plugin-auth/authorize/__tests__/add-api-key-button.spec.tsx b/web/app/components/plugins/plugin-auth/authorize/__tests__/add-api-key-button.spec.tsx index 794f847168..7caef50516 100644 --- a/web/app/components/plugins/plugin-auth/authorize/__tests__/add-api-key-button.spec.tsx +++ b/web/app/components/plugins/plugin-auth/authorize/__tests__/add-api-key-button.spec.tsx @@ -5,11 +5,29 @@ import AddApiKeyButton from '../add-api-key-button' let _mockModalOpen = false vi.mock('../api-key-modal', () => ({ - default: ({ onClose, onUpdate }: { onClose: () => void, onUpdate?: () => void }) => { - _mockModalOpen = true + default: ({ + open = true, + onClose, + onOpenChange, + onUpdate, + }: { + open?: boolean + onClose: () => void + onOpenChange?: (open: boolean) => void + onUpdate?: () => void + }) => { + _mockModalOpen = open + if (!open) + return null + + const handleClose = () => { + onOpenChange?.(false) + onClose() + } + return (
- +
) diff --git a/web/app/components/plugins/plugin-auth/authorize/__tests__/api-key-modal.spec.tsx b/web/app/components/plugins/plugin-auth/authorize/__tests__/api-key-modal.spec.tsx index 2bfa94d2ed..41f1aa3718 100644 --- a/web/app/components/plugins/plugin-auth/authorize/__tests__/api-key-modal.spec.tsx +++ b/web/app/components/plugins/plugin-auth/authorize/__tests__/api-key-modal.spec.tsx @@ -1,5 +1,8 @@ import type { ApiKeyModalProps } from '../api-key-modal' +import type { FormSchema } from '@/app/components/base/form/types' +import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import { fireEvent, render, screen, waitFor } from '@testing-library/react' +import userEvent from '@testing-library/user-event' import * as React from 'react' import { beforeEach, describe, expect, it, vi } from 'vitest' import { AuthCategory } from '../../types' @@ -20,17 +23,27 @@ vi.mock('@langgenius/dify-ui/toast', () => ({ })) const mockAddPluginCredential = vi.fn().mockResolvedValue({}) const mockUpdatePluginCredential = vi.fn().mockResolvedValue({}) -const mockFormValues = { isCheckValidated: true, values: { __name__: 'My Key', api_key: 'sk-123' } } +const defaultCredentialSchemas = [ + { name: 'api_key', label: 'API Key', type: 'secret-input', required: true }, +] +type MockFormValues = { + isCheckValidated: boolean + values: Record +} + +const defaultFormValues: MockFormValues = { isCheckValidated: true, values: { __name__: 'My Key', api_key: 'sk-123' } } +let mockCredentialSchemas = defaultCredentialSchemas +let mockIsSchemaLoading = false +let mockFormValues = defaultFormValues +const mockAuthFormProps = vi.fn() vi.mock('../../hooks/use-credential', () => ({ useAddPluginCredentialHook: () => ({ mutateAsync: mockAddPluginCredential, }), useGetPluginCredentialSchemaHook: () => ({ - data: [ - { name: 'api_key', label: 'API Key', type: 'secret-input', required: true }, - ], - isLoading: false, + data: mockCredentialSchemas, + isLoading: mockIsSchemaLoading, }), useUpdatePluginCredentialHook: () => ({ mutateAsync: mockUpdatePluginCredential, @@ -49,36 +62,19 @@ vi.mock('@/app/components/base/encrypted-bottom', () => ({ EncryptedBottom: () =>
, })) -vi.mock('@/app/components/base/modal/modal', () => ({ - default: ({ children, title, onClose, onConfirm, onExtraButtonClick, showExtraButton, disabled }: { - children: React.ReactNode - title: string - onClose?: () => void - onCancel?: () => void - onConfirm?: () => void - onExtraButtonClick?: () => void - showExtraButton?: boolean - disabled?: boolean - [key: string]: unknown - }) => ( -
-
{title}
- {children} - - - {showExtraButton && } -
- ), -})) - -vi.mock('@/app/components/base/form/form-scenarios/auth', () => ({ - default: React.forwardRef((_props: Record, ref: React.Ref) => { +vi.mock('@/app/components/base/form/form-scenarios/auth', () => { + const MockAuthForm = ({ ref, ...props }: { ref?: React.Ref } & Record) => { + mockAuthFormProps(props) React.useImperativeHandle(ref, () => ({ getFormValues: () => mockFormValues, })) return
- }), -})) + } + + return { + default: MockAuthForm, + } +}) vi.mock('@/app/components/base/form/types', () => ({ FormTypeEnum: { textInput: 'text-input' }, @@ -89,11 +85,73 @@ const basePayload = { provider: 'test-provider', } +const PopoverModalHarness = ({ + ApiKeyModal, + onClose, + onPopoverClose, +}: { + ApiKeyModal: React.FC + onClose: () => void + onPopoverClose: () => void +}) => { + const [open, setOpen] = React.useState(true) + + return ( + { + setOpen(nextOpen) + if (!nextOpen) + onPopoverClose() + }} + > + Credentials} /> + +
+ +
+
+
+ ) +} + +const ControlledModalHarness = ({ + ApiKeyModal, + onClose, +}: { + ApiKeyModal: React.FC + onClose: () => void +}) => { + const [open, setOpen] = React.useState(true) + + return ( + <> +
{String(open)}
+ + + ) +} + describe('ApiKeyModal', () => { let ApiKeyModal: React.FC beforeEach(async () => { vi.clearAllMocks() + mockCredentialSchemas = defaultCredentialSchemas + mockIsSchemaLoading = false + mockFormValues = defaultFormValues + mockAddPluginCredential.mockResolvedValue({}) + mockUpdatePluginCredential.mockResolvedValue({}) const mod = await import('../api-key-modal') ApiKeyModal = mod.default }) @@ -110,6 +168,56 @@ describe('ApiKeyModal', () => { expect(screen.getByTestId('auth-form')).toBeInTheDocument() }) + it('should prefer formSchemas prop and apply schema defaults', () => { + const customSchemas: FormSchema[] = [ + { + name: 'custom_api_key', + label: 'Custom API Key', + type: 'secret-input' as FormSchema['type'], + required: true, + default: 'default-key', + }, + ] + + render() + + expect(mockAuthFormProps).toHaveBeenCalledWith(expect.objectContaining({ + formSchemas: expect.arrayContaining([ + expect.objectContaining({ name: 'custom_api_key' }), + ]), + defaultValues: expect.objectContaining({ + custom_api_key: 'default-key', + }), + })) + }) + + it('should not render auth form when credential schema is empty', () => { + mockCredentialSchemas = [] + + render() + + expect(screen.queryByTestId('auth-form')).not.toBeInTheDocument() + }) + + it('should not submit when form ref is unavailable', () => { + mockCredentialSchemas = [] + + render() + + fireEvent.click(screen.getByTestId('modal-confirm')) + + expect(mockAddPluginCredential).not.toHaveBeenCalled() + }) + + it('should disable actions while loading credential schema', () => { + mockIsSchemaLoading = true + + render() + + expect(screen.queryByTestId('auth-form')).not.toBeInTheDocument() + expect(screen.getByTestId('modal-confirm')).toBeDisabled() + }) + it('should show remove button when editValues is provided', () => { render() @@ -130,6 +238,18 @@ describe('ApiKeyModal', () => { expect(mockOnClose).toHaveBeenCalled() }) + it('should close through controlled open state when cancel is clicked', async () => { + const mockOnClose = vi.fn() + render() + + fireEvent.click(screen.getByRole('button', { name: 'common.operation.cancel' })) + + await waitFor(() => { + expect(screen.getByTestId('modal-open-state')).toHaveTextContent('false') + }) + expect(mockOnClose).toHaveBeenCalled() + }) + it('should call addPluginCredential on confirm in add mode', async () => { const mockOnClose = vi.fn() const mockOnUpdate = vi.fn() @@ -145,6 +265,50 @@ describe('ApiKeyModal', () => { }) }) + it('should use empty credential name when authorization name is blank in add mode', async () => { + mockFormValues = { isCheckValidated: true, values: { api_key: 'sk-123' } } + + render() + + fireEvent.click(screen.getByTestId('modal-confirm')) + + await waitFor(() => { + expect(mockAddPluginCredential).toHaveBeenCalledWith(expect.objectContaining({ + name: '', + })) + }) + }) + + it('should not submit when form validation fails', () => { + mockFormValues = { isCheckValidated: false, values: {} } + + render() + + fireEvent.click(screen.getByTestId('modal-confirm')) + + expect(mockAddPluginCredential).not.toHaveBeenCalled() + expect(mockUpdatePluginCredential).not.toHaveBeenCalled() + }) + + it('should ignore repeated confirm while an action is in progress', async () => { + let repeatedClickTriggered = false + mockAddPluginCredential.mockImplementationOnce(async () => { + if (!repeatedClickTriggered) { + repeatedClickTriggered = true + fireEvent.click(screen.getByTestId('modal-confirm')) + } + return {} + }) + + render() + + 
fireEvent.click(screen.getByTestId('modal-confirm')) + + await waitFor(() => { + expect(mockAddPluginCredential).toHaveBeenCalledTimes(1) + }) + }) + it('should call updatePluginCredential on confirm in edit mode', async () => { render() @@ -155,6 +319,20 @@ describe('ApiKeyModal', () => { }) }) + it('should use empty credential name when authorization name is blank in edit mode', async () => { + mockFormValues = { isCheckValidated: true, values: { api_key: 'updated', __credential_id__: 'cred-1' } } + + render() + + fireEvent.click(screen.getByTestId('modal-confirm')) + + await waitFor(() => { + expect(mockUpdatePluginCredential).toHaveBeenCalledWith(expect.objectContaining({ + name: '', + })) + }) + }) + it('should call onRemove when remove button clicked', () => { const mockOnRemove = vi.fn() render() @@ -163,6 +341,49 @@ describe('ApiKeyModal', () => { expect(mockOnRemove).toHaveBeenCalled() }) + it('should stay open when clicking inside the modal from a popover', async () => { + // Use userEvent instead of fireEvent to avoid CI flakiness: userEvent + // awaits React act() between pointer/mouse/click so base-ui's dialog + // popup ref is guaranteed committed before outside-click detection runs. + const user = userEvent.setup() + const mockOnClose = vi.fn() + const mockOnPopoverClose = vi.fn() + + render( + , + ) + + const form = await screen.findByTestId('auth-form') + + await user.click(form) + + expect(mockOnClose).not.toHaveBeenCalled() + expect(mockOnPopoverClose).not.toHaveBeenCalled() + expect(screen.getByTestId('modal')).toBeInTheDocument() + }) + + it('should close on backdrop click through controlled open state', async () => { + const mockOnClose = vi.fn() + render() + + const backdrop = document.querySelector('.bg-background-overlay') + if (!backdrop) + throw new Error('Expected dialog backdrop to render') + + fireEvent.pointerDown(backdrop) + fireEvent.mouseDown(backdrop) + fireEvent.click(backdrop) + + await waitFor(() => { + expect(screen.getByTestId('modal-open-state')).toHaveTextContent('false') + }) + expect(mockOnClose).toHaveBeenCalled() + }) + it('should render readme entrance when detail is provided', () => { const payload = { ...basePayload, detail: { name: 'Test' } as never } render() diff --git a/web/app/components/plugins/plugin-auth/authorize/add-api-key-button.tsx b/web/app/components/plugins/plugin-auth/authorize/add-api-key-button.tsx index 648a87dabc..38f3f85643 100644 --- a/web/app/components/plugins/plugin-auth/authorize/add-api-key-button.tsx +++ b/web/app/components/plugins/plugin-auth/authorize/add-api-key-button.tsx @@ -25,20 +25,26 @@ const AddApiKeyButton = ({ formSchemas = [], }: AddApiKeyButtonProps) => { const [isApiKeyModalOpen, setIsApiKeyModalOpen] = useState(false) + const [isApiKeyModalMounted, setIsApiKeyModalMounted] = useState(false) return ( <> { - isApiKeyModalOpen && ( + isApiKeyModalMounted && ( setIsApiKeyModalOpen(false)} onUpdate={onUpdate} diff --git a/web/app/components/plugins/plugin-auth/authorize/api-key-modal.tsx b/web/app/components/plugins/plugin-auth/authorize/api-key-modal.tsx index db513ecb6f..290621141c 100644 --- a/web/app/components/plugins/plugin-auth/authorize/api-key-modal.tsx +++ b/web/app/components/plugins/plugin-auth/authorize/api-key-modal.tsx @@ -3,6 +3,8 @@ import type { FormRefObject, FormSchema, } from '@/app/components/base/form/types' +import { Button } from '@langgenius/dify-ui/button' +import { Dialog, DialogCloseButton, DialogContent, DialogTitle } from '@langgenius/dify-ui/dialog' import { toast } 
from '@langgenius/dify-ui/toast' import { memo, @@ -16,7 +18,6 @@ import { EncryptedBottom } from '@/app/components/base/encrypted-bottom' import AuthForm from '@/app/components/base/form/form-scenarios/auth' import { FormTypeEnum } from '@/app/components/base/form/types' import Loading from '@/app/components/base/loading' -import Modal from '@/app/components/base/modal/modal' import { ReadmeEntrance } from '../../readme-panel/entrance' import { ReadmeShowType } from '../../readme-panel/store' import { @@ -28,8 +29,10 @@ import { CredentialTypeEnum } from '../types' export type ApiKeyModalProps = { pluginPayload: PluginPayload + open?: boolean + onOpenChange?: (open: boolean) => void onClose?: () => void - editValues?: Record + editValues?: Record onRemove?: () => void disabled?: boolean onUpdate?: () => void @@ -37,6 +40,8 @@ export type ApiKeyModalProps = { } const ApiKeyModal = ({ pluginPayload, + open = true, + onOpenChange, onClose, editValues, onRemove, @@ -73,7 +78,7 @@ const ApiKeyModal = ({ if (schema.default) acc[schema.name] = schema.default return acc - }, {} as Record) + }, {} as Record) const { mutateAsync: addPluginCredential } = useAddPluginCredentialHook(pluginPayload) const { mutateAsync: updatePluginCredential } = useUpdatePluginCredentialHook(pluginPayload) const formRef = useRef(null) @@ -114,53 +119,102 @@ const ApiKeyModal = ({ } toast.success(t('api.actionSuccess', { ns: 'common' })) + onOpenChange?.(false) onClose?.() onUpdate?.() } finally { handleSetDoingAction(false) } - }, [addPluginCredential, onClose, onUpdate, updatePluginCredential, t, editValues, handleSetDoingAction]) + }, [addPluginCredential, onClose, onOpenChange, onUpdate, updatePluginCredential, t, editValues, handleSetDoingAction]) + + const isDisabled = disabled || isLoading || doingAction + const handleOpenChange = useCallback((nextOpen: boolean) => { + onOpenChange?.(nextOpen) + if (!nextOpen) + onClose?.() + }, [onClose, onOpenChange]) return ( -
) - } - bottomSlot={} - onConfirm={handleConfirm} - showExtraButton={!!editValues} - onExtraButtonClick={onRemove} - disabled={disabled || isLoading || doingAction} - clickOutsideNotClose={true} - wrapperClassName="z-1002!" + - {pluginPayload.detail && ( - - )} - { - isLoading && ( -
- + +
+
+ + {t('auth.useApiAuth', { ns: 'plugin' })} + +
+ {t('auth.useApiAuthDesc', { ns: 'plugin' })} +
+
- ) - } - { - !isLoading && !!mergedData.length && ( - - ) - } - +
+ {pluginPayload.detail && ( + + )} + { + isLoading && ( +
+ +
+ ) + } + { + !isLoading && !!mergedData.length && ( + + ) + } +
+
+
+
+ {editValues && ( + <> + +
+ + )} + + +
+
+
+ +
+
+ +
) } diff --git a/web/app/components/plugins/plugin-auth/authorized/index.tsx b/web/app/components/plugins/plugin-auth/authorized/index.tsx index b8b34e33e0..774821b0c8 100644 --- a/web/app/components/plugins/plugin-auth/authorized/index.tsx +++ b/web/app/components/plugins/plugin-auth/authorized/index.tsx @@ -19,9 +19,6 @@ import { PopoverTrigger, } from '@langgenius/dify-ui/popover' import { toast } from '@langgenius/dify-ui/toast' -import { - RiArrowDownSLine, -} from '@remixicon/react' import { memo, useCallback, @@ -93,19 +90,19 @@ const Authorized = ({ }, [onOpenChange]) const oAuthCredentials = credentials.filter(credential => credential.credential_type === CredentialTypeEnum.OAUTH2) const apiKeyCredentials = credentials.filter(credential => credential.credential_type === CredentialTypeEnum.API_KEY) - const pendingOperationCredentialId = useRef(null) + const pendingOperationCredentialIdRef = useRef(null) const [deleteCredentialId, setDeleteCredentialId] = useState(null) const { mutateAsync: deletePluginCredential } = useDeletePluginCredentialHook(pluginPayload) const openConfirm = useCallback((credentialId?: string) => { setMergedIsOpen(false) if (credentialId) - pendingOperationCredentialId.current = credentialId + pendingOperationCredentialIdRef.current = credentialId - setDeleteCredentialId(pendingOperationCredentialId.current) + setDeleteCredentialId(pendingOperationCredentialIdRef.current) }, [setMergedIsOpen]) const closeConfirm = useCallback(() => { setDeleteCredentialId(null) - pendingOperationCredentialId.current = null + pendingOperationCredentialIdRef.current = null }, []) const [doingAction, setDoingAction] = useState(false) const doingActionRef = useRef(doingAction) @@ -116,30 +113,37 @@ const Authorized = ({ const handleConfirm = useCallback(async () => { if (doingActionRef.current) return - if (!pendingOperationCredentialId.current) { + if (!pendingOperationCredentialIdRef.current) { setDeleteCredentialId(null) return } try { handleSetDoingAction(true) - await deletePluginCredential({ credential_id: pendingOperationCredentialId.current }) + await deletePluginCredential({ credential_id: pendingOperationCredentialIdRef.current }) toast.success(t('api.actionSuccess', { ns: 'common' })) onUpdate?.() setDeleteCredentialId(null) - pendingOperationCredentialId.current = null + pendingOperationCredentialIdRef.current = null } finally { handleSetDoingAction(false) } }, [deletePluginCredential, onUpdate, t, handleSetDoingAction]) const [editValues, setEditValues] = useState | null>(null) + const [isApiKeyModalOpen, setIsApiKeyModalOpen] = useState(false) const handleEdit = useCallback((id: string, values: Record) => { setMergedIsOpen(false) - pendingOperationCredentialId.current = id + pendingOperationCredentialIdRef.current = id setEditValues(values) + setIsApiKeyModalOpen(true) }, [setMergedIsOpen]) + const handleApiKeyModalOpenChange = useCallback((open: boolean) => { + setIsApiKeyModalOpen(open) + if (!open) + pendingOperationCredentialIdRef.current = null + }, []) const handleRemove = useCallback(() => { - setDeleteCredentialId(pendingOperationCredentialId.current) + setDeleteCredentialId(pendingOperationCredentialIdRef.current) }, []) const { mutateAsync: setPluginDefaultCredential } = useSetPluginDefaultCredentialHook(pluginPayload) const handleSetDefault = useCallback(async (id: string) => { @@ -213,7 +217,7 @@ const Authorized = ({ ` (${unavailableCredentials.length} ${t('auth.unavailable', { ns: 'plugin' })})` ) } - + ) } @@ -356,12 +360,11 @@ const Authorized = ({ { 
!!editValues && ( { - setEditValues(null) - pendingOperationCredentialId.current = null - }} + onClose={() => handleApiKeyModalOpenChange(false)} onRemove={handleRemove} disabled={disabled || doingAction} onUpdate={onUpdate} diff --git a/web/app/components/plugins/plugin-detail-panel/detail-header/components/__tests__/plugin-source-badge.spec.tsx b/web/app/components/plugins/plugin-detail-panel/detail-header/components/__tests__/plugin-source-badge.spec.tsx index 4d60433efb..08f5f836f4 100644 --- a/web/app/components/plugins/plugin-detail-panel/detail-header/components/__tests__/plugin-source-badge.spec.tsx +++ b/web/app/components/plugins/plugin-detail-panel/detail-header/components/__tests__/plugin-source-badge.spec.tsx @@ -3,14 +3,6 @@ import { beforeEach, describe, expect, it, vi } from 'vitest' import { PluginSource } from '../../../../types' import PluginSourceBadge from '../plugin-source-badge' -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ children, popupContent }: { children: React.ReactNode, popupContent: string }) => ( -
- {children} -
- ), -})) - describe('PluginSourceBadge', () => { beforeEach(() => { vi.clearAllMocks() @@ -20,33 +12,25 @@ describe('PluginSourceBadge', () => { it('should render marketplace source badge', () => { render() - const tooltip = screen.getByTestId('tooltip') - expect(tooltip).toBeInTheDocument() - expect(tooltip).toHaveAttribute('data-content', 'plugin.detailPanel.categoryTip.marketplace') + expect(screen.getByLabelText('plugin.detailPanel.categoryTip.marketplace')).toBeInTheDocument() }) it('should render github source badge', () => { render() - const tooltip = screen.getByTestId('tooltip') - expect(tooltip).toBeInTheDocument() - expect(tooltip).toHaveAttribute('data-content', 'plugin.detailPanel.categoryTip.github') + expect(screen.getByLabelText('plugin.detailPanel.categoryTip.github')).toBeInTheDocument() }) it('should render local source badge', () => { render() - const tooltip = screen.getByTestId('tooltip') - expect(tooltip).toBeInTheDocument() - expect(tooltip).toHaveAttribute('data-content', 'plugin.detailPanel.categoryTip.local') + expect(screen.getByLabelText('plugin.detailPanel.categoryTip.local')).toBeInTheDocument() }) it('should render debugging source badge', () => { render() - const tooltip = screen.getByTestId('tooltip') - expect(tooltip).toBeInTheDocument() - expect(tooltip).toHaveAttribute('data-content', 'plugin.detailPanel.categoryTip.debugging') + expect(screen.getByLabelText('plugin.detailPanel.categoryTip.debugging')).toBeInTheDocument() }) }) @@ -86,71 +70,47 @@ describe('PluginSourceBadge', () => { it('should show marketplace tooltip', () => { render() - expect(screen.getByTestId('tooltip')).toHaveAttribute( - 'data-content', - 'plugin.detailPanel.categoryTip.marketplace', - ) + expect(screen.getByLabelText('plugin.detailPanel.categoryTip.marketplace')).toBeInTheDocument() }) it('should show github tooltip', () => { render() - expect(screen.getByTestId('tooltip')).toHaveAttribute( - 'data-content', - 'plugin.detailPanel.categoryTip.github', - ) + expect(screen.getByLabelText('plugin.detailPanel.categoryTip.github')).toBeInTheDocument() }) it('should show local tooltip', () => { render() - expect(screen.getByTestId('tooltip')).toHaveAttribute( - 'data-content', - 'plugin.detailPanel.categoryTip.local', - ) + expect(screen.getByLabelText('plugin.detailPanel.categoryTip.local')).toBeInTheDocument() }) it('should show debugging tooltip', () => { render() - expect(screen.getByTestId('tooltip')).toHaveAttribute( - 'data-content', - 'plugin.detailPanel.categoryTip.debugging', - ) + expect(screen.getByLabelText('plugin.detailPanel.categoryTip.debugging')).toBeInTheDocument() }) }) describe('Icon Element Structure', () => { it('should render icon inside tooltip for marketplace', () => { - render() - - const tooltip = screen.getByTestId('tooltip') - const iconWrapper = tooltip.querySelector('div') - expect(iconWrapper).toBeInTheDocument() + const { container } = render() + expect(container.querySelector('[aria-label="plugin.detailPanel.categoryTip.marketplace"]')).toBeInTheDocument() }) it('should render icon inside tooltip for github', () => { - render() - - const tooltip = screen.getByTestId('tooltip') - const iconWrapper = tooltip.querySelector('div') - expect(iconWrapper).toBeInTheDocument() + const { container } = render() + expect(container.querySelector('[aria-label="plugin.detailPanel.categoryTip.github"]')).toBeInTheDocument() }) it('should render icon inside tooltip for local', () => { - render() - - const tooltip = screen.getByTestId('tooltip') - const iconWrapper 
= tooltip.querySelector('div') - expect(iconWrapper).toBeInTheDocument() + const { container } = render() + expect(container.querySelector('[aria-label="plugin.detailPanel.categoryTip.local"]')).toBeInTheDocument() }) it('should render icon inside tooltip for debugging', () => { - render() - - const tooltip = screen.getByTestId('tooltip') - const iconWrapper = tooltip.querySelector('div') - expect(iconWrapper).toBeInTheDocument() + const { container } = render() + expect(container.querySelector('[aria-label="plugin.detailPanel.categoryTip.debugging"]')).toBeInTheDocument() }) }) @@ -188,7 +148,7 @@ describe('PluginSourceBadge', () => { const invalidSource = '' as PluginSource render() - expect(screen.queryByTestId('tooltip')).not.toBeInTheDocument() + expect(screen.queryByLabelText(/^plugin\.detailPanel\.categoryTip\./)).not.toBeInTheDocument() }) }) }) diff --git a/web/app/components/plugins/plugin-detail-panel/detail-header/components/plugin-source-badge.tsx b/web/app/components/plugins/plugin-detail-panel/detail-header/components/plugin-source-badge.tsx index ba15815cde..9b6725da14 100644 --- a/web/app/components/plugins/plugin-detail-panel/detail-header/components/plugin-source-badge.tsx +++ b/web/app/components/plugins/plugin-detail-panel/detail-header/components/plugin-source-badge.tsx @@ -1,14 +1,10 @@ 'use client' import type { FC, ReactNode } from 'react' -import { - RiBugLine, - RiHardDrive3Line, -} from '@remixicon/react' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { useTranslation } from 'react-i18next' import { Github } from '@/app/components/base/icons/src/public/common' import { BoxSparkleFill } from '@/app/components/base/icons/src/vender/plugin' -import Tooltip from '@/app/components/base/tooltip' import { PluginSource } from '../../../types' type SourceConfig = { @@ -30,11 +26,11 @@ const SOURCE_CONFIG_MAP: Record = { tipKey: 'detailPanel.categoryTip.github', }, [PluginSource.local]: { - icon: , + icon: , tipKey: 'detailPanel.categoryTip.local', }, [PluginSource.debugging]: { - icon: , + icon: , tipKey: 'detailPanel.categoryTip.debugging', }, } @@ -45,12 +41,22 @@ const PluginSourceBadge: FC = ({ source }) => { const config = SOURCE_CONFIG_MAP[source] if (!config) return null + const tip = t(config.tipKey as never, { ns: 'plugin' }) return ( <>
·
- -
{config.icon}
+ + + {config.icon} +
+ )} + /> + + {tip} + ) diff --git a/web/app/components/plugins/plugin-detail-panel/endpoint-card.tsx b/web/app/components/plugins/plugin-detail-panel/endpoint-card.tsx index e1adc6282d..9aa944c4b3 100644 --- a/web/app/components/plugins/plugin-detail-panel/endpoint-card.tsx +++ b/web/app/components/plugins/plugin-detail-panel/endpoint-card.tsx @@ -1,3 +1,4 @@ +import type { ComponentProps } from 'react' import type { EndpointListItem, PluginDetail } from '../types' import { AlertDialog, @@ -9,7 +10,7 @@ import { } from '@langgenius/dify-ui/alert-dialog' import { Switch } from '@langgenius/dify-ui/switch' import { toast } from '@langgenius/dify-ui/toast' -import { RiClipboardLine, RiDeleteBinLine, RiEditLine, RiLoginCircleLine } from '@remixicon/react' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { useBoolean } from 'ahooks' import copy from 'copy-to-clipboard' import * as React from 'react' @@ -17,7 +18,6 @@ import { useEffect, useMemo, useState } from 'react' import { useTranslation } from 'react-i18next' import ActionButton from '@/app/components/base/action-button' import { CopyCheck } from '@/app/components/base/icons/src/vender/line/files' -import Tooltip from '@/app/components/base/tooltip' import Indicator from '@/app/components/header/indicator' import { addDefaultValue, toolCredentialToFormSchemas } from '@/app/components/tools/utils/to-form-schema' import { @@ -29,6 +29,8 @@ import { import EndpointModal from './endpoint-modal' import { NAME_FIELD } from './utils' +type EndpointModalFormSchemas = ComponentProps['formSchemas'] + type Props = { pluginDetail: PluginDetail data: EndpointListItem @@ -118,7 +120,7 @@ const EndpointCard = ({ toast.error(t('actionMsg.modifiedUnsuccessfully', { ns: 'common' })) }, }) - const handleUpdate = (state: Record) => updateEndpoint({ + const handleUpdate = (state: Record) => updateEndpoint({ endpointID, state, }) @@ -148,22 +150,22 @@ const EndpointCard = ({ } }, [isCopied]) - const CopyIcon = isCopied ? CopyCheck : RiClipboardLine + const copyLabel = t(`operation.${isCopied ? 'copied' : 'copy'}`, { ns: 'common' }) return (
- +
{data.name}
- + - +
@@ -172,10 +174,23 @@ const EndpointCard = ({
{endpoint.method}
{`${data.url}${endpoint.path}`}
- - handleCopy(`${data.url}${endpoint.path}`)}> - - + + handleCopy(`${data.url}${endpoint.path}`)} + > + {isCopied + ? + : } + + )} + /> + + {copyLabel} +
@@ -244,7 +259,7 @@ const EndpointCard = ({ {isShowEndpointModal && ( ({ ), })) -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ children, popupContent }: { children: React.ReactNode, popupContent: string }) => ( -
{children}
- ), -})) - vi.mock('@/app/components/header/plugins-nav/downloading-icon', () => ({ default: () => , })) @@ -38,18 +32,17 @@ describe('TaskStatusIndicator', () => { describe('Rendering', () => { it('should render without crashing', () => { render() - expect(screen.getByTestId('tooltip')).toBeInTheDocument() + expect(screen.getByRole('button', { name: 'Installing plugins' })).toBeInTheDocument() }) - it('should pass tip to tooltip', () => { + it('should use tip as the trigger accessible name', () => { render() - expect(screen.getByTestId('tooltip')).toHaveAttribute('data-tip', 'My tip') + expect(screen.getByRole('button', { name: 'My tip' })).toBeInTheDocument() }) it('should render install icon by default', () => { const { container } = render() - // RiInstallLine renders as svg - expect(container.querySelector('svg')).toBeInTheDocument() + expect(container.querySelector('.i-ri-install-line')).toBeInTheDocument() expect(screen.queryByTestId('downloading-icon')).not.toBeInTheDocument() }) }) @@ -127,7 +120,6 @@ describe('TaskStatusIndicator', () => { totalPluginsLength={3} />, ) - // RiCheckboxCircleFill is rendered as svg with text-text-success const successIcon = container.querySelector('.text-text-success') expect(successIcon).toBeInTheDocument() }) diff --git a/web/app/components/plugins/plugin-page/plugin-tasks/components/task-status-indicator.tsx b/web/app/components/plugins/plugin-page/plugin-tasks/components/task-status-indicator.tsx index d1de645f7b..691ee40f4d 100644 --- a/web/app/components/plugins/plugin-page/plugin-tasks/components/task-status-indicator.tsx +++ b/web/app/components/plugins/plugin-page/plugin-tasks/components/task-status-indicator.tsx @@ -1,12 +1,8 @@ import type { FC } from 'react' +import { Button } from '@langgenius/dify-ui/button' import { cn } from '@langgenius/dify-ui/cn' -import { - RiCheckboxCircleFill, - RiErrorWarningFill, - RiInstallLine, -} from '@remixicon/react' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import ProgressCircle from '@/app/components/base/progress-bar/progress-circle' -import Tooltip from '@/app/components/base/tooltip' import DownloadingIcon from '@/app/components/header/plugins-nav/downloading-icon' type TaskStatusIndicatorProps = { @@ -39,56 +35,61 @@ const TaskStatusIndicator: FC = ({ const showSuccessIcon = isSuccess || (successPluginsLength > 0 && runningPluginsLength === 0) return ( - -
- {/* Main Icon */} - {showDownloadingIcon - ? - : ( - + + + {showDownloadingIcon + ? + : ( + + )} - {/* Status Indicator Badge */} -
- {(isInstalling || isInstallingWithSuccess) && ( - 0 ? successPluginsLength / totalPluginsLength : 0) * 100} - circleFillColor="fill-components-progress-brand-bg" - /> - )} - {isInstallingWithError && ( - 0 ? runningPluginsLength / totalPluginsLength : 0) * 100} - circleFillColor="fill-components-progress-brand-bg" - sectorFillColor="fill-components-progress-error-border" - circleStrokeColor="stroke-components-progress-error-border" - /> - )} - {showSuccessIcon && !isInstalling && !isInstallingWithSuccess && !isInstallingWithError && ( - - )} - {isFailed && ( - - )} -
-
+
+ {(isInstalling || isInstallingWithSuccess) && ( + 0 ? successPluginsLength / totalPluginsLength : 0) * 100} + circleFillColor="fill-components-progress-brand-bg" + /> + )} + {isInstallingWithError && ( + 0 ? runningPluginsLength / totalPluginsLength : 0) * 100} + circleFillColor="fill-components-progress-brand-bg" + sectorFillColor="fill-components-progress-error-border" + circleStrokeColor="stroke-components-progress-error-border" + /> + )} + {showSuccessIcon && !isInstalling && !isInstallingWithSuccess && !isInstallingWithError && ( + + )} + {isFailed && ( + + )} +
+ + )} + /> + {tip}
) } diff --git a/web/app/components/tools/mcp/detail/__tests__/content.spec.tsx b/web/app/components/tools/mcp/detail/__tests__/content.spec.tsx index 584c9d211a..c2835c992b 100644 --- a/web/app/components/tools/mcp/detail/__tests__/content.spec.tsx +++ b/web/app/components/tools/mcp/detail/__tests__/content.spec.tsx @@ -697,16 +697,9 @@ describe('MCPDetailContent', () => { const onHide = vi.fn() render(, { wrapper: createWrapper() }) - // Find the close button (ActionButton with RiCloseLine) - const buttons = screen.getAllByRole('button') - const closeButton = buttons.find(btn => - btn.querySelector('svg.h-4.w-4'), - ) + fireEvent.click(screen.getByRole('button', { name: 'common.operation.close' })) - if (closeButton) { - fireEvent.click(closeButton) - expect(onHide).toHaveBeenCalled() - } + expect(onHide).toHaveBeenCalled() }) }) diff --git a/web/app/components/tools/mcp/detail/content.tsx b/web/app/components/tools/mcp/detail/content.tsx index 48ea75723c..35c8a35a6f 100644 --- a/web/app/components/tools/mcp/detail/content.tsx +++ b/web/app/components/tools/mcp/detail/content.tsx @@ -1,5 +1,5 @@ 'use client' -import type { FC } from 'react' +import type { ComponentProps, FC } from 'react' import type { ToolWithProvider } from '../../../workflow/types' import { AlertDialog, @@ -12,18 +12,13 @@ import { } from '@langgenius/dify-ui/alert-dialog' import { Button } from '@langgenius/dify-ui/button' import { cn } from '@langgenius/dify-ui/cn' -import { - RiCloseLine, - RiLoader2Line, - RiLoopLeftLine, -} from '@remixicon/react' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { useBoolean } from 'ahooks' import copy from 'copy-to-clipboard' import * as React from 'react' import { useCallback, useEffect } from 'react' import { useTranslation } from 'react-i18next' import ActionButton from '@/app/components/base/action-button' -import Tooltip from '@/app/components/base/tooltip' import Indicator from '@/app/components/header/indicator' import Icon from '@/app/components/plugins/card/base/card-icon' import { useAppContext } from '@/context/app-context' @@ -49,6 +44,11 @@ type Props = { onFirstCreate: () => void } +type MCPModalConfirmPayload = Parameters['onConfirm']>[0] +type MutationResult = { + result?: string +} + const MCPDetailContent: FC = ({ detail, onUpdate, @@ -128,14 +128,14 @@ const MCPDetailContent: FC = ({ } }, [onFirstCreate, isCurrentWorkspaceManager, detail, authorizeMcp, handleUpdateTools, handleOAuthCallback, onUpdate]) - const handleUpdate = useCallback(async (data: any) => { + const handleUpdate = useCallback(async (data: MCPModalConfirmPayload) => { if (!detail) return const res = await updateMCP({ ...data, provider_id: detail.id, - }) - if ((res as any)?.result === 'success') { + }) as MutationResult + if (res.result === 'success') { hideUpdateModal() onUpdate() handleAuthorize() @@ -146,9 +146,9 @@ const MCPDetailContent: FC = ({ if (!detail) return showDeleting() - const res = await deleteMCP(detail.id) + const res = await deleteMCP(detail.id) as MutationResult hideDeleting() - if ((res as any)?.result === 'success') { + if (res.result === 'success') { hideDeleteConfirm() onUpdate(true) } @@ -161,6 +161,8 @@ const MCPDetailContent: FC = ({ if (!detail) return null + const identifierLabel = t('mcp.identifier', { ns: 'tools' }) + const serverUrlLabel = t('mcp.modal.serverUrl', { ns: 'tools' }) return ( <> @@ -174,12 +176,37 @@ const MCPDetailContent: FC = ({
{detail.name}
- -
copy(detail.server_identifier || '')}>{detail.server_identifier}
+ + copy(detail.server_identifier || '')} + > + {detail.server_identifier} + + )} + /> + + {identifierLabel} +
·
- -
{detail.server_url}
+ + + {detail.server_url} +
+ )} + /> + + {serverUrlLabel} +
@@ -188,8 +215,8 @@ const MCPDetailContent: FC = ({ onEdit={showUpdateModal} onRemove={showDeleteConfirm} /> - - + + @@ -221,7 +248,7 @@ const MCPDetailContent: FC = ({ className="w-full" disabled > - + {t('mcp.authorizing', { ns: 'tools' })} )} @@ -262,7 +289,7 @@ const MCPDetailContent: FC = ({
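// content.tsx above removes the `(res as any)?.result` casts by deriving the
// confirm payload type from the modal component's props and narrowing mutation
// results through one small local shape. A sketch of the same pattern with
// illustrative stand-ins (MCPModalSketch/updateMCPSketch are not the real
// module exports):
import type { ComponentProps } from 'react'

declare const MCPModalSketch: (props: {
  onConfirm: (data: { name: string, server_url: string }) => void
}) => null

// Parameters<...>[0] keeps this in sync with whatever the modal passes to onConfirm.
type ConfirmPayload = Parameters<ComponentProps<typeof MCPModalSketch>['onConfirm']>[0]

type MutationResult = {
  result?: string
}

declare function updateMCPSketch(payload: ConfirmPayload & { provider_id: string }): Promise<unknown>

async function handleUpdateSketch(data: ConfirmPayload, providerId: string): Promise<boolean> {
  // One assertion at the service boundary; callers stay fully typed.
  const res = await updateMCPSketch({ ...data, provider_id: providerId }) as MutationResult
  return res.result === 'success'
}

export { handleUpdateSketch }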
diff --git a/web/app/components/workflow/block-selector/main.tsx b/web/app/components/workflow/block-selector/main.tsx
index 384c03178d..76854ebf0a 100644
--- a/web/app/components/workflow/block-selector/main.tsx
+++ b/web/app/components/workflow/block-selector/main.tsx
@@ -159,10 +159,8 @@ const NodeSelector: FC = ({
     if (onOpenChange)
       onOpenChange(newOpen)
-  }, [activeTab, disabled, onOpenChange])
+  }, [disabled, onOpenChange])
   const handleTrigger = useCallback>((e) => {
-    if (disabled)
-      return
     e.stopPropagation()
   }, [])
diff --git a/web/app/components/workflow/hooks/use-nodes-interactions.ts b/web/app/components/workflow/hooks/use-nodes-interactions.ts
index beb3f3733d..f885236ad9 100644
--- a/web/app/components/workflow/hooks/use-nodes-interactions.ts
+++ b/web/app/components/workflow/hooks/use-nodes-interactions.ts
@@ -1712,7 +1712,7 @@ export const useNodesInteractions = () => {
           nodeId: node.id,
         },
       })
-      handleNodeSelect(node.id)
+      handleNodeSelect(node.id, true)
     },
     [workflowStore, handleNodeSelect],
   )
diff --git a/web/app/components/workflow/nodes/_base/node.tsx b/web/app/components/workflow/nodes/_base/node.tsx
index ed83c58b6e..43f8e5773e 100644
--- a/web/app/components/workflow/nodes/_base/node.tsx
+++ b/web/app/components/workflow/nodes/_base/node.tsx
@@ -79,6 +79,7 @@ const BaseNode: FC = ({
   const appId = useStore(s => s.appId)
   const { nodePanelPresence } = useCollaboration(appId as string)
   const controlMode = useStore(s => s.controlMode)
+  const isContextMenuTarget = useStore(s => s.nodeMenu?.nodeId === id)
   const currentUserPresence = useMemo(() => {
     const userId = userProfile?.id || ''
@@ -123,7 +124,7 @@ const BaseNode: FC = ({
   const { hasNodeInspectVars } = useInspectVarsCrud()
   const isLoading = data._runningStatus === NodeRunningStatus.Running || data._singleRunningStatus === NodeRunningStatus.Running
   const hasVarValue = hasNodeInspectVars(id)
-  const showSelectedBorder = Boolean(data.selected || data._isBundled || data._isEntering)
+  const showSelectedBorder = Boolean(data.selected || isContextMenuTarget || data._isBundled || data._isEntering)
   const {
     showRunningBorder,
     showSuccessBorder,
diff --git a/web/app/components/workflow/panel/chat-variable-panel/components/__tests__/variable-type-select.spec.tsx b/web/app/components/workflow/panel/chat-variable-panel/components/__tests__/variable-type-select.spec.tsx
index 3a7df8a3bf..d0831c319c 100644
--- a/web/app/components/workflow/panel/chat-variable-panel/components/__tests__/variable-type-select.spec.tsx
+++ b/web/app/components/workflow/panel/chat-variable-panel/components/__tests__/variable-type-select.spec.tsx
@@ -36,8 +36,9 @@ describe('VariableTypeSelector', () => {
     await user.keyboard('{Escape}')
     await waitFor(() => {
-      expect(screen.queryByText('number')).not.toBeInTheDocument()
+      expect(screen.getByRole('combobox')).toHaveAttribute('aria-expanded', 'false')
     })
+    expect(screen.queryByRole('listbox')).not.toBeInTheDocument()
   })
   it('keeps the custom popup class in in-cell mode', async () => {
diff --git a/web/app/components/workflow/panel/chat-variable-panel/components/variable-type-select.tsx b/web/app/components/workflow/panel/chat-variable-panel/components/variable-type-select.tsx
index 94a0100de2..e1f776f3d5 100644
--- a/web/app/components/workflow/panel/chat-variable-panel/components/variable-type-select.tsx
+++ b/web/app/components/workflow/panel/chat-variable-panel/components/variable-type-select.tsx
@@ -1,38 +1,47 @@
 'use client'
 import { cn } from '@langgenius/dify-ui/cn'
-import { RiArrowDownSLine,
RiCheckLine } from '@remixicon/react' +import { Select, SelectContent, SelectItem, SelectItemIndicator, SelectItemText, SelectTrigger } from '@langgenius/dify-ui/select' import * as React from 'react' import { useState } from 'react' -import { - PortalToFollowElem, - PortalToFollowElemContent, - PortalToFollowElemTrigger, -} from '@/app/components/base/portal-to-follow-elem' -type Props = { +type Props = { inCell?: boolean - value?: any - list: any - onSelect: (value: any) => void + value?: T + list: readonly T[] + onSelect: (value: T) => void popupClassName?: string } -const VariableTypeSelector = ({ +const VariableTypeSelector = ({ inCell = false, value, list, onSelect, popupClassName, -}: Props) => { +}: Props) => { const [open, setOpen] = useState(false) + const handleValueChange = (nextValue: string | null) => { + if (!nextValue) + return + + const nextItem = list.find(item => item === nextValue) + if (!nextItem) + return + + onSelect(nextItem) + } + return ( - setOpen(v => !v)} - placement="bottom" + onOpenChange={setOpen} + onValueChange={handleValueChange} > - setOpen(v => !v)}> +
{value}
- +
- -
- {list.map((item: any) => ( -
{ - onSelect(item) - setOpen(false) - }} - > -
{item}
- {value === item && } -
- ))} -
-
-
+
+
+      {list.map(item => (
+
+        {item}
+
+
+      ))}
+
+    )
 }
diff --git a/web/i18n/ar-TN/app.json b/web/i18n/ar-TN/app.json
index 154758077b..7bc25ccf42 100644
--- a/web/i18n/ar-TN/app.json
+++ b/web/i18n/ar-TN/app.json
@@ -118,12 +118,29 @@
   "iconPicker.emoji": "رموز تعبيرية",
   "iconPicker.image": "صورة",
   "iconPicker.ok": "موافق",
+  "importApp": "استيراد التطبيق",
   "importDSL": "استيراد ملف DSL",
   "importFromDSL": "استيراد من DSL",
   "importFromDSLFile": "من ملف DSL",
   "importFromDSLUrl": "من رابط",
   "importFromDSLUrlPlaceholder": "لصق رابط DSL هنا",
   "join": "انضم إلى المجتمع",
+  "marketplace.template.categories": "الفئات",
+  "marketplace.template.category.design": "التصميم",
+  "marketplace.template.category.it": "تكنولوجيا المعلومات",
+  "marketplace.template.category.knowledge": "المعرفة",
+  "marketplace.template.category.marketing": "التسويق",
+  "marketplace.template.category.operations": "العمليات",
+  "marketplace.template.category.sales": "المبيعات",
+  "marketplace.template.category.support": "الدعم",
+  "marketplace.template.fetchFailed": "فشل في جلب القالب",
+  "marketplace.template.importConfirm": "استيراد",
+  "marketplace.template.importFailed": "فشل في استيراد القالب",
+  "marketplace.template.modalTitle": "استيراد من Marketplace",
+  "marketplace.template.overview": "نظرة عامة",
+  "marketplace.template.publishedBy": "بواسطة",
+  "marketplace.template.usageCount": "الاستخدام",
+  "marketplace.template.viewOnMarketplace": "عرض على Marketplace",
   "maxActiveRequests": "أقصى عدد للطلبات المتزامنة",
   "maxActiveRequestsPlaceholder": "أدخل 0 لغير محدود",
   "maxActiveRequestsTip": "الحد الأقصى لعدد الطلبات النشطة المتزامنة لكل تطبيق (0 لغير محدود)",
diff --git a/web/i18n/ar-TN/workflow.json b/web/i18n/ar-TN/workflow.json
index 04a618fb3b..cc6c533ca1 100644
--- a/web/i18n/ar-TN/workflow.json
+++ b/web/i18n/ar-TN/workflow.json
@@ -229,9 +229,12 @@
   "common.previewPlaceholder": "أدخل المحتوى في المربع أدناه لبدء تصحيح أخطاء Chatbot",
   "common.processData": "معالجة البيانات",
   "common.publish": "نشر",
+  "common.publishToMarketplace": "نشر على Marketplace",
+  "common.publishToMarketplaceFailed": "فشل النشر على Marketplace",
   "common.publishUpdate": "نشر التحديث",
   "common.published": "منشور",
   "common.publishedAt": "تم النشر في",
+  "common.publishingToMarketplace": "جارٍ النشر...",
   "common.redo": "إعادة",
   "common.restart": "إعادة تشغيل",
   "common.restore": "استعادة",
diff --git a/web/i18n/de-DE/app.json b/web/i18n/de-DE/app.json
index b316dcebce..c429e37802 100644
--- a/web/i18n/de-DE/app.json
+++ b/web/i18n/de-DE/app.json
@@ -118,12 +118,29 @@
   "iconPicker.emoji": "Emoji",
   "iconPicker.image": "Bild",
   "iconPicker.ok": "OK",
+  "importApp": "App importieren",
   "importDSL": "DSL-Datei importieren",
   "importFromDSL": "Import von DSL",
   "importFromDSLFile": "Aus DSL-Datei",
   "importFromDSLUrl": "Von URL",
   "importFromDSLUrlPlaceholder": "DSL-Link hier einfügen",
   "join": "Treten Sie der Gemeinschaft bei",
+  "marketplace.template.categories": "Kategorien",
+  "marketplace.template.category.design": "Design",
+  "marketplace.template.category.it": "IT",
+  "marketplace.template.category.knowledge": "Wissen",
+  "marketplace.template.category.marketing": "Marketing",
+  "marketplace.template.category.operations": "Betrieb",
+  "marketplace.template.category.sales": "Vertrieb",
+  "marketplace.template.category.support": "Support",
+  "marketplace.template.fetchFailed": "Vorlage konnte nicht abgerufen werden",
+  "marketplace.template.importConfirm": "Importieren",
+  "marketplace.template.importFailed": "Vorlage konnte nicht importiert werden",
+  "marketplace.template.modalTitle": "Aus Marketplace importieren",
+  "marketplace.template.overview": "Übersicht",
+  "marketplace.template.publishedBy": "Von",
+  "marketplace.template.usageCount": "Nutzung",
+  "marketplace.template.viewOnMarketplace": "Im Marketplace ansehen",
   "maxActiveRequests": "Maximale gleichzeitige Anfragen",
   "maxActiveRequestsPlaceholder": "Geben Sie 0 für unbegrenzt ein",
   "maxActiveRequestsTip": "Maximale Anzahl gleichzeitiger aktiver Anfragen pro App (0 für unbegrenzt)",
diff --git a/web/i18n/de-DE/workflow.json b/web/i18n/de-DE/workflow.json
index fe50c09651..426c023259 100644
--- a/web/i18n/de-DE/workflow.json
+++ b/web/i18n/de-DE/workflow.json
@@ -229,9 +229,12 @@
   "common.previewPlaceholder": "Geben Sie den Inhalt in das Feld unten ein, um das Debuggen des Chatbots zu starten",
   "common.processData": "Daten verarbeiten",
   "common.publish": "Veröffentlichen",
+  "common.publishToMarketplace": "Im Marketplace veröffentlichen",
+  "common.publishToMarketplaceFailed": "Veröffentlichung im Marketplace fehlgeschlagen",
   "common.publishUpdate": "Update veröffentlichen",
   "common.published": "Veröffentlicht",
   "common.publishedAt": "Veröffentlicht am",
+  "common.publishingToMarketplace": "Wird veröffentlicht...",
   "common.redo": "Wiederholen",
   "common.restart": "Neustarten",
   "common.restore": "Wiederherstellen",
diff --git a/web/i18n/en-US/workflow.json b/web/i18n/en-US/workflow.json
index 3bb285d501..c8cbed8f1e 100644
--- a/web/i18n/en-US/workflow.json
+++ b/web/i18n/en-US/workflow.json
@@ -229,6 +229,8 @@
   "common.previewPlaceholder": "Enter content in the box below to start debugging the Chatbot",
   "common.processData": "Process Data",
   "common.publish": "Publish",
+  "common.publishAsEvaluationWorkflow": "Publish as Evaluation Workflow",
+  "common.publishAsStandardWorkflow": "Publish as Standard Workflow",
   "common.publishToMarketplace": "Publish to Marketplace",
   "common.publishToMarketplaceFailed": "Failed to publish to Marketplace",
   "common.publishUpdate": "Publish Update",
@@ -247,6 +249,21 @@
   "common.searchVar": "Search variable",
   "common.setVarValuePlaceholder": "Set variable",
   "common.showRunHistory": "Show Run History",
+  "common.switchToEvaluationWorkflow": "Switch to Evaluation Workflow",
+  "common.switchToEvaluationWorkflowDisabledTip": "Evaluation workflows do not support Human Input nodes or Trigger start nodes.",
+  "common.switchToEvaluationWorkflowTip": "Turns this workflow into a custom evaluator for batch testing. Disables public Web App access.",
+  "common.switchToStandardWorkflow": "Switch to Standard Workflow",
+  "common.switchToStandardWorkflowConfirm.activeIn_one": "This evaluator is currently active in {{count}} configuration.",
+  "common.switchToStandardWorkflowConfirm.activeIn_other": "This evaluator is currently active in {{count}} configurations.",
+  "common.switchToStandardWorkflowConfirm.dependentWorkflows": "Dependent workflows",
+  "common.switchToStandardWorkflowConfirm.description": "Switching to a standard workflow will break these dependencies and may cause active batch tests to fail.",
+  "common.switchToStandardWorkflowConfirm.loadFailed": "Failed to load dependent workflows.",
+  "common.switchToStandardWorkflowConfirm.switch": "Switch",
+  "common.switchToStandardWorkflowConfirm.targetTypes.app": "Workflow",
+  "common.switchToStandardWorkflowConfirm.targetTypes.knowledge_base": "Knowledge Base",
+  "common.switchToStandardWorkflowConfirm.targetTypes.snippets": "Snippet",
+  "common.switchToStandardWorkflowConfirm.title": "Switch to Standard Workflow?",
+  "common.switchToStandardWorkflowTip": "Turns this evaluator back into a standard workflow and restores public Web App access.",
   "common.syncingData": "Syncing data, just a few seconds.",
   "common.tagBound": "Number of apps using this tag",
   "common.undo": "Undo",
@@ -1147,6 +1164,16 @@
   "singleRun.testRun": "Test Run",
   "singleRun.testRunIteration": "Test Run Iteration",
   "singleRun.testRunLoop": "Test Run Loop",
+  "snippet.addToSnippet": "Add to snippet",
+  "snippet.confirm": "Confirm",
+  "snippet.createDialogTitle": "Create Snippet",
+  "snippet.createSuccess": "Snippet created",
+  "snippet.descriptionLabel": "Description (Optional)",
+  "snippet.descriptionPlaceholder": "Briefly describe your snippet",
+  "snippet.nameLabel": "Snippet Name & Icon",
+  "snippet.namePlaceholder": "Snippet name",
+  "snippet.shortcuts.press": "Press",
+  "snippet.shortcuts.toConfirm": "to confirm",
   "tabs.-": "Default",
   "tabs.addAll": "Add all",
   "tabs.agent": "Agent Strategy",
@@ -1154,6 +1181,7 @@
   "tabs.allTool": "All",
   "tabs.allTriggers": "All triggers",
   "tabs.blocks": "Nodes",
+  "tabs.createSnippet": "Create a snippet",
   "tabs.customTool": "Custom",
   "tabs.featuredTools": "Featured",
   "tabs.hideActions": "Hide tools",
@@ -1163,19 +1191,23 @@
   "tabs.noFeaturedTriggers": "Discover more triggers in Marketplace",
   "tabs.noPluginsFound": "No plugins were found",
   "tabs.noResult": "No match found",
+  "tabs.noSnippetsFound": "No snippets were found",
   "tabs.plugin": "Plugin",
   "tabs.pluginByAuthor": "By {{author}}",
   "tabs.question-understand": "Question Understand",
   "tabs.requestToCommunity": "Requests to the community",
   "tabs.searchBlock": "Search node",
   "tabs.searchDataSource": "Search Data Source",
+  "tabs.searchSnippets": "Search snippets...",
   "tabs.searchTool": "Search tool",
   "tabs.searchTrigger": "Search triggers...",
   "tabs.showLessFeatured": "Show less",
   "tabs.showMoreFeatured": "Show more",
+  "tabs.snippets": "Snippets",
   "tabs.sources": "Sources",
   "tabs.start": "Start",
   "tabs.startDisabledTip": "Trigger node and user input node are mutually exclusive.",
+  "tabs.startNotSupportedTip": "The Start tab is not supported in snippets.",
   "tabs.tools": "Tools",
   "tabs.transform": "Transform",
   "tabs.usePlugin": "Select tool",
diff --git a/web/i18n/es-ES/app.json b/web/i18n/es-ES/app.json
index 251746db7f..5cc805c8f6 100644
--- a/web/i18n/es-ES/app.json
+++ b/web/i18n/es-ES/app.json
@@ -118,12 +118,29 @@
   "iconPicker.emoji": "Emoji",
   "iconPicker.image": "Imagen",
"iconPicker.ok": "OK", + "importApp": "Importar App", "importDSL": "Importar archivo DSL", "importFromDSL": "Importar desde DSL", "importFromDSLFile": "Desde el archivo DSL", "importFromDSLUrl": "URL de origen", "importFromDSLUrlPlaceholder": "Pegar enlace DSL aquí", "join": "Únete a la comunidad", + "marketplace.template.categories": "Categorías", + "marketplace.template.category.design": "Diseño", + "marketplace.template.category.it": "IT", + "marketplace.template.category.knowledge": "Conocimiento", + "marketplace.template.category.marketing": "Marketing", + "marketplace.template.category.operations": "Operaciones", + "marketplace.template.category.sales": "Ventas", + "marketplace.template.category.support": "Soporte", + "marketplace.template.fetchFailed": "Error al obtener la plantilla", + "marketplace.template.importConfirm": "Importar", + "marketplace.template.importFailed": "Error al importar la plantilla", + "marketplace.template.modalTitle": "Importar desde Marketplace", + "marketplace.template.overview": "Vista general", + "marketplace.template.publishedBy": "Por", + "marketplace.template.usageCount": "Uso", + "marketplace.template.viewOnMarketplace": "Ver en Marketplace", "maxActiveRequests": "Máximas solicitudes concurrentes", "maxActiveRequestsPlaceholder": "Introduce 0 para ilimitado", "maxActiveRequestsTip": "Número máximo de solicitudes activas concurrentes por aplicación (0 para ilimitado)", diff --git a/web/i18n/es-ES/workflow.json b/web/i18n/es-ES/workflow.json index 5da69241e7..c55ffdfc1e 100644 --- a/web/i18n/es-ES/workflow.json +++ b/web/i18n/es-ES/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "Ingrese contenido en el cuadro de abajo para comenzar a depurar el Chatbot", "common.processData": "Procesar datos", "common.publish": "Publicar", + "common.publishToMarketplace": "Publicar en Marketplace", + "common.publishToMarketplaceFailed": "Error al publicar en Marketplace", "common.publishUpdate": "Publicar actualización", "common.published": "Publicado", "common.publishedAt": "Publicado el", + "common.publishingToMarketplace": "Publicando...", "common.redo": "Rehacer", "common.restart": "Reiniciar", "common.restore": "Restaurar", diff --git a/web/i18n/fa-IR/app.json b/web/i18n/fa-IR/app.json index ed253fc569..3bdba44440 100644 --- a/web/i18n/fa-IR/app.json +++ b/web/i18n/fa-IR/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "ایموجی", "iconPicker.image": "تصویر", "iconPicker.ok": "باشه", + "importApp": "وارد کردن برنامه", "importDSL": "وارد کردن فایل DSL", "importFromDSL": "وارد کردن از DSL", "importFromDSLFile": "از فایل DSL", "importFromDSLUrl": "از URL", "importFromDSLUrlPlaceholder": "لینک DSL را اینجا بچسبانید", "join": "پیوستن به جامعه", + "marketplace.template.categories": "دسته‌بندی‌ها", + "marketplace.template.category.design": "طراحی", + "marketplace.template.category.it": "فناوری اطلاعات", + "marketplace.template.category.knowledge": "دانش", + "marketplace.template.category.marketing": "بازاریابی", + "marketplace.template.category.operations": "عملیات", + "marketplace.template.category.sales": "فروش", + "marketplace.template.category.support": "پشتیبانی", + "marketplace.template.fetchFailed": "دریافت قالب ناموفق بود", + "marketplace.template.importConfirm": "وارد کردن", + "marketplace.template.importFailed": "وارد کردن قالب ناموفق بود", + "marketplace.template.modalTitle": "وارد کردن از Marketplace", + "marketplace.template.overview": "نمای کلی", + "marketplace.template.publishedBy": "توسط", + "marketplace.template.usageCount": "استفاده", 
+ "marketplace.template.viewOnMarketplace": "مشاهده در Marketplace", "maxActiveRequests": "بیشترین درخواست‌های همزمان", "maxActiveRequestsPlaceholder": "برای نامحدود، 0 را وارد کنید", "maxActiveRequestsTip": "حداکثر تعداد درخواست‌های فعال همزمان در هر برنامه (0 برای نامحدود)", diff --git a/web/i18n/fa-IR/workflow.json b/web/i18n/fa-IR/workflow.json index 3210cf8919..c23c781a04 100644 --- a/web/i18n/fa-IR/workflow.json +++ b/web/i18n/fa-IR/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "محتوا را در کادر زیر وارد کنید تا اشکال‌زدایی چت‌بات آغاز شود", "common.processData": "پردازش داده‌ها", "common.publish": "انتشار", + "common.publishToMarketplace": "انتشار در Marketplace", + "common.publishToMarketplaceFailed": "انتشار در Marketplace ناموفق بود", "common.publishUpdate": "انتشار به‌روزرسانی", "common.published": "منتشر شده", "common.publishedAt": "منتشر شده در", + "common.publishingToMarketplace": "در حال انتشار...", "common.redo": "بازانجام", "common.restart": "راه‌اندازی مجدد", "common.restore": "بازیابی", diff --git a/web/i18n/fr-FR/app.json b/web/i18n/fr-FR/app.json index f6af8380bb..f90623ce18 100644 --- a/web/i18n/fr-FR/app.json +++ b/web/i18n/fr-FR/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "Emoji", "iconPicker.image": "Image", "iconPicker.ok": "OK", + "importApp": "Importer l'App", "importDSL": "Importer le fichier DSL", "importFromDSL": "Importation à partir d'une DSL", "importFromDSLFile": "À partir d’un fichier DSL", "importFromDSLUrl": "À partir de l’URL", "importFromDSLUrlPlaceholder": "Collez le lien DSL ici", "join": "Rejoindre la communauté", + "marketplace.template.categories": "Catégories", + "marketplace.template.category.design": "Design", + "marketplace.template.category.it": "IT", + "marketplace.template.category.knowledge": "Connaissance", + "marketplace.template.category.marketing": "Marketing", + "marketplace.template.category.operations": "Opérations", + "marketplace.template.category.sales": "Ventes", + "marketplace.template.category.support": "Support", + "marketplace.template.fetchFailed": "Échec de la récupération du modèle", + "marketplace.template.importConfirm": "Importer", + "marketplace.template.importFailed": "Échec de l'importation du modèle", + "marketplace.template.modalTitle": "Importer depuis le Marketplace", + "marketplace.template.overview": "Aperçu", + "marketplace.template.publishedBy": "Par", + "marketplace.template.usageCount": "Utilisation", + "marketplace.template.viewOnMarketplace": "Voir sur le Marketplace", "maxActiveRequests": "Nombre maximal de requêtes simultanées", "maxActiveRequestsPlaceholder": "Entrez 0 pour illimité", "maxActiveRequestsTip": "Nombre maximum de requêtes actives concurrentes par application (0 pour illimité)", diff --git a/web/i18n/fr-FR/workflow.json b/web/i18n/fr-FR/workflow.json index da3e69dab3..727c3a91e6 100644 --- a/web/i18n/fr-FR/workflow.json +++ b/web/i18n/fr-FR/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "Entrez le contenu dans la boîte ci-dessous pour commencer à déboguer le Chatbot", "common.processData": "Traiter les données", "common.publish": "Publier", + "common.publishToMarketplace": "Publier sur le Marketplace", + "common.publishToMarketplaceFailed": "Échec de la publication sur le Marketplace", "common.publishUpdate": "Publier une mise à jour", "common.published": "Publié", "common.publishedAt": "Publié le", + "common.publishingToMarketplace": "Publication en cours...", "common.redo": "Réexécuter", "common.restart": "Redémarrer", "common.restore": 
"Restaurer", diff --git a/web/i18n/hi-IN/app.json b/web/i18n/hi-IN/app.json index 3705c4dec1..a7cc347820 100644 --- a/web/i18n/hi-IN/app.json +++ b/web/i18n/hi-IN/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "इमोजी", "iconPicker.image": "छवि", "iconPicker.ok": "ठीक है", + "importApp": "ऐप आयात करें", "importDSL": "डीएसएल फ़ाइल आयात करें", "importFromDSL": "DSL से आयात करें", "importFromDSLFile": "डीएसएल फ़ाइल से", "importFromDSLUrl": "यूआरएल से", "importFromDSLUrlPlaceholder": "डीएसएल लिंक यहां पेस्ट करें", "join": "समुदाय में शामिल हों", + "marketplace.template.categories": "श्रेणियाँ", + "marketplace.template.category.design": "डिज़ाइन", + "marketplace.template.category.it": "आईटी", + "marketplace.template.category.knowledge": "ज्ञान", + "marketplace.template.category.marketing": "मार्केटिंग", + "marketplace.template.category.operations": "संचालन", + "marketplace.template.category.sales": "बिक्री", + "marketplace.template.category.support": "समर्थन", + "marketplace.template.fetchFailed": "टेम्पलेट प्राप्त करने में विफल", + "marketplace.template.importConfirm": "आयात करें", + "marketplace.template.importFailed": "टेम्पलेट आयात करने में विफल", + "marketplace.template.modalTitle": "Marketplace से आयात करें", + "marketplace.template.overview": "अवलोकन", + "marketplace.template.publishedBy": "द्वारा", + "marketplace.template.usageCount": "उपयोग", + "marketplace.template.viewOnMarketplace": "Marketplace पर देखें", "maxActiveRequests": "अधिकतम समवर्ती अनुरोध", "maxActiveRequestsPlaceholder": "असीमित के लिए 0 दर्ज करें", "maxActiveRequestsTip": "प्रति ऐप सक्रिय अनुरोधों की अधिकतम संख्या (असीमित के लिए 0)", diff --git a/web/i18n/hi-IN/workflow.json b/web/i18n/hi-IN/workflow.json index 20845af0b8..8b5ea73535 100644 --- a/web/i18n/hi-IN/workflow.json +++ b/web/i18n/hi-IN/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "चैटबॉट का डीबग शुरू करने के लिए नीचे दिए गए बॉक्स में सामग्री दर्ज करें", "common.processData": "डेटा प्रोसेस करें", "common.publish": "प्रकाशित करें", + "common.publishToMarketplace": "Marketplace पर प्रकाशित करें", + "common.publishToMarketplaceFailed": "Marketplace पर प्रकाशित करने में विफल", "common.publishUpdate": "अपडेट प्रकाशित करें", "common.published": "प्रकाशित", "common.publishedAt": "प्रकाशित", + "common.publishingToMarketplace": "प्रकाशित हो रहा है...", "common.redo": "फिर से करें", "common.restart": "पुनः आरंभ करें", "common.restore": "पुनर्स्थापित करें", diff --git a/web/i18n/id-ID/app.json b/web/i18n/id-ID/app.json index 23aadc9da6..c47dda1886 100644 --- a/web/i18n/id-ID/app.json +++ b/web/i18n/id-ID/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "Emoji", "iconPicker.image": "Citra", "iconPicker.ok": "OK", + "importApp": "Impor Aplikasi", "importDSL": "Impor file DSL", "importFromDSL": "Impor dari DSL", "importFromDSLFile": "Dari file DSL", "importFromDSLUrl": "Dari URL", "importFromDSLUrlPlaceholder": "Tempel tautan DSL di sini", "join": "Bergabunglah dengan komunitas", + "marketplace.template.categories": "Kategori", + "marketplace.template.category.design": "Desain", + "marketplace.template.category.it": "IT", + "marketplace.template.category.knowledge": "Pengetahuan", + "marketplace.template.category.marketing": "Pemasaran", + "marketplace.template.category.operations": "Operasi", + "marketplace.template.category.sales": "Penjualan", + "marketplace.template.category.support": "Dukungan", + "marketplace.template.fetchFailed": "Gagal mengambil templat", + "marketplace.template.importConfirm": "Impor", + "marketplace.template.importFailed": 
"Gagal mengimpor templat", + "marketplace.template.modalTitle": "Impor dari Marketplace", + "marketplace.template.overview": "Ikhtisar", + "marketplace.template.publishedBy": "Oleh", + "marketplace.template.usageCount": "Penggunaan", + "marketplace.template.viewOnMarketplace": "Lihat di Marketplace", "maxActiveRequests": "Permintaan bersamaan maksimum", "maxActiveRequestsPlaceholder": "Masukkan 0 untuk tidak terbatas", "maxActiveRequestsTip": "Jumlah maksimum permintaan aktif bersamaan per aplikasi (0 untuk tidak terbatas)", diff --git a/web/i18n/id-ID/workflow.json b/web/i18n/id-ID/workflow.json index 2c32f25aab..058c15334b 100644 --- a/web/i18n/id-ID/workflow.json +++ b/web/i18n/id-ID/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "Masukkan konten di kotak di bawah ini untuk mulai men-debug Chatbot", "common.processData": "Proses Data", "common.publish": "Menerbitkan", + "common.publishToMarketplace": "Publikasikan ke Marketplace", + "common.publishToMarketplaceFailed": "Gagal mempublikasikan ke Marketplace", "common.publishUpdate": "Publikasikan Pembaruan", "common.published": "Diterbitkan", "common.publishedAt": "Diterbitkan", + "common.publishingToMarketplace": "Mempublikasikan...", "common.redo": "Ulangi", "common.restart": "Restart", "common.restore": "Mengembalikan", diff --git a/web/i18n/it-IT/app.json b/web/i18n/it-IT/app.json index e721ecf655..0719a49571 100644 --- a/web/i18n/it-IT/app.json +++ b/web/i18n/it-IT/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "Emoji", "iconPicker.image": "Immagine", "iconPicker.ok": "OK", + "importApp": "Importa App", "importDSL": "Importa file DSL", "importFromDSL": "Importazione da DSL", "importFromDSLFile": "Da file DSL", "importFromDSLUrl": "Dall'URL", "importFromDSLUrlPlaceholder": "Incolla qui il link DSL", "join": "Unisciti alla comunità", + "marketplace.template.categories": "Categorie", + "marketplace.template.category.design": "Design", + "marketplace.template.category.it": "IT", + "marketplace.template.category.knowledge": "Conoscenza", + "marketplace.template.category.marketing": "Marketing", + "marketplace.template.category.operations": "Operazioni", + "marketplace.template.category.sales": "Vendite", + "marketplace.template.category.support": "Supporto", + "marketplace.template.fetchFailed": "Impossibile recuperare il modello", + "marketplace.template.importConfirm": "Importa", + "marketplace.template.importFailed": "Impossibile importare il modello", + "marketplace.template.modalTitle": "Importa dal Marketplace", + "marketplace.template.overview": "Panoramica", + "marketplace.template.publishedBy": "Di", + "marketplace.template.usageCount": "Utilizzo", + "marketplace.template.viewOnMarketplace": "Visualizza sul Marketplace", "maxActiveRequests": "Massimo numero di richieste concorrenti", "maxActiveRequestsPlaceholder": "Inserisci 0 per illimitato", "maxActiveRequestsTip": "Numero massimo di richieste attive concorrenti per app (0 per illimitato)", diff --git a/web/i18n/it-IT/workflow.json b/web/i18n/it-IT/workflow.json index 1c779d1365..fbd3041fb9 100644 --- a/web/i18n/it-IT/workflow.json +++ b/web/i18n/it-IT/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "Inserisci contenuto nella casella sottostante per avviare il debug del Chatbot", "common.processData": "Elabora Dati", "common.publish": "Pubblica", + "common.publishToMarketplace": "Pubblica sul Marketplace", + "common.publishToMarketplaceFailed": "Pubblicazione sul Marketplace non riuscita", "common.publishUpdate": "Pubblica aggiornamento", 
"common.published": "Pubblicato", "common.publishedAt": "Pubblicato", + "common.publishingToMarketplace": "Pubblicazione...", "common.redo": "Ripeti", "common.restart": "Riavvia", "common.restore": "Ripristina", diff --git a/web/i18n/ja-JP/app.json b/web/i18n/ja-JP/app.json index 925095d447..8ccaaababe 100644 --- a/web/i18n/ja-JP/app.json +++ b/web/i18n/ja-JP/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "絵文字", "iconPicker.image": "画像", "iconPicker.ok": "OK", + "importApp": "アプリをインポート", "importDSL": "DSL ファイルをインポート", "importFromDSL": "DSL からインポート", "importFromDSLFile": "DSL ファイルから", "importFromDSLUrl": "URL から", "importFromDSLUrlPlaceholder": "DSL リンクをここに貼り付けます", "join": "コミュニティに参加する", + "marketplace.template.categories": "カテゴリ", + "marketplace.template.category.design": "デザイン", + "marketplace.template.category.it": "IT", + "marketplace.template.category.knowledge": "知識", + "marketplace.template.category.marketing": "マーケティング", + "marketplace.template.category.operations": "オペレーション", + "marketplace.template.category.sales": "セールス", + "marketplace.template.category.support": "サポート", + "marketplace.template.fetchFailed": "テンプレートの取得に失敗しました", + "marketplace.template.importConfirm": "インポート", + "marketplace.template.importFailed": "テンプレートのインポートに失敗しました", + "marketplace.template.modalTitle": "マーケットプレイスからインポート", + "marketplace.template.overview": "概要", + "marketplace.template.publishedBy": "提供者", + "marketplace.template.usageCount": "使用数", + "marketplace.template.viewOnMarketplace": "マーケットプレイスで見る", "maxActiveRequests": "最大同時リクエスト数", "maxActiveRequestsPlaceholder": "無制限のために0を入力してください", "maxActiveRequestsTip": "アプリごとの同時アクティブリクエストの最大数(無制限の場合は0)", diff --git a/web/i18n/ja-JP/workflow.json b/web/i18n/ja-JP/workflow.json index 1ee43c17cf..1154a5baba 100644 --- a/web/i18n/ja-JP/workflow.json +++ b/web/i18n/ja-JP/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "入力欄にテキストを入力してチャットボットのデバッグを開始", "common.processData": "データ処理", "common.publish": "公開する", + "common.publishToMarketplace": "マーケットプレイスに公開", + "common.publishToMarketplaceFailed": "マーケットプレイスへの公開に失敗しました", "common.publishUpdate": "更新を公開", "common.published": "公開済み", "common.publishedAt": "公開日時", + "common.publishingToMarketplace": "公開中...", "common.redo": "やり直し", "common.restart": "再起動", "common.restore": "復元", diff --git a/web/i18n/ko-KR/app.json b/web/i18n/ko-KR/app.json index 4f29da5f1e..b9dd592f03 100644 --- a/web/i18n/ko-KR/app.json +++ b/web/i18n/ko-KR/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "이모지", "iconPicker.image": "이미지", "iconPicker.ok": "확인", + "importApp": "앱 가져오기", "importDSL": "DSL 파일 가져오기", "importFromDSL": "DSL 에서 가져오기", "importFromDSLFile": "DSL 파일에서", "importFromDSLUrl": "URL 에서", "importFromDSLUrlPlaceholder": "여기에 DSL 링크 붙여 넣기", "join": "커뮤니티에 참여하기", + "marketplace.template.categories": "카테고리", + "marketplace.template.category.design": "디자인", + "marketplace.template.category.it": "IT", + "marketplace.template.category.knowledge": "지식", + "marketplace.template.category.marketing": "마케팅", + "marketplace.template.category.operations": "운영", + "marketplace.template.category.sales": "영업", + "marketplace.template.category.support": "지원", + "marketplace.template.fetchFailed": "템플릿 가져오기 실패", + "marketplace.template.importConfirm": "가져오기", + "marketplace.template.importFailed": "템플릿 가져오기 실패", + "marketplace.template.modalTitle": "마켓플레이스에서 가져오기", + "marketplace.template.overview": "개요", + "marketplace.template.publishedBy": "제공:", + "marketplace.template.usageCount": "사용량", + 
"marketplace.template.viewOnMarketplace": "마켓플레이스에서 보기", "maxActiveRequests": "동시 최대 요청 수", "maxActiveRequestsPlaceholder": "무제한 사용을 원하시면 0을 입력하세요.", "maxActiveRequestsTip": "앱당 최대 동시 활성 요청 수(무제한은 0)", diff --git a/web/i18n/ko-KR/workflow.json b/web/i18n/ko-KR/workflow.json index b6291e4366..a80c34b294 100644 --- a/web/i18n/ko-KR/workflow.json +++ b/web/i18n/ko-KR/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "디버깅을 시작하려면 아래 상자에 내용을 입력하세요", "common.processData": "데이터 처리", "common.publish": "게시하기", + "common.publishToMarketplace": "마켓플레이스에 게시", + "common.publishToMarketplaceFailed": "마켓플레이스 게시 실패", "common.publishUpdate": "업데이트 게시", "common.published": "게시됨", "common.publishedAt": "발행일", + "common.publishingToMarketplace": "게시 중...", "common.redo": "다시 실행", "common.restart": "재시작", "common.restore": "복원", diff --git a/web/i18n/nl-NL/app.json b/web/i18n/nl-NL/app.json index 0ad608d53c..9bd50b5b92 100644 --- a/web/i18n/nl-NL/app.json +++ b/web/i18n/nl-NL/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "Emoji", "iconPicker.image": "Image", "iconPicker.ok": "OK", + "importApp": "App importeren", "importDSL": "Import DSL file", "importFromDSL": "Import from DSL", "importFromDSLFile": "From DSL file", "importFromDSLUrl": "From URL", "importFromDSLUrlPlaceholder": "Paste DSL link here", "join": "Join the community", + "marketplace.template.categories": "Categorieën", + "marketplace.template.category.design": "Ontwerp", + "marketplace.template.category.it": "IT", + "marketplace.template.category.knowledge": "Kennis", + "marketplace.template.category.marketing": "Marketing", + "marketplace.template.category.operations": "Operaties", + "marketplace.template.category.sales": "Verkoop", + "marketplace.template.category.support": "Ondersteuning", + "marketplace.template.fetchFailed": "Template ophalen mislukt", + "marketplace.template.importConfirm": "Importeren", + "marketplace.template.importFailed": "Template importeren mislukt", + "marketplace.template.modalTitle": "Importeren vanuit Marketplace", + "marketplace.template.overview": "Overzicht", + "marketplace.template.publishedBy": "Door", + "marketplace.template.usageCount": "Gebruik", + "marketplace.template.viewOnMarketplace": "Bekijken op Marketplace", "maxActiveRequests": "Max concurrent requests", "maxActiveRequestsPlaceholder": "Enter 0 for unlimited", "maxActiveRequestsTip": "Maximum number of concurrent active requests per app (0 for unlimited)", diff --git a/web/i18n/nl-NL/workflow.json b/web/i18n/nl-NL/workflow.json index c3d5824ef7..c8e8753eb4 100644 --- a/web/i18n/nl-NL/workflow.json +++ b/web/i18n/nl-NL/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "Enter content in the box below to start debugging the Chatbot", "common.processData": "Process Data", "common.publish": "Publish", + "common.publishToMarketplace": "Publiceren op Marketplace", + "common.publishToMarketplaceFailed": "Publiceren op Marketplace mislukt", "common.publishUpdate": "Publish Update", "common.published": "Published", "common.publishedAt": "Published", + "common.publishingToMarketplace": "Publiceren...", "common.redo": "Redo", "common.restart": "Restart", "common.restore": "Restore", diff --git a/web/i18n/pl-PL/app.json b/web/i18n/pl-PL/app.json index a3ae06e3cd..0f6f5cd298 100644 --- a/web/i18n/pl-PL/app.json +++ b/web/i18n/pl-PL/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "Emoji", "iconPicker.image": "Obraz", "iconPicker.ok": "OK", + "importApp": "Importuj aplikację", "importDSL": "Importuj plik DSL", "importFromDSL": 
"Importowanie z DSL", "importFromDSLFile": "Z pliku DSL", "importFromDSLUrl": "Z adresu URL", "importFromDSLUrlPlaceholder": "Wklej tutaj link DSL", "join": "Dołącz do społeczności", + "marketplace.template.categories": "Kategorie", + "marketplace.template.category.design": "Design", + "marketplace.template.category.it": "IT", + "marketplace.template.category.knowledge": "Wiedza", + "marketplace.template.category.marketing": "Marketing", + "marketplace.template.category.operations": "Operacje", + "marketplace.template.category.sales": "Sprzedaż", + "marketplace.template.category.support": "Wsparcie", + "marketplace.template.fetchFailed": "Nie udało się pobrać szablonu", + "marketplace.template.importConfirm": "Importuj", + "marketplace.template.importFailed": "Nie udało się zaimportować szablonu", + "marketplace.template.modalTitle": "Importuj z Marketplace", + "marketplace.template.overview": "Przegląd", + "marketplace.template.publishedBy": "Przez", + "marketplace.template.usageCount": "Użycie", + "marketplace.template.viewOnMarketplace": "Zobacz na Marketplace", "maxActiveRequests": "Maksymalne równoczesne żądania", "maxActiveRequestsPlaceholder": "Wprowadź 0, aby uzyskać nielimitowane", "maxActiveRequestsTip": "Maksymalna liczba jednoczesnych aktywnych żądań na aplikację (0 dla nieograniczonej)", diff --git a/web/i18n/pl-PL/workflow.json b/web/i18n/pl-PL/workflow.json index 6b0bda1ff8..805960a851 100644 --- a/web/i18n/pl-PL/workflow.json +++ b/web/i18n/pl-PL/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "Wprowadź treść w poniższym polu, aby rozpocząć debugowanie Chatbota", "common.processData": "Przetwórz dane", "common.publish": "Opublikuj", + "common.publishToMarketplace": "Publikuj na Marketplace", + "common.publishToMarketplaceFailed": "Nie udało się opublikować na Marketplace", "common.publishUpdate": "Opublikuj aktualizację", "common.published": "Opublikowane", "common.publishedAt": "Opublikowane", + "common.publishingToMarketplace": "Publikowanie...", "common.redo": "Ponów", "common.restart": "Uruchom ponownie", "common.restore": "Przywróć", diff --git a/web/i18n/pt-BR/app.json b/web/i18n/pt-BR/app.json index 43447c970c..3c59423e99 100644 --- a/web/i18n/pt-BR/app.json +++ b/web/i18n/pt-BR/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "Emoji", "iconPicker.image": "Imagem", "iconPicker.ok": "OK", + "importApp": "Importar App", "importDSL": "Importar arquivo DSL", "importFromDSL": "Importar de DSL", "importFromDSLFile": "Do arquivo DSL", "importFromDSLUrl": "Do URL", "importFromDSLUrlPlaceholder": "Cole o link DSL aqui", "join": "Participe da comunidade", + "marketplace.template.categories": "Categorias", + "marketplace.template.category.design": "Design", + "marketplace.template.category.it": "TI", + "marketplace.template.category.knowledge": "Conhecimento", + "marketplace.template.category.marketing": "Marketing", + "marketplace.template.category.operations": "Operações", + "marketplace.template.category.sales": "Vendas", + "marketplace.template.category.support": "Suporte", + "marketplace.template.fetchFailed": "Falha ao buscar modelo", + "marketplace.template.importConfirm": "Importar", + "marketplace.template.importFailed": "Falha ao importar modelo", + "marketplace.template.modalTitle": "Importar do Marketplace", + "marketplace.template.overview": "Visão geral", + "marketplace.template.publishedBy": "Por", + "marketplace.template.usageCount": "Uso", + "marketplace.template.viewOnMarketplace": "Ver no Marketplace", "maxActiveRequests": "Máximo de 
solicitações simultâneas", "maxActiveRequestsPlaceholder": "Digite 0 para ilimitado", "maxActiveRequestsTip": "Número máximo de solicitações ativas simultâneas por aplicativo (0 para ilimitado)", diff --git a/web/i18n/pt-BR/workflow.json b/web/i18n/pt-BR/workflow.json index a8a7511100..de6c882e0c 100644 --- a/web/i18n/pt-BR/workflow.json +++ b/web/i18n/pt-BR/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "Digite o conteúdo na caixa abaixo para começar a depurar o Chatbot", "common.processData": "Processar dados", "common.publish": "Publicar", + "common.publishToMarketplace": "Publicar no Marketplace", + "common.publishToMarketplaceFailed": "Falha ao publicar no Marketplace", "common.publishUpdate": "Publicar Atualização", "common.published": "Publicado", "common.publishedAt": "Publicado em", + "common.publishingToMarketplace": "Publicando...", "common.redo": "Refazer", "common.restart": "Reiniciar", "common.restore": "Restaurar", diff --git a/web/i18n/ro-RO/app.json b/web/i18n/ro-RO/app.json index cfa0b8aedc..f93e4f10a0 100644 --- a/web/i18n/ro-RO/app.json +++ b/web/i18n/ro-RO/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "Emoji", "iconPicker.image": "Imagine", "iconPicker.ok": "OK", + "importApp": "Importați aplicația", "importDSL": "Importă fișier DSL", "importFromDSL": "Import din DSL", "importFromDSLFile": "Din fișierul DSL", "importFromDSLUrl": "De la URL", "importFromDSLUrlPlaceholder": "Lipiți linkul DSL aici", "join": "Alătură-te comunității", + "marketplace.template.categories": "Categorii", + "marketplace.template.category.design": "Design", + "marketplace.template.category.it": "IT", + "marketplace.template.category.knowledge": "Cunoaștere", + "marketplace.template.category.marketing": "Marketing", + "marketplace.template.category.operations": "Operațiuni", + "marketplace.template.category.sales": "Vânzări", + "marketplace.template.category.support": "Suport", + "marketplace.template.fetchFailed": "Eroare la obținerea șablonului", + "marketplace.template.importConfirm": "Importați", + "marketplace.template.importFailed": "Eroare la importul șablonului", + "marketplace.template.modalTitle": "Importați din Marketplace", + "marketplace.template.overview": "Prezentare generală", + "marketplace.template.publishedBy": "De", + "marketplace.template.usageCount": "Utilizare", + "marketplace.template.viewOnMarketplace": "Vizualizați pe Marketplace", "maxActiveRequests": "Maxime cereri simultane", "maxActiveRequestsPlaceholder": "Introduceți 0 pentru nelimitat", "maxActiveRequestsTip": "Numărul maxim de cereri active concurente pe aplicație (0 pentru nelimitat)", diff --git a/web/i18n/ro-RO/workflow.json b/web/i18n/ro-RO/workflow.json index c15e8508ab..7b551294e8 100644 --- a/web/i18n/ro-RO/workflow.json +++ b/web/i18n/ro-RO/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "Introduceți conținutul în caseta de mai jos pentru a începe depanarea Chatbotului", "common.processData": "Procesează date", "common.publish": "Publică", + "common.publishToMarketplace": "Publicați pe Marketplace", + "common.publishToMarketplaceFailed": "Eroare la publicarea pe Marketplace", "common.publishUpdate": "Publicați actualizarea", "common.published": "Publicat", "common.publishedAt": "Publicat la", + "common.publishingToMarketplace": "Se publică...", "common.redo": "Refă", "common.restart": "Repornește", "common.restore": "Restaurează", diff --git a/web/i18n/ru-RU/app.json b/web/i18n/ru-RU/app.json index 7b53ea61fb..8a9327e81e 100644 --- a/web/i18n/ru-RU/app.json +++ 
b/web/i18n/ru-RU/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "Эмодзи", "iconPicker.image": "Изображение", "iconPicker.ok": "ОК", + "importApp": "Импортировать приложение", "importDSL": "Импортировать файл DSL", "importFromDSL": "Импортировать из DSL", "importFromDSLFile": "Из файла DSL", "importFromDSLUrl": "Из URL", "importFromDSLUrlPlaceholder": "Вставьте ссылку DSL сюда", "join": "Присоединяйтесь к сообществу", + "marketplace.template.categories": "Категории", + "marketplace.template.category.design": "Дизайн", + "marketplace.template.category.it": "ИТ", + "marketplace.template.category.knowledge": "Знания", + "marketplace.template.category.marketing": "Маркетинг", + "marketplace.template.category.operations": "Операции", + "marketplace.template.category.sales": "Продажи", + "marketplace.template.category.support": "Поддержка", + "marketplace.template.fetchFailed": "Не удалось получить шаблон", + "marketplace.template.importConfirm": "Импортировать", + "marketplace.template.importFailed": "Не удалось импортировать шаблон", + "marketplace.template.modalTitle": "Импортировать из Marketplace", + "marketplace.template.overview": "Обзор", + "marketplace.template.publishedBy": "От", + "marketplace.template.usageCount": "Использование", + "marketplace.template.viewOnMarketplace": "Открыть в Marketplace", "maxActiveRequests": "Максимальное количество параллельных запросов", "maxActiveRequestsPlaceholder": "Введите 0 для неограниченного количества", "maxActiveRequestsTip": "Максимальное количество одновременно активных запросов на одно приложение (0 для неограниченного количества)", diff --git a/web/i18n/ru-RU/workflow.json b/web/i18n/ru-RU/workflow.json index 55622ec730..89d2657208 100644 --- a/web/i18n/ru-RU/workflow.json +++ b/web/i18n/ru-RU/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "Введите текст в поле ниже, чтобы начать отладку чат-бота", "common.processData": "Обработка данных", "common.publish": "Опубликовать", + "common.publishToMarketplace": "Опубликовать в Marketplace", + "common.publishToMarketplaceFailed": "Не удалось опубликовать в Marketplace", "common.publishUpdate": "Опубликовать обновление", "common.published": "Опубликовано", "common.publishedAt": "Опубликовано", + "common.publishingToMarketplace": "Публикация...", "common.redo": "Повторить", "common.restart": "Перезапустить", "common.restore": "Восстановить", diff --git a/web/i18n/sl-SI/app.json b/web/i18n/sl-SI/app.json index ce09d32059..a8a14d7488 100644 --- a/web/i18n/sl-SI/app.json +++ b/web/i18n/sl-SI/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "Emoji", "iconPicker.image": "Slika", "iconPicker.ok": "V redu", + "importApp": "Uvozi aplikacijo", "importDSL": "Uvozi datoteko DSL", "importFromDSL": "Uvozi iz DSL", "importFromDSLFile": "Iz datoteke DSL", "importFromDSLUrl": "Iz URL-ja", "importFromDSLUrlPlaceholder": "Tukaj prilepi povezavo DSL", "join": "Pridruži se skupnosti", + "marketplace.template.categories": "Kategorije", + "marketplace.template.category.design": "Oblikovanje", + "marketplace.template.category.it": "IT", + "marketplace.template.category.knowledge": "Znanje", + "marketplace.template.category.marketing": "Trženje", + "marketplace.template.category.operations": "Operacije", + "marketplace.template.category.sales": "Prodaja", + "marketplace.template.category.support": "Podpora", + "marketplace.template.fetchFailed": "Pridobivanje predloge ni uspelo", + "marketplace.template.importConfirm": "Uvozi", + "marketplace.template.importFailed": "Uvoz predloge ni uspel", + 
"marketplace.template.modalTitle": "Uvozi iz Marketplace", + "marketplace.template.overview": "Pregled", + "marketplace.template.publishedBy": "Avtor", + "marketplace.template.usageCount": "Uporaba", + "marketplace.template.viewOnMarketplace": "Ogled na Marketplace", "maxActiveRequests": "Maksimalno število hkratnih zahtevkov", "maxActiveRequestsPlaceholder": "Vnesite 0 za neomejeno", "maxActiveRequestsTip": "Največje število hkrati aktivnih zahtevkov na aplikacijo (0 za neomejeno)", diff --git a/web/i18n/sl-SI/workflow.json b/web/i18n/sl-SI/workflow.json index 7ea8dedec4..a7c2914626 100644 --- a/web/i18n/sl-SI/workflow.json +++ b/web/i18n/sl-SI/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "Vnesite vsebino v spodnje polje, da začnete odpravljati napake v chatbotu", "common.processData": "Obdelava podatkov", "common.publish": "Objavi", + "common.publishToMarketplace": "Objavi na Marketplace", + "common.publishToMarketplaceFailed": "Objava na Marketplace ni uspela", "common.publishUpdate": "Objavi posodobitev", "common.published": "Objavljeno", "common.publishedAt": "Objavljeno", + "common.publishingToMarketplace": "Objavljanje...", "common.redo": "Ponovno naredi", "common.restart": "Znova zaženi", "common.restore": "Obnovi", diff --git a/web/i18n/th-TH/app.json b/web/i18n/th-TH/app.json index d59a5b8505..624d7b9ec9 100644 --- a/web/i18n/th-TH/app.json +++ b/web/i18n/th-TH/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "อิโมจิ", "iconPicker.image": "ภาพ", "iconPicker.ok": "ตกลง, ได้", + "importApp": "นำเข้าแอป", "importDSL": "นําเข้าไฟล์ DSL", "importFromDSL": "นําเข้าจาก DSL", "importFromDSLFile": "จากไฟล์ DSL", "importFromDSLUrl": "จาก URL", "importFromDSLUrlPlaceholder": "วางลิงค์ DSL ที่นี่", "join": "เข้าร่วมชุมชนนักพัฒนา", + "marketplace.template.categories": "หมวดหมู่", + "marketplace.template.category.design": "การออกแบบ", + "marketplace.template.category.it": "ไอที", + "marketplace.template.category.knowledge": "ความรู้", + "marketplace.template.category.marketing": "การตลาด", + "marketplace.template.category.operations": "การดำเนินงาน", + "marketplace.template.category.sales": "การขาย", + "marketplace.template.category.support": "การสนับสนุน", + "marketplace.template.fetchFailed": "ดึงข้อมูลเทมเพลตล้มเหลว", + "marketplace.template.importConfirm": "นำเข้า", + "marketplace.template.importFailed": "นำเข้าเทมเพลตล้มเหลว", + "marketplace.template.modalTitle": "นำเข้าจาก Marketplace", + "marketplace.template.overview": "ภาพรวม", + "marketplace.template.publishedBy": "โดย", + "marketplace.template.usageCount": "การใช้งาน", + "marketplace.template.viewOnMarketplace": "ดูบน Marketplace", "maxActiveRequests": "จำนวนคำขอพร้อมกันสูงสุด", "maxActiveRequestsPlaceholder": "ใส่ 0 สำหรับไม่จำกัด", "maxActiveRequestsTip": "จำนวนการร้องขอที่ใช้งานพร้อมกันสูงสุดต่อแอป (0 หมายถึงไม่จำกัด)", diff --git a/web/i18n/th-TH/workflow.json b/web/i18n/th-TH/workflow.json index e1280cf438..d8a9b53f2a 100644 --- a/web/i18n/th-TH/workflow.json +++ b/web/i18n/th-TH/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "ป้อนเนื้อหาในช่องด้านล่างเพื่อเริ่มแก้ไขข้อบกพร่องของแชทบอท", "common.processData": "ประมวลผลข้อมูล", "common.publish": "ตีพิมพ์", + "common.publishToMarketplace": "เผยแพร่ไปยัง Marketplace", + "common.publishToMarketplaceFailed": "เผยแพร่ไปยัง Marketplace ล้มเหลว", "common.publishUpdate": "เผยแพร่การอัปเดต", "common.published": "เผย แพร่", "common.publishedAt": "เผย แพร่", + "common.publishingToMarketplace": "กำลังเผยแพร่...", "common.redo": "พร้อม", 
"common.restart": "เริ่มใหม่", "common.restore": "ซ่อมแซม", diff --git a/web/i18n/tr-TR/app.json b/web/i18n/tr-TR/app.json index 2978f7cffd..aa10f954e9 100644 --- a/web/i18n/tr-TR/app.json +++ b/web/i18n/tr-TR/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "Emoji", "iconPicker.image": "Görsel", "iconPicker.ok": "Tamam", + "importApp": "Uygulamayı İçe Aktar", "importDSL": "DSL dosyasını içe aktar", "importFromDSL": "DSL içe aktar", "importFromDSLFile": "DSL dosyasından", "importFromDSLUrl": "URL'den", "importFromDSLUrlPlaceholder": "DSL bağlantısını buraya yapıştır", "join": "Topluluğa katıl", + "marketplace.template.categories": "Kategoriler", + "marketplace.template.category.design": "Tasarım", + "marketplace.template.category.it": "BT", + "marketplace.template.category.knowledge": "Bilgi", + "marketplace.template.category.marketing": "Pazarlama", + "marketplace.template.category.operations": "Operasyonlar", + "marketplace.template.category.sales": "Satış", + "marketplace.template.category.support": "Destek", + "marketplace.template.fetchFailed": "Şablon alınamadı", + "marketplace.template.importConfirm": "İçe Aktar", + "marketplace.template.importFailed": "Şablon içe aktarılamadı", + "marketplace.template.modalTitle": "Marketplace'den İçe Aktar", + "marketplace.template.overview": "Genel Bakış", + "marketplace.template.publishedBy": "Yayıncı", + "marketplace.template.usageCount": "Kullanım", + "marketplace.template.viewOnMarketplace": "Marketplace'de Görüntüle", "maxActiveRequests": "Maksimum eş zamanlı istekler", "maxActiveRequestsPlaceholder": "Sınırsız için 0 girin", "maxActiveRequestsTip": "Her uygulama için maksimum eşzamanlı aktif istek sayısı (sınırsız için 0)", diff --git a/web/i18n/tr-TR/workflow.json b/web/i18n/tr-TR/workflow.json index 54ee28cf1c..7cd69d7df1 100644 --- a/web/i18n/tr-TR/workflow.json +++ b/web/i18n/tr-TR/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "Sohbet Robotunu hata ayıklamak için aşağıdaki kutuya içerik girin", "common.processData": "Veriyi İşle", "common.publish": "Yayınla", + "common.publishToMarketplace": "Marketplace'de Yayınla", + "common.publishToMarketplaceFailed": "Marketplace'de Yayınlama Başarısız", "common.publishUpdate": "Güncellemeyi Yayınla", "common.published": "Yayınlandı", "common.publishedAt": "Yayınlandı", + "common.publishingToMarketplace": "Yayınlanıyor...", "common.redo": "Yinele", "common.restart": "Yeniden Başlat", "common.restore": "Geri Yükle", diff --git a/web/i18n/uk-UA/app.json b/web/i18n/uk-UA/app.json index f224f0c31f..f88e1e60f9 100644 --- a/web/i18n/uk-UA/app.json +++ b/web/i18n/uk-UA/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "Емодзі", "iconPicker.image": "Зображення", "iconPicker.ok": "OK", + "importApp": "Імпортувати додаток", "importDSL": "Імпортувати файл DSL", "importFromDSL": "Імпорт з DSL", "importFromDSLFile": "З DSL-файлу", "importFromDSLUrl": "З URL", "importFromDSLUrlPlaceholder": "Вставте посилання на DSL тут", "join": "Приєднуйтесь до спільноти", + "marketplace.template.categories": "Категорії", + "marketplace.template.category.design": "Дизайн", + "marketplace.template.category.it": "ІТ", + "marketplace.template.category.knowledge": "Знання", + "marketplace.template.category.marketing": "Маркетинг", + "marketplace.template.category.operations": "Операції", + "marketplace.template.category.sales": "Продажі", + "marketplace.template.category.support": "Підтримка", + "marketplace.template.fetchFailed": "Не вдалося отримати шаблон", + "marketplace.template.importConfirm": 
"Імпортувати", + "marketplace.template.importFailed": "Не вдалося імпортувати шаблон", + "marketplace.template.modalTitle": "Імпортувати з Marketplace", + "marketplace.template.overview": "Огляд", + "marketplace.template.publishedBy": "Від", + "marketplace.template.usageCount": "Використання", + "marketplace.template.viewOnMarketplace": "Переглянути на Marketplace", "maxActiveRequests": "Максимальна кількість одночасних запитів", "maxActiveRequestsPlaceholder": "Введіть 0 для необмеженого", "maxActiveRequestsTip": "Максимальна кількість одночасних активних запитів на додаток (0 для необмеженої кількості)", diff --git a/web/i18n/uk-UA/workflow.json b/web/i18n/uk-UA/workflow.json index 94f869845e..44d527618e 100644 --- a/web/i18n/uk-UA/workflow.json +++ b/web/i18n/uk-UA/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "Введіть вміст у поле нижче, щоб розпочати налагодження чат-бота", "common.processData": "Обробити дані", "common.publish": "Опублікувати", + "common.publishToMarketplace": "Опублікувати на Marketplace", + "common.publishToMarketplaceFailed": "Не вдалося опублікувати на Marketplace", "common.publishUpdate": "Опублікувати оновлення", "common.published": "Опубліковано", "common.publishedAt": "Опубліковано о", + "common.publishingToMarketplace": "Публікація...", "common.redo": "Повторити", "common.restart": "Перезапустити", "common.restore": "Відновити", diff --git a/web/i18n/vi-VN/app.json b/web/i18n/vi-VN/app.json index 399d2dccf5..2be7906afb 100644 --- a/web/i18n/vi-VN/app.json +++ b/web/i18n/vi-VN/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "Biểu tượng cảm xúc", "iconPicker.image": "Hình ảnh", "iconPicker.ok": "Đồng ý", + "importApp": "Nhập App", "importDSL": "Nhập tệp DSL", "importFromDSL": "Nhập từ DSL", "importFromDSLFile": "Từ tệp DSL", "importFromDSLUrl": "Từ URL", "importFromDSLUrlPlaceholder": "Dán liên kết DSL vào đây", "join": "Tham gia cộng đồng", + "marketplace.template.categories": "Danh mục", + "marketplace.template.category.design": "Thiết kế", + "marketplace.template.category.it": "CNTT", + "marketplace.template.category.knowledge": "Kiến thức", + "marketplace.template.category.marketing": "Marketing", + "marketplace.template.category.operations": "Vận hành", + "marketplace.template.category.sales": "Bán hàng", + "marketplace.template.category.support": "Hỗ trợ", + "marketplace.template.fetchFailed": "Không thể lấy mẫu", + "marketplace.template.importConfirm": "Nhập", + "marketplace.template.importFailed": "Không thể nhập mẫu", + "marketplace.template.modalTitle": "Nhập từ Marketplace", + "marketplace.template.overview": "Tổng quan", + "marketplace.template.publishedBy": "Bởi", + "marketplace.template.usageCount": "Lượt sử dụng", + "marketplace.template.viewOnMarketplace": "Xem trên Marketplace", "maxActiveRequests": "Số yêu cầu đồng thời tối đa", "maxActiveRequestsPlaceholder": "Nhập 0 để không giới hạn", "maxActiveRequestsTip": "Số yêu cầu hoạt động đồng thời tối đa cho mỗi ứng dụng (0 để không giới hạn)", diff --git a/web/i18n/vi-VN/workflow.json b/web/i18n/vi-VN/workflow.json index 377a794464..231c01bc82 100644 --- a/web/i18n/vi-VN/workflow.json +++ b/web/i18n/vi-VN/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "Nhập nội dung vào hộp bên dưới để bắt đầu gỡ lỗi Chatbot", "common.processData": "Xử lý dữ liệu", "common.publish": "Xuất bản", + "common.publishToMarketplace": "Xuất bản lên Marketplace", + "common.publishToMarketplaceFailed": "Xuất bản lên Marketplace thất bại", "common.publishUpdate": "Cập nhật xuất bản", 
"common.published": "Đã xuất bản", "common.publishedAt": "Đã xuất bản lúc", + "common.publishingToMarketplace": "Đang xuất bản...", "common.redo": "Làm lại", "common.restart": "Khởi động lại", "common.restore": "Khôi phục", diff --git a/web/i18n/zh-Hans/workflow.json b/web/i18n/zh-Hans/workflow.json index ac3a27af11..f9b8e9d652 100644 --- a/web/i18n/zh-Hans/workflow.json +++ b/web/i18n/zh-Hans/workflow.json @@ -229,6 +229,8 @@ "common.previewPlaceholder": "在下面的框中输入内容开始调试聊天机器人", "common.processData": "数据处理", "common.publish": "发布", + "common.publishAsEvaluationWorkflow": "发布为评测工作流", + "common.publishAsStandardWorkflow": "发布为标准工作流", "common.publishToMarketplace": "发布到市场", "common.publishToMarketplaceFailed": "发布到市场失败", "common.publishUpdate": "发布更新", @@ -247,6 +249,11 @@ "common.searchVar": "搜索变量", "common.setVarValuePlaceholder": "设置变量值", "common.showRunHistory": "显示运行历史", + "common.switchToEvaluationWorkflow": "切换为评测工作流", + "common.switchToEvaluationWorkflowDisabledTip": "评测工作流不支持 Human Input 节点或 Trigger 开始节点。", + "common.switchToEvaluationWorkflowTip": "将当前工作流转换为批量测试用的自定义评测器,并禁用公开 Web App 访问。", + "common.switchToStandardWorkflow": "切换为标准工作流", + "common.switchToStandardWorkflowTip": "将当前评测器转换回标准工作流,并恢复公开 Web App 访问。", "common.syncingData": "同步数据中,只需几秒钟。", "common.tagBound": "使用此标签的应用数量", "common.undo": "撤销", @@ -1147,6 +1154,16 @@ "singleRun.testRun": "测试运行", "singleRun.testRunIteration": "测试运行迭代", "singleRun.testRunLoop": "测试运行循环", + "snippet.addToSnippet": "添加到 snippet", + "snippet.confirm": "确认", + "snippet.createDialogTitle": "创建 Snippet", + "snippet.createSuccess": "Snippet 已创建", + "snippet.descriptionLabel": "描述(可选)", + "snippet.descriptionPlaceholder": "简要描述你的 snippet", + "snippet.nameLabel": "Snippet 名称和图标", + "snippet.namePlaceholder": "Snippet 名称", + "snippet.shortcuts.press": "按下", + "snippet.shortcuts.toConfirm": "确认", "tabs.-": "默认", "tabs.addAll": "添加全部", "tabs.agent": "Agent 策略", @@ -1154,6 +1171,7 @@ "tabs.allTool": "全部", "tabs.allTriggers": "全部触发器", "tabs.blocks": "节点", + "tabs.createSnippet": "创建 snippet", "tabs.customTool": "自定义", "tabs.featuredTools": "精选推荐", "tabs.hideActions": "收起工具", @@ -1163,19 +1181,23 @@ "tabs.noFeaturedTriggers": "前往插件市场查看更多触发器", "tabs.noPluginsFound": "未找到插件", "tabs.noResult": "未找到匹配项", + "tabs.noSnippetsFound": "未找到 snippets", "tabs.plugin": "插件", "tabs.pluginByAuthor": "来自 {{author}}", "tabs.question-understand": "问题理解", "tabs.requestToCommunity": "向社区反馈", "tabs.searchBlock": "搜索节点", "tabs.searchDataSource": "搜索数据源", + "tabs.searchSnippets": "搜索 snippets...", "tabs.searchTool": "搜索工具", "tabs.searchTrigger": "搜索触发器...", "tabs.showLessFeatured": "收起", "tabs.showMoreFeatured": "查看更多", + "tabs.snippets": "Snippets", "tabs.sources": "数据源", "tabs.start": "开始", "tabs.startDisabledTip": "触发节点与用户输入节点互斥。", + "tabs.startNotSupportedTip": "Snippet 暂不支持 Start 标签。", "tabs.tools": "工具", "tabs.transform": "转换", "tabs.usePlugin": "选择工具", diff --git a/web/i18n/zh-Hant/app.json b/web/i18n/zh-Hant/app.json index a7fbcfd65f..7c485b6520 100644 --- a/web/i18n/zh-Hant/app.json +++ b/web/i18n/zh-Hant/app.json @@ -118,12 +118,29 @@ "iconPicker.emoji": "表情符號", "iconPicker.image": "圖片", "iconPicker.ok": "確認", + "importApp": "匯入應用", "importDSL": "匯入 DSL 檔案", "importFromDSL": "從 DSL 導入", "importFromDSLFile": "從 DSL 檔", "importFromDSLUrl": "寄件者 URL", "importFromDSLUrlPlaceholder": "在此處貼上 DSL 連結", "join": "參與社群", + "marketplace.template.categories": "分類", + "marketplace.template.category.design": "設計", + "marketplace.template.category.it": "IT", + 
"marketplace.template.category.knowledge": "知識", + "marketplace.template.category.marketing": "行銷", + "marketplace.template.category.operations": "營運", + "marketplace.template.category.sales": "銷售", + "marketplace.template.category.support": "支援", + "marketplace.template.fetchFailed": "獲取模板失敗", + "marketplace.template.importConfirm": "匯入", + "marketplace.template.importFailed": "匯入模板失敗", + "marketplace.template.modalTitle": "從市場匯入", + "marketplace.template.overview": "概覽", + "marketplace.template.publishedBy": "由", + "marketplace.template.usageCount": "使用次數", + "marketplace.template.viewOnMarketplace": "在市場上查看", "maxActiveRequests": "同時最大請求數", "maxActiveRequestsPlaceholder": "輸入 0 以表示無限", "maxActiveRequestsTip": "每個應用程式可同時活躍請求的最大數量(0為無限制)", diff --git a/web/i18n/zh-Hant/workflow.json b/web/i18n/zh-Hant/workflow.json index 1e10badec0..9d296250db 100644 --- a/web/i18n/zh-Hant/workflow.json +++ b/web/i18n/zh-Hant/workflow.json @@ -229,9 +229,12 @@ "common.previewPlaceholder": "在下面的框中輸入內容開始測試聊天機器人", "common.processData": "資料處理", "common.publish": "發佈", + "common.publishToMarketplace": "發佈到市場", + "common.publishToMarketplaceFailed": "發佈到市場失敗", "common.publishUpdate": "發布更新", "common.published": "已發佈", "common.publishedAt": "發佈於", + "common.publishingToMarketplace": "發佈中...", "common.redo": "重做", "common.restart": "重新開始", "common.restore": "恢復",