diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml
index 5413f83c27..d3f71c4647 100644
--- a/.github/workflows/autofix.yml
+++ b/.github/workflows/autofix.yml
@@ -16,14 +16,14 @@ jobs:
       - name: Check Docker Compose inputs
         id: docker-compose-changes
-        uses: tj-actions/changed-files@v46
+        uses: tj-actions/changed-files@v47
         with:
           files: |
             docker/generate_docker_compose
            docker/.env.example
            docker/docker-compose-template.yaml
            docker/docker-compose.yaml

-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v6
        with:
          python-version: "3.11"
diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml
index bbf89236de..704d896192 100644
--- a/.github/workflows/build-push.yml
+++ b/.github/workflows/build-push.yml
@@ -112,7 +112,7 @@ jobs:
           context: "web"
     steps:
       - name: Download digests
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v7
        with:
          path: /tmp/digests
          pattern: digests-${{ matrix.context }}-*
diff --git a/.github/workflows/deploy-agent-dev.yml b/.github/workflows/deploy-agent-dev.yml
index dff48b5510..dd759f7ba5 100644
--- a/.github/workflows/deploy-agent-dev.yml
+++ b/.github/workflows/deploy-agent-dev.yml
@@ -19,7 +19,7 @@ jobs:
       github.event.workflow_run.head_branch == 'deploy/agent-dev'
     steps:
       - name: Deploy to server
-        uses: appleboy/ssh-action@v0.1.8
+        uses: appleboy/ssh-action@v1
        with:
          host: ${{ secrets.AGENT_DEV_SSH_HOST }}
          username: ${{ secrets.SSH_USER }}
diff --git a/.github/workflows/deploy-dev.yml b/.github/workflows/deploy-dev.yml
index cd1c86e668..38fa0b9a7f 100644
--- a/.github/workflows/deploy-dev.yml
+++ b/.github/workflows/deploy-dev.yml
@@ -16,7 +16,7 @@ jobs:
       github.event.workflow_run.head_branch == 'deploy/dev'
     steps:
       - name: Deploy to server
-        uses: appleboy/ssh-action@v0.1.8
+        uses: appleboy/ssh-action@v1
        with:
          host: ${{ secrets.SSH_HOST }}
          username: ${{ secrets.SSH_USER }}
diff --git a/.github/workflows/deploy-hitl.yml b/.github/workflows/deploy-hitl.yml
index 8144ba4f08..7d5f0a22e7 100644
--- a/.github/workflows/deploy-hitl.yml
+++ b/.github/workflows/deploy-hitl.yml
@@ -20,7 +20,7 @@ jobs:
       )
     steps:
       - name: Deploy to server
-        uses: appleboy/ssh-action@v0.1.8
+        uses: appleboy/ssh-action@v1
        with:
          host: ${{ secrets.HITL_SSH_HOST }}
          username: ${{ secrets.SSH_USER }}
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 1870b1f670..b6df1d7e93 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -18,7 +18,7 @@ jobs:
       pull-requests: write

     steps:
-      - uses: actions/stale@v5
+      - uses: actions/stale@v10
        with:
          days-before-issue-stale: 15
          days-before-issue-close: 3
diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml
index b96db5a390..86b66bf9df 100644
--- a/.github/workflows/style.yml
+++ b/.github/workflows/style.yml
@@ -109,12 +109,12 @@ jobs:
           pnpm run lint:report
         continue-on-error: true

-      - name: Annotate Code
-        if: steps.changed-files.outputs.any_changed == 'true'
-        uses: DerLev/eslint-annotations@51347b3a0abfb503fc8734d5ae31c4b151297fae
-        with:
-          eslint-report: web/eslint_report.json
-          github-token: ${{ secrets.GITHUB_TOKEN }}
+      # - name: Annotate Code
+      #   if: steps.changed-files.outputs.any_changed == 'true' && github.event_name == 'pull_request'
+      #   uses: DerLev/eslint-annotations@51347b3a0abfb503fc8734d5ae31c4b151297fae
+      #   with:
+      #     eslint-report: web/eslint_report.json
+      #     github-token: ${{ secrets.GITHUB_TOKEN }}

       - name: Web type check
         if: steps.changed-files.outputs.any_changed == 'true'
diff --git a/.github/workflows/trigger-i18n-sync.yml b/.github/workflows/trigger-i18n-sync.yml
index de093c9235..66a29453b4 100644
--- a/.github/workflows/trigger-i18n-sync.yml
+++ b/.github/workflows/trigger-i18n-sync.yml
@@ -21,7 +21,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          fetch-depth: 0
diff --git a/AGENTS.md b/AGENTS.md
index 782861ad36..deab7c8629 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -12,12 +12,8 @@ The codebase is split into:
 ## Backend Workflow

+- Read `api/AGENTS.md` for details.
 - Run backend CLI commands through `uv run --project api <command>`.
-
-- Before submission, all backend modifications must pass local checks: `make lint`, `make type-check`, and `uv run --project api --dev dev/pytest/pytest_unit_tests.sh`.
-
-- Use Makefile targets for linting and formatting; `make lint` and `make type-check` cover the required checks.
-
 - Integration tests are CI-only and are not expected to run in the local environment.

 ## Frontend Workflow
diff --git a/Makefile b/Makefile
index 60c32948b9..e92a7b1314 100644
--- a/Makefile
+++ b/Makefile
@@ -61,7 +61,8 @@ check:
 lint:
 	@echo "🔧 Running ruff format, check with fixes, import linter, and dotenv-linter..."
-	@uv run --project api --dev sh -c 'ruff format ./api && ruff check --fix ./api'
+	@uv run --project api --dev ruff format ./api
+	@uv run --project api --dev ruff check --fix ./api
 	@uv run --directory api --dev lint-imports
 	@uv run --project api --dev dotenv-linter ./api/.env.example ./web/.env.example
 	@echo "✅ Linting complete"
@@ -73,7 +74,12 @@ type-check:
 test:
 	@echo "🧪 Running backend unit tests..."
-	@uv run --project api --dev dev/pytest/pytest_unit_tests.sh
+	@if [ -n "$(TARGET_TESTS)" ]; then \
+		echo "Target: $(TARGET_TESTS)"; \
+		uv run --project api --dev pytest $(TARGET_TESTS); \
+	else \
+		uv run --project api --dev dev/pytest/pytest_unit_tests.sh; \
+	fi
 	@echo "✅ Tests complete"

 # Build Docker images
@@ -125,7 +131,7 @@ help:
 	@echo "  make check       - Check code with ruff"
 	@echo "  make lint        - Format, fix, and lint code (ruff, imports, dotenv)"
 	@echo "  make type-check  - Run type checking with basedpyright"
-	@echo "  make test        - Run backend unit tests"
+	@echo "  make test        - Run backend unit tests (or TARGET_TESTS=./api/tests/)"
 	@echo ""
 	@echo "Docker Build Targets:"
 	@echo "  make build-web   - Build web Docker image"
diff --git a/agent-notes/.gitkeep b/agent-notes/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/AGENTS.md b/api/AGENTS.md
index 17398ec4b8..6ce419828b 100644
--- a/api/AGENTS.md
+++ b/api/AGENTS.md
@@ -1,62 +1,236 @@
-# Agent Skill Index
+# API Agent Guide
+
+## Agent Notes (must-check)
+
+Before you start work on any backend file under `api/`, you MUST check whether a related note exists under:
+
+- `agent-notes/<path>/<filename>.md`
+
+Rules:
+
+- **Path mapping**: for a target file `<path>/<filename>.py`, the note must be `agent-notes/<path>/<filename>.py.md` (same folder structure, same filename, plus `.md`; see the sketch after this list).
+- **Before working**:
+  - If the note exists, read it first and follow any constraints/decisions recorded there.
+  - If the note conflicts with the current code, or references an "origin" file/path that has been deleted, renamed, or migrated, treat the **code as the single source of truth** and update the note to match reality.
+  - If the note does not exist, create it with a short architecture/intent summary and any relevant invariants/edge cases.
+- **During working**:
+  - Keep the note in sync as you discover constraints, make decisions, or change approach.
+  - If you move/rename a file, migrate its note to the new mapped path (and fix any outdated references inside the note).
+  - Record non-obvious edge cases, trade-offs, and the test/verification plan as you go (not just at the end).
+  - Keep notes **coherent**: integrate new findings into the relevant sections and rewrite for clarity; avoid append-only “recent fix” / changelog-style additions unless the note is explicitly intended to be a changelog.
+- **When finishing work**:
+  - Update the related note(s) to reflect what changed, why, and any new edge cases/tests.
+  - If a file is deleted, remove or clearly deprecate the corresponding note so it cannot be mistaken as current guidance.
+  - Keep notes concise and accurate; they are meant to prevent repeated rediscovery.
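+
+As a small illustration of the path mapping (the paths and helper below are hypothetical — a sketch of the rule, not part of the codebase):
+
+```python
+from pathlib import Path
+
+
+def note_path_for(source_file: str) -> Path:
+    """Map a backend source file to its agent note, e.g.
+    api/services/tag_service.py -> agent-notes/api/services/tag_service.py.md
+    """
+    return Path("agent-notes") / f"{source_file}.md"
+```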
+
+## Skill Index

 Start with the section that best matches your need. Each entry lists the problems it solves plus key files/concepts so you know what to expect before opening it.

-______________________________________________________________________
+### Platform Foundations

-## Platform Foundations
-
-- **[Infrastructure Overview](agent_skills/infra.md)**\
-  When to read this:
+#### [Infrastructure Overview](agent_skills/infra.md)
+- **When to read this**
   - You need to understand where a feature belongs in the architecture.
   - You’re wiring storage, Redis, vector stores, or OTEL.
-  - You’re about to add CLI commands or async jobs.\
-    What it covers: configuration stack (`configs/app_config.py`, remote settings), storage entry points (`extensions/ext_storage.py`, `core/file/file_manager.py`), Redis conventions (`extensions/ext_redis.py`), plugin runtime topology, vector-store factory (`core/rag/datasource/vdb/*`), observability hooks, SSRF proxy usage, and core CLI commands.
+  - You’re about to add CLI commands or async jobs.
+- **What it covers**
+  - Configuration stack (`configs/app_config.py`, remote settings)
+  - Storage entry points (`extensions/ext_storage.py`, `core/file/file_manager.py`)
+  - Redis conventions (`extensions/ext_redis.py`)
+  - Plugin runtime topology
+  - Vector-store factory (`core/rag/datasource/vdb/*`)
+  - Observability hooks
+  - SSRF proxy usage
+  - Core CLI commands

-- **[Coding Style](agent_skills/coding_style.md)**\
-  When to read this:
+### Plugin & Extension Development

-  - You’re writing or reviewing backend code and need the authoritative checklist.
-  - You’re unsure about Pydantic validators, SQLAlchemy session usage, or logging patterns.
-  - You want the exact lint/type/test commands used in PRs.\
-    Includes: Ruff & BasedPyright commands, no-annotation policy, session examples (`with Session(db.engine, ...)`), `@field_validator` usage, logging expectations, and the rule set for file size, helpers, and package management.
-
-______________________________________________________________________
-
-## Plugin & Extension Development
-
-- **[Plugin Systems](agent_skills/plugin.md)**\
-  When to read this:
+#### [Plugin Systems](agent_skills/plugin.md)
+- **When to read this**
   - You’re building or debugging a marketplace plugin.
-  - You need to know how manifests, providers, daemons, and migrations fit together.\
-    What it covers: plugin manifests (`core/plugin/entities/plugin.py`), installation/upgrade flows (`services/plugin/plugin_service.py`, CLI commands), runtime adapters (`core/plugin/impl/*` for tool/model/datasource/trigger/endpoint/agent), daemon coordination (`core/plugin/entities/plugin_daemon.py`), and how provider registries surface capabilities to the rest of the platform.
+  - You need to know how manifests, providers, daemons, and migrations fit together.
+- **What it covers**
+  - Plugin manifests (`core/plugin/entities/plugin.py`)
+  - Installation/upgrade flows (`services/plugin/plugin_service.py`, CLI commands)
+  - Runtime adapters (`core/plugin/impl/*` for tool/model/datasource/trigger/endpoint/agent)
+  - Daemon coordination (`core/plugin/entities/plugin_daemon.py`)
+  - How provider registries surface capabilities to the rest of the platform

-- **[Plugin OAuth](agent_skills/plugin_oauth.md)**\
-  When to read this:
+#### [Plugin OAuth](agent_skills/plugin_oauth.md)
+- **When to read this**
   - You must integrate OAuth for a plugin or datasource.
-  - You’re handling credential encryption or refresh flows.\
-    Topics: credential storage, encryption helpers (`core/helper/provider_encryption.py`), OAuth client bootstrap (`services/plugin/oauth_service.py`, `services/plugin/plugin_parameter_service.py`), and how console/API layers expose the flows.
+  - You’re handling credential encryption or refresh flows.
+- **Topics**
+  - Credential storage
+  - Encryption helpers (`core/helper/provider_encryption.py`)
+  - OAuth client bootstrap (`services/plugin/oauth_service.py`, `services/plugin/plugin_parameter_service.py`)
+  - How console/API layers expose the flows

-______________________________________________________________________
+### Workflow Entry & Execution

-## Workflow Entry & Execution
+#### [Trigger Concepts](agent_skills/trigger.md)

-- **[Trigger Concepts](agent_skills/trigger.md)**\
-  When to read this:
+- **When to read this**
   - You’re debugging why a workflow didn’t start.
   - You’re adding a new trigger type or hook.
-  - You need to trace async execution, draft debugging, or webhook/schedule pipelines.\
-    Details: Start-node taxonomy, webhook & schedule internals (`core/workflow/nodes/trigger_*`, `services/trigger/*`), async orchestration (`services/async_workflow_service.py`, Celery queues), debug event bus, and storage/logging interactions.
+  - You need to trace async execution, draft debugging, or webhook/schedule pipelines.
+- **Details**
+  - Start-node taxonomy
+  - Webhook & schedule internals (`core/workflow/nodes/trigger_*`, `services/trigger/*`)
+  - Async orchestration (`services/async_workflow_service.py`, Celery queues)
+  - Debug event bus
+  - Storage/logging interactions

-______________________________________________________________________
+## General Reminders

-## Additional Notes for Agents
-
-- All skill docs assume you follow the coding style guide—run Ruff/BasedPyright/tests listed there before submitting changes.
+- All skill docs assume you follow the coding style rules below—run the lint/type/test commands before submitting changes.
 - When you cannot find an answer in these briefs, search the codebase using the paths referenced (e.g., `core/plugin/impl/tool.py`, `services/dataset_service.py`).
 - If you run into cross-cutting concerns (tenancy, configuration, storage), check the infrastructure guide first; it links to most supporting modules.
 - Keep multi-tenancy and configuration central: everything flows through `configs.dify_config` and `tenant_id`.
 - When touching plugins or triggers, consult both the system overview and the specialised doc to ensure you adjust lifecycle, storage, and observability consistently.
+
+## Coding Style
+
+This is the default standard for backend code in this repo. Follow it for new code and use it as the checklist when reviewing changes.
+
+### Linting & Formatting
+
+- Use Ruff for formatting and linting (follow `.ruff.toml`).
+- Keep each line under 120 characters (including spaces).
+
+### Naming Conventions
+
+- Use `snake_case` for variables and functions.
+- Use `PascalCase` for classes.
+- Use `UPPER_CASE` for constants.
+
+### Typing & Class Layout
+
+- Code should usually include type annotations that match the repo’s current Python version (avoid untyped public APIs and “mystery” values).
+- Prefer modern typing forms (e.g. `list[str]`, `dict[str, int]`) and avoid `Any` unless there’s a strong reason.
+- For classes, declare member variables at the top of the class body (before `__init__`) so the class shape is obvious at a glance:
+
+```python
+from datetime import datetime
+
+
+class Example:
+    user_id: str
+    created_at: datetime
+
+    def __init__(self, user_id: str, created_at: datetime) -> None:
+        self.user_id = user_id
+        self.created_at = created_at
+```
+
+### General Rules
+
+- Use Pydantic v2 conventions.
+- Use `uv` for Python package management in this repo (usually with `--project api`).
+- Prefer simple functions over small “utility classes” for lightweight helpers.
+- Avoid implementing dunder methods unless it’s clearly needed and matches existing patterns.
+- Never start long-running services as part of agent work (`uv run app.py`, `flask run`, etc.); running tests is allowed.
+- Keep files below ~800 lines; split when necessary.
+- Keep code readable and explicit—avoid clever hacks.
+
+### Architecture & Boundaries
+
+- Mirror the layered architecture: controller → service → core/domain.
+- Reuse existing helpers in `core/`, `services/`, and `libs/` before creating new abstractions.
+- Optimise for observability: deterministic control flow, clear logging, actionable errors.
+
+### Logging & Errors
+
+- Never use `print`; use a module-level logger:
+  - `logger = logging.getLogger(__name__)`
+- Include tenant/app/workflow identifiers in log context when relevant.
+- Raise domain-specific exceptions (`services/errors`, `core/errors`) and translate them into HTTP responses in controllers (see the sketch below).
+- Log retryable events at `warning`, terminal failures at `error`.
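+
+A minimal, self-contained sketch of these conventions (the error class and in-memory lookup are illustrative, not existing APIs):
+
+```python
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class WorkflowNotFoundError(Exception):
+    """Hypothetical domain error; real ones live under `services/errors` / `core/errors`."""
+
+
+def load_workflow(workflows: dict[str, dict], tenant_id: str, workflow_id: str) -> dict:
+    workflow = workflows.get(workflow_id)
+    if workflow is None or workflow.get("tenant_id") != tenant_id:
+        # Terminal failure: log at `error` with tenant/workflow context, then raise a
+        # domain error for the controller layer to translate into an HTTP response.
+        logger.error("workflow not found: tenant_id=%s workflow_id=%s", tenant_id, workflow_id)
+        raise WorkflowNotFoundError(workflow_id)
+    return workflow
+```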
+
+### SQLAlchemy Patterns
+
+- Models inherit from `models.base.TypeBase`; do not create ad-hoc metadata or engines.
+- Open sessions with context managers:
+
+```python
+from sqlalchemy.orm import Session
+
+with Session(db.engine, expire_on_commit=False) as session:
+    stmt = select(Workflow).where(
+        Workflow.id == workflow_id,
+        Workflow.tenant_id == tenant_id,
+    )
+    workflow = session.execute(stmt).scalar_one_or_none()
+```
+
+- Prefer SQLAlchemy expressions; avoid raw SQL unless necessary.
+- Always scope queries by `tenant_id` and protect write paths with safeguards (`FOR UPDATE`, row counts, etc.).
+- Introduce repository abstractions only for very large tables (e.g., workflow executions) or when alternative storage strategies are required.
+
+### Storage & External I/O
+
+- Access storage via `extensions.ext_storage.storage`.
+- Use `core.helper.ssrf_proxy` for outbound HTTP fetches.
+- Background tasks that touch storage must be idempotent, and should log relevant object identifiers.
+
+### Pydantic Usage
+
+- Define DTOs with Pydantic v2 models and forbid extras by default.
+- Use `@field_validator` / `@model_validator` for domain rules.
+
+Example:
+
+```python
+from pydantic import BaseModel, ConfigDict, HttpUrl, field_validator
+
+
+class TriggerConfig(BaseModel):
+    endpoint: HttpUrl
+    secret: str
+
+    model_config = ConfigDict(extra="forbid")
+
+    @field_validator("secret")
+    def ensure_secret_prefix(cls, value: str) -> str:
+        if not value.startswith("dify_"):
+            raise ValueError("secret must start with dify_")
+        return value
+```
+
+### Generics & Protocols
+
+- Use `typing.Protocol` to define behavioural contracts (e.g., cache interfaces); see the sketch after this list.
+- Apply generics (`TypeVar`, `Generic`) for reusable utilities like caches or providers.
+- Validate dynamic inputs at runtime when generics cannot enforce safety alone.
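+
+A minimal sketch of a generic cache contract (illustrative only; not an existing interface in this codebase):
+
+```python
+from typing import Generic, Protocol, TypeVar
+
+T = TypeVar("T")
+
+
+class Cache(Protocol[T]):
+    def get(self, key: str) -> T | None: ...
+
+    def set(self, key: str, value: T) -> None: ...
+
+
+class InMemoryCache(Generic[T]):
+    """Structurally satisfies `Cache[T]`; no inheritance needed with `Protocol`."""
+
+    def __init__(self) -> None:
+        self._data: dict[str, T] = {}
+
+    def get(self, key: str) -> T | None:
+        return self._data.get(key)
+
+    def set(self, key: str, value: T) -> None:
+        self._data[key] = value
+```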
+
+### Tooling & Checks
+
+Quick checks while iterating:
+
+- Format: `make format`
+- Lint (includes auto-fix): `make lint`
+- Type check: `make type-check`
+- Targeted tests: `make test TARGET_TESTS=./api/tests/`
+
+Before opening a PR / submitting:
+
+- `make lint`
+- `make type-check`
+- `make test`
+
+### Controllers & Services
+
+- Controllers: parse input via Pydantic, invoke services, return serialised responses; no business logic (see the sketch below).
+- Services: coordinate repositories, providers, background tasks; keep side effects explicit.
+- Document non-obvious behaviour with concise comments.
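+
+A hedged sketch of the split (the endpoint, payload, and service names are hypothetical):
+
+```python
+from pydantic import BaseModel
+
+
+class RenameAppPayload(BaseModel):
+    name: str
+
+
+def rename_app(tenant_id: str, app_id: str, name: str) -> dict[str, str]:
+    # Service layer: business logic and explicit side effects live here.
+    return {"tenant_id": tenant_id, "app_id": app_id, "name": name}
+
+
+def rename_app_controller(tenant_id: str, app_id: str, raw_payload: dict) -> dict[str, str]:
+    # Controller layer: validate input, delegate to the service, serialise the result.
+    payload = RenameAppPayload.model_validate(raw_payload)
+    return rename_app(tenant_id, app_id, payload.name)
+```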
+
+### Miscellaneous
+
+- Use `configs.dify_config` for configuration—never read environment variables directly.
+- Maintain tenant awareness end-to-end; `tenant_id` must flow through every layer touching shared resources.
+- Queue async work through `services/async_workflow_service`; implement tasks under `tasks/` with explicit queue selection.
+- Keep experimental scripts under `dev/`; do not ship them in production builds.
diff --git a/api/agent_skills/coding_style.md b/api/agent_skills/coding_style.md
deleted file mode 100644
index a2b66f0bd5..0000000000
--- a/api/agent_skills/coding_style.md
+++ /dev/null
@@ -1,115 +0,0 @@
-## Linter
-
-- Always follow `.ruff.toml`.
-- Run `uv run ruff check --fix --unsafe-fixes`.
-- Keep each line under 100 characters (including spaces).
-
-## Code Style
-
-- `snake_case` for variables and functions.
-- `PascalCase` for classes.
-- `UPPER_CASE` for constants.
-
-## Rules
-
-- Use Pydantic v2 standard.
-- Use `uv` for package management.
-- Do not override dunder methods like `__init__`, `__iadd__`, etc.
-- Never launch services (`uv run app.py`, `flask run`, etc.); running tests under `tests/` is allowed.
-- Prefer simple functions over classes for lightweight helpers.
-- Keep files below 800 lines; split when necessary.
-- Keep code readable—no clever hacks.
-- Never use `print`; log with `logger = logging.getLogger(__name__)`.
-
-## Guiding Principles
-
-- Mirror the project’s layered architecture: controller → service → core/domain.
-- Reuse existing helpers in `core/`, `services/`, and `libs/` before creating new abstractions.
-- Optimise for observability: deterministic control flow, clear logging, actionable errors.
-
-## SQLAlchemy Patterns
-
-- Models inherit from `models.base.Base`; never create ad-hoc metadata or engines.
-
-- Open sessions with context managers:
-
-  ```python
-  from sqlalchemy.orm import Session
-
-  with Session(db.engine, expire_on_commit=False) as session:
-      stmt = select(Workflow).where(
-          Workflow.id == workflow_id,
-          Workflow.tenant_id == tenant_id,
-      )
-      workflow = session.execute(stmt).scalar_one_or_none()
-  ```
-
-- Use SQLAlchemy expressions; avoid raw SQL unless necessary.
-
-- Introduce repository abstractions only for very large tables (e.g., workflow executions) to support alternative storage strategies.
-
-- Always scope queries by `tenant_id` and protect write paths with safeguards (`FOR UPDATE`, row counts, etc.).
-
-## Storage & External IO
-
-- Access storage via `extensions.ext_storage.storage`.
-- Use `core.helper.ssrf_proxy` for outbound HTTP fetches.
-- Background tasks that touch storage must be idempotent and log the relevant object identifiers.
-
-## Pydantic Usage
-
-- Define DTOs with Pydantic v2 models and forbid extras by default.
-
-- Use `@field_validator` / `@model_validator` for domain rules.
-
-- Example:
-
-  ```python
-  from pydantic import BaseModel, ConfigDict, HttpUrl, field_validator
-
-  class TriggerConfig(BaseModel):
-      endpoint: HttpUrl
-      secret: str
-
-      model_config = ConfigDict(extra="forbid")
-
-      @field_validator("secret")
-      def ensure_secret_prefix(cls, value: str) -> str:
-          if not value.startswith("dify_"):
-              raise ValueError("secret must start with dify_")
-          return value
-  ```
-
-## Generics & Protocols
-
-- Use `typing.Protocol` to define behavioural contracts (e.g., cache interfaces).
-- Apply generics (`TypeVar`, `Generic`) for reusable utilities like caches or providers.
-- Validate dynamic inputs at runtime when generics cannot enforce safety alone.
-
-## Error Handling & Logging
-
-- Raise domain-specific exceptions (`services/errors`, `core/errors`) and translate to HTTP responses in controllers.
-- Declare `logger = logging.getLogger(__name__)` at module top.
-- Include tenant/app/workflow identifiers in log context.
-- Log retryable events at `warning`, terminal failures at `error`.
-
-## Tooling & Checks
-
-- Format/lint: `uv run --project api --dev ruff format ./api` and `uv run --project api --dev ruff check --fix --unsafe-fixes ./api`.
-- Type checks: `uv run --directory api --dev basedpyright`.
-- Tests: `uv run --project api --dev dev/pytest/pytest_unit_tests.sh`.
-- Run all of the above before submitting your work.
-
-## Controllers & Services
-
-- Controllers: parse input via Pydantic, invoke services, return serialised responses; no business logic.
-- Services: coordinate repositories, providers, background tasks; keep side effects explicit.
-- Avoid repositories unless necessary; direct SQLAlchemy usage is preferred for typical tables.
-- Document non-obvious behaviour with concise comments.
-
-## Miscellaneous
-
-- Use `configs.dify_config` for configuration—never read environment variables directly.
-- Maintain tenant awareness end-to-end; `tenant_id` must flow through every layer touching shared resources.
-- Queue async work through `services/async_workflow_service`; implement tasks under `tasks/` with explicit queue selection.
-- Keep experimental scripts under `dev/`; do not ship them in production builds.
diff --git a/api/controllers/console/tag/tags.py b/api/controllers/console/tag/tags.py
index e9fbb515e4..023ffc991a 100644
--- a/api/controllers/console/tag/tags.py
+++ b/api/controllers/console/tag/tags.py
@@ -30,6 +30,11 @@ class TagBindingRemovePayload(BaseModel):
     type: Literal["knowledge", "app"] | None = Field(default=None, description="Tag type")


+class TagListQueryParam(BaseModel):
+    type: Literal["knowledge", "app", ""] = Field("", description="Tag type filter")
+    keyword: str | None = Field(None, description="Search keyword")
+
+
 register_schema_models(
     console_ns,
     TagBasePayload,
@@ -43,12 +48,15 @@ class TagListApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
+    @console_ns.doc(
+        params={"type": 'Tag type filter. Can be "knowledge" or "app".', "keyword": "Search keyword for tag name."}
+    )
     @marshal_with(dataset_tag_fields)
     def get(self):
         _, current_tenant_id = current_account_with_tenant()
-        tag_type = request.args.get("type", type=str, default="")
-        keyword = request.args.get("keyword", default=None, type=str)
-        tags = TagService.get_tags(tag_type, current_tenant_id, keyword)
+        raw_args = request.args.to_dict()
+        param = TagListQueryParam.model_validate(raw_args)
+        tags = TagService.get_tags(param.type, current_tenant_id, param.keyword)
         return tags, 200

diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py
index 09ce558346..9f4febf552 100644
--- a/api/core/llm_generator/llm_generator.py
+++ b/api/core/llm_generator/llm_generator.py
@@ -69,8 +69,8 @@ class LLMGenerator:
         response: LLMResult = model_instance.invoke_llm(
             prompt_messages=list(prompts), model_parameters={"max_tokens": 500, "temperature": 1}, stream=False
         )
-        answer = cast(str, response.message.content)
-        if answer is None:
+        answer = response.message.get_text_content()
+        if answer == "":
             return ""
         try:
             result_dict = json.loads(answer)
@@ -182,7 +182,7 @@ class LLMGenerator:
                 prompt_messages=list(prompt_messages), model_parameters=model_parameters, stream=False
             )

-            rule_config["prompt"] = cast(str, response.message.content)
+            rule_config["prompt"] = response.message.get_text_content()

         except InvokeError as e:
             error = str(e)
@@ -235,13 +235,11 @@ class LLMGenerator:

             return rule_config

-        rule_config["prompt"] = cast(str, prompt_content.message.content)
+        rule_config["prompt"] = prompt_content.message.get_text_content()

-        if not isinstance(prompt_content.message.content, str):
-            raise NotImplementedError("prompt content is not a string")
         parameter_generate_prompt = parameter_template.format(
             inputs={
-                "INPUT_TEXT": prompt_content.message.content,
+                "INPUT_TEXT": prompt_content.message.get_text_content(),
             },
             remove_template_variables=False,
         )
@@ -251,7 +249,7 @@ class LLMGenerator:
         statement_generate_prompt = statement_template.format(
             inputs={
                 "TASK_DESCRIPTION": instruction,
-                "INPUT_TEXT": prompt_content.message.content,
+                "INPUT_TEXT": prompt_content.message.get_text_content(),
             },
             remove_template_variables=False,
         )
@@ -261,7 +259,7 @@
             parameter_content: LLMResult = model_instance.invoke_llm(
                 prompt_messages=list(parameter_messages), model_parameters=model_parameters, stream=False
             )
-            rule_config["variables"] = re.findall(r'"\s*([^"]+)\s*"', cast(str, parameter_content.message.content))
+            rule_config["variables"] = re.findall(r'"\s*([^"]+)\s*"', parameter_content.message.get_text_content())
         except InvokeError as e:
             error = str(e)
             error_step = "generate variables"
@@ -270,7 +268,7 @@
             statement_content: LLMResult = model_instance.invoke_llm(
                 prompt_messages=list(statement_messages), model_parameters=model_parameters, stream=False
             )
-            rule_config["opening_statement"] = cast(str, statement_content.message.content)
+            rule_config["opening_statement"] = statement_content.message.get_text_content()
         except InvokeError as e:
             error = str(e)
             error_step = "generate conversation opener"
@@ -342,7 +340,7 @@
                 prompt_messages=list(prompt_messages), model_parameters=model_parameters, stream=False
             )

-            generated_code = cast(str, response.message.content)
+            generated_code = response.message.get_text_content()
             return {"code": generated_code, "language": code_language, "error": ""}

         except InvokeError as e:
@@ -378,7 +376,7 @@
             raise TypeError("Expected LLMResult when stream=False")
         response = result

-        answer = cast(str, response.message.content)
+        answer = response.message.get_text_content()
         return answer.strip()

     @classmethod
@@ -402,10 +400,7 @@
             prompt_messages=list(prompt_messages), model_parameters=model_parameters, stream=False
         )

-        raw_content = response.message.content
-
-        if not isinstance(raw_content, str):
-            raise ValueError(f"LLM response content must be a string, got: {type(raw_content)}")
+        raw_content = response.message.get_text_content()

         try:
             parsed_content = json.loads(raw_content)
diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/card-view.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/card-view.tsx
index 939e4e9fe6..81b4f2474e 100644
--- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/card-view.tsx
+++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/card-view.tsx
@@ -65,15 +65,17 @@ const CardView: FC = ({ appId, isInPanel, className }) => {
             {t('overview.disableTooltip.triggerMode', { ns: 'appOverview', feature: featureName })}
-
             onClick={(event) => {
               event.stopPropagation()
-              window.open(triggerDocUrl, '_blank')
             }}
           >
             {t('overview.appInfo.enableTooltip.learnMore', { ns: 'appOverview' })}
-
+    ), [t, triggerDocUrl])
diff --git a/web/app/components/base/search-input/index.tsx b/web/app/components/base/search-input/index.tsx
index fe92a5d092..a2752373f1 100644
--- a/web/app/components/base/search-input/index.tsx
+++ b/web/app/components/base/search-input/index.tsx
@@ -20,6 +20,7 @@ const SearchInput: FC = ({
   white,
 }) => {
   const { t } = useTranslation()
+  const inputRef = useRef(null)
   const [focus, setFocus] = useState(false)
   const isComposing = useRef(false)
   const [compositionValue, setCompositionValue] = useState('')
@@ -36,6 +37,7 @@