mirror of https://github.com/langgenius/dify.git
Merge branch 'main' into feat/mcp-06-18
commit 4736819dd9
@@ -11,7 +11,7 @@
         "nodeGypDependencies": true,
         "version": "lts"
     },
-    "ghcr.io/devcontainers-contrib/features/npm-package:1": {
+    "ghcr.io/devcontainers-extra/features/npm-package:1": {
         "package": "typescript",
         "version": "latest"
     },

README.md
@@ -63,7 +63,7 @@ Dify is an open-source platform for developing LLM applications. Its intuitive i
 > - CPU >= 2 Core
 > - RAM >= 4 GiB
 
-</br>
+<br/>
 
 The easiest way to start the Dify server is through [Docker Compose](docker/docker-compose.yaml). Before running Dify with the following commands, make sure that [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed on your machine:
 
@@ -109,15 +109,15 @@ All of Dify's offerings come with corresponding APIs, so you could effortlessly
 
 ## Using Dify
 
-- **Cloud </br>**
+- **Cloud <br/>**
  We host a [Dify Cloud](https://dify.ai) service for anyone to try with zero setup. It provides all the capabilities of the self-deployed version, and includes 200 free GPT-4 calls in the sandbox plan.
 
-- **Self-hosting Dify Community Edition</br>**
+- **Self-hosting Dify Community Edition<br/>**
  Quickly get Dify running in your environment with this [starter guide](#quick-start).
  Use our [documentation](https://docs.dify.ai) for further references and more in-depth instructions.
 
-- **Dify for enterprise / organizations</br>**
-  We provide additional enterprise-centric features. [Log your questions for us through this chatbot](https://udify.app/chat/22L1zSxg6yW1cWQg) or [send us an email](mailto:business@dify.ai?subject=%5BGitHub%5DBusiness%20License%20Inquiry) to discuss enterprise needs. </br>
+- **Dify for enterprise / organizations<br/>**
+  We provide additional enterprise-centric features. [Log your questions for us through this chatbot](https://udify.app/chat/22L1zSxg6yW1cWQg) or [send us an email](mailto:business@dify.ai?subject=%5BGitHub%5DBusiness%20License%20Inquiry) to discuss enterprise needs. <br/>
 
 > For startups and small businesses using AWS, check out [Dify Premium on AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6) and deploy it to your own AWS VPC with one click. It's an affordable AMI offering with the option to create apps with custom logo and branding.
 
@@ -80,7 +80,7 @@
 1. If you need to handle and debug the async tasks (e.g. dataset importing and documents indexing), please start the worker service.
 
    ```bash
-   uv run celery -A app.celery worker -P gevent -c 2 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation
+   uv run celery -A app.celery worker -P gevent -c 2 --loglevel INFO -Q dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation
    ```
 
    Additionally, if you want to debug the celery scheduled tasks, you can run the following command in another terminal to start the beat service:
 
@@ -29,6 +29,7 @@ from libs.token import (
     clear_access_token_from_cookie,
     clear_csrf_token_from_cookie,
     clear_refresh_token_from_cookie,
+    extract_refresh_token,
     set_access_token_to_cookie,
     set_csrf_token_to_cookie,
     set_refresh_token_to_cookie,
@@ -270,7 +271,7 @@ class EmailCodeLoginApi(Resource):
 class RefreshTokenApi(Resource):
     def post(self):
         # Get refresh token from cookie instead of request body
-        refresh_token = request.cookies.get("refresh_token")
+        refresh_token = extract_refresh_token(request)
 
         if not refresh_token:
             return {"result": "fail", "message": "No refresh token provided"}, 401
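The practical effect of this change is that clients must carry the refresh token as a cookie rather than in the JSON body. A minimal, self-contained sketch of that behaviour (not the project's actual route or tests — the path, app factory and recent-Flask `set_cookie` signature are assumptions for illustration):

```python
# Sketch: the refresh endpoint reads the token from a cookie, not the request body.
from flask import Flask, jsonify, request

app = Flask(__name__)


@app.post("/refresh-token")  # hypothetical path for illustration
def refresh_token():
    token = request.cookies.get("refresh_token")  # cookie lookup replaces body parsing
    if not token:
        return jsonify({"result": "fail", "message": "No refresh token provided"}), 401
    return jsonify({"result": "success"})


with app.test_client() as client:
    # Sending the token only in the body is no longer enough.
    assert client.post("/refresh-token", json={"refresh_token": "x"}).status_code == 401
    # With the cookie set, the request succeeds.
    client.set_cookie("refresh_token", "example-token")
    assert client.post("/refresh-token").status_code == 200
```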
@@ -39,6 +39,7 @@ class FileApi(Resource):
         return {
             "file_size_limit": dify_config.UPLOAD_FILE_SIZE_LIMIT,
             "batch_count_limit": dify_config.UPLOAD_FILE_BATCH_LIMIT,
+            "file_upload_limit": dify_config.BATCH_UPLOAD_LIMIT,
             "image_file_size_limit": dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT,
             "video_file_size_limit": dify_config.UPLOAD_VIDEO_FILE_SIZE_LIMIT,
             "audio_file_size_limit": dify_config.UPLOAD_AUDIO_FILE_SIZE_LIMIT,
@@ -14,10 +14,25 @@ from services.file_service import FileService
 
 @files_ns.route("/<uuid:file_id>/image-preview")
 class ImagePreviewApi(Resource):
-    """
-    Deprecated
-    """
+    """Deprecated endpoint for retrieving image previews."""
 
+    @files_ns.doc("get_image_preview")
+    @files_ns.doc(description="Retrieve a signed image preview for a file")
+    @files_ns.doc(
+        params={
+            "file_id": "ID of the file to preview",
+            "timestamp": "Unix timestamp used in the signature",
+            "nonce": "Random string used in the signature",
+            "sign": "HMAC signature verifying the request",
+        }
+    )
+    @files_ns.doc(
+        responses={
+            200: "Image preview returned successfully",
+            400: "Missing or invalid signature parameters",
+            415: "Unsupported file type",
+        }
+    )
     def get(self, file_id):
         file_id = str(file_id)
 
@@ -43,6 +58,25 @@ class ImagePreviewApi(Resource):
 
 @files_ns.route("/<uuid:file_id>/file-preview")
 class FilePreviewApi(Resource):
+    @files_ns.doc("get_file_preview")
+    @files_ns.doc(description="Download a file preview or attachment using signed parameters")
+    @files_ns.doc(
+        params={
+            "file_id": "ID of the file to preview",
+            "timestamp": "Unix timestamp used in the signature",
+            "nonce": "Random string used in the signature",
+            "sign": "HMAC signature verifying the request",
+            "as_attachment": "Whether to download the file as an attachment",
+        }
+    )
+    @files_ns.doc(
+        responses={
+            200: "File stream returned successfully",
+            400: "Missing or invalid signature parameters",
+            404: "File not found",
+            415: "Unsupported file type",
+        }
+    )
     def get(self, file_id):
         file_id = str(file_id)
 
@@ -101,6 +135,20 @@ class FilePreviewApi(Resource):
 
 @files_ns.route("/workspaces/<uuid:workspace_id>/webapp-logo")
 class WorkspaceWebappLogoApi(Resource):
+    @files_ns.doc("get_workspace_webapp_logo")
+    @files_ns.doc(description="Fetch the custom webapp logo for a workspace")
+    @files_ns.doc(
+        params={
+            "workspace_id": "Workspace identifier",
+        }
+    )
+    @files_ns.doc(
+        responses={
+            200: "Logo returned successfully",
+            404: "Webapp logo not configured",
+            415: "Unsupported file type",
+        }
+    )
     def get(self, workspace_id):
         workspace_id = str(workspace_id)
 
@@ -13,6 +13,26 @@ from extensions.ext_database import db as global_db
 
 @files_ns.route("/tools/<uuid:file_id>.<string:extension>")
 class ToolFileApi(Resource):
+    @files_ns.doc("get_tool_file")
+    @files_ns.doc(description="Download a tool file by ID using signed parameters")
+    @files_ns.doc(
+        params={
+            "file_id": "Tool file identifier",
+            "extension": "Expected file extension",
+            "timestamp": "Unix timestamp used in the signature",
+            "nonce": "Random string used in the signature",
+            "sign": "HMAC signature verifying the request",
+            "as_attachment": "Whether to download the file as an attachment",
+        }
+    )
+    @files_ns.doc(
+        responses={
+            200: "Tool file stream returned successfully",
+            403: "Forbidden - invalid signature",
+            404: "File not found",
+            415: "Unsupported file type",
+        }
+    )
     def get(self, file_id, extension):
         file_id = str(file_id)
 
@@ -161,7 +161,7 @@ class OpenSearchVector(BaseVector):
             logger.exception("Error deleting document: %s", error)
 
     def delete(self):
-        self._client.indices.delete(index=self._collection_name.lower())
+        self._client.indices.delete(index=self._collection_name.lower(), ignore_unavailable=True)
 
     def text_exists(self, id: str) -> bool:
         try:
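For context, `ignore_unavailable=True` turns deletion of a missing index into a no-op instead of an error. A minimal sketch assuming a local OpenSearch instance and the opensearch-py client (the host/port are placeholders):

```python
# Sketch: deleting a non-existent index with and without ignore_unavailable.
from opensearchpy import OpenSearch

client = OpenSearch(hosts=[{"host": "localhost", "port": 9200}])  # assumed endpoint

# Without the flag this raises opensearchpy.exceptions.NotFoundError:
# client.indices.delete(index="missing_index")

# With the flag the missing index is silently skipped.
client.indices.delete(index="missing_index", ignore_unavailable=True)
```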
@@ -31,6 +31,7 @@ VARIABLE_TO_PARAMETER_TYPE_MAPPING = {
     VariableEntityType.PARAGRAPH: ToolParameter.ToolParameterType.STRING,
     VariableEntityType.SELECT: ToolParameter.ToolParameterType.SELECT,
     VariableEntityType.NUMBER: ToolParameter.ToolParameterType.NUMBER,
+    VariableEntityType.CHECKBOX: ToolParameter.ToolParameterType.BOOLEAN,
     VariableEntityType.FILE: ToolParameter.ToolParameterType.FILE,
     VariableEntityType.FILE_LIST: ToolParameter.ToolParameterType.FILES,
 }
 
@@ -24,6 +24,7 @@ from core.workflow.graph_events import (
     NodeRunLoopStartedEvent,
     NodeRunLoopSucceededEvent,
     NodeRunPauseRequestedEvent,
+    NodeRunRetrieverResourceEvent,
     NodeRunRetryEvent,
     NodeRunStartedEvent,
     NodeRunStreamChunkEvent,
@@ -112,6 +113,7 @@ class EventHandler:
     @_dispatch.register(NodeRunLoopSucceededEvent)
     @_dispatch.register(NodeRunLoopFailedEvent)
     @_dispatch.register(NodeRunAgentLogEvent)
+    @_dispatch.register(NodeRunRetrieverResourceEvent)
     def _(self, event: GraphNodeEventBase) -> None:
         self._event_collector.collect(event)
 
@@ -193,15 +193,19 @@ class QuestionClassifierNode(Node):
                 finish_reason = event.finish_reason
                 break
 
-        category_name = node_data.classes[0].name
-        category_id = node_data.classes[0].id
+        rendered_classes = [
+            c.model_copy(update={"name": variable_pool.convert_template(c.name).text}) for c in node_data.classes
+        ]
+
+        category_name = rendered_classes[0].name
+        category_id = rendered_classes[0].id
         if "<think>" in result_text:
             result_text = re.sub(r"<think[^>]*>[\s\S]*?</think>", "", result_text, flags=re.IGNORECASE)
         result_text_json = parse_and_check_json_markdown(result_text, [])
         # result_text_json = json.loads(result_text.strip('```JSON\n'))
         if "category_name" in result_text_json and "category_id" in result_text_json:
             category_id_result = result_text_json["category_id"]
-            classes = node_data.classes
+            classes = rendered_classes
             classes_map = {class_.id: class_.name for class_ in classes}
             category_ids = [_class.id for _class in classes]
             if category_id_result in category_ids:
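The point of this change is that class names may contain variable templates, and they are now rendered through the variable pool before the LLM's chosen `category_id` is mapped back to a name. A self-contained sketch of the idea, with toy stand-ins for `VariablePool.convert_template` and the class entities (names here are illustrative, not the project's API):

```python
from pydantic import BaseModel


class ClassConfig(BaseModel):
    id: str
    name: str


def convert_template(template: str, variables: dict[str, str]) -> str:
    # Toy replacement for variable_pool.convert_template(...).text
    for key, value in variables.items():
        template = template.replace("{{#" + key + "#}}", value)
    return template


variables = {"sys.topic": "billing"}
classes = [
    ClassConfig(id="1", name="{{#sys.topic#}} questions"),
    ClassConfig(id="2", name="other"),
]

# Render templated names first, so the returned category_name reflects resolved variables.
rendered = [c.model_copy(update={"name": convert_template(c.name, variables)}) for c in classes]
assert rendered[0].name == "billing questions"
```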
@@ -5,6 +5,7 @@ import json
 from collections.abc import Mapping, Sequence
 from collections.abc import Mapping as TypingMapping
 from copy import deepcopy
+from dataclasses import dataclass
 from typing import Any, Protocol
 
 from pydantic.json import pydantic_encoder
@@ -106,6 +107,23 @@ class GraphProtocol(Protocol):
     def get_outgoing_edges(self, node_id: str) -> Sequence[object]: ...
 
 
+@dataclass(slots=True)
+class _GraphRuntimeStateSnapshot:
+    """Immutable view of a serialized runtime state snapshot."""
+
+    start_at: float
+    total_tokens: int
+    node_run_steps: int
+    llm_usage: LLMUsage
+    outputs: dict[str, Any]
+    variable_pool: VariablePool
+    has_variable_pool: bool
+    ready_queue_dump: str | None
+    graph_execution_dump: str | None
+    response_coordinator_dump: str | None
+    paused_nodes: tuple[str, ...]
+
+
 class GraphRuntimeState:
     """Mutable runtime state shared across graph execution components."""
 
@@ -293,69 +311,28 @@ class GraphRuntimeState:
 
         return json.dumps(snapshot, default=pydantic_encoder)
 
-    def loads(self, data: str | Mapping[str, Any]) -> None:
+    @classmethod
+    def from_snapshot(cls, data: str | Mapping[str, Any]) -> GraphRuntimeState:
         """Restore runtime state from a serialized snapshot."""
 
-        payload: dict[str, Any]
-        if isinstance(data, str):
-            payload = json.loads(data)
-        else:
-            payload = dict(data)
+        snapshot = cls._parse_snapshot_payload(data)
 
-        version = payload.get("version")
-        if version != "1.0":
-            raise ValueError(f"Unsupported GraphRuntimeState snapshot version: {version}")
+        state = cls(
+            variable_pool=snapshot.variable_pool,
+            start_at=snapshot.start_at,
+            total_tokens=snapshot.total_tokens,
+            llm_usage=snapshot.llm_usage,
+            outputs=snapshot.outputs,
+            node_run_steps=snapshot.node_run_steps,
+        )
+        state._apply_snapshot(snapshot)
+        return state
 
-        self._start_at = float(payload.get("start_at", 0.0))
-        total_tokens = int(payload.get("total_tokens", 0))
-        if total_tokens < 0:
-            raise ValueError("total_tokens must be non-negative")
-        self._total_tokens = total_tokens
+    def loads(self, data: str | Mapping[str, Any]) -> None:
+        """Restore runtime state from a serialized snapshot (legacy API)."""
 
-        node_run_steps = int(payload.get("node_run_steps", 0))
-        if node_run_steps < 0:
-            raise ValueError("node_run_steps must be non-negative")
-        self._node_run_steps = node_run_steps
-
-        llm_usage_payload = payload.get("llm_usage", {})
-        self._llm_usage = LLMUsage.model_validate(llm_usage_payload)
-
-        self._outputs = deepcopy(payload.get("outputs", {}))
-
-        variable_pool_payload = payload.get("variable_pool")
-        if variable_pool_payload is not None:
-            self._variable_pool = VariablePool.model_validate(variable_pool_payload)
-
-        ready_queue_payload = payload.get("ready_queue")
-        if ready_queue_payload is not None:
-            self._ready_queue = self._build_ready_queue()
-            self._ready_queue.loads(ready_queue_payload)
-        else:
-            self._ready_queue = None
-
-        graph_execution_payload = payload.get("graph_execution")
-        self._graph_execution = None
-        self._pending_graph_execution_workflow_id = None
-        if graph_execution_payload is not None:
-            try:
-                execution_payload = json.loads(graph_execution_payload)
-                self._pending_graph_execution_workflow_id = execution_payload.get("workflow_id")
-            except (json.JSONDecodeError, TypeError, AttributeError):
-                self._pending_graph_execution_workflow_id = None
-            self.graph_execution.loads(graph_execution_payload)
-
-        response_payload = payload.get("response_coordinator")
-        if response_payload is not None:
-            if self._graph is not None:
-                self.response_coordinator.loads(response_payload)
-            else:
-                self._pending_response_coordinator_dump = response_payload
-        else:
-            self._pending_response_coordinator_dump = None
-            self._response_coordinator = None
-
-        paused_nodes_payload = payload.get("paused_nodes", [])
-        self._paused_nodes = set(map(str, paused_nodes_payload))
+        snapshot = self._parse_snapshot_payload(data)
+        self._apply_snapshot(snapshot)
 
     def register_paused_node(self, node_id: str) -> None:
         """Record a node that should resume when execution is continued."""
@@ -391,3 +368,106 @@ class GraphRuntimeState:
         module = importlib.import_module("core.workflow.graph_engine.response_coordinator")
         coordinator_cls = module.ResponseStreamCoordinator
         return coordinator_cls(variable_pool=self.variable_pool, graph=graph)
+
+    # ------------------------------------------------------------------
+    # Snapshot helpers
+    # ------------------------------------------------------------------
+    @classmethod
+    def _parse_snapshot_payload(cls, data: str | Mapping[str, Any]) -> _GraphRuntimeStateSnapshot:
+        payload: dict[str, Any]
+        if isinstance(data, str):
+            payload = json.loads(data)
+        else:
+            payload = dict(data)
+
+        version = payload.get("version")
+        if version != "1.0":
+            raise ValueError(f"Unsupported GraphRuntimeState snapshot version: {version}")
+
+        start_at = float(payload.get("start_at", 0.0))
+
+        total_tokens = int(payload.get("total_tokens", 0))
+        if total_tokens < 0:
+            raise ValueError("total_tokens must be non-negative")
+
+        node_run_steps = int(payload.get("node_run_steps", 0))
+        if node_run_steps < 0:
+            raise ValueError("node_run_steps must be non-negative")
+
+        llm_usage_payload = payload.get("llm_usage", {})
+        llm_usage = LLMUsage.model_validate(llm_usage_payload)
+
+        outputs_payload = deepcopy(payload.get("outputs", {}))
+
+        variable_pool_payload = payload.get("variable_pool")
+        has_variable_pool = variable_pool_payload is not None
+        variable_pool = VariablePool.model_validate(variable_pool_payload) if has_variable_pool else VariablePool()
+
+        ready_queue_payload = payload.get("ready_queue")
+        graph_execution_payload = payload.get("graph_execution")
+        response_payload = payload.get("response_coordinator")
+        paused_nodes_payload = payload.get("paused_nodes", [])
+
+        return _GraphRuntimeStateSnapshot(
+            start_at=start_at,
+            total_tokens=total_tokens,
+            node_run_steps=node_run_steps,
+            llm_usage=llm_usage,
+            outputs=outputs_payload,
+            variable_pool=variable_pool,
+            has_variable_pool=has_variable_pool,
+            ready_queue_dump=ready_queue_payload,
+            graph_execution_dump=graph_execution_payload,
+            response_coordinator_dump=response_payload,
+            paused_nodes=tuple(map(str, paused_nodes_payload)),
+        )
+
+    def _apply_snapshot(self, snapshot: _GraphRuntimeStateSnapshot) -> None:
+        self._start_at = snapshot.start_at
+        self._total_tokens = snapshot.total_tokens
+        self._node_run_steps = snapshot.node_run_steps
+        self._llm_usage = snapshot.llm_usage.model_copy()
+        self._outputs = deepcopy(snapshot.outputs)
+        if snapshot.has_variable_pool or self._variable_pool is None:
+            self._variable_pool = snapshot.variable_pool
+
+        self._restore_ready_queue(snapshot.ready_queue_dump)
+        self._restore_graph_execution(snapshot.graph_execution_dump)
+        self._restore_response_coordinator(snapshot.response_coordinator_dump)
+        self._paused_nodes = set(snapshot.paused_nodes)
+
+    def _restore_ready_queue(self, payload: str | None) -> None:
+        if payload is not None:
+            self._ready_queue = self._build_ready_queue()
+            self._ready_queue.loads(payload)
+        else:
+            self._ready_queue = None
+
+    def _restore_graph_execution(self, payload: str | None) -> None:
+        self._graph_execution = None
+        self._pending_graph_execution_workflow_id = None
+
+        if payload is None:
+            return
+
+        try:
+            execution_payload = json.loads(payload)
+            self._pending_graph_execution_workflow_id = execution_payload.get("workflow_id")
+        except (json.JSONDecodeError, TypeError, AttributeError):
+            self._pending_graph_execution_workflow_id = None
+
+        self.graph_execution.loads(payload)
+
+    def _restore_response_coordinator(self, payload: str | None) -> None:
+        if payload is None:
+            self._pending_response_coordinator_dump = None
+            self._response_coordinator = None
+            return
+
+        if self._graph is not None:
+            self.response_coordinator.loads(payload)
+            self._pending_response_coordinator_dump = None
+            return
+
+        self._pending_response_coordinator_dump = payload
+        self._response_coordinator = None
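Taken together, the refactor splits restoration into a parsing step and an apply step, and adds a constructor-style entry point next to the legacy in-place `loads`. A minimal usage sketch, mirroring the calls that appear in the tests further down in this diff (import path taken from the test file; behaviour otherwise unverified here):

```python
from time import time

from core.workflow.runtime import GraphRuntimeState, VariablePool

state = GraphRuntimeState(variable_pool=VariablePool(), start_at=time())
state.total_tokens = 10
state.node_run_steps = 3

snapshot = state.dumps()  # versioned JSON snapshot ("version": "1.0")

# New API: build a fresh instance directly from the snapshot.
restored = GraphRuntimeState.from_snapshot(snapshot)
assert restored.total_tokens == 10

# Legacy API kept for callers that re-hydrate an existing instance in place.
existing = GraphRuntimeState(variable_pool=VariablePool(), start_at=0.0)
existing.loads(snapshot)
assert existing.node_run_steps == 3
```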
@@ -32,7 +32,7 @@ if [[ "${MODE}" == "worker" ]]; then
 
   exec celery -A celery_entrypoint.celery worker -P ${CELERY_WORKER_CLASS:-gevent} $CONCURRENCY_OPTION \
     --max-tasks-per-child ${MAX_TASKS_PER_CHILD:-50} --loglevel ${LOG_LEVEL:-INFO} \
-    -Q ${CELERY_QUEUES:-dataset,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation} \
+    -Q ${CELERY_QUEUES:-dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation} \
     --prefetch-multiplier=1
 
 elif [[ "${MODE}" == "beat" ]]; then
@@ -6,10 +6,11 @@ from flask_login import user_loaded_from_request, user_logged_in
 from werkzeug.exceptions import NotFound, Unauthorized
 
 from configs import dify_config
+from constants import HEADER_NAME_APP_CODE
 from dify_app import DifyApp
 from extensions.ext_database import db
 from libs.passport import PassportService
-from libs.token import extract_access_token
+from libs.token import extract_access_token, extract_webapp_passport
 from models import Account, Tenant, TenantAccountJoin
 from models.model import AppMCPServer, EndUser
 from services.account_service import AccountService
@@ -61,14 +62,30 @@ def load_user_from_request(request_from_flask_login):
         logged_in_account = AccountService.load_logged_in_account(account_id=user_id)
         return logged_in_account
     elif request.blueprint == "web":
-        decoded = PassportService().verify(auth_token)
-        end_user_id = decoded.get("end_user_id")
-        if not end_user_id:
-            raise Unauthorized("Invalid Authorization token.")
-        end_user = db.session.query(EndUser).where(EndUser.id == decoded["end_user_id"]).first()
-        if not end_user:
-            raise NotFound("End user not found.")
-        return end_user
+        app_code = request.headers.get(HEADER_NAME_APP_CODE)
+        webapp_token = extract_webapp_passport(app_code, request) if app_code else None
+
+        if webapp_token:
+            decoded = PassportService().verify(webapp_token)
+            end_user_id = decoded.get("end_user_id")
+            if not end_user_id:
+                raise Unauthorized("Invalid Authorization token.")
+            end_user = db.session.query(EndUser).where(EndUser.id == end_user_id).first()
+            if not end_user:
+                raise NotFound("End user not found.")
+            return end_user
+        else:
+            if not auth_token:
+                raise Unauthorized("Invalid Authorization token.")
+            decoded = PassportService().verify(auth_token)
+            end_user_id = decoded.get("end_user_id")
+            if end_user_id:
+                end_user = db.session.query(EndUser).where(EndUser.id == end_user_id).first()
+                if not end_user:
+                    raise NotFound("End user not found.")
+                return end_user
+            else:
+                raise Unauthorized("Invalid Authorization token for web API.")
     elif request.blueprint == "mcp":
         server_code = request.view_args.get("server_code") if request.view_args else None
         if not server_code:
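In short, the web blueprint now prefers the per-app passport cookie (selected via the app-code header) and only falls back to the bearer token. A condensed sketch of that resolution order using the names from this diff (the surrounding request/session plumbing is simplified and is not the project's actual code):

```python
from flask import Request
from werkzeug.exceptions import Unauthorized

from constants import HEADER_NAME_APP_CODE
from libs.passport import PassportService
from libs.token import extract_webapp_passport


def resolve_end_user_id(request: Request, auth_token: str | None) -> str:
    app_code = request.headers.get(HEADER_NAME_APP_CODE)
    webapp_token = extract_webapp_passport(app_code, request) if app_code else None
    token = webapp_token or auth_token  # per-app passport cookie wins over the bearer token
    if not token:
        raise Unauthorized("Invalid Authorization token.")
    decoded = PassportService().verify(token)
    end_user_id = decoded.get("end_user_id")
    if not end_user_id:
        raise Unauthorized("Invalid Authorization token.")
    return end_user_id
```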
@@ -38,9 +38,6 @@ def _real_cookie_name(cookie_name: str) -> str:
 
 
 def _try_extract_from_header(request: Request) -> str | None:
-    """
-    Try to extract access token from header
-    """
     auth_header = request.headers.get("Authorization")
     if auth_header:
         if " " not in auth_header:
@@ -55,27 +52,19 @@ def _try_extract_from_header(request: Request) -> str | None:
     return None
 
 
+def extract_refresh_token(request: Request) -> str | None:
+    return request.cookies.get(_real_cookie_name(COOKIE_NAME_REFRESH_TOKEN))
+
+
 def extract_csrf_token(request: Request) -> str | None:
-    """
-    Try to extract CSRF token from header or cookie.
-    """
     return request.headers.get(HEADER_NAME_CSRF_TOKEN)
 
 
 def extract_csrf_token_from_cookie(request: Request) -> str | None:
-    """
-    Try to extract CSRF token from cookie.
-    """
     return request.cookies.get(_real_cookie_name(COOKIE_NAME_CSRF_TOKEN))
 
 
 def extract_access_token(request: Request) -> str | None:
-    """
-    Try to extract access token from cookie, header or params.
-
-    Access token is either for console session or webapp passport exchange.
-    """
-
     def _try_extract_from_cookie(request: Request) -> str | None:
         return request.cookies.get(_real_cookie_name(COOKIE_NAME_ACCESS_TOKEN))
 
@@ -83,20 +72,10 @@ def extract_access_token(request: Request) -> str | None:
-
-
-def extract_webapp_access_token(request: Request) -> str | None:
-    """
-    Try to extract webapp access token from cookie, then header.
-    """
-
-    return request.cookies.get(_real_cookie_name(COOKIE_NAME_WEBAPP_ACCESS_TOKEN)) or _try_extract_from_header(request)
-
-
 def extract_webapp_passport(app_code: str, request: Request) -> str | None:
     """
     Try to extract app token from header or params.
 
     Webapp access token (part of passport) is only used for webapp session.
     """
 
     def _try_extract_passport_token_from_cookie(request: Request) -> str | None:
         return request.cookies.get(_real_cookie_name(COOKIE_NAME_PASSPORT + "-" + app_code))
 
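The CSRF helpers above read the token from the header and from the cookie separately, which is what a double-submit check needs. One plausible way they combine, shown as a sketch rather than the project's actual middleware:

```python
from flask import Request, abort

from libs.token import extract_csrf_token, extract_csrf_token_from_cookie


def enforce_csrf(request: Request) -> None:
    # Compare the header-supplied token with the cookie-supplied one (double submit).
    header_token = extract_csrf_token(request)
    cookie_token = extract_csrf_token_from_cookie(request)
    if not header_token or header_token != cookie_token:
        abort(403, description="CSRF token missing or mismatched")
```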
@@ -117,7 +117,7 @@ dev = [
     "pytest-cov~=4.1.0",
     "pytest-env~=1.1.3",
     "pytest-mock~=3.14.0",
-    "testcontainers~=4.10.0",
+    "testcontainers~=4.13.2",
     "types-aiofiles~=24.1.0",
     "types-beautifulsoup4~=4.12.0",
     "types-cachetools~=5.5.0",
@@ -82,54 +82,51 @@ class AudioService:
         message_id: str | None = None,
         is_draft: bool = False,
     ):
         from app import app
 
         def invoke_tts(text_content: str, app_model: App, voice: str | None = None, is_draft: bool = False):
-            with app.app_context():
-                if voice is None:
-                    if app_model.mode in {AppMode.ADVANCED_CHAT, AppMode.WORKFLOW}:
-                        if is_draft:
-                            workflow = WorkflowService().get_draft_workflow(app_model=app_model)
-                        else:
-                            workflow = app_model.workflow
-                        if (
-                            workflow is None
-                            or "text_to_speech" not in workflow.features_dict
-                            or not workflow.features_dict["text_to_speech"].get("enabled")
-                        ):
+            if voice is None:
+                if app_model.mode in {AppMode.ADVANCED_CHAT, AppMode.WORKFLOW}:
+                    if is_draft:
+                        workflow = WorkflowService().get_draft_workflow(app_model=app_model)
+                    else:
+                        workflow = app_model.workflow
+                    if (
+                        workflow is None
+                        or "text_to_speech" not in workflow.features_dict
+                        or not workflow.features_dict["text_to_speech"].get("enabled")
+                    ):
+                        raise ValueError("TTS is not enabled")
+
+                    voice = workflow.features_dict["text_to_speech"].get("voice")
+                else:
+                    if not is_draft:
+                        if app_model.app_model_config is None:
+                            raise ValueError("AppModelConfig not found")
+                        text_to_speech_dict = app_model.app_model_config.text_to_speech_dict
+
+                        if not text_to_speech_dict.get("enabled"):
-                            raise ValueError("TTS is not enabled")
-
-                        voice = workflow.features_dict["text_to_speech"].get("voice")
-                    else:
-                        if not is_draft:
-                            if app_model.app_model_config is None:
-                                raise ValueError("AppModelConfig not found")
-                            text_to_speech_dict = app_model.app_model_config.text_to_speech_dict
+                            voice = text_to_speech_dict.get("voice")
+
-                            if not text_to_speech_dict.get("enabled"):
-                                raise ValueError("TTS is not enabled")
-
-                            voice = text_to_speech_dict.get("voice")
-
+            model_manager = ModelManager()
+            model_instance = model_manager.get_default_model_instance(
+                tenant_id=app_model.tenant_id, model_type=ModelType.TTS
+            )
+            try:
+                if not voice:
+                    voices = model_instance.get_tts_voices()
+                    if voices:
+                        voice = voices[0].get("value")
+                        if not voice:
+                            raise ValueError("Sorry, no voice available.")
+                    else:
-                model_manager = ModelManager()
-                model_instance = model_manager.get_default_model_instance(
-                    tenant_id=app_model.tenant_id, model_type=ModelType.TTS
-                )
-                try:
-                    if not voice:
-                        voices = model_instance.get_tts_voices()
-                        if voices:
-                            voice = voices[0].get("value")
-                            if not voice:
-                                raise ValueError("Sorry, no voice available.")
-                        else:
                         raise ValueError("Sorry, no voice available.")
 
-                    return model_instance.invoke_tts(
-                        content_text=text_content.strip(), user=end_user, tenant_id=app_model.tenant_id, voice=voice
-                    )
-                except Exception as e:
-                    raise e
+                return model_instance.invoke_tts(
+                    content_text=text_content.strip(), user=end_user, tenant_id=app_model.tenant_id, voice=voice
+                )
+            except Exception as e:
+                raise e
 
         if message_id:
             try:
@@ -283,7 +283,7 @@ class VariableTruncator:
                 break
 
             remaining_budget = target_size - used_size
-            if item is None or isinstance(item, (str, list, dict, bool, int, float)):
+            if item is None or isinstance(item, (str, list, dict, bool, int, float, UpdatedVariable)):
                 part_result = self._truncate_json_primitives(item, remaining_budget)
             else:
                 raise UnknownTypeError(f"got unknown type {type(item)} in array truncation")
@@ -373,6 +373,11 @@ class VariableTruncator:
 
         return _PartResult(truncated_obj, used_size, truncated)
 
+    @overload
+    def _truncate_json_primitives(
+        self, val: UpdatedVariable, target_size: int
+    ) -> _PartResult[Mapping[str, object]]: ...
+
     @overload
     def _truncate_json_primitives(self, val: str, target_size: int) -> _PartResult[str]: ...
 
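The new declaration only adds a static typing overload; the runtime behaviour comes from the single implementation that follows the stubs. A self-contained sketch of that pattern with generic names (not the project's classes):

```python
from typing import overload


@overload
def truncate(value: str, limit: int) -> str: ...
@overload
def truncate(value: list, limit: int) -> list: ...
def truncate(value, limit):
    # Single runtime implementation; the overloads above only refine the return type
    # that type checkers infer for each input type.
    return value[:limit]


text = truncate("hello world", 5)    # checkers infer str
items = truncate([1, 2, 3, 4], 2)    # checkers infer list
```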
@@ -182,6 +182,28 @@ class TestOpenSearchVector:
         assert len(ids) == 1
         assert ids[0] == "mock_id"
 
+    def test_delete_nonexistent_index(self):
+        """Test deleting a non-existent index."""
+        # Create a vector instance with a non-existent collection name
+        self.vector._client.indices.exists.return_value = False
+
+        # Should not raise an exception
+        self.vector.delete()
+
+        # Verify that exists was called but delete was not
+        self.vector._client.indices.exists.assert_called_once_with(index=self.collection_name.lower())
+        self.vector._client.indices.delete.assert_not_called()
+
+    def test_delete_existing_index(self):
+        """Test deleting an existing index."""
+        self.vector._client.indices.exists.return_value = True
+
+        self.vector.delete()
+
+        # Verify both exists and delete were called
+        self.vector._client.indices.exists.assert_called_once_with(index=self.collection_name.lower())
+        self.vector._client.indices.delete.assert_called_once_with(index=self.collection_name.lower())
+
 
 @pytest.mark.usefixtures("setup_mock_redis")
 class TestOpenSearchVectorWithRedis:
@@ -8,6 +8,18 @@ from core.model_runtime.entities.llm_entities import LLMUsage
 from core.workflow.runtime import GraphRuntimeState, ReadOnlyGraphRuntimeStateWrapper, VariablePool
 
 
+class StubCoordinator:
+    def __init__(self) -> None:
+        self.state = "initial"
+
+    def dumps(self) -> str:
+        return json.dumps({"state": self.state})
+
+    def loads(self, data: str) -> None:
+        payload = json.loads(data)
+        self.state = payload["state"]
+
+
 class TestGraphRuntimeState:
     def test_property_getters_and_setters(self):
         # FIXME(-LAN-): Mock VariablePool if needed
@@ -191,17 +203,6 @@ class TestGraphRuntimeState:
         graph_execution.exceptions_count = 4
         graph_execution.started = True
 
-        class StubCoordinator:
-            def __init__(self) -> None:
-                self.state = "initial"
-
-            def dumps(self) -> str:
-                return json.dumps({"state": self.state})
-
-            def loads(self, data: str) -> None:
-                payload = json.loads(data)
-                self.state = payload["state"]
-
         mock_graph = MagicMock()
         stub = StubCoordinator()
         with patch.object(GraphRuntimeState, "_build_response_coordinator", return_value=stub):
@@ -211,8 +212,7 @@ class TestGraphRuntimeState:
 
         snapshot = state.dumps()
 
-        restored = GraphRuntimeState(variable_pool=VariablePool(), start_at=0.0)
-        restored.loads(snapshot)
+        restored = GraphRuntimeState.from_snapshot(snapshot)
 
         assert restored.total_tokens == 10
         assert restored.node_run_steps == 3
@@ -235,3 +235,47 @@ class TestGraphRuntimeState:
             restored.attach_graph(mock_graph)
 
         assert new_stub.state == "configured"
+
+    def test_loads_rehydrates_existing_instance(self):
+        variable_pool = VariablePool()
+        variable_pool.add(("node", "key"), "value")
+
+        state = GraphRuntimeState(variable_pool=variable_pool, start_at=time())
+        state.total_tokens = 7
+        state.node_run_steps = 2
+        state.set_output("foo", "bar")
+        state.ready_queue.put("node-1")
+
+        execution = state.graph_execution
+        execution.workflow_id = "wf-456"
+        execution.started = True
+
+        mock_graph = MagicMock()
+        original_stub = StubCoordinator()
+        with patch.object(GraphRuntimeState, "_build_response_coordinator", return_value=original_stub):
+            state.attach_graph(mock_graph)
+
+        original_stub.state = "configured"
+        snapshot = state.dumps()
+
+        new_stub = StubCoordinator()
+        with patch.object(GraphRuntimeState, "_build_response_coordinator", return_value=new_stub):
+            restored = GraphRuntimeState(variable_pool=VariablePool(), start_at=0.0)
+            restored.attach_graph(mock_graph)
+            restored.loads(snapshot)
+
+        assert restored.total_tokens == 7
+        assert restored.node_run_steps == 2
+        assert restored.get_output("foo") == "bar"
+        assert restored.ready_queue.qsize() == 1
+        assert restored.ready_queue.get(timeout=0.01) == "node-1"
+
+        restored_segment = restored.variable_pool.get(("node", "key"))
+        assert restored_segment is not None
+        assert restored_segment.value == "value"
+
+        restored_execution = restored.graph_execution
+        assert restored_execution.workflow_id == "wf-456"
+        assert restored_execution.started is True
+
+        assert new_stub.state == "configured"
@@ -1590,7 +1590,7 @@ dev = [
     { name = "ruff", specifier = "~=0.14.0" },
     { name = "scipy-stubs", specifier = ">=1.15.3.0" },
     { name = "sseclient-py", specifier = ">=1.8.0" },
-    { name = "testcontainers", specifier = "~=4.10.0" },
+    { name = "testcontainers", specifier = "~=4.13.2" },
     { name = "ty", specifier = "~=0.0.1a19" },
     { name = "types-aiofiles", specifier = "~=24.1.0" },
     { name = "types-beautifulsoup4", specifier = "~=4.12.0" },
@@ -5907,7 +5907,7 @@ wheels = [
 
 [[package]]
 name = "testcontainers"
-version = "4.10.0"
+version = "4.13.2"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "docker" },
@@ -5916,9 +5916,9 @@ dependencies = [
     { name = "urllib3" },
     { name = "wrapt" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/a1/49/9c618aff1c50121d183cdfbc3a4a5cf2727a2cde1893efe6ca55c7009196/testcontainers-4.10.0.tar.gz", hash = "sha256:03f85c3e505d8b4edeb192c72a961cebbcba0dd94344ae778b4a159cb6dcf8d3", size = 63327, upload-time = "2025-04-02T16:13:27.582Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/18/51/edac83edab339d8b4dce9a7b659163afb1ea7e011bfed1d5573d495a4485/testcontainers-4.13.2.tar.gz", hash = "sha256:2315f1e21b059427a9d11e8921f85fef322fbe0d50749bcca4eaa11271708ba4", size = 78692, upload-time = "2025-10-07T21:53:07.531Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/1c/0a/824b0c1ecf224802125279c3effff2e25ed785ed046e67da6e53d928de4c/testcontainers-4.10.0-py3-none-any.whl", hash = "sha256:31ed1a81238c7e131a2a29df6db8f23717d892b592fa5a1977fd0dcd0c23fc23", size = 107414, upload-time = "2025-04-02T16:13:25.785Z" },
+    { url = "https://files.pythonhosted.org/packages/2a/5e/73aa94770f1df0595364aed526f31d54440db5492911e2857318ed326e51/testcontainers-4.13.2-py3-none-any.whl", hash = "sha256:0209baf8f4274b568cde95bef2cadf7b1d33b375321f793790462e235cd684ee", size = 124771, upload-time = "2025-10-07T21:53:05.937Z" },
 ]
 
 [[package]]
@@ -201,6 +201,10 @@ ENABLE_WEBSITE_JINAREADER=true
 ENABLE_WEBSITE_FIRECRAWL=true
 ENABLE_WEBSITE_WATERCRAWL=true
 
+# Enable inline LaTeX rendering with single dollar signs ($...$) in the web frontend
+# Default is false for security reasons to prevent conflicts with regular text
+NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX=false
+
 # ------------------------------
 # Database Configuration
 # The database uses PostgreSQL. Please use the public schema.
@@ -260,16 +264,18 @@ POSTGRES_MAINTENANCE_WORK_MEM=64MB
 POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB
 
 # Sets the maximum allowed duration of any statement before termination.
-# Default is 60000 milliseconds.
+# Default is 0 (no timeout).
 #
 # Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-STATEMENT-TIMEOUT
-POSTGRES_STATEMENT_TIMEOUT=60000
+# A value of 0 prevents the server from timing out statements.
+POSTGRES_STATEMENT_TIMEOUT=0
 
 # Sets the maximum allowed duration of any idle in-transaction session before termination.
-# Default is 60000 milliseconds.
+# Default is 0 (no timeout).
 #
 # Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT
-POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=60000
+# A value of 0 prevents the server from terminating idle sessions.
+POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=0
 
 # ------------------------------
 # Redis Configuration
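With the server-level defaults now set to 0 (no timeout), callers that still want a guard can set one per session instead. A minimal Python sketch using psycopg2; the DSN below reuses the compose defaults and is an assumption for illustration:

```python
import psycopg2

# Assumed DSN based on the default credentials in the compose files.
conn = psycopg2.connect("postgresql://postgres:difyai123456@localhost:5432/dify")
with conn, conn.cursor() as cur:
    cur.execute("SET statement_timeout = '30s'")                       # this session only
    cur.execute("SET idle_in_transaction_session_timeout = '60s'")     # this session only
    cur.execute("SELECT 1")
    print(cur.fetchone())
conn.close()
```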
@@ -314,7 +320,7 @@ REDIS_CLUSTERS_PASSWORD=
 # Celery Configuration
 # ------------------------------
 
-# Use standalone redis as the broker, and redis db 1 for celery broker. (redis_username is usually set by defualt as empty)
+# Use standalone redis as the broker, and redis db 1 for celery broker. (redis_username is usually set by default as empty)
 # Format as follows: `redis://<redis_username>:<redis_password>@<redis_host>:<redis_port>/<redis_database>`.
 # Example: redis://:difyai123456@redis:6379/1
 # If use Redis Sentinel, format as follows: `sentinel://<redis_username>:<redis_password>@<sentinel_host1>:<sentinel_port>/<redis_database>`
@@ -115,8 +115,8 @@ services:
       -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
       -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
       -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
-      -c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-60000}'
-      -c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-60000}'
+      -c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-0}'
+      -c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0}'
     volumes:
       - ./volumes/db/data:/var/lib/postgresql/data
     healthcheck:
@@ -15,8 +15,8 @@ services:
       -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
       -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
       -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
-      -c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-60000}'
-      -c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-60000}'
+      -c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-0}'
+      -c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0}'
     volumes:
       - ${PGDATA_HOST_VOLUME:-./volumes/db/data}:/var/lib/postgresql/data
     ports:
@@ -51,6 +51,7 @@ x-shared-env: &shared-api-worker-env
   ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true}
   ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true}
   ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true}
+  NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX: ${NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX:-false}
   DB_USERNAME: ${DB_USERNAME:-postgres}
   DB_PASSWORD: ${DB_PASSWORD:-difyai123456}
   DB_HOST: ${DB_HOST:-db}
@@ -68,8 +69,8 @@ x-shared-env: &shared-api-worker-env
   POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB}
   POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}
   POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}
-  POSTGRES_STATEMENT_TIMEOUT: ${POSTGRES_STATEMENT_TIMEOUT:-60000}
-  POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT: ${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-60000}
+  POSTGRES_STATEMENT_TIMEOUT: ${POSTGRES_STATEMENT_TIMEOUT:-0}
+  POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT: ${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0}
   REDIS_HOST: ${REDIS_HOST:-redis}
   REDIS_PORT: ${REDIS_PORT:-6379}
   REDIS_USERNAME: ${REDIS_USERNAME:-}
@@ -724,8 +725,8 @@ services:
       -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
       -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
      -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
-      -c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-60000}'
-      -c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-60000}'
+      -c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-0}'
+      -c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0}'
     volumes:
       - ./volumes/db/data:/var/lib/postgresql/data
     healthcheck:
@@ -41,16 +41,18 @@ POSTGRES_MAINTENANCE_WORK_MEM=64MB
 POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB
 
 # Sets the maximum allowed duration of any statement before termination.
-# Default is 60000 milliseconds.
+# Default is 0 (no timeout).
 #
 # Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-STATEMENT-TIMEOUT
-POSTGRES_STATEMENT_TIMEOUT=60000
+# A value of 0 prevents the server from timing out statements.
+POSTGRES_STATEMENT_TIMEOUT=0
 
 # Sets the maximum allowed duration of any idle in-transaction session before termination.
-# Default is 60000 milliseconds.
+# Default is 0 (no timeout).
 #
 # Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT
-POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=60000
+# A value of 0 prevents the server from terminating idle sessions.
+POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=0
 
 # -----------------------------
 # Environment Variables for redis Service
@@ -61,5 +61,9 @@ NEXT_PUBLIC_ENABLE_WEBSITE_JINAREADER=true
 NEXT_PUBLIC_ENABLE_WEBSITE_FIRECRAWL=true
 NEXT_PUBLIC_ENABLE_WEBSITE_WATERCRAWL=true
 
+# Enable inline LaTeX rendering with single dollar signs ($...$)
+# Default is false for security reasons to prevent conflicts with regular text
+NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX=false
+
 # The maximum number of tree node depth for workflow
 NEXT_PUBLIC_MAX_TREE_DEPTH=50
@@ -2,14 +2,14 @@ import React from 'react'
 import Main from '@/app/components/explore/installed-app'
 
 export type IInstalledAppProps = {
-  params: {
+  params?: Promise<{
     appId: string
-  }
+  }>
 }
 
 // Using Next.js page convention for async server components
 async function InstalledApp({ params }: IInstalledAppProps) {
-  const appId = (await params).appId
+  const { appId } = await (params ?? Promise.reject(new Error('Missing params')))
   return (
     <Main id={appId} />
   )
@@ -0,0 +1,3 @@
+export const OAUTH_AUTHORIZE_PENDING_KEY = 'oauth_authorize_pending'
+export const REDIRECT_URL_KEY = 'oauth_redirect_url'
+export const OAUTH_AUTHORIZE_PENDING_TTL = 60 * 3
@@ -19,11 +19,11 @@ import {
 } from '@remixicon/react'
 import dayjs from 'dayjs'
 import { useIsLogin } from '@/service/use-common'
-
-export const OAUTH_AUTHORIZE_PENDING_KEY = 'oauth_authorize_pending'
-export const REDIRECT_URL_KEY = 'oauth_redirect_url'
-
-const OAUTH_AUTHORIZE_PENDING_TTL = 60 * 3
+import {
+  OAUTH_AUTHORIZE_PENDING_KEY,
+  OAUTH_AUTHORIZE_PENDING_TTL,
+  REDIRECT_URL_KEY,
+} from './constants'
 
 function setItemWithExpiry(key: string, value: string, ttl: number) {
   const item = {
@@ -44,7 +44,7 @@ import { appDefaultIconBackground } from '@/config'
 import type { PublishWorkflowParams } from '@/types/workflow'
 import { useAppWhiteListSubjects, useGetUserCanAccessApp } from '@/service/access-control'
 import { AccessMode } from '@/models/access-control'
-import { fetchAppDetail } from '@/service/apps'
+import { fetchAppDetailDirect } from '@/service/apps'
 import { useGlobalPublicStore } from '@/context/global-public-context'
 import { useFormatTimeFromNow } from '@/hooks/use-format-time-from-now'
 
@@ -162,11 +162,16 @@ const AppPublisher = ({
     }
   }, [appDetail?.id])
 
-  const handleAccessControlUpdate = useCallback(() => {
-    fetchAppDetail({ url: '/apps', id: appDetail!.id }).then((res) => {
+  const handleAccessControlUpdate = useCallback(async () => {
+    if (!appDetail)
+      return
+    try {
+      const res = await fetchAppDetailDirect({ url: '/apps', id: appDetail.id })
       setAppDetail(res)
+    }
+    finally {
       setShowAppAccessControl(false)
-    })
+    }
   }, [appDetail, setAppDetail])
 
   const [embeddingModalOpen, setEmbeddingModalOpen] = useState(false)
@@ -14,7 +14,8 @@ import { TransferMethod } from '@/app/components/base/chat/types'
 import { useEventEmitterContextContext } from '@/context/event-emitter'
 import { useProviderContext } from '@/context/provider-context'
 import { useFeatures } from '@/app/components/base/features/hooks'
-import { noop } from 'lodash-es'
+import { cloneDeep, noop } from 'lodash-es'
+import { DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG } from '@/config'
 
 type TextGenerationItemProps = {
   modelAndParameter: ModelAndParameter
@@ -50,8 +51,8 @@ const TextGenerationItem: FC<TextGenerationItemProps> = ({
   const config: TextGenerationConfig = {
     pre_prompt: !isAdvancedMode ? modelConfig.configs.prompt_template : '',
     prompt_type: promptMode,
-    chat_prompt_config: isAdvancedMode ? chatPromptConfig : {},
-    completion_prompt_config: isAdvancedMode ? completionPromptConfig : {},
+    chat_prompt_config: isAdvancedMode ? chatPromptConfig : cloneDeep(DEFAULT_CHAT_PROMPT_CONFIG),
+    completion_prompt_config: isAdvancedMode ? completionPromptConfig : cloneDeep(DEFAULT_COMPLETION_PROMPT_CONFIG),
     user_input_form: promptVariablesToUserInputsForm(modelConfig.configs.prompt_variables),
     dataset_query_variable: contextVar || '',
     // features
@@ -74,6 +75,7 @@ const TextGenerationItem: FC<TextGenerationItemProps> = ({
       datasets: [...postDatasets],
     } as any,
     },
+    system_parameters: modelConfig.system_parameters,
   }
   const {
     completion,
@@ -6,7 +6,7 @@ import {
 import Chat from '@/app/components/base/chat/chat'
 import { useChat } from '@/app/components/base/chat/chat/hooks'
 import { useDebugConfigurationContext } from '@/context/debug-configuration'
-import type { ChatConfig, ChatItem, ChatItemInTree, OnSend } from '@/app/components/base/chat/types'
+import type { ChatConfig, ChatItem, OnSend } from '@/app/components/base/chat/types'
 import { useProviderContext } from '@/context/provider-context'
 import {
   fetchConversationMessages,
@@ -126,7 +126,7 @@ const DebugWithSingleModel = (
     )
   }, [appId, chatList, checkCanSend, completionParams, config, handleSend, inputs, modelConfig.mode, modelConfig.model_id, modelConfig.provider, textGenerationModelList])
 
-  const doRegenerate = useCallback((chatItem: ChatItemInTree, editedQuestion?: { message: string, files?: FileEntity[] }) => {
+  const doRegenerate = useCallback((chatItem: ChatItem, editedQuestion?: { message: string, files?: FileEntity[] }) => {
     const question = editedQuestion ? chatItem : chatList.find(item => item.id === chatItem.parentMessageId)!
     const parentAnswer = chatList.find(item => item.id === question.parentMessageId)
     doSend(editedQuestion ? editedQuestion.message : question.content,
|
|||
ChatConfig,
|
||||
ChatItem,
|
||||
} from '@/app/components/base/chat/types'
|
||||
import cloneDeep from 'lodash-es/cloneDeep'
|
||||
import {
|
||||
AgentStrategy,
|
||||
} from '@/types/app'
|
||||
import { SupportUploadFileTypes } from '@/app/components/workflow/types'
|
||||
import { promptVariablesToUserInputsForm } from '@/utils/model-config'
|
||||
import { useDebugConfigurationContext } from '@/context/debug-configuration'
|
||||
import { useEventEmitterContextContext } from '@/context/event-emitter'
|
||||
import { DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG } from '@/config'
|
||||
|
||||
export const useDebugWithSingleOrMultipleModel = (appId: string) => {
|
||||
const localeDebugWithSingleOrMultipleModelConfigs = localStorage.getItem('app-debug-with-single-or-multiple-models')
|
||||
|
|
@ -95,16 +98,14 @@ export const useConfigFromDebugContext = () => {
|
|||
const config: ChatConfig = {
|
||||
pre_prompt: !isAdvancedMode ? modelConfig.configs.prompt_template : '',
|
||||
prompt_type: promptMode,
|
||||
chat_prompt_config: isAdvancedMode ? chatPromptConfig : {},
|
||||
completion_prompt_config: isAdvancedMode ? completionPromptConfig : {},
|
||||
chat_prompt_config: isAdvancedMode ? chatPromptConfig : cloneDeep(DEFAULT_CHAT_PROMPT_CONFIG),
|
||||
completion_prompt_config: isAdvancedMode ? completionPromptConfig : cloneDeep(DEFAULT_COMPLETION_PROMPT_CONFIG),
|
||||
user_input_form: promptVariablesToUserInputsForm(modelConfig.configs.prompt_variables),
|
||||
dataset_query_variable: contextVar || '',
|
||||
opening_statement: introduction,
|
||||
more_like_this: {
|
||||
enabled: false,
|
||||
},
|
||||
more_like_this: modelConfig.more_like_this ?? { enabled: false },
|
||||
suggested_questions: openingSuggestedQuestions,
|
||||
suggested_questions_after_answer: suggestedQuestionsAfterAnswerConfig,
|
||||
suggested_questions_after_answer: suggestedQuestionsAfterAnswerConfig ?? { enabled: false },
|
||||
text_to_speech: textToSpeechConfig,
|
||||
speech_to_text: speechToTextConfig,
|
||||
retriever_resource: citationConfig,
|
||||
|
|
@ -121,8 +122,13 @@ export const useConfigFromDebugContext = () => {
|
|||
},
|
||||
file_upload: {
|
||||
image: visionConfig,
|
||||
allowed_file_upload_methods: visionConfig.transfer_methods ?? [],
|
||||
allowed_file_types: [SupportUploadFileTypes.image],
|
||||
max_length: visionConfig.number_limits ?? 0,
|
||||
number_limits: visionConfig.number_limits,
|
||||
},
|
||||
annotation_reply: annotationConfig,
|
||||
system_parameters: modelConfig.system_parameters,
|
||||
|
||||
supportAnnotation: true,
|
||||
appId,
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ import type { FC } from 'react'
|
|||
import { useTranslation } from 'react-i18next'
|
||||
import React, { useCallback, useEffect, useRef, useState } from 'react'
|
||||
import { produce, setAutoFreeze } from 'immer'
|
||||
import cloneDeep from 'lodash-es/cloneDeep'
|
||||
import { useBoolean } from 'ahooks'
|
||||
import {
|
||||
RiAddLine,
|
||||
|
|
@ -36,7 +37,7 @@ import ActionButton, { ActionButtonState } from '@/app/components/base/action-bu
|
|||
import type { ModelConfig as BackendModelConfig, VisionFile, VisionSettings } from '@/types/app'
|
||||
import { formatBooleanInputs, promptVariablesToUserInputsForm } from '@/utils/model-config'
|
||||
import TextGeneration from '@/app/components/app/text-generate/item'
|
||||
import { IS_CE_EDITION } from '@/config'
|
||||
import { DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG, IS_CE_EDITION } from '@/config'
|
||||
import type { Inputs } from '@/models/debug'
|
||||
import { useDefaultModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
|
||||
import { ModelFeatureEnum, ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
|
||||
|
|
@ -90,6 +91,7 @@ const Debug: FC<IDebug> = ({
|
|||
completionParams,
|
||||
hasSetContextVar,
|
||||
datasetConfigs,
|
||||
externalDataToolsConfig,
|
||||
} = useContext(ConfigContext)
|
||||
const { eventEmitter } = useEventEmitterContextContext()
|
||||
const { data: text2speechDefaultModel } = useDefaultModel(ModelTypeEnum.textEmbedding)
|
||||
|
|
@ -223,8 +225,8 @@ const Debug: FC<IDebug> = ({
|
|||
const postModelConfig: BackendModelConfig = {
|
||||
pre_prompt: !isAdvancedMode ? modelConfig.configs.prompt_template : '',
|
||||
prompt_type: promptMode,
|
||||
chat_prompt_config: {},
|
||||
completion_prompt_config: {},
|
||||
chat_prompt_config: isAdvancedMode ? chatPromptConfig : cloneDeep(DEFAULT_CHAT_PROMPT_CONFIG),
|
||||
completion_prompt_config: isAdvancedMode ? completionPromptConfig : cloneDeep(DEFAULT_COMPLETION_PROMPT_CONFIG),
|
||||
user_input_form: promptVariablesToUserInputsForm(modelConfig.configs.prompt_variables),
|
||||
dataset_query_variable: contextVar || '',
|
||||
dataset_configs: {
|
||||
|
|
@@ -251,11 +253,8 @@ const Debug: FC<IDebug> = ({
       suggested_questions_after_answer: suggestedQuestionsAfterAnswerConfig,
       speech_to_text: speechToTextConfig,
       retriever_resource: citationConfig,
-    }
-
-    if (isAdvancedMode) {
-      postModelConfig.chat_prompt_config = chatPromptConfig
-      postModelConfig.completion_prompt_config = completionPromptConfig
+      system_parameters: modelConfig.system_parameters,
+      external_data_tools: externalDataToolsConfig,
     }
 
     const data: Record<string, any> = {
@@ -36,14 +36,14 @@ import type {
 } from '@/models/debug'
 import type { ExternalDataTool } from '@/models/common'
 import type { DataSet } from '@/models/datasets'
-import type { ModelConfig as BackendModelConfig, VisionSettings } from '@/types/app'
+import type { ModelConfig as BackendModelConfig, UserInputFormItem, VisionSettings } from '@/types/app'
 import ConfigContext from '@/context/debug-configuration'
 import Config from '@/app/components/app/configuration/config'
 import Debug from '@/app/components/app/configuration/debug'
 import Confirm from '@/app/components/base/confirm'
 import { ModelFeatureEnum, ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
 import { ToastContext } from '@/app/components/base/toast'
-import { fetchAppDetail, updateAppModelConfig } from '@/service/apps'
+import { fetchAppDetailDirect, updateAppModelConfig } from '@/service/apps'
 import { promptVariablesToUserInputsForm, userInputsFormToPromptVariables } from '@/utils/model-config'
 import { fetchDatasets } from '@/service/datasets'
 import { useProviderContext } from '@/context/provider-context'
@@ -186,6 +186,8 @@ const Configuration: FC = () => {
       prompt_template: '',
       prompt_variables: [] as PromptVariable[],
     },
+    chat_prompt_config: clone(DEFAULT_CHAT_PROMPT_CONFIG),
+    completion_prompt_config: clone(DEFAULT_COMPLETION_PROMPT_CONFIG),
     more_like_this: null,
     opening_statement: '',
     suggested_questions: [],
@@ -196,6 +198,14 @@ const Configuration: FC = () => {
suggested_questions_after_answer: null,
retriever_resource: null,
annotation_reply: null,
external_data_tools: [],
system_parameters: {
audio_file_size_limit: 0,
file_size_limit: 0,
image_file_size_limit: 0,
video_file_size_limit: 0,
workflow_file_upload_limit: 0,
},
dataSets: [],
agentConfig: DEFAULT_AGENT_SETTING,
})

@@ -543,169 +553,169 @@ const Configuration: FC = () => {
|
|||
})
|
||||
}
|
||||
setCollectionList(collectionList)
|
||||
fetchAppDetail({ url: '/apps', id: appId }).then(async (res: any) => {
|
||||
setMode(res.mode)
|
||||
const modelConfig = res.model_config
|
||||
const promptMode = modelConfig.prompt_type === PromptMode.advanced ? PromptMode.advanced : PromptMode.simple
|
||||
doSetPromptMode(promptMode)
|
||||
if (promptMode === PromptMode.advanced) {
|
||||
if (modelConfig.chat_prompt_config && modelConfig.chat_prompt_config.prompt.length > 0)
|
||||
setChatPromptConfig(modelConfig.chat_prompt_config)
|
||||
else
|
||||
setChatPromptConfig(clone(DEFAULT_CHAT_PROMPT_CONFIG))
|
||||
setCompletionPromptConfig(modelConfig.completion_prompt_config || clone(DEFAULT_COMPLETION_PROMPT_CONFIG) as any)
|
||||
setCanReturnToSimpleMode(false)
|
||||
}
|
||||
const res = await fetchAppDetailDirect({ url: '/apps', id: appId })
|
||||
setMode(res.mode)
|
||||
const modelConfig = res.model_config as BackendModelConfig
|
||||
const promptMode = modelConfig.prompt_type === PromptMode.advanced ? PromptMode.advanced : PromptMode.simple
|
||||
doSetPromptMode(promptMode)
|
||||
if (promptMode === PromptMode.advanced) {
|
||||
if (modelConfig.chat_prompt_config && modelConfig.chat_prompt_config.prompt.length > 0)
|
||||
setChatPromptConfig(modelConfig.chat_prompt_config)
|
||||
else
|
||||
setChatPromptConfig(clone(DEFAULT_CHAT_PROMPT_CONFIG))
|
||||
setCompletionPromptConfig(modelConfig.completion_prompt_config || clone(DEFAULT_COMPLETION_PROMPT_CONFIG) as any)
|
||||
setCanReturnToSimpleMode(false)
|
||||
}
|
||||
|
||||
const model = res.model_config.model
|
||||
const model = modelConfig.model
|
||||
|
||||
let datasets: any = null
|
||||
let datasets: any = null
|
||||
// old dataset struct
|
||||
if (modelConfig.agent_mode?.tools?.find(({ dataset }: any) => dataset?.enabled))
|
||||
datasets = modelConfig.agent_mode?.tools.filter(({ dataset }: any) => dataset?.enabled)
|
||||
if (modelConfig.agent_mode?.tools?.find(({ dataset }: any) => dataset?.enabled))
|
||||
datasets = modelConfig.agent_mode?.tools.filter(({ dataset }: any) => dataset?.enabled)
|
||||
// new dataset struct
|
||||
else if (modelConfig.dataset_configs.datasets?.datasets?.length > 0)
|
||||
datasets = modelConfig.dataset_configs?.datasets?.datasets
|
||||
else if (modelConfig.dataset_configs.datasets?.datasets?.length > 0)
|
||||
datasets = modelConfig.dataset_configs?.datasets?.datasets
|
||||
|
||||
if (dataSets && datasets?.length && datasets?.length > 0) {
|
||||
const { data: dataSetsWithDetail } = await fetchDatasets({ url: '/datasets', params: { page: 1, ids: datasets.map(({ dataset }: any) => dataset.id) } })
|
||||
datasets = dataSetsWithDetail
|
||||
setDataSets(datasets)
|
||||
}
|
||||
if (dataSets && datasets?.length && datasets?.length > 0) {
|
||||
const { data: dataSetsWithDetail } = await fetchDatasets({ url: '/datasets', params: { page: 1, ids: datasets.map(({ dataset }: any) => dataset.id) } })
|
||||
datasets = dataSetsWithDetail
|
||||
setDataSets(datasets)
|
||||
}
|
||||
|
||||
setIntroduction(modelConfig.opening_statement)
|
||||
setSuggestedQuestions(modelConfig.suggested_questions || [])
|
||||
if (modelConfig.more_like_this)
|
||||
setMoreLikeThisConfig(modelConfig.more_like_this)
|
||||
setIntroduction(modelConfig.opening_statement)
|
||||
setSuggestedQuestions(modelConfig.suggested_questions || [])
|
||||
if (modelConfig.more_like_this)
|
||||
setMoreLikeThisConfig(modelConfig.more_like_this)
|
||||
|
||||
if (modelConfig.suggested_questions_after_answer)
|
||||
setSuggestedQuestionsAfterAnswerConfig(modelConfig.suggested_questions_after_answer)
|
||||
if (modelConfig.suggested_questions_after_answer)
|
||||
setSuggestedQuestionsAfterAnswerConfig(modelConfig.suggested_questions_after_answer)
|
||||
|
||||
if (modelConfig.speech_to_text)
|
||||
setSpeechToTextConfig(modelConfig.speech_to_text)
|
||||
if (modelConfig.speech_to_text)
|
||||
setSpeechToTextConfig(modelConfig.speech_to_text)
|
||||
|
||||
if (modelConfig.text_to_speech)
|
||||
setTextToSpeechConfig(modelConfig.text_to_speech)
|
||||
if (modelConfig.text_to_speech)
|
||||
setTextToSpeechConfig(modelConfig.text_to_speech)
|
||||
|
||||
if (modelConfig.retriever_resource)
|
||||
setCitationConfig(modelConfig.retriever_resource)
|
||||
if (modelConfig.retriever_resource)
|
||||
setCitationConfig(modelConfig.retriever_resource)
|
||||
|
||||
if (modelConfig.annotation_reply) {
|
||||
let annotationConfig = modelConfig.annotation_reply
|
||||
if (modelConfig.annotation_reply.enabled) {
|
||||
annotationConfig = {
|
||||
...modelConfig.annotation_reply,
|
||||
embedding_model: {
|
||||
...modelConfig.annotation_reply.embedding_model,
|
||||
embedding_provider_name: correctModelProvider(modelConfig.annotation_reply.embedding_model.embedding_provider_name),
|
||||
},
|
||||
}
|
||||
if (modelConfig.annotation_reply) {
|
||||
let annotationConfig = modelConfig.annotation_reply
|
||||
if (modelConfig.annotation_reply.enabled) {
|
||||
annotationConfig = {
|
||||
...modelConfig.annotation_reply,
|
||||
embedding_model: {
|
||||
...modelConfig.annotation_reply.embedding_model,
|
||||
embedding_provider_name: correctModelProvider(modelConfig.annotation_reply.embedding_model.embedding_provider_name),
|
||||
},
|
||||
}
|
||||
setAnnotationConfig(annotationConfig, true)
|
||||
}
|
||||
setAnnotationConfig(annotationConfig, true)
|
||||
}
|
||||
|
||||
if (modelConfig.sensitive_word_avoidance)
|
||||
setModerationConfig(modelConfig.sensitive_word_avoidance)
|
||||
if (modelConfig.sensitive_word_avoidance)
|
||||
setModerationConfig(modelConfig.sensitive_word_avoidance)
|
||||
|
||||
if (modelConfig.external_data_tools)
|
||||
setExternalDataToolsConfig(modelConfig.external_data_tools)
|
||||
if (modelConfig.external_data_tools)
|
||||
setExternalDataToolsConfig(modelConfig.external_data_tools)
|
||||
|
||||
const config = {
|
||||
modelConfig: {
|
||||
provider: correctModelProvider(model.provider),
|
||||
model_id: model.name,
|
||||
mode: model.mode,
|
||||
configs: {
|
||||
prompt_template: modelConfig.pre_prompt || '',
|
||||
prompt_variables: userInputsFormToPromptVariables(
|
||||
[
|
||||
...modelConfig.user_input_form,
|
||||
...(
|
||||
modelConfig.external_data_tools?.length
|
||||
? modelConfig.external_data_tools.map((item: any) => {
|
||||
return {
|
||||
external_data_tool: {
|
||||
variable: item.variable as string,
|
||||
label: item.label as string,
|
||||
enabled: item.enabled,
|
||||
type: item.type as string,
|
||||
config: item.config,
|
||||
required: true,
|
||||
icon: item.icon,
|
||||
icon_background: item.icon_background,
|
||||
},
|
||||
}
|
||||
})
|
||||
: []
|
||||
),
|
||||
],
|
||||
modelConfig.dataset_query_variable,
|
||||
),
|
||||
},
|
||||
more_like_this: modelConfig.more_like_this,
|
||||
opening_statement: modelConfig.opening_statement,
|
||||
suggested_questions: modelConfig.suggested_questions,
|
||||
sensitive_word_avoidance: modelConfig.sensitive_word_avoidance,
|
||||
speech_to_text: modelConfig.speech_to_text,
|
||||
text_to_speech: modelConfig.text_to_speech,
|
||||
file_upload: modelConfig.file_upload,
|
||||
suggested_questions_after_answer: modelConfig.suggested_questions_after_answer,
|
||||
retriever_resource: modelConfig.retriever_resource,
|
||||
annotation_reply: modelConfig.annotation_reply,
|
||||
external_data_tools: modelConfig.external_data_tools,
|
||||
dataSets: datasets || [],
|
||||
agentConfig: res.mode === 'agent-chat' ? {
|
||||
max_iteration: DEFAULT_AGENT_SETTING.max_iteration,
|
||||
...modelConfig.agent_mode,
|
||||
const config: PublishConfig = {
|
||||
modelConfig: {
|
||||
provider: correctModelProvider(model.provider),
|
||||
model_id: model.name,
|
||||
mode: model.mode,
|
||||
configs: {
|
||||
prompt_template: modelConfig.pre_prompt || '',
|
||||
prompt_variables: userInputsFormToPromptVariables(
|
||||
([
|
||||
...modelConfig.user_input_form,
|
||||
...(
|
||||
modelConfig.external_data_tools?.length
|
||||
? modelConfig.external_data_tools.map((item: any) => {
|
||||
return {
|
||||
external_data_tool: {
|
||||
variable: item.variable as string,
|
||||
label: item.label as string,
|
||||
enabled: item.enabled,
|
||||
type: item.type as string,
|
||||
config: item.config,
|
||||
required: true,
|
||||
icon: item.icon,
|
||||
icon_background: item.icon_background,
|
||||
},
|
||||
}
|
||||
})
|
||||
: []
|
||||
),
|
||||
]) as unknown as UserInputFormItem[],
|
||||
modelConfig.dataset_query_variable,
|
||||
),
|
||||
},
|
||||
more_like_this: modelConfig.more_like_this ?? { enabled: false },
|
||||
opening_statement: modelConfig.opening_statement,
|
||||
suggested_questions: modelConfig.suggested_questions ?? [],
|
||||
sensitive_word_avoidance: modelConfig.sensitive_word_avoidance,
|
||||
speech_to_text: modelConfig.speech_to_text,
|
||||
text_to_speech: modelConfig.text_to_speech,
|
||||
file_upload: modelConfig.file_upload ?? null,
|
||||
suggested_questions_after_answer: modelConfig.suggested_questions_after_answer ?? { enabled: false },
|
||||
retriever_resource: modelConfig.retriever_resource,
|
||||
annotation_reply: modelConfig.annotation_reply ?? null,
|
||||
external_data_tools: modelConfig.external_data_tools ?? [],
|
||||
system_parameters: modelConfig.system_parameters,
|
||||
dataSets: datasets || [],
|
||||
agentConfig: res.mode === 'agent-chat' ? {
|
||||
max_iteration: DEFAULT_AGENT_SETTING.max_iteration,
|
||||
...modelConfig.agent_mode,
|
||||
// remove dataset
|
||||
enabled: true, // modelConfig.agent_mode?.enabled is not correct. old app: the value of app with dataset's is always true
|
||||
tools: modelConfig.agent_mode?.tools.filter((tool: any) => {
|
||||
return !tool.dataset
|
||||
}).map((tool: any) => {
|
||||
const toolInCollectionList = collectionList.find(c => tool.provider_id === c.id)
|
||||
return {
|
||||
...tool,
|
||||
isDeleted: res.deleted_tools?.some((deletedTool: any) => deletedTool.id === tool.id && deletedTool.tool_name === tool.tool_name),
|
||||
notAuthor: toolInCollectionList?.is_team_authorization === false,
|
||||
...(tool.provider_type === 'builtin' ? {
|
||||
provider_id: correctToolProvider(tool.provider_name, !!toolInCollectionList),
|
||||
provider_name: correctToolProvider(tool.provider_name, !!toolInCollectionList),
|
||||
} : {}),
|
||||
}
|
||||
}),
|
||||
} : DEFAULT_AGENT_SETTING,
|
||||
},
|
||||
completionParams: model.completion_params,
|
||||
}
|
||||
enabled: true, // modelConfig.agent_mode?.enabled is not correct. old app: the value of app with dataset's is always true
|
||||
tools: (modelConfig.agent_mode?.tools ?? []).filter((tool: any) => {
|
||||
return !tool.dataset
|
||||
}).map((tool: any) => {
|
||||
const toolInCollectionList = collectionList.find(c => tool.provider_id === c.id)
|
||||
return {
|
||||
...tool,
|
||||
isDeleted: res.deleted_tools?.some((deletedTool: any) => deletedTool.id === tool.id && deletedTool.tool_name === tool.tool_name) ?? false,
|
||||
notAuthor: toolInCollectionList?.is_team_authorization === false,
|
||||
...(tool.provider_type === 'builtin' ? {
|
||||
provider_id: correctToolProvider(tool.provider_name, !!toolInCollectionList),
|
||||
provider_name: correctToolProvider(tool.provider_name, !!toolInCollectionList),
|
||||
} : {}),
|
||||
}
|
||||
}),
|
||||
strategy: modelConfig.agent_mode?.strategy ?? AgentStrategy.react,
|
||||
} : DEFAULT_AGENT_SETTING,
|
||||
},
|
||||
completionParams: model.completion_params,
|
||||
}
|
||||
|
||||
if (modelConfig.file_upload)
|
||||
handleSetVisionConfig(modelConfig.file_upload.image, true)
|
||||
if (modelConfig.file_upload)
|
||||
handleSetVisionConfig(modelConfig.file_upload.image, true)
|
||||
|
||||
syncToPublishedConfig(config)
|
||||
setPublishedConfig(config)
|
||||
const retrievalConfig = getMultipleRetrievalConfig({
|
||||
...modelConfig.dataset_configs,
|
||||
reranking_model: modelConfig.dataset_configs.reranking_model && {
|
||||
provider: modelConfig.dataset_configs.reranking_model.reranking_provider_name,
|
||||
model: modelConfig.dataset_configs.reranking_model.reranking_model_name,
|
||||
},
|
||||
}, datasets, datasets, {
|
||||
provider: currentRerankProvider?.provider,
|
||||
model: currentRerankModel?.model,
|
||||
})
|
||||
setDatasetConfigs({
|
||||
retrieval_model: RETRIEVE_TYPE.multiWay,
|
||||
...modelConfig.dataset_configs,
|
||||
...retrievalConfig,
|
||||
...(retrievalConfig.reranking_model ? {
|
||||
reranking_model: {
|
||||
reranking_model_name: retrievalConfig.reranking_model.model,
|
||||
reranking_provider_name: correctModelProvider(retrievalConfig.reranking_model.provider),
|
||||
},
|
||||
} : {}),
|
||||
})
|
||||
setHasFetchedDetail(true)
|
||||
syncToPublishedConfig(config)
|
||||
setPublishedConfig(config)
|
||||
const retrievalConfig = getMultipleRetrievalConfig({
|
||||
...modelConfig.dataset_configs,
|
||||
reranking_model: modelConfig.dataset_configs.reranking_model && {
|
||||
provider: modelConfig.dataset_configs.reranking_model.reranking_provider_name,
|
||||
model: modelConfig.dataset_configs.reranking_model.reranking_model_name,
|
||||
},
|
||||
}, datasets, datasets, {
|
||||
provider: currentRerankProvider?.provider,
|
||||
model: currentRerankModel?.model,
|
||||
})
|
||||
setDatasetConfigs({
|
||||
...modelConfig.dataset_configs,
|
||||
...retrievalConfig,
|
||||
...(retrievalConfig.reranking_model ? {
|
||||
reranking_model: {
|
||||
reranking_model_name: retrievalConfig.reranking_model.model,
|
||||
reranking_provider_name: correctModelProvider(retrievalConfig.reranking_model.provider),
|
||||
},
|
||||
} : {}),
|
||||
} as DatasetConfigs)
|
||||
setHasFetchedDetail(true)
|
||||
})()
|
||||
}, [appId])
|
||||
|
||||
|
|
@@ -780,8 +790,8 @@ const Configuration: FC = () => {
// Simple Mode prompt
pre_prompt: !isAdvancedMode ? promptTemplate : '',
prompt_type: promptMode,
chat_prompt_config: {},
completion_prompt_config: {},
chat_prompt_config: isAdvancedMode ? chatPromptConfig : clone(DEFAULT_CHAT_PROMPT_CONFIG),
completion_prompt_config: isAdvancedMode ? completionPromptConfig : clone(DEFAULT_COMPLETION_PROMPT_CONFIG),
user_input_form: promptVariablesToUserInputsForm(promptVariables),
dataset_query_variable: contextVar || '',
// features
|
|
@@ -798,6 +808,7 @@ const Configuration: FC = () => {
...modelConfig.agentConfig,
strategy: isFunctionCall ? AgentStrategy.functionCall : AgentStrategy.react,
},
external_data_tools: externalDataToolsConfig,
model: {
provider: modelAndParameter?.provider || modelConfig.provider,
name: modelId,
|
|
@@ -810,11 +821,7 @@ const Configuration: FC = () => {
datasets: [...postDatasets],
} as any,
},
}

if (isAdvancedMode) {
data.chat_prompt_config = chatPromptConfig
data.completion_prompt_config = completionPromptConfig
system_parameters: modelConfig.system_parameters,
}

await updateAppModelConfig({ url: `/apps/${appId}/model-config`, body: data })
|
|
|
|||
|
|
@@ -132,8 +132,6 @@ const CreateFromDSLModal = ({ show, onSuccess, onClose, activeTab = CreateFromDS
importedVersion: imported_dsl_version ?? '',
systemVersion: current_dsl_version ?? '',
})
if (onClose)
onClose()
setTimeout(() => {
setShowErrorModal(true)
}, 300)
|
|
|
|||
|
|
@@ -14,7 +14,6 @@ import timezone from 'dayjs/plugin/timezone'
import { createContext, useContext } from 'use-context-selector'
import { useShallow } from 'zustand/react/shallow'
import { useTranslation } from 'react-i18next'
import { usePathname, useRouter, useSearchParams } from 'next/navigation'
import type { ChatItemInTree } from '../../base/chat/types'
import Indicator from '../../header/indicator'
import VarPanel from './var-panel'
|
|
@@ -43,10 +42,6 @@ import cn from '@/utils/classnames'
import { noop } from 'lodash-es'
import PromptLogModal from '../../base/prompt-log-modal'

type AppStoreState = ReturnType<typeof useAppStore.getState>
type ConversationListItem = ChatConversationGeneralDetail | CompletionConversationGeneralDetail
type ConversationSelection = ConversationListItem | { id: string; isPlaceholder?: true }

dayjs.extend(utc)
dayjs.extend(timezone)
|
||||
|
|
@@ -206,7 +201,7 @@ function DetailPanel({ detail, onFeedback }: IDetailPanel) {
const { formatTime } = useTimestamp()
const { onClose, appDetail } = useContext(DrawerContext)
const { notify } = useContext(ToastContext)
const { currentLogItem, setCurrentLogItem, showMessageLogModal, setShowMessageLogModal, showPromptLogModal, setShowPromptLogModal, currentLogModalActiveTab } = useAppStore(useShallow((state: AppStoreState) => ({
const { currentLogItem, setCurrentLogItem, showMessageLogModal, setShowMessageLogModal, showPromptLogModal, setShowPromptLogModal, currentLogModalActiveTab } = useAppStore(useShallow(state => ({
currentLogItem: state.currentLogItem,
setCurrentLogItem: state.setCurrentLogItem,
showMessageLogModal: state.showMessageLogModal,
|
|
@@ -898,113 +893,20 @@ const ChatConversationDetailComp: FC<{ appId?: string; conversationId?: string }
|
|||
const ConversationList: FC<IConversationList> = ({ logs, appDetail, onRefresh }) => {
|
||||
const { t } = useTranslation()
|
||||
const { formatTime } = useTimestamp()
|
||||
const router = useRouter()
|
||||
const pathname = usePathname()
|
||||
const searchParams = useSearchParams()
|
||||
const conversationIdInUrl = searchParams.get('conversation_id') ?? undefined
|
||||
|
||||
const media = useBreakpoints()
|
||||
const isMobile = media === MediaType.mobile
|
||||
|
||||
const [showDrawer, setShowDrawer] = useState<boolean>(false) // Whether to display the chat details drawer
|
||||
const [currentConversation, setCurrentConversation] = useState<ConversationSelection | undefined>() // Currently selected conversation
|
||||
const closingConversationIdRef = useRef<string | null>(null)
|
||||
const pendingConversationIdRef = useRef<string | null>(null)
|
||||
const pendingConversationCacheRef = useRef<ConversationSelection | undefined>(undefined)
|
||||
const [currentConversation, setCurrentConversation] = useState<ChatConversationGeneralDetail | CompletionConversationGeneralDetail | undefined>() // Currently selected conversation
|
||||
const isChatMode = appDetail.mode !== 'completion' // Whether the app is a chat app
|
||||
const isChatflow = appDetail.mode === 'advanced-chat' // Whether the app is a chatflow app
|
||||
const { setShowPromptLogModal, setShowAgentLogModal, setShowMessageLogModal } = useAppStore(useShallow((state: AppStoreState) => ({
|
||||
const { setShowPromptLogModal, setShowAgentLogModal, setShowMessageLogModal } = useAppStore(useShallow(state => ({
|
||||
setShowPromptLogModal: state.setShowPromptLogModal,
|
||||
setShowAgentLogModal: state.setShowAgentLogModal,
|
||||
setShowMessageLogModal: state.setShowMessageLogModal,
|
||||
})))
|
||||
|
||||
const activeConversationId = conversationIdInUrl ?? pendingConversationIdRef.current ?? currentConversation?.id
|
||||
|
||||
const buildUrlWithConversation = useCallback((conversationId?: string) => {
|
||||
const params = new URLSearchParams(searchParams.toString())
|
||||
if (conversationId)
|
||||
params.set('conversation_id', conversationId)
|
||||
else
|
||||
params.delete('conversation_id')
|
||||
|
||||
const queryString = params.toString()
|
||||
return queryString ? `${pathname}?${queryString}` : pathname
|
||||
}, [pathname, searchParams])
|
||||
|
||||
const handleRowClick = useCallback((log: ConversationListItem) => {
|
||||
if (conversationIdInUrl === log.id) {
|
||||
if (!showDrawer)
|
||||
setShowDrawer(true)
|
||||
|
||||
if (!currentConversation || currentConversation.id !== log.id)
|
||||
setCurrentConversation(log)
|
||||
return
|
||||
}
|
||||
|
||||
pendingConversationIdRef.current = log.id
|
||||
pendingConversationCacheRef.current = log
|
||||
if (!showDrawer)
|
||||
setShowDrawer(true)
|
||||
|
||||
if (currentConversation?.id !== log.id)
|
||||
setCurrentConversation(undefined)
|
||||
|
||||
router.push(buildUrlWithConversation(log.id), { scroll: false })
|
||||
}, [buildUrlWithConversation, conversationIdInUrl, currentConversation, router, showDrawer])
|
||||
|
||||
const currentConversationId = currentConversation?.id
|
||||
|
||||
useEffect(() => {
|
||||
if (!conversationIdInUrl) {
|
||||
if (pendingConversationIdRef.current)
|
||||
return
|
||||
|
||||
if (showDrawer || currentConversationId) {
|
||||
setShowDrawer(false)
|
||||
setCurrentConversation(undefined)
|
||||
}
|
||||
closingConversationIdRef.current = null
|
||||
pendingConversationCacheRef.current = undefined
|
||||
return
|
||||
}
|
||||
|
||||
if (closingConversationIdRef.current === conversationIdInUrl)
|
||||
return
|
||||
|
||||
if (pendingConversationIdRef.current === conversationIdInUrl)
|
||||
pendingConversationIdRef.current = null
|
||||
|
||||
const matchedConversation = logs?.data?.find((item: ConversationListItem) => item.id === conversationIdInUrl)
|
||||
const nextConversation: ConversationSelection = matchedConversation
|
||||
?? pendingConversationCacheRef.current
|
||||
?? { id: conversationIdInUrl, isPlaceholder: true }
|
||||
|
||||
if (!showDrawer)
|
||||
setShowDrawer(true)
|
||||
|
||||
if (!currentConversation || currentConversation.id !== conversationIdInUrl || (matchedConversation && currentConversation !== matchedConversation))
|
||||
setCurrentConversation(nextConversation)
|
||||
|
||||
if (pendingConversationCacheRef.current?.id === conversationIdInUrl || matchedConversation)
|
||||
pendingConversationCacheRef.current = undefined
|
||||
}, [conversationIdInUrl, currentConversation, isChatMode, logs?.data, showDrawer])
|
||||
|
||||
const onCloseDrawer = useCallback(() => {
|
||||
onRefresh()
|
||||
setShowDrawer(false)
|
||||
setCurrentConversation(undefined)
|
||||
setShowPromptLogModal(false)
|
||||
setShowAgentLogModal(false)
|
||||
setShowMessageLogModal(false)
|
||||
pendingConversationIdRef.current = null
|
||||
pendingConversationCacheRef.current = undefined
|
||||
closingConversationIdRef.current = conversationIdInUrl ?? null
|
||||
|
||||
if (conversationIdInUrl)
|
||||
router.replace(buildUrlWithConversation(), { scroll: false })
|
||||
}, [buildUrlWithConversation, conversationIdInUrl, onRefresh, router, setShowAgentLogModal, setShowMessageLogModal, setShowPromptLogModal])
|
||||
|
||||
// Annotated data needs to be highlighted
|
||||
const renderTdValue = (value: string | number | null, isEmptyStyle: boolean, isHighlight = false, annotation?: LogAnnotation) => {
|
||||
return (
|
||||
|
|
@@ -1023,6 +925,15 @@ const ConversationList: FC<IConversationList> = ({ logs, appDetail, onRefresh })
)
}

const onCloseDrawer = () => {
onRefresh()
setShowDrawer(false)
setCurrentConversation(undefined)
setShowPromptLogModal(false)
setShowAgentLogModal(false)
setShowMessageLogModal(false)
}

if (!logs)
return <Loading />
|
||||
|
|
@@ -1049,8 +960,11 @@ const ConversationList: FC<IConversationList> = ({ logs, appDetail, onRefresh })
const rightValue = get(log, isChatMode ? 'message_count' : 'message.answer')
return <tr
key={log.id}
className={cn('cursor-pointer border-b border-divider-subtle hover:bg-background-default-hover', activeConversationId !== log.id ? '' : 'bg-background-default-hover')}
onClick={() => handleRowClick(log)}>
className={cn('cursor-pointer border-b border-divider-subtle hover:bg-background-default-hover', currentConversation?.id !== log.id ? '' : 'bg-background-default-hover')}
onClick={() => {
setShowDrawer(true)
setCurrentConversation(log)
}}>
<td className='h-4'>
{!log.read_at && (
<div className='flex items-center p-3 pr-0.5'>
|
|
|
|||
|
|
@@ -3,7 +3,6 @@ import Chat from '../chat'
import type {
ChatConfig,
ChatItem,
ChatItemInTree,
OnSend,
} from '../types'
import { useChat } from '../chat/hooks'
|
|
@@ -149,7 +148,7 @@ const ChatWrapper = () => {
)
}, [chatList, handleNewConversationCompleted, handleSend, currentConversationId, currentConversationInputs, newConversationInputs, isInstalledApp, appId])

const doRegenerate = useCallback((chatItem: ChatItemInTree, editedQuestion?: { message: string, files?: FileEntity[] }) => {
const doRegenerate = useCallback((chatItem: ChatItem, editedQuestion?: { message: string, files?: FileEntity[] }) => {
const question = editedQuestion ? chatItem : chatList.find(item => item.id === chatItem.parentMessageId)!
const parentAnswer = chatList.find(item => item.id === question.parentMessageId)
doSend(editedQuestion ? editedQuestion.message : question.content,
|
|
|
|||
|
|
@@ -3,7 +3,6 @@ import Chat from '../chat'
import type {
ChatConfig,
ChatItem,
ChatItemInTree,
OnSend,
} from '../types'
import { useChat } from '../chat/hooks'
|
|
@@ -147,7 +146,7 @@ const ChatWrapper = () => {
)
}, [currentConversationId, currentConversationInputs, newConversationInputs, chatList, handleSend, isInstalledApp, appId, handleNewConversationCompleted])

const doRegenerate = useCallback((chatItem: ChatItemInTree, editedQuestion?: { message: string, files?: FileEntity[] }) => {
const doRegenerate = useCallback((chatItem: ChatItem, editedQuestion?: { message: string, files?: FileEntity[] }) => {
const question = editedQuestion ? chatItem : chatList.find(item => item.id === chatItem.parentMessageId)!
const parentAnswer = chatList.find(item => item.id === question.parentMessageId)
doSend(editedQuestion ? editedQuestion.message : question.content,
|
|
|
|||
|
|
@@ -85,7 +85,7 @@ export type OnSend = {
(message: string, files: FileEntity[] | undefined, isRegenerate: boolean, lastAnswer?: ChatItem | null): void
}

export type OnRegenerate = (chatItem: ChatItem) => void
export type OnRegenerate = (chatItem: ChatItem, editedQuestion?: { message: string; files?: FileEntity[] }) => void

export type Callback = {
onSuccess: () => void
|
|
|
|||
|
|
@@ -32,6 +32,7 @@ const meta = {
|
|||
},
|
||||
args: {
|
||||
show: false,
|
||||
children: null,
|
||||
},
|
||||
} satisfies Meta<typeof ContentDialog>
|
||||
|
||||
|
|
@@ -92,6 +93,9 @@ const DemoWrapper = (props: Props) => {
|
|||
}
|
||||
|
||||
export const Default: Story = {
|
||||
args: {
|
||||
children: null,
|
||||
},
|
||||
render: args => <DemoWrapper {...args} />,
|
||||
}
|
||||
|
||||
|
|
@@ -99,6 +103,7 @@ export const NarrowPanel: Story = {
|
|||
render: args => <DemoWrapper {...args} />,
|
||||
args: {
|
||||
className: 'max-w-[420px]',
|
||||
children: null,
|
||||
},
|
||||
parameters: {
|
||||
docs: {
|
||||
|
|
|
|||
|
|
@@ -3,6 +3,7 @@ import { fireEvent, render, screen } from '@testing-library/react'
|
|||
import TimePicker from './index'
|
||||
import dayjs from '../utils/dayjs'
|
||||
import { isDayjsObject } from '../utils/dayjs'
|
||||
import type { TimePickerProps } from '../types'
|
||||
|
||||
jest.mock('react-i18next', () => ({
|
||||
useTranslation: () => ({
|
||||
|
|
@@ -30,9 +31,10 @@ jest.mock('./options', () => () => <div data-testid="time-options" />)
|
|||
jest.mock('./header', () => () => <div data-testid="time-header" />)
|
||||
|
||||
describe('TimePicker', () => {
|
||||
const baseProps = {
|
||||
const baseProps: Pick<TimePickerProps, 'onChange' | 'onClear' | 'value'> = {
|
||||
onChange: jest.fn(),
|
||||
onClear: jest.fn(),
|
||||
value: undefined,
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
|
|
|
|||
|
|
@@ -150,7 +150,7 @@ export const toDayjs = (value: string | Dayjs | undefined, options: ToDayjsOptio

if (format) {
const parsedWithFormat = tzName
? dayjs.tz(trimmed, format, tzName, true)
? dayjs(trimmed, format, true).tz(tzName, true)
: dayjs(trimmed, format, true)
if (parsedWithFormat.isValid())
return parsedWithFormat
|
|
@@ -191,7 +191,7 @@ export const toDayjs = (value: string | Dayjs | undefined, options: ToDayjsOptio
const candidateFormats = formats ?? COMMON_PARSE_FORMATS
for (const fmt of candidateFormats) {
const parsed = tzName
? dayjs.tz(trimmed, fmt, tzName, true)
? dayjs(trimmed, fmt, true).tz(tzName, true)
: dayjs(trimmed, fmt, true)
if (parsed.isValid())
return parsed
|
|
|
|||
|
|
@@ -47,6 +47,7 @@ const meta = {
|
|||
args: {
|
||||
title: 'Manage API Keys',
|
||||
show: false,
|
||||
children: null,
|
||||
},
|
||||
} satisfies Meta<typeof Dialog>
|
||||
|
||||
|
|
@@ -102,6 +103,7 @@ export const Default: Story = {
|
|||
</button>
|
||||
</>
|
||||
),
|
||||
children: null,
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@@ -110,6 +112,7 @@ export const WithoutFooter: Story = {
|
|||
args: {
|
||||
footer: undefined,
|
||||
title: 'Read-only summary',
|
||||
children: null,
|
||||
},
|
||||
parameters: {
|
||||
docs: {
|
||||
|
|
@@ -140,6 +143,7 @@ export const CustomStyling: Story = {
|
|||
</div>
|
||||
</>
|
||||
),
|
||||
children: null,
|
||||
},
|
||||
parameters: {
|
||||
docs: {
|
||||
|
|
|
|||
|
|
@@ -42,7 +42,7 @@ export type FormOption = {
icon?: string
}

export type AnyValidators = FieldValidators<any, any, any, any, any, any, any, any, any, any>
export type AnyValidators = FieldValidators<any, any, any, any, any, any, any, any, any, any, any, any>

export type FormSchema = {
type: FormTypeEnum
|
|
|
|||
|
|
@@ -1,6 +1,7 @@
import React, { useEffect, useRef, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { useChatContext } from '../chat/chat/context'
import cn from '@/utils/classnames'

const hasEndThink = (children: any): boolean => {
if (typeof children === 'string')
|
|
@@ -40,7 +41,7 @@ const useThinkTimer = (children: any) => {
const [startTime] = useState(() => Date.now())
const [elapsedTime, setElapsedTime] = useState(0)
const [isComplete, setIsComplete] = useState(false)
const timerRef = useRef<NodeJS.Timeout>()
const timerRef = useRef<NodeJS.Timeout | null>(null)

useEffect(() => {
if (isComplete) return
|
|
@@ -63,16 +64,26 @@ const useThinkTimer = (children: any) => {
return { elapsedTime, isComplete }
}

const ThinkBlock = ({ children, ...props }: React.ComponentProps<'details'>) => {
type ThinkBlockProps = React.ComponentProps<'details'> & {
'data-think'?: boolean
}

const ThinkBlock = ({ children, ...props }: ThinkBlockProps) => {
const { elapsedTime, isComplete } = useThinkTimer(children)
const displayContent = removeEndThink(children)
const { t } = useTranslation()
const { 'data-think': isThink = false, className, open, ...rest } = props

if (!(props['data-think'] ?? false))
if (!isThink)
return (<details {...props}>{children}</details>)

return (
<details {...(!isComplete && { open: true })} className="group">
<details
{...rest}
data-think={isThink}
className={cn('group', className)}
open={isComplete ? open : true}
>
<summary className="flex cursor-pointer select-none list-none items-center whitespace-nowrap pl-2 font-bold text-text-secondary">
<div className="flex shrink-0 items-center">
<svg
|
|
|
|||
|
|
@@ -4,6 +4,7 @@ import RemarkBreaks from 'remark-breaks'
|
|||
import RehypeKatex from 'rehype-katex'
|
||||
import RemarkGfm from 'remark-gfm'
|
||||
import RehypeRaw from 'rehype-raw'
|
||||
import { ENABLE_SINGLE_DOLLAR_LATEX } from '@/config'
|
||||
import AudioBlock from '@/app/components/base/markdown-blocks/audio-block'
|
||||
import Img from '@/app/components/base/markdown-blocks/img'
|
||||
import Link from '@/app/components/base/markdown-blocks/link'
|
||||
|
|
@@ -34,7 +35,7 @@ export const ReactMarkdownWrapper: FC<ReactMarkdownWrapperProps> = (props) => {
|
|||
<ReactMarkdown
|
||||
remarkPlugins={[
|
||||
RemarkGfm,
|
||||
[RemarkMath, { singleDollarTextMath: false }],
|
||||
[RemarkMath, { singleDollarTextMath: ENABLE_SINGLE_DOLLAR_LATEX }],
|
||||
RemarkBreaks,
|
||||
]}
|
||||
rehypePlugins={[
|
||||
|
|
|
|||
|
|
@@ -45,6 +45,7 @@ const meta = {
|
|||
hideCloseBtn: false,
|
||||
onClose: () => console.log('close'),
|
||||
onConfirm: () => console.log('confirm'),
|
||||
children: null,
|
||||
},
|
||||
} satisfies Meta<typeof ModalLikeWrap>
|
||||
|
||||
|
|
@@ -68,6 +69,9 @@ export const Default: Story = {
|
|||
<BaseContent />
|
||||
</ModalLikeWrap>
|
||||
),
|
||||
args: {
|
||||
children: null,
|
||||
},
|
||||
}
|
||||
|
||||
export const WithBackLink: Story = {
|
||||
|
|
@@ -90,6 +94,7 @@ export const WithBackLink: Story = {
|
|||
),
|
||||
args: {
|
||||
title: 'Select metadata type',
|
||||
children: null,
|
||||
},
|
||||
parameters: {
|
||||
docs: {
|
||||
|
|
@@ -114,6 +119,7 @@ export const CustomWidth: Story = {
|
|||
),
|
||||
args: {
|
||||
title: 'Advanced configuration',
|
||||
children: null,
|
||||
},
|
||||
parameters: {
|
||||
docs: {
|
||||
|
|
|
|||
|
|
@@ -1,5 +1,5 @@
|
|||
import { Popover, PopoverButton, PopoverPanel, Transition } from '@headlessui/react'
|
||||
import { Fragment, cloneElement, useRef } from 'react'
|
||||
import { Fragment, cloneElement, isValidElement, useRef } from 'react'
|
||||
import cn from '@/utils/classnames'
|
||||
|
||||
export type HtmlContentProps = {
|
||||
|
|
@@ -103,15 +103,17 @@ export default function CustomPopover({
|
|||
})
|
||||
}
|
||||
>
|
||||
{cloneElement(htmlContent as React.ReactElement, {
|
||||
open,
|
||||
onClose: close,
|
||||
...(manualClose
|
||||
? {
|
||||
onClick: close,
|
||||
}
|
||||
: {}),
|
||||
})}
|
||||
{isValidElement(htmlContent)
|
||||
? cloneElement(htmlContent as React.ReactElement<HtmlContentProps>, {
|
||||
open,
|
||||
onClose: close,
|
||||
...(manualClose
|
||||
? {
|
||||
onClick: close,
|
||||
}
|
||||
: {}),
|
||||
})
|
||||
: htmlContent}
|
||||
</div>
|
||||
)}
|
||||
</PopoverPanel>
|
||||
|
|
|
|||
|
|
@@ -125,7 +125,7 @@ export const PortalToFollowElemTrigger = (
|
|||
children,
|
||||
asChild = false,
|
||||
...props
|
||||
}: React.HTMLProps<HTMLElement> & { ref?: React.RefObject<HTMLElement>, asChild?: boolean },
|
||||
}: React.HTMLProps<HTMLElement> & { ref?: React.RefObject<HTMLElement | null>, asChild?: boolean },
|
||||
) => {
|
||||
const context = usePortalToFollowElemContext()
|
||||
const childrenRef = (children as any).props?.ref
|
||||
|
|
@@ -133,12 +133,13 @@ export const PortalToFollowElemTrigger = (
|
|||
|
||||
// `asChild` allows the user to pass any element as the anchor
|
||||
if (asChild && React.isValidElement(children)) {
|
||||
const childProps = (children.props ?? {}) as Record<string, unknown>
|
||||
return React.cloneElement(
|
||||
children,
|
||||
context.getReferenceProps({
|
||||
ref,
|
||||
...props,
|
||||
...children.props,
|
||||
...childProps,
|
||||
'data-state': context.open ? 'open' : 'closed',
|
||||
} as React.HTMLProps<HTMLElement>),
|
||||
)
|
||||
|
|
@@ -164,7 +165,7 @@ export const PortalToFollowElemContent = (
|
|||
style,
|
||||
...props
|
||||
}: React.HTMLProps<HTMLDivElement> & {
|
||||
ref?: React.RefObject<HTMLDivElement>;
|
||||
ref?: React.RefObject<HTMLDivElement | null>;
|
||||
},
|
||||
) => {
|
||||
const context = usePortalToFollowElemContext()
|
||||
|
|
|
|||
|
|
@@ -35,7 +35,7 @@ import { DELETE_QUERY_BLOCK_COMMAND } from './plugins/query-block'
|
|||
import type { CustomTextNode } from './plugins/custom-text/node'
|
||||
import { registerLexicalTextEntity } from './utils'
|
||||
|
||||
export type UseSelectOrDeleteHandler = (nodeKey: string, command?: LexicalCommand<undefined>) => [RefObject<HTMLDivElement>, boolean]
|
||||
export type UseSelectOrDeleteHandler = (nodeKey: string, command?: LexicalCommand<undefined>) => [RefObject<HTMLDivElement | null>, boolean]
|
||||
export const useSelectOrDelete: UseSelectOrDeleteHandler = (nodeKey: string, command?: LexicalCommand<undefined>) => {
|
||||
const ref = useRef<HTMLDivElement>(null)
|
||||
const [editor] = useLexicalComposerContext()
|
||||
|
|
@@ -110,7 +110,7 @@ export const useSelectOrDelete: UseSelectOrDeleteHandler = (nodeKey: string, com
|
|||
return [ref, isSelected]
|
||||
}
|
||||
|
||||
export type UseTriggerHandler = () => [RefObject<HTMLDivElement>, boolean, Dispatch<SetStateAction<boolean>>]
|
||||
export type UseTriggerHandler = () => [RefObject<HTMLDivElement | null>, boolean, Dispatch<SetStateAction<boolean>>]
|
||||
export const useTrigger: UseTriggerHandler = () => {
|
||||
const triggerRef = useRef<HTMLDivElement>(null)
|
||||
const [open, setOpen] = useState(false)
|
||||
|
|
|
|||
|
|
@@ -1,4 +1,5 @@
|
|||
import { memo } from 'react'
|
||||
import type { ReactNode } from 'react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
import cn from '@/utils/classnames'
|
||||
|
||||
|
|
@@ -8,7 +9,7 @@ const Placeholder = ({
|
|||
className,
|
||||
}: {
|
||||
compact?: boolean
|
||||
value?: string | JSX.Element
|
||||
value?: ReactNode
|
||||
className?: string
|
||||
}) => {
|
||||
const { t } = useTranslation()
|
||||
|
|
|
|||
|
|
@@ -14,13 +14,19 @@ export const convertToMp3 = (recorder: any) => {
const { channels, sampleRate } = wav
const mp3enc = new lamejs.Mp3Encoder(channels, sampleRate, 128)
const result = recorder.getChannelData()
const buffer = []
const buffer: BlobPart[] = []

const leftData = result.left && new Int16Array(result.left.buffer, 0, result.left.byteLength / 2)
const rightData = result.right && new Int16Array(result.right.buffer, 0, result.right.byteLength / 2)
const remaining = leftData.length + (rightData ? rightData.length : 0)

const maxSamples = 1152
const toArrayBuffer = (bytes: Int8Array) => {
const arrayBuffer = new ArrayBuffer(bytes.length)
new Uint8Array(arrayBuffer).set(bytes)
return arrayBuffer
}

for (let i = 0; i < remaining; i += maxSamples) {
const left = leftData.subarray(i, i + maxSamples)
let right = null
|
|
@@ -35,13 +41,13 @@ export const convertToMp3 = (recorder: any) => {
}

if (mp3buf.length > 0)
buffer.push(mp3buf)
buffer.push(toArrayBuffer(mp3buf))
}

const enc = mp3enc.flush()

if (enc.length > 0)
buffer.push(enc)
buffer.push(toArrayBuffer(enc))

return new Blob(buffer, { type: 'audio/mp3' })
}
|
|
|
|||
|
|
@@ -32,7 +32,6 @@ const Pricing: FC<PricingProps> = ({
|
|||
const [planRange, setPlanRange] = React.useState<PlanRange>(PlanRange.monthly)
|
||||
const [currentCategory, setCurrentCategory] = useState<Category>(CategoryEnum.CLOUD)
|
||||
const canPay = isCurrentWorkspaceManager
|
||||
|
||||
useKeyPress(['esc'], onCancel)
|
||||
|
||||
const pricingPageLanguage = useGetPricingPageLanguage()
|
||||
|
|
|
|||
|
|
@@ -6,7 +6,7 @@ import SelfHostedPlanItem from './self-hosted-plan-item'
|
|||
|
||||
type PlansProps = {
|
||||
plan: {
|
||||
type: BasicPlan
|
||||
type: Plan
|
||||
usage: UsagePlanInfo
|
||||
total: UsagePlanInfo
|
||||
}
|
||||
|
|
@@ -21,6 +21,7 @@ const Plans = ({
|
|||
planRange,
|
||||
canPay,
|
||||
}: PlansProps) => {
|
||||
const currentPlanType: BasicPlan = plan.type === Plan.enterprise ? Plan.team : plan.type
|
||||
return (
|
||||
<div className='flex w-full justify-center border-t border-divider-accent px-10'>
|
||||
<div className='flex max-w-[1680px] grow border-x border-divider-accent'>
|
||||
|
|
@@ -28,21 +29,21 @@ const Plans = ({
|
|||
currentPlan === 'cloud' && (
|
||||
<>
|
||||
<CloudPlanItem
|
||||
currentPlan={plan.type}
|
||||
currentPlan={currentPlanType}
|
||||
plan={Plan.sandbox}
|
||||
planRange={planRange}
|
||||
canPay={canPay}
|
||||
/>
|
||||
<Divider type='vertical' className='mx-0 shrink-0 bg-divider-accent' />
|
||||
<CloudPlanItem
|
||||
currentPlan={plan.type}
|
||||
currentPlan={currentPlanType}
|
||||
plan={Plan.professional}
|
||||
planRange={planRange}
|
||||
canPay={canPay}
|
||||
/>
|
||||
<Divider type='vertical' className='mx-0 shrink-0 bg-divider-accent' />
|
||||
<CloudPlanItem
|
||||
currentPlan={plan.type}
|
||||
currentPlan={currentPlanType}
|
||||
plan={Plan.team}
|
||||
planRange={planRange}
|
||||
canPay={canPay}
|
||||
|
|
|
|||
|
|
@@ -18,7 +18,13 @@ import DocumentFileIcon from '../../common/document-file-icon'
|
|||
import cn from '@/utils/classnames'
|
||||
import { FieldInfo } from '@/app/components/datasets/documents/detail/metadata'
|
||||
import Button from '@/app/components/base/button'
|
||||
import type { FullDocumentDetail, IndexingStatusResponse, ProcessRuleResponse } from '@/models/datasets'
|
||||
import type {
|
||||
DataSourceInfo,
|
||||
FullDocumentDetail,
|
||||
IndexingStatusResponse,
|
||||
LegacyDataSourceInfo,
|
||||
ProcessRuleResponse,
|
||||
} from '@/models/datasets'
|
||||
import { fetchIndexingStatusBatch as doFetchIndexingStatus, fetchProcessRule } from '@/service/datasets'
|
||||
import { DataSourceType, ProcessMode } from '@/models/datasets'
|
||||
import NotionIcon from '@/app/components/base/notion-icon'
|
||||
|
|
@@ -241,10 +247,16 @@ const EmbeddingProcess: FC<Props> = ({ datasetId, batchId, documents = [], index
|
|||
return doc?.data_source_type as DataSourceType
|
||||
}
|
||||
|
||||
const isLegacyDataSourceInfo = (info: DataSourceInfo): info is LegacyDataSourceInfo => {
|
||||
return info != null && typeof (info as LegacyDataSourceInfo).upload_file === 'object'
|
||||
}
|
||||
|
||||
const getIcon = (id: string) => {
|
||||
const doc = documents.find(document => document.id === id)
|
||||
|
||||
return doc?.data_source_info.notion_page_icon
|
||||
const info = doc?.data_source_info
|
||||
if (info && isLegacyDataSourceInfo(info))
|
||||
return info.notion_page_icon
|
||||
return undefined
|
||||
}
|
||||
const isSourceEmbedding = (detail: IndexingStatusResponse) =>
|
||||
['indexing', 'splitting', 'parsing', 'cleaning', 'waiting'].includes(detail.indexing_status || '')
|
||||
|
|
|
|||
|
|
@@ -19,8 +19,6 @@ import { IS_CE_EDITION } from '@/config'
|
|||
import { Theme } from '@/types/app'
|
||||
import useTheme from '@/hooks/use-theme'
|
||||
|
||||
const FILES_NUMBER_LIMIT = 20
|
||||
|
||||
type IFileUploaderProps = {
|
||||
fileList: FileItem[]
|
||||
titleClassName?: string
|
||||
|
|
@@ -72,6 +70,7 @@ const FileUploader = ({
|
|||
const fileUploadConfig = useMemo(() => fileUploadConfigResponse ?? {
|
||||
file_size_limit: 15,
|
||||
batch_count_limit: 5,
|
||||
file_upload_limit: 5,
|
||||
}, [fileUploadConfigResponse])
|
||||
|
||||
const fileListRef = useRef<FileItem[]>([])
|
||||
|
|
@@ -106,6 +105,8 @@ const FileUploader = ({
|
|||
return isValidType && isValidSize
|
||||
}, [fileUploadConfig, notify, t, ACCEPTS])
|
||||
|
||||
type UploadResult = Awaited<ReturnType<typeof upload>>
|
||||
|
||||
const fileUpload = useCallback(async (fileItem: FileItem): Promise<FileItem> => {
|
||||
const formData = new FormData()
|
||||
formData.append('file', fileItem.file)
|
||||
|
|
@@ -121,10 +122,10 @@ const FileUploader = ({
|
|||
data: formData,
|
||||
onprogress: onProgress,
|
||||
}, false, undefined, '?source=datasets')
|
||||
.then((res: File) => {
|
||||
.then((res) => {
|
||||
const completeFile = {
|
||||
fileID: fileItem.fileID,
|
||||
file: res,
|
||||
file: res as unknown as File,
|
||||
progress: -1,
|
||||
}
|
||||
const index = fileListRef.current.findIndex(item => item.fileID === fileItem.fileID)
|
||||
|
|
@@ -163,11 +164,12 @@ const FileUploader = ({
|
|||
}, [fileUploadConfig, uploadBatchFiles])
|
||||
|
||||
const initialUpload = useCallback((files: File[]) => {
|
||||
const filesCountLimit = fileUploadConfig.file_upload_limit
|
||||
if (!files.length)
|
||||
return false
|
||||
|
||||
if (files.length + fileList.length > FILES_NUMBER_LIMIT && !IS_CE_EDITION) {
|
||||
notify({ type: 'error', message: t('datasetCreation.stepOne.uploader.validation.filesNumber', { filesNumber: FILES_NUMBER_LIMIT }) })
|
||||
if (files.length + fileList.length > filesCountLimit && !IS_CE_EDITION) {
|
||||
notify({ type: 'error', message: t('datasetCreation.stepOne.uploader.validation.filesNumber', { filesNumber: filesCountLimit }) })
|
||||
return false
|
||||
}
|
||||
|
||||
|
|
@@ -180,7 +182,7 @@ const FileUploader = ({
|
|||
prepareFileList(newFiles)
|
||||
fileListRef.current = newFiles
|
||||
uploadMultipleFiles(preparedFiles)
|
||||
}, [prepareFileList, uploadMultipleFiles, notify, t, fileList])
|
||||
}, [prepareFileList, uploadMultipleFiles, notify, t, fileList, fileUploadConfig])
|
||||
|
||||
const handleDragEnter = (e: DragEvent) => {
|
||||
e.preventDefault()
|
||||
|
|
@@ -255,10 +257,11 @@ const FileUploader = ({
|
|||
)
|
||||
let files = nested.flat()
|
||||
if (notSupportBatchUpload) files = files.slice(0, 1)
|
||||
files = files.slice(0, fileUploadConfig.batch_count_limit)
|
||||
const valid = files.filter(isValid)
|
||||
initialUpload(valid)
|
||||
},
|
||||
[initialUpload, isValid, notSupportBatchUpload, traverseFileEntry],
|
||||
[initialUpload, isValid, notSupportBatchUpload, traverseFileEntry, fileUploadConfig],
|
||||
)
|
||||
const selectHandle = () => {
|
||||
if (fileUploader.current)
|
||||
|
|
@@ -273,9 +276,10 @@ const FileUploader = ({
|
|||
onFileListUpdate?.([...fileListRef.current])
|
||||
}
|
||||
const fileChangeHandle = useCallback((e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const files = [...(e.target.files ?? [])] as File[]
|
||||
let files = [...(e.target.files ?? [])] as File[]
|
||||
files = files.slice(0, fileUploadConfig.batch_count_limit)
|
||||
initialUpload(files.filter(isValid))
|
||||
}, [isValid, initialUpload])
|
||||
}, [isValid, initialUpload, fileUploadConfig])
|
||||
|
||||
const { theme } = useTheme()
|
||||
const chartColor = useMemo(() => theme === Theme.dark ? '#5289ff' : '#296dff', [theme])
|
||||
|
|
@@ -324,7 +328,8 @@ const FileUploader = ({
|
|||
<div>{t('datasetCreation.stepOne.uploader.tip', {
|
||||
size: fileUploadConfig.file_size_limit,
|
||||
supportTypes: supportTypesShowNames,
|
||||
batchCount: fileUploadConfig.batch_count_limit,
|
||||
batchCount: notSupportBatchUpload ? 1 : fileUploadConfig.batch_count_limit,
|
||||
totalCount: fileUploadConfig.file_upload_limit,
|
||||
})}</div>
|
||||
{dragging && <div ref={dragRef} className='absolute left-0 top-0 h-full w-full' />}
|
||||
</div>
|
||||
|
|
|
|||
|
|
@@ -121,6 +121,8 @@ const LocalFile = ({
|
|||
return isValidType && isValidSize
|
||||
}, [fileUploadConfig, notify, t, ACCEPTS])
|
||||
|
||||
type UploadResult = Awaited<ReturnType<typeof upload>>
|
||||
|
||||
const fileUpload = useCallback(async (fileItem: FileItem): Promise<FileItem> => {
|
||||
const formData = new FormData()
|
||||
formData.append('file', fileItem.file)
|
||||
|
|
@@ -136,10 +138,14 @@ const LocalFile = ({
|
|||
data: formData,
|
||||
onprogress: onProgress,
|
||||
}, false, undefined, '?source=datasets')
|
||||
.then((res: File) => {
|
||||
const completeFile = {
|
||||
.then((res: UploadResult) => {
|
||||
const updatedFile = Object.assign({}, fileItem.file, {
|
||||
id: res.id,
|
||||
...(res as Partial<File>),
|
||||
}) as File
|
||||
const completeFile: FileItem = {
|
||||
fileID: fileItem.fileID,
|
||||
file: res,
|
||||
file: updatedFile,
|
||||
progress: -1,
|
||||
}
|
||||
const index = fileListRef.current.findIndex(item => item.fileID === fileItem.fileID)
|
||||
|
|
@@ -287,7 +293,7 @@ const LocalFile = ({
|
|||
<RiUploadCloud2Line className='mr-2 size-5' />
|
||||
|
||||
<span>
|
||||
{t('datasetCreation.stepOne.uploader.button')}
|
||||
{notSupportBatchUpload ? t('datasetCreation.stepOne.uploader.buttonSingleFile') : t('datasetCreation.stepOne.uploader.button')}
|
||||
{allowedExtensions.length > 0 && (
|
||||
<label className='ml-1 cursor-pointer text-text-accent' onClick={selectHandle}>{t('datasetCreation.stepOne.uploader.browse')}</label>
|
||||
)}
|
||||
|
|
@@ -296,7 +302,7 @@ const LocalFile = ({
|
|||
<div>{t('datasetCreation.stepOne.uploader.tip', {
|
||||
size: fileUploadConfig.file_size_limit,
|
||||
supportTypes: supportTypesShowNames,
|
||||
batchCount: fileUploadConfig.batch_count_limit,
|
||||
batchCount: notSupportBatchUpload ? 1 : fileUploadConfig.batch_count_limit,
|
||||
})}</div>
|
||||
{dragging && <div ref={dragRef} className='absolute left-0 top-0 h-full w-full' />}
|
||||
</div>
|
||||
|
|
|
|||
|
|
@@ -38,6 +38,8 @@ const CSVUploader: FC<Props> = ({
|
|||
file_size_limit: 15,
|
||||
}, [fileUploadConfigResponse])
|
||||
|
||||
type UploadResult = Awaited<ReturnType<typeof upload>>
|
||||
|
||||
const fileUpload = useCallback(async (fileItem: FileItem): Promise<FileItem> => {
|
||||
fileItem.progress = 0
|
||||
|
||||
|
|
@@ -58,10 +60,14 @@ const CSVUploader: FC<Props> = ({
|
|||
data: formData,
|
||||
onprogress: onProgress,
|
||||
}, false, undefined, '?source=datasets')
|
||||
.then((res: File) => {
|
||||
const completeFile = {
|
||||
.then((res: UploadResult) => {
|
||||
const updatedFile = Object.assign({}, fileItem.file, {
|
||||
id: res.id,
|
||||
...(res as Partial<File>),
|
||||
}) as File
|
||||
const completeFile: FileItem = {
|
||||
fileID: fileItem.fileID,
|
||||
file: res,
|
||||
file: updatedFile,
|
||||
progress: 100,
|
||||
}
|
||||
updateFile(completeFile)
|
||||
|
|
|
|||
|
|
@@ -17,7 +17,7 @@ import Divider from '@/app/components/base/divider'
|
|||
import Loading from '@/app/components/base/loading'
|
||||
import Toast from '@/app/components/base/toast'
|
||||
import { ChunkingMode } from '@/models/datasets'
|
||||
import type { FileItem } from '@/models/datasets'
|
||||
import type { DataSourceInfo, FileItem, LegacyDataSourceInfo } from '@/models/datasets'
|
||||
import { useDatasetDetailContextWithSelector } from '@/context/dataset-detail'
|
||||
import FloatRightContainer from '@/app/components/base/float-right-container'
|
||||
import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints'
|
||||
|
|
@@ -109,6 +109,18 @@ const DocumentDetail: FC<DocumentDetailProps> = ({ datasetId, documentId }) => {
|
|||
|
||||
const embedding = ['queuing', 'indexing', 'paused'].includes((documentDetail?.display_status || '').toLowerCase())
|
||||
|
||||
const isLegacyDataSourceInfo = (info?: DataSourceInfo): info is LegacyDataSourceInfo => {
|
||||
return !!info && 'upload_file' in info
|
||||
}
|
||||
|
||||
const documentUploadFile = useMemo(() => {
|
||||
if (!documentDetail?.data_source_info)
|
||||
return undefined
|
||||
if (isLegacyDataSourceInfo(documentDetail.data_source_info))
|
||||
return documentDetail.data_source_info.upload_file
|
||||
return undefined
|
||||
}, [documentDetail?.data_source_info])
|
||||
|
||||
const invalidChunkList = useInvalid(useSegmentListKey)
|
||||
const invalidChildChunkList = useInvalid(useChildSegmentListKey)
|
||||
const invalidDocumentList = useInvalidDocumentList(datasetId)
|
||||
|
|
@@ -153,7 +165,7 @@ const DocumentDetail: FC<DocumentDetailProps> = ({ datasetId, documentId }) => {
|
|||
</div>
|
||||
<DocumentTitle
|
||||
datasetId={datasetId}
|
||||
extension={documentDetail?.data_source_info?.upload_file?.extension}
|
||||
extension={documentUploadFile?.extension}
|
||||
name={documentDetail?.name}
|
||||
wrapperCls='mr-2'
|
||||
parent_mode={parentMode}
|
||||
|
|
|
|||
|
|
@@ -131,9 +131,15 @@ type IMetadataProps = {
|
|||
onUpdate: () => void
|
||||
}
|
||||
|
||||
type MetadataState = {
|
||||
documentType?: DocType | ''
|
||||
metadata: Record<string, string>
|
||||
}
|
||||
|
||||
const Metadata: FC<IMetadataProps> = ({ docDetail, loading, onUpdate }) => {
|
||||
const { doc_metadata = {} } = docDetail || {}
|
||||
const doc_type = docDetail?.doc_type || ''
|
||||
const rawDocType = docDetail?.doc_type ?? ''
|
||||
const doc_type = rawDocType === 'others' ? '' : rawDocType
|
||||
|
||||
const { t } = useTranslation()
|
||||
const metadataMap = useMetadataMap()
|
||||
|
|
@@ -143,18 +149,16 @@ const Metadata: FC<IMetadataProps> = ({ docDetail, loading, onUpdate }) => {
|
|||
const businessDocCategoryMap = useBusinessDocCategories()
|
||||
const [editStatus, setEditStatus] = useState(!doc_type) // if no documentType, in editing status by default
|
||||
// the initial values are according to the documentType
|
||||
const [metadataParams, setMetadataParams] = useState<{
|
||||
documentType?: DocType | ''
|
||||
metadata: { [key: string]: string }
|
||||
}>(
|
||||
const [metadataParams, setMetadataParams] = useState<MetadataState>(
|
||||
doc_type
|
||||
? {
|
||||
documentType: doc_type,
|
||||
metadata: doc_metadata || {},
|
||||
documentType: doc_type as DocType,
|
||||
metadata: (doc_metadata || {}) as Record<string, string>,
|
||||
}
|
||||
: { metadata: {} })
|
||||
: { metadata: {} },
|
||||
)
|
||||
const [showDocTypes, setShowDocTypes] = useState(!doc_type) // whether show doc types
|
||||
const [tempDocType, setTempDocType] = useState<DocType | undefined | ''>('') // for remember icon click
|
||||
const [tempDocType, setTempDocType] = useState<DocType | ''>('') // for remember icon click
|
||||
const [saveLoading, setSaveLoading] = useState(false)
|
||||
|
||||
const { notify } = useContext(ToastContext)
|
||||
|
|
@@ -165,13 +169,13 @@ const Metadata: FC<IMetadataProps> = ({ docDetail, loading, onUpdate }) => {
|
|||
if (docDetail?.doc_type) {
|
||||
setEditStatus(false)
|
||||
setShowDocTypes(false)
|
||||
setTempDocType(docDetail?.doc_type)
|
||||
setTempDocType(doc_type as DocType | '')
|
||||
setMetadataParams({
|
||||
documentType: docDetail?.doc_type,
|
||||
metadata: docDetail?.doc_metadata || {},
|
||||
documentType: doc_type as DocType | '',
|
||||
metadata: (docDetail?.doc_metadata || {}) as Record<string, string>,
|
||||
})
|
||||
}
|
||||
}, [docDetail?.doc_type])
|
||||
}, [docDetail?.doc_type, docDetail?.doc_metadata, doc_type])
|
||||
|
||||
// confirm doc type
|
||||
const confirmDocType = () => {
|
||||
|
|
@@ -179,7 +183,7 @@ const Metadata: FC<IMetadataProps> = ({ docDetail, loading, onUpdate }) => {
|
|||
return
|
||||
setMetadataParams({
|
||||
documentType: tempDocType,
|
||||
metadata: tempDocType === metadataParams.documentType ? metadataParams.metadata : {}, // change doc type, clear metadata
|
||||
metadata: tempDocType === metadataParams.documentType ? metadataParams.metadata : {} as Record<string, string>, // change doc type, clear metadata
|
||||
})
|
||||
setEditStatus(true)
|
||||
setShowDocTypes(false)
|
||||
|
|
@@ -187,7 +191,7 @@ const Metadata: FC<IMetadataProps> = ({ docDetail, loading, onUpdate }) => {
|
|||
|
||||
// cancel doc type
|
||||
const cancelDocType = () => {
|
||||
setTempDocType(metadataParams.documentType)
|
||||
setTempDocType(metadataParams.documentType ?? '')
|
||||
setEditStatus(true)
|
||||
setShowDocTypes(false)
|
||||
}
|
||||
|
|
@@ -209,7 +213,7 @@ const Metadata: FC<IMetadataProps> = ({ docDetail, loading, onUpdate }) => {
|
|||
<span className={s.title}>{t('datasetDocuments.metadata.docTypeChangeTitle')}</span>
|
||||
<span className={s.changeTip}>{t('datasetDocuments.metadata.docTypeSelectWarning')}</span>
|
||||
</>}
|
||||
<Radio.Group value={tempDocType ?? documentType} onChange={setTempDocType} className={s.radioGroup}>
|
||||
<Radio.Group value={tempDocType ?? documentType ?? ''} onChange={setTempDocType} className={s.radioGroup}>
|
||||
{CUSTOMIZABLE_DOC_TYPES.map((type, index) => {
|
||||
const currValue = tempDocType ?? documentType
|
||||
return <Radio key={index} value={type} className={`${s.radio} ${currValue === type ? 'shadow-none' : ''}`}>
|
||||
|
|
|
|||
|
|
@ -4,7 +4,17 @@ import { useBoolean } from 'ahooks'
import { useContext } from 'use-context-selector'
import { useRouter } from 'next/navigation'
import DatasetDetailContext from '@/context/dataset-detail'
import type { CrawlOptions, CustomFile, DataSourceType } from '@/models/datasets'
import type {
CrawlOptions,
CustomFile,
DataSourceInfo,
DataSourceType,
LegacyDataSourceInfo,
LocalFileInfo,
OnlineDocumentInfo,
WebsiteCrawlInfo,
} from '@/models/datasets'
import type { DataSourceProvider } from '@/models/common'
import Loading from '@/app/components/base/loading'
import StepTwo from '@/app/components/datasets/create/step-two'
import AccountSetting from '@/app/components/header/account-setting'

@ -42,15 +52,78 @@ const DocumentSettings = ({ datasetId, documentId }: DocumentSettingsProps) => {
params: { metadata: 'without' },
})

const dataSourceInfo = documentDetail?.data_source_info

const isLegacyDataSourceInfo = (info: DataSourceInfo | undefined): info is LegacyDataSourceInfo => {
return !!info && 'upload_file' in info
}
const isWebsiteCrawlInfo = (info: DataSourceInfo | undefined): info is WebsiteCrawlInfo => {
return !!info && 'source_url' in info && 'title' in info
}
const isOnlineDocumentInfo = (info: DataSourceInfo | undefined): info is OnlineDocumentInfo => {
return !!info && 'page' in info
}
const isLocalFileInfo = (info: DataSourceInfo | undefined): info is LocalFileInfo => {
return !!info && 'related_id' in info && 'transfer_method' in info
}
const legacyInfo = isLegacyDataSourceInfo(dataSourceInfo) ? dataSourceInfo : undefined
const websiteInfo = isWebsiteCrawlInfo(dataSourceInfo) ? dataSourceInfo : undefined
const onlineDocumentInfo = isOnlineDocumentInfo(dataSourceInfo) ? dataSourceInfo : undefined
const localFileInfo = isLocalFileInfo(dataSourceInfo) ? dataSourceInfo : undefined

const currentPage = useMemo(() => {
return {
workspace_id: documentDetail?.data_source_info.notion_workspace_id,
page_id: documentDetail?.data_source_info.notion_page_id,
page_name: documentDetail?.name,
page_icon: documentDetail?.data_source_info.notion_page_icon,
type: documentDetail?.data_source_type,
if (legacyInfo) {
return {
workspace_id: legacyInfo.notion_workspace_id ?? '',
page_id: legacyInfo.notion_page_id ?? '',
page_name: documentDetail?.name,
page_icon: legacyInfo.notion_page_icon,
type: documentDetail?.data_source_type,
}
}
}, [documentDetail])
if (onlineDocumentInfo) {
return {
workspace_id: onlineDocumentInfo.workspace_id,
page_id: onlineDocumentInfo.page.page_id,
page_name: onlineDocumentInfo.page.page_name,
page_icon: onlineDocumentInfo.page.page_icon,
type: onlineDocumentInfo.page.type,
}
}
return undefined
}, [documentDetail?.data_source_type, documentDetail?.name, legacyInfo, onlineDocumentInfo])

const files = useMemo<CustomFile[]>(() => {
if (legacyInfo?.upload_file)
return [legacyInfo.upload_file as CustomFile]
if (localFileInfo) {
const { related_id, name, extension } = localFileInfo
return [{
id: related_id,
name,
extension,
} as unknown as CustomFile]
}
return []
}, [legacyInfo?.upload_file, localFileInfo])

const websitePages = useMemo(() => {
if (!websiteInfo)
return []
return [{
title: websiteInfo.title,
source_url: websiteInfo.source_url,
content: websiteInfo.content,
description: websiteInfo.description,
}]
}, [websiteInfo])

const crawlOptions = (dataSourceInfo && typeof dataSourceInfo === 'object' && 'includes' in dataSourceInfo && 'excludes' in dataSourceInfo)
? dataSourceInfo as unknown as CrawlOptions
: undefined

const websiteCrawlProvider = (websiteInfo?.provider ?? legacyInfo?.provider) as DataSourceProvider | undefined
const websiteCrawlJobId = websiteInfo?.job_id ?? legacyInfo?.job_id

if (error)
return <AppUnavailable code={500} unknownReason={t('datasetCreation.error.unavailable') as string} />

@ -65,22 +138,16 @@ const DocumentSettings = ({ datasetId, documentId }: DocumentSettingsProps) => {
onSetting={showSetAPIKey}
datasetId={datasetId}
dataSourceType={documentDetail.data_source_type as DataSourceType}
notionPages={[currentPage as unknown as NotionPage]}
websitePages={[
{
title: documentDetail.name,
source_url: documentDetail.data_source_info?.url,
content: '',
description: '',
},
]}
websiteCrawlProvider={documentDetail.data_source_info?.provider}
websiteCrawlJobId={documentDetail.data_source_info?.job_id}
crawlOptions={documentDetail.data_source_info as unknown as CrawlOptions}
notionPages={currentPage ? [currentPage as unknown as NotionPage] : []}
notionCredentialId={legacyInfo?.credential_id || onlineDocumentInfo?.credential_id || ''}
websitePages={websitePages}
websiteCrawlProvider={websiteCrawlProvider}
websiteCrawlJobId={websiteCrawlJobId || ''}
crawlOptions={crawlOptions}
indexingType={indexingTechnique}
isSetting
documentDetail={documentDetail}
files={[documentDetail.data_source_info.upload_file as CustomFile]}
files={files}
onSave={saveHandler}
onCancel={cancelHandler}
/>
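The hunks above stop reading fields straight off `documentDetail.data_source_info` and instead narrow the `DataSourceInfo` union through user-defined type guards (`info is LegacyDataSourceInfo`, and so on) before touching source-specific fields. Purely as an illustration, not code from the diff, and with names and fields invented for the example, here is a minimal self-contained sketch of the same narrowing pattern:

```ts
// Hypothetical two-member union standing in for Dify's richer DataSourceInfo.
type FileSource = { upload_file: { id: string; name: string } }
type WebSource = { source_url: string; title: string }
type SourceInfo = FileSource | WebSource

// A user-defined type guard: the `info is FileSource` return type tells the
// compiler that a `true` result narrows `info` to FileSource at the call site.
const isFileSource = (info: SourceInfo | undefined): info is FileSource =>
  !!info && 'upload_file' in info

const describe = (info: SourceInfo | undefined): string => {
  if (isFileSource(info))
    return `file: ${info.upload_file.name}` // safe: narrowed to FileSource here
  if (info && 'source_url' in info)
    return `web: ${info.title} (${info.source_url})` // inline narrowing via the `in` operator
  return 'unknown source'
}
```

The apparent payoff in the component above is that `legacyInfo`, `websiteInfo`, `onlineDocumentInfo` and `localFileInfo` each come out already typed, so the JSX that follows no longer needs an `as unknown as` cast on every prop.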
@ -5,6 +5,7 @@ import type {
Model,
ModelItem,
} from '../declarations'
import type { ModelFeatureEnum } from '../declarations'
import { useCurrentProviderAndModel } from '../hooks'
import ModelTrigger from './model-trigger'
import EmptyTrigger from './empty-trigger'

@ -24,7 +25,7 @@ type ModelSelectorProps = {
popupClassName?: string
onSelect?: (model: DefaultModel) => void
readonly?: boolean
scopeFeatures?: string[]
scopeFeatures?: ModelFeatureEnum[]
deprecatedClassName?: string
showDeprecatedWarnIcon?: boolean
}

@ -22,7 +22,7 @@ type PopupProps = {
defaultModel?: DefaultModel
modelList: Model[]
onSelect: (provider: string, model: ModelItem) => void
scopeFeatures?: string[]
scopeFeatures?: ModelFeatureEnum[]
onHide: () => void
}
const Popup: FC<PopupProps> = ({
@ -5,15 +5,17 @@ import { isEmpty } from 'lodash-es'
export const pluginManifestToCardPluginProps = (pluginManifest: PluginDeclaration): Plugin => {
return {
plugin_id: pluginManifest.plugin_unique_identifier,
type: pluginManifest.category,
type: pluginManifest.category as Plugin['type'],
category: pluginManifest.category,
name: pluginManifest.name,
version: pluginManifest.version,
latest_version: '',
latest_package_identifier: '',
org: pluginManifest.author,
author: pluginManifest.author,
label: pluginManifest.label,
brief: pluginManifest.description,
description: pluginManifest.description,
icon: pluginManifest.icon,
verified: pluginManifest.verified,
introduction: '',

@ -22,14 +24,17 @@ export const pluginManifestToCardPluginProps = (pluginManifest: PluginDeclaratio
endpoint: {
settings: [],
},
tags: [],
tags: pluginManifest.tags.map(tag => ({ name: tag })),
badges: [],
verification: { authorized_category: 'langgenius' },
from: 'package',
}
}

export const pluginManifestInMarketToPluginProps = (pluginManifest: PluginManifestInMarket): Plugin => {
return {
plugin_id: pluginManifest.plugin_unique_identifier,
type: pluginManifest.category,
type: pluginManifest.category as Plugin['type'],
category: pluginManifest.category,
name: pluginManifest.name,
version: pluginManifest.latest_version,

@ -38,6 +43,7 @@ export const pluginManifestInMarketToPluginProps = (pluginManifest: PluginManife
org: pluginManifest.org,
label: pluginManifest.label,
brief: pluginManifest.brief,
description: pluginManifest.brief,
icon: pluginManifest.icon,
verified: true,
introduction: pluginManifest.introduction,

@ -49,6 +55,7 @@ export const pluginManifestInMarketToPluginProps = (pluginManifest: PluginManife
tags: [],
badges: pluginManifest.badges,
verification: isEmpty(pluginManifest.verification) ? { authorized_category: 'langgenius' } : pluginManifest.verification,
from: pluginManifest.from,
}
}
@ -50,7 +50,7 @@ const EndpointModal: FC<Props> = ({

// Fix: Process boolean fields to ensure they are sent as proper boolean values
const processedCredential = { ...tempCredential }
formSchemas.forEach((field) => {
formSchemas.forEach((field: any) => {
if (field.type === 'boolean' && processedCredential[field.name] !== undefined) {
const value = processedCredential[field.name]
if (typeof value === 'string')
@ -7,6 +7,7 @@ import { useTranslation } from 'react-i18next'
import type {
DefaultModel,
FormValue,
ModelFeatureEnum,
} from '@/app/components/header/account-setting/model-provider-page/declarations'
import { ModelStatusEnum, ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import ModelSelector from '@/app/components/header/account-setting/model-provider-page/model-selector'

@ -57,7 +58,7 @@ const ModelParameterModal: FC<ModelParameterModalProps> = ({
const { isAPIKeySet } = useProviderContext()
const [open, setOpen] = useState(false)
const scopeArray = scope.split('&')
const scopeFeatures = useMemo(() => {
const scopeFeatures = useMemo((): ModelFeatureEnum[] => {
if (scopeArray.includes('all'))
return []
return scopeArray.filter(item => ![

@ -67,7 +68,7 @@ const ModelParameterModal: FC<ModelParameterModalProps> = ({
ModelTypeEnum.moderation,
ModelTypeEnum.speech2text,
ModelTypeEnum.tts,
].includes(item as ModelTypeEnum))
].includes(item as ModelTypeEnum)).map(item => item as ModelFeatureEnum)
}, [scopeArray])

const { data: textGenerationList } = useModelList(ModelTypeEnum.textGeneration)
@ -56,10 +56,10 @@ const SwrInitializer = ({
}

const redirectUrl = resolvePostLoginRedirect(searchParams)
if (redirectUrl)
if (redirectUrl) {
location.replace(redirectUrl)
else
router.replace(pathname)
return
}

setInit(true)
}
@ -9,6 +9,7 @@ import I18n from '@/context/i18n'
import { getLanguage } from '@/i18n-config/language'
import { useStore as useLabelStore } from '@/app/components/tools/labels/store'
import { fetchLabelList } from '@/service/tools'
import { renderI18nObject } from '@/i18n-config'

type Props = {
value: string

@ -55,14 +56,24 @@ const Category = ({
<Apps02 className='mr-2 h-4 w-4 shrink-0' />
{t('tools.type.all')}
</div>
{labelList.map(label => (
<div key={label.name} title={label.label[language]} className={cn('mb-0.5 flex cursor-pointer items-center overflow-hidden truncate rounded-lg p-1 pl-3 text-sm leading-5 text-gray-700 hover:bg-white', value === label.name && '!bg-white font-medium !text-primary-600')} onClick={() => onSelect(label.name)}>
<div className='mr-2 h-4 w-4 shrink-0'>
<Icon active={value === label.name} svgString={label.icon} />
{labelList.map((label) => {
const labelText = typeof label.label === 'string'
? label.label
: (label.label ? renderI18nObject(label.label, language) : '')
return (
<div
key={label.name}
title={labelText}
className={cn('mb-0.5 flex cursor-pointer items-center overflow-hidden truncate rounded-lg p-1 pl-3 text-sm leading-5 text-gray-700 hover:bg-white', value === label.name && '!bg-white font-medium !text-primary-600')}
onClick={() => onSelect(label.name)}
>
<div className='mr-2 h-4 w-4 shrink-0'>
<Icon active={value === label.name} svgString={label.icon || ''} />
</div>
{labelText}
</div>
{label.label[language]}
</div>
))}
)
})}
</div>
)
}
@ -10,6 +10,7 @@ import {
} from '@remixicon/react'
import { useMount } from 'ahooks'
import type { Collection, CustomCollectionBackend, Tool } from '../types'
import type { CollectionType } from '../types'
import Type from './type'
import Category from './category'
import Tools from './tools'

@ -129,7 +130,7 @@ const AddToolModal: FC<Props> = ({
const nexModelConfig = produce(modelConfig, (draft: ModelConfig) => {
draft.agentConfig.tools.push({
provider_id: collection.id || collection.name,
provider_type: collection.type,
provider_type: collection.type as CollectionType,
provider_name: collection.name,
tool_name: tool.name,
tool_label: tool.label[locale] || tool.label[locale.replaceAll('-', '_')],
@ -23,6 +23,14 @@ import type { Tool } from '@/app/components/tools/types'
import { CollectionType } from '@/app/components/tools/types'
import type { AgentTool } from '@/types/app'
import { MAX_TOOLS_NUM } from '@/config'
import type { TypeWithI18N } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { renderI18nObject } from '@/i18n-config'

const resolveI18nText = (value: TypeWithI18N | string | undefined, language: string): string => {
if (!value)
return ''
return typeof value === 'string' ? value : renderI18nObject(value, language)
}

type ToolsProps = {
showWorkflowEmpty: boolean

@ -53,7 +61,7 @@ const Blocks = ({
className='group mb-1 last-of-type:mb-0'
>
<div className='flex h-[22px] w-full items-center justify-between pl-3 pr-1 text-xs font-medium text-gray-500'>
{toolWithProvider.label[language]}
{resolveI18nText(toolWithProvider.label, language)}
<a className='hidden cursor-pointer items-center group-hover:flex' href={`${basePath}/tools?category=${toolWithProvider.type}`} target='_blank'>{t('tools.addToolModal.manageInTools')}<ArrowUpRight className='ml-0.5 h-3 w-3' /></a>
</div>
{list.map((tool) => {

@ -62,7 +70,7 @@ const Blocks = ({
return ''
return tool.labels.map((name) => {
const label = labelList.find(item => item.name === name)
return label?.label[language]
return resolveI18nText(label?.label, language)
}).filter(Boolean).join(', ')
})()
const added = !!addedTools?.find(v => v.provider_id === toolWithProvider.id && v.provider_type === toolWithProvider.type && v.tool_name === tool.name)

@ -79,8 +87,8 @@ const Blocks = ({
type={BlockEnum.Tool}
toolIcon={toolWithProvider.icon}
/>
<div className='mb-1 text-sm leading-5 text-gray-900'>{tool.label[language]}</div>
<div className='text-xs leading-[18px] text-gray-700'>{tool.description[language]}</div>
<div className='mb-1 text-sm leading-5 text-gray-900'>{resolveI18nText(tool.label, language)}</div>
<div className='text-xs leading-[18px] text-gray-700'>{resolveI18nText(tool.description, language)}</div>
{tool.labels?.length > 0 && (
<div className='mt-1 flex shrink-0 items-center'>
<div className='relative flex w-full items-center gap-1 rounded-md py-1 text-gray-500' title={labelContent}>

@ -98,7 +106,7 @@ const Blocks = ({
type={BlockEnum.Tool}
toolIcon={toolWithProvider.icon}
/>
<div className={cn('grow truncate text-sm text-gray-900', needAuth && 'opacity-30')}>{tool.label[language]}</div>
<div className={cn('grow truncate text-sm text-gray-900', needAuth && 'opacity-30')}>{resolveI18nText(tool.label, language)}</div>
{!needAuth && added && (
<div className='flex items-center gap-1 rounded-[6px] border border-gray-100 bg-white px-2 py-[3px] text-xs font-medium leading-[18px] text-gray-300'>
<Check className='h-3 w-3' />
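Both the category list and the tool blocks above now route every label through a resolver that accepts either a plain string or a locale-keyed object, instead of indexing `label[language]` directly. A minimal sketch of that defensive pattern, written without Dify's `renderI18nObject` helper (the `I18nText` shape and the fallback order are assumptions made for the example):

```ts
type I18nText = Record<string, string> // e.g. { en_US: 'Search', ja_JP: '検索' }

// Resolve a value that may be a plain string, a locale map, or missing entirely.
const resolveText = (value: I18nText | string | undefined, language: string): string => {
  if (!value)
    return ''
  if (typeof value === 'string')
    return value
  // Prefer the requested language, then English, then any available entry.
  return value[language] ?? value.en_US ?? Object.values(value)[0] ?? ''
}

// Never throws, even for missing or partially translated labels.
console.log(resolveText({ en_US: 'Search' }, 'ja_JP')) // -> 'Search'
console.log(resolveText(undefined, 'en_US'))           // -> ''
```

The change appears to be aimed at robustness: a tool or label that ships only a string, or no label at all, can no longer throw while rendering the block selector.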
@ -12,7 +12,7 @@ import ConversationVariableModal from './conversation-variable-modal'
import { useChat } from './hooks'
import type { ChatWrapperRefType } from './index'
import Chat from '@/app/components/base/chat/chat'
import type { ChatItem, ChatItemInTree, OnSend } from '@/app/components/base/chat/types'
import type { ChatItem, OnSend } from '@/app/components/base/chat/types'
import { useFeatures } from '@/app/components/base/features/hooks'
import {
fetchSuggestedQuestions,

@ -117,7 +117,7 @@ const ChatWrapper = (
)
}, [handleSend, workflowStore, conversationId, chatList, appDetail])

const doRegenerate = useCallback((chatItem: ChatItemInTree, editedQuestion?: { message: string, files?: FileEntity[] }) => {
const doRegenerate = useCallback((chatItem: ChatItem, editedQuestion?: { message: string, files?: FileEntity[] }) => {
const question = editedQuestion ? chatItem : chatList.find(item => item.id === chatItem.parentMessageId)!
const parentAnswer = chatList.find(item => item.id === question.parentMessageId)
doSend(editedQuestion ? editedQuestion.message : question.content,
@ -4,18 +4,18 @@ import { cloneDeep } from 'lodash-es'
import type {
Edge,
Node,
} from '../types'
} from '@/app/components/workflow/types'
import {
BlockEnum,
} from '../types'
} from '@/app/components/workflow/types'
import {
CUSTOM_NODE,
NODE_LAYOUT_HORIZONTAL_PADDING,
NODE_LAYOUT_VERTICAL_PADDING,
} from '../constants'
import { CUSTOM_ITERATION_START_NODE } from '../nodes/iteration-start/constants'
import { CUSTOM_LOOP_START_NODE } from '../nodes/loop-start/constants'
import type { CaseItem, IfElseNodeType } from '../nodes/if-else/types'
} from '@/app/components/workflow/constants'
import { CUSTOM_ITERATION_START_NODE } from '@/app/components/workflow/nodes/iteration-start/constants'
import { CUSTOM_LOOP_START_NODE } from '@/app/components/workflow/nodes/loop-start/constants'
import type { CaseItem, IfElseNodeType } from '@/app/components/workflow/nodes/if-else/types'

// Although the file name refers to Dagre, the implementation now relies on ELK's layered algorithm.
// Keep the export signatures unchanged to minimise the blast radius while we migrate the layout stack.

@ -1,7 +1,7 @@
export * from './node'
export * from './edge'
export * from './workflow-init'
export * from './layout'
export * from './elk-layout'
export * from './common'
export * from './tool'
export * from './workflow'
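The comment added in the layout utility states the intent: the file keeps its Dagre-era export signatures while the actual work is now done by ELK's layered algorithm, and the barrel file swaps `./layout` for `./elk-layout`. Purely as an illustration of that "stable facade, swapped engine" idea, with every name below invented rather than taken from the diff, a sketch:

```ts
// elk-layout.ts (sketch): new engine behind an unchanged-looking API.
type Position = { x: number; y: number }
type LayoutResult = Map<string, Position>

// Placeholder for the real ELK-based layered computation.
const computeLayeredLayout = (nodeIds: string[]): LayoutResult =>
  new Map(nodeIds.map((id, index): [string, Position] => [id, { x: index * 200, y: 0 }]))

// Exported under the signature callers already depend on, so swapping the
// underlying algorithm does not ripple through the rest of the workflow code.
export const getWorkflowLayout = (nodeIds: string[]): LayoutResult =>
  computeLayeredLayout(nodeIds)
```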
@ -57,6 +57,7 @@ const LocaleLayout = async ({
[DatasetAttr.DATA_PUBLIC_ENABLE_WEBSITE_JINAREADER]: process.env.NEXT_PUBLIC_ENABLE_WEBSITE_JINAREADER,
[DatasetAttr.DATA_PUBLIC_ENABLE_WEBSITE_FIRECRAWL]: process.env.NEXT_PUBLIC_ENABLE_WEBSITE_FIRECRAWL,
[DatasetAttr.DATA_PUBLIC_ENABLE_WEBSITE_WATERCRAWL]: process.env.NEXT_PUBLIC_ENABLE_WEBSITE_WATERCRAWL,
[DatasetAttr.DATA_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX]: process.env.NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX,
[DatasetAttr.NEXT_PUBLIC_ZENDESK_WIDGET_KEY]: process.env.NEXT_PUBLIC_ZENDESK_WIDGET_KEY,
[DatasetAttr.NEXT_PUBLIC_ZENDESK_FIELD_ID_ENVIRONMENT]: process.env.NEXT_PUBLIC_ZENDESK_FIELD_ID_ENVIRONMENT,
[DatasetAttr.NEXT_PUBLIC_ZENDESK_FIELD_ID_VERSION]: process.env.NEXT_PUBLIC_ZENDESK_FIELD_ID_VERSION,
@ -135,8 +135,8 @@ const NormalForm = () => {
{!systemFeatures.branding.enabled && <p className='body-md-regular mt-2 text-text-tertiary'>{t('login.joinTipStart')}{workspaceName}{t('login.joinTipEnd')}</p>}
</div>
: <div className="mx-auto w-full">
<h2 className="title-4xl-semi-bold text-text-primary">{t('login.pageTitle')}</h2>
{!systemFeatures.branding.enabled && <p className='body-md-regular mt-2 text-text-tertiary'>{t('login.welcome')}</p>}
<h2 className="title-4xl-semi-bold text-text-primary">{systemFeatures.branding.enabled ? t('login.pageTitleForE') : t('login.pageTitle')}</h2>
<p className='body-md-regular mt-2 text-text-tertiary'>{t('login.welcome')}</p>
</div>}
<div className="relative">
<div className="mt-6 flex flex-col gap-3">
@ -1,4 +1,4 @@
import { OAUTH_AUTHORIZE_PENDING_KEY, REDIRECT_URL_KEY } from '@/app/account/oauth/authorize/page'
import { OAUTH_AUTHORIZE_PENDING_KEY, REDIRECT_URL_KEY } from '@/app/account/oauth/authorize/constants'
import dayjs from 'dayjs'
import type { ReadonlyURLSearchParams } from 'next/navigation'
@ -375,6 +375,11 @@ export const ENABLE_WEBSITE_WATERCRAWL = getBooleanConfig(
DatasetAttr.DATA_PUBLIC_ENABLE_WEBSITE_WATERCRAWL,
false,
)
export const ENABLE_SINGLE_DOLLAR_LATEX = getBooleanConfig(
process.env.NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX,
DatasetAttr.DATA_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX,
false,
)

export const VALUE_SELECTOR_DELIMITER = '@@@'
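The hunk above registers the new `ENABLE_SINGLE_DOLLAR_LATEX` flag through `getBooleanConfig`, fed from `process.env.NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX` together with a `data-*` attribute key (`DatasetAttr.DATA_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX`) and a `false` default; the layout hunk earlier appears to stamp that attribute from the same environment variable, and the docker entrypoint hunk further below exports it for the Next.js build. `getBooleanConfig` itself is not shown in this diff, so the helper below is only a guess at what such an env-or-DOM-attribute reader could look like, not Dify's implementation:

```ts
// Sketch of a boolean flag resolver (assumed behaviour, hypothetical names).
const readBodyAttr = (attr: string): string | undefined => {
  if (typeof document === 'undefined')
    return undefined // no DOM to read from during server-side rendering
  return document.body.getAttribute(attr) ?? undefined
}

const getBooleanConfigSketch = (
  envValue: string | undefined, // baked in at build time via NEXT_PUBLIC_*
  datasetAttr: string,          // attribute assumed to be stamped onto the DOM at runtime
  defaultValue: boolean,
): boolean => {
  const raw = envValue ?? readBodyAttr(datasetAttr)
  if (raw === undefined || raw === '')
    return defaultValue
  return raw === 'true' || raw === '1'
}

// Usage mirroring the call in the hunk above (attribute name is hypothetical):
const ENABLE_SINGLE_DOLLAR_LATEX_SKETCH = getBooleanConfigSketch(
  process.env.NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX,
  'data-public-enable-single-dollar-latex',
  false,
)
```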
@ -210,6 +210,8 @@ const DebugConfigurationContext = createContext<IDebugConfiguration>({
prompt_template: '',
prompt_variables: [],
},
chat_prompt_config: DEFAULT_CHAT_PROMPT_CONFIG,
completion_prompt_config: DEFAULT_COMPLETION_PROMPT_CONFIG,
more_like_this: null,
opening_statement: '',
suggested_questions: [],

@ -220,6 +222,14 @@ const DebugConfigurationContext = createContext<IDebugConfiguration>({
suggested_questions_after_answer: null,
retriever_resource: null,
annotation_reply: null,
external_data_tools: [],
system_parameters: {
audio_file_size_limit: 0,
file_size_limit: 0,
image_file_size_limit: 0,
video_file_size_limit: 0,
workflow_file_upload_limit: 0,
},
dataSets: [],
agentConfig: DEFAULT_AGENT_SETTING,
},
@ -34,6 +34,7 @@ export NEXT_PUBLIC_MAX_TOOLS_NUM=${MAX_TOOLS_NUM}
export NEXT_PUBLIC_ENABLE_WEBSITE_JINAREADER=${ENABLE_WEBSITE_JINAREADER:-true}
export NEXT_PUBLIC_ENABLE_WEBSITE_FIRECRAWL=${ENABLE_WEBSITE_FIRECRAWL:-true}
export NEXT_PUBLIC_ENABLE_WEBSITE_WATERCRAWL=${ENABLE_WEBSITE_WATERCRAWL:-true}
export NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX=${NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX:-false}
export NEXT_PUBLIC_LOOP_NODE_MAX_COUNT=${LOOP_NODE_MAX_COUNT}
export NEXT_PUBLIC_MAX_PARALLEL_LIMIT=${MAX_PARALLEL_LIMIT}
export NEXT_PUBLIC_MAX_ITERATIONS_NUM=${MAX_ITERATIONS_NUM}
@ -120,6 +120,7 @@ const translation = {
noAccount: 'Haben Sie kein Konto?',
verifyMail: 'Fahren Sie mit dem Bestätigungscode fort',
},
pageTitleForE: 'Hey, lass uns anfangen!',
}

export default translation

@ -38,7 +38,7 @@ const translation = {
button: 'Drag and drop file or folder, or',
buttonSingleFile: 'Drag and drop file, or',
browse: 'Browse',
tip: 'Supports {{supportTypes}}. Max {{batchCount}} in a batch and {{size}} MB each.',
tip: 'Supports {{supportTypes}}. Max {{batchCount}} in a batch and {{size}} MB each. Max total {{totalCount}} files.',
validation: {
typeError: 'File type not supported',
size: 'File too large. Maximum is {{size}}MB',

@ -1,5 +1,6 @@
const translation = {
pageTitle: 'Log in to Dify',
pageTitleForE: 'Hey, let\'s get started!',
welcome: '👋 Welcome! Please log in to get started.',
email: 'Email address',
emailPlaceholder: 'Your email',

@ -120,6 +120,7 @@ const translation = {
welcome: '👋 ¡Bienvenido! Por favor, completa los detalles para comenzar.',
verifyMail: 'Continuar con el código de verificación',
},
pageTitleForE: '¡Hola, vamos a empezar!',
}

export default translation

@ -120,6 +120,7 @@ const translation = {
noAccount: 'حساب کاربری ندارید؟',
verifyMail: 'ادامه با کد تأیید',
},
pageTitleForE: 'هی، بیا شروع کنیم!',
}

export default translation

@ -120,6 +120,7 @@ const translation = {
verifyMail: 'Continuez avec le code de vérification',
createAccount: 'Créez votre compte',
},
pageTitleForE: 'Hé, commençons !',
}

export default translation

@ -125,6 +125,7 @@ const translation = {
welcome: '👋 स्वागत है! कृपया शुरू करने के लिए विवरण भरें।',
haveAccount: 'क्या आपका पहले से एक खाता है?',
},
pageTitleForE: 'अरे, चलो शुरू करें!',
}

export default translation

@ -120,6 +120,7 @@ const translation = {
noAccount: 'Tidak punya akun?',
welcome: '👋 Selamat datang! Silakan isi detail untuk memulai.',
},
pageTitleForE: 'Hei, ayo kita mulai!',
}

export default translation

@ -130,6 +130,7 @@ const translation = {
signUp: 'Iscriviti',
welcome: '👋 Benvenuto! Per favore compila i dettagli per iniziare.',
},
pageTitleForE: 'Ehi, cominciamo!',
}

export default translation

@ -1,5 +1,6 @@
const translation = {
pageTitle: 'Dify にログイン',
pageTitleForE: 'はじめましょう!',
welcome: '👋 ようこそ!まずはログインしてご利用ください。',
email: 'メールアドレス',
emailPlaceholder: 'メールアドレスを入力してください',

@ -120,6 +120,7 @@ const translation = {
noAccount: '계정이 없으신가요?',
welcome: '👋 환영합니다! 시작하려면 세부 정보를 입력해 주세요.',
},
pageTitleForE: '이봐, 시작하자!',
}

export default translation

@ -125,6 +125,7 @@ const translation = {
haveAccount: 'Masz już konto?',
welcome: '👋 Witaj! Proszę wypełnić szczegóły, aby rozpocząć.',
},
pageTitleForE: 'Hej, zaczynajmy!',
}

export default translation

@ -120,6 +120,7 @@ const translation = {
signUp: 'Inscreva-se',
welcome: '👋 Bem-vindo! Por favor, preencha os detalhes para começar.',
},
pageTitleForE: 'Ei, vamos começar!',
}

export default translation

@ -120,6 +120,7 @@ const translation = {
createAccount: 'Creează-ți contul',
welcome: '👋 Buna! Te rugăm să completezi detaliile pentru a începe.',
},
pageTitleForE: 'Hei, hai să începem!',
}

export default translation

@ -120,6 +120,7 @@ const translation = {
verifyMail: 'Продолжите с кодом проверки',
welcome: '👋 Добро пожаловать! Пожалуйста, заполните данные, чтобы начать.',
},
pageTitleForE: 'Привет, давай начнем!',
}

export default translation

@ -120,6 +120,7 @@ const translation = {
noAccount: 'Nimate računa?',
welcome: '👋 Dobrodošli! Prosimo, izpolnite podatke, da začnete.',
},
pageTitleForE: 'Hej, začnimo!',
}

export default translation

@ -120,6 +120,7 @@ const translation = {
verifyMail: 'โปรดดำเนินการต่อด้วยรหัสการตรวจสอบ',
haveAccount: 'มีบัญชีอยู่แล้วใช่ไหม?',
},
pageTitleForE: 'เฮ้ เรามาเริ่มกันเถอะ!',
}

export default translation

@ -120,6 +120,7 @@ const translation = {
haveAccount: 'Zaten bir hesabınız var mı?',
welcome: '👋 Hoş geldiniz! Başlamak için lütfen detayları doldurun.',
},
pageTitleForE: 'Hey, haydi başlayalım!',
}

export default translation

@ -120,6 +120,7 @@ const translation = {
noAccount: 'Не маєте облікового запису?',
welcome: '👋 Ласкаво просимо! Будь ласка, заповніть деталі, щоб почати.',
},
pageTitleForE: 'Гей, давай почнемо!',
}

export default translation
Some files were not shown because too many files have changed in this diff.