mirror of https://github.com/langgenius/dify.git
Merge branch 'main' into fix/secret_variable
This commit is contained in:
commit
682bad8c68
|
|
@ -14,4 +14,4 @@ yq eval '.services.tidb.ports += ["4000:4000"]' -i docker/tidb/docker-compose.ya
|
|||
yq eval '.services.oceanbase.ports += ["2881:2881"]' -i docker/docker-compose.yaml
|
||||
yq eval '.services.opengauss.ports += ["6600:6600"]' -i docker/docker-compose.yaml
|
||||
|
||||
echo "Ports exposed for sandbox, weaviate, tidb, qdrant, chroma, milvus, pgvector, pgvecto-rs, elasticsearch, couchbase, opengauss"
|
||||
echo "Ports exposed for sandbox, weaviate (HTTP 8080, gRPC 50051), tidb, qdrant, chroma, milvus, pgvector, pgvecto-rs, elasticsearch, couchbase, opengauss"
|
||||
|
|
|
|||
10
README.md
10
README.md
|
|
@ -129,8 +129,18 @@ Star Dify on GitHub and be instantly notified of new releases.
|
|||
|
||||
## Advanced Setup
|
||||
|
||||
### Custom configurations
|
||||
|
||||
If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker-compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).
|
||||
|
||||
### Metrics Monitoring with Grafana
|
||||
|
||||
Import the dashboard to Grafana, using Dify's PostgreSQL database as data source, to monitor metrics in granularity of apps, tenants, messages, and more.
|
||||
|
||||
- [Grafana Dashboard by @bowenliang123](https://github.com/bowenliang123/dify-grafana-dashboard)
|
||||
|
||||
### Deployment with Kubernetes
|
||||
|
||||
If you'd like to configure a highly-available setup, there are community-contributed [Helm Charts](https://helm.sh/) and YAML files which allow Dify to be deployed on Kubernetes.
|
||||
|
||||
- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
|
||||
|
|
|
|||
|
|
@ -8,15 +8,81 @@ from controllers.console.app.wraps import get_app_model
|
|||
from controllers.console.wraps import account_initialization_required, setup_required
|
||||
from fields.workflow_run_fields import (
|
||||
advanced_chat_workflow_run_pagination_fields,
|
||||
workflow_run_count_fields,
|
||||
workflow_run_detail_fields,
|
||||
workflow_run_node_execution_list_fields,
|
||||
workflow_run_pagination_fields,
|
||||
)
|
||||
from libs.custom_inputs import time_duration
|
||||
from libs.helper import uuid_value
|
||||
from libs.login import current_user, login_required
|
||||
from models import Account, App, AppMode, EndUser
|
||||
from models import Account, App, AppMode, EndUser, WorkflowRunTriggeredFrom
|
||||
from services.workflow_run_service import WorkflowRunService
|
||||
|
||||
# Workflow run status choices for filtering
|
||||
WORKFLOW_RUN_STATUS_CHOICES = ["running", "succeeded", "failed", "stopped", "partial-succeeded"]
|
||||
|
||||
|
||||
def _parse_workflow_run_list_args():
|
||||
"""
|
||||
Parse common arguments for workflow run list endpoints.
|
||||
|
||||
Returns:
|
||||
Parsed arguments containing last_id, limit, status, and triggered_from filters
|
||||
"""
|
||||
parser = reqparse.RequestParser()
|
||||
parser.add_argument("last_id", type=uuid_value, location="args")
|
||||
parser.add_argument("limit", type=int_range(1, 100), required=False, default=20, location="args")
|
||||
parser.add_argument(
|
||||
"status",
|
||||
type=str,
|
||||
choices=WORKFLOW_RUN_STATUS_CHOICES,
|
||||
location="args",
|
||||
required=False,
|
||||
)
|
||||
parser.add_argument(
|
||||
"triggered_from",
|
||||
type=str,
|
||||
choices=["debugging", "app-run"],
|
||||
location="args",
|
||||
required=False,
|
||||
help="Filter by trigger source: debugging or app-run",
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def _parse_workflow_run_count_args():
|
||||
"""
|
||||
Parse common arguments for workflow run count endpoints.
|
||||
|
||||
Returns:
|
||||
Parsed arguments containing status, time_range, and triggered_from filters
|
||||
"""
|
||||
parser = reqparse.RequestParser()
|
||||
parser.add_argument(
|
||||
"status",
|
||||
type=str,
|
||||
choices=WORKFLOW_RUN_STATUS_CHOICES,
|
||||
location="args",
|
||||
required=False,
|
||||
)
|
||||
parser.add_argument(
|
||||
"time_range",
|
||||
type=time_duration,
|
||||
location="args",
|
||||
required=False,
|
||||
help="Time range filter (e.g., 7d, 4h, 30m, 30s)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"triggered_from",
|
||||
type=str,
|
||||
choices=["debugging", "app-run"],
|
||||
location="args",
|
||||
required=False,
|
||||
help="Filter by trigger source: debugging or app-run",
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
@console_ns.route("/apps/<uuid:app_id>/advanced-chat/workflow-runs")
|
||||
class AdvancedChatAppWorkflowRunListApi(Resource):
|
||||
|
|
@ -24,6 +90,8 @@ class AdvancedChatAppWorkflowRunListApi(Resource):
|
|||
@api.doc(description="Get advanced chat workflow run list")
|
||||
@api.doc(params={"app_id": "Application ID"})
|
||||
@api.doc(params={"last_id": "Last run ID for pagination", "limit": "Number of items per page (1-100)"})
|
||||
@api.doc(params={"status": "Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded"})
|
||||
@api.doc(params={"triggered_from": "Filter by trigger source (optional): debugging or app-run. Default: debugging"})
|
||||
@api.response(200, "Workflow runs retrieved successfully", advanced_chat_workflow_run_pagination_fields)
|
||||
@setup_required
|
||||
@login_required
|
||||
|
|
@ -34,13 +102,64 @@ class AdvancedChatAppWorkflowRunListApi(Resource):
|
|||
"""
|
||||
Get advanced chat app workflow run list
|
||||
"""
|
||||
parser = reqparse.RequestParser()
|
||||
parser.add_argument("last_id", type=uuid_value, location="args")
|
||||
parser.add_argument("limit", type=int_range(1, 100), required=False, default=20, location="args")
|
||||
args = parser.parse_args()
|
||||
args = _parse_workflow_run_list_args()
|
||||
|
||||
# Default to DEBUGGING if not specified
|
||||
triggered_from = (
|
||||
WorkflowRunTriggeredFrom(args.get("triggered_from"))
|
||||
if args.get("triggered_from")
|
||||
else WorkflowRunTriggeredFrom.DEBUGGING
|
||||
)
|
||||
|
||||
workflow_run_service = WorkflowRunService()
|
||||
result = workflow_run_service.get_paginate_advanced_chat_workflow_runs(app_model=app_model, args=args)
|
||||
result = workflow_run_service.get_paginate_advanced_chat_workflow_runs(
|
||||
app_model=app_model, args=args, triggered_from=triggered_from
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@console_ns.route("/apps/<uuid:app_id>/advanced-chat/workflow-runs/count")
|
||||
class AdvancedChatAppWorkflowRunCountApi(Resource):
|
||||
@api.doc("get_advanced_chat_workflow_runs_count")
|
||||
@api.doc(description="Get advanced chat workflow runs count statistics")
|
||||
@api.doc(params={"app_id": "Application ID"})
|
||||
@api.doc(params={"status": "Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded"})
|
||||
@api.doc(
|
||||
params={
|
||||
"time_range": (
|
||||
"Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), "
|
||||
"30m (30 minutes), 30s (30 seconds). Filters by created_at field."
|
||||
)
|
||||
}
|
||||
)
|
||||
@api.doc(params={"triggered_from": "Filter by trigger source (optional): debugging or app-run. Default: debugging"})
|
||||
@api.response(200, "Workflow runs count retrieved successfully", workflow_run_count_fields)
|
||||
@setup_required
|
||||
@login_required
|
||||
@account_initialization_required
|
||||
@get_app_model(mode=[AppMode.ADVANCED_CHAT])
|
||||
@marshal_with(workflow_run_count_fields)
|
||||
def get(self, app_model: App):
|
||||
"""
|
||||
Get advanced chat workflow runs count statistics
|
||||
"""
|
||||
args = _parse_workflow_run_count_args()
|
||||
|
||||
# Default to DEBUGGING if not specified
|
||||
triggered_from = (
|
||||
WorkflowRunTriggeredFrom(args.get("triggered_from"))
|
||||
if args.get("triggered_from")
|
||||
else WorkflowRunTriggeredFrom.DEBUGGING
|
||||
)
|
||||
|
||||
workflow_run_service = WorkflowRunService()
|
||||
result = workflow_run_service.get_workflow_runs_count(
|
||||
app_model=app_model,
|
||||
status=args.get("status"),
|
||||
time_range=args.get("time_range"),
|
||||
triggered_from=triggered_from,
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
|
|
@ -51,6 +170,8 @@ class WorkflowRunListApi(Resource):
|
|||
@api.doc(description="Get workflow run list")
|
||||
@api.doc(params={"app_id": "Application ID"})
|
||||
@api.doc(params={"last_id": "Last run ID for pagination", "limit": "Number of items per page (1-100)"})
|
||||
@api.doc(params={"status": "Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded"})
|
||||
@api.doc(params={"triggered_from": "Filter by trigger source (optional): debugging or app-run. Default: debugging"})
|
||||
@api.response(200, "Workflow runs retrieved successfully", workflow_run_pagination_fields)
|
||||
@setup_required
|
||||
@login_required
|
||||
|
|
@ -61,13 +182,64 @@ class WorkflowRunListApi(Resource):
|
|||
"""
|
||||
Get workflow run list
|
||||
"""
|
||||
parser = reqparse.RequestParser()
|
||||
parser.add_argument("last_id", type=uuid_value, location="args")
|
||||
parser.add_argument("limit", type=int_range(1, 100), required=False, default=20, location="args")
|
||||
args = parser.parse_args()
|
||||
args = _parse_workflow_run_list_args()
|
||||
|
||||
# Default to DEBUGGING for workflow if not specified (backward compatibility)
|
||||
triggered_from = (
|
||||
WorkflowRunTriggeredFrom(args.get("triggered_from"))
|
||||
if args.get("triggered_from")
|
||||
else WorkflowRunTriggeredFrom.DEBUGGING
|
||||
)
|
||||
|
||||
workflow_run_service = WorkflowRunService()
|
||||
result = workflow_run_service.get_paginate_workflow_runs(app_model=app_model, args=args)
|
||||
result = workflow_run_service.get_paginate_workflow_runs(
|
||||
app_model=app_model, args=args, triggered_from=triggered_from
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@console_ns.route("/apps/<uuid:app_id>/workflow-runs/count")
|
||||
class WorkflowRunCountApi(Resource):
|
||||
@api.doc("get_workflow_runs_count")
|
||||
@api.doc(description="Get workflow runs count statistics")
|
||||
@api.doc(params={"app_id": "Application ID"})
|
||||
@api.doc(params={"status": "Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded"})
|
||||
@api.doc(
|
||||
params={
|
||||
"time_range": (
|
||||
"Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), "
|
||||
"30m (30 minutes), 30s (30 seconds). Filters by created_at field."
|
||||
)
|
||||
}
|
||||
)
|
||||
@api.doc(params={"triggered_from": "Filter by trigger source (optional): debugging or app-run. Default: debugging"})
|
||||
@api.response(200, "Workflow runs count retrieved successfully", workflow_run_count_fields)
|
||||
@setup_required
|
||||
@login_required
|
||||
@account_initialization_required
|
||||
@get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
|
||||
@marshal_with(workflow_run_count_fields)
|
||||
def get(self, app_model: App):
|
||||
"""
|
||||
Get workflow runs count statistics
|
||||
"""
|
||||
args = _parse_workflow_run_count_args()
|
||||
|
||||
# Default to DEBUGGING for workflow if not specified (backward compatibility)
|
||||
triggered_from = (
|
||||
WorkflowRunTriggeredFrom(args.get("triggered_from"))
|
||||
if args.get("triggered_from")
|
||||
else WorkflowRunTriggeredFrom.DEBUGGING
|
||||
)
|
||||
|
||||
workflow_run_service = WorkflowRunService()
|
||||
result = workflow_run_service.get_workflow_runs_count(
|
||||
app_model=app_model,
|
||||
status=args.get("status"),
|
||||
time_range=args.get("time_range"),
|
||||
triggered_from=triggered_from,
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
|
|
|
|||
|
|
@ -468,7 +468,6 @@ class DatasetApi(Resource):
|
|||
dataset_id_str = str(dataset_id)
|
||||
current_user, _ = current_account_with_tenant()
|
||||
|
||||
# The role of the current user in the ta table must be admin, owner, or editor
|
||||
if not (current_user.has_edit_permission or current_user.is_dataset_operator):
|
||||
raise Forbidden()
|
||||
|
||||
|
|
|
|||
|
|
@ -150,7 +150,6 @@ class ExternalApiTemplateApi(Resource):
|
|||
current_user, current_tenant_id = current_account_with_tenant()
|
||||
external_knowledge_api_id = str(external_knowledge_api_id)
|
||||
|
||||
# The role of the current user in the ta table must be admin, owner, or editor
|
||||
if not (current_user.has_edit_permission or current_user.is_dataset_operator):
|
||||
raise Forbidden()
|
||||
|
||||
|
|
|
|||
|
|
@ -937,11 +937,10 @@ class RagPipelineTransformApi(Resource):
|
|||
@setup_required
|
||||
@login_required
|
||||
@account_initialization_required
|
||||
@edit_permission_required
|
||||
def post(self, dataset_id: str):
|
||||
current_user, _ = current_account_with_tenant()
|
||||
|
||||
if not current_user.is_dataset_operator:
|
||||
if not (current_user.has_edit_permission or current_user.is_dataset_operator):
|
||||
raise Forbidden()
|
||||
|
||||
dataset_id = str(dataset_id)
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ from core.errors.error import (
|
|||
from core.model_runtime.errors.invoke import InvokeError
|
||||
from core.workflow.graph_engine.manager import GraphEngineManager
|
||||
from libs import helper
|
||||
from libs.login import current_user
|
||||
from libs.login import current_user as current_user_
|
||||
from models.model import AppMode, InstalledApp
|
||||
from services.app_generate_service import AppGenerateService
|
||||
from services.errors.llm import InvokeRateLimitError
|
||||
|
|
@ -31,6 +31,8 @@ from .. import console_ns
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
current_user = current_user_._get_current_object() # type: ignore
|
||||
|
||||
|
||||
@console_ns.route("/installed-apps/<uuid:installed_app_id>/workflows/run")
|
||||
class InstalledAppWorkflowRunApi(InstalledAppResource):
|
||||
|
|
|
|||
|
|
@ -67,7 +67,6 @@ class APIBasedExtensionAPI(Resource):
|
|||
@account_initialization_required
|
||||
@marshal_with(api_based_extension_fields)
|
||||
def post(self):
|
||||
_, current_tenant_id = current_account_with_tenant()
|
||||
parser = reqparse.RequestParser()
|
||||
parser.add_argument("name", type=str, required=True, location="json")
|
||||
parser.add_argument("api_endpoint", type=str, required=True, location="json")
|
||||
|
|
|
|||
|
|
@ -303,7 +303,12 @@ def edit_permission_required(f: Callable[P, R]):
|
|||
def decorated_function(*args: P.args, **kwargs: P.kwargs):
|
||||
from werkzeug.exceptions import Forbidden
|
||||
|
||||
current_user, _ = current_account_with_tenant()
|
||||
from libs.login import current_user
|
||||
from models import Account
|
||||
|
||||
user = current_user._get_current_object() # type: ignore
|
||||
if not isinstance(user, Account):
|
||||
raise Forbidden()
|
||||
if not current_user.has_edit_permission:
|
||||
raise Forbidden()
|
||||
return f(*args, **kwargs)
|
||||
|
|
|
|||
|
|
@ -1,11 +1,9 @@
|
|||
import logging
|
||||
from threading import Lock
|
||||
from typing import Union
|
||||
|
||||
import contexts
|
||||
from core.datasource.__base.datasource_plugin import DatasourcePlugin
|
||||
from core.datasource.__base.datasource_provider import DatasourcePluginProviderController
|
||||
from core.datasource.entities.common_entities import I18nObject
|
||||
from core.datasource.entities.datasource_entities import DatasourceProviderType
|
||||
from core.datasource.errors import DatasourceProviderNotFoundError
|
||||
from core.datasource.local_file.local_file_provider import LocalFileDatasourcePluginProviderController
|
||||
|
|
@ -18,11 +16,6 @@ logger = logging.getLogger(__name__)
|
|||
|
||||
|
||||
class DatasourceManager:
|
||||
_builtin_provider_lock = Lock()
|
||||
_hardcoded_providers: dict[str, DatasourcePluginProviderController] = {}
|
||||
_builtin_providers_loaded = False
|
||||
_builtin_tools_labels: dict[str, Union[I18nObject, None]] = {}
|
||||
|
||||
@classmethod
|
||||
def get_datasource_plugin_provider(
|
||||
cls, provider_id: str, tenant_id: str, datasource_type: DatasourceProviderType
|
||||
|
|
|
|||
|
|
@ -250,7 +250,6 @@ class WeaviateVector(BaseVector):
|
|||
)
|
||||
)
|
||||
|
||||
batch_size = max(1, int(dify_config.WEAVIATE_BATCH_SIZE or 100))
|
||||
with col.batch.dynamic() as batch:
|
||||
for obj in objs:
|
||||
batch.add_object(properties=obj.properties, uuid=obj.uuid, vector=obj.vector)
|
||||
|
|
@ -348,7 +347,10 @@ class WeaviateVector(BaseVector):
|
|||
for obj in res.objects:
|
||||
properties = dict(obj.properties or {})
|
||||
text = properties.pop(Field.TEXT_KEY.value, "")
|
||||
distance = (obj.metadata.distance if obj.metadata else None) or 1.0
|
||||
if obj.metadata and obj.metadata.distance is not None:
|
||||
distance = obj.metadata.distance
|
||||
else:
|
||||
distance = 1.0
|
||||
score = 1.0 - distance
|
||||
|
||||
if score > score_threshold:
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ import logging
|
|||
from collections.abc import Generator
|
||||
from typing import Any
|
||||
|
||||
from flask import has_request_context
|
||||
from sqlalchemy import select
|
||||
|
||||
from core.file import FILE_MODEL_IDENTITY, File, FileTransferMethod
|
||||
|
|
@ -18,7 +19,8 @@ from core.tools.errors import ToolInvokeError
|
|||
from extensions.ext_database import db
|
||||
from factories.file_factory import build_from_mapping
|
||||
from libs.login import current_user
|
||||
from models.model import App
|
||||
from models import Account, Tenant
|
||||
from models.model import App, EndUser
|
||||
from models.workflow import Workflow
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
|
@ -79,11 +81,16 @@ class WorkflowTool(Tool):
|
|||
generator = WorkflowAppGenerator()
|
||||
assert self.runtime is not None
|
||||
assert self.runtime.invoke_from is not None
|
||||
assert current_user is not None
|
||||
|
||||
user = self._resolve_user(user_id=user_id)
|
||||
|
||||
if user is None:
|
||||
raise ToolInvokeError("User not found")
|
||||
|
||||
result = generator.generate(
|
||||
app_model=app,
|
||||
workflow=workflow,
|
||||
user=current_user,
|
||||
user=user,
|
||||
args={"inputs": tool_parameters, "files": files},
|
||||
invoke_from=self.runtime.invoke_from,
|
||||
streaming=False,
|
||||
|
|
@ -123,6 +130,51 @@ class WorkflowTool(Tool):
|
|||
label=self.label,
|
||||
)
|
||||
|
||||
def _resolve_user(self, user_id: str) -> Account | EndUser | None:
|
||||
"""
|
||||
Resolve user object in both HTTP and worker contexts.
|
||||
|
||||
In HTTP context: dereference the current_user LocalProxy (can return Account or EndUser).
|
||||
In worker context: load Account from database by user_id (only returns Account, never EndUser).
|
||||
|
||||
Returns:
|
||||
Account | EndUser | None: The resolved user object, or None if resolution fails.
|
||||
"""
|
||||
if has_request_context():
|
||||
return self._resolve_user_from_request()
|
||||
else:
|
||||
return self._resolve_user_from_database(user_id=user_id)
|
||||
|
||||
def _resolve_user_from_request(self) -> Account | EndUser | None:
|
||||
"""
|
||||
Resolve user from Flask request context.
|
||||
"""
|
||||
try:
|
||||
# Note: `current_user` is a LocalProxy. Never compare it with None directly.
|
||||
return getattr(current_user, "_get_current_object", lambda: current_user)()
|
||||
except Exception as e:
|
||||
logger.warning("Failed to resolve user from request context: %s", e)
|
||||
return None
|
||||
|
||||
def _resolve_user_from_database(self, user_id: str) -> Account | None:
|
||||
"""
|
||||
Resolve user from database (worker/Celery context).
|
||||
"""
|
||||
|
||||
user_stmt = select(Account).where(Account.id == user_id)
|
||||
user = db.session.scalar(user_stmt)
|
||||
if not user:
|
||||
return None
|
||||
|
||||
tenant_stmt = select(Tenant).where(Tenant.id == self.runtime.tenant_id)
|
||||
tenant = db.session.scalar(tenant_stmt)
|
||||
if not tenant:
|
||||
return None
|
||||
|
||||
user.current_tenant = tenant
|
||||
|
||||
return user
|
||||
|
||||
def _get_workflow(self, app_id: str, version: str) -> Workflow:
|
||||
"""
|
||||
get the workflow by app id and version
|
||||
|
|
|
|||
|
|
@ -99,6 +99,8 @@ class Dispatcher:
|
|||
self._execution_coordinator.check_commands()
|
||||
self._event_queue.task_done()
|
||||
except queue.Empty:
|
||||
# Process commands even when no new events arrive so abort requests are not missed
|
||||
self._execution_coordinator.check_commands()
|
||||
# Check if execution is complete
|
||||
if self._execution_coordinator.is_execution_complete():
|
||||
break
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
import json
|
||||
import re
|
||||
from collections.abc import Mapping, Sequence
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
|
|
@ -194,6 +195,8 @@ class QuestionClassifierNode(Node):
|
|||
|
||||
category_name = node_data.classes[0].name
|
||||
category_id = node_data.classes[0].id
|
||||
if "<think>" in result_text:
|
||||
result_text = re.sub(r"<think[^>]*>[\s\S]*?</think>", "", result_text, flags=re.IGNORECASE)
|
||||
result_text_json = parse_and_check_json_markdown(result_text, [])
|
||||
# result_text_json = json.loads(result_text.strip('```JSON\n'))
|
||||
if "category_name" in result_text_json and "category_id" in result_text_json:
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ def build_from_message_files(
|
|||
*,
|
||||
message_files: Sequence["MessageFile"],
|
||||
tenant_id: str,
|
||||
config: FileUploadConfig,
|
||||
config: FileUploadConfig | None = None,
|
||||
) -> Sequence[File]:
|
||||
results = [
|
||||
build_from_message_file(message_file=file, tenant_id=tenant_id, config=config)
|
||||
|
|
@ -35,15 +35,18 @@ def build_from_message_file(
|
|||
*,
|
||||
message_file: "MessageFile",
|
||||
tenant_id: str,
|
||||
config: FileUploadConfig,
|
||||
config: FileUploadConfig | None,
|
||||
):
|
||||
mapping = {
|
||||
"transfer_method": message_file.transfer_method,
|
||||
"url": message_file.url,
|
||||
"id": message_file.id,
|
||||
"type": message_file.type,
|
||||
}
|
||||
|
||||
# Only include id if it exists (message_file has been committed to DB)
|
||||
if message_file.id:
|
||||
mapping["id"] = message_file.id
|
||||
|
||||
# Set the correct ID field based on transfer method
|
||||
if message_file.transfer_method == FileTransferMethod.TOOL_FILE:
|
||||
mapping["tool_file_id"] = message_file.upload_file_id
|
||||
|
|
@ -163,7 +166,10 @@ def _build_from_local_file(
|
|||
if strict_type_validation and detected_file_type.value != specified_type:
|
||||
raise ValueError("Detected file type does not match the specified type. Please verify the file.")
|
||||
|
||||
file_type = FileType(specified_type) if specified_type and specified_type != FileType.CUSTOM else detected_file_type
|
||||
if specified_type and specified_type != "custom":
|
||||
file_type = FileType(specified_type)
|
||||
else:
|
||||
file_type = detected_file_type
|
||||
|
||||
return File(
|
||||
id=mapping.get("id"),
|
||||
|
|
@ -211,9 +217,10 @@ def _build_from_remote_url(
|
|||
if strict_type_validation and specified_type and detected_file_type.value != specified_type:
|
||||
raise ValueError("Detected file type does not match the specified type. Please verify the file.")
|
||||
|
||||
file_type = (
|
||||
FileType(specified_type) if specified_type and specified_type != FileType.CUSTOM else detected_file_type
|
||||
)
|
||||
if specified_type and specified_type != "custom":
|
||||
file_type = FileType(specified_type)
|
||||
else:
|
||||
file_type = detected_file_type
|
||||
|
||||
return File(
|
||||
id=mapping.get("id"),
|
||||
|
|
@ -235,10 +242,17 @@ def _build_from_remote_url(
|
|||
mime_type, filename, file_size = _get_remote_file_info(url)
|
||||
extension = mimetypes.guess_extension(mime_type) or ("." + filename.split(".")[-1] if "." in filename else ".bin")
|
||||
|
||||
file_type = _standardize_file_type(extension=extension, mime_type=mime_type)
|
||||
if file_type.value != mapping.get("type", "custom"):
|
||||
detected_file_type = _standardize_file_type(extension=extension, mime_type=mime_type)
|
||||
specified_type = mapping.get("type")
|
||||
|
||||
if strict_type_validation and specified_type and detected_file_type.value != specified_type:
|
||||
raise ValueError("Detected file type does not match the specified type. Please verify the file.")
|
||||
|
||||
if specified_type and specified_type != "custom":
|
||||
file_type = FileType(specified_type)
|
||||
else:
|
||||
file_type = detected_file_type
|
||||
|
||||
return File(
|
||||
id=mapping.get("id"),
|
||||
filename=filename,
|
||||
|
|
@ -328,7 +342,10 @@ def _build_from_tool_file(
|
|||
if strict_type_validation and specified_type and detected_file_type.value != specified_type:
|
||||
raise ValueError("Detected file type does not match the specified type. Please verify the file.")
|
||||
|
||||
file_type = FileType(specified_type) if specified_type and specified_type != FileType.CUSTOM else detected_file_type
|
||||
if specified_type and specified_type != "custom":
|
||||
file_type = FileType(specified_type)
|
||||
else:
|
||||
file_type = detected_file_type
|
||||
|
||||
return File(
|
||||
id=mapping.get("id"),
|
||||
|
|
@ -373,7 +390,10 @@ def _build_from_datasource_file(
|
|||
if strict_type_validation and specified_type and detected_file_type.value != specified_type:
|
||||
raise ValueError("Detected file type does not match the specified type. Please verify the file.")
|
||||
|
||||
file_type = FileType(specified_type) if specified_type and specified_type != FileType.CUSTOM else detected_file_type
|
||||
if specified_type and specified_type != "custom":
|
||||
file_type = FileType(specified_type)
|
||||
else:
|
||||
file_type = detected_file_type
|
||||
|
||||
return File(
|
||||
id=mapping.get("datasource_file_id"),
|
||||
|
|
|
|||
|
|
@ -64,6 +64,15 @@ workflow_run_pagination_fields = {
|
|||
"data": fields.List(fields.Nested(workflow_run_for_list_fields), attribute="data"),
|
||||
}
|
||||
|
||||
workflow_run_count_fields = {
|
||||
"total": fields.Integer,
|
||||
"running": fields.Integer,
|
||||
"succeeded": fields.Integer,
|
||||
"failed": fields.Integer,
|
||||
"stopped": fields.Integer,
|
||||
"partial_succeeded": fields.Integer(attribute="partial-succeeded"),
|
||||
}
|
||||
|
||||
workflow_run_detail_fields = {
|
||||
"id": fields.String,
|
||||
"version": fields.String,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,32 @@
|
|||
"""Custom input types for Flask-RESTX request parsing."""
|
||||
|
||||
import re
|
||||
|
||||
|
||||
def time_duration(value: str) -> str:
|
||||
"""
|
||||
Validate and return time duration string.
|
||||
|
||||
Accepts formats: <number>d (days), <number>h (hours), <number>m (minutes), <number>s (seconds)
|
||||
Examples: 7d, 4h, 30m, 30s
|
||||
|
||||
Args:
|
||||
value: The time duration string
|
||||
|
||||
Returns:
|
||||
The validated time duration string
|
||||
|
||||
Raises:
|
||||
ValueError: If the format is invalid
|
||||
"""
|
||||
if not value:
|
||||
raise ValueError("Time duration cannot be empty")
|
||||
|
||||
pattern = r"^(\d+)([dhms])$"
|
||||
if not re.match(pattern, value.lower()):
|
||||
raise ValueError(
|
||||
"Invalid time duration format. Use: <number>d (days), <number>h (hours), "
|
||||
"<number>m (minutes), or <number>s (seconds). Examples: 7d, 4h, 30m, 30s"
|
||||
)
|
||||
|
||||
return value.lower()
|
||||
|
|
@ -6,22 +6,22 @@ from core.llm_generator.output_parser.errors import OutputParserError
|
|||
def parse_json_markdown(json_string: str):
|
||||
# Get json from the backticks/braces
|
||||
json_string = json_string.strip()
|
||||
starts = ["```json", "```", "``", "`", "{"]
|
||||
ends = ["```", "``", "`", "}"]
|
||||
starts = ["```json", "```", "``", "`", "{", "["]
|
||||
ends = ["```", "``", "`", "}", "]"]
|
||||
end_index = -1
|
||||
start_index = 0
|
||||
parsed: dict = {}
|
||||
for s in starts:
|
||||
start_index = json_string.find(s)
|
||||
if start_index != -1:
|
||||
if json_string[start_index] != "{":
|
||||
if json_string[start_index] not in ("{", "["):
|
||||
start_index += len(s)
|
||||
break
|
||||
if start_index != -1:
|
||||
for e in ends:
|
||||
end_index = json_string.rfind(e, start_index)
|
||||
if end_index != -1:
|
||||
if json_string[end_index] == "}":
|
||||
if json_string[end_index] in ("}", "]"):
|
||||
end_index += 1
|
||||
break
|
||||
if start_index != -1 and end_index != -1 and start_index < end_index:
|
||||
|
|
@ -38,6 +38,12 @@ def parse_and_check_json_markdown(text: str, expected_keys: list[str]):
|
|||
json_obj = parse_json_markdown(text)
|
||||
except json.JSONDecodeError as e:
|
||||
raise OutputParserError(f"got invalid json object. error: {e}")
|
||||
|
||||
if isinstance(json_obj, list):
|
||||
if len(json_obj) == 1 and isinstance(json_obj[0], dict):
|
||||
json_obj = json_obj[0]
|
||||
else:
|
||||
raise OutputParserError(f"got invalid return object. obj:{json_obj}")
|
||||
for key in expected_keys:
|
||||
if key not in json_obj:
|
||||
raise OutputParserError(
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
from collections.abc import Callable
|
||||
from functools import wraps
|
||||
from typing import Union, cast
|
||||
from typing import Any
|
||||
|
||||
from flask import current_app, g, has_request_context, request
|
||||
from flask_login.config import EXEMPT_METHODS # type: ignore
|
||||
|
|
@ -10,16 +10,21 @@ from configs import dify_config
|
|||
from models import Account
|
||||
from models.model import EndUser
|
||||
|
||||
#: A proxy for the current user. If no user is logged in, this will be an
|
||||
#: anonymous user
|
||||
current_user = cast(Union[Account, EndUser, None], LocalProxy(lambda: _get_user()))
|
||||
|
||||
|
||||
def current_account_with_tenant():
|
||||
if not isinstance(current_user, Account):
|
||||
"""
|
||||
Resolve the underlying account for the current user proxy and ensure tenant context exists.
|
||||
Allows tests to supply plain Account mocks without the LocalProxy helper.
|
||||
"""
|
||||
user_proxy = current_user
|
||||
|
||||
get_current_object = getattr(user_proxy, "_get_current_object", None)
|
||||
user = get_current_object() if callable(get_current_object) else user_proxy # type: ignore
|
||||
|
||||
if not isinstance(user, Account):
|
||||
raise ValueError("current_user must be an Account instance")
|
||||
assert current_user.current_tenant_id is not None, "The tenant information should be loaded."
|
||||
return current_user, current_user.current_tenant_id
|
||||
assert user.current_tenant_id is not None, "The tenant information should be loaded."
|
||||
return user, user.current_tenant_id
|
||||
|
||||
|
||||
from typing import ParamSpec, TypeVar
|
||||
|
|
@ -81,3 +86,9 @@ def _get_user() -> EndUser | Account | None:
|
|||
return g._login_user # type: ignore
|
||||
|
||||
return None
|
||||
|
||||
|
||||
#: A proxy for the current user. If no user is logged in, this will be an
|
||||
#: anonymous user
|
||||
# NOTE: Any here, but use _get_current_object to check the fields
|
||||
current_user: Any = LocalProxy(lambda: _get_user())
|
||||
|
|
|
|||
|
|
@ -0,0 +1,67 @@
|
|||
"""Time duration parser utility."""
|
||||
|
||||
import re
|
||||
from datetime import UTC, datetime, timedelta
|
||||
|
||||
|
||||
def parse_time_duration(duration_str: str) -> timedelta | None:
|
||||
"""
|
||||
Parse time duration string to timedelta.
|
||||
|
||||
Supported formats:
|
||||
- 7d: 7 days
|
||||
- 4h: 4 hours
|
||||
- 30m: 30 minutes
|
||||
- 30s: 30 seconds
|
||||
|
||||
Args:
|
||||
duration_str: Duration string (e.g., "7d", "4h", "30m", "30s")
|
||||
|
||||
Returns:
|
||||
timedelta object or None if invalid format
|
||||
"""
|
||||
if not duration_str:
|
||||
return None
|
||||
|
||||
# Pattern: number followed by unit (d, h, m, s)
|
||||
pattern = r"^(\d+)([dhms])$"
|
||||
match = re.match(pattern, duration_str.lower())
|
||||
|
||||
if not match:
|
||||
return None
|
||||
|
||||
value = int(match.group(1))
|
||||
unit = match.group(2)
|
||||
|
||||
if unit == "d":
|
||||
return timedelta(days=value)
|
||||
elif unit == "h":
|
||||
return timedelta(hours=value)
|
||||
elif unit == "m":
|
||||
return timedelta(minutes=value)
|
||||
elif unit == "s":
|
||||
return timedelta(seconds=value)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_time_threshold(duration_str: str | None) -> datetime | None:
|
||||
"""
|
||||
Get datetime threshold from duration string.
|
||||
|
||||
Calculates the datetime that is duration_str ago from now.
|
||||
|
||||
Args:
|
||||
duration_str: Duration string (e.g., "7d", "4h", "30m", "30s")
|
||||
|
||||
Returns:
|
||||
datetime object representing the threshold time, or None if no duration
|
||||
"""
|
||||
if not duration_str:
|
||||
return None
|
||||
|
||||
duration = parse_time_duration(duration_str)
|
||||
if duration is None:
|
||||
return None
|
||||
|
||||
return datetime.now(UTC) - duration
|
||||
|
|
@ -1479,7 +1479,7 @@ class EndUser(Base, UserMixin):
|
|||
sa.Index("end_user_tenant_session_id_idx", "tenant_id", "session_id", "type"),
|
||||
)
|
||||
|
||||
id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"))
|
||||
id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"))
|
||||
tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False)
|
||||
app_id = mapped_column(StringUUID, nullable=True)
|
||||
type: Mapped[str] = mapped_column(String(255), nullable=False)
|
||||
|
|
|
|||
|
|
@ -86,6 +86,7 @@ dependencies = [
|
|||
"sendgrid~=6.12.3",
|
||||
"flask-restx~=1.3.0",
|
||||
"packaging~=23.2",
|
||||
"weaviate-client==4.17.0",
|
||||
]
|
||||
# Before adding new dependency, consider place it in
|
||||
# alphabet order (a-z) and suitable group.
|
||||
|
|
@ -166,6 +167,7 @@ dev = [
|
|||
"mypy~=1.17.1",
|
||||
# "locust>=2.40.4", # Temporarily removed due to compatibility issues. Uncomment when resolved.
|
||||
"sseclient-py>=1.8.0",
|
||||
"pytest-timeout>=2.4.0",
|
||||
]
|
||||
|
||||
############################################################
|
||||
|
|
@ -214,7 +216,7 @@ vdb = [
|
|||
"tidb-vector==0.0.9",
|
||||
"upstash-vector==0.6.0",
|
||||
"volcengine-compat~=1.0.0",
|
||||
"weaviate-client>=4.0.0,<5.0.0",
|
||||
"weaviate-client==4.17.0",
|
||||
"xinference-client~=1.2.2",
|
||||
"mo-vector~=0.1.13",
|
||||
"mysql-connector-python>=9.3.0",
|
||||
|
|
|
|||
|
|
@ -59,6 +59,7 @@ class APIWorkflowRunRepository(WorkflowExecutionRepository, Protocol):
|
|||
triggered_from: str,
|
||||
limit: int = 20,
|
||||
last_id: str | None = None,
|
||||
status: str | None = None,
|
||||
) -> InfiniteScrollPagination:
|
||||
"""
|
||||
Get paginated workflow runs with filtering.
|
||||
|
|
@ -73,6 +74,7 @@ class APIWorkflowRunRepository(WorkflowExecutionRepository, Protocol):
|
|||
triggered_from: Filter by trigger source (e.g., "debugging", "app-run")
|
||||
limit: Maximum number of records to return (default: 20)
|
||||
last_id: Cursor for pagination - ID of the last record from previous page
|
||||
status: Optional filter by status (e.g., "running", "succeeded", "failed")
|
||||
|
||||
Returns:
|
||||
InfiniteScrollPagination object containing:
|
||||
|
|
@ -107,6 +109,43 @@ class APIWorkflowRunRepository(WorkflowExecutionRepository, Protocol):
|
|||
"""
|
||||
...
|
||||
|
||||
def get_workflow_runs_count(
|
||||
self,
|
||||
tenant_id: str,
|
||||
app_id: str,
|
||||
triggered_from: str,
|
||||
status: str | None = None,
|
||||
time_range: str | None = None,
|
||||
) -> dict[str, int]:
|
||||
"""
|
||||
Get workflow runs count statistics.
|
||||
|
||||
Retrieves total count and count by status for workflow runs
|
||||
matching the specified filters.
|
||||
|
||||
Args:
|
||||
tenant_id: Tenant identifier for multi-tenant isolation
|
||||
app_id: Application identifier
|
||||
triggered_from: Filter by trigger source (e.g., "debugging", "app-run")
|
||||
status: Optional filter by specific status
|
||||
time_range: Optional time range filter (e.g., "7d", "4h", "30m", "30s")
|
||||
Filters records based on created_at field
|
||||
|
||||
Returns:
|
||||
Dictionary containing:
|
||||
- total: Total count of all workflow runs (or filtered by status)
|
||||
- running: Count of workflow runs with status "running"
|
||||
- succeeded: Count of workflow runs with status "succeeded"
|
||||
- failed: Count of workflow runs with status "failed"
|
||||
- stopped: Count of workflow runs with status "stopped"
|
||||
- partial_succeeded: Count of workflow runs with status "partial-succeeded"
|
||||
|
||||
Note: If a status is provided, 'total' will be the count for that status,
|
||||
and the specific status count will also be set to this value, with all
|
||||
other status counts being 0.
|
||||
"""
|
||||
...
|
||||
|
||||
def get_expired_runs_batch(
|
||||
self,
|
||||
tenant_id: str,
|
||||
|
|
|
|||
|
|
@ -24,11 +24,12 @@ from collections.abc import Sequence
|
|||
from datetime import datetime
|
||||
from typing import cast
|
||||
|
||||
from sqlalchemy import delete, select
|
||||
from sqlalchemy import delete, func, select
|
||||
from sqlalchemy.engine import CursorResult
|
||||
from sqlalchemy.orm import Session, sessionmaker
|
||||
|
||||
from libs.infinite_scroll_pagination import InfiniteScrollPagination
|
||||
from libs.time_parser import get_time_threshold
|
||||
from models.workflow import WorkflowRun
|
||||
from repositories.api_workflow_run_repository import APIWorkflowRunRepository
|
||||
|
||||
|
|
@ -63,6 +64,7 @@ class DifyAPISQLAlchemyWorkflowRunRepository(APIWorkflowRunRepository):
|
|||
triggered_from: str,
|
||||
limit: int = 20,
|
||||
last_id: str | None = None,
|
||||
status: str | None = None,
|
||||
) -> InfiniteScrollPagination:
|
||||
"""
|
||||
Get paginated workflow runs with filtering.
|
||||
|
|
@ -79,6 +81,10 @@ class DifyAPISQLAlchemyWorkflowRunRepository(APIWorkflowRunRepository):
|
|||
WorkflowRun.triggered_from == triggered_from,
|
||||
)
|
||||
|
||||
# Add optional status filter
|
||||
if status:
|
||||
base_stmt = base_stmt.where(WorkflowRun.status == status)
|
||||
|
||||
if last_id:
|
||||
# Get the last workflow run for cursor-based pagination
|
||||
last_run_stmt = base_stmt.where(WorkflowRun.id == last_id)
|
||||
|
|
@ -120,6 +126,73 @@ class DifyAPISQLAlchemyWorkflowRunRepository(APIWorkflowRunRepository):
|
|||
)
|
||||
return session.scalar(stmt)
|
||||
|
||||
def get_workflow_runs_count(
|
||||
self,
|
||||
tenant_id: str,
|
||||
app_id: str,
|
||||
triggered_from: str,
|
||||
status: str | None = None,
|
||||
time_range: str | None = None,
|
||||
) -> dict[str, int]:
|
||||
"""
|
||||
Get workflow runs count statistics grouped by status.
|
||||
"""
|
||||
_initial_status_counts = {
|
||||
"running": 0,
|
||||
"succeeded": 0,
|
||||
"failed": 0,
|
||||
"stopped": 0,
|
||||
"partial-succeeded": 0,
|
||||
}
|
||||
|
||||
with self._session_maker() as session:
|
||||
# Build base where conditions
|
||||
base_conditions = [
|
||||
WorkflowRun.tenant_id == tenant_id,
|
||||
WorkflowRun.app_id == app_id,
|
||||
WorkflowRun.triggered_from == triggered_from,
|
||||
]
|
||||
|
||||
# Add time range filter if provided
|
||||
if time_range:
|
||||
time_threshold = get_time_threshold(time_range)
|
||||
if time_threshold:
|
||||
base_conditions.append(WorkflowRun.created_at >= time_threshold)
|
||||
|
||||
# If status filter is provided, return simple count
|
||||
if status:
|
||||
count_stmt = select(func.count(WorkflowRun.id)).where(*base_conditions, WorkflowRun.status == status)
|
||||
total = session.scalar(count_stmt) or 0
|
||||
|
||||
result = {"total": total} | _initial_status_counts
|
||||
|
||||
# Set the count for the filtered status
|
||||
if status in result:
|
||||
result[status] = total
|
||||
|
||||
return result
|
||||
|
||||
# No status filter - get counts grouped by status
|
||||
base_stmt = (
|
||||
select(WorkflowRun.status, func.count(WorkflowRun.id).label("count"))
|
||||
.where(*base_conditions)
|
||||
.group_by(WorkflowRun.status)
|
||||
)
|
||||
|
||||
# Execute query
|
||||
results = session.execute(base_stmt).all()
|
||||
|
||||
# Build response dictionary
|
||||
status_counts = _initial_status_counts.copy()
|
||||
|
||||
total = 0
|
||||
for status_val, count in results:
|
||||
total += count
|
||||
if status_val in status_counts:
|
||||
status_counts[status_val] = count
|
||||
|
||||
return {"total": total} | status_counts
|
||||
|
||||
def get_expired_runs_batch(
|
||||
self,
|
||||
tenant_id: str,
|
||||
|
|
|
|||
|
|
@ -17,7 +17,6 @@ from core.tools.entities.tool_entities import CredentialType
|
|||
from core.tools.utils.encryption import ProviderConfigCache, ProviderConfigEncrypter, create_provider_encrypter
|
||||
from extensions.ext_database import db
|
||||
from extensions.ext_redis import redis_client
|
||||
from libs.login import current_account_with_tenant
|
||||
from models.oauth import DatasourceOauthParamConfig, DatasourceOauthTenantParamConfig, DatasourceProvider
|
||||
from models.provider_ids import DatasourceProviderID
|
||||
from services.plugin.plugin_service import PluginService
|
||||
|
|
@ -25,6 +24,16 @@ from services.plugin.plugin_service import PluginService
|
|||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_current_user():
|
||||
from libs.login import current_user
|
||||
from models.account import Account
|
||||
from models.model import EndUser
|
||||
|
||||
if not isinstance(current_user._get_current_object(), (Account, EndUser)): # type: ignore
|
||||
raise TypeError(f"current_user must be Account or EndUser, got {type(current_user).__name__}")
|
||||
return current_user
|
||||
|
||||
|
||||
class DatasourceProviderService:
|
||||
"""
|
||||
Model Provider Service
|
||||
|
|
@ -93,8 +102,6 @@ class DatasourceProviderService:
|
|||
"""
|
||||
get credential by id
|
||||
"""
|
||||
current_user, _ = current_account_with_tenant()
|
||||
|
||||
with Session(db.engine) as session:
|
||||
if credential_id:
|
||||
datasource_provider = (
|
||||
|
|
@ -111,6 +118,7 @@ class DatasourceProviderService:
|
|||
return {}
|
||||
# refresh the credentials
|
||||
if datasource_provider.expires_at != -1 and (datasource_provider.expires_at - 60) < int(time.time()):
|
||||
current_user = get_current_user()
|
||||
decrypted_credentials = self.decrypt_datasource_provider_credentials(
|
||||
tenant_id=tenant_id,
|
||||
datasource_provider=datasource_provider,
|
||||
|
|
@ -159,8 +167,6 @@ class DatasourceProviderService:
|
|||
"""
|
||||
get all datasource credentials by provider
|
||||
"""
|
||||
current_user, _ = current_account_with_tenant()
|
||||
|
||||
with Session(db.engine) as session:
|
||||
datasource_providers = (
|
||||
session.query(DatasourceProvider)
|
||||
|
|
@ -170,6 +176,7 @@ class DatasourceProviderService:
|
|||
)
|
||||
if not datasource_providers:
|
||||
return []
|
||||
current_user = get_current_user()
|
||||
# refresh the credentials
|
||||
real_credentials_list = []
|
||||
for datasource_provider in datasource_providers:
|
||||
|
|
@ -608,7 +615,6 @@ class DatasourceProviderService:
|
|||
"""
|
||||
provider_name = provider_id.provider_name
|
||||
plugin_id = provider_id.plugin_id
|
||||
current_user, _ = current_account_with_tenant()
|
||||
|
||||
with Session(db.engine) as session:
|
||||
lock = f"datasource_provider_create_lock:{tenant_id}_{provider_id}_{CredentialType.API_KEY}"
|
||||
|
|
@ -630,6 +636,7 @@ class DatasourceProviderService:
|
|||
raise ValueError("Authorization name is already exists")
|
||||
|
||||
try:
|
||||
current_user = get_current_user()
|
||||
self.provider_manager.validate_provider_credentials(
|
||||
tenant_id=tenant_id,
|
||||
user_id=current_user.id,
|
||||
|
|
@ -907,7 +914,6 @@ class DatasourceProviderService:
|
|||
"""
|
||||
update datasource credentials.
|
||||
"""
|
||||
current_user, _ = current_account_with_tenant()
|
||||
|
||||
with Session(db.engine) as session:
|
||||
datasource_provider = (
|
||||
|
|
@ -944,6 +950,7 @@ class DatasourceProviderService:
|
|||
for key, value in credentials.items()
|
||||
}
|
||||
try:
|
||||
current_user = get_current_user()
|
||||
self.provider_manager.validate_provider_credentials(
|
||||
tenant_id=tenant_id,
|
||||
user_id=current_user.id,
|
||||
|
|
|
|||
|
|
@ -26,13 +26,15 @@ class WorkflowRunService:
|
|||
)
|
||||
self._workflow_run_repo = DifyAPIRepositoryFactory.create_api_workflow_run_repository(session_maker)
|
||||
|
||||
def get_paginate_advanced_chat_workflow_runs(self, app_model: App, args: dict) -> InfiniteScrollPagination:
|
||||
def get_paginate_advanced_chat_workflow_runs(
|
||||
self, app_model: App, args: dict, triggered_from: WorkflowRunTriggeredFrom = WorkflowRunTriggeredFrom.DEBUGGING
|
||||
) -> InfiniteScrollPagination:
|
||||
"""
|
||||
Get advanced chat app workflow run list
|
||||
Only return triggered_from == advanced_chat
|
||||
|
||||
:param app_model: app model
|
||||
:param args: request args
|
||||
:param triggered_from: workflow run triggered from (default: DEBUGGING for preview runs)
|
||||
"""
|
||||
|
||||
class WorkflowWithMessage:
|
||||
|
|
@ -45,7 +47,7 @@ class WorkflowRunService:
|
|||
def __getattr__(self, item):
|
||||
return getattr(self._workflow_run, item)
|
||||
|
||||
pagination = self.get_paginate_workflow_runs(app_model, args)
|
||||
pagination = self.get_paginate_workflow_runs(app_model, args, triggered_from)
|
||||
|
||||
with_message_workflow_runs = []
|
||||
for workflow_run in pagination.data:
|
||||
|
|
@ -60,23 +62,27 @@ class WorkflowRunService:
|
|||
pagination.data = with_message_workflow_runs
|
||||
return pagination
|
||||
|
||||
def get_paginate_workflow_runs(self, app_model: App, args: dict) -> InfiniteScrollPagination:
|
||||
def get_paginate_workflow_runs(
|
||||
self, app_model: App, args: dict, triggered_from: WorkflowRunTriggeredFrom = WorkflowRunTriggeredFrom.DEBUGGING
|
||||
) -> InfiniteScrollPagination:
|
||||
"""
|
||||
Get debug workflow run list
|
||||
Only return triggered_from == debugging
|
||||
Get workflow run list
|
||||
|
||||
:param app_model: app model
|
||||
:param args: request args
|
||||
:param triggered_from: workflow run triggered from (default: DEBUGGING)
|
||||
"""
|
||||
limit = int(args.get("limit", 20))
|
||||
last_id = args.get("last_id")
|
||||
status = args.get("status")
|
||||
|
||||
return self._workflow_run_repo.get_paginated_workflow_runs(
|
||||
tenant_id=app_model.tenant_id,
|
||||
app_id=app_model.id,
|
||||
triggered_from=WorkflowRunTriggeredFrom.DEBUGGING,
|
||||
triggered_from=triggered_from,
|
||||
limit=limit,
|
||||
last_id=last_id,
|
||||
status=status,
|
||||
)
|
||||
|
||||
def get_workflow_run(self, app_model: App, run_id: str) -> WorkflowRun | None:
|
||||
|
|
@ -92,6 +98,30 @@ class WorkflowRunService:
|
|||
run_id=run_id,
|
||||
)
|
||||
|
||||
def get_workflow_runs_count(
|
||||
self,
|
||||
app_model: App,
|
||||
status: str | None = None,
|
||||
time_range: str | None = None,
|
||||
triggered_from: WorkflowRunTriggeredFrom = WorkflowRunTriggeredFrom.DEBUGGING,
|
||||
) -> dict[str, int]:
|
||||
"""
|
||||
Get workflow runs count statistics
|
||||
|
||||
:param app_model: app model
|
||||
:param status: optional status filter
|
||||
:param time_range: optional time range filter (e.g., "7d", "4h", "30m", "30s")
|
||||
:param triggered_from: workflow run triggered from (default: DEBUGGING)
|
||||
:return: dict with total and status counts
|
||||
"""
|
||||
return self._workflow_run_repo.get_workflow_runs_count(
|
||||
tenant_id=app_model.tenant_id,
|
||||
app_id=app_model.id,
|
||||
triggered_from=triggered_from,
|
||||
status=status,
|
||||
time_range=time_range,
|
||||
)
|
||||
|
||||
def get_workflow_run_node_executions(
|
||||
self,
|
||||
app_model: App,
|
||||
|
|
|
|||
|
|
@ -42,7 +42,8 @@
|
|||
font-family: Inter;
|
||||
font-style: normal;
|
||||
font-weight: 600;
|
||||
line-height: 120%; /* 28.8px */
|
||||
line-height: 120%;
|
||||
/* 28.8px */
|
||||
}
|
||||
|
||||
.description {
|
||||
|
|
@ -51,7 +52,8 @@
|
|||
font-family: Inter;
|
||||
font-style: normal;
|
||||
font-weight: 400;
|
||||
line-height: 20px; /* 142.857% */
|
||||
line-height: 20px;
|
||||
/* 142.857% */
|
||||
letter-spacing: -0.07px;
|
||||
}
|
||||
|
||||
|
|
@ -96,7 +98,8 @@
|
|||
font-family: Inter;
|
||||
font-style: normal;
|
||||
font-weight: 400;
|
||||
line-height: 20px; /* 142.857% */
|
||||
line-height: 20px;
|
||||
/* 142.857% */
|
||||
letter-spacing: -0.07px;
|
||||
}
|
||||
</style>
|
||||
|
|
@ -107,7 +110,7 @@
|
|||
<div class="header"></div>
|
||||
<p class="title">Confirm Your New Email Address</p>
|
||||
<div class="description">
|
||||
<p class="content1">You’re updating the email address linked to your Dify account.</p>
|
||||
<p class="content1">You're updating the email address linked to your account.</p>
|
||||
<p class="content2">To confirm this action, please use the verification code below.</p>
|
||||
<p class="content3">This code will only be valid for the next 5 minutes:</p>
|
||||
</div>
|
||||
|
|
@ -118,5 +121,4 @@
|
|||
</div>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
|
||||
</html>
|
||||
|
|
@ -42,7 +42,8 @@
|
|||
font-family: Inter;
|
||||
font-style: normal;
|
||||
font-weight: 600;
|
||||
line-height: 120%; /* 28.8px */
|
||||
line-height: 120%;
|
||||
/* 28.8px */
|
||||
}
|
||||
|
||||
.description {
|
||||
|
|
@ -51,7 +52,8 @@
|
|||
font-family: Inter;
|
||||
font-style: normal;
|
||||
font-weight: 400;
|
||||
line-height: 20px; /* 142.857% */
|
||||
line-height: 20px;
|
||||
/* 142.857% */
|
||||
letter-spacing: -0.07px;
|
||||
}
|
||||
|
||||
|
|
@ -96,7 +98,8 @@
|
|||
font-family: Inter;
|
||||
font-style: normal;
|
||||
font-weight: 400;
|
||||
line-height: 20px; /* 142.857% */
|
||||
line-height: 20px;
|
||||
/* 142.857% */
|
||||
letter-spacing: -0.07px;
|
||||
}
|
||||
</style>
|
||||
|
|
@ -107,7 +110,7 @@
|
|||
<div class="header"></div>
|
||||
<p class="title">确认您的邮箱地址变更</p>
|
||||
<div class="description">
|
||||
<p class="content1">您正在更新与您的 Dify 账户关联的邮箱地址。</p>
|
||||
<p class="content1">您正在更新与您的账户关联的邮箱地址。</p>
|
||||
<p class="content2">为了确认此操作,请使用以下验证码。</p>
|
||||
<p class="content3">此验证码仅在接下来的5分钟内有效:</p>
|
||||
</div>
|
||||
|
|
@ -118,5 +121,4 @@
|
|||
</div>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
|
||||
</html>
|
||||
|
|
@ -42,7 +42,8 @@
|
|||
font-family: Inter;
|
||||
font-style: normal;
|
||||
font-weight: 600;
|
||||
line-height: 120%; /* 28.8px */
|
||||
line-height: 120%;
|
||||
/* 28.8px */
|
||||
}
|
||||
|
||||
.description {
|
||||
|
|
@ -51,7 +52,8 @@
|
|||
font-family: Inter;
|
||||
font-style: normal;
|
||||
font-weight: 400;
|
||||
line-height: 20px; /* 142.857% */
|
||||
line-height: 20px;
|
||||
/* 142.857% */
|
||||
letter-spacing: -0.07px;
|
||||
}
|
||||
|
||||
|
|
@ -96,7 +98,8 @@
|
|||
font-family: Inter;
|
||||
font-style: normal;
|
||||
font-weight: 400;
|
||||
line-height: 20px; /* 142.857% */
|
||||
line-height: 20px;
|
||||
/* 142.857% */
|
||||
letter-spacing: -0.07px;
|
||||
}
|
||||
</style>
|
||||
|
|
@ -107,7 +110,7 @@
|
|||
<div class="header"></div>
|
||||
<p class="title">Verify Your Request to Change Email</p>
|
||||
<div class="description">
|
||||
<p class="content1">We received a request to change the email address associated with your Dify account.</p>
|
||||
<p class="content1">We received a request to change the email address associated with your account.</p>
|
||||
<p class="content2">To confirm this action, please use the verification code below.</p>
|
||||
<p class="content3">This code will only be valid for the next 5 minutes:</p>
|
||||
</div>
|
||||
|
|
@ -118,5 +121,4 @@
|
|||
</div>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
|
||||
</html>
|
||||
|
|
@ -42,7 +42,8 @@
|
|||
font-family: Inter;
|
||||
font-style: normal;
|
||||
font-weight: 600;
|
||||
line-height: 120%; /* 28.8px */
|
||||
line-height: 120%;
|
||||
/* 28.8px */
|
||||
}
|
||||
|
||||
.description {
|
||||
|
|
@ -51,7 +52,8 @@
|
|||
font-family: Inter;
|
||||
font-style: normal;
|
||||
font-weight: 400;
|
||||
line-height: 20px; /* 142.857% */
|
||||
line-height: 20px;
|
||||
/* 142.857% */
|
||||
letter-spacing: -0.07px;
|
||||
}
|
||||
|
||||
|
|
@ -96,7 +98,8 @@
|
|||
font-family: Inter;
|
||||
font-style: normal;
|
||||
font-weight: 400;
|
||||
line-height: 20px; /* 142.857% */
|
||||
line-height: 20px;
|
||||
/* 142.857% */
|
||||
letter-spacing: -0.07px;
|
||||
}
|
||||
</style>
|
||||
|
|
@ -107,7 +110,7 @@
|
|||
<div class="header"></div>
|
||||
<p class="title">验证您的邮箱变更请求</p>
|
||||
<div class="description">
|
||||
<p class="content1">我们收到了一个变更您 Dify 账户关联邮箱地址的请求。</p>
|
||||
<p class="content1">我们收到了一个变更您账户关联邮箱地址的请求。</p>
|
||||
<p class="content3">此验证码仅在接下来的5分钟内有效:</p>
|
||||
</div>
|
||||
<div class="code-content">
|
||||
|
|
@ -117,5 +120,4 @@
|
|||
</div>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
|
||||
</html>
|
||||
|
|
@ -1,5 +1,6 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
|
||||
<head>
|
||||
<style>
|
||||
body {
|
||||
|
|
@ -10,6 +11,7 @@
|
|||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
.container {
|
||||
width: 504px;
|
||||
min-height: 444px;
|
||||
|
|
@ -30,6 +32,7 @@
|
|||
max-width: 63px;
|
||||
height: auto;
|
||||
}
|
||||
|
||||
.button {
|
||||
display: block;
|
||||
padding: 8px 12px;
|
||||
|
|
@ -45,46 +48,56 @@
|
|||
font-size: 14px;
|
||||
font-style: normal;
|
||||
font-weight: 600;
|
||||
line-height: 20px; /* 142.857% */
|
||||
line-height: 20px;
|
||||
/* 142.857% */
|
||||
}
|
||||
|
||||
.button:hover {
|
||||
background-color: #004AEB;
|
||||
border: 0.5px solid rgba(16, 24, 40, 0.08);
|
||||
box-shadow: 0px 1px 2px 0px rgba(9, 9, 11, 0.05);
|
||||
}
|
||||
|
||||
.content {
|
||||
color: #354052;
|
||||
font-family: Inter;
|
||||
font-size: 14px;
|
||||
font-style: normal;
|
||||
font-weight: 400;
|
||||
line-height: 20px; /* 142.857% */
|
||||
line-height: 20px;
|
||||
/* 142.857% */
|
||||
letter-spacing: -0.07px;
|
||||
}
|
||||
|
||||
.content1 {
|
||||
margin: 0;
|
||||
padding-top: 24px;
|
||||
padding-bottom: 12px;
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.content2 {
|
||||
margin: 0;
|
||||
padding-bottom: 12px;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div class="container">
|
||||
<div class="header"></div>
|
||||
<div class="content">
|
||||
<p class="content1">Dear {{ to }},</p>
|
||||
<p class="content2">{{ inviter_name }} is pleased to invite you to join our workspace on {{application_title}}, a platform specifically designed for LLM application development. On {{application_title}}, you can explore, create, and collaborate to build and operate AI applications.</p>
|
||||
<p class="content2">{{ inviter_name }} is pleased to invite you to join our workspace on {{application_title}}, a
|
||||
platform specifically designed for LLM application development. On {{application_title}}, you can explore,
|
||||
create, and collaborate to build and operate AI applications.</p>
|
||||
<p class="content2">Click the button below to log in to {{application_title}} and join the workspace.</p>
|
||||
<p style="text-align: center; margin: 0; margin-bottom: 32px;"><a style="color: #fff; text-decoration: none" class="button" href="{{ url }}">Login Here</a></p>
|
||||
<p style="text-align: center; margin: 0; margin-bottom: 32px;"><a style="color: #fff; text-decoration: none"
|
||||
class="button" href="{{ url }}">Login Here</a></p>
|
||||
<p class="content2">Best regards,</p>
|
||||
<p class="content2">{{application_title}} Team</p>
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
</html>
|
||||
|
|
@@ -42,7 +42,8 @@
    font-family: Inter;
    font-style: normal;
    font-weight: 600;
    line-height: 120%; /* 28.8px */
    line-height: 120%;
    /* 28.8px */
  }

  .description {
@@ -51,7 +52,8 @@
    font-family: Inter;
    font-style: normal;
    font-weight: 400;
    line-height: 20px; /* 142.857% */
    line-height: 20px;
    /* 142.857% */
    letter-spacing: -0.07px;
  }

@@ -80,10 +82,9 @@
  <div class="description">
    <p class="content1">You have been assigned as the new owner of the workspace "{{WorkspaceName}}".</p>
    <p class="content2">As the new owner, you now have full administrative privileges for this workspace.</p>
    <p class="content3">If you have any questions, please contact support@dify.ai.</p>
    <p class="content3">If you have any questions, please contact support.</p>
  </div>
</div>
</body>

</html>
@@ -42,7 +42,8 @@
    font-family: Inter;
    font-style: normal;
    font-weight: 600;
    line-height: 120%; /* 28.8px */
    line-height: 120%;
    /* 28.8px */
  }

  .description {
@@ -51,7 +52,8 @@
    font-family: Inter;
    font-style: normal;
    font-weight: 400;
    line-height: 20px; /* 142.857% */
    line-height: 20px;
    /* 142.857% */
    letter-spacing: -0.07px;
  }

@@ -80,10 +82,9 @@
  <div class="description">
    <p class="content1">您已被分配为工作空间“{{WorkspaceName}}”的新所有者。</p>
    <p class="content2">作为新所有者,您现在对该工作空间拥有完全的管理权限。</p>
    <p class="content3">如果您有任何问题,请联系support@dify.ai。</p>
    <p class="content3">如果您有任何问题,请联系支持团队。</p>
  </div>
</div>
</body>

</html>
@@ -42,7 +42,8 @@
    font-family: Inter;
    font-style: normal;
    font-weight: 600;
    line-height: 120%; /* 28.8px */
    line-height: 120%;
    /* 28.8px */
  }

  .description {
@@ -51,7 +52,8 @@
    font-family: Inter;
    font-style: normal;
    font-weight: 400;
    line-height: 20px; /* 142.857% */
    line-height: 20px;
    /* 142.857% */
    letter-spacing: -0.07px;
  }

@@ -97,7 +99,8 @@
    font-family: Inter;
    font-style: normal;
    font-weight: 400;
    line-height: 20px; /* 142.857% */
    line-height: 20px;
    /* 142.857% */
    letter-spacing: -0.07px;
  }
</style>

@@ -108,12 +111,14 @@
  <div class="header"></div>
  <p class="title">Workspace ownership has been transferred</p>
  <div class="description">
    <p class="content1">You have successfully transferred ownership of the workspace "{{WorkspaceName}}" to {{NewOwnerEmail}}.</p>
    <p class="content2">You no longer have owner privileges for this workspace. Your access level has been changed to Admin.</p>
    <p class="content3">If you did not initiate this transfer or have concerns about this change, please contact support@dify.ai immediately.</p>
    <p class="content1">You have successfully transferred ownership of the workspace "{{WorkspaceName}}" to
      {{NewOwnerEmail}}.</p>
    <p class="content2">You no longer have owner privileges for this workspace. Your access level has been changed to
      Admin.</p>
    <p class="content3">If you did not initiate this transfer or have concerns about this change, please contact
      support immediately.</p>
  </div>
</div>
</body>

</html>
@@ -42,7 +42,8 @@
    font-family: Inter;
    font-style: normal;
    font-weight: 600;
    line-height: 120%; /* 28.8px */
    line-height: 120%;
    /* 28.8px */
  }

  .description {
@@ -51,7 +52,8 @@
    font-family: Inter;
    font-style: normal;
    font-weight: 400;
    line-height: 20px; /* 142.857% */
    line-height: 20px;
    /* 142.857% */
    letter-spacing: -0.07px;
  }

@@ -97,7 +99,8 @@
    font-family: Inter;
    font-style: normal;
    font-weight: 400;
    line-height: 20px; /* 142.857% */
    line-height: 20px;
    /* 142.857% */
    letter-spacing: -0.07px;
  }
</style>

@@ -110,10 +113,9 @@
  <div class="description">
    <p class="content1">您已成功将工作空间“{{WorkspaceName}}”的所有权转移给{{NewOwnerEmail}}。</p>
    <p class="content2">您不再拥有此工作空间的拥有者权限。您的访问级别已更改为管理员。</p>
    <p class="content3">如果您没有发起此转移或对此变更有任何疑问,请立即联系support@dify.ai。</p>
    <p class="content3">如果您没有发起此转移或对此变更有任何疑问,请立即联系支持团队。</p>
  </div>
</div>
</body>

</html>
@@ -0,0 +1,134 @@
"""
TestContainers-based integration tests for mail_register_task.py

This module provides integration tests for email registration tasks
using TestContainers to ensure real database and service interactions.
"""

from unittest.mock import MagicMock, patch

import pytest
from faker import Faker

from libs.email_i18n import EmailType
from tasks.mail_register_task import send_email_register_mail_task, send_email_register_mail_task_when_account_exist


class TestMailRegisterTask:
    """Integration tests for mail_register_task using testcontainers."""

    @pytest.fixture
    def mock_mail_dependencies(self):
        """Mock setup for mail service dependencies."""
        with (
            patch("tasks.mail_register_task.mail") as mock_mail,
            patch("tasks.mail_register_task.get_email_i18n_service") as mock_get_email_service,
        ):
            # Setup mock mail service
            mock_mail.is_inited.return_value = True

            # Setup mock email i18n service
            mock_email_service = MagicMock()
            mock_get_email_service.return_value = mock_email_service

            yield {
                "mail": mock_mail,
                "email_service": mock_email_service,
                "get_email_service": mock_get_email_service,
            }

    def test_send_email_register_mail_task_success(self, db_session_with_containers, mock_mail_dependencies):
        """Test successful email registration mail sending."""
        fake = Faker()
        language = "en-US"
        to_email = fake.email()
        code = fake.numerify("######")

        send_email_register_mail_task(language=language, to=to_email, code=code)

        mock_mail_dependencies["mail"].is_inited.assert_called_once()
        mock_mail_dependencies["email_service"].send_email.assert_called_once_with(
            email_type=EmailType.EMAIL_REGISTER,
            language_code=language,
            to=to_email,
            template_context={
                "to": to_email,
                "code": code,
            },
        )

    def test_send_email_register_mail_task_mail_not_initialized(
        self, db_session_with_containers, mock_mail_dependencies
    ):
        """Test email registration task when mail service is not initialized."""
        mock_mail_dependencies["mail"].is_inited.return_value = False

        send_email_register_mail_task(language="en-US", to="test@example.com", code="123456")

        mock_mail_dependencies["get_email_service"].assert_not_called()
        mock_mail_dependencies["email_service"].send_email.assert_not_called()

    def test_send_email_register_mail_task_exception_handling(self, db_session_with_containers, mock_mail_dependencies):
        """Test email registration task exception handling."""
        mock_mail_dependencies["email_service"].send_email.side_effect = Exception("Email service error")

        fake = Faker()
        to_email = fake.email()
        code = fake.numerify("######")

        with patch("tasks.mail_register_task.logger") as mock_logger:
            send_email_register_mail_task(language="en-US", to=to_email, code=code)
            mock_logger.exception.assert_called_once_with("Send email register mail to %s failed", to_email)

    def test_send_email_register_mail_task_when_account_exist_success(
        self, db_session_with_containers, mock_mail_dependencies
    ):
        """Test successful email registration mail sending when account exists."""
        fake = Faker()
        language = "en-US"
        to_email = fake.email()
        account_name = fake.name()

        with patch("tasks.mail_register_task.dify_config") as mock_config:
            mock_config.CONSOLE_WEB_URL = "https://console.dify.ai"

            send_email_register_mail_task_when_account_exist(language=language, to=to_email, account_name=account_name)

        mock_mail_dependencies["email_service"].send_email.assert_called_once_with(
            email_type=EmailType.EMAIL_REGISTER_WHEN_ACCOUNT_EXIST,
            language_code=language,
            to=to_email,
            template_context={
                "to": to_email,
                "login_url": "https://console.dify.ai/signin",
                "reset_password_url": "https://console.dify.ai/reset-password",
                "account_name": account_name,
            },
        )

    def test_send_email_register_mail_task_when_account_exist_mail_not_initialized(
        self, db_session_with_containers, mock_mail_dependencies
    ):
        """Test account exist email task when mail service is not initialized."""
        mock_mail_dependencies["mail"].is_inited.return_value = False

        send_email_register_mail_task_when_account_exist(
            language="en-US", to="test@example.com", account_name="Test User"
        )

        mock_mail_dependencies["get_email_service"].assert_not_called()
        mock_mail_dependencies["email_service"].send_email.assert_not_called()

    def test_send_email_register_mail_task_when_account_exist_exception_handling(
        self, db_session_with_containers, mock_mail_dependencies
    ):
        """Test account exist email task exception handling."""
        mock_mail_dependencies["email_service"].send_email.side_effect = Exception("Email service error")

        fake = Faker()
        to_email = fake.email()
        account_name = fake.name()

        with patch("tasks.mail_register_task.logger") as mock_logger:
            send_email_register_mail_task_when_account_exist(language="en-US", to=to_email, account_name=account_name)
            mock_logger.exception.assert_called_once_with("Send email register mail to %s failed", to_email)
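For context when reading these tests, here is a minimal sketch of what the task under test presumably looks like, inferred only from the patches and assertions above. The Celery decorator and the import paths for `mail`, `dify_config`, and `get_email_i18n_service` are assumptions, not the shipped implementation in `tasks/mail_register_task.py`.

```python
# Hypothetical sketch inferred from the test assertions; not the actual implementation.
import logging

from celery import shared_task  # assumption: Dify mail tasks are Celery tasks

from configs import dify_config                                  # import path assumed
from extensions.ext_mail import mail                             # import path assumed
from libs.email_i18n import EmailType, get_email_i18n_service    # EmailType path matches the test import

logger = logging.getLogger(__name__)


@shared_task(queue="mail")
def send_email_register_mail_task(language: str, to: str, code: str) -> None:
    # Skip silently when the mail extension is not configured, as the tests expect.
    if not mail.is_inited():
        return
    try:
        get_email_i18n_service().send_email(
            email_type=EmailType.EMAIL_REGISTER,
            language_code=language,
            to=to,
            template_context={"to": to, "code": code},
        )
    except Exception:
        logger.exception("Send email register mail to %s failed", to)
```

The account-exist variant would presumably do the same while building `login_url` and `reset_password_url` from `dify_config.CONSOLE_WEB_URL`, which is why the test patches `dify_config`.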
@@ -34,12 +34,17 @@ def test_workflow_tool_should_raise_tool_invoke_error_when_result_has_error_fiel
    monkeypatch.setattr(tool, "_get_app", lambda *args, **kwargs: None)
    monkeypatch.setattr(tool, "_get_workflow", lambda *args, **kwargs: None)

    # Mock user resolution to avoid database access
    from unittest.mock import Mock

    mock_user = Mock()
    monkeypatch.setattr(tool, "_resolve_user", lambda *args, **kwargs: mock_user)

    # replace `WorkflowAppGenerator.generate` 's return value.
    monkeypatch.setattr(
        "core.app.apps.workflow.app_generator.WorkflowAppGenerator.generate",
        lambda *args, **kwargs: {"data": {"error": "oops"}},
    )
    monkeypatch.setattr("libs.login.current_user", lambda *args, **kwargs: None)

    with pytest.raises(ToolInvokeError) as exc_info:
        # WorkflowTool always returns a generator, so we need to iterate to
@@ -7,14 +7,11 @@ This test suite validates the behavior of a workflow that:
3. Handles multiple answer nodes with different outputs
"""

import pytest

from core.workflow.graph_events import (
    GraphRunStartedEvent,
    GraphRunSucceededEvent,
    NodeRunStartedEvent,
    NodeRunStreamChunkEvent,
    NodeRunSucceededEvent,
)

from .test_mock_config import MockConfigBuilder

@@ -29,7 +26,6 @@ class TestComplexBranchWorkflow:
        self.runner = TableTestRunner()
        self.fixture_path = "test_complex_branch"

    @pytest.mark.skip(reason="output in this workflow can be random")
    def test_hello_branch_with_llm(self):
        """
        Test when query contains 'hello' - should trigger true branch.

@@ -41,42 +37,17 @@ class TestComplexBranchWorkflow:
                fixture_path=self.fixture_path,
                query="hello world",
                expected_outputs={
                    "answer": f"{mock_text_1}contains 'hello'",
                    "answer": f"contains 'hello'{mock_text_1}",
                },
                description="Basic hello case with parallel LLM execution",
                use_auto_mock=True,
                mock_config=(MockConfigBuilder().with_node_output("1755502777322", {"text": mock_text_1}).build()),
                expected_event_sequence=[
                    GraphRunStartedEvent,
                    # Start
                    NodeRunStartedEvent,
                    NodeRunSucceededEvent,
                    # If/Else (no streaming)
                    NodeRunStartedEvent,
                    NodeRunSucceededEvent,
                    # LLM (with streaming)
                    NodeRunStartedEvent,
                ]
                # LLM
                + [NodeRunStreamChunkEvent] * (mock_text_1.count(" ") + 2)
                + [
                    # Answer's text
                    NodeRunStreamChunkEvent,
                    NodeRunSucceededEvent,
                    # Answer
                    NodeRunStartedEvent,
                    NodeRunSucceededEvent,
                    # Answer 2
                    NodeRunStartedEvent,
                    NodeRunSucceededEvent,
                    GraphRunSucceededEvent,
                ],
            ),
            WorkflowTestCase(
                fixture_path=self.fixture_path,
                query="say hello to everyone",
                expected_outputs={
                    "answer": "Mocked response for greetingcontains 'hello'",
                    "answer": "contains 'hello'Mocked response for greeting",
                },
                description="Hello in middle of sentence",
                use_auto_mock=True,

@@ -93,6 +64,35 @@ class TestComplexBranchWorkflow:
        for result in suite_result.results:
            assert result.success, f"Test '{result.test_case.description}' failed: {result.error}"
            assert result.actual_outputs
            assert any(isinstance(event, GraphRunStartedEvent) for event in result.events)
            assert any(isinstance(event, GraphRunSucceededEvent) for event in result.events)

            start_index = next(
                idx for idx, event in enumerate(result.events) if isinstance(event, GraphRunStartedEvent)
            )
            success_index = max(
                idx for idx, event in enumerate(result.events) if isinstance(event, GraphRunSucceededEvent)
            )
            assert start_index < success_index

            started_node_ids = {event.node_id for event in result.events if isinstance(event, NodeRunStartedEvent)}
            assert {"1755502773326", "1755502777322"}.issubset(started_node_ids), (
                f"Branch or LLM nodes missing in events: {started_node_ids}"
            )

            assert any(isinstance(event, NodeRunStreamChunkEvent) for event in result.events), (
                "Expected streaming chunks from LLM execution"
            )

            llm_start_index = next(
                idx
                for idx, event in enumerate(result.events)
                if isinstance(event, NodeRunStartedEvent) and event.node_id == "1755502777322"
            )
            assert any(
                idx > llm_start_index and isinstance(event, NodeRunStreamChunkEvent)
                for idx, event in enumerate(result.events)
            ), "Streaming chunks should follow LLM node start"

    def test_non_hello_branch_with_llm(self):
        """
@@ -95,10 +95,10 @@ def _make_succeeded_event() -> NodeRunSucceededEvent:
    )


def test_dispatcher_checks_commands_after_node_completion() -> None:
    """Dispatcher should only check commands after node completion events."""
def test_dispatcher_checks_commands_during_idle_and_on_completion() -> None:
    """Dispatcher polls commands when idle and re-checks after completion events."""
    started_checks = _run_dispatcher_for_event(_make_started_event())
    succeeded_checks = _run_dispatcher_for_event(_make_succeeded_event())

    assert started_checks == 0
    assert succeeded_checks == 1
    assert started_checks == 1
    assert succeeded_checks == 2
@@ -21,7 +21,6 @@ from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom,
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from core.variables import ArrayAnySegment, ArrayFileSegment, NoneSegment
from core.workflow.entities import GraphInitParams, GraphRuntimeState, VariablePool
from core.workflow.graph import Graph
from core.workflow.nodes.llm import llm_utils
from core.workflow.nodes.llm.entities import (
    ContextConfig,

@@ -83,14 +82,6 @@ def graph_init_params() -> GraphInitParams:
    )


@pytest.fixture
def graph() -> Graph:
    # TODO: This fixture uses old Graph constructor parameters that are incompatible
    # with the new queue-based engine. Need to rewrite for new engine architecture.
    pytest.skip("Graph fixture incompatible with new queue-based engine - needs rewrite for ResponseStreamCoordinator")
    return Graph()


@pytest.fixture
def graph_runtime_state() -> GraphRuntimeState:
    variable_pool = VariablePool(

@@ -105,7 +96,7 @@ def graph_runtime_state() -> GraphRuntimeState:

@pytest.fixture
def llm_node(
    llm_node_data: LLMNodeData, graph_init_params: GraphInitParams, graph: Graph, graph_runtime_state: GraphRuntimeState
    llm_node_data: LLMNodeData, graph_init_params: GraphInitParams, graph_runtime_state: GraphRuntimeState
) -> LLMNode:
    mock_file_saver = mock.MagicMock(spec=LLMFileSaver)
    node_config = {

@@ -493,9 +484,7 @@ def test_handle_list_messages_basic(llm_node):


@pytest.fixture
def llm_node_for_multimodal(
    llm_node_data, graph_init_params, graph, graph_runtime_state
) -> tuple[LLMNode, LLMFileSaver]:
def llm_node_for_multimodal(llm_node_data, graph_init_params, graph_runtime_state) -> tuple[LLMNode, LLMFileSaver]:
    mock_file_saver: LLMFileSaver = mock.MagicMock(spec=LLMFileSaver)
    node_config = {
        "id": "1",

@@ -655,7 +644,7 @@ class TestSaveMultimodalOutputAndConvertResultToMarkdown:
        gen = llm_node._save_multimodal_output_and_convert_result_to_markdown(
            contents=frozenset(["hello world"]), file_saver=mock_file_saver, file_outputs=[]
        )
        assert list(gen) == ["frozenset({'hello world'})"]
        assert list(gen) == ["hello world"]
        mock_file_saver.save_binary_string.assert_not_called()
        mock_file_saver.save_remote_url.assert_not_called()
@@ -150,6 +150,42 @@ def test_build_from_remote_url(mock_http_head):
    assert file.size == 2048


@pytest.mark.parametrize(
    ("file_type", "should_pass", "expected_error"),
    [
        ("image", True, None),
        ("document", False, "Detected file type does not match the specified type"),
        ("video", False, "Detected file type does not match the specified type"),
    ],
)
def test_build_from_remote_url_strict_validation(mock_http_head, file_type, should_pass, expected_error):
    """Test strict type validation for remote_url."""
    mapping = {
        "transfer_method": "remote_url",
        "url": TEST_REMOTE_URL,
        "type": file_type,
    }
    if should_pass:
        file = build_from_mapping(mapping=mapping, tenant_id=TEST_TENANT_ID, strict_type_validation=True)
        assert file.type == FileType(file_type)
    else:
        with pytest.raises(ValueError, match=expected_error):
            build_from_mapping(mapping=mapping, tenant_id=TEST_TENANT_ID, strict_type_validation=True)


def test_build_from_remote_url_without_strict_validation(mock_http_head):
    """Test that remote_url allows type mismatch when strict_type_validation is False."""
    mapping = {
        "transfer_method": "remote_url",
        "url": TEST_REMOTE_URL,
        "type": "document",
    }
    file = build_from_mapping(mapping=mapping, tenant_id=TEST_TENANT_ID, strict_type_validation=False)
    assert file.transfer_method == FileTransferMethod.REMOTE_URL
    assert file.type == FileType.DOCUMENT
    assert file.filename == "remote_test.jpg"


def test_tool_file_not_found():
    """Test ToolFile not found in database."""
    with patch("factories.file_factory.db.session.scalar", return_value=None):
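The strict-validation tests above imply a check along these lines inside `build_from_mapping`. This is only an illustrative sketch: the helper name and where the detected type comes from are assumptions, not the code in `factories/file_factory.py`.

```python
# Illustrative only: _check_detected_type and its callers are hypothetical names.
def _check_detected_type(detected_type: str, specified_type: str, strict_type_validation: bool) -> str:
    if strict_type_validation and detected_type != specified_type:
        raise ValueError("Detected file type does not match the specified type")
    # Without strict validation the caller-specified type is kept, e.g. a .jpg URL
    # may still be treated as a document, as the second test expects.
    return specified_type
```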
@@ -0,0 +1,68 @@
"""Unit tests for custom input types."""

import pytest

from libs.custom_inputs import time_duration


class TestTimeDuration:
    """Test time_duration input validator."""

    def test_valid_days(self):
        """Test valid days format."""
        result = time_duration("7d")
        assert result == "7d"

    def test_valid_hours(self):
        """Test valid hours format."""
        result = time_duration("4h")
        assert result == "4h"

    def test_valid_minutes(self):
        """Test valid minutes format."""
        result = time_duration("30m")
        assert result == "30m"

    def test_valid_seconds(self):
        """Test valid seconds format."""
        result = time_duration("30s")
        assert result == "30s"

    def test_uppercase_conversion(self):
        """Test uppercase units are converted to lowercase."""
        result = time_duration("7D")
        assert result == "7d"

        result = time_duration("4H")
        assert result == "4h"

    def test_invalid_format_no_unit(self):
        """Test invalid format without unit."""
        with pytest.raises(ValueError, match="Invalid time duration format"):
            time_duration("7")

    def test_invalid_format_wrong_unit(self):
        """Test invalid format with wrong unit."""
        with pytest.raises(ValueError, match="Invalid time duration format"):
            time_duration("7days")

        with pytest.raises(ValueError, match="Invalid time duration format"):
            time_duration("7x")

    def test_invalid_format_no_number(self):
        """Test invalid format without number."""
        with pytest.raises(ValueError, match="Invalid time duration format"):
            time_duration("d")

        with pytest.raises(ValueError, match="Invalid time duration format"):
            time_duration("abc")

    def test_empty_string(self):
        """Test empty string."""
        with pytest.raises(ValueError, match="Time duration cannot be empty"):
            time_duration("")

    def test_none(self):
        """Test None value."""
        with pytest.raises(ValueError, match="Time duration cannot be empty"):
            time_duration(None)
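A minimal sketch of a `time_duration` validator that would satisfy the tests above, assuming a single regex over `<number><unit>` with units d/h/m/s; the actual code in `libs/custom_inputs.py` may differ.

```python
# Sketch only; the error messages mirror what the tests match on.
import re

_DURATION_RE = re.compile(r"^\d+[dhms]$")


def time_duration(value: str | None) -> str:
    """Validate a duration such as '7d', '4h', '30m' or '30s' and return it lowercased."""
    if not value:
        raise ValueError("Time duration cannot be empty")
    normalized = value.lower()
    if not _DURATION_RE.match(normalized):
        raise ValueError(f"Invalid time duration format: {value}")
    return normalized
```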
@@ -86,3 +86,24 @@ def test_parse_and_check_json_markdown_multiple_blocks_fails():
    # opening fence to the last closing fence, causing JSON decode failure.
    with pytest.raises(OutputParserError):
        parse_and_check_json_markdown(src, [])


def test_parse_and_check_json_markdown_handles_think_fenced_and_raw_variants():
    expected = {"keywords": ["2"], "category_id": "2", "category_name": "2"}
    cases = [
        """
```json
[{"keywords": ["2"], "category_id": "2", "category_name": "2"}]
```, error: Expecting value: line 1 column 1 (char 0)
""",
        """
```json
{"keywords": ["2"], "category_id": "2", "category_name": "2"}
```, error: Extra data: line 2 column 5 (char 66)
""",
        '{"keywords": ["2"], "category_id": "2", "category_name": "2"}',
        '[{"keywords": ["2"], "category_id": "2", "category_name": "2"}]',
    ]
    for src in cases:
        obj = parse_and_check_json_markdown(src, ["keywords", "category_id", "category_name"])
        assert obj == expected
@@ -0,0 +1,91 @@
"""Unit tests for time parser utility."""

from datetime import UTC, datetime, timedelta

from libs.time_parser import get_time_threshold, parse_time_duration


class TestParseTimeDuration:
    """Test parse_time_duration function."""

    def test_parse_days(self):
        """Test parsing days."""
        result = parse_time_duration("7d")
        assert result == timedelta(days=7)

    def test_parse_hours(self):
        """Test parsing hours."""
        result = parse_time_duration("4h")
        assert result == timedelta(hours=4)

    def test_parse_minutes(self):
        """Test parsing minutes."""
        result = parse_time_duration("30m")
        assert result == timedelta(minutes=30)

    def test_parse_seconds(self):
        """Test parsing seconds."""
        result = parse_time_duration("30s")
        assert result == timedelta(seconds=30)

    def test_parse_uppercase(self):
        """Test parsing uppercase units."""
        result = parse_time_duration("7D")
        assert result == timedelta(days=7)

    def test_parse_invalid_format(self):
        """Test parsing invalid format."""
        result = parse_time_duration("7days")
        assert result is None

        result = parse_time_duration("abc")
        assert result is None

        result = parse_time_duration("7")
        assert result is None

    def test_parse_empty_string(self):
        """Test parsing empty string."""
        result = parse_time_duration("")
        assert result is None

    def test_parse_none(self):
        """Test parsing None."""
        result = parse_time_duration(None)
        assert result is None


class TestGetTimeThreshold:
    """Test get_time_threshold function."""

    def test_get_threshold_days(self):
        """Test getting threshold for days."""
        before = datetime.now(UTC)
        result = get_time_threshold("7d")
        after = datetime.now(UTC)

        assert result is not None
        # Result should be approximately 7 days ago
        expected = before - timedelta(days=7)
        # Allow 1 second tolerance for test execution time
        assert abs((result - expected).total_seconds()) < 1

    def test_get_threshold_hours(self):
        """Test getting threshold for hours."""
        before = datetime.now(UTC)
        result = get_time_threshold("4h")
        after = datetime.now(UTC)

        assert result is not None
        expected = before - timedelta(hours=4)
        assert abs((result - expected).total_seconds()) < 1

    def test_get_threshold_invalid(self):
        """Test getting threshold with invalid duration."""
        result = get_time_threshold("invalid")
        assert result is None

    def test_get_threshold_none(self):
        """Test getting threshold with None."""
        result = get_time_threshold(None)
        assert result is None
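For reference, the behaviour exercised above can be implemented in a few lines; this is a hedged sketch consistent with the tests, not necessarily the code in `libs/time_parser.py`.

```python
# Sketch only: returns None (rather than raising) on malformed input, as the tests expect.
import re
from datetime import UTC, datetime, timedelta

_DURATION_RE = re.compile(r"^(\d+)([dhms])$")
_UNIT_KWARGS = {"d": "days", "h": "hours", "m": "minutes", "s": "seconds"}


def parse_time_duration(duration: str | None) -> timedelta | None:
    """Parse '7d' / '4h' / '30m' / '30s' (case-insensitive) into a timedelta."""
    if not duration:
        return None
    match = _DURATION_RE.match(duration.lower())
    if not match:
        return None
    value, unit = match.groups()
    return timedelta(**{_UNIT_KWARGS[unit]: int(value)})


def get_time_threshold(duration: str | None) -> datetime | None:
    """Return the UTC timestamp lying `duration` in the past, or None if unparsable."""
    delta = parse_time_duration(duration)
    return None if delta is None else datetime.now(UTC) - delta
```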
@@ -0,0 +1,251 @@
"""Unit tests for workflow run repository with status filter."""

import uuid
from unittest.mock import MagicMock

import pytest
from sqlalchemy.orm import sessionmaker

from models import WorkflowRun, WorkflowRunTriggeredFrom
from repositories.sqlalchemy_api_workflow_run_repository import DifyAPISQLAlchemyWorkflowRunRepository


class TestDifyAPISQLAlchemyWorkflowRunRepository:
    """Test workflow run repository with status filtering."""

    @pytest.fixture
    def mock_session_maker(self):
        """Create a mock session maker."""
        return MagicMock(spec=sessionmaker)

    @pytest.fixture
    def repository(self, mock_session_maker):
        """Create repository instance with mock session."""
        return DifyAPISQLAlchemyWorkflowRunRepository(mock_session_maker)

    def test_get_paginated_workflow_runs_without_status(self, repository, mock_session_maker):
        """Test getting paginated workflow runs without status filter."""
        # Arrange
        tenant_id = str(uuid.uuid4())
        app_id = str(uuid.uuid4())
        mock_session = MagicMock()
        mock_session_maker.return_value.__enter__.return_value = mock_session

        mock_runs = [MagicMock(spec=WorkflowRun) for _ in range(3)]
        mock_session.scalars.return_value.all.return_value = mock_runs

        # Act
        result = repository.get_paginated_workflow_runs(
            tenant_id=tenant_id,
            app_id=app_id,
            triggered_from=WorkflowRunTriggeredFrom.DEBUGGING,
            limit=20,
            last_id=None,
            status=None,
        )

        # Assert
        assert len(result.data) == 3
        assert result.limit == 20
        assert result.has_more is False

    def test_get_paginated_workflow_runs_with_status_filter(self, repository, mock_session_maker):
        """Test getting paginated workflow runs with status filter."""
        # Arrange
        tenant_id = str(uuid.uuid4())
        app_id = str(uuid.uuid4())
        mock_session = MagicMock()
        mock_session_maker.return_value.__enter__.return_value = mock_session

        mock_runs = [MagicMock(spec=WorkflowRun, status="succeeded") for _ in range(2)]
        mock_session.scalars.return_value.all.return_value = mock_runs

        # Act
        result = repository.get_paginated_workflow_runs(
            tenant_id=tenant_id,
            app_id=app_id,
            triggered_from=WorkflowRunTriggeredFrom.DEBUGGING,
            limit=20,
            last_id=None,
            status="succeeded",
        )

        # Assert
        assert len(result.data) == 2
        assert all(run.status == "succeeded" for run in result.data)

    def test_get_workflow_runs_count_without_status(self, repository, mock_session_maker):
        """Test getting workflow runs count without status filter."""
        # Arrange
        tenant_id = str(uuid.uuid4())
        app_id = str(uuid.uuid4())
        mock_session = MagicMock()
        mock_session_maker.return_value.__enter__.return_value = mock_session

        # Mock the GROUP BY query results
        mock_results = [
            ("succeeded", 5),
            ("failed", 2),
            ("running", 1),
        ]
        mock_session.execute.return_value.all.return_value = mock_results

        # Act
        result = repository.get_workflow_runs_count(
            tenant_id=tenant_id,
            app_id=app_id,
            triggered_from=WorkflowRunTriggeredFrom.DEBUGGING,
            status=None,
        )

        # Assert
        assert result["total"] == 8
        assert result["succeeded"] == 5
        assert result["failed"] == 2
        assert result["running"] == 1
        assert result["stopped"] == 0
        assert result["partial-succeeded"] == 0

    def test_get_workflow_runs_count_with_status_filter(self, repository, mock_session_maker):
        """Test getting workflow runs count with status filter."""
        # Arrange
        tenant_id = str(uuid.uuid4())
        app_id = str(uuid.uuid4())
        mock_session = MagicMock()
        mock_session_maker.return_value.__enter__.return_value = mock_session

        # Mock the count query for succeeded status
        mock_session.scalar.return_value = 5

        # Act
        result = repository.get_workflow_runs_count(
            tenant_id=tenant_id,
            app_id=app_id,
            triggered_from=WorkflowRunTriggeredFrom.DEBUGGING,
            status="succeeded",
        )

        # Assert
        assert result["total"] == 5
        assert result["succeeded"] == 5
        assert result["running"] == 0
        assert result["failed"] == 0
        assert result["stopped"] == 0
        assert result["partial-succeeded"] == 0

    def test_get_workflow_runs_count_with_invalid_status(self, repository, mock_session_maker):
        """Test that invalid status is still counted in total but not in any specific status."""
        # Arrange
        tenant_id = str(uuid.uuid4())
        app_id = str(uuid.uuid4())
        mock_session = MagicMock()
        mock_session_maker.return_value.__enter__.return_value = mock_session

        # Mock count query returning 0 for invalid status
        mock_session.scalar.return_value = 0

        # Act
        result = repository.get_workflow_runs_count(
            tenant_id=tenant_id,
            app_id=app_id,
            triggered_from=WorkflowRunTriggeredFrom.DEBUGGING,
            status="invalid_status",
        )

        # Assert
        assert result["total"] == 0
        assert all(result[status] == 0 for status in ["running", "succeeded", "failed", "stopped", "partial-succeeded"])

    def test_get_workflow_runs_count_with_time_range(self, repository, mock_session_maker):
        """Test getting workflow runs count with time range filter verifies SQL query construction."""
        # Arrange
        tenant_id = str(uuid.uuid4())
        app_id = str(uuid.uuid4())
        mock_session = MagicMock()
        mock_session_maker.return_value.__enter__.return_value = mock_session

        # Mock the GROUP BY query results
        mock_results = [
            ("succeeded", 3),
            ("running", 2),
        ]
        mock_session.execute.return_value.all.return_value = mock_results

        # Act
        result = repository.get_workflow_runs_count(
            tenant_id=tenant_id,
            app_id=app_id,
            triggered_from=WorkflowRunTriggeredFrom.DEBUGGING,
            status=None,
            time_range="1d",
        )

        # Assert results
        assert result["total"] == 5
        assert result["succeeded"] == 3
        assert result["running"] == 2
        assert result["failed"] == 0

        # Verify that execute was called (which means GROUP BY query was used)
        assert mock_session.execute.called, "execute should have been called for GROUP BY query"

        # Verify SQL query includes time filter by checking the statement
        call_args = mock_session.execute.call_args
        assert call_args is not None, "execute should have been called with a statement"

        # The first argument should be the SQL statement
        stmt = call_args[0][0]
        # Convert to string to inspect the query
        query_str = str(stmt.compile(compile_kwargs={"literal_binds": True}))

        # Verify the query includes created_at filter
        # The query should have a WHERE clause with created_at comparison
        assert "created_at" in query_str.lower() or "workflow_runs.created_at" in query_str.lower(), (
            "Query should include created_at filter for time range"
        )

    def test_get_workflow_runs_count_with_status_and_time_range(self, repository, mock_session_maker):
        """Test getting workflow runs count with both status and time range filters verifies SQL query."""
        # Arrange
        tenant_id = str(uuid.uuid4())
        app_id = str(uuid.uuid4())
        mock_session = MagicMock()
        mock_session_maker.return_value.__enter__.return_value = mock_session

        # Mock the count query for running status within time range
        mock_session.scalar.return_value = 2

        # Act
        result = repository.get_workflow_runs_count(
            tenant_id=tenant_id,
            app_id=app_id,
            triggered_from=WorkflowRunTriggeredFrom.DEBUGGING,
            status="running",
            time_range="1d",
        )

        # Assert results
        assert result["total"] == 2
        assert result["running"] == 2
        assert result["succeeded"] == 0
        assert result["failed"] == 0

        # Verify that scalar was called (which means COUNT query was used)
        assert mock_session.scalar.called, "scalar should have been called for count query"

        # Verify SQL query includes both status and time filter
        call_args = mock_session.scalar.call_args
        assert call_args is not None, "scalar should have been called with a statement"

        # The first argument should be the SQL statement
        stmt = call_args[0][0]
        # Convert to string to inspect the query
        query_str = str(stmt.compile(compile_kwargs={"literal_binds": True}))

        # Verify the query includes both filters
        assert "created_at" in query_str.lower() or "workflow_runs.created_at" in query_str.lower(), (
            "Query should include created_at filter for time range"
        )
        assert "status" in query_str.lower() or "workflow_runs.status" in query_str.lower(), (
            "Query should include status filter"
        )
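These tests assume that `get_workflow_runs_count` issues a GROUP BY query when no status is given and a plain COUNT when one is. A hedged sketch of how such statements could be composed with SQLAlchemy is shown below; the `WorkflowRun` column names are taken from the filters the tests inspect, everything else is an assumption about the repository internals.

```python
# Sketch of the query construction the tests appear to rely on; not the actual repository code.
from datetime import datetime

from sqlalchemy import func, select

from models import WorkflowRun


def build_count_stmt(tenant_id: str, app_id: str, triggered_from,
                     status: str | None = None, time_threshold: datetime | None = None):
    filters = [
        WorkflowRun.tenant_id == tenant_id,
        WorkflowRun.app_id == app_id,
        WorkflowRun.triggered_from == triggered_from,
    ]
    if time_threshold is not None:
        filters.append(WorkflowRun.created_at >= time_threshold)
    if status is not None:
        # Single COUNT(*) for one status: the repository would run this via session.scalar(stmt).
        filters.append(WorkflowRun.status == status)
        return select(func.count()).select_from(WorkflowRun).where(*filters)
    # Per-status counts in one round trip: session.execute(stmt).all() yields (status, count) rows.
    return select(WorkflowRun.status, func.count()).where(*filters).group_by(WorkflowRun.status)
```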
@@ -181,14 +181,11 @@ class TestAuthIntegration:
    )
    def test_all_providers_factory_creation(self, provider, credentials):
        """Test factory creation for all supported providers"""
        try:
            auth_class = ApiKeyAuthFactory.get_apikey_auth_factory(provider)
            assert auth_class is not None
        auth_class = ApiKeyAuthFactory.get_apikey_auth_factory(provider)
        assert auth_class is not None

            factory = ApiKeyAuthFactory(provider, credentials)
            assert factory.auth is not None
        except ImportError:
            pytest.skip(f"Provider {provider} not implemented yet")
        factory = ApiKeyAuthFactory(provider, credentials)
        assert factory.auth is not None

    def _create_success_response(self, status_code=200):
        """Create successful HTTP response mock"""
@@ -41,7 +41,10 @@ class TestMetadataBugCompleteValidation:
        mock_user.current_tenant_id = "tenant-123"
        mock_user.id = "user-456"

        with patch("services.metadata_service.current_user", mock_user):
        with patch(
            "services.metadata_service.current_account_with_tenant",
            return_value=(mock_user, mock_user.current_tenant_id),
        ):
            # Should crash with TypeError
            with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
                MetadataService.create_metadata("dataset-123", mock_metadata_args)

@@ -51,7 +54,10 @@ class TestMetadataBugCompleteValidation:
        mock_user.current_tenant_id = "tenant-123"
        mock_user.id = "user-456"

        with patch("services.metadata_service.current_user", mock_user):
        with patch(
            "services.metadata_service.current_account_with_tenant",
            return_value=(mock_user, mock_user.current_tenant_id),
        ):
            with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
                MetadataService.update_metadata_name("dataset-123", "metadata-456", None)


@@ -29,7 +29,10 @@ class TestMetadataNullableBug:
        mock_user.current_tenant_id = "tenant-123"
        mock_user.id = "user-456"

        with patch("services.metadata_service.current_user", mock_user):
        with patch(
            "services.metadata_service.current_account_with_tenant",
            return_value=(mock_user, mock_user.current_tenant_id),
        ):
            # This should crash with TypeError when calling len(None)
            with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
                MetadataService.create_metadata("dataset-123", mock_metadata_args)

@@ -40,7 +43,10 @@ class TestMetadataNullableBug:
        mock_user.current_tenant_id = "tenant-123"
        mock_user.id = "user-456"

        with patch("services.metadata_service.current_user", mock_user):
        with patch(
            "services.metadata_service.current_account_with_tenant",
            return_value=(mock_user, mock_user.current_tenant_id),
        ):
            # This should crash with TypeError when calling len(None)
            with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
                MetadataService.update_metadata_name("dataset-123", "metadata-456", None)

@@ -88,7 +94,10 @@ class TestMetadataNullableBug:
        mock_user.current_tenant_id = "tenant-123"
        mock_user.id = "user-456"

        with patch("services.metadata_service.current_user", mock_user):
        with patch(
            "services.metadata_service.current_account_with_tenant",
            return_value=(mock_user, mock_user.current_tenant_id),
        ):
            # Step 4: Service layer crashes on len(None)
            with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
                MetadataService.create_metadata("dataset-123", mock_metadata_args)
20  api/uv.lock
@@ -1,5 +1,5 @@
version = 1
revision = 2
revision = 3
requires-python = ">=3.11, <3.13"
resolution-markers = [
    "python_full_version >= '3.12.4' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'",

@@ -1372,6 +1372,7 @@ dependencies = [
    { name = "transformers" },
    { name = "unstructured", extra = ["docx", "epub", "md", "ppt", "pptx"] },
    { name = "weave" },
    { name = "weaviate-client" },
    { name = "webvtt-py" },
    { name = "yarl" },
]

@@ -1394,6 +1395,7 @@ dev = [
    { name = "pytest-cov" },
    { name = "pytest-env" },
    { name = "pytest-mock" },
    { name = "pytest-timeout" },
    { name = "ruff" },
    { name = "scipy-stubs" },
    { name = "sseclient-py" },

@@ -1561,6 +1563,7 @@ requires-dist = [
    { name = "transformers", specifier = "~=4.56.1" },
    { name = "unstructured", extras = ["docx", "epub", "md", "ppt", "pptx"], specifier = "~=0.16.1" },
    { name = "weave", specifier = "~=0.51.0" },
    { name = "weaviate-client", specifier = "==4.17.0" },
    { name = "webvtt-py", specifier = "~=0.5.1" },
    { name = "yarl", specifier = "~=1.18.3" },
]

@@ -1583,6 +1586,7 @@ dev = [
    { name = "pytest-cov", specifier = "~=4.1.0" },
    { name = "pytest-env", specifier = "~=1.1.3" },
    { name = "pytest-mock", specifier = "~=3.14.0" },
    { name = "pytest-timeout", specifier = ">=2.4.0" },
    { name = "ruff", specifier = "~=0.14.0" },
    { name = "scipy-stubs", specifier = ">=1.15.3.0" },
    { name = "sseclient-py", specifier = ">=1.8.0" },

@@ -1667,7 +1671,7 @@ vdb = [
    { name = "tidb-vector", specifier = "==0.0.9" },
    { name = "upstash-vector", specifier = "==0.6.0" },
    { name = "volcengine-compat", specifier = "~=1.0.0" },
    { name = "weaviate-client", specifier = ">=4.0.0,<5.0.0" },
    { name = "weaviate-client", specifier = "==4.17.0" },
    { name = "xinference-client", specifier = "~=1.2.2" },
]

@@ -4979,6 +4983,18 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/b2/05/77b60e520511c53d1c1ca75f1930c7dd8e971d0c4379b7f4b3f9644685ba/pytest_mock-3.14.1-py3-none-any.whl", hash = "sha256:178aefcd11307d874b4cd3100344e7e2d888d9791a6a1d9bfe90fbc1b74fd1d0", size = 9923, upload-time = "2025-05-26T13:58:43.487Z" },
]

[[package]]
name = "pytest-timeout"
version = "2.4.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "pytest" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ac/82/4c9ecabab13363e72d880f2fb504c5f750433b2b6f16e99f4ec21ada284c/pytest_timeout-2.4.0.tar.gz", hash = "sha256:7e68e90b01f9eff71332b25001f85c75495fc4e3a836701876183c4bcfd0540a", size = 17973, upload-time = "2025-05-05T19:44:34.99Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/fa/b6/3127540ecdf1464a00e5a01ee60a1b09175f6913f0644ac748494d9c4b21/pytest_timeout-2.4.0-py3-none-any.whl", hash = "sha256:c42667e5cdadb151aeb5b26d114aff6bdf5a907f176a007a30b940d3d865b5c2", size = 14382, upload-time = "2025-05-05T19:44:33.502Z" },
]

[[package]]
name = "python-calamine"
version = "0.5.3"
@@ -4,4 +4,6 @@ set -x
SCRIPT_DIR="$(dirname "$(realpath "$0")")"
cd "$SCRIPT_DIR/../.."

pytest api/tests/artifact_tests/
PYTEST_TIMEOUT="${PYTEST_TIMEOUT:-120}"

pytest --timeout "${PYTEST_TIMEOUT}" api/tests/artifact_tests/

@@ -4,7 +4,9 @@ set -x
SCRIPT_DIR="$(dirname "$(realpath "$0")")"
cd "$SCRIPT_DIR/../.."

pytest api/tests/integration_tests/model_runtime/anthropic \
PYTEST_TIMEOUT="${PYTEST_TIMEOUT:-180}"

pytest --timeout "${PYTEST_TIMEOUT}" api/tests/integration_tests/model_runtime/anthropic \
  api/tests/integration_tests/model_runtime/azure_openai \
  api/tests/integration_tests/model_runtime/openai api/tests/integration_tests/model_runtime/chatglm \
  api/tests/integration_tests/model_runtime/google api/tests/integration_tests/model_runtime/xinference \

@@ -4,4 +4,6 @@ set -x
SCRIPT_DIR="$(dirname "$(realpath "$0")")"
cd "$SCRIPT_DIR/../.."

pytest api/tests/test_containers_integration_tests
PYTEST_TIMEOUT="${PYTEST_TIMEOUT:-120}"

pytest --timeout "${PYTEST_TIMEOUT}" api/tests/test_containers_integration_tests

@@ -4,4 +4,6 @@ set -x
SCRIPT_DIR="$(dirname "$(realpath "$0")")"
cd "$SCRIPT_DIR/../.."

pytest api/tests/integration_tests/tools
PYTEST_TIMEOUT="${PYTEST_TIMEOUT:-120}"

pytest --timeout "${PYTEST_TIMEOUT}" api/tests/integration_tests/tools

@@ -4,5 +4,7 @@ set -x
SCRIPT_DIR="$(dirname "$(realpath "$0")")"
cd "$SCRIPT_DIR/../.."

PYTEST_TIMEOUT="${PYTEST_TIMEOUT:-20}"

# libs
pytest api/tests/unit_tests
pytest --timeout "${PYTEST_TIMEOUT}" api/tests/unit_tests

@@ -4,7 +4,9 @@ set -x
SCRIPT_DIR="$(dirname "$(realpath "$0")")"
cd "$SCRIPT_DIR/../.."

pytest api/tests/integration_tests/vdb/chroma \
PYTEST_TIMEOUT="${PYTEST_TIMEOUT:-180}"

pytest --timeout "${PYTEST_TIMEOUT}" api/tests/integration_tests/vdb/chroma \
  api/tests/integration_tests/vdb/milvus \
  api/tests/integration_tests/vdb/pgvecto_rs \
  api/tests/integration_tests/vdb/pgvector \

@@ -4,4 +4,6 @@ set -x
SCRIPT_DIR="$(dirname "$(realpath "$0")")"
cd "$SCRIPT_DIR/../.."

pytest api/tests/integration_tests/workflow
PYTEST_TIMEOUT="${PYTEST_TIMEOUT:-120}"

pytest --timeout "${PYTEST_TIMEOUT}" api/tests/integration_tests/workflow
@@ -24,6 +24,13 @@ services:
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    # TODO: Remove this entrypoint override when weaviate-client 4.17.0 is included in the next Dify release
    entrypoint:
      - /bin/bash
      - -c
      - |
        uv pip install --system weaviate-client==4.17.0
        exec /bin/bash /app/api/docker/entrypoint.sh
    networks:
      - ssrf_proxy_network
      - default

@@ -51,6 +58,13 @@ services:
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    # TODO: Remove this entrypoint override when weaviate-client 4.17.0 is included in the next Dify release
    entrypoint:
      - /bin/bash
      - -c
      - |
        uv pip install --system weaviate-client==4.17.0
        exec /bin/bash /app/api/docker/entrypoint.sh
    networks:
      - ssrf_proxy_network
      - default

@@ -331,7 +345,6 @@ services:
  weaviate:
    image: semitechnologies/weaviate:1.27.0
    profiles:
      - ""
      - weaviate
    restart: always
    volumes:

@@ -1,9 +0,0 @@
services:
  api:
    volumes:
      - ../api/core/rag/datasource/vdb/weaviate/weaviate_vector.py:/app/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py:ro
    command: >
      sh -c "
      pip install --no-cache-dir 'weaviate>=4.0.0' &&
      /bin/bash /entrypoint.sh
      "

@@ -631,6 +631,13 @@ services:
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    # TODO: Remove this entrypoint override when weaviate-client 4.17.0 is included in the next Dify release
    entrypoint:
      - /bin/bash
      - -c
      - |
        uv pip install --system weaviate-client==4.17.0
        exec /bin/bash /app/api/docker/entrypoint.sh
    networks:
      - ssrf_proxy_network
      - default

@@ -658,6 +665,13 @@ services:
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    # TODO: Remove this entrypoint override when weaviate-client 4.17.0 is included in the next Dify release
    entrypoint:
      - /bin/bash
      - -c
      - |
        uv pip install --system weaviate-client==4.17.0
        exec /bin/bash /app/api/docker/entrypoint.sh
    networks:
      - ssrf_proxy_network
      - default

@@ -938,7 +952,6 @@ services:
  weaviate:
    image: semitechnologies/weaviate:1.27.0
    profiles:
      - ""
      - weaviate
    restart: always
    volumes:
@ -115,6 +115,14 @@ docker compose up -d
|
|||
|
||||
إذا كنت بحاجة إلى تخصيص الإعدادات، فيرجى الرجوع إلى التعليقات في ملف [.env.example](../../docker/.env.example) وتحديث القيم المقابلة في ملف `.env`. بالإضافة إلى ذلك، قد تحتاج إلى إجراء تعديلات على ملف `docker-compose.yaml` نفسه، مثل تغيير إصدارات الصور أو تعيينات المنافذ أو نقاط تحميل وحدات التخزين، بناءً على بيئة النشر ومتطلباتك الخاصة. بعد إجراء أي تغييرات، يرجى إعادة تشغيل `docker-compose up -d`. يمكنك العثور على قائمة كاملة بمتغيرات البيئة المتاحة [هنا](https://docs.dify.ai/getting-started/install-self-hosted/environments).
|
||||
|
||||
### مراقبة المقاييس باستخدام Grafana
|
||||
|
||||
استيراد لوحة التحكم إلى Grafana، باستخدام قاعدة بيانات PostgreSQL الخاصة بـ Dify كمصدر للبيانات، لمراقبة المقاييس بدقة للتطبيقات والمستأجرين والرسائل وغير ذلك.
|
||||
|
||||
- [لوحة تحكم Grafana بواسطة @bowenliang123](https://github.com/bowenliang123/dify-grafana-dashboard)
|
||||
|
||||
### النشر باستخدام Kubernetes
|
||||
|
||||
يوجد مجتمع خاص بـ [Helm Charts](https://helm.sh/) وملفات YAML التي تسمح بتنفيذ Dify على Kubernetes للنظام من الإيجابيات العلوية.
|
||||
|
||||
- [رسم بياني Helm من قبل @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
|
||||
|
|
|
|||
|
|
@ -132,6 +132,14 @@ GitHub-এ ডিফাইকে স্টার দিয়ে রাখুন
|
|||
যদি আপনার কনফিগারেশনটি কাস্টমাইজ করার প্রয়োজন হয়, তাহলে অনুগ্রহ করে আমাদের [.env.example](../../docker/.env.example) ফাইল দেখুন এবং আপনার `.env` ফাইলে সংশ্লিষ্ট মানগুলি আপডেট করুন। এছাড়াও, আপনার নির্দিষ্ট এনভায়রনমেন্ট এবং প্রয়োজনীয়তার উপর ভিত্তি করে আপনাকে `docker-compose.yaml` ফাইলে সমন্বয় করতে হতে পারে, যেমন ইমেজ ভার্সন পরিবর্তন করা, পোর্ট ম্যাপিং করা, অথবা ভলিউম মাউন্ট করা।
|
||||
যেকোনো পরিবর্তন করার পর, অনুগ্রহ করে `docker-compose up -d` পুনরায় চালান। ভেরিয়েবলের সম্পূর্ণ তালিকা [এখানে] (https://docs.dify.ai/getting-started/install-self-hosted/environments) খুঁজে পেতে পারেন।
|
||||
|
||||
### Grafana দিয়ে মেট্রিক্স মনিটরিং
|
||||
|
||||
Dify-এর PostgreSQL ডাটাবেসকে ডেটা সোর্স হিসাবে ব্যবহার করে, অ্যাপ, টেন্যান্ট, মেসেজ ইত্যাদির গ্র্যানুলারিটিতে মেট্রিক্স মনিটর করার জন্য Grafana-তে ড্যাশবোর্ড ইম্পোর্ট করুন।
|
||||
|
||||
- [@bowenliang123 কর্তৃক Grafana ড্যাশবোর্ড](https://github.com/bowenliang123/dify-grafana-dashboard)
|
||||
|
||||
### Kubernetes এর সাথে ডেপ্লয়মেন্ট
|
||||
|
||||
যদি আপনি একটি হাইলি এভেইলেবল সেটআপ কনফিগার করতে চান, তাহলে কমিউনিটি [Helm Charts](https://helm.sh/) এবং YAML ফাইল রয়েছে যা Dify কে Kubernetes-এ ডিপ্লয় করার প্রক্রিয়া বর্ণনা করে।
|
||||
|
||||
- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
|
||||
|
|
|
|||
|
|
@ -130,6 +130,14 @@ Star Dify auf GitHub und lassen Sie sich sofort über neue Releases benachrichti
|
|||
|
||||
Falls Sie die Konfiguration anpassen müssen, lesen Sie bitte die Kommentare in unserer [.env.example](../../docker/.env.example)-Datei und aktualisieren Sie die entsprechenden Werte in Ihrer `.env`-Datei. Zusätzlich müssen Sie eventuell Anpassungen an der `docker-compose.yaml`-Datei vornehmen, wie zum Beispiel das Ändern von Image-Versionen, Portzuordnungen oder Volumen-Mounts, je nach Ihrer spezifischen Einsatzumgebung und Ihren Anforderungen. Nachdem Sie Änderungen vorgenommen haben, starten Sie `docker-compose up -d` erneut. Eine vollständige Liste der verfügbaren Umgebungsvariablen finden Sie [hier](https://docs.dify.ai/getting-started/install-self-hosted/environments).
|
||||
|
||||
### Metriküberwachung mit Grafana
|
||||
|
||||
Importieren Sie das Dashboard in Grafana, wobei Sie die PostgreSQL-Datenbank von Dify als Datenquelle verwenden, um Metriken in der Granularität von Apps, Mandanten, Nachrichten und mehr zu überwachen.
|
||||
|
||||
- [Grafana-Dashboard von @bowenliang123](https://github.com/bowenliang123/dify-grafana-dashboard)
|
||||
|
||||
### Bereitstellung mit Kubernetes
|
||||
|
||||
Falls Sie eine hochverfügbare Konfiguration einrichten möchten, gibt es von der Community bereitgestellte [Helm Charts](https://helm.sh/) und YAML-Dateien, die es ermöglichen, Dify auf Kubernetes bereitzustellen.
|
||||
|
||||
- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
|
||||
|
|
|
|||
|
|
@ -128,6 +128,14 @@ Si necesita personalizar la configuración, consulte los comentarios en nuestro
|
|||
|
||||
. Después de realizar los cambios, ejecuta `docker-compose up -d` nuevamente. Puedes ver la lista completa de variables de entorno [aquí](https://docs.dify.ai/getting-started/install-self-hosted/environments).
|
||||
|
||||
### Monitorización de Métricas con Grafana
|
||||
|
||||
Importe el panel a Grafana, utilizando la base de datos PostgreSQL de Dify como fuente de datos, para monitorizar métricas en granularidad de aplicaciones, inquilinos, mensajes y más.
|
||||
|
||||
- [Panel de Grafana por @bowenliang123](https://github.com/bowenliang123/dify-grafana-dashboard)
|
||||
|
||||
### Implementación con Kubernetes
|
||||
|
||||
Si desea configurar una configuración de alta disponibilidad, la comunidad proporciona [Gráficos Helm](https://helm.sh/) y archivos YAML, a través de los cuales puede desplegar Dify en Kubernetes.
|
||||
|
||||
- [Gráfico Helm por @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
|
||||
|
|
|
|||
|
|
@ -126,6 +126,14 @@ Après l'exécution, vous pouvez accéder au tableau de bord Dify dans votre nav
|
|||
|
||||
Si vous devez personnaliser la configuration, veuillez vous référer aux commentaires dans notre fichier [.env.example](../../docker/.env.example) et mettre à jour les valeurs correspondantes dans votre fichier `.env`. De plus, vous devrez peut-être apporter des modifications au fichier `docker-compose.yaml` lui-même, comme changer les versions d'image, les mappages de ports ou les montages de volumes, en fonction de votre environnement de déploiement et de vos exigences spécifiques. Après avoir effectué des modifications, veuillez réexécuter `docker-compose up -d`. Vous pouvez trouver la liste complète des variables d'environnement disponibles [ici](https://docs.dify.ai/getting-started/install-self-hosted/environments).
|
||||
|
||||
### Surveillance des Métriques avec Grafana
|
||||
|
||||
Importez le tableau de bord dans Grafana, en utilisant la base de données PostgreSQL de Dify comme source de données, pour surveiller les métriques avec une granularité d'applications, de locataires, de messages et plus.
|
||||
|
||||
- [Tableau de bord Grafana par @bowenliang123](https://github.com/bowenliang123/dify-grafana-dashboard)
|
||||
|
||||
### Déploiement avec Kubernetes
|
||||
|
||||
Si vous souhaitez configurer une configuration haute disponibilité, la communauté fournit des [Helm Charts](https://helm.sh/) et des fichiers YAML, à travers lesquels vous pouvez déployer Dify sur Kubernetes.
|
||||
|
||||
- [Helm Chart par @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
|
||||
|
|
|
|||
|
|
@ -127,6 +127,14 @@ docker compose up -d
|
|||
|
||||
設定をカスタマイズする必要がある場合は、[.env.example](../../docker/.env.example) ファイルのコメントを参照し、`.env` ファイルの対応する値を更新してください。さらに、デプロイ環境や要件に応じて、`docker-compose.yaml` ファイル自体を調整する必要がある場合があります。たとえば、イメージのバージョン、ポートのマッピング、ボリュームのマウントなどを変更します。変更を加えた後は、`docker-compose up -d` を再実行してください。利用可能な環境変数の全一覧は、[こちら](https://docs.dify.ai/getting-started/install-self-hosted/environments)で確認できます。
|
||||
|
||||
### Grafanaを使用したメトリクス監視
|
||||
|
||||
Grafanaにダッシュボードをインポートし、DifyのPostgreSQLデータベースをデータソースとして使用して、アプリ、テナント、メッセージなどの粒度でメトリクスを監視します。
|
||||
|
||||
- [@bowenliang123によるGrafanaダッシュボード](https://github.com/bowenliang123/dify-grafana-dashboard)
|
||||
|
||||
### Kubernetesでのデプロイ
|
||||
|
||||
高可用性設定を設定する必要がある場合、コミュニティは[Helm Charts](https://helm.sh/)とYAMLファイルにより、DifyをKubernetesにデプロイすることができます。
|
||||
|
||||
- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
|
||||
|
|
|
|||
|
|
@ -120,6 +120,14 @@ docker compose up -d

구성을 사용자 정의해야 하는 경우 [.env.example](../../docker/.env.example) 파일의 주석을 참조하고 `.env` 파일에서 해당 값을 업데이트하십시오. 또한 특정 배포 환경 및 요구 사항에 따라 `docker-compose.yaml` 파일 자체를 조정해야 할 수도 있습니다. 예를 들어 이미지 버전, 포트 매핑 또는 볼륨 마운트를 변경합니다. 변경한 후 `docker-compose up -d`를 다시 실행하십시오. 사용 가능한 환경 변수의 전체 목록은 [여기](https://docs.dify.ai/getting-started/install-self-hosted/environments)에서 찾을 수 있습니다.

### Grafana를 사용한 메트릭 모니터링

Dify의 PostgreSQL 데이터베이스를 데이터 소스로 사용하여 앱, 테넌트, 메시지 등에 대한 세분화된 메트릭을 모니터링하기 위해 대시보드를 Grafana로 가져옵니다.

- [@bowenliang123의 Grafana 대시보드](https://github.com/bowenliang123/dify-grafana-dashboard)

### Kubernetes를 통한 배포

고가용성 구성이 필요한 경우, Dify를 Kubernetes에 배포할 수 있도록 커뮤니티가 기여한 [Helm Charts](https://helm.sh/)와 YAML 파일이 제공됩니다.

- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
@ -126,6 +126,14 @@ Após a execução, você pode acessar o painel do Dify no navegador em [http://

Se precisar personalizar a configuração, consulte os comentários no nosso arquivo [.env.example](../../docker/.env.example) e atualize os valores correspondentes no seu arquivo `.env`. Além disso, talvez seja necessário fazer ajustes no próprio arquivo `docker-compose.yaml`, como alterar versões de imagem, mapeamentos de portas ou montagens de volumes, com base no seu ambiente de implantação específico e nas suas necessidades. Após fazer quaisquer alterações, execute novamente `docker-compose up -d`. Você pode encontrar a lista completa de variáveis de ambiente disponíveis [aqui](https://docs.dify.ai/getting-started/install-self-hosted/environments).

### Monitoramento de Métricas com Grafana

Importe o dashboard para o Grafana, usando o banco de dados PostgreSQL do Dify como fonte de dados, para monitorar métricas na granularidade de aplicativos, inquilinos, mensagens e muito mais.

- [Dashboard do Grafana por @bowenliang123](https://github.com/bowenliang123/dify-grafana-dashboard)

### Implantação com Kubernetes

Se deseja configurar uma instalação de alta disponibilidade, há [Helm Charts](https://helm.sh/) e arquivos YAML contribuídos pela comunidade que permitem a implantação do Dify no Kubernetes.

- [Helm Chart de @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
@ -128,6 +128,14 @@ Star Dify on GitHub and be instantly notified of new releases.

Če morate prilagoditi konfiguracijo, si oglejte komentarje v naši datoteki [.env.example](../../docker/.env.example) in posodobite ustrezne vrednosti v svoji datoteki `.env`. Poleg tega boste morda morali prilagoditi samo datoteko `docker-compose.yaml`, na primer spremeniti različice slik, preslikave vrat ali namestitve nosilcev, glede na vaše specifično okolje in zahteve za uvajanje. Po kakršnih koli spremembah ponovno zaženite `docker-compose up -d`. Celoten seznam razpoložljivih spremenljivk okolja najdete [tukaj](https://docs.dify.ai/getting-started/install-self-hosted/environments).

### Spremljanje metrik z Grafano

Uvozite nadzorno ploščo v Grafano in uporabite Difyjevo podatkovno bazo PostgreSQL kot vir podatkov za spremljanje metrik na ravni aplikacij, najemnikov, sporočil in drugega.

- [Nadzorna plošča Grafana avtorja @bowenliang123](https://github.com/bowenliang123/dify-grafana-dashboard)

### Namestitev s Kubernetes

Če želite konfigurirati visoko razpoložljivo nastavitev, so na voljo [Helm Charts](https://helm.sh/) in datoteke YAML, ki jih prispeva skupnost in omogočajo uvedbo Difyja v Kubernetes.

- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
@ -120,6 +120,14 @@ docker compose up -d

Yapılandırmayı özelleştirmeniz gerekiyorsa, lütfen [.env.example](../../docker/.env.example) dosyamızdaki yorumlara bakın ve `.env` dosyanızdaki ilgili değerleri güncelleyin. Ayrıca, spesifik dağıtım ortamınıza ve gereksinimlerinize bağlı olarak `docker-compose.yaml` dosyasının kendisinde de, imaj sürümlerini, port eşlemelerini veya hacim bağlantılarını değiştirmek gibi ayarlamalar yapmanız gerekebilir. Herhangi bir değişiklik yaptıktan sonra, lütfen `docker-compose up -d` komutunu tekrar çalıştırın. Kullanılabilir tüm ortam değişkenlerinin tam listesini [burada](https://docs.dify.ai/getting-started/install-self-hosted/environments) bulabilirsiniz.

### Grafana ile Metrik İzleme

Uygulamalar, kiracılar, mesajlar ve daha fazlasının granularitesinde metrikleri izlemek için Dify'nin PostgreSQL veritabanını veri kaynağı olarak kullanarak panoyu Grafana'ya aktarın.

- [@bowenliang123 tarafından Grafana Panosu](https://github.com/bowenliang123/dify-grafana-dashboard)

### Kubernetes ile Dağıtım

Yüksek kullanılabilirliğe sahip bir kurulum yapılandırmak isterseniz, Dify'ın Kubernetes üzerine dağıtılmasına olanak tanıyan topluluk katkılı [Helm Charts](https://helm.sh/) ve YAML dosyaları mevcuttur.

- [@LeoQuote tarafından Helm Chart](https://github.com/douban/charts/tree/master/charts/dify)
@ -121,6 +121,14 @@ Sau khi chạy, bạn có thể truy cập bảng điều khiển Dify trong tr

Nếu bạn cần tùy chỉnh cấu hình, vui lòng tham khảo các nhận xét trong tệp [.env.example](../../docker/.env.example) của chúng tôi và cập nhật các giá trị tương ứng trong tệp `.env` của bạn. Ngoài ra, bạn có thể cần điều chỉnh tệp `docker-compose.yaml`, chẳng hạn như thay đổi phiên bản hình ảnh, ánh xạ cổng hoặc gắn kết volume, dựa trên môi trường triển khai cụ thể và yêu cầu của bạn. Sau khi thực hiện bất kỳ thay đổi nào, vui lòng chạy lại `docker-compose up -d`. Bạn có thể tìm thấy danh sách đầy đủ các biến môi trường có sẵn [tại đây](https://docs.dify.ai/getting-started/install-self-hosted/environments).

### Giám sát Số liệu với Grafana

Nhập bảng điều khiển vào Grafana, sử dụng cơ sở dữ liệu PostgreSQL của Dify làm nguồn dữ liệu, để giám sát số liệu theo mức độ chi tiết của ứng dụng, người thuê, tin nhắn và hơn thế nữa.

- [Bảng điều khiển Grafana của @bowenliang123](https://github.com/bowenliang123/dify-grafana-dashboard)

### Triển khai với Kubernetes

Nếu bạn muốn cấu hình một cài đặt có độ sẵn sàng cao, có các [Helm Charts](https://helm.sh/) và tệp YAML do cộng đồng đóng góp cho phép Dify được triển khai trên Kubernetes.

- [Helm Chart bởi @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
@ -0,0 +1,187 @@
# Weaviate Migration Guide: v1.19 → v1.27

## Overview

Dify has upgraded from Weaviate v1.19 to v1.27, with the Python client updated from v3.24 to v4.17.

## What Changed

### Breaking Changes

1. **Weaviate Server**: `1.19.0` → `1.27.0`
2. **Python Client**: `weaviate-client~=3.24.0` → `weaviate-client==4.17.0`
3. **gRPC Required**: Weaviate v1.27 requires gRPC port `50051` (in addition to HTTP port `8080`); a quick connectivity check is sketched after this list
4. **Docker Compose**: Added temporary entrypoint overrides for client installation
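
Because the v4 client talks to Weaviate over both HTTP and gRPC, it is worth confirming early that port `50051` is actually reachable. The snippet below is only an illustrative sketch: it assumes you run it from the `api` directory, that both ports are published on localhost, and that `WEAVIATE_API_KEY` holds your instance's API key (drop `auth_credentials` if anonymous access is enabled).

```bash
cd api
uv run python - <<'EOF'
import os
import weaviate
from weaviate.classes.init import Auth

# The v4 client needs HTTP (8080) and gRPC (50051); this fails fast if either is unreachable.
client = weaviate.connect_to_local(
    host="localhost",
    port=8080,
    grpc_port=50051,
    auth_credentials=Auth.api_key(os.environ.get("WEAVIATE_API_KEY", "")),  # assumed env var
)
print("ready:", client.is_ready())
client.close()
EOF
```
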
### Key Improvements

- Faster vector operations via gRPC
- Improved batch processing (a minimal batching sketch follows this list)
- Better error handling
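
To illustrate the new batching path, here is a minimal v4-style batch insert. Treat it as a sketch only: the collection name is hypothetical, auto-schema is assumed to be enabled so the collection is created on first write, and `WEAVIATE_API_KEY` is again assumed to hold your API key.

```bash
cd api
uv run python - <<'EOF'
import os
import weaviate
from weaviate.classes.init import Auth

client = weaviate.connect_to_local(
    port=8080,
    grpc_port=50051,
    auth_credentials=Auth.api_key(os.environ.get("WEAVIATE_API_KEY", "")),
)
collection = client.collections.get("MigrationSmokeTest")  # hypothetical collection name

# v4 batches are streamed over gRPC; failures are collected rather than raised mid-batch.
with collection.batch.fixed_size(batch_size=100) as batch:
    for i in range(10):
        batch.add_object(properties={"text": f"object {i}"})

print("failed objects:", len(collection.batch.failed_objects))
client.close()
EOF
```
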
## Migration Steps

### For Docker Users

#### Step 1: Backup Your Data

```bash
cd docker
docker compose down
sudo cp -r ./volumes/weaviate ./volumes/weaviate_backup_$(date +%Y%m%d)
```
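
Before moving on, it can be worth a quick sanity check that the copy completed and is roughly the same size as the live volume:

```bash
# The two sizes should match closely
du -sh ./volumes/weaviate ./volumes/weaviate_backup_$(date +%Y%m%d)
```
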
#### Step 2: Update Dify

```bash
git pull origin main
docker compose pull
```

#### Step 3: Start Services

```bash
docker compose up -d
sleep 30
curl http://localhost:8080/v1/meta
```

#### Step 4: Verify Migration

```bash
# Check both ports are accessible
curl http://localhost:8080/v1/meta
netstat -tulpn | grep 50051

# Test in Dify UI:
# 1. Go to Knowledge Base
# 2. Test search functionality
# 3. Upload a test document
```
### For Source Installation

#### Step 1: Update Dependencies

```bash
cd api
uv sync --dev
uv run python -c "import weaviate; print(weaviate.__version__)"
# Should show: 4.17.0
```
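
Optionally, also confirm that the v4-only `weaviate.classes` module resolves, since that is the import that breaks when an old 3.x client is still installed (see Troubleshooting below):

```bash
uv run python -c "import weaviate.classes as wvc; print('weaviate.classes OK')"
```
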
#### Step 2: Update Weaviate Server

```bash
cd docker
docker compose -f docker-compose.middleware.yaml --profile weaviate up -d weaviate
curl http://localhost:8080/v1/meta
netstat -tulpn | grep 50051
```
## Troubleshooting

### Error: "No module named 'weaviate.classes'"

**Solution**:

```bash
cd api
uv sync --reinstall-package weaviate-client
uv run python -c "import weaviate; print(weaviate.__version__)"
# Should show: 4.17.0
```
### Error: "gRPC health check failed"

**Solution**:

```bash
# Check Weaviate ports
docker ps | grep weaviate
# Should show: 0.0.0.0:8080->8080/tcp, 0.0.0.0:50051->50051/tcp

# If missing gRPC port, add to docker-compose:
# ports:
#   - "8080:8080"
#   - "50051:50051"
```
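
If the port mapping is missing, one low-touch way to add it without editing `docker-compose.yaml` directly is a compose override file. This is a sketch that assumes the service is named `weaviate`, as in Dify's default compose file; if your Compose version does not pick the override up automatically, pass both files explicitly with `-f`.

```bash
cd docker
cat > docker-compose.override.yaml <<'EOF'
services:
  weaviate:
    ports:
      - "8080:8080"
      - "50051:50051"
EOF
docker compose up -d weaviate
```
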
### Error: "Weaviate version 1.19.0 is not supported"

**Solution**:

```bash
# Update Weaviate image in docker-compose
# Change: semitechnologies/weaviate:1.19.0
# To: semitechnologies/weaviate:1.27.0
docker compose down
docker compose up -d
```

### Data Migration Failed

**Solution**:

```bash
cd docker
docker compose down
sudo rm -rf ./volumes/weaviate
sudo cp -r ./volumes/weaviate_backup_YYYYMMDD ./volumes/weaviate
docker compose up -d
```

## Rollback Instructions

```bash
# 1. Stop services
docker compose down

# 2. Restore data backup
sudo rm -rf ./volumes/weaviate
sudo cp -r ./volumes/weaviate_backup_YYYYMMDD ./volumes/weaviate

# 3. Checkout previous version
git checkout <previous-commit>

# 4. Restart services
docker compose up -d
```
## Compatibility

| Component | Old Version | New Version | Compatible |
|-----------|-------------|-------------|------------|
| Weaviate Server | 1.19.0 | 1.27.0 | ✅ Yes |
| weaviate-client | ~3.24.0 | ==4.17.0 | ✅ Yes |
| Existing Data | v1.19 format | v1.27 format | ✅ Yes |
## Testing Checklist

Before deploying to production (a quick smoke test is sketched after this list):

- [ ] Backup all Weaviate data
- [ ] Test in staging environment
- [ ] Verify existing collections are accessible
- [ ] Test vector search functionality
- [ ] Test document upload and retrieval
- [ ] Monitor gRPC connection stability
- [ ] Check performance metrics
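
The sketch below only covers the most mechanical of these points, checking the reported server version and the gRPC listener; the collection, search, and upload checks still need to be exercised through the Dify UI or API.

```bash
# Confirm the server reports a 1.27.x version and the gRPC port is listening
curl -s http://localhost:8080/v1/meta | grep -o '"version":"[^"]*"'
netstat -tulpn | grep 50051 || echo "gRPC port 50051 is not listening"
```
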
## Support

If you encounter issues:

1. Check GitHub Issues: https://github.com/langgenius/dify/issues
2. Create a bug report with:
   - Error messages
   - Docker logs: `docker compose logs weaviate`
   - Dify version
   - Migration steps attempted

## Important Notes

- **Data Safety**: Existing vector data remains fully compatible
- **No Re-indexing**: No need to rebuild vector indexes
- **Temporary Workaround**: The entrypoint overrides are temporary until the next Dify release
- **Performance**: You may see improved performance due to gRPC usage

@ -127,6 +127,12 @@ docker compose up -d

如果您需要自定义配置,请参考 [.env.example](../../docker/.env.example) 文件中的注释,并更新 `.env` 文件中对应的值。此外,您可能需要根据您的具体部署环境和需求对 `docker-compose.yaml` 文件本身进行调整,例如更改镜像版本、端口映射或卷挂载。完成任何更改后,请重新运行 `docker-compose up -d`。您可以在[此处](https://docs.dify.ai/getting-started/install-self-hosted/environments)找到可用环境变量的完整列表。

### 使用 Grafana 进行指标监控

将仪表板导入 Grafana,使用 Dify 的 PostgreSQL 数据库作为数据源,以监控应用、租户、消息等粒度的指标。

- [由 @bowenliang123 提供的 Grafana 仪表板](https://github.com/bowenliang123/dify-grafana-dashboard)

#### 使用 Helm Chart 或 Kubernetes 资源清单(YAML)部署

使用 [Helm Chart](https://helm.sh/) 版本或者 Kubernetes 资源清单(YAML),可以在 Kubernetes 上部署 Dify。
@ -130,6 +130,14 @@ Dify 的所有功能都提供相應的 API,因此您可以輕鬆地將 Dify

如果您需要自定義配置,請參考我們的 [.env.example](../../docker/.env.example) 文件中的註釋,並在您的 `.env` 文件中更新相應的值。此外,根據您特定的部署環境和需求,您可能需要調整 `docker-compose.yaml` 文件本身,例如更改映像版本、端口映射或卷掛載。進行任何更改後,請重新運行 `docker-compose up -d`。您可以在[這裡](https://docs.dify.ai/getting-started/install-self-hosted/environments)找到可用環境變數的完整列表。

### 使用 Grafana 進行指標監控

將儀表板匯入 Grafana,使用 Dify 的 PostgreSQL 資料庫作為資料來源,以監控應用程式、租戶、訊息等顆粒度的指標。

- [由 @bowenliang123 提供的 Grafana 儀表板](https://github.com/bowenliang123/dify-grafana-dashboard)

### 使用 Kubernetes 部署

如果您想配置高可用性設置,社區貢獻的 [Helm Charts](https://helm.sh/) 和 Kubernetes 資源清單(YAML)允許在 Kubernetes 上部署 Dify。

- [由 @LeoQuote 提供的 Helm Chart](https://github.com/douban/charts/tree/master/charts/dify)
@ -100,7 +100,10 @@ export default function MailAndPasswordAuth({ isEmailSetup }: MailAndPasswordAut
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
catch (e: any) {
|
||||
if (e.code === 'authentication_failed')
|
||||
Toast.notify({ type: 'error', message: e.message })
|
||||
}
|
||||
finally {
|
||||
setIsLoading(false)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
'use client'
|
||||
import type { FC } from 'react'
|
||||
import React, { useState } from 'react'
|
||||
import React, { useEffect, useState } from 'react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
import { RiDeleteBinLine, RiEditFill, RiEditLine } from '@remixicon/react'
|
||||
import { Robot, User } from '@/app/components/base/icons/src/public/avatar'
|
||||
|
|
@ -16,7 +16,7 @@ type Props = {
|
|||
type: EditItemType
|
||||
content: string
|
||||
readonly?: boolean
|
||||
onSave: (content: string) => void
|
||||
onSave: (content: string) => Promise<void>
|
||||
}
|
||||
|
||||
export const EditTitle: FC<{ className?: string; title: string }> = ({ className, title }) => (
|
||||
|
|
@ -46,8 +46,13 @@ const EditItem: FC<Props> = ({
|
|||
const placeholder = type === EditItemType.Query ? t('appAnnotation.editModal.queryPlaceholder') : t('appAnnotation.editModal.answerPlaceholder')
|
||||
const [isEdit, setIsEdit] = useState(false)
|
||||
|
||||
const handleSave = () => {
|
||||
onSave(newContent)
|
||||
// Reset newContent when content prop changes
|
||||
useEffect(() => {
|
||||
setNewContent('')
|
||||
}, [content])
|
||||
|
||||
const handleSave = async () => {
|
||||
await onSave(newContent)
|
||||
setIsEdit(false)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ type Props = {
|
|||
isShow: boolean
|
||||
onHide: () => void
|
||||
item: AnnotationItem
|
||||
onSave: (editedQuery: string, editedAnswer: string) => void
|
||||
onSave: (editedQuery: string, editedAnswer: string) => Promise<void>
|
||||
onRemove: () => void
|
||||
}
|
||||
|
||||
|
|
@ -46,6 +46,16 @@ const ViewAnnotationModal: FC<Props> = ({
|
|||
const [currPage, setCurrPage] = React.useState<number>(0)
|
||||
const [total, setTotal] = useState(0)
|
||||
const [hitHistoryList, setHitHistoryList] = useState<HitHistoryItem[]>([])
|
||||
|
||||
// Update local state when item prop changes (e.g., when modal is reopened with updated data)
|
||||
useEffect(() => {
|
||||
setNewQuery(question)
|
||||
setNewAnswer(answer)
|
||||
setCurrPage(0)
|
||||
setTotal(0)
|
||||
setHitHistoryList([])
|
||||
}, [question, answer, id])
|
||||
|
||||
const fetchHitHistory = async (page = 1) => {
|
||||
try {
|
||||
const { data, total }: any = await fetchHitHistoryList(appId, id, {
|
||||
|
|
@ -63,6 +73,12 @@ const ViewAnnotationModal: FC<Props> = ({
|
|||
fetchHitHistory(currPage + 1)
|
||||
}, [currPage])
|
||||
|
||||
// Fetch hit history when item changes
|
||||
useEffect(() => {
|
||||
if (isShow && id)
|
||||
fetchHitHistory(1)
|
||||
}, [id, isShow])
|
||||
|
||||
const tabs = [
|
||||
{ value: TabType.annotation, text: t('appAnnotation.viewModal.annotatedResponse') },
|
||||
{
|
||||
|
|
@ -82,14 +98,20 @@ const ViewAnnotationModal: FC<Props> = ({
|
|||
},
|
||||
]
|
||||
const [activeTab, setActiveTab] = useState(TabType.annotation)
|
||||
const handleSave = (type: EditItemType, editedContent: string) => {
|
||||
if (type === EditItemType.Query) {
|
||||
setNewQuery(editedContent)
|
||||
onSave(editedContent, newAnswer)
|
||||
const handleSave = async (type: EditItemType, editedContent: string) => {
|
||||
try {
|
||||
if (type === EditItemType.Query) {
|
||||
await onSave(editedContent, newAnswer)
|
||||
setNewQuery(editedContent)
|
||||
}
|
||||
else {
|
||||
await onSave(newQuestion, editedContent)
|
||||
setNewAnswer(editedContent)
|
||||
}
|
||||
}
|
||||
else {
|
||||
setNewAnswer(editedContent)
|
||||
onSave(newQuestion, editedContent)
|
||||
catch (error) {
|
||||
// If save fails, don't update local state
|
||||
console.error('Failed to save annotation:', error)
|
||||
}
|
||||
}
|
||||
const [showModal, setShowModal] = useState(false)
|
||||
|
|
|
|||
|
|
@ -32,7 +32,7 @@ const TopKItem: FC<Props> = ({
|
|||
}) => {
|
||||
const { t } = useTranslation()
|
||||
const handleParamChange = (key: string, value: number) => {
|
||||
let notOutRangeValue = Number.parseFloat(value.toFixed(2))
|
||||
let notOutRangeValue = Number.parseInt(value.toFixed(0))
|
||||
notOutRangeValue = Math.max(VALUE_LIMIT.min, notOutRangeValue)
|
||||
notOutRangeValue = Math.min(VALUE_LIMIT.max, notOutRangeValue)
|
||||
onChange(key, notOutRangeValue)
|
||||
|
|
|
|||
|
|
@ -25,8 +25,8 @@ export type TextareaProps = {
|
|||
destructive?: boolean
|
||||
styleCss?: CSSProperties
|
||||
ref?: React.Ref<HTMLTextAreaElement>
|
||||
onFocus?: () => void
|
||||
onBlur?: () => void
|
||||
onFocus?: React.FocusEventHandler<HTMLTextAreaElement>
|
||||
onBlur?: React.FocusEventHandler<HTMLTextAreaElement>
|
||||
} & React.TextareaHTMLAttributes<HTMLTextAreaElement> & VariantProps<typeof textareaVariants>
|
||||
|
||||
const Textarea = React.forwardRef<HTMLTextAreaElement, TextareaProps>(
|
||||
|
|
|
|||
|
|
@ -178,6 +178,7 @@ const ToolPicker: FC<Props> = ({
|
|||
mcpTools={mcpTools || []}
|
||||
selectedTools={selectedTools}
|
||||
canChooseMCPTool={canChooseMCPTool}
|
||||
onTagsChange={setTags}
|
||||
/>
|
||||
</div>
|
||||
</PortalToFollowElemContent>
|
||||
|
|
|
|||
|
|
@ -39,7 +39,6 @@ const Item: FC<Props> = ({
|
|||
key={tool.id}
|
||||
payload={tool}
|
||||
viewType={ViewType.tree}
|
||||
isShowLetterIndex={false}
|
||||
hasSearchText={hasSearchText}
|
||||
onSelect={onSelect}
|
||||
canNotSelectMultiple={canNotSelectMultiple}
|
||||
|
|
|
|||
|
|
@ -37,6 +37,7 @@ export type ToolDefaultValue = {
|
|||
paramSchemas: Record<string, any>[]
|
||||
credential_id?: string
|
||||
meta?: PluginMeta
|
||||
output_schema?: Record<string, any>
|
||||
}
|
||||
|
||||
export type DataSourceDefaultValue = {
|
||||
|
|
|
|||
|
|
@ -8,8 +8,8 @@ export enum ScrollPosition {
|
|||
}
|
||||
|
||||
type Params = {
|
||||
wrapElemRef: React.RefObject<HTMLElement>
|
||||
nextToStickyELemRef: React.RefObject<HTMLElement>
|
||||
wrapElemRef: React.RefObject<HTMLElement | null>
|
||||
nextToStickyELemRef: React.RefObject<HTMLElement | null>
|
||||
}
|
||||
const useStickyScroll = ({
|
||||
wrapElemRef,
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ const DatasetsDetailProvider: FC<DatasetsDetailProviderProps> = ({
|
|||
nodes,
|
||||
children,
|
||||
}) => {
|
||||
const storeRef = useRef<DatasetsDetailStoreApi>()
|
||||
const storeRef = useRef<DatasetsDetailStoreApi>(undefined)
|
||||
|
||||
if (!storeRef.current)
|
||||
storeRef.current = createDatasetsDetailStore()
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ import {
|
|||
useReactFlow,
|
||||
useStoreApi,
|
||||
} from 'reactflow'
|
||||
import type { ToolDefaultValue } from '../block-selector/types'
|
||||
import type { DataSourceDefaultValue, ToolDefaultValue } from '../block-selector/types'
|
||||
import type { Edge, Node, OnNodeAdd } from '../types'
|
||||
import { BlockEnum } from '../types'
|
||||
import { useWorkflowStore } from '../store'
|
||||
|
|
@ -1286,7 +1286,7 @@ export const useNodesInteractions = () => {
|
|||
currentNodeId: string,
|
||||
nodeType: BlockEnum,
|
||||
sourceHandle: string,
|
||||
toolDefaultValue?: ToolDefaultValue,
|
||||
toolDefaultValue?: ToolDefaultValue | DataSourceDefaultValue,
|
||||
) => {
|
||||
if (getNodesReadOnly()) return
|
||||
|
||||
|
|
|
|||
|
|
@ -212,7 +212,7 @@ export const AgentStrategySelector = memo((props: AgentStrategySelectorProps) =>
|
|||
agent_strategy_name: tool!.tool_name,
|
||||
agent_strategy_provider_name: tool!.provider_name,
|
||||
agent_strategy_label: tool!.tool_label,
|
||||
agent_output_schema: tool!.output_schema,
|
||||
agent_output_schema: tool!.output_schema || {},
|
||||
plugin_unique_identifier: tool!.provider_id,
|
||||
meta: tool!.meta,
|
||||
})
|
||||
|
|
|
|||
|
|
@ -28,7 +28,6 @@ const getIcon = (type: InputVarType) => {
|
|||
[InputVarType.jsonObject]: RiBracesLine,
|
||||
[InputVarType.singleFile]: RiFileList2Line,
|
||||
[InputVarType.multiFiles]: RiFileCopy2Line,
|
||||
[InputVarType.checkbox]: RiCheckboxLine,
|
||||
} as any)[type] || RiTextSnippet
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ import {
|
|||
} from '../../../types'
|
||||
import type { Node } from '../../../types'
|
||||
import BlockSelector from '../../../block-selector'
|
||||
import type { ToolDefaultValue } from '../../../block-selector/types'
|
||||
import type { DataSourceDefaultValue, ToolDefaultValue } from '../../../block-selector/types'
|
||||
import {
|
||||
useAvailableBlocks,
|
||||
useIsChatMode,
|
||||
|
|
@ -57,7 +57,7 @@ export const NodeTargetHandle = memo(({
|
|||
if (!connected)
|
||||
setOpen(v => !v)
|
||||
}, [connected])
|
||||
const handleSelect = useCallback((type: BlockEnum, toolDefaultValue?: ToolDefaultValue) => {
|
||||
const handleSelect = useCallback((type: BlockEnum, toolDefaultValue?: ToolDefaultValue | DataSourceDefaultValue) => {
|
||||
handleNodeAdd(
|
||||
{
|
||||
nodeType: type,
|
||||
|
|
@ -140,7 +140,7 @@ export const NodeSourceHandle = memo(({
|
|||
e.stopPropagation()
|
||||
setOpen(v => !v)
|
||||
}, [])
|
||||
const handleSelect = useCallback((type: BlockEnum, toolDefaultValue?: ToolDefaultValue) => {
|
||||
const handleSelect = useCallback((type: BlockEnum, toolDefaultValue?: ToolDefaultValue | DataSourceDefaultValue) => {
|
||||
handleNodeAdd(
|
||||
{
|
||||
nodeType: type,
|
||||
|
|
|
|||
|
|
@ -42,17 +42,17 @@ export const useVarColor = (variables: string[], isExceptionVariable?: boolean,
|
|||
return 'text-util-colors-teal-teal-700'
|
||||
|
||||
return 'text-text-accent'
|
||||
}, [variables, isExceptionVariable])
|
||||
}, [variables, isExceptionVariable, variableCategory])
|
||||
}
|
||||
|
||||
export const useVarName = (variables: string[], notShowFullPath?: boolean) => {
|
||||
let variableFullPathName = variables.slice(1).join('.')
|
||||
|
||||
if (isRagVariableVar(variables))
|
||||
variableFullPathName = variables.slice(2).join('.')
|
||||
|
||||
const variablesLength = variables.length
|
||||
const varName = useMemo(() => {
|
||||
let variableFullPathName = variables.slice(1).join('.')
|
||||
|
||||
if (isRagVariableVar(variables))
|
||||
variableFullPathName = variables.slice(2).join('.')
|
||||
|
||||
const variablesLength = variables.length
|
||||
const isSystem = isSystemVar(variables)
|
||||
const varName = notShowFullPath ? variables[variablesLength - 1] : variableFullPathName
|
||||
return `${isSystem ? 'sys.' : ''}${varName}`
|
||||
|
|
|
|||
|
|
@ -48,8 +48,13 @@ import Tooltip from '@/app/components/base/tooltip'
|
|||
import useInspectVarsCrud from '../../hooks/use-inspect-vars-crud'
|
||||
import { ToolTypeEnum } from '../../block-selector/types'
|
||||
|
||||
type NodeChildProps = {
|
||||
id: string
|
||||
data: NodeProps['data']
|
||||
}
|
||||
|
||||
type BaseNodeProps = {
|
||||
children: ReactElement
|
||||
children: ReactElement<Partial<NodeChildProps>>
|
||||
id: NodeProps['id']
|
||||
data: NodeProps['data']
|
||||
}
|
||||
|
|
|
|||
|
|
@ -11,6 +11,11 @@ export type OutputVar = Record<string, {
|
|||
children: null // support nest in the future,
|
||||
}>
|
||||
|
||||
export type CodeDependency = {
|
||||
name: string
|
||||
version?: string
|
||||
}
|
||||
|
||||
export type CodeNodeType = CommonNodeType & {
|
||||
variables: Variable[]
|
||||
code_language: CodeLanguage
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ import {
|
|||
const useConfig = (id: string, payload: HttpNodeType) => {
|
||||
const { nodesReadOnly: readOnly } = useNodesReadOnly()
|
||||
|
||||
const defaultConfig = useStore(s => s.nodesDefaultConfigs)[payload.type]
|
||||
const defaultConfig = useStore(s => s.nodesDefaultConfigs?.[payload.type])
|
||||
|
||||
const { inputs, setInputs } = useNodeCrud<HttpNodeType>(id, payload)
|
||||
|
||||
|
|
|
|||
|
|
@ -209,7 +209,7 @@ const ConditionItem = ({
|
|||
onRemoveCondition?.(caseId, condition.id)
|
||||
}, [caseId, condition, conditionId, isSubVariableKey, onRemoveCondition, onRemoveSubVariableCondition])
|
||||
|
||||
const { getMatchedSchemaType } = useMatchSchemaType()
|
||||
const { schemaTypeDefinitions } = useMatchSchemaType()
|
||||
const handleVarChange = useCallback((valueSelector: ValueSelector, _varItem: Var) => {
|
||||
const {
|
||||
conversationVariables,
|
||||
|
|
@ -226,7 +226,7 @@ const ConditionItem = ({
|
|||
workflowTools,
|
||||
dataSourceList: dataSourceList ?? [],
|
||||
},
|
||||
getMatchedSchemaType,
|
||||
schemaTypeDefinitions,
|
||||
})
|
||||
|
||||
const newCondition = produce(condition, (draft) => {
|
||||
|
|
@ -234,11 +234,14 @@ const ConditionItem = ({
|
|||
draft.varType = resolvedVarType
|
||||
draft.value = resolvedVarType === VarType.boolean ? false : ''
|
||||
draft.comparison_operator = getOperators(resolvedVarType)[0]
|
||||
delete draft.key
|
||||
delete draft.sub_variable_condition
|
||||
delete draft.numberVarType
|
||||
setTimeout(() => setControlPromptEditorRerenderKey(Date.now()))
|
||||
})
|
||||
doUpdateCondition(newCondition)
|
||||
setOpen(false)
|
||||
}, [condition, doUpdateCondition, availableNodes, isChatMode, setControlPromptEditorRerenderKey])
|
||||
}, [condition, doUpdateCondition, availableNodes, isChatMode, setControlPromptEditorRerenderKey, schemaTypeDefinitions])
|
||||
|
||||
const showBooleanInput = useMemo(() => {
|
||||
if(condition.varType === VarType.boolean)
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
import { memo } from 'react'
|
||||
import { memo, useCallback } from 'react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
import Tooltip from '@/app/components/base/tooltip'
|
||||
import Input from '@/app/components/base/input'
|
||||
import Switch from '@/app/components/base/switch'
|
||||
import { InputNumber } from '@/app/components/base/input-number'
|
||||
|
||||
export type TopKAndScoreThresholdProps = {
|
||||
topK: number
|
||||
|
|
@ -14,6 +14,24 @@ export type TopKAndScoreThresholdProps = {
|
|||
readonly?: boolean
|
||||
hiddenScoreThreshold?: boolean
|
||||
}
|
||||
|
||||
const maxTopK = (() => {
|
||||
const configValue = Number.parseInt(globalThis.document?.body?.getAttribute('data-public-top-k-max-value') || '', 10)
|
||||
if (configValue && !isNaN(configValue))
|
||||
return configValue
|
||||
return 10
|
||||
})()
|
||||
const TOP_K_VALUE_LIMIT = {
|
||||
amount: 1,
|
||||
min: 1,
|
||||
max: maxTopK,
|
||||
}
|
||||
const SCORE_THRESHOLD_VALUE_LIMIT = {
|
||||
step: 0.01,
|
||||
min: 0,
|
||||
max: 1,
|
||||
}
|
||||
|
||||
const TopKAndScoreThreshold = ({
|
||||
topK,
|
||||
onTopKChange,
|
||||
|
|
@ -25,18 +43,18 @@ const TopKAndScoreThreshold = ({
|
|||
hiddenScoreThreshold,
|
||||
}: TopKAndScoreThresholdProps) => {
|
||||
const { t } = useTranslation()
|
||||
const handleTopKChange = (e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const value = Number(e.target.value)
|
||||
if (Number.isNaN(value))
|
||||
return
|
||||
onTopKChange?.(value)
|
||||
}
|
||||
const handleTopKChange = useCallback((value: number) => {
|
||||
let notOutRangeValue = Number.parseInt(value.toFixed(0))
|
||||
notOutRangeValue = Math.max(TOP_K_VALUE_LIMIT.min, notOutRangeValue)
|
||||
notOutRangeValue = Math.min(TOP_K_VALUE_LIMIT.max, notOutRangeValue)
|
||||
onTopKChange?.(notOutRangeValue)
|
||||
}, [onTopKChange])
|
||||
|
||||
const handleScoreThresholdChange = (e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const value = Number(e.target.value)
|
||||
if (Number.isNaN(value))
|
||||
return
|
||||
onScoreThresholdChange?.(value)
|
||||
const handleScoreThresholdChange = (value: number) => {
|
||||
let notOutRangeValue = Number.parseFloat(value.toFixed(2))
|
||||
notOutRangeValue = Math.max(SCORE_THRESHOLD_VALUE_LIMIT.min, notOutRangeValue)
|
||||
notOutRangeValue = Math.min(SCORE_THRESHOLD_VALUE_LIMIT.max, notOutRangeValue)
|
||||
onScoreThresholdChange?.(notOutRangeValue)
|
||||
}
|
||||
|
||||
return (
|
||||
|
|
@ -49,11 +67,13 @@ const TopKAndScoreThreshold = ({
|
|||
popupContent={t('appDebug.datasetConfig.top_kTip')}
|
||||
/>
|
||||
</div>
|
||||
<Input
|
||||
<InputNumber
|
||||
disabled={readonly}
|
||||
type='number'
|
||||
{...TOP_K_VALUE_LIMIT}
|
||||
size='regular'
|
||||
value={topK}
|
||||
onChange={handleTopKChange}
|
||||
disabled={readonly}
|
||||
/>
|
||||
</div>
|
||||
{
|
||||
|
|
@ -74,11 +94,13 @@ const TopKAndScoreThreshold = ({
|
|||
popupContent={t('appDebug.datasetConfig.score_thresholdTip')}
|
||||
/>
|
||||
</div>
|
||||
<Input
|
||||
<InputNumber
|
||||
disabled={readonly || !isScoreThresholdEnabled}
|
||||
type='number'
|
||||
{...SCORE_THRESHOLD_VALUE_LIMIT}
|
||||
size='regular'
|
||||
value={scoreThreshold}
|
||||
onChange={handleScoreThresholdChange}
|
||||
disabled={readonly || !isScoreThresholdEnabled}
|
||||
/>
|
||||
</div>
|
||||
)
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ type ConditionNumberProps = {
|
|||
nodesOutputVars: NodeOutPutVar[]
|
||||
availableNodes: Node[]
|
||||
isCommonVariable?: boolean
|
||||
commonVariables: { name: string, type: string }[]
|
||||
commonVariables: { name: string; type: string; value: string }[]
|
||||
} & ConditionValueMethodProps
|
||||
const ConditionNumber = ({
|
||||
value,
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ type ConditionStringProps = {
|
|||
nodesOutputVars: NodeOutPutVar[]
|
||||
availableNodes: Node[]
|
||||
isCommonVariable?: boolean
|
||||
commonVariables: { name: string, type: string }[]
|
||||
commonVariables: { name: string; type: string; value: string }[]
|
||||
} & ConditionValueMethodProps
|
||||
const ConditionString = ({
|
||||
value,
|
||||
|
|
|
|||
|
|
@ -128,6 +128,6 @@ export type MetadataShape = {
|
|||
availableNumberVars?: NodeOutPutVar[]
|
||||
availableNumberNodesWithParent?: Node[]
|
||||
isCommonVariable?: boolean
|
||||
availableCommonStringVars?: { name: string; type: string; }[]
|
||||
availableCommonNumberVars?: { name: string; type: string; }[]
|
||||
availableCommonStringVars?: { name: string; type: string; value: string }[]
|
||||
availableCommonNumberVars?: { name: string; type: string; value: string }[]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ const JsonImporter: FC<JsonImporterProps> = ({
|
|||
const [open, setOpen] = useState(false)
|
||||
const [json, setJson] = useState('')
|
||||
const [parseError, setParseError] = useState<any>(null)
|
||||
const importBtnRef = useRef<HTMLButtonElement>(null)
|
||||
const importBtnRef = useRef<HTMLElement>(null)
|
||||
const advancedEditing = useVisualEditorStore(state => state.advancedEditing)
|
||||
const isAddingNewField = useVisualEditorStore(state => state.isAddingNewField)
|
||||
const { emit } = useMittContext()
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ type VisualEditorProviderProps = {
|
|||
export const VisualEditorContext = createContext<VisualEditorContextType>(null)
|
||||
|
||||
export const VisualEditorContextProvider = ({ children }: VisualEditorProviderProps) => {
|
||||
const storeRef = useRef<VisualEditorStore>()
|
||||
const storeRef = useRef<VisualEditorStore | null>(null)
|
||||
|
||||
if (!storeRef.current)
|
||||
storeRef.current = createVisualEditorStore()
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ const useConfig = (id: string, payload: LLMNodeType) => {
|
|||
const { nodesReadOnly: readOnly } = useNodesReadOnly()
|
||||
const isChatMode = useIsChatMode()
|
||||
|
||||
const defaultConfig = useStore(s => s.nodesDefaultConfigs)[payload.type]
|
||||
const defaultConfig = useStore(s => s.nodesDefaultConfigs)?.[payload.type]
|
||||
const [defaultRolePrefix, setDefaultRolePrefix] = useState<{ user: string; assistant: string }>({ user: '', assistant: '' })
|
||||
const { inputs, setInputs: doSetInputs } = useNodeCrud<LLMNodeType>(id, payload)
|
||||
const inputRef = useRef(inputs)
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ export const checkNodeValid = (_payload: LLMNodeType) => {
|
|||
|
||||
export const getFieldType = (field: Field) => {
|
||||
const { type, items } = field
|
||||
if(field.schemaType === 'file') return 'file'
|
||||
if(field.schemaType === 'file') return Type.file
|
||||
if (type !== Type.array || !items)
|
||||
return type
|
||||
|
||||
|
|
|
|||