mirror of https://github.com/langgenius/dify.git
Merge remote-tracking branch 'origin/main' into feat/trigger
commit a6208feed8
@@ -374,6 +374,12 @@ UPLOAD_IMAGE_FILE_SIZE_LIMIT=10
 UPLOAD_VIDEO_FILE_SIZE_LIMIT=100
 UPLOAD_AUDIO_FILE_SIZE_LIMIT=50
 
+# Comma-separated list of file extensions blocked from upload for security reasons.
+# Extensions should be lowercase without dots (e.g., exe,bat,sh,dll).
+# Empty by default to allow all file types.
+# Recommended: exe,bat,cmd,com,scr,vbs,ps1,msi,dll
+UPLOAD_FILE_EXTENSION_BLACKLIST=
+
 # Model configuration
 MULTIMODAL_SEND_FORMAT=base64
 PROMPT_GENERATION_MAX_TOKENS=512

@@ -620,3 +626,6 @@ SWAGGER_UI_PATH=/swagger-ui.html
 # Whether to encrypt dataset IDs when exporting DSL files (default: true)
 # Set to false to export dataset IDs as plain text for easier cross-environment import
 DSL_EXPORT_ENCRYPT_DATASET_ID=true
+
+# Maximum number of segments for dataset segments API (0 for unlimited)
+DATASET_MAX_SEGMENTS_PER_REQUEST=0

@@ -360,6 +360,31 @@ class FileUploadConfig(BaseSettings):
         default=10,
     )
 
+    inner_UPLOAD_FILE_EXTENSION_BLACKLIST: str = Field(
+        description=(
+            "Comma-separated list of file extensions that are blocked from upload. "
+            "Extensions should be lowercase without dots (e.g., 'exe,bat,sh,dll'). "
+            "Empty by default to allow all file types."
+        ),
+        validation_alias=AliasChoices("UPLOAD_FILE_EXTENSION_BLACKLIST"),
+        default="",
+    )
+
+    @computed_field  # type: ignore[misc]
+    @property
+    def UPLOAD_FILE_EXTENSION_BLACKLIST(self) -> set[str]:
+        """
+        Parse and return the blacklist as a set of lowercase extensions.
+        Returns an empty set if no blacklist is configured.
+        """
+        if not self.inner_UPLOAD_FILE_EXTENSION_BLACKLIST:
+            return set()
+        return {
+            ext.strip().lower().strip(".")
+            for ext in self.inner_UPLOAD_FILE_EXTENSION_BLACKLIST.split(",")
+            if ext.strip()
+        }
+
 
 class HttpConfig(BaseSettings):
     """

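For reference, a minimal standalone sketch of the parsing behavior the computed property above implies: entries are split on commas, lowercased, stripped of whitespace and dots, and empty entries are dropped. The helper name here is hypothetical, not part of the dify config.

# Standalone sketch of the blacklist parsing shown above (assumption: mirrors
# the computed property; not the actual dify config object).
def parse_blacklist(raw: str) -> set[str]:
    # Split on commas, normalize case, drop dots and surrounding whitespace,
    # and ignore empty entries such as trailing commas.
    return {ext.strip().lower().strip(".") for ext in raw.split(",") if ext.strip()}

assert parse_blacklist("") == set()
assert parse_blacklist("EXE, .bat,,sh") == {"exe", "bat", "sh"}
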
@@ -949,6 +974,11 @@ class DataSetConfig(BaseSettings):
         default=True,
     )
 
+    DATASET_MAX_SEGMENTS_PER_REQUEST: NonNegativeInt = Field(
+        description="Maximum number of segments for dataset segments API (0 for unlimited)",
+        default=0,
+    )
+
 
 class WorkspaceConfig(BaseSettings):
     """

@@ -25,6 +25,12 @@ class UnsupportedFileTypeError(BaseHTTPException):
     code = 415
 
 
+class BlockedFileExtensionError(BaseHTTPException):
+    error_code = "file_extension_blocked"
+    description = "The file extension is blocked for security reasons."
+    code = 400
+
+
 class TooManyFilesError(BaseHTTPException):
     error_code = "too_many_files"
     description = "Only one file is allowed."

@@ -16,6 +16,7 @@ from fields.annotation_fields import (
     annotation_fields,
     annotation_hit_history_fields,
 )
+from libs.helper import uuid_value
 from libs.login import login_required
 from services.annotation_service import AppAnnotationService
 

@@ -175,8 +176,10 @@ class AnnotationApi(Resource):
         api.model(
             "CreateAnnotationRequest",
             {
-                "question": fields.String(required=True, description="Question text"),
-                "answer": fields.String(required=True, description="Answer text"),
+                "message_id": fields.String(description="Message ID (optional)"),
+                "question": fields.String(description="Question text (required when message_id not provided)"),
+                "answer": fields.String(description="Answer text (use 'answer' or 'content')"),
+                "content": fields.String(description="Content text (use 'answer' or 'content')"),
                 "annotation_reply": fields.Raw(description="Annotation reply data"),
             },
         )

@@ -193,11 +196,14 @@ class AnnotationApi(Resource):
         app_id = str(app_id)
         parser = (
             reqparse.RequestParser()
-            .add_argument("question", required=True, type=str, location="json")
-            .add_argument("answer", required=True, type=str, location="json")
+            .add_argument("message_id", required=False, type=uuid_value, location="json")
+            .add_argument("question", required=False, type=str, location="json")
+            .add_argument("answer", required=False, type=str, location="json")
+            .add_argument("content", required=False, type=str, location="json")
+            .add_argument("annotation_reply", required=False, type=dict, location="json")
         )
         args = parser.parse_args()
-        annotation = AppAnnotationService.insert_app_annotation_directly(args, app_id)
+        annotation = AppAnnotationService.up_insert_app_annotation_from_message(args, app_id)
         return annotation
 
     @setup_required

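Illustrative request bodies the revised endpoint appears to accept after this change: either a direct question/answer pair, or a message reference with 'content' as an alias for 'answer'. Values are hypothetical; field semantics follow the parser above.

# Two request shapes the revised endpoint appears to accept (illustrative
# payloads only; the UUID below is hypothetical).
direct_annotation = {
    "question": "What is Dify?",
    "answer": "An LLM app development platform.",
}
from_message = {
    "message_id": "7f9c2c2e-0000-0000-0000-000000000000",  # hypothetical UUID
    "content": "An LLM app development platform.",  # 'content' is an alias for 'answer'
}
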
@@ -1,7 +1,5 @@
-from datetime import datetime
-
-import pytz
 import sqlalchemy as sa
+from flask import abort
 from flask_restx import Resource, marshal_with, reqparse
 from flask_restx.inputs import int_range
 from sqlalchemy import func, or_

@@ -19,7 +17,7 @@ from fields.conversation_fields import (
     conversation_pagination_fields,
     conversation_with_summary_pagination_fields,
 )
-from libs.datetime_utils import naive_utc_now
+from libs.datetime_utils import naive_utc_now, parse_time_range
 from libs.helper import DatetimeString
 from libs.login import current_account_with_tenant, login_required
 from models import Conversation, EndUser, Message, MessageAnnotation

@@ -90,25 +88,17 @@ class CompletionConversationApi(Resource):
 
         account = current_user
         assert account.timezone is not None
-        timezone = pytz.timezone(account.timezone)
-        utc_timezone = pytz.utc
 
-        if args["start"]:
-            start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M")
-            start_datetime = start_datetime.replace(second=0)
-
-            start_datetime_timezone = timezone.localize(start_datetime)
-            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
+        try:
+            start_datetime_utc, end_datetime_utc = parse_time_range(args["start"], args["end"], account.timezone)
+        except ValueError as e:
+            abort(400, description=str(e))
 
+        if start_datetime_utc:
             query = query.where(Conversation.created_at >= start_datetime_utc)
 
-        if args["end"]:
-            end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M")
-            end_datetime = end_datetime.replace(second=59)
-
-            end_datetime_timezone = timezone.localize(end_datetime)
-            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-
+        if end_datetime_utc:
+            end_datetime_utc = end_datetime_utc.replace(second=59)
             query = query.where(Conversation.created_at < end_datetime_utc)
 
         # FIXME, the type ignore in this file

@@ -270,29 +260,21 @@ class ChatConversationApi(Resource):
 
         account = current_user
         assert account.timezone is not None
-        timezone = pytz.timezone(account.timezone)
-        utc_timezone = pytz.utc
 
-        if args["start"]:
-            start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M")
-            start_datetime = start_datetime.replace(second=0)
-
-            start_datetime_timezone = timezone.localize(start_datetime)
-            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
+        try:
+            start_datetime_utc, end_datetime_utc = parse_time_range(args["start"], args["end"], account.timezone)
+        except ValueError as e:
+            abort(400, description=str(e))
 
+        if start_datetime_utc:
             match args["sort_by"]:
                 case "updated_at" | "-updated_at":
                     query = query.where(Conversation.updated_at >= start_datetime_utc)
                 case "created_at" | "-created_at" | _:
                     query = query.where(Conversation.created_at >= start_datetime_utc)
 
-        if args["end"]:
-            end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M")
-            end_datetime = end_datetime.replace(second=59)
-
-            end_datetime_timezone = timezone.localize(end_datetime)
-            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-
+        if end_datetime_utc:
+            end_datetime_utc = end_datetime_utc.replace(second=59)
             match args["sort_by"]:
                 case "updated_at" | "-updated_at":
                     query = query.where(Conversation.updated_at <= end_datetime_utc)

@@ -16,7 +16,6 @@ from controllers.console.app.wraps import get_app_model
 from controllers.console.explore.error import AppSuggestedQuestionsAfterAnswerDisabledError
 from controllers.console.wraps import (
     account_initialization_required,
-    cloud_edition_billing_resource_check,
     edit_permission_required,
     setup_required,
 )

@@ -24,12 +23,11 @@ from core.app.entities.app_invoke_entities import InvokeFrom
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_runtime.errors.invoke import InvokeError
 from extensions.ext_database import db
-from fields.conversation_fields import annotation_fields, message_detail_fields
+from fields.conversation_fields import message_detail_fields
 from libs.helper import uuid_value
 from libs.infinite_scroll_pagination import InfiniteScrollPagination
 from libs.login import current_account_with_tenant, login_required
 from models.model import AppMode, Conversation, Message, MessageAnnotation, MessageFeedback
-from services.annotation_service import AppAnnotationService
 from services.errors.conversation import ConversationNotExistsError
 from services.errors.message import MessageNotExistsError, SuggestedQuestionsAfterAnswerDisabledError
 from services.message_service import MessageService

@@ -194,45 +192,6 @@ class MessageFeedbackApi(Resource):
         return {"result": "success"}
 
 
-@console_ns.route("/apps/<uuid:app_id>/annotations")
-class MessageAnnotationApi(Resource):
-    @api.doc("create_message_annotation")
-    @api.doc(description="Create message annotation")
-    @api.doc(params={"app_id": "Application ID"})
-    @api.expect(
-        api.model(
-            "MessageAnnotationRequest",
-            {
-                "message_id": fields.String(description="Message ID"),
-                "question": fields.String(required=True, description="Question text"),
-                "answer": fields.String(required=True, description="Answer text"),
-                "annotation_reply": fields.Raw(description="Annotation reply"),
-            },
-        )
-    )
-    @api.response(200, "Annotation created successfully", annotation_fields)
-    @api.response(403, "Insufficient permissions")
-    @marshal_with(annotation_fields)
-    @get_app_model
-    @setup_required
-    @login_required
-    @cloud_edition_billing_resource_check("annotation")
-    @account_initialization_required
-    @edit_permission_required
-    def post(self, app_model):
-        parser = (
-            reqparse.RequestParser()
-            .add_argument("message_id", required=False, type=uuid_value, location="json")
-            .add_argument("question", required=True, type=str, location="json")
-            .add_argument("answer", required=True, type=str, location="json")
-            .add_argument("annotation_reply", required=False, type=dict, location="json")
-        )
-        args = parser.parse_args()
-        annotation = AppAnnotationService.up_insert_app_annotation_from_message(args, app_model.id)
-
-        return annotation
-
-
 @console_ns.route("/apps/<uuid:app_id>/annotations/count")
 class MessageAnnotationCountApi(Resource):
     @api.doc("get_annotation_count")

@@ -1,9 +1,7 @@
-from datetime import datetime
 from decimal import Decimal
 
-import pytz
 import sqlalchemy as sa
-from flask import jsonify
+from flask import abort, jsonify
 from flask_restx import Resource, fields, reqparse
 
 from controllers.console import api, console_ns

@@ -11,6 +9,7 @@ from controllers.console.app.wraps import get_app_model
 from controllers.console.wraps import account_initialization_required, setup_required
 from core.app.entities.app_invoke_entities import InvokeFrom
 from extensions.ext_database import db
+from libs.datetime_utils import parse_time_range
 from libs.helper import DatetimeString
 from libs.login import current_account_with_tenant, login_required
 from models import AppMode, Message

@@ -56,26 +55,16 @@ WHERE
         arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER}
         assert account.timezone is not None
 
-        timezone = pytz.timezone(account.timezone)
-        utc_timezone = pytz.utc
-
-        if args["start"]:
-            start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M")
-            start_datetime = start_datetime.replace(second=0)
-
-            start_datetime_timezone = timezone.localize(start_datetime)
-            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
+        try:
+            start_datetime_utc, end_datetime_utc = parse_time_range(args["start"], args["end"], account.timezone)
+        except ValueError as e:
+            abort(400, description=str(e))
 
+        if start_datetime_utc:
             sql_query += " AND created_at >= :start"
             arg_dict["start"] = start_datetime_utc
 
-        if args["end"]:
-            end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M")
-            end_datetime = end_datetime.replace(second=0)
-
-            end_datetime_timezone = timezone.localize(end_datetime)
-            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-
+        if end_datetime_utc:
             sql_query += " AND created_at < :end"
             arg_dict["end"] = end_datetime_utc
 

@@ -120,8 +109,11 @@ class DailyConversationStatistic(Resource):
         )
         args = parser.parse_args()
         assert account.timezone is not None
-        timezone = pytz.timezone(account.timezone)
-        utc_timezone = pytz.utc
+
+        try:
+            start_datetime_utc, end_datetime_utc = parse_time_range(args["start"], args["end"], account.timezone)
+        except ValueError as e:
+            abort(400, description=str(e))
 
         stmt = (
             sa.select(

@@ -134,18 +126,10 @@ class DailyConversationStatistic(Resource):
             .where(Message.app_id == app_model.id, Message.invoke_from != InvokeFrom.DEBUGGER)
         )
 
-        if args["start"]:
-            start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M")
-            start_datetime = start_datetime.replace(second=0)
-            start_datetime_timezone = timezone.localize(start_datetime)
-            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
+        if start_datetime_utc:
             stmt = stmt.where(Message.created_at >= start_datetime_utc)
 
-        if args["end"]:
-            end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M")
-            end_datetime = end_datetime.replace(second=0)
-            end_datetime_timezone = timezone.localize(end_datetime)
-            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
+        if end_datetime_utc:
             stmt = stmt.where(Message.created_at < end_datetime_utc)
 
         stmt = stmt.group_by("date").order_by("date")

@@ -198,26 +182,17 @@ WHERE
     AND invoke_from != :invoke_from"""
         arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER}
         assert account.timezone is not None
-        timezone = pytz.timezone(account.timezone)
-        utc_timezone = pytz.utc
 
-        if args["start"]:
-            start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M")
-            start_datetime = start_datetime.replace(second=0)
-
-            start_datetime_timezone = timezone.localize(start_datetime)
-            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
+        try:
+            start_datetime_utc, end_datetime_utc = parse_time_range(args["start"], args["end"], account.timezone)
+        except ValueError as e:
+            abort(400, description=str(e))
 
+        if start_datetime_utc:
             sql_query += " AND created_at >= :start"
             arg_dict["start"] = start_datetime_utc
 
-        if args["end"]:
-            end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M")
-            end_datetime = end_datetime.replace(second=0)
-
-            end_datetime_timezone = timezone.localize(end_datetime)
-            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-
+        if end_datetime_utc:
             sql_query += " AND created_at < :end"
             arg_dict["end"] = end_datetime_utc
 

@@ -273,26 +248,17 @@ WHERE
     AND invoke_from != :invoke_from"""
         arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER}
         assert account.timezone is not None
-        timezone = pytz.timezone(account.timezone)
-        utc_timezone = pytz.utc
 
-        if args["start"]:
-            start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M")
-            start_datetime = start_datetime.replace(second=0)
-
-            start_datetime_timezone = timezone.localize(start_datetime)
-            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
+        try:
+            start_datetime_utc, end_datetime_utc = parse_time_range(args["start"], args["end"], account.timezone)
+        except ValueError as e:
+            abort(400, description=str(e))
 
+        if start_datetime_utc:
             sql_query += " AND created_at >= :start"
             arg_dict["start"] = start_datetime_utc
 
-        if args["end"]:
-            end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M")
-            end_datetime = end_datetime.replace(second=0)
-
-            end_datetime_timezone = timezone.localize(end_datetime)
-            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-
+        if end_datetime_utc:
             sql_query += " AND created_at < :end"
             arg_dict["end"] = end_datetime_utc
 

@@ -357,26 +323,17 @@ FROM
     AND m.invoke_from != :invoke_from"""
         arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER}
         assert account.timezone is not None
-        timezone = pytz.timezone(account.timezone)
-        utc_timezone = pytz.utc
 
-        if args["start"]:
-            start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M")
-            start_datetime = start_datetime.replace(second=0)
-
-            start_datetime_timezone = timezone.localize(start_datetime)
-            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
+        try:
+            start_datetime_utc, end_datetime_utc = parse_time_range(args["start"], args["end"], account.timezone)
+        except ValueError as e:
+            abort(400, description=str(e))
 
+        if start_datetime_utc:
             sql_query += " AND c.created_at >= :start"
             arg_dict["start"] = start_datetime_utc
 
-        if args["end"]:
-            end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M")
-            end_datetime = end_datetime.replace(second=0)
-
-            end_datetime_timezone = timezone.localize(end_datetime)
-            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-
+        if end_datetime_utc:
             sql_query += " AND c.created_at < :end"
             arg_dict["end"] = end_datetime_utc
 

@@ -446,26 +403,17 @@ WHERE
     AND m.invoke_from != :invoke_from"""
         arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER}
         assert account.timezone is not None
-        timezone = pytz.timezone(account.timezone)
-        utc_timezone = pytz.utc
 
-        if args["start"]:
-            start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M")
-            start_datetime = start_datetime.replace(second=0)
-
-            start_datetime_timezone = timezone.localize(start_datetime)
-            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
+        try:
+            start_datetime_utc, end_datetime_utc = parse_time_range(args["start"], args["end"], account.timezone)
+        except ValueError as e:
+            abort(400, description=str(e))
 
+        if start_datetime_utc:
             sql_query += " AND m.created_at >= :start"
             arg_dict["start"] = start_datetime_utc
 
-        if args["end"]:
-            end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M")
-            end_datetime = end_datetime.replace(second=0)
-
-            end_datetime_timezone = timezone.localize(end_datetime)
-            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-
+        if end_datetime_utc:
             sql_query += " AND m.created_at < :end"
             arg_dict["end"] = end_datetime_utc
 

@@ -525,26 +473,17 @@ WHERE
     AND invoke_from != :invoke_from"""
         arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER}
        assert account.timezone is not None
-        timezone = pytz.timezone(account.timezone)
-        utc_timezone = pytz.utc
 
-        if args["start"]:
-            start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M")
-            start_datetime = start_datetime.replace(second=0)
-
-            start_datetime_timezone = timezone.localize(start_datetime)
-            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
+        try:
+            start_datetime_utc, end_datetime_utc = parse_time_range(args["start"], args["end"], account.timezone)
+        except ValueError as e:
+            abort(400, description=str(e))
 
+        if start_datetime_utc:
             sql_query += " AND created_at >= :start"
             arg_dict["start"] = start_datetime_utc
 
-        if args["end"]:
-            end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M")
-            end_datetime = end_datetime.replace(second=0)
-
-            end_datetime_timezone = timezone.localize(end_datetime)
-            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-
+        if end_datetime_utc:
             sql_query += " AND created_at < :end"
             arg_dict["end"] = end_datetime_utc
 

@@ -602,26 +541,17 @@ WHERE
     AND invoke_from != :invoke_from"""
         arg_dict = {"tz": account.timezone, "app_id": app_model.id, "invoke_from": InvokeFrom.DEBUGGER}
         assert account.timezone is not None
-        timezone = pytz.timezone(account.timezone)
-        utc_timezone = pytz.utc
 
-        if args["start"]:
-            start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M")
-            start_datetime = start_datetime.replace(second=0)
-
-            start_datetime_timezone = timezone.localize(start_datetime)
-            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
+        try:
+            start_datetime_utc, end_datetime_utc = parse_time_range(args["start"], args["end"], account.timezone)
+        except ValueError as e:
+            abort(400, description=str(e))
 
+        if start_datetime_utc:
             sql_query += " AND created_at >= :start"
             arg_dict["start"] = start_datetime_utc
 
-        if args["end"]:
-            end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M")
-            end_datetime = end_datetime.replace(second=0)
-
-            end_datetime_timezone = timezone.localize(end_datetime)
-            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-
+        if end_datetime_utc:
             sql_query += " AND created_at < :end"
             arg_dict["end"] = end_datetime_utc
 

@@ -1,7 +1,4 @@
-from datetime import datetime
-
-import pytz
-from flask import jsonify
+from flask import abort, jsonify
 from flask_restx import Resource, reqparse
 from sqlalchemy.orm import sessionmaker
 

@@ -9,6 +6,7 @@ from controllers.console import api, console_ns
 from controllers.console.app.wraps import get_app_model
 from controllers.console.wraps import account_initialization_required, setup_required
 from extensions.ext_database import db
+from libs.datetime_utils import parse_time_range
 from libs.helper import DatetimeString
 from libs.login import current_account_with_tenant, login_required
 from models.enums import WorkflowRunTriggeredFrom

@@ -43,23 +41,11 @@ class WorkflowDailyRunsStatistic(Resource):
         args = parser.parse_args()
 
         assert account.timezone is not None
-        timezone = pytz.timezone(account.timezone)
-        utc_timezone = pytz.utc
 
-        start_date = None
-        end_date = None
-
-        if args["start"]:
-            start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M")
-            start_datetime = start_datetime.replace(second=0)
-            start_datetime_timezone = timezone.localize(start_datetime)
-            start_date = start_datetime_timezone.astimezone(utc_timezone)
-
-        if args["end"]:
-            end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M")
-            end_datetime = end_datetime.replace(second=0)
-            end_datetime_timezone = timezone.localize(end_datetime)
-            end_date = end_datetime_timezone.astimezone(utc_timezone)
+        try:
+            start_date, end_date = parse_time_range(args["start"], args["end"], account.timezone)
+        except ValueError as e:
+            abort(400, description=str(e))
 
         response_data = self._workflow_run_repo.get_daily_runs_statistics(
             tenant_id=app_model.tenant_id,

@@ -100,23 +86,11 @@ class WorkflowDailyTerminalsStatistic(Resource):
         args = parser.parse_args()
 
         assert account.timezone is not None
-        timezone = pytz.timezone(account.timezone)
-        utc_timezone = pytz.utc
 
-        start_date = None
-        end_date = None
-
-        if args["start"]:
-            start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M")
-            start_datetime = start_datetime.replace(second=0)
-            start_datetime_timezone = timezone.localize(start_datetime)
-            start_date = start_datetime_timezone.astimezone(utc_timezone)
-
-        if args["end"]:
-            end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M")
-            end_datetime = end_datetime.replace(second=0)
-            end_datetime_timezone = timezone.localize(end_datetime)
-            end_date = end_datetime_timezone.astimezone(utc_timezone)
+        try:
+            start_date, end_date = parse_time_range(args["start"], args["end"], account.timezone)
+        except ValueError as e:
+            abort(400, description=str(e))
 
         response_data = self._workflow_run_repo.get_daily_terminals_statistics(
             tenant_id=app_model.tenant_id,

@@ -157,23 +131,11 @@ class WorkflowDailyTokenCostStatistic(Resource):
         args = parser.parse_args()
 
         assert account.timezone is not None
-        timezone = pytz.timezone(account.timezone)
-        utc_timezone = pytz.utc
 
-        start_date = None
-        end_date = None
-
-        if args["start"]:
-            start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M")
-            start_datetime = start_datetime.replace(second=0)
-            start_datetime_timezone = timezone.localize(start_datetime)
-            start_date = start_datetime_timezone.astimezone(utc_timezone)
-
-        if args["end"]:
-            end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M")
-            end_datetime = end_datetime.replace(second=0)
-            end_datetime_timezone = timezone.localize(end_datetime)
-            end_date = end_datetime_timezone.astimezone(utc_timezone)
+        try:
+            start_date, end_date = parse_time_range(args["start"], args["end"], account.timezone)
+        except ValueError as e:
+            abort(400, description=str(e))
 
         response_data = self._workflow_run_repo.get_daily_token_cost_statistics(
             tenant_id=app_model.tenant_id,

@@ -214,23 +176,11 @@ class WorkflowAverageAppInteractionStatistic(Resource):
         args = parser.parse_args()
 
         assert account.timezone is not None
-        timezone = pytz.timezone(account.timezone)
-        utc_timezone = pytz.utc
 
-        start_date = None
-        end_date = None
-
-        if args["start"]:
-            start_datetime = datetime.strptime(args["start"], "%Y-%m-%d %H:%M")
-            start_datetime = start_datetime.replace(second=0)
-            start_datetime_timezone = timezone.localize(start_datetime)
-            start_date = start_datetime_timezone.astimezone(utc_timezone)
-
-        if args["end"]:
-            end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M")
-            end_datetime = end_datetime.replace(second=0)
-            end_datetime_timezone = timezone.localize(end_datetime)
-            end_date = end_datetime_timezone.astimezone(utc_timezone)
+        try:
+            start_date, end_date = parse_time_range(args["start"], args["end"], account.timezone)
+        except ValueError as e:
+            abort(400, description=str(e))
 
         response_data = self._workflow_run_repo.get_average_app_interaction_statistics(
             tenant_id=app_model.tenant_id,

@@ -8,6 +8,7 @@ import services
 from configs import dify_config
 from constants import DOCUMENT_EXTENSIONS
 from controllers.common.errors import (
+    BlockedFileExtensionError,
     FilenameNotExistsError,
     FileTooLargeError,
     NoFileUploadedError,

@@ -83,6 +84,8 @@ class FileApi(Resource):
             raise FileTooLargeError(file_too_large_error.description)
         except services.errors.file.UnsupportedFileTypeError:
             raise UnsupportedFileTypeError()
+        except services.errors.file.BlockedFileExtensionError as blocked_extension_error:
+            raise BlockedFileExtensionError(blocked_extension_error.description)
 
         return upload_file, 201
 

@@ -2,6 +2,7 @@ from flask import request
 from flask_restx import marshal, reqparse
 from werkzeug.exceptions import NotFound
 
+from configs import dify_config
 from controllers.service_api import service_api_ns
 from controllers.service_api.app.error import ProviderNotInitializeError
 from controllers.service_api.wraps import (

@@ -107,6 +108,10 @@ class SegmentApi(DatasetApiResource):
         # validate args
         args = segment_create_parser.parse_args()
         if args["segments"] is not None:
+            segments_limit = dify_config.DATASET_MAX_SEGMENTS_PER_REQUEST
+            if segments_limit > 0 and len(args["segments"]) > segments_limit:
+                raise ValueError(f"Exceeded maximum segments limit of {segments_limit}.")
+
             for args_item in args["segments"]:
                 SegmentService.segment_create_args_validate(args_item, document)
             segments = SegmentService.multi_create_segment(args["segments"], document, dataset)

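A minimal sketch of the batch-size guard introduced above, assuming a value of 0 disables the limit (the helper name is hypothetical):

# Sketch of the segments-per-request guard (assumption: 0 means unlimited).
def check_segments_limit(segments: list[dict], limit: int) -> None:
    if limit > 0 and len(segments) > limit:
        raise ValueError(f"Exceeded maximum segments limit of {limit}.")

check_segments_limit([{}] * 5, limit=0)   # unlimited: passes
check_segments_limit([{}] * 5, limit=10)  # under the cap: passes
# check_segments_limit([{}] * 11, limit=10) would raise ValueError
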
@@ -144,7 +144,7 @@ class AgentChatAppRunner(AppRunner):
             prompt_template_entity=app_config.prompt_template,
             inputs=dict(inputs),
             files=list(files),
-            query=query or "",
+            query=query,
             memory=memory,
         )
 

@@ -172,7 +172,7 @@ class AgentChatAppRunner(AppRunner):
             prompt_template_entity=app_config.prompt_template,
             inputs=dict(inputs),
             files=list(files),
-            query=query or "",
+            query=query,
             memory=memory,
         )
 

@@ -79,7 +79,7 @@ class AppRunner:
         prompt_template_entity: PromptTemplateEntity,
         inputs: Mapping[str, str],
         files: Sequence["File"],
-        query: str | None = None,
+        query: str = "",
         context: str | None = None,
         memory: TokenBufferMemory | None = None,
         image_detail_config: ImagePromptMessageContent.DETAIL | None = None,

@@ -105,7 +105,7 @@ class AppRunner:
             app_mode=AppMode.value_of(app_record.mode),
             prompt_template_entity=prompt_template_entity,
             inputs=inputs,
-            query=query or "",
+            query=query,
             files=files,
             context=context,
             memory=memory,

@@ -190,7 +190,7 @@ class MessageBasedAppGenerator(BaseAppGenerator):
             override_model_configs=json.dumps(override_model_configs) if override_model_configs else None,
             conversation_id=conversation.id,
             inputs=application_generate_entity.inputs,
-            query=application_generate_entity.query or "",
+            query=application_generate_entity.query,
             message="",
             message_tokens=0,
             message_unit_price=0,

@@ -135,7 +135,7 @@ class EasyUIBasedAppGenerateEntity(AppGenerateEntity):
     app_config: EasyUIBasedAppConfig = None  # type: ignore
     model_conf: ModelConfigWithCredentialsEntity
 
-    query: str | None = None
+    query: str = ""
 
     # pydantic configs
     model_config = ConfigDict(protected_namespaces=())

@@ -121,7 +121,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline):
         if self._application_generate_entity.app_config.app_mode != AppMode.COMPLETION:
             # start generate conversation name thread
             self._conversation_name_generate_thread = self._message_cycle_manager.generate_conversation_name(
-                conversation_id=self._conversation_id, query=self._application_generate_entity.query or ""
+                conversation_id=self._conversation_id, query=self._application_generate_entity.query
             )
 
         generator = self._wrapper_process_stream_response(trace_manager=self._application_generate_entity.trace_manager)

@@ -140,7 +140,27 @@ class MessageCycleManager:
         if not self._application_generate_entity.app_config.additional_features:
             raise ValueError("Additional features not found")
         if self._application_generate_entity.app_config.additional_features.show_retrieve_source:
-            self._task_state.metadata.retriever_resources = event.retriever_resources
+            merged_resources = [r for r in self._task_state.metadata.retriever_resources or [] if r]
+            existing_ids = {(r.dataset_id, r.document_id) for r in merged_resources if r.dataset_id and r.document_id}
+
+            # Add new unique resources from the event
+            for resource in event.retriever_resources or []:
+                if not resource:
+                    continue
+
+                is_duplicate = (
+                    resource.dataset_id
+                    and resource.document_id
+                    and (resource.dataset_id, resource.document_id) in existing_ids
+                )
+
+                if not is_duplicate:
+                    merged_resources.append(resource)
+
+            for i, resource in enumerate(merged_resources, 1):
+                resource.position = i
+
+            self._task_state.metadata.retriever_resources = merged_resources
 
     def message_file_to_stream_response(self, event: QueueMessageFileEvent) -> MessageFileStreamResponse | None:
         """

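A toy sketch of the merge-and-dedup policy above, using plain (dataset_id, document_id) tuples in place of the real retriever-resource objects (the function name is hypothetical):

# Sketch of the dedup-by-(dataset_id, document_id) merge shown in the hunk.
def merge_resources(existing: list[tuple], incoming: list[tuple]) -> list[tuple]:
    merged = [r for r in existing if r]
    seen = {r for r in merged if r[0] and r[1]}
    for r in incoming:
        # Keep a resource unless both ids are set and the pair was already seen.
        if r and not (r[0] and r[1] and r in seen):
            merged.append(r)
    return merged  # positions would then be renumbered 1..n

assert merge_resources([("d1", "doc1")], [("d1", "doc1"), ("d2", "doc2")]) == [
    ("d1", "doc1"),
    ("d2", "doc2"),
]
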
@@ -74,6 +74,10 @@ class File(BaseModel):
         storage_key: str | None = None,
         dify_model_identity: str | None = FILE_MODEL_IDENTITY,
         url: str | None = None,
+        # Legacy compatibility fields - explicitly handle known extra fields
+        tool_file_id: str | None = None,
+        upload_file_id: str | None = None,
+        datasource_file_id: str | None = None,
     ):
         super().__init__(
             id=id,

@@ -5,6 +5,7 @@ Tencent APM Trace Client - handles network operations, metrics, and API communic
 from __future__ import annotations
 
+import importlib
 import json
 import logging
 import os
 import socket

@@ -110,6 +111,7 @@ class TencentTraceClient:
         self.span_contexts: dict[int, trace_api.SpanContext] = {}
 
         self.meter: Meter | None = None
+        self.meter_provider: MeterProvider | None = None
         self.hist_llm_duration: Histogram | None = None
         self.hist_token_usage: Histogram | None = None
         self.hist_time_to_first_token: Histogram | None = None

@@ -119,7 +121,6 @@ class TencentTraceClient:
 
         # Metrics exporter and instruments
         try:
-            from opentelemetry import metrics
             from opentelemetry.sdk.metrics import Histogram, MeterProvider
             from opentelemetry.sdk.metrics.export import AggregationTemporality, PeriodicExportingMetricReader
 

@@ -202,9 +203,11 @@ class TencentTraceClient:
                 )
 
             if metric_reader is not None:
+                # Use instance-level MeterProvider instead of global to support config changes
+                # without worker restart. Each TencentTraceClient manages its own MeterProvider.
                 provider = MeterProvider(resource=self.resource, metric_readers=[metric_reader])
-                metrics.set_meter_provider(provider)
-                self.meter = metrics.get_meter("dify-sdk", dify_config.project.version)
+                self.meter_provider = provider
+                self.meter = provider.get_meter("dify-sdk", dify_config.project.version)
 
                 # LLM operation duration histogram
                 self.hist_llm_duration = self.meter.create_histogram(

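The switch from the global meter provider to an instance-scoped one can be sketched as below with the opentelemetry-sdk; names here mirror the hunk above rather than dify internals, and the exporter wiring is omitted. The design benefit is that each client can rebuild or shut down its own provider without disturbing other code that relies on the process-wide global.

# Sketch of an instance-scoped MeterProvider (assumes opentelemetry-sdk is installed).
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.resources import Resource

provider = MeterProvider(resource=Resource.create({"service.name": "demo"}))
meter = provider.get_meter("dify-sdk", "0.0.0")  # scoped to this provider only
histogram = meter.create_histogram("llm.operation.duration")
histogram.record(0.42, {"model": "demo-model"})
provider.shutdown()  # safe to tear down without touching the global provider
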
@@ -244,6 +247,7 @@ class TencentTraceClient:
                 self.metric_reader = metric_reader
             else:
                 self.meter = None
+                self.meter_provider = None
                 self.hist_llm_duration = None
                 self.hist_token_usage = None
                 self.hist_time_to_first_token = None

@@ -253,6 +257,7 @@ class TencentTraceClient:
         except Exception:
             logger.exception("[Tencent APM] Metrics initialization failed; metrics disabled")
             self.meter = None
+            self.meter_provider = None
             self.hist_llm_duration = None
             self.hist_token_usage = None
             self.hist_time_to_first_token = None

@@ -279,6 +284,14 @@ class TencentTraceClient:
             if attributes:
                 for k, v in attributes.items():
                     attrs[k] = str(v) if not isinstance(v, (str, int, float, bool)) else v  # type: ignore[assignment]
+
+            logger.info(
+                "[Tencent Metrics] Metric: %s | Value: %.4f | Attributes: %s",
+                LLM_OPERATION_DURATION,
+                latency_seconds,
+                json.dumps(attrs, ensure_ascii=False),
+            )
+
             self.hist_llm_duration.record(latency_seconds, attrs)  # type: ignore[attr-defined]
         except Exception:
             logger.debug("[Tencent APM] Failed to record LLM duration", exc_info=True)

@@ -317,6 +330,13 @@ class TencentTraceClient:
                 "server.address": server_address,
             }
 
+            logger.info(
+                "[Tencent Metrics] Metric: %s | Value: %d | Attributes: %s",
+                GEN_AI_TOKEN_USAGE,
+                token_count,
+                json.dumps(attributes, ensure_ascii=False),
+            )
+
             self.hist_token_usage.record(token_count, attributes)  # type: ignore[attr-defined]
         except Exception:
             logger.debug("[Tencent APM] Failed to record token usage", exc_info=True)

@@ -344,6 +364,13 @@ class TencentTraceClient:
                 "stream": "true",
             }
 
+            logger.info(
+                "[Tencent Metrics] Metric: %s | Value: %.4f | Attributes: %s",
+                GEN_AI_SERVER_TIME_TO_FIRST_TOKEN,
+                ttft_seconds,
+                json.dumps(attributes, ensure_ascii=False),
+            )
+
             self.hist_time_to_first_token.record(ttft_seconds, attributes)  # type: ignore[attr-defined]
         except Exception:
             logger.debug("[Tencent APM] Failed to record time to first token", exc_info=True)

@@ -371,6 +398,13 @@ class TencentTraceClient:
                 "stream": "true",
             }
 
+            logger.info(
+                "[Tencent Metrics] Metric: %s | Value: %.4f | Attributes: %s",
+                GEN_AI_STREAMING_TIME_TO_GENERATE,
+                ttg_seconds,
+                json.dumps(attributes, ensure_ascii=False),
+            )
+
             self.hist_time_to_generate.record(ttg_seconds, attributes)  # type: ignore[attr-defined]
         except Exception:
             logger.debug("[Tencent APM] Failed to record time to generate", exc_info=True)

@@ -390,6 +424,14 @@ class TencentTraceClient:
             if attributes:
                 for k, v in attributes.items():
                     attrs[k] = str(v) if not isinstance(v, (str, int, float, bool)) else v  # type: ignore[assignment]
+
+            logger.info(
+                "[Tencent Metrics] Metric: %s | Value: %.4f | Attributes: %s",
+                GEN_AI_TRACE_DURATION,
+                duration_seconds,
+                json.dumps(attrs, ensure_ascii=False),
+            )
+
             self.hist_trace_duration.record(duration_seconds, attrs)  # type: ignore[attr-defined]
         except Exception:
             logger.debug("[Tencent APM] Failed to record trace duration", exc_info=True)

@@ -474,11 +516,19 @@ class TencentTraceClient:
 
             if self.tracer_provider:
                 self.tracer_provider.shutdown()
+
+            # Shutdown instance-level meter provider
+            if self.meter_provider is not None:
+                try:
+                    self.meter_provider.shutdown()  # type: ignore[attr-defined]
+                except Exception:
+                    logger.debug("[Tencent APM] Error shutting down meter provider", exc_info=True)
+
             if self.metric_reader is not None:
                 try:
                     self.metric_reader.shutdown()  # type: ignore[attr-defined]
                 except Exception:
-                    pass
+                    logger.debug("[Tencent APM] Error shutting down metric reader", exc_info=True)
 
         except Exception:
             logger.exception("[Tencent APM] Error during client shutdown")

@@ -228,29 +228,38 @@ class ToolEngine:
         """
         Handle tool response
         """
-        result = ""
+        parts: list[str] = []
+        json_parts: list[str] = []
 
         for response in tool_response:
             if response.type == ToolInvokeMessage.MessageType.TEXT:
-                result += cast(ToolInvokeMessage.TextMessage, response.message).text
+                parts.append(cast(ToolInvokeMessage.TextMessage, response.message).text)
             elif response.type == ToolInvokeMessage.MessageType.LINK:
-                result += (
+                parts.append(
                     f"result link: {cast(ToolInvokeMessage.TextMessage, response.message).text}."
                     + " please tell user to check it."
                 )
             elif response.type in {ToolInvokeMessage.MessageType.IMAGE_LINK, ToolInvokeMessage.MessageType.IMAGE}:
-                result += (
+                parts.append(
                     "image has been created and sent to user already, "
                     + "you do not need to create it, just tell the user to check it now."
                 )
             elif response.type == ToolInvokeMessage.MessageType.JSON:
-                result += json.dumps(
-                    safe_json_value(cast(ToolInvokeMessage.JsonMessage, response.message).json_object),
-                    ensure_ascii=False,
+                json_parts.append(
+                    json.dumps(
+                        safe_json_value(cast(ToolInvokeMessage.JsonMessage, response.message).json_object),
+                        ensure_ascii=False,
+                    )
                 )
             else:
-                result += str(response.message)
+                parts.append(str(response.message))
 
-        return result
+        # Add JSON parts, avoiding duplicates from text parts.
+        if json_parts:
+            existing_parts = set(parts)
+            parts.extend(p for p in json_parts if p not in existing_parts)
+
+        return "".join(parts)
 
     @staticmethod
     def _extract_tool_response_binary_and_text(

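A toy illustration of the new concatenation policy: non-JSON parts keep their order, and JSON parts are appended afterwards unless an identical string already appeared:

# Dedup-then-join mechanics from the hunk above, with plain strings.
parts = ["result text", '{"a": 1}']
json_parts = ['{"a": 1}', '{"b": 2}']
existing = set(parts)
parts.extend(p for p in json_parts if p not in existing)
assert "".join(parts) == 'result text{"a": 1}{"b": 2}'
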
@@ -2,6 +2,8 @@ import abc
 import datetime
 from typing import Protocol
 
+import pytz
+
 
 class _NowFunction(Protocol):
     @abc.abstractmethod

@@ -31,3 +33,51 @@ def ensure_naive_utc(dt: datetime.datetime) -> datetime.datetime:
     if dt.tzinfo is None:
         return dt
     return dt.astimezone(datetime.UTC).replace(tzinfo=None)
+
+
+def parse_time_range(
+    start: str | None, end: str | None, tzname: str
+) -> tuple[datetime.datetime | None, datetime.datetime | None]:
+    """
+    Parse time range strings and convert to UTC datetime objects.
+    Handles DST ambiguity and non-existent times gracefully.
+
+    Args:
+        start: Start time string (YYYY-MM-DD HH:MM)
+        end: End time string (YYYY-MM-DD HH:MM)
+        tzname: Timezone name
+
+    Returns:
+        tuple: (start_datetime_utc, end_datetime_utc)
+
+    Raises:
+        ValueError: When time range is invalid or start > end
+    """
+    tz = pytz.timezone(tzname)
+    utc = pytz.utc
+
+    def _parse(time_str: str | None, label: str) -> datetime.datetime | None:
+        if not time_str:
+            return None
+
+        try:
+            dt = datetime.datetime.strptime(time_str, "%Y-%m-%d %H:%M").replace(second=0)
+        except ValueError as e:
+            raise ValueError(f"Invalid {label} time format: {e}")
+
+        try:
+            return tz.localize(dt, is_dst=None).astimezone(utc)
+        except pytz.AmbiguousTimeError:
+            return tz.localize(dt, is_dst=False).astimezone(utc)
+        except pytz.NonExistentTimeError:
+            dt += datetime.timedelta(hours=1)
+            return tz.localize(dt, is_dst=None).astimezone(utc)
+
+    start_dt = _parse(start, "start")
+    end_dt = _parse(end, "end")
+
+    # Range validation
+    if start_dt and end_dt and start_dt > end_dt:
+        raise ValueError("start must be earlier than or equal to end")
+
+    return start_dt, end_dt

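Assuming the module lands as libs.datetime_utils, usage of the new helper looks roughly like this: times are interpreted in the caller's timezone and returned as UTC-aware datetimes, open-ended ranges are allowed, and invalid ranges raise ValueError.

# Usage sketch (assumes the module above is importable as libs.datetime_utils).
from libs.datetime_utils import parse_time_range

start_utc, end_utc = parse_time_range("2024-01-01 10:00", "2024-01-01 18:00", "US/Eastern")
# 10:00 EST == 15:00 UTC in January
assert start_utc is not None and start_utc.hour == 15

only_start, none_end = parse_time_range("2024-01-01 10:00", None, "UTC")
assert none_end is None
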
@@ -212,7 +212,7 @@ vdb = [
     "pgvector==0.2.5",
     "pymilvus~=2.5.0",
     "pymochow==2.2.9",
-    "pyobvector~=0.2.15",
+    "pyobvector~=0.2.17",
     "qdrant-client==1.9.0",
     "tablestore==6.3.7",
     "tcvectordb~=1.6.4",

@@ -32,41 +32,48 @@ class AppAnnotationService:
 
         if not app:
             raise NotFound("App not found")
+
+        answer = args.get("answer") or args.get("content")
+        if answer is None:
+            raise ValueError("Either 'answer' or 'content' must be provided")
+
         if args.get("message_id"):
             message_id = str(args["message_id"])
             # get message info
             message = db.session.query(Message).where(Message.id == message_id, Message.app_id == app.id).first()
 
             if not message:
                 raise NotFound("Message Not Exists.")
 
+            question = args.get("question") or message.query or ""
+
             annotation: MessageAnnotation | None = message.annotation
             # save the message annotation
             if annotation:
-                annotation.content = args["answer"]
-                annotation.question = args["question"]
+                annotation.content = answer
+                annotation.question = question
             else:
                 annotation = MessageAnnotation(
                     app_id=app.id,
                     conversation_id=message.conversation_id,
                     message_id=message.id,
-                    content=args["answer"],
-                    question=args["question"],
+                    content=answer,
+                    question=question,
                     account_id=current_user.id,
                 )
         else:
-            annotation = MessageAnnotation(
-                app_id=app.id, content=args["answer"], question=args["question"], account_id=current_user.id
-            )
+            question = args.get("question")
+            if not question:
+                raise ValueError("'question' is required when 'message_id' is not provided")
+
+            annotation = MessageAnnotation(app_id=app.id, content=answer, question=question, account_id=current_user.id)
         db.session.add(annotation)
         db.session.commit()
         # if annotation reply is enabled , add annotation to index
 
         annotation_setting = db.session.query(AppAnnotationSetting).where(AppAnnotationSetting.app_id == app_id).first()
         assert current_tenant_id is not None
         if annotation_setting:
             add_annotation_to_index_task.delay(
                 annotation.id,
-                args["question"],
+                annotation.question,
                 current_tenant_id,
                 app_id,
                 annotation_setting.collection_binding_id,

@@ -11,3 +11,7 @@ class FileTooLargeError(BaseServiceError):
 
 class UnsupportedFileTypeError(BaseServiceError):
     pass
+
+
+class BlockedFileExtensionError(BaseServiceError):
+    description = "File extension '{extension}' is not allowed for security reasons"

@@ -23,7 +23,7 @@ from models import Account
 from models.enums import CreatorUserRole
 from models.model import EndUser, UploadFile
 
-from .errors.file import FileTooLargeError, UnsupportedFileTypeError
+from .errors.file import BlockedFileExtensionError, FileTooLargeError, UnsupportedFileTypeError
 
 PREVIEW_WORDS_LIMIT = 3000
 

@@ -59,6 +59,10 @@ class FileService:
         if len(filename) > 200:
             filename = filename.split(".")[0][:200] + "." + extension
 
+        # check if extension is in blacklist
+        if extension and extension in dify_config.UPLOAD_FILE_EXTENSION_BLACKLIST:
+            raise BlockedFileExtensionError(f"File extension '.{extension}' is not allowed for security reasons")
+
         if source == "datasets" and extension not in DOCUMENT_EXTENSIONS:
             raise UnsupportedFileTypeError()
 

@@ -11,7 +11,7 @@ from configs import dify_config
 from models import Account, Tenant
 from models.enums import CreatorUserRole
 from models.model import EndUser, UploadFile
-from services.errors.file import FileTooLargeError, UnsupportedFileTypeError
+from services.errors.file import BlockedFileExtensionError, FileTooLargeError, UnsupportedFileTypeError
 from services.file_service import FileService
 
 

@@ -943,3 +943,150 @@ class TestFileService:
 
         # Should have the signed URL when source_url is empty
         assert upload_file2.source_url == "https://example.com/signed-url"
+
+    # Test file extension blacklist
+    def test_upload_file_blocked_extension(
+        self, db_session_with_containers, engine, mock_external_service_dependencies
+    ):
+        """
+        Test file upload with blocked extension.
+        """
+        fake = Faker()
+        account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies)
+
+        # Mock blacklist configuration by patching the inner field
+        with patch.object(dify_config, "inner_UPLOAD_FILE_EXTENSION_BLACKLIST", "exe,bat,sh"):
+            filename = "malware.exe"
+            content = b"test content"
+            mimetype = "application/x-msdownload"
+
+            with pytest.raises(BlockedFileExtensionError):
+                FileService(engine).upload_file(
+                    filename=filename,
+                    content=content,
+                    mimetype=mimetype,
+                    user=account,
+                )
+
+    def test_upload_file_blocked_extension_case_insensitive(
+        self, db_session_with_containers, engine, mock_external_service_dependencies
+    ):
+        """
+        Test file upload with blocked extension (case insensitive).
+        """
+        fake = Faker()
+        account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies)
+
+        # Mock blacklist configuration by patching the inner field
+        with patch.object(dify_config, "inner_UPLOAD_FILE_EXTENSION_BLACKLIST", "exe,bat"):
+            # Test with uppercase extension
+            filename = "malware.EXE"
+            content = b"test content"
+            mimetype = "application/x-msdownload"
+
+            with pytest.raises(BlockedFileExtensionError):
+                FileService(engine).upload_file(
+                    filename=filename,
+                    content=content,
+                    mimetype=mimetype,
+                    user=account,
+                )
+
+    def test_upload_file_not_in_blacklist(self, db_session_with_containers, engine, mock_external_service_dependencies):
+        """
+        Test file upload with extension not in blacklist.
+        """
+        fake = Faker()
+        account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies)
+
+        # Mock blacklist configuration by patching the inner field
+        with patch.object(dify_config, "inner_UPLOAD_FILE_EXTENSION_BLACKLIST", "exe,bat,sh"):
+            filename = "document.pdf"
+            content = b"test content"
+            mimetype = "application/pdf"
+
+            upload_file = FileService(engine).upload_file(
+                filename=filename,
+                content=content,
+                mimetype=mimetype,
+                user=account,
+            )
+
+            assert upload_file is not None
+            assert upload_file.name == filename
+            assert upload_file.extension == "pdf"
+
+    def test_upload_file_empty_blacklist(self, db_session_with_containers, engine, mock_external_service_dependencies):
+        """
+        Test file upload with empty blacklist (default behavior).
+        """
+        fake = Faker()
+        account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies)
+
+        # Mock empty blacklist configuration by patching the inner field
+        with patch.object(dify_config, "inner_UPLOAD_FILE_EXTENSION_BLACKLIST", ""):
+            # Should allow all file types when blacklist is empty
+            filename = "script.sh"
+            content = b"#!/bin/bash\necho test"
+            mimetype = "application/x-sh"
+
+            upload_file = FileService(engine).upload_file(
+                filename=filename,
+                content=content,
+                mimetype=mimetype,
+                user=account,
+            )
+
+            assert upload_file is not None
+            assert upload_file.extension == "sh"
+
+    def test_upload_file_multiple_blocked_extensions(
+        self, db_session_with_containers, engine, mock_external_service_dependencies
+    ):
+        """
+        Test file upload with multiple blocked extensions.
+        """
+        fake = Faker()
+        account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies)
+
+        # Mock blacklist with multiple extensions by patching the inner field
+        blacklist_str = "exe,bat,cmd,com,scr,vbs,ps1,msi,dll"
+        with patch.object(dify_config, "inner_UPLOAD_FILE_EXTENSION_BLACKLIST", blacklist_str):
+            for ext in blacklist_str.split(","):
+                filename = f"malware.{ext}"
+                content = b"test content"
+                mimetype = "application/octet-stream"
+
+                with pytest.raises(BlockedFileExtensionError):
+                    FileService(engine).upload_file(
+                        filename=filename,
+                        content=content,
+                        mimetype=mimetype,
+                        user=account,
+                    )
+
+    def test_upload_file_no_extension_with_blacklist(
+        self, db_session_with_containers, engine, mock_external_service_dependencies
+    ):
+        """
+        Test file upload with no extension when blacklist is configured.
+        """
+        fake = Faker()
+        account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies)
+
+        # Mock blacklist configuration by patching the inner field
+        with patch.object(dify_config, "inner_UPLOAD_FILE_EXTENSION_BLACKLIST", "exe,bat"):
+            # Files with no extension should not be blocked
+            filename = "README"
+            content = b"test content"
+            mimetype = "text/plain"
+
+            upload_file = FileService(engine).upload_file(
+                filename=filename,
+                content=content,
+                mimetype=mimetype,
+                user=account,
+            )
+
+            assert upload_file is not None
+            assert upload_file.extension == ""

@@ -23,3 +23,32 @@ def test_file():
     assert file.extension == ".png"
     assert file.mime_type == "image/png"
     assert file.size == 67
+
+
+def test_file_model_validate_with_legacy_fields():
+    """Test `File` model can handle data containing compatibility fields."""
+    data = {
+        "id": "test-file",
+        "tenant_id": "test-tenant-id",
+        "type": "image",
+        "transfer_method": "tool_file",
+        "related_id": "test-related-id",
+        "filename": "image.png",
+        "extension": ".png",
+        "mime_type": "image/png",
+        "size": 67,
+        "storage_key": "test-storage-key",
+        "url": "https://example.com/image.png",
+        # Extra legacy fields
+        "tool_file_id": "tool-file-123",
+        "upload_file_id": "upload-file-456",
+        "datasource_file_id": "datasource-file-789",
+    }
+
+    # Should be able to create `File` object without raising an exception
+    file = File.model_validate(data)
+
+    # The File object does not keep tool_file_id, upload_file_id, or
+    # datasource_file_id as attributes, so it must not expose them.
+    for legacy_field in ("tool_file_id", "upload_file_id", "datasource_file_id"):
+        assert not hasattr(file, legacy_field)

@@ -1,8 +1,10 @@
 import datetime
 from unittest.mock import patch
 
 import pytest
+import pytz
 
-from libs.datetime_utils import naive_utc_now
+from libs.datetime_utils import naive_utc_now, parse_time_range
 
 
 def test_naive_utc_now(monkeypatch: pytest.MonkeyPatch):

@ -20,3 +22,247 @@ def test_naive_utc_now(monkeypatch: pytest.MonkeyPatch):
|
|||
naive_time = naive_datetime.time()
|
||||
utc_time = tz_aware_utc_now.time()
|
||||
assert naive_time == utc_time
|
||||
|
||||
|
||||
class TestParseTimeRange:
|
||||
"""Test cases for parse_time_range function."""
|
||||
|
||||
def test_parse_time_range_basic(self):
|
||||
"""Test basic time range parsing."""
|
||||
start, end = parse_time_range("2024-01-01 10:00", "2024-01-01 18:00", "UTC")
|
||||
|
||||
assert start is not None
|
||||
assert end is not None
|
||||
assert start < end
|
||||
assert start.tzinfo == pytz.UTC
|
||||
assert end.tzinfo == pytz.UTC
|
||||
|
||||
def test_parse_time_range_start_only(self):
|
||||
"""Test parsing with only start time."""
|
||||
start, end = parse_time_range("2024-01-01 10:00", None, "UTC")
|
||||
|
||||
assert start is not None
|
||||
assert end is None
|
||||
assert start.tzinfo == pytz.UTC
|
||||
|
||||
def test_parse_time_range_end_only(self):
|
||||
"""Test parsing with only end time."""
|
||||
start, end = parse_time_range(None, "2024-01-01 18:00", "UTC")
|
||||
|
||||
assert start is None
|
||||
assert end is not None
|
||||
assert end.tzinfo == pytz.UTC
|
||||
|
||||
def test_parse_time_range_both_none(self):
|
||||
"""Test parsing with both times None."""
|
||||
start, end = parse_time_range(None, None, "UTC")
|
||||
|
||||
assert start is None
|
||||
assert end is None
|
||||
|
||||
def test_parse_time_range_different_timezones(self):
|
||||
"""Test parsing with different timezones."""
|
||||
# Test with US/Eastern timezone
|
||||
start, end = parse_time_range("2024-01-01 10:00", "2024-01-01 18:00", "US/Eastern")
|
||||
|
||||
assert start is not None
|
||||
assert end is not None
|
||||
assert start.tzinfo == pytz.UTC
|
||||
assert end.tzinfo == pytz.UTC
|
||||
# Verify the times are correctly converted to UTC
|
||||
assert start.hour == 15 # 10 AM EST = 3 PM UTC (in January)
|
||||
assert end.hour == 23 # 6 PM EST = 11 PM UTC (in January)
|
||||
|
||||
def test_parse_time_range_invalid_start_format(self):
|
||||
"""Test parsing with invalid start time format."""
|
||||
with pytest.raises(ValueError, match="time data.*does not match format"):
|
||||
parse_time_range("invalid-date", "2024-01-01 18:00", "UTC")
|
||||
|
||||
def test_parse_time_range_invalid_end_format(self):
|
||||
"""Test parsing with invalid end time format."""
|
||||
with pytest.raises(ValueError, match="time data.*does not match format"):
|
||||
parse_time_range("2024-01-01 10:00", "invalid-date", "UTC")
|
||||
|
||||
def test_parse_time_range_invalid_timezone(self):
|
||||
"""Test parsing with invalid timezone."""
|
||||
with pytest.raises(pytz.exceptions.UnknownTimeZoneError):
|
||||
parse_time_range("2024-01-01 10:00", "2024-01-01 18:00", "Invalid/Timezone")
|
||||
|
||||
def test_parse_time_range_start_after_end(self):
|
||||
"""Test parsing with start time after end time."""
|
||||
with pytest.raises(ValueError, match="start must be earlier than or equal to end"):
|
||||
parse_time_range("2024-01-01 18:00", "2024-01-01 10:00", "UTC")
|
||||
|
||||
def test_parse_time_range_start_equals_end(self):
|
||||
"""Test parsing with start time equal to end time."""
|
||||
start, end = parse_time_range("2024-01-01 10:00", "2024-01-01 10:00", "UTC")
|
||||
|
||||
assert start is not None
|
||||
assert end is not None
|
||||
assert start == end
|
||||
|
||||
def test_parse_time_range_dst_ambiguous_time(self):
|
||||
"""Test parsing during DST ambiguous time (fall back)."""
|
||||
# This test simulates DST fall back where 2:30 AM occurs twice
|
||||
with patch("pytz.timezone") as mock_timezone:
|
||||
# Mock timezone that raises AmbiguousTimeError
|
||||
mock_tz = mock_timezone.return_value
|
||||
|
||||
# Create a mock datetime object for the return value
|
||||
mock_dt = datetime.datetime(2024, 1, 1, 10, 0, 0)
|
||||
mock_utc_dt = mock_dt.replace(tzinfo=pytz.UTC)
|
||||
|
||||
# Create a proper mock for the localized datetime
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
mock_localized_dt = MagicMock()
|
||||
mock_localized_dt.astimezone.return_value = mock_utc_dt
|
||||
|
||||
# Set up side effects: first call raises exception, second call succeeds
|
||||
mock_tz.localize.side_effect = [
|
||||
pytz.AmbiguousTimeError("Ambiguous time"), # First call for start
|
||||
mock_localized_dt, # Second call for start (with is_dst=False)
|
||||
pytz.AmbiguousTimeError("Ambiguous time"), # First call for end
|
||||
mock_localized_dt, # Second call for end (with is_dst=False)
|
||||
]
|
||||
|
||||
start, end = parse_time_range("2024-01-01 10:00", "2024-01-01 18:00", "US/Eastern")
|
||||
|
||||
# Should use is_dst=False for ambiguous times
|
||||
assert mock_tz.localize.call_count == 4 # 2 calls per time (first fails, second succeeds)
|
||||
assert start is not None
|
||||
assert end is not None
|
||||
|
||||
def test_parse_time_range_dst_nonexistent_time(self):
|
||||
"""Test parsing during DST nonexistent time (spring forward)."""
|
||||
with patch("pytz.timezone") as mock_timezone:
|
||||
# Mock timezone that raises NonExistentTimeError
|
||||
mock_tz = mock_timezone.return_value
|
||||
|
||||
# Create a mock datetime object for the return value
|
||||
mock_dt = datetime.datetime(2024, 1, 1, 10, 0, 0)
|
||||
mock_utc_dt = mock_dt.replace(tzinfo=pytz.UTC)
|
||||
|
||||
# Create a proper mock for the localized datetime
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
mock_localized_dt = MagicMock()
|
||||
mock_localized_dt.astimezone.return_value = mock_utc_dt
|
||||
|
||||
# Set up side effects: first call raises exception, second call succeeds
|
||||
mock_tz.localize.side_effect = [
|
||||
pytz.NonExistentTimeError("Non-existent time"), # First call for start
|
||||
mock_localized_dt, # Second call for start (with adjusted time)
|
||||
pytz.NonExistentTimeError("Non-existent time"), # First call for end
|
||||
mock_localized_dt, # Second call for end (with adjusted time)
|
||||
]
|
||||
|
||||
start, end = parse_time_range("2024-01-01 10:00", "2024-01-01 18:00", "US/Eastern")
|
||||
|
||||
# Should adjust time forward by 1 hour for nonexistent times
|
||||
assert mock_tz.localize.call_count == 4 # 2 calls per time (first fails, second succeeds)
|
||||
assert start is not None
|
||||
assert end is not None
|
||||
|
||||
def test_parse_time_range_edge_cases(self):
|
||||
"""Test edge cases for time parsing."""
|
||||
# Test with midnight times
|
||||
start, end = parse_time_range("2024-01-01 00:00", "2024-01-01 23:59", "UTC")
|
||||
assert start is not None
|
||||
assert end is not None
|
||||
assert start.hour == 0
|
||||
assert start.minute == 0
|
||||
assert end.hour == 23
|
||||
assert end.minute == 59
|
||||
|
||||
def test_parse_time_range_different_dates(self):
|
||||
"""Test parsing with different dates."""
|
||||
start, end = parse_time_range("2024-01-01 10:00", "2024-01-02 10:00", "UTC")
|
||||
assert start is not None
|
||||
assert end is not None
|
||||
assert start.date() != end.date()
|
||||
assert (end - start).days == 1
|
||||
|
||||
def test_parse_time_range_seconds_handling(self):
|
||||
"""Test that seconds are properly set to 0."""
|
||||
start, end = parse_time_range("2024-01-01 10:30", "2024-01-01 18:45", "UTC")
|
||||
assert start is not None
|
||||
assert end is not None
|
||||
assert start.second == 0
|
||||
assert end.second == 0
|
||||
|
||||
def test_parse_time_range_timezone_conversion_accuracy(self):
|
||||
"""Test accurate timezone conversion."""
|
||||
# Test with a known timezone conversion
|
||||
start, end = parse_time_range("2024-01-01 12:00", "2024-01-01 12:00", "Asia/Tokyo")
|
||||
|
||||
assert start is not None
|
||||
assert end is not None
|
||||
assert start.tzinfo == pytz.UTC
|
||||
assert end.tzinfo == pytz.UTC
|
||||
# Tokyo is UTC+9, so 12:00 JST = 03:00 UTC
|
||||
assert start.hour == 3
|
||||
assert end.hour == 3
|
||||
|
||||
def test_parse_time_range_summer_time(self):
|
||||
"""Test parsing during summer time (DST)."""
|
||||
# Test with US/Eastern during summer (EDT = UTC-4)
|
||||
start, end = parse_time_range("2024-07-01 12:00", "2024-07-01 12:00", "US/Eastern")
|
||||
|
||||
assert start is not None
|
||||
assert end is not None
|
||||
assert start.tzinfo == pytz.UTC
|
||||
assert end.tzinfo == pytz.UTC
|
||||
# 12:00 EDT = 16:00 UTC
|
||||
assert start.hour == 16
|
||||
assert end.hour == 16
|
||||
|
||||
def test_parse_time_range_winter_time(self):
|
||||
"""Test parsing during winter time (standard time)."""
|
||||
# Test with US/Eastern during winter (EST = UTC-5)
|
||||
start, end = parse_time_range("2024-01-01 12:00", "2024-01-01 12:00", "US/Eastern")
|
||||
|
||||
assert start is not None
|
||||
assert end is not None
|
||||
assert start.tzinfo == pytz.UTC
|
||||
assert end.tzinfo == pytz.UTC
|
||||
# 12:00 EST = 17:00 UTC
|
||||
assert start.hour == 17
|
||||
assert end.hour == 17
|
||||
|
||||
def test_parse_time_range_empty_strings(self):
|
||||
"""Test parsing with empty strings."""
|
||||
# Empty strings are treated as None, so they should not raise errors
|
||||
start, end = parse_time_range("", "2024-01-01 18:00", "UTC")
|
||||
assert start is None
|
||||
assert end is not None
|
||||
|
||||
start, end = parse_time_range("2024-01-01 10:00", "", "UTC")
|
||||
assert start is not None
|
||||
assert end is None
|
||||
|
||||
def test_parse_time_range_malformed_datetime(self):
|
||||
"""Test parsing with malformed datetime strings."""
|
||||
with pytest.raises(ValueError, match="time data.*does not match format"):
|
||||
parse_time_range("2024-13-01 10:00", "2024-01-01 18:00", "UTC")
|
||||
|
||||
with pytest.raises(ValueError, match="time data.*does not match format"):
|
||||
parse_time_range("2024-01-01 10:00", "2024-01-32 18:00", "UTC")
|
||||
|
||||
def test_parse_time_range_very_long_time_range(self):
|
||||
"""Test parsing with very long time range."""
|
||||
start, end = parse_time_range("2020-01-01 00:00", "2030-12-31 23:59", "UTC")
|
||||
|
||||
assert start is not None
|
||||
assert end is not None
|
||||
assert start < end
|
||||
assert (end - start).days > 3000 # More than 8 years
|
||||
|
||||
def test_parse_time_range_negative_timezone(self):
|
||||
"""Test parsing with negative timezone offset."""
|
||||
start, end = parse_time_range("2024-01-01 12:00", "2024-01-01 12:00", "America/New_York")
|
||||
|
||||
assert start is not None
|
||||
assert end is not None
|
||||
assert start.tzinfo == pytz.UTC
|
||||
assert end.tzinfo == pytz.UTC
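
Taken together, these tests pin down a fairly complete contract for parse_time_range. A minimal implementation satisfying them could look like the following; this is a reconstruction from the test expectations, not the actual code in libs/datetime_utils:

# Sketch reconstructed from the tests above (assumed behavior, not the shipped implementation).
import datetime
import pytz

def parse_time_range_sketch(start_str, end_str, tz_name):
    """Parse '%Y-%m-%d %H:%M' strings in tz_name and return (start, end) as UTC datetimes."""
    tz = pytz.timezone(tz_name)  # raises UnknownTimeZoneError for bad names

    def to_utc(value):
        if not value:  # None and '' both mean "no bound"
            return None
        naive = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M")  # seconds default to 0
        try:
            localized = tz.localize(naive, is_dst=None)
        except pytz.AmbiguousTimeError:
            localized = tz.localize(naive, is_dst=False)  # fall back: prefer standard time
        except pytz.NonExistentTimeError:
            localized = tz.localize(naive + datetime.timedelta(hours=1))  # spring forward: shift ahead
        return localized.astimezone(pytz.UTC)

    start, end = to_utc(start_str), to_utc(end_str)
    if start and end and start > end:
        raise ValueError("start must be earlier than or equal to end")
    return start, end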
api/uv.lock (3931 lines changed): file diff suppressed because it is too large
@ -767,6 +767,12 @@ UPLOAD_FILE_SIZE_LIMIT=15
# The maximum number of files that can be uploaded at a time, default 5.
UPLOAD_FILE_BATCH_LIMIT=5

# Comma-separated list of file extensions blocked from upload for security reasons.
# Extensions should be lowercase without dots (e.g., exe,bat,sh,dll).
# Empty by default to allow all file types.
# Recommended: exe,bat,cmd,com,scr,vbs,ps1,msi,dll
UPLOAD_FILE_EXTENSION_BLACKLIST=

# ETL type, support: `dify`, `Unstructured`
# `dify` Dify's proprietary file extraction scheme
# `Unstructured` Unstructured.io file extraction scheme

@ -1359,6 +1365,9 @@ SWAGGER_UI_PATH=/swagger-ui.html
# Set to false to export dataset IDs as plain text for easier cross-environment import
DSL_EXPORT_ENCRYPT_DATASET_ID=true

# Maximum number of segments for dataset segments API (0 for unlimited)
DATASET_MAX_SEGMENTS_PER_REQUEST=0

# Celery schedule tasks configuration
ENABLE_CLEAN_EMBEDDING_CACHE_TASK=false
ENABLE_CLEAN_UNUSED_DATASETS_TASK=false
@ -101,6 +101,8 @@ services:
      ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true}
      ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true}
      ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true}
      NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX: ${NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX:-false}

  # The postgres database.
  db:
    image: postgres:15-alpine

@ -354,6 +354,7 @@ x-shared-env: &shared-api-worker-env
  CLICKZETTA_VECTOR_DISTANCE_FUNCTION: ${CLICKZETTA_VECTOR_DISTANCE_FUNCTION:-cosine_distance}
  UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15}
  UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5}
  UPLOAD_FILE_EXTENSION_BLACKLIST: ${UPLOAD_FILE_EXTENSION_BLACKLIST:-}
  ETL_TYPE: ${ETL_TYPE:-dify}
  UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-}
  UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-}

@ -605,6 +606,7 @@ x-shared-env: &shared-api-worker-env
  SWAGGER_UI_ENABLED: ${SWAGGER_UI_ENABLED:-true}
  SWAGGER_UI_PATH: ${SWAGGER_UI_PATH:-/swagger-ui.html}
  DSL_EXPORT_ENCRYPT_DATASET_ID: ${DSL_EXPORT_ENCRYPT_DATASET_ID:-true}
  DATASET_MAX_SEGMENTS_PER_REQUEST: ${DATASET_MAX_SEGMENTS_PER_REQUEST:-0}
  ENABLE_CLEAN_EMBEDDING_CACHE_TASK: ${ENABLE_CLEAN_EMBEDDING_CACHE_TASK:-false}
  ENABLE_CLEAN_UNUSED_DATASETS_TASK: ${ENABLE_CLEAN_UNUSED_DATASETS_TASK:-false}
  ENABLE_CREATE_TIDB_SERVERLESS_TASK: ${ENABLE_CREATE_TIDB_SERVERLESS_TASK:-false}

@ -720,6 +722,8 @@ services:
      ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true}
      ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true}
      ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true}
      NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX: ${NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX:-false}

  # The postgres database.
  db:
    image: postgres:15-alpine
@ -759,4 +759,104 @@ export default translation`
      expect(result).not.toContain('Zbuduj inteligentnego agenta')
    })
  })

  describe('Performance and Scalability', () => {
    it('should handle large translation files efficiently', async () => {
      // Create a large translation file with 1000 keys
      const largeContent = `const translation = {
${Array.from({ length: 1000 }, (_, i) => `  key${i}: 'value${i}',`).join('\n')}
}

export default translation`

      fs.writeFileSync(path.join(testEnDir, 'large.ts'), largeContent)

      const startTime = Date.now()
      const keys = await getKeysFromLanguage('en-US')
      const endTime = Date.now()

      expect(keys.length).toBe(1000)
      expect(endTime - startTime).toBeLessThan(1000) // Should complete in under 1 second
    })

    it('should handle multiple translation files concurrently', async () => {
      // Create multiple files
      for (let i = 0; i < 10; i++) {
        const content = `const translation = {
  key${i}: 'value${i}',
  nested${i}: {
    subkey: 'subvalue'
  }
}

export default translation`
        fs.writeFileSync(path.join(testEnDir, `file${i}.ts`), content)
      }

      const startTime = Date.now()
      const keys = await getKeysFromLanguage('en-US')
      const endTime = Date.now()

      expect(keys.length).toBe(20) // 10 files * 2 keys each
      expect(endTime - startTime).toBeLessThan(500)
    })
  })

  describe('Unicode and Internationalization', () => {
    it('should handle Unicode characters in keys and values', async () => {
      const unicodeContent = `const translation = {
  '中文键': '中文值',
  'العربية': 'قيمة',
  'emoji_😀': 'value with emoji 🎉',
  'mixed_中文_English': 'mixed value'
}

export default translation`

      fs.writeFileSync(path.join(testEnDir, 'unicode.ts'), unicodeContent)

      const keys = await getKeysFromLanguage('en-US')

      expect(keys).toContain('unicode.中文键')
      expect(keys).toContain('unicode.العربية')
      expect(keys).toContain('unicode.emoji_😀')
      expect(keys).toContain('unicode.mixed_中文_English')
    })

    it('should handle RTL language files', async () => {
      const rtlContent = `const translation = {
  مرحبا: 'Hello',
  العالم: 'World',
  nested: {
    مفتاح: 'key'
  }
}

export default translation`

      fs.writeFileSync(path.join(testEnDir, 'rtl.ts'), rtlContent)

      const keys = await getKeysFromLanguage('en-US')

      expect(keys).toContain('rtl.مرحبا')
      expect(keys).toContain('rtl.العالم')
      expect(keys).toContain('rtl.nested.مفتاح')
    })
  })

  describe('Error Recovery', () => {
    it('should handle syntax errors in translation files gracefully', async () => {
      const invalidContent = `const translation = {
  validKey: 'valid value',
  invalidKey: 'missing quote,
  anotherKey: 'another value'
}

export default translation`

      fs.writeFileSync(path.join(testEnDir, 'invalid.ts'), invalidContent)

      await expect(getKeysFromLanguage('en-US')).rejects.toThrow()
    })
  })
})
@ -286,4 +286,116 @@ describe('Navigation Utilities', () => {
      expect(mockPush).toHaveBeenCalledWith('/datasets/filtered-set/documents?page=1&limit=50&status=active&type=pdf&sort=created_at&order=desc')
    })
  })

  describe('Edge Cases and Error Handling', () => {
    test('handles special characters in query parameters', () => {
      Object.defineProperty(window, 'location', {
        value: { search: '?keyword=hello%20world&filter=type%3Apdf&tag=%E4%B8%AD%E6%96%87' },
        writable: true,
      })

      const path = createNavigationPath('/datasets/123/documents')
      expect(path).toContain('hello+world')
      expect(path).toContain('type%3Apdf')
      expect(path).toContain('%E4%B8%AD%E6%96%87')
    })

    test('handles duplicate query parameters', () => {
      Object.defineProperty(window, 'location', {
        value: { search: '?tag=tag1&tag=tag2&tag=tag3' },
        writable: true,
      })

      const params = extractQueryParams(['tag'])
      // URLSearchParams.get() returns the first value
      expect(params.tag).toBe('tag1')
    })

    test('handles very long query strings', () => {
      const longValue = 'a'.repeat(1000)
      Object.defineProperty(window, 'location', {
        value: { search: `?data=${longValue}` },
        writable: true,
      })

      const path = createNavigationPath('/datasets/123/documents')
      expect(path).toContain(longValue)
      expect(path.length).toBeGreaterThan(1000)
    })

    test('handles empty string values in query parameters', () => {
      const path = createNavigationPathWithParams('/datasets/123/documents', {
        page: 1,
        keyword: '',
        filter: '',
        sort: 'name',
      })

      expect(path).toBe('/datasets/123/documents?page=1&sort=name')
      expect(path).not.toContain('keyword=')
      expect(path).not.toContain('filter=')
    })

    test('handles null and undefined values in mergeQueryParams', () => {
      Object.defineProperty(window, 'location', {
        value: { search: '?page=1&limit=10&keyword=test' },
        writable: true,
      })

      const merged = mergeQueryParams({
        keyword: null,
        filter: undefined,
        sort: 'name',
      })
      const result = merged.toString()

      expect(result).toContain('page=1')
      expect(result).toContain('limit=10')
      expect(result).not.toContain('keyword')
      expect(result).toContain('sort=name')
    })

    test('handles navigation with hash fragments', () => {
      Object.defineProperty(window, 'location', {
        value: { search: '?page=1', hash: '#section-2' },
        writable: true,
      })

      const path = createNavigationPath('/datasets/123/documents')
      // Should preserve query params but not hash
      expect(path).toBe('/datasets/123/documents?page=1')
    })

    test('handles malformed query strings gracefully', () => {
      Object.defineProperty(window, 'location', {
        value: { search: '?page=1&invalid&limit=10&=value&key=' },
        writable: true,
      })

      const params = extractQueryParams(['page', 'limit', 'invalid', 'key'])
      expect(params.page).toBe('1')
      expect(params.limit).toBe('10')
      // Malformed params should be handled by URLSearchParams
      expect(params.invalid).toBe('') // for `&invalid`
      expect(params.key).toBe('') // for `&key=`
    })
  })

  describe('Performance Tests', () => {
    test('handles large number of query parameters efficiently', () => {
      const manyParams = Array.from({ length: 50 }, (_, i) => `param${i}=value${i}`).join('&')
      Object.defineProperty(window, 'location', {
        value: { search: `?${manyParams}` },
        writable: true,
      })

      const startTime = Date.now()
      const path = createNavigationPath('/datasets/123/documents')
      const endTime = Date.now()

      expect(endTime - startTime).toBeLessThan(50) // Should be fast
      expect(path).toContain('param0=value0')
      expect(path).toContain('param49=value49')
    })
  })
})
@ -1,107 +1,144 @@
import type { JSX } from 'react'
import { cloneElement, useCallback } from 'react'
import { useState } from 'react'
import { useEffect, useRef, useState } from 'react'
import { useTranslation } from 'react-i18next'
import Button from '@/app/components/base/button'
import { PortalToFollowElem, PortalToFollowElemContent, PortalToFollowElemTrigger } from '../base/portal-to-follow-elem'
import Divider from '@/app/components/base/divider'
import { RiMoreFill } from '@remixicon/react'
import cn from '@/utils/classnames'
import { RiMoreLine } from '@remixicon/react'

export type Operation = {
  id: string
  title: string
  icon: JSX.Element
  onClick: () => void
  type?: 'action' | 'divider'
  className?: string
  id: string; title: string; icon: JSX.Element; onClick: () => void
}

const AppOperations = ({ primaryOperations, secondaryOperations, gap }: {
  primaryOperations: Operation[]
  secondaryOperations: Operation[]
const AppOperations = ({ operations, gap }: {
  operations: Operation[]
  gap: number
}) => {
  const { t } = useTranslation()
  const [visibleOpreations, setVisibleOperations] = useState<Operation[]>([])
  const [moreOperations, setMoreOperations] = useState<Operation[]>([])
  const [showMore, setShowMore] = useState(false)
  const navRef = useRef<HTMLDivElement>(null)
  const handleTriggerMore = useCallback(() => {
    setShowMore(prev => !prev)
  }, [])
    setShowMore(true)
  }, [setShowMore])

  const renderSecondaryOperation = (operation: Operation, index: number) => {
    if (operation.type === 'divider') {
      return (
        <Divider key={operation.id || `divider-${index}`} className='my-1' />
      )
  useEffect(() => {
    const moreElement = document.getElementById('more')
    const navElement = document.getElementById('nav')
    let width = 0
    const containerWidth = navElement?.clientWidth ?? 0
    const moreWidth = moreElement?.clientWidth ?? 0

    if (containerWidth === 0 || moreWidth === 0) return

    const updatedEntries: Record<string, boolean> = operations.reduce((pre, cur) => {
      pre[cur.id] = false
      return pre
    }, {} as Record<string, boolean>)
    const childrens = Array.from(navRef.current!.children).slice(0, -1)
    for (let i = 0; i < childrens.length; i++) {
      const child: any = childrens[i]
      const id = child.dataset.targetid
      if (!id) break
      const childWidth = child.clientWidth

      if (width + gap + childWidth + moreWidth <= containerWidth) {
        updatedEntries[id] = true
        width += gap + childWidth
      }
      else {
        if (i === childrens.length - 1 && width + childWidth <= containerWidth)
          updatedEntries[id] = true
        else
          updatedEntries[id] = false
        break
      }
    }

    return (
      <div
        key={operation.id}
        className={cn(
          'flex h-8 cursor-pointer items-center gap-x-1 rounded-lg p-1.5 hover:bg-state-base-hover',
          operation.className,
        )}
        onClick={() => {
          setShowMore(false)
          operation.onClick()
        }}
      >
        {cloneElement(operation.icon, {
          className: 'h-4 w-4 text-text-tertiary',
        })}
        <span className='system-md-regular text-text-secondary'>
          {operation.title}
        </span>
      </div>
    )
  }
    setVisibleOperations(operations.filter(item => updatedEntries[item.id]))
    setMoreOperations(operations.filter(item => !updatedEntries[item.id]))
  }, [operations, gap])

  return (
    <div className="flex items-center self-stretch overflow-hidden" style={{ gap }}>
      {/* Fixed primary operations */}
      {primaryOperations.map(operation =>
    <>
      {!visibleOpreations.length && <div
        id="nav"
        ref={navRef}
        className="flex h-0 items-center self-stretch overflow-hidden"
        style={{ gap }}
      >
        {operations.map((operation, index) =>
          <Button
            key={index}
            data-targetid={operation.id}
            size={'small'}
            variant={'secondary'}
            className="gap-[1px]">
            {cloneElement(operation.icon, { className: 'h-3.5 w-3.5 text-components-button-secondary-text' })}
            <span className="system-xs-medium text-components-button-secondary-text">
              {operation.title}
            </span>
          </Button>,
        )}
        <Button
          key={operation.id}
          id="more"
          size={'small'}
          variant={'secondary'}
          className="gap-[1px] px-1.5"
          onClick={operation.onClick}>
          {cloneElement(operation.icon, { className: 'h-3.5 w-3.5 text-components-button-secondary-text' })}
          className="gap-[1px]"
        >
          <RiMoreLine className="h-3.5 w-3.5 text-components-button-secondary-text" />
          <span className="system-xs-medium text-components-button-secondary-text">
            {operation.title}
            {t('common.operation.more')}
          </span>
        </Button>,
      )}

      {/* More button - always show if there are secondary operations */}
      {secondaryOperations.length > 0 && (
        <PortalToFollowElem
        </Button>
      </div>}
      <div className="flex items-center self-stretch overflow-hidden" style={{ gap }}>
        {visibleOpreations.map(operation =>
          <Button
            key={operation.id}
            data-targetid={operation.id}
            size={'small'}
            variant={'secondary'}
            className="gap-[1px]"
            onClick={operation.onClick}>
            {cloneElement(operation.icon, { className: 'h-3.5 w-3.5 text-components-button-secondary-text' })}
            <span className="system-xs-medium text-components-button-secondary-text">
              {operation.title}
            </span>
          </Button>,
        )}
        {visibleOpreations.length < operations.length && <PortalToFollowElem
          open={showMore}
          onOpenChange={setShowMore}
          placement='bottom-end'
          offset={{
            mainAxis: 4,
            crossAxis: 55,
          }}>
          <PortalToFollowElemTrigger onClick={handleTriggerMore}>
            <Button
              size={'small'}
              variant={'secondary'}
              className='gap-1 px-1.5'
              className='gap-[1px]'
            >
              <RiMoreFill className='h-3.5 w-3.5 text-components-button-secondary-text' />
              <RiMoreLine className='h-3.5 w-3.5 text-components-button-secondary-text' />
              <span className='system-xs-medium text-components-button-secondary-text'>{t('common.operation.more')}</span>
            </Button>
          </PortalToFollowElemTrigger>
          <PortalToFollowElemContent className='z-[100]'>
            <div className='flex min-w-[264px] flex-col rounded-[12px] border-[0.5px] border-components-panel-border bg-components-panel-bg-blur p-1 shadow-lg backdrop-blur-[10px]'>
              {secondaryOperations.map((operation, index) => renderSecondaryOperation(operation, index))}
          <PortalToFollowElemContent className='z-[30]'>
            <div className='flex min-w-[264px] flex-col rounded-[12px] border-[0.5px] border-components-panel-border bg-components-panel-bg-blur p-1 shadow-lg backdrop-blur-[5px]'>
              {moreOperations.map(item => <div
                key={item.id}
                className='flex h-8 cursor-pointer items-center gap-x-1 rounded-lg p-1.5 hover:bg-state-base-hover'
                onClick={item.onClick}
              >
                {cloneElement(item.icon, { className: 'h-4 w-4 text-text-tertiary' })}
                <span className='system-md-regular text-text-secondary'>{item.title}</span>
              </div>)}
            </div>
          </PortalToFollowElemContent>
        </PortalToFollowElem>
      )}
    </div>
        </PortalToFollowElem>}
      </div>
    </>
  )
}
@ -145,15 +145,23 @@ const List = () => {
      return
    }

    if (anchorRef.current) {
    if (anchorRef.current && containerRef.current) {
      // Calculate a dynamic rootMargin: use 20% of the container height as the base value, clamped to the 100-200px range, for better responsiveness
      const containerHeight = containerRef.current.clientHeight
      const dynamicMargin = Math.max(100, Math.min(containerHeight * 0.2, 200))

      observer = new IntersectionObserver((entries) => {
        if (entries[0].isIntersecting && !isLoading && !error && hasMore)
          setSize((size: number) => size + 1)
      }, { rootMargin: '100px' })
      }, {
        root: containerRef.current,
        rootMargin: `${dynamicMargin}px`,
        threshold: 0.1, // Trigger when 10% of the anchor element is visible
      })
      observer.observe(anchorRef.current)
    }
    return () => observer?.disconnect()
  }, [isLoading, setSize, anchorRef, mutate, data, error])
  }, [isLoading, setSize, data, error])

  const { run: handleSearch } = useDebounceFn(() => {
    setSearchKeywords(keywords)
@ -49,7 +49,7 @@ const InputsFormContent = ({ showTip }: Props) => {
            <div className='flex h-6 items-center gap-1'>
              <div className='system-md-semibold text-text-secondary'>{form.label}</div>
              {!form.required && (
                <div className='system-xs-regular text-text-tertiary'>{t('appDebug.variableTable.optional')}</div>
                <div className='system-xs-regular text-text-tertiary'>{t('workflow.panel.optional')}</div>
              )}
            </div>
          )}
@ -49,7 +49,7 @@ const InputsFormContent = ({ showTip }: Props) => {
            <div className='flex h-6 items-center gap-1'>
              <div className='system-md-semibold text-text-secondary'>{form.label}</div>
              {!form.required && (
                <div className='system-xs-regular text-text-tertiary'>{t('appDebug.variableTable.optional')}</div>
                <div className='system-xs-regular text-text-tertiary'>{t('workflow.panel.optional')}</div>
              )}
            </div>
          )}
@ -1,4 +1,4 @@
import React, { useCallback, useEffect, useState } from 'react'
import React, { useCallback, useEffect, useMemo, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { useBoolean } from 'ahooks'
import { produce } from 'immer'

@ -45,7 +45,13 @@ const OpeningSettingModal = ({
  const [isShowConfirmAddVar, { setTrue: showConfirmAddVar, setFalse: hideConfirmAddVar }] = useBoolean(false)
  const [notIncludeKeys, setNotIncludeKeys] = useState<string[]>([])

  const isSaveDisabled = useMemo(() => !tempValue.trim(), [tempValue])

  const handleSave = useCallback((ignoreVariablesCheck?: boolean) => {
    // Prevent saving if opening statement is empty
    if (isSaveDisabled)
      return

    if (!ignoreVariablesCheck) {
      const keys = getInputKeys(tempValue)
      const promptKeys = promptVariables.map(item => item.key)

@ -75,7 +81,7 @@ const OpeningSettingModal = ({
      }
    })
    onSave(newOpening)
  }, [data, onSave, promptVariables, workflowVariables, showConfirmAddVar, tempSuggestedQuestions, tempValue])
  }, [data, onSave, promptVariables, workflowVariables, showConfirmAddVar, tempSuggestedQuestions, tempValue, isSaveDisabled])

  const cancelAutoAddVar = useCallback(() => {
    hideConfirmAddVar()

@ -217,6 +223,7 @@ const OpeningSettingModal = ({
            <Button
              variant='primary'
              onClick={() => handleSave()}
              disabled={isSaveDisabled}
            >
              {t('common.operation.save')}
            </Button>
@ -11,6 +11,7 @@ import type { FileEntity } from './types'
import { useFileStore } from './store'
import {
  fileUpload,
  getFileUploadErrorMessage,
  getSupportFileType,
  isAllowedFileExtension,
} from './utils'

@ -172,8 +173,9 @@ export const useFile = (fileConfig: FileUpload) => {
      onSuccessCallback: (res) => {
        handleUpdateFile({ ...uploadingFile, uploadedId: res.id, progress: 100 })
      },
      onErrorCallback: () => {
        notify({ type: 'error', message: t('common.fileUploader.uploadFromComputerUploadError') })
      onErrorCallback: (error?: any) => {
        const errorMessage = getFileUploadErrorMessage(error, t('common.fileUploader.uploadFromComputerUploadError'), t)
        notify({ type: 'error', message: errorMessage })
        handleUpdateFile({ ...uploadingFile, progress: -1 })
      },
    }, !!params.token)

@ -279,8 +281,9 @@ export const useFile = (fileConfig: FileUpload) => {
      onSuccessCallback: (res) => {
        handleUpdateFile({ ...uploadingFile, uploadedId: res.id, progress: 100 })
      },
      onErrorCallback: () => {
        notify({ type: 'error', message: t('common.fileUploader.uploadFromComputerUploadError') })
      onErrorCallback: (error?: any) => {
        const errorMessage = getFileUploadErrorMessage(error, t('common.fileUploader.uploadFromComputerUploadError'), t)
        notify({ type: 'error', message: errorMessage })
        handleUpdateFile({ ...uploadingFile, progress: -1 })
      },
    }, !!params.token)
@ -7,11 +7,30 @@ import { SupportUploadFileTypes } from '@/app/components/workflow/types'
import type { FileResponse } from '@/types/workflow'
import { TransferMethod } from '@/types/app'

/**
 * Get appropriate error message for file upload errors
 * @param error - The error object from upload failure
 * @param defaultMessage - Default error message to use if no specific error is matched
 * @param t - Translation function
 * @returns Localized error message
 */
export const getFileUploadErrorMessage = (error: any, defaultMessage: string, t: (key: string) => string): string => {
  const errorCode = error?.response?.code

  if (errorCode === 'forbidden')
    return error?.response?.message

  if (errorCode === 'file_extension_blocked')
    return t('common.fileUploader.fileExtensionBlocked')

  return defaultMessage
}

type FileUploadParams = {
  file: File
  onProgressCallback: (progress: number) => void
  onSuccessCallback: (res: { id: string }) => void
  onErrorCallback: () => void
  onErrorCallback: (error?: any) => void
}
type FileUpload = (v: FileUploadParams, isPublic?: boolean, url?: string) => void
export const fileUpload: FileUpload = ({

@ -37,8 +56,8 @@ export const fileUpload: FileUpload = ({
    .then((res: { id: string }) => {
      onSuccessCallback(res)
    })
    .catch(() => {
      onErrorCallback()
    .catch((error) => {
      onErrorCallback(error)
    })
}
@ -2,7 +2,7 @@ import { useCallback, useMemo, useRef, useState } from 'react'
import type { ClipboardEvent } from 'react'
import { useParams } from 'next/navigation'
import { useTranslation } from 'react-i18next'
import { imageUpload } from './utils'
import { getImageUploadErrorMessage, imageUpload } from './utils'
import { useToastContext } from '@/app/components/base/toast'
import { ALLOW_FILE_EXTENSIONS, TransferMethod } from '@/types/app'
import type { ImageFile, VisionSettings } from '@/types/app'

@ -81,8 +81,9 @@ export const useImageFiles = () => {
        filesRef.current = newFiles
        setFiles(newFiles)
      },
      onErrorCallback: () => {
        notify({ type: 'error', message: t('common.imageUploader.uploadFromComputerUploadError') })
      onErrorCallback: (error?: any) => {
        const errorMessage = getImageUploadErrorMessage(error, t('common.imageUploader.uploadFromComputerUploadError'), t)
        notify({ type: 'error', message: errorMessage })
        const newFiles = [...files.slice(0, index), { ...currentImageFile, progress: -1 }, ...files.slice(index + 1)]
        filesRef.current = newFiles
        setFiles(newFiles)

@ -158,8 +159,9 @@ export const useLocalFileUploader = ({ limit, disabled = false, onUpload }: useL
      onSuccessCallback: (res) => {
        onUpload({ ...imageFile, fileId: res.id, progress: 100 })
      },
      onErrorCallback: () => {
        notify({ type: 'error', message: t('common.imageUploader.uploadFromComputerUploadError') })
      onErrorCallback: (error?: any) => {
        const errorMessage = getImageUploadErrorMessage(error, t('common.imageUploader.uploadFromComputerUploadError'), t)
        notify({ type: 'error', message: errorMessage })
        onUpload({ ...imageFile, progress: -1 })
      },
    }, !!params.token)
@ -1,10 +1,29 @@
import { upload } from '@/service/base'

/**
 * Get appropriate error message for image upload errors
 * @param error - The error object from upload failure
 * @param defaultMessage - Default error message to use if no specific error is matched
 * @param t - Translation function
 * @returns Localized error message
 */
export const getImageUploadErrorMessage = (error: any, defaultMessage: string, t: (key: string) => string): string => {
  const errorCode = error?.response?.code

  if (errorCode === 'forbidden')
    return error?.response?.message

  if (errorCode === 'file_extension_blocked')
    return t('common.fileUploader.fileExtensionBlocked')

  return defaultMessage
}

type ImageUploadParams = {
  file: File
  onProgressCallback: (progress: number) => void
  onSuccessCallback: (res: { id: string }) => void
  onErrorCallback: () => void
  onErrorCallback: (error?: any) => void
}
type ImageUpload = (v: ImageUploadParams, isPublic?: boolean, url?: string) => void
export const imageUpload: ImageUpload = ({

@ -30,7 +49,7 @@ export const imageUpload: ImageUpload = ({
    .then((res: { id: string }) => {
      onSuccessCallback(res)
    })
    .catch(() => {
      onErrorCallback()
    .catch((error) => {
      onErrorCallback(error)
    })
}
@ -16,7 +16,7 @@ import Button from '@/app/components/base/button'
import Divider from '@/app/components/base/divider'
import { useProviderContext } from '@/context/provider-context'
import { Plan } from '@/app/components/billing/type'
import { imageUpload } from '@/app/components/base/image-uploader/utils'
import { getImageUploadErrorMessage, imageUpload } from '@/app/components/base/image-uploader/utils'
import { useToastContext } from '@/app/components/base/toast'
import { BubbleTextMod } from '@/app/components/base/icons/src/vender/solid/communication'
import {

@ -67,8 +67,9 @@ const CustomWebAppBrand = () => {
      setUploadProgress(100)
      setFileId(res.id)
    },
    onErrorCallback: () => {
      notify({ type: 'error', message: t('common.imageUploader.uploadFromComputerUploadError') })
    onErrorCallback: (error?: any) => {
      const errorMessage = getImageUploadErrorMessage(error, t('common.imageUploader.uploadFromComputerUploadError'), t)
      notify({ type: 'error', message: errorMessage })
      setUploadProgress(-1)
    },
  }, false, '/workspaces/custom-config/webapp-logo/upload')
@ -18,6 +18,7 @@ import { LanguagesSupported } from '@/i18n-config/language'
import { IS_CE_EDITION } from '@/config'
import { Theme } from '@/types/app'
import useTheme from '@/hooks/use-theme'
import { getFileUploadErrorMessage } from '@/app/components/base/file-uploader/utils'

type IFileUploaderProps = {
  fileList: FileItem[]

@ -132,7 +133,8 @@ const FileUploader = ({
        return Promise.resolve({ ...completeFile })
      })
      .catch((e) => {
        notify({ type: 'error', message: e?.response?.code === 'forbidden' ? e?.response?.message : t('datasetCreation.stepOne.uploader.failed') })
        const errorMessage = getFileUploadErrorMessage(e, t('datasetCreation.stepOne.uploader.failed'), t)
        notify({ type: 'error', message: errorMessage })
        onFileUpdate(fileItem, -2, fileListRef.current)
        return Promise.resolve({ ...fileItem })
      })
@ -8,6 +8,7 @@ import cn from '@/utils/classnames'
import type { CustomFile as File, FileItem } from '@/models/datasets'
import { ToastContext } from '@/app/components/base/toast'
import { upload } from '@/service/base'
import { getFileUploadErrorMessage } from '@/app/components/base/file-uploader/utils'
import I18n from '@/context/i18n'
import { LanguagesSupported } from '@/i18n-config/language'
import { IS_CE_EDITION } from '@/config'

@ -154,7 +155,8 @@ const LocalFile = ({
        return Promise.resolve({ ...completeFile })
      })
      .catch((e) => {
        notify({ type: 'error', message: e?.response?.code === 'forbidden' ? e?.response?.message : t('datasetCreation.stepOne.uploader.failed') })
        const errorMessage = getFileUploadErrorMessage(e, t('datasetCreation.stepOne.uploader.failed'), t)
        notify({ type: 'error', message: errorMessage })
        updateFile(fileItem, -2, fileListRef.current)
        return Promise.resolve({ ...fileItem })
      })
@ -12,6 +12,7 @@ import { ToastContext } from '@/app/components/base/toast'
import Button from '@/app/components/base/button'
import type { FileItem } from '@/models/datasets'
import { upload } from '@/service/base'
import { getFileUploadErrorMessage } from '@/app/components/base/file-uploader/utils'
import useSWR from 'swr'
import { fetchFileUploadConfig } from '@/service/common'
import SimplePieChart from '@/app/components/base/simple-pie-chart'

@ -74,7 +75,8 @@ const CSVUploader: FC<Props> = ({
        return Promise.resolve({ ...completeFile })
      })
      .catch((e) => {
        notify({ type: 'error', message: e?.response?.code === 'forbidden' ? e?.response?.message : t('datasetCreation.stepOne.uploader.failed') })
        const errorMessage = getFileUploadErrorMessage(e, t('datasetCreation.stepOne.uploader.failed'), t)
        notify({ type: 'error', message: errorMessage })
        const errorFile = {
          ...fileItem,
          progress: -2,
@ -146,8 +146,8 @@ const Form = () => {
      return
    }
    if (retrievalConfig.weights) {
      retrievalConfig.weights.vector_setting.embedding_provider_name = currentDataset?.embedding_model_provider || ''
      retrievalConfig.weights.vector_setting.embedding_model_name = currentDataset?.embedding_model || ''
      retrievalConfig.weights.vector_setting.embedding_provider_name = embeddingModel.provider || ''
      retrievalConfig.weights.vector_setting.embedding_model_name = embeddingModel.model || ''
    }
    try {
      setLoading(true)
@ -73,7 +73,7 @@ const DetailHeader = ({
  const { enable_marketplace } = useGlobalPublicStore(s => s.systemFeatures)

  const {
    installation_id,
    id,
    source,
    tenant_id,
    version,

@ -198,7 +198,7 @@ const DetailHeader = ({

  const handleDelete = useCallback(async () => {
    showDeleting()
    const res = await uninstallPlugin(installation_id)
    const res = await uninstallPlugin(id)
    hideDeleting()
    if (res.success) {
      hideDeleteConfirm()

@ -208,7 +208,7 @@ const DetailHeader = ({
    if (PluginCategoryEnum.tool.includes(category))
      invalidateAllToolProviders()
  }
  }, [showDeleting, installation_id, hideDeleting, hideDeleteConfirm, onUpdate, category, refreshModelProviders, invalidateAllToolProviders])
  }, [showDeleting, id, hideDeleting, hideDeleteConfirm, onUpdate, category, refreshModelProviders, invalidateAllToolProviders])

  return (
    <div className={cn('shrink-0 border-b border-divider-subtle bg-components-panel-bg p-4 pb-3', isReadmeView && 'border-b-0 bg-transparent p-0')}>

@ -356,7 +356,6 @@ const DetailHeader = ({
        content={
          <div>
            {t(`${i18nPrefix}.deleteContentLeft`)}<span className='system-md-semibold'>{label[locale]}</span>{t(`${i18nPrefix}.deleteContentRight`)}<br />
            {/* {usedInApps > 0 && t(`${i18nPrefix}.usedInApps`, { num: usedInApps })} */}
          </div>
        }
        onCancel={hideDeleteConfirm}
@ -72,6 +72,8 @@ const PluginPage = ({
    }
  }, [searchParams])

  const [uniqueIdentifier, setUniqueIdentifier] = useState<string | null>(null)

  const [dependencies, setDependencies] = useState<Dependency[]>([])
  const bundleInfo = useMemo(() => {
    const info = searchParams.get(BUNDLE_INFO_KEY)

@ -99,6 +101,7 @@ const PluginPage = ({

  useEffect(() => {
    (async () => {
      setUniqueIdentifier(null)
      await sleep(100)
      if (packageId) {
        const { data } = await fetchManifestFromMarketPlace(encodeURIComponent(packageId))

@ -108,6 +111,7 @@ const PluginPage = ({
          version: version.version,
          icon: `${MARKETPLACE_API_PREFIX}/plugins/${plugin.org}/${plugin.name}/icon`,
        })
        setUniqueIdentifier(packageId)
        showInstallFromMarketplace()
        return
      }

@ -283,10 +287,10 @@ const PluginPage = ({
      )}

      {
        isShowInstallFromMarketplace && (
        isShowInstallFromMarketplace && uniqueIdentifier && (
          <InstallFromMarketplace
            manifest={manifest! as PluginManifestInMarket}
            uniqueIdentifier={packageId}
            uniqueIdentifier={uniqueIdentifier}
            isBundle={!!bundleInfo}
            dependencies={dependencies}
            onClose={hideInstallFromMarketplace}
@ -100,7 +100,10 @@ const RunOnce: FC<IRunOnceProps> = ({
        : promptConfig.prompt_variables.map(item => (
          <div className='mt-4 w-full' key={item.key}>
            {item.type !== 'checkbox' && (
              <label className='system-md-semibold flex h-6 items-center text-text-secondary'>{item.name}</label>
              <div className='system-md-semibold flex h-6 items-center gap-1 text-text-secondary'>
                <div className='truncate'>{item.name}</div>
                {!item.required && <span className='system-xs-regular text-text-tertiary'>{t('workflow.panel.optional')}</span>}
              </div>
            )}
            <div className='mt-1'>
              {item.type === 'select' && (

@ -115,7 +118,7 @@ const RunOnce: FC<IRunOnceProps> = ({
              {item.type === 'string' && (
                <Input
                  type="text"
                  placeholder={`${item.name}${!item.required ? `(${t('appDebug.variableTable.optional')})` : ''}`}
                  placeholder={item.name}
                  value={inputs[item.key]}
                  onChange={(e: ChangeEvent<HTMLInputElement>) => { handleInputsChange({ ...inputsRef.current, [item.key]: e.target.value }) }}
                  maxLength={item.max_length || DEFAULT_VALUE_MAX_LEN}

@ -124,7 +127,7 @@ const RunOnce: FC<IRunOnceProps> = ({
              {item.type === 'paragraph' && (
                <Textarea
                  className='h-[104px] sm:text-xs'
                  placeholder={`${item.name}${!item.required ? `(${t('appDebug.variableTable.optional')})` : ''}`}
                  placeholder={item.name}
                  value={inputs[item.key]}
                  onChange={(e: ChangeEvent<HTMLTextAreaElement>) => { handleInputsChange({ ...inputsRef.current, [item.key]: e.target.value }) }}
                />

@ -132,7 +135,7 @@ const RunOnce: FC<IRunOnceProps> = ({
              {item.type === 'number' && (
                <Input
                  type="number"
                  placeholder={`${item.name}${!item.required ? `(${t('appDebug.variableTable.optional')})` : ''}`}
                  placeholder={item.name}
                  value={inputs[item.key]}
                  onChange={(e: ChangeEvent<HTMLInputElement>) => { handleInputsChange({ ...inputsRef.current, [item.key]: e.target.value }) }}
                />
@ -140,7 +140,7 @@ const FormItem: FC<Props> = ({
        <Input
          value={value || ''}
          onChange={e => onChange(e.target.value)}
          placeholder={t('appDebug.variableConfig.inputPlaceholder')!}
          placeholder={typeof payload.label === 'object' ? payload.label.variable : payload.label}
          autoFocus={autoFocus}
        />
      )

@ -152,7 +152,7 @@ const FormItem: FC<Props> = ({
          type="number"
          value={value || ''}
          onChange={e => onChange(e.target.value)}
          placeholder={t('appDebug.variableConfig.inputPlaceholder')!}
          placeholder={typeof payload.label === 'object' ? payload.label.variable : payload.label}
          autoFocus={autoFocus}
        />
      )

@ -163,7 +163,7 @@ const FormItem: FC<Props> = ({
        <Textarea
          value={value || ''}
          onChange={e => onChange(e.target.value)}
          placeholder={t('appDebug.variableConfig.inputPlaceholder')!}
          placeholder={typeof payload.label === 'object' ? payload.label.variable : payload.label}
          autoFocus={autoFocus}
        />
      )
@ -5,6 +5,7 @@ import type {
  IterationDurationMap,
  NodeTracing,
} from '@/types/workflow'
import { NodeRunningStatus } from '@/app/components/workflow/types'
import { Iteration } from '@/app/components/base/icons/src/vender/workflow'

type IterationLogTriggerProps = {

@ -54,6 +55,30 @@ const IterationLogTrigger = ({
    structuredList = instanceKeys
      .map(key => filterNodesForInstance(key))
      .filter(branchNodes => branchNodes.length > 0)

    // Also include failed iterations that might not be in duration map
    if (allExecutions && nodeInfo.details?.length) {
      const existingIterationIndices = new Set<number>()
      structuredList.forEach((iteration) => {
        iteration.forEach((node) => {
          if (node.execution_metadata?.iteration_index !== undefined)
            existingIterationIndices.add(node.execution_metadata.iteration_index)
        })
      })

      // Find failed iterations that are not in the structured list
      nodeInfo.details.forEach((iteration, index) => {
        if (!existingIterationIndices.has(index) && iteration.some(node => node.status === NodeRunningStatus.Failed))
          structuredList.push(iteration)
      })

      // Sort by iteration index to maintain order
      structuredList.sort((a, b) => {
        const aIndex = a[0]?.execution_metadata?.iteration_index ?? 0
        const bIndex = b[0]?.execution_metadata?.iteration_index ?? 0
        return aIndex - bIndex
      })
    }
  }
  else if (nodeInfo.details?.length) {
    structuredList = nodeInfo.details

@ -71,16 +96,36 @@ const IterationLogTrigger = ({
  else if (nodeInfo.metadata?.iterator_length)
    displayIterationCount = nodeInfo.metadata.iterator_length

  const getErrorCount = (details: NodeTracing[][] | undefined) => {
  const getErrorCount = (details: NodeTracing[][] | undefined, iterationNodeMeta?: any) => {
    if (!details || details.length === 0)
      return 0
    return details.reduce((acc, iteration) => {
      if (iteration.some(item => item.status === 'failed'))
        acc++
      return acc
    }, 0)

    // Use Set to track failed iteration indices to avoid duplicate counting
    const failedIterationIndices = new Set<number>()

    // Collect failed iteration indices from details
    details.forEach((iteration, index) => {
      if (iteration.some(item => item.status === NodeRunningStatus.Failed)) {
        // Try to get iteration index from first node, fallback to array index
        const iterationIndex = iteration[0]?.execution_metadata?.iteration_index ?? index
        failedIterationIndices.add(iterationIndex)
      }
    })

    // If allExecutions exists, check for additional failed iterations
    if (iterationNodeMeta?.iteration_duration_map && allExecutions) {
      // Find all failed iteration nodes
      allExecutions.forEach((exec) => {
        if (exec.execution_metadata?.iteration_id === nodeInfo.node_id
          && exec.status === NodeRunningStatus.Failed
          && exec.execution_metadata?.iteration_index !== undefined)
          failedIterationIndices.add(exec.execution_metadata.iteration_index)
      })
    }

    return failedIterationIndices.size
  }
  const errorCount = getErrorCount(nodeInfo.details)
  const errorCount = getErrorCount(nodeInfo.details, nodeInfo.execution_metadata)

  return (
    <Button
@@ -259,7 +259,6 @@ const translation = {
  variableTable: {
    key: 'Variablenschlüssel',
    name: 'Name des Benutzereingabefelds',
    optional: 'Optional',
    type: 'Eingabetyp',
    action: 'Aktionen',
    typeString: 'String',

@@ -346,7 +346,6 @@ const translation = {
  variableTable: {
    key: 'Variable Key',
    name: 'User Input Field Name',
    optional: 'Optional',
    type: 'Input Type',
    action: 'Actions',
    typeString: 'String',

@@ -742,6 +742,7 @@ const translation = {
    uploadFromComputerLimit: 'Upload {{type}} cannot exceed {{size}}',
    pasteFileLinkInvalid: 'Invalid file link',
    fileExtensionNotSupport: 'File extension not supported',
    fileExtensionBlocked: 'This file type is blocked for security reasons',
  },
  tag: {
    placeholder: 'All Tags',

@@ -255,7 +255,6 @@ const translation = {
  variableTable: {
    key: 'Clave de Variable',
    name: 'Nombre del Campo de Entrada del Usuario',
    optional: 'Opcional',
    type: 'Tipo de Entrada',
    action: 'Acciones',
    typeString: 'Cadena',

@@ -588,7 +588,6 @@ const translation = {
    typeString: 'رشته',
    name: 'نام فیلد ورودی کاربر',
    type: 'نوع ورودی',
    optional: 'اختیاری',
  },
  varKeyError: {},
  otherError: {

@@ -259,7 +259,6 @@ const translation = {
  variableTable: {
    key: 'Clé Variable',
    name: 'Nom du champ d\'entrée de l\'utilisateur',
    optional: 'Facultatif',
    type: 'Type d\'Entrée',
    action: 'Actions',
    typeString: 'Chaîne',

@@ -279,7 +279,6 @@ const translation = {
  variableTable: {
    key: 'वेरिएबल कुंजी',
    name: 'उपयोगकर्ता इनपुट फ़ील्ड नाम',
    optional: 'वैकल्पिक',
    type: 'इनपुट प्रकार',
    action: 'क्रियाएँ',
    typeString: 'स्ट्रिंग',

@@ -325,7 +325,6 @@ const translation = {
  variableTable: {
    action: 'Tindakan',
    typeString: 'String',
    optional: 'Fakultatif',
    typeSelect: 'Pilih',
    type: 'Jenis Masukan',
    key: 'Kunci Variabel',

@@ -281,7 +281,6 @@ const translation = {
  variableTable: {
    key: 'Chiave Variabile',
    name: 'Nome Campo Input Utente',
    optional: 'Opzionale',
    type: 'Tipo di Input',
    action: 'Azioni',
    typeString: 'Stringa',

@@ -340,7 +340,6 @@ const translation = {
  variableTable: {
    key: '変数キー',
    name: 'ユーザー入力フィールド名',
    optional: 'オプション',
    type: '入力タイプ',
    action: 'アクション',
    typeString: '文字列',

@@ -255,7 +255,6 @@ const translation = {
  variableTable: {
    key: '변수 키',
    name: '사용자 입력 필드명',
    optional: '옵션',
    type: '입력 타입',
    action: '액션',
    typeString: '문자열',

@@ -277,7 +277,6 @@ const translation = {
  variableTable: {
    key: 'Klucz Zmiennej',
    name: 'Nazwa Pola Wejściowego Użytkownika',
    optional: 'Opcjonalnie',
    type: 'Typ Wejścia',
    action: 'Akcje',
    typeString: 'String',

@@ -261,7 +261,6 @@ const translation = {
  variableTable: {
    key: 'Chave da Variável',
    name: 'Nome do Campo de Entrada do Usuário',
    optional: 'Opcional',
    type: 'Tipo de Entrada',
    action: 'Ações',
    typeString: 'Texto',

@@ -261,7 +261,6 @@ const translation = {
  variableTable: {
    key: 'Cheie variabilă',
    name: 'Nume câmp de intrare utilizator',
    optional: 'Opțional',
    type: 'Tip intrare',
    action: 'Acțiuni',
    typeString: 'Șir',

@@ -327,7 +327,6 @@ const translation = {
  variableTable: {
    key: 'Ключ переменной',
    name: 'Имя поля пользовательского ввода',
    optional: 'Необязательно',
    type: 'Тип ввода',
    action: 'Действия',
    typeString: 'Строка',

@@ -350,7 +350,6 @@ const translation = {
  },
  variableTable: {
    action: 'Dejanja',
    optional: 'Neobvezno',
    typeString: 'Niz',
    typeSelect: 'Izbrati',
    type: 'Vrsta vnosa',

@@ -323,7 +323,6 @@ const translation = {
    timeoutExceeded: 'ผลลัพธ์จะไม่แสดงเนื่องจากหมดเวลา โปรดดูบันทึกเพื่อรวบรวมผลลัพธ์ที่สมบูรณ์',
  },
  variableTable: {
    optional: 'เสริม',
    key: 'ปุ่มตัวแปร',
    typeString: 'เชือก',
    typeSelect: 'เลือก',

@@ -327,7 +327,6 @@ const translation = {
  variableTable: {
    key: 'Değişken Anahtarı',
    name: 'Kullanıcı Giriş Alanı Adı',
    optional: 'İsteğe Bağlı',
    type: 'Giriş Tipi',
    action: 'Aksiyonlar',
    typeString: 'Metin',

@@ -273,7 +273,6 @@ const translation = {
  variableTable: {
    key: 'Ключ змінної', // Variable Key
    name: 'Назва поля для введення користувача', // User Input Field Name
    optional: 'Додатково', // Optional
    type: 'Тип введення', // Input Type
    action: 'Дії', // Actions
    typeString: 'Рядок', // String

@@ -255,7 +255,6 @@ const translation = {
  variableTable: {
    key: 'Khóa biến',
    name: 'Tên trường nhập liệu người dùng',
    optional: 'Tùy chọn',
    type: 'Loại nhập liệu',
    action: 'Hành động',
    typeString: 'Chuỗi',

@@ -342,7 +342,6 @@ const translation = {
  variableTable: {
    key: '变量 Key',
    name: '字段名称',
    optional: '可选',
    type: '类型',
    action: '操作',
    typeString: '文本',

@@ -736,6 +736,7 @@ const translation = {
    uploadFromComputerLimit: '上传 {{type}} 不能超过 {{size}}',
    pasteFileLinkInvalid: '文件链接无效',
    fileExtensionNotSupport: '文件类型不支持',
    fileExtensionBlocked: '出于安全考虑,该文件类型已被禁止上传',
  },
  tag: {
    placeholder: '全部标签',

@@ -255,7 +255,6 @@ const translation = {
  variableTable: {
    key: '變數 Key',
    name: '欄位名稱',
    optional: '可選',
    type: '型別',
    action: '操作',
    typeString: '文字',
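Only the English and Simplified Chinese bundles gain the new `fileExtensionBlocked` message here; the other locales would rely on the app's i18n fallback until translated. For reference, a hedged sketch of how such a key is consumed, assuming the react-i18next setup and the `common.fileUploader` key path implied by the neighbouring entries; the component name is hypothetical:

import { useTranslation } from 'react-i18next'

const BlockedFileNotice = () => { // hypothetical component, for illustration only
  const { t } = useTranslation()
  // Key path assumed from the surrounding fileUploader entries in common.ts
  return <span>{t('common.fileUploader.fileExtensionBlocked')}</span>
}

export default BlockedFileNotice
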
@@ -14,3 +14,39 @@ describe('makeProviderQuery', () => {
    expect(buildProviderQuery('ABC?DEF')).toBe('provider=ABC%3FDEF')
  })
})

describe('Tools Utilities', () => {
  describe('buildProviderQuery', () => {
    it('should build query string with provider parameter', () => {
      const result = buildProviderQuery('openai')
      expect(result).toBe('provider=openai')
    })

    it('should handle provider names with special characters', () => {
      const result = buildProviderQuery('provider-name')
      expect(result).toBe('provider=provider-name')
    })

    it('should handle empty string', () => {
      const result = buildProviderQuery('')
      expect(result).toBe('provider=')
    })

    it('should URL encode special characters', () => {
      const result = buildProviderQuery('provider name')
      expect(result).toBe('provider=provider+name')
    })

    it('should handle Unicode characters', () => {
      const result = buildProviderQuery('提供者')
      expect(result).toContain('provider=')
      expect(decodeURIComponent(result)).toBe('provider=提供者')
    })

    it('should handle provider names with slashes', () => {
      const result = buildProviderQuery('langgenius/openai/gpt-4')
      expect(result).toContain('provider=')
      expect(decodeURIComponent(result)).toBe('provider=langgenius/openai/gpt-4')
    })
  })
})
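The expectations above ('+' for spaces, '%3F' for '?') point to URLSearchParams rather than encodeURIComponent. A minimal sketch consistent with every assertion in this suite, assumed rather than taken from the committed source:

export const buildProviderQuery = (provider: string): string => {
  const query = new URLSearchParams()
  query.set('provider', provider) // form-encodes values: spaces become '+', '?' becomes '%3F'
  return query.toString()
}
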
@@ -0,0 +1,106 @@
/**
 * Test suite for app redirection utility functions
 * Tests navigation path generation based on user permissions and app modes
 */
import { getRedirection, getRedirectionPath } from './app-redirection'

describe('app-redirection', () => {
  /**
   * Tests getRedirectionPath which determines the correct path based on:
   * - User's editor permissions
   * - App mode (workflow, advanced-chat, chat, completion, agent-chat)
   */
  describe('getRedirectionPath', () => {
    test('returns overview path when user is not editor', () => {
      const app = { id: 'app-123', mode: 'chat' as const }
      const result = getRedirectionPath(false, app)
      expect(result).toBe('/app/app-123/overview')
    })

    test('returns workflow path for workflow mode when user is editor', () => {
      const app = { id: 'app-123', mode: 'workflow' as const }
      const result = getRedirectionPath(true, app)
      expect(result).toBe('/app/app-123/workflow')
    })

    test('returns workflow path for advanced-chat mode when user is editor', () => {
      const app = { id: 'app-123', mode: 'advanced-chat' as const }
      const result = getRedirectionPath(true, app)
      expect(result).toBe('/app/app-123/workflow')
    })

    test('returns configuration path for chat mode when user is editor', () => {
      const app = { id: 'app-123', mode: 'chat' as const }
      const result = getRedirectionPath(true, app)
      expect(result).toBe('/app/app-123/configuration')
    })

    test('returns configuration path for completion mode when user is editor', () => {
      const app = { id: 'app-123', mode: 'completion' as const }
      const result = getRedirectionPath(true, app)
      expect(result).toBe('/app/app-123/configuration')
    })

    test('returns configuration path for agent-chat mode when user is editor', () => {
      const app = { id: 'app-456', mode: 'agent-chat' as const }
      const result = getRedirectionPath(true, app)
      expect(result).toBe('/app/app-456/configuration')
    })

    test('handles different app IDs', () => {
      const app1 = { id: 'abc-123', mode: 'chat' as const }
      const app2 = { id: 'xyz-789', mode: 'workflow' as const }

      expect(getRedirectionPath(false, app1)).toBe('/app/abc-123/overview')
      expect(getRedirectionPath(true, app2)).toBe('/app/xyz-789/workflow')
    })
  })

  /**
   * Tests getRedirection which combines path generation with a redirect callback
   */
  describe('getRedirection', () => {
    /**
     * Tests that the redirection function is called with the correct path
     */
    test('calls redirection function with correct path for non-editor', () => {
      const app = { id: 'app-123', mode: 'chat' as const }
      const mockRedirect = jest.fn()

      getRedirection(false, app, mockRedirect)

      expect(mockRedirect).toHaveBeenCalledWith('/app/app-123/overview')
      expect(mockRedirect).toHaveBeenCalledTimes(1)
    })

    test('calls redirection function with workflow path for editor', () => {
      const app = { id: 'app-123', mode: 'workflow' as const }
      const mockRedirect = jest.fn()

      getRedirection(true, app, mockRedirect)

      expect(mockRedirect).toHaveBeenCalledWith('/app/app-123/workflow')
      expect(mockRedirect).toHaveBeenCalledTimes(1)
    })

    test('calls redirection function with configuration path for chat mode editor', () => {
      const app = { id: 'app-123', mode: 'chat' as const }
      const mockRedirect = jest.fn()

      getRedirection(true, app, mockRedirect)

      expect(mockRedirect).toHaveBeenCalledWith('/app/app-123/configuration')
      expect(mockRedirect).toHaveBeenCalledTimes(1)
    })

    test('works with different redirection functions', () => {
      const app = { id: 'app-123', mode: 'workflow' as const }
      const paths: string[] = []
      const customRedirect = (path: string) => paths.push(path)

      getRedirection(true, app, customRedirect)

      expect(paths).toEqual(['/app/app-123/workflow'])
    })
  })
})
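Taken together, the assertions pin the routing rule down to three cases. A sketch under that reading; the inline type and parameter names are assumptions, not the committed source:

type AppBasicInfo = { id: string; mode: 'workflow' | 'advanced-chat' | 'chat' | 'completion' | 'agent-chat' }

export const getRedirectionPath = (isEditor: boolean, app: AppBasicInfo): string => {
  if (!isEditor)
    return `/app/${app.id}/overview`
  // Workflow-style apps open in the workflow editor; everything else in configuration
  return (app.mode === 'workflow' || app.mode === 'advanced-chat')
    ? `/app/${app.id}/workflow`
    : `/app/${app.id}/configuration`
}

export const getRedirection = (isEditor: boolean, app: AppBasicInfo, redirect: (href: string) => void) =>
  redirect(getRedirectionPath(isEditor, app))
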
@@ -1,6 +1,18 @@
/**
 * Test suite for the classnames utility function
 * This utility combines the classnames library with tailwind-merge
 * to handle conditional CSS classes and merge conflicting Tailwind classes
 */
import cn from './classnames'

describe('classnames', () => {
  /**
   * Tests basic classnames library features:
   * - String concatenation
   * - Array handling
   * - Falsy value filtering
   * - Object-based conditional classes
   */
  test('classnames libs feature', () => {
    expect(cn('foo')).toBe('foo')
    expect(cn('foo', 'bar')).toBe('foo bar')

@@ -17,6 +29,14 @@ describe('classnames', () => {
    })).toBe('foo baz')
  })

  /**
   * Tests tailwind-merge functionality:
   * - Conflicting class resolution (last one wins)
   * - Modifier handling (hover, focus, etc.)
   * - Important prefix (!)
   * - Custom color classes
   * - Arbitrary values
   */
  test('tailwind-merge', () => {
    /* eslint-disable tailwindcss/classnames-order */
    expect(cn('p-0')).toBe('p-0')

@@ -44,6 +64,10 @@ describe('classnames', () => {
    expect(cn('text-3.5xl text-black')).toBe('text-3.5xl text-black')
  })

  /**
   * Tests the integration of classnames and tailwind-merge:
   * - Object-based conditional classes with Tailwind conflict resolution
   */
  test('classnames combined with tailwind-merge', () => {
    expect(cn('text-right', {
      'text-center': true,

@@ -53,4 +77,81 @@ describe('classnames', () => {
      'text-center': false,
    })).toBe('text-right')
  })

  /**
   * Tests handling of multiple mixed argument types:
   * - Strings, arrays, and objects in a single call
   * - Tailwind merge working across different argument types
   */
  test('multiple mixed argument types', () => {
    expect(cn('foo', ['bar', 'baz'], { qux: true, quux: false })).toBe('foo bar baz qux')
    expect(cn('p-4', ['p-2', 'm-4'], { 'text-left': true, 'text-right': true })).toBe('p-2 m-4 text-right')
  })

  /**
   * Tests nested array handling:
   * - Deep array flattening
   * - Tailwind merge with nested structures
   */
  test('nested arrays', () => {
    expect(cn(['foo', ['bar', 'baz']])).toBe('foo bar baz')
    expect(cn(['p-4', ['p-2', 'text-center']])).toBe('p-2 text-center')
  })

  /**
   * Tests empty input handling:
   * - Empty strings, arrays, and objects
   * - Mixed empty and non-empty values
   */
  test('empty inputs', () => {
    expect(cn('')).toBe('')
    expect(cn([])).toBe('')
    expect(cn({})).toBe('')
    expect(cn('', [], {})).toBe('')
    expect(cn('foo', '', 'bar')).toBe('foo bar')
  })

  /**
   * Tests number input handling:
   * - Truthy numbers converted to strings
   * - Zero treated as falsy
   */
  test('numbers as inputs', () => {
    expect(cn(1)).toBe('1')
    expect(cn(0)).toBe('')
    expect(cn('foo', 1, 'bar')).toBe('foo 1 bar')
  })

  /**
   * Tests multiple object arguments:
   * - Object merging
   * - Tailwind conflict resolution across objects
   */
  test('multiple objects', () => {
    expect(cn({ foo: true }, { bar: true })).toBe('foo bar')
    expect(cn({ foo: true, bar: false }, { bar: true, baz: true })).toBe('foo bar baz')
    expect(cn({ 'p-4': true }, { 'p-2': true })).toBe('p-2')
  })

  /**
   * Tests complex edge cases:
   * - Mixed falsy values
   * - Nested arrays with falsy values
   * - Multiple conflicting Tailwind classes
   */
  test('complex edge cases', () => {
    expect(cn('foo', null, undefined, false, 'bar', 0, 1, '')).toBe('foo bar 1')
    expect(cn(['foo', null, ['bar', undefined, 'baz']])).toBe('foo bar baz')
    expect(cn('text-sm', { 'text-lg': false, 'text-xl': true }, 'text-2xl')).toBe('text-2xl')
  })

  /**
   * Tests important (!) modifier behavior:
   * - Important modifiers in objects
   * - Conflict resolution with important prefix
   */
  test('important modifier with objects', () => {
    expect(cn({ '!font-medium': true }, { '!font-bold': true })).toBe('!font-bold')
    expect(cn('font-normal', { '!font-bold': true })).toBe('font-normal !font-bold')
  })
})
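The suite's header comment describes the utility as classnames piped through tailwind-merge, so the module under test plausibly reduces to a one-liner. A sketch under that assumption, not the committed file:

import classNames from 'classnames'
import { twMerge } from 'tailwind-merge'

// classnames resolves strings/arrays/objects and drops falsy values;
// twMerge then resolves conflicting Tailwind classes (last one wins)
const cn = (...cls: classNames.ArgumentArray) => twMerge(classNames(...cls))
export default cn
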
@@ -0,0 +1,109 @@
import { writeTextToClipboard } from './clipboard'

describe('Clipboard Utilities', () => {
  describe('writeTextToClipboard', () => {
    afterEach(() => {
      jest.restoreAllMocks()
    })

    it('should use navigator.clipboard.writeText when available', async () => {
      const mockWriteText = jest.fn().mockResolvedValue(undefined)
      Object.defineProperty(navigator, 'clipboard', {
        value: { writeText: mockWriteText },
        writable: true,
        configurable: true,
      })

      await writeTextToClipboard('test text')
      expect(mockWriteText).toHaveBeenCalledWith('test text')
    })

    it('should fallback to execCommand when clipboard API not available', async () => {
      Object.defineProperty(navigator, 'clipboard', {
        value: undefined,
        writable: true,
        configurable: true,
      })

      const mockExecCommand = jest.fn().mockReturnValue(true)
      document.execCommand = mockExecCommand

      const appendChildSpy = jest.spyOn(document.body, 'appendChild')
      const removeChildSpy = jest.spyOn(document.body, 'removeChild')

      await writeTextToClipboard('fallback text')

      expect(appendChildSpy).toHaveBeenCalled()
      expect(mockExecCommand).toHaveBeenCalledWith('copy')
      expect(removeChildSpy).toHaveBeenCalled()
    })

    it('should handle execCommand failure', async () => {
      Object.defineProperty(navigator, 'clipboard', {
        value: undefined,
        writable: true,
        configurable: true,
      })

      const mockExecCommand = jest.fn().mockReturnValue(false)
      document.execCommand = mockExecCommand

      await expect(writeTextToClipboard('fail text')).rejects.toThrow()
    })

    it('should handle execCommand exception', async () => {
      Object.defineProperty(navigator, 'clipboard', {
        value: undefined,
        writable: true,
        configurable: true,
      })

      const mockExecCommand = jest.fn().mockImplementation(() => {
        throw new Error('execCommand error')
      })
      document.execCommand = mockExecCommand

      await expect(writeTextToClipboard('error text')).rejects.toThrow('execCommand error')
    })

    it('should clean up textarea after fallback', async () => {
      Object.defineProperty(navigator, 'clipboard', {
        value: undefined,
        writable: true,
        configurable: true,
      })

      document.execCommand = jest.fn().mockReturnValue(true)
      const removeChildSpy = jest.spyOn(document.body, 'removeChild')

      await writeTextToClipboard('cleanup test')

      expect(removeChildSpy).toHaveBeenCalled()
    })

    it('should handle empty string', async () => {
      const mockWriteText = jest.fn().mockResolvedValue(undefined)
      Object.defineProperty(navigator, 'clipboard', {
        value: { writeText: mockWriteText },
        writable: true,
        configurable: true,
      })

      await writeTextToClipboard('')
      expect(mockWriteText).toHaveBeenCalledWith('')
    })

    it('should handle special characters', async () => {
      const mockWriteText = jest.fn().mockResolvedValue(undefined)
      Object.defineProperty(navigator, 'clipboard', {
        value: { writeText: mockWriteText },
        writable: true,
        configurable: true,
      })

      const specialText = 'Test\n\t"quotes"\n中文\n😀'
      await writeTextToClipboard(specialText)
      expect(mockWriteText).toHaveBeenCalledWith(specialText)
    })
  })
})
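A sketch of the two-tier strategy these tests exercise: the modern Clipboard API first, with the deprecated execCommand('copy') textarea trick as a fallback. Assumed, not the committed source:

export async function writeTextToClipboard(text: string): Promise<void> {
  if (navigator.clipboard?.writeText)
    return navigator.clipboard.writeText(text)

  // Fallback for older browsers and non-secure contexts
  const textarea = document.createElement('textarea')
  textarea.value = text
  document.body.appendChild(textarea)
  textarea.select()
  try {
    if (!document.execCommand('copy'))
      throw new Error('copy command failed')
  }
  finally {
    document.body.removeChild(textarea) // cleanup runs on success and failure alike
  }
}
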
@@ -0,0 +1,230 @@
import { mergeValidCompletionParams } from './completion-params'
import type { FormValue, ModelParameterRule } from '@/app/components/header/account-setting/model-provider-page/declarations'

describe('completion-params', () => {
  describe('mergeValidCompletionParams', () => {
    test('returns empty params and removedDetails for undefined oldParams', () => {
      const rules: ModelParameterRule[] = []
      const result = mergeValidCompletionParams(undefined, rules)

      expect(result.params).toEqual({})
      expect(result.removedDetails).toEqual({})
    })

    test('returns empty params and removedDetails for empty oldParams', () => {
      const rules: ModelParameterRule[] = []
      const result = mergeValidCompletionParams({}, rules)

      expect(result.params).toEqual({})
      expect(result.removedDetails).toEqual({})
    })

    test('validates int type parameter within range', () => {
      const rules: ModelParameterRule[] = [
        { name: 'max_tokens', type: 'int', min: 1, max: 4096, label: { en_US: 'Max Tokens', zh_Hans: '最大标记' }, required: false },
      ]
      const oldParams: FormValue = { max_tokens: 100 }
      const result = mergeValidCompletionParams(oldParams, rules)

      expect(result.params).toEqual({ max_tokens: 100 })
      expect(result.removedDetails).toEqual({})
    })

    test('removes int parameter below minimum', () => {
      const rules: ModelParameterRule[] = [
        { name: 'max_tokens', type: 'int', min: 1, max: 4096, label: { en_US: 'Max Tokens', zh_Hans: '最大标记' }, required: false },
      ]
      const oldParams: FormValue = { max_tokens: 0 }
      const result = mergeValidCompletionParams(oldParams, rules)

      expect(result.params).toEqual({})
      expect(result.removedDetails).toEqual({ max_tokens: 'out of range (1-4096)' })
    })

    test('removes int parameter above maximum', () => {
      const rules: ModelParameterRule[] = [
        { name: 'max_tokens', type: 'int', min: 1, max: 4096, label: { en_US: 'Max Tokens', zh_Hans: '最大标记' }, required: false },
      ]
      const oldParams: FormValue = { max_tokens: 5000 }
      const result = mergeValidCompletionParams(oldParams, rules)

      expect(result.params).toEqual({})
      expect(result.removedDetails).toEqual({ max_tokens: 'out of range (1-4096)' })
    })

    test('removes int parameter with invalid type', () => {
      const rules: ModelParameterRule[] = [
        { name: 'max_tokens', type: 'int', min: 1, max: 4096, label: { en_US: 'Max Tokens', zh_Hans: '最大标记' }, required: false },
      ]
      const oldParams: FormValue = { max_tokens: 'not a number' as any }
      const result = mergeValidCompletionParams(oldParams, rules)

      expect(result.params).toEqual({})
      expect(result.removedDetails).toEqual({ max_tokens: 'invalid type' })
    })

    test('validates float type parameter', () => {
      const rules: ModelParameterRule[] = [
        { name: 'temperature', type: 'float', min: 0, max: 2, label: { en_US: 'Temperature', zh_Hans: '温度' }, required: false },
      ]
      const oldParams: FormValue = { temperature: 0.7 }
      const result = mergeValidCompletionParams(oldParams, rules)

      expect(result.params).toEqual({ temperature: 0.7 })
      expect(result.removedDetails).toEqual({})
    })

    test('validates float at boundary values', () => {
      const rules: ModelParameterRule[] = [
        { name: 'temperature', type: 'float', min: 0, max: 2, label: { en_US: 'Temperature', zh_Hans: '温度' }, required: false },
      ]

      const result1 = mergeValidCompletionParams({ temperature: 0 }, rules)
      expect(result1.params).toEqual({ temperature: 0 })

      const result2 = mergeValidCompletionParams({ temperature: 2 }, rules)
      expect(result2.params).toEqual({ temperature: 2 })
    })

    test('validates boolean type parameter', () => {
      const rules: ModelParameterRule[] = [
        { name: 'stream', type: 'boolean', label: { en_US: 'Stream', zh_Hans: '流' }, required: false },
      ]
      const oldParams: FormValue = { stream: true }
      const result = mergeValidCompletionParams(oldParams, rules)

      expect(result.params).toEqual({ stream: true })
      expect(result.removedDetails).toEqual({})
    })

    test('removes boolean parameter with invalid type', () => {
      const rules: ModelParameterRule[] = [
        { name: 'stream', type: 'boolean', label: { en_US: 'Stream', zh_Hans: '流' }, required: false },
      ]
      const oldParams: FormValue = { stream: 'yes' as any }
      const result = mergeValidCompletionParams(oldParams, rules)

      expect(result.params).toEqual({})
      expect(result.removedDetails).toEqual({ stream: 'invalid type' })
    })

    test('validates string type parameter', () => {
      const rules: ModelParameterRule[] = [
        { name: 'model', type: 'string', label: { en_US: 'Model', zh_Hans: '模型' }, required: false },
      ]
      const oldParams: FormValue = { model: 'gpt-4' }
      const result = mergeValidCompletionParams(oldParams, rules)

      expect(result.params).toEqual({ model: 'gpt-4' })
      expect(result.removedDetails).toEqual({})
    })

    test('validates string parameter with options', () => {
      const rules: ModelParameterRule[] = [
        { name: 'model', type: 'string', options: ['gpt-3.5-turbo', 'gpt-4'], label: { en_US: 'Model', zh_Hans: '模型' }, required: false },
      ]
      const oldParams: FormValue = { model: 'gpt-4' }
      const result = mergeValidCompletionParams(oldParams, rules)

      expect(result.params).toEqual({ model: 'gpt-4' })
      expect(result.removedDetails).toEqual({})
    })

    test('removes string parameter with invalid option', () => {
      const rules: ModelParameterRule[] = [
        { name: 'model', type: 'string', options: ['gpt-3.5-turbo', 'gpt-4'], label: { en_US: 'Model', zh_Hans: '模型' }, required: false },
      ]
      const oldParams: FormValue = { model: 'invalid-model' }
      const result = mergeValidCompletionParams(oldParams, rules)

      expect(result.params).toEqual({})
      expect(result.removedDetails).toEqual({ model: 'unsupported option' })
    })

    test('validates text type parameter', () => {
      const rules: ModelParameterRule[] = [
        { name: 'prompt', type: 'text', label: { en_US: 'Prompt', zh_Hans: '提示' }, required: false },
      ]
      const oldParams: FormValue = { prompt: 'Hello world' }
      const result = mergeValidCompletionParams(oldParams, rules)

      expect(result.params).toEqual({ prompt: 'Hello world' })
      expect(result.removedDetails).toEqual({})
    })

    test('removes unsupported parameters', () => {
      const rules: ModelParameterRule[] = [
        { name: 'temperature', type: 'float', min: 0, max: 2, label: { en_US: 'Temperature', zh_Hans: '温度' }, required: false },
      ]
      const oldParams: FormValue = { temperature: 0.7, unsupported_param: 'value' }
      const result = mergeValidCompletionParams(oldParams, rules)

      expect(result.params).toEqual({ temperature: 0.7 })
      expect(result.removedDetails).toEqual({ unsupported_param: 'unsupported' })
    })

    test('keeps stop parameter in advanced mode even without rule', () => {
      const rules: ModelParameterRule[] = []
      const oldParams: FormValue = { stop: ['END'] }
      const result = mergeValidCompletionParams(oldParams, rules, true)

      expect(result.params).toEqual({ stop: ['END'] })
      expect(result.removedDetails).toEqual({})
    })

    test('removes stop parameter in normal mode without rule', () => {
      const rules: ModelParameterRule[] = []
      const oldParams: FormValue = { stop: ['END'] }
      const result = mergeValidCompletionParams(oldParams, rules, false)

      expect(result.params).toEqual({})
      expect(result.removedDetails).toEqual({ stop: 'unsupported' })
    })

    test('handles multiple parameters with mixed validity', () => {
      const rules: ModelParameterRule[] = [
        { name: 'temperature', type: 'float', min: 0, max: 2, label: { en_US: 'Temperature', zh_Hans: '温度' }, required: false },
        { name: 'max_tokens', type: 'int', min: 1, max: 4096, label: { en_US: 'Max Tokens', zh_Hans: '最大标记' }, required: false },
        { name: 'model', type: 'string', options: ['gpt-4'], label: { en_US: 'Model', zh_Hans: '模型' }, required: false },
      ]
      const oldParams: FormValue = {
        temperature: 0.7,
        max_tokens: 5000,
        model: 'gpt-4',
        unsupported: 'value',
      }
      const result = mergeValidCompletionParams(oldParams, rules)

      expect(result.params).toEqual({
        temperature: 0.7,
        model: 'gpt-4',
      })
      expect(result.removedDetails).toEqual({
        max_tokens: 'out of range (1-4096)',
        unsupported: 'unsupported',
      })
    })

    test('handles parameters without min/max constraints', () => {
      const rules: ModelParameterRule[] = [
        { name: 'value', type: 'int', label: { en_US: 'Value', zh_Hans: '值' }, required: false },
      ]
      const oldParams: FormValue = { value: 999999 }
      const result = mergeValidCompletionParams(oldParams, rules)

      expect(result.params).toEqual({ value: 999999 })
      expect(result.removedDetails).toEqual({})
    })

    test('removes parameter with unsupported rule type', () => {
      const rules: ModelParameterRule[] = [
        { name: 'custom', type: 'unknown_type', label: { en_US: 'Custom', zh_Hans: '自定义' }, required: false } as any,
      ]
      const oldParams: FormValue = { custom: 'value' }
      const result = mergeValidCompletionParams(oldParams, rules)

      expect(result.params).toEqual({})
      expect(result.removedDetails.custom).toContain('unsupported rule type')
    })
  })
})
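The removal reasons these tests assert ('invalid type', 'out of range (…)', 'unsupported option', 'unsupported', 'unsupported rule type') suggest a single validation pass over the old params. A condensed sketch of that loop; the structure and exact branching are assumptions, not the committed source:

export function mergeValidCompletionParams(
  oldParams: FormValue | undefined,
  rules: ModelParameterRule[],
  isAdvancedMode = false,
) {
  const params: FormValue = {}
  const removedDetails: Record<string, string> = {}

  Object.entries(oldParams ?? {}).forEach(([key, value]) => {
    // 'stop' sequences are special-cased in advanced mode even without a rule
    if (isAdvancedMode && key === 'stop') {
      params[key] = value
      return
    }
    const rule = rules.find(r => r.name === key)
    if (!rule) {
      removedDetails[key] = 'unsupported'
      return
    }
    switch (rule.type) {
      case 'int':
      case 'float':
        if (typeof value !== 'number')
          removedDetails[key] = 'invalid type'
        else if ((rule.min !== undefined && value < rule.min) || (rule.max !== undefined && value > rule.max))
          removedDetails[key] = `out of range (${rule.min}-${rule.max})`
        else
          params[key] = value
        break
      case 'boolean':
        if (typeof value === 'boolean')
          params[key] = value
        else
          removedDetails[key] = 'invalid type'
        break
      case 'string':
      case 'text':
        if (rule.options?.length && !rule.options.includes(value as string))
          removedDetails[key] = 'unsupported option'
        else
          params[key] = value
        break
      default:
        removedDetails[key] = `unsupported rule type: ${(rule as any).type}`
    }
  })
  return { params, removedDetails }
}
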
@@ -0,0 +1,77 @@
import { searchEmoji } from './emoji'
import { SearchIndex } from 'emoji-mart'

jest.mock('emoji-mart', () => ({
  SearchIndex: {
    search: jest.fn(),
  },
}))

describe('Emoji Utilities', () => {
  describe('searchEmoji', () => {
    beforeEach(() => {
      jest.clearAllMocks()
    })

    it('should return emoji natives for search results', async () => {
      const mockEmojis = [
        { skins: [{ native: '😀' }] },
        { skins: [{ native: '😃' }] },
        { skins: [{ native: '😄' }] },
      ]
      ;(SearchIndex.search as jest.Mock).mockResolvedValue(mockEmojis)

      const result = await searchEmoji('smile')
      expect(result).toEqual(['😀', '😃', '😄'])
    })

    it('should return empty array when no results', async () => {
      ;(SearchIndex.search as jest.Mock).mockResolvedValue([])

      const result = await searchEmoji('nonexistent')
      expect(result).toEqual([])
    })

    it('should return empty array when search returns null', async () => {
      ;(SearchIndex.search as jest.Mock).mockResolvedValue(null)

      const result = await searchEmoji('test')
      expect(result).toEqual([])
    })

    it('should handle search with empty string', async () => {
      ;(SearchIndex.search as jest.Mock).mockResolvedValue([])

      const result = await searchEmoji('')
      expect(result).toEqual([])
      expect(SearchIndex.search).toHaveBeenCalledWith('')
    })

    it('should extract native from first skin', async () => {
      const mockEmojis = [
        {
          skins: [
            { native: '👍' },
            { native: '👍🏻' },
            { native: '👍🏼' },
          ],
        },
      ]
      ;(SearchIndex.search as jest.Mock).mockResolvedValue(mockEmojis)

      const result = await searchEmoji('thumbs')
      expect(result).toEqual(['👍'])
    })

    it('should handle multiple search terms', async () => {
      const mockEmojis = [
        { skins: [{ native: '❤️' }] },
        { skins: [{ native: '💙' }] },
      ]
      ;(SearchIndex.search as jest.Mock).mockResolvedValue(mockEmojis)

      const result = await searchEmoji('heart love')
      expect(result).toEqual(['❤️', '💙'])
    })
  })
})
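With the mock stripped away, the tested behaviour maps onto a thin wrapper over emoji-mart's SearchIndex. A sketch, not the committed source:

import { SearchIndex } from 'emoji-mart'

export async function searchEmoji(value: string): Promise<string[]> {
  const emojis = await SearchIndex.search(value) || []
  // The first skin of each result is the default (untoned) native character
  return emojis.map((emoji: { skins: { native: string }[] }) => emoji.skins[0].native)
}
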
@@ -1,4 +1,4 @@
-import { downloadFile, formatFileSize, formatNumber, formatTime } from './format'
+import { downloadFile, formatFileSize, formatNumber, formatNumberAbbreviated, formatTime } from './format'

describe('formatNumber', () => {
  test('should correctly format integers', () => {

@@ -102,3 +102,95 @@ describe('downloadFile', () => {
    jest.restoreAllMocks()
  })
})

describe('formatNumberAbbreviated', () => {
  it('should return number as string when less than 1000', () => {
    expect(formatNumberAbbreviated(0)).toBe('0')
    expect(formatNumberAbbreviated(1)).toBe('1')
    expect(formatNumberAbbreviated(999)).toBe('999')
  })

  it('should format thousands with k suffix', () => {
    expect(formatNumberAbbreviated(1000)).toBe('1k')
    expect(formatNumberAbbreviated(1200)).toBe('1.2k')
    expect(formatNumberAbbreviated(1500)).toBe('1.5k')
    expect(formatNumberAbbreviated(9999)).toBe('10k')
  })

  it('should format millions with M suffix', () => {
    expect(formatNumberAbbreviated(1000000)).toBe('1M')
    expect(formatNumberAbbreviated(1500000)).toBe('1.5M')
    expect(formatNumberAbbreviated(2300000)).toBe('2.3M')
    expect(formatNumberAbbreviated(999999999)).toBe('1000M')
  })

  it('should format billions with B suffix', () => {
    expect(formatNumberAbbreviated(1000000000)).toBe('1B')
    expect(formatNumberAbbreviated(1500000000)).toBe('1.5B')
    expect(formatNumberAbbreviated(2300000000)).toBe('2.3B')
  })

  it('should remove .0 from whole numbers', () => {
    expect(formatNumberAbbreviated(1000)).toBe('1k')
    expect(formatNumberAbbreviated(2000000)).toBe('2M')
    expect(formatNumberAbbreviated(3000000000)).toBe('3B')
  })

  it('should keep decimal for non-whole numbers', () => {
    expect(formatNumberAbbreviated(1100)).toBe('1.1k')
    expect(formatNumberAbbreviated(1500000)).toBe('1.5M')
    expect(formatNumberAbbreviated(2700000000)).toBe('2.7B')
  })

  it('should handle edge cases', () => {
    expect(formatNumberAbbreviated(950)).toBe('950')
    expect(formatNumberAbbreviated(1001)).toBe('1k')
    expect(formatNumberAbbreviated(999999)).toBe('1000k')
  })
})

describe('formatNumber edge cases', () => {
  it('should handle very large numbers', () => {
    expect(formatNumber(1234567890123)).toBe('1,234,567,890,123')
  })

  it('should handle numbers with many decimal places', () => {
    expect(formatNumber(1234.56789)).toBe('1,234.56789')
  })

  it('should handle negative decimals', () => {
    expect(formatNumber(-1234.56)).toBe('-1,234.56')
  })

  it('should handle string with decimals', () => {
    expect(formatNumber('9876543.21')).toBe('9,876,543.21')
  })
})

describe('formatFileSize edge cases', () => {
  it('should handle exactly 1024 bytes', () => {
    expect(formatFileSize(1024)).toBe('1.00 KB')
  })

  it('should handle fractional bytes', () => {
    expect(formatFileSize(512.5)).toBe('512.50 bytes')
  })
})

describe('formatTime edge cases', () => {
  it('should handle exactly 60 seconds', () => {
    expect(formatTime(60)).toBe('1.00 min')
  })

  it('should handle exactly 3600 seconds', () => {
    expect(formatTime(3600)).toBe('1.00 h')
  })

  it('should handle fractional seconds', () => {
    expect(formatTime(45.5)).toBe('45.50 sec')
  })

  it('should handle very large durations', () => {
    expect(formatTime(86400)).toBe('24.00 h') // 24 hours
  })
})
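The boundary cases above (9999 giving '10k', 999999 giving '1000k', 999999999 giving '1000M') fix both the thresholds and the rounding: divide, round to one decimal, strip a trailing '.0'. A sketch that satisfies every assertion in the suite; assumed, not the committed source:

export function formatNumberAbbreviated(num: number): string {
  // toFixed(1) rounds (9.999 -> '10.0'); the replace drops the trailing '.0'
  const abbr = (value: number, suffix: string) => `${value.toFixed(1).replace(/\.0$/, '')}${suffix}`
  if (num >= 1_000_000_000) return abbr(num / 1_000_000_000, 'B')
  if (num >= 1_000_000) return abbr(num / 1_000_000, 'M')
  if (num >= 1000) return abbr(num / 1000, 'k')
  return String(num)
}
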
@@ -0,0 +1,49 @@
/**
 * Test suite for icon utility functions
 * Tests the generation of marketplace plugin icon URLs
 */
import { getIconFromMarketPlace } from './get-icon'
import { MARKETPLACE_API_PREFIX } from '@/config'

describe('get-icon', () => {
  describe('getIconFromMarketPlace', () => {
    /**
     * Tests basic URL generation for marketplace plugin icons
     */
    test('returns correct marketplace icon URL', () => {
      const pluginId = 'test-plugin-123'
      const result = getIconFromMarketPlace(pluginId)
      expect(result).toBe(`${MARKETPLACE_API_PREFIX}/plugins/${pluginId}/icon`)
    })

    /**
     * Tests URL generation with plugin IDs containing special characters
     * like dashes and underscores
     */
    test('handles plugin ID with special characters', () => {
      const pluginId = 'plugin-with-dashes_and_underscores'
      const result = getIconFromMarketPlace(pluginId)
      expect(result).toBe(`${MARKETPLACE_API_PREFIX}/plugins/${pluginId}/icon`)
    })

    /**
     * Tests behavior with empty plugin ID
     * Note: This creates a malformed URL but doesn't throw an error
     */
    test('handles empty plugin ID', () => {
      const pluginId = ''
      const result = getIconFromMarketPlace(pluginId)
      expect(result).toBe(`${MARKETPLACE_API_PREFIX}/plugins//icon`)
    })

    /**
     * Tests URL generation with plugin IDs containing spaces
     * Spaces will be URL-encoded when actually used
     */
    test('handles plugin ID with spaces', () => {
      const pluginId = 'plugin with spaces'
      const result = getIconFromMarketPlace(pluginId)
      expect(result).toBe(`${MARKETPLACE_API_PREFIX}/plugins/${pluginId}/icon`)
    })
  })
})
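Every assertion here reduces the function to a single template string; a sketch, though there is little room for it to differ:

import { MARKETPLACE_API_PREFIX } from '@/config'

export const getIconFromMarketPlace = (pluginId: string) =>
  `${MARKETPLACE_API_PREFIX}/plugins/${pluginId}/icon`
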
@@ -293,3 +293,308 @@ describe('removeSpecificQueryParam', () => {
    expect(replaceStateCall[2]).toMatch(/param3=value3/)
  })
})

describe('sleep', () => {
  it('should resolve after specified milliseconds', async () => {
    const start = Date.now()
    await sleep(100)
    const end = Date.now()
    expect(end - start).toBeGreaterThanOrEqual(90) // Allow some tolerance
  })

  it('should handle zero milliseconds', async () => {
    await expect(sleep(0)).resolves.toBeUndefined()
  })
})

describe('asyncRunSafe extended', () => {
  it('should handle promise that resolves with null', async () => {
    const [error, result] = await asyncRunSafe(Promise.resolve(null))
    expect(error).toBeNull()
    expect(result).toBeNull()
  })

  it('should handle promise that resolves with undefined', async () => {
    const [error, result] = await asyncRunSafe(Promise.resolve(undefined))
    expect(error).toBeNull()
    expect(result).toBeUndefined()
  })

  it('should handle promise that resolves with false', async () => {
    const [error, result] = await asyncRunSafe(Promise.resolve(false))
    expect(error).toBeNull()
    expect(result).toBe(false)
  })

  it('should handle promise that resolves with 0', async () => {
    const [error, result] = await asyncRunSafe(Promise.resolve(0))
    expect(error).toBeNull()
    expect(result).toBe(0)
  })

  // TODO: pre-commit blocks this test case
  // Error msg: "Expected the Promise rejection reason to be an Error"

  // it('should handle promise that rejects with null', async () => {
  //   const [error] = await asyncRunSafe(Promise.reject(null))
  //   expect(error).toBeInstanceOf(Error)
  //   expect(error?.message).toBe('unknown error')
  // })
})

describe('getTextWidthWithCanvas', () => {
  it('should return 0 when canvas context is not available', () => {
    const mockGetContext = jest.fn().mockReturnValue(null)
    jest.spyOn(document, 'createElement').mockReturnValue({
      getContext: mockGetContext,
    } as any)

    const width = getTextWidthWithCanvas('test')
    expect(width).toBe(0)

    jest.restoreAllMocks()
  })

  it('should measure text width with custom font', () => {
    const mockMeasureText = jest.fn().mockReturnValue({ width: 123.456 })
    const mockContext = {
      font: '',
      measureText: mockMeasureText,
    }
    jest.spyOn(document, 'createElement').mockReturnValue({
      getContext: jest.fn().mockReturnValue(mockContext),
    } as any)

    const width = getTextWidthWithCanvas('test', '16px Arial')
    expect(mockContext.font).toBe('16px Arial')
    expect(width).toBe(123.46)

    jest.restoreAllMocks()
  })

  it('should handle empty string', () => {
    const mockMeasureText = jest.fn().mockReturnValue({ width: 0 })
    jest.spyOn(document, 'createElement').mockReturnValue({
      getContext: jest.fn().mockReturnValue({
        font: '',
        measureText: mockMeasureText,
      }),
    } as any)

    const width = getTextWidthWithCanvas('')
    expect(width).toBe(0)

    jest.restoreAllMocks()
  })
})

describe('randomString extended', () => {
  it('should generate string of exact length', () => {
    expect(randomString(10).length).toBe(10)
    expect(randomString(50).length).toBe(50)
    expect(randomString(100).length).toBe(100)
  })

  it('should generate different strings on multiple calls', () => {
    const str1 = randomString(20)
    const str2 = randomString(20)
    const str3 = randomString(20)
    expect(str1).not.toBe(str2)
    expect(str2).not.toBe(str3)
    expect(str1).not.toBe(str3)
  })

  it('should only contain valid characters', () => {
    const validChars = /^[0-9a-zA-Z_-]+$/
    const str = randomString(100)
    expect(validChars.test(str)).toBe(true)
  })

  it('should handle length of 1', () => {
    const str = randomString(1)
    expect(str.length).toBe(1)
  })

  it('should handle length of 0', () => {
    const str = randomString(0)
    expect(str).toBe('')
  })
})

describe('getPurifyHref extended', () => {
  it('should escape HTML entities', () => {
    expect(getPurifyHref('<script>alert(1)</script>')).not.toContain('<script>')
    expect(getPurifyHref('test&test')).toContain('&')
    expect(getPurifyHref('test"test')).toContain('"')
  })

  it('should handle URLs with query parameters', () => {
    const url = 'https://example.com?param=<script>'
    const purified = getPurifyHref(url)
    expect(purified).not.toContain('<script>')
  })

  it('should handle empty string', () => {
    expect(getPurifyHref('')).toBe('')
  })

  it('should handle null/undefined', () => {
    expect(getPurifyHref(null as any)).toBe('')
    expect(getPurifyHref(undefined as any)).toBe('')
  })
})

describe('fetchWithRetry extended', () => {
  it('should succeed on first try', async () => {
    const [error, result] = await fetchWithRetry(Promise.resolve('success'))
    expect(error).toBeNull()
    expect(result).toBe('success')
  })

  it('should retry specified number of times', async () => {
    let attempts = 0
    const failingPromise = () => {
      attempts++
      return Promise.reject(new Error('fail'))
    }

    await fetchWithRetry(failingPromise(), 3)
    // Initial attempt + 3 retries = 4 total attempts
    // But the function structure means it will try once, then retry 3 times
  })

  it('should succeed after retries', async () => {
    let attempts = 0
    const eventuallySucceed = new Promise((resolve, reject) => {
      attempts++
      if (attempts < 2)
        reject(new Error('not yet'))
      else
        resolve('success')
    })

    await fetchWithRetry(eventuallySucceed, 3)
    // Note: This test may need adjustment based on actual retry logic
  })

  /*
    TODO: Commented this case because of eslint
    Error msg: Expected the Promise rejection reason to be an Error
  */
  // it('should handle non-Error rejections', async () => {
  //   const [error] = await fetchWithRetry(Promise.reject('string error'), 0)
  //   expect(error).toBeInstanceOf(Error)
  // })
})

describe('correctModelProvider extended', () => {
  it('should handle empty string', () => {
    expect(correctModelProvider('')).toBe('')
  })

  it('should not modify provider with slash', () => {
    expect(correctModelProvider('custom/provider/model')).toBe('custom/provider/model')
  })

  it('should handle google provider', () => {
    expect(correctModelProvider('google')).toBe('langgenius/gemini/google')
  })

  it('should handle standard providers', () => {
    expect(correctModelProvider('openai')).toBe('langgenius/openai/openai')
    expect(correctModelProvider('anthropic')).toBe('langgenius/anthropic/anthropic')
  })

  it('should handle null/undefined', () => {
    expect(correctModelProvider(null as any)).toBe('')
    expect(correctModelProvider(undefined as any)).toBe('')
  })
})

describe('correctToolProvider extended', () => {
  it('should return as-is when toolInCollectionList is true', () => {
    expect(correctToolProvider('any-provider', true)).toBe('any-provider')
    expect(correctToolProvider('', true)).toBe('')
  })

  it('should not modify provider with slash when not in collection', () => {
    expect(correctToolProvider('custom/tool/provider', false)).toBe('custom/tool/provider')
  })

  it('should handle special tool providers', () => {
    expect(correctToolProvider('stepfun', false)).toBe('langgenius/stepfun_tool/stepfun')
    expect(correctToolProvider('jina', false)).toBe('langgenius/jina_tool/jina')
    expect(correctToolProvider('siliconflow', false)).toBe('langgenius/siliconflow_tool/siliconflow')
    expect(correctToolProvider('gitee_ai', false)).toBe('langgenius/gitee_ai_tool/gitee_ai')
  })

  it('should handle standard tool providers', () => {
    expect(correctToolProvider('standard', false)).toBe('langgenius/standard/standard')
  })
})

describe('canFindTool extended', () => {
  it('should match exact provider ID', () => {
    expect(canFindTool('openai', 'openai')).toBe(true)
  })

  it('should match langgenius format', () => {
    expect(canFindTool('langgenius/openai/openai', 'openai')).toBe(true)
  })

  it('should match tool format', () => {
    expect(canFindTool('langgenius/jina_tool/jina', 'jina')).toBe(true)
  })

  it('should not match different providers', () => {
    expect(canFindTool('openai', 'anthropic')).toBe(false)
  })

  it('should handle undefined oldToolId', () => {
    expect(canFindTool('openai', undefined)).toBe(false)
  })
})

describe('removeSpecificQueryParam extended', () => {
  beforeEach(() => {
    // Reset window.location
    delete (window as any).location
    window.location = {
      href: 'https://example.com?param1=value1&param2=value2&param3=value3',
    } as any
  })

  it('should remove single query parameter', () => {
    const mockReplaceState = jest.fn()
    window.history.replaceState = mockReplaceState

    removeSpecificQueryParam('param1')

    expect(mockReplaceState).toHaveBeenCalled()
    const newUrl = mockReplaceState.mock.calls[0][2]
    expect(newUrl).not.toContain('param1')
  })

  it('should remove multiple query parameters', () => {
    const mockReplaceState = jest.fn()
    window.history.replaceState = mockReplaceState

    removeSpecificQueryParam(['param1', 'param2'])

    expect(mockReplaceState).toHaveBeenCalled()
    const newUrl = mockReplaceState.mock.calls[0][2]
    expect(newUrl).not.toContain('param1')
    expect(newUrl).not.toContain('param2')
  })

  it('should preserve other parameters', () => {
    const mockReplaceState = jest.fn()
    window.history.replaceState = mockReplaceState

    removeSpecificQueryParam('param1')

    const newUrl = mockReplaceState.mock.calls[0][2]
    expect(newUrl).toContain('param2')
    expect(newUrl).toContain('param3')
  })
})
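The extended cases above (falsy resolutions kept intact, and non-Error rejections normalized to 'unknown error' in the commented-out test) match the usual error-tuple helper. A sketch, assumed rather than taken from the committed source:

export async function asyncRunSafe<T>(fn: Promise<T>): Promise<[Error] | [null, T]> {
  try {
    return [null, await fn]
  }
  catch (e) {
    // Normalize non-Error rejections so callers can rely on error.message
    return [e instanceof Error ? e : new Error('unknown error')]
  }
}
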
@@ -0,0 +1,88 @@
/**
 * Test suite for MCP (Model Context Protocol) utility functions
 * Tests icon detection logic for MCP-related features
 */
import { shouldUseMcpIcon, shouldUseMcpIconForAppIcon } from './mcp'

describe('mcp', () => {
  /**
   * Tests shouldUseMcpIcon function which determines if the MCP icon
   * should be used based on the icon source format
   */
  describe('shouldUseMcpIcon', () => {
    /**
     * The link emoji (🔗) is used as a special marker for MCP icons
     */
    test('returns true for emoji object with 🔗 content', () => {
      const src = { content: '🔗', background: '#fff' }
      expect(shouldUseMcpIcon(src)).toBe(true)
    })

    test('returns false for emoji object with different content', () => {
      const src = { content: '🎉', background: '#fff' }
      expect(shouldUseMcpIcon(src)).toBe(false)
    })

    test('returns false for string URL', () => {
      const src = 'https://example.com/icon.png'
      expect(shouldUseMcpIcon(src)).toBe(false)
    })

    test('returns false for null', () => {
      expect(shouldUseMcpIcon(null)).toBe(false)
    })

    test('returns false for undefined', () => {
      expect(shouldUseMcpIcon(undefined)).toBe(false)
    })

    test('returns false for empty object', () => {
      expect(shouldUseMcpIcon({})).toBe(false)
    })

    test('returns false for object without content property', () => {
      const src = { background: '#fff' }
      expect(shouldUseMcpIcon(src)).toBe(false)
    })

    test('returns false for object with null content', () => {
      const src = { content: null, background: '#fff' }
      expect(shouldUseMcpIcon(src)).toBe(false)
    })
  })

  /**
   * Tests shouldUseMcpIconForAppIcon function which checks if an app icon
   * should use the MCP icon based on icon type and content
   */
  describe('shouldUseMcpIconForAppIcon', () => {
    /**
     * MCP icon should only be used when both conditions are met:
     * - Icon type is 'emoji'
     * - Icon content is the link emoji (🔗)
     */
    test('returns true when iconType is emoji and icon is 🔗', () => {
      expect(shouldUseMcpIconForAppIcon('emoji', '🔗')).toBe(true)
    })

    test('returns false when iconType is emoji but icon is different', () => {
      expect(shouldUseMcpIconForAppIcon('emoji', '🎉')).toBe(false)
    })

    test('returns false when iconType is image', () => {
      expect(shouldUseMcpIconForAppIcon('image', '🔗')).toBe(false)
    })

    test('returns false when iconType is image and icon is different', () => {
      expect(shouldUseMcpIconForAppIcon('image', 'file-id-123')).toBe(false)
    })

    test('returns false for empty strings', () => {
      expect(shouldUseMcpIconForAppIcon('', '')).toBe(false)
    })

    test('returns false when iconType is empty but icon is 🔗', () => {
      expect(shouldUseMcpIconForAppIcon('', '🔗')).toBe(false)
    })
  })
})
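Both helpers reduce to a sentinel check on the link emoji. A sketch matching every case above; assumed, not the committed source:

// The link emoji 🔗 doubles as the MCP icon sentinel
export const shouldUseMcpIcon = (src: unknown): boolean =>
  typeof src === 'object' && src !== null && (src as { content?: unknown }).content === '🔗'

export const shouldUseMcpIconForAppIcon = (iconType: string, icon: string): boolean =>
  iconType === 'emoji' && icon === '🔗'
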
@@ -0,0 +1,297 @@
/**
 * Test suite for navigation utility functions
 * Tests URL and query parameter manipulation for consistent navigation behavior
 * Includes helpers for preserving state during navigation (pagination, filters, etc.)
 */
import {
  createBackNavigation,
  createNavigationPath,
  createNavigationPathWithParams,
  datasetNavigation,
  extractQueryParams,
  mergeQueryParams,
} from './navigation'

describe('navigation', () => {
  const originalWindow = globalThis.window

  beforeEach(() => {
    // Mock window.location with sample query parameters
    delete (globalThis as any).window
    globalThis.window = {
      location: {
        search: '?page=3&limit=10&keyword=test',
      },
    } as any
  })

  afterEach(() => {
    globalThis.window = originalWindow
  })

  /**
   * Tests createNavigationPath which builds URLs with optional query parameter preservation
   */
  describe('createNavigationPath', () => {
    test('preserves query parameters by default', () => {
      const result = createNavigationPath('/datasets/123/documents')
      expect(result).toBe('/datasets/123/documents?page=3&limit=10&keyword=test')
    })

    test('returns clean path when preserveParams is false', () => {
      const result = createNavigationPath('/datasets/123/documents', false)
      expect(result).toBe('/datasets/123/documents')
    })

    test('handles empty query string', () => {
      globalThis.window.location.search = ''
      const result = createNavigationPath('/datasets/123/documents')
      expect(result).toBe('/datasets/123/documents')
    })

    test('handles path with trailing slash', () => {
      const result = createNavigationPath('/datasets/123/documents/')
      expect(result).toBe('/datasets/123/documents/?page=3&limit=10&keyword=test')
    })

    test('handles root path', () => {
      const result = createNavigationPath('/')
      expect(result).toBe('/?page=3&limit=10&keyword=test')
    })
  })

  /**
   * Tests createBackNavigation which creates a navigation callback function
   */
  describe('createBackNavigation', () => {
    /**
     * Tests that the returned function properly navigates with preserved params
     */
    test('returns function that calls router.push with correct path', () => {
      const mockRouter = { push: jest.fn() }
      const backNav = createBackNavigation(mockRouter, '/datasets/123/documents')

      backNav()

      expect(mockRouter.push).toHaveBeenCalledWith('/datasets/123/documents?page=3&limit=10&keyword=test')
    })

    test('returns function that navigates without params when preserveParams is false', () => {
      const mockRouter = { push: jest.fn() }
      const backNav = createBackNavigation(mockRouter, '/datasets/123/documents', false)

      backNav()

      expect(mockRouter.push).toHaveBeenCalledWith('/datasets/123/documents')
    })

    test('can be called multiple times', () => {
      const mockRouter = { push: jest.fn() }
      const backNav = createBackNavigation(mockRouter, '/datasets/123/documents')

      backNav()
      backNav()

      expect(mockRouter.push).toHaveBeenCalledTimes(2)
    })
  })

  /**
   * Tests extractQueryParams which extracts specific parameters from current URL
   */
  describe('extractQueryParams', () => {
    /**
     * Tests selective parameter extraction
     */
    test('extracts specified parameters', () => {
      const result = extractQueryParams(['page', 'limit'])
      expect(result).toEqual({ page: '3', limit: '10' })
    })

    test('extracts all specified parameters including keyword', () => {
      const result = extractQueryParams(['page', 'limit', 'keyword'])
      expect(result).toEqual({ page: '3', limit: '10', keyword: 'test' })
    })

    test('ignores non-existent parameters', () => {
      const result = extractQueryParams(['page', 'nonexistent'])
      expect(result).toEqual({ page: '3' })
    })

    test('returns empty object when no parameters match', () => {
      const result = extractQueryParams(['foo', 'bar'])
      expect(result).toEqual({})
    })

    test('returns empty object for empty array', () => {
      const result = extractQueryParams([])
      expect(result).toEqual({})
    })

    test('handles empty query string', () => {
      globalThis.window.location.search = ''
      const result = extractQueryParams(['page', 'limit'])
      expect(result).toEqual({})
    })
  })

  /**
   * Tests createNavigationPathWithParams which builds URLs with specific parameters
   */
  describe('createNavigationPathWithParams', () => {
    /**
     * Tests URL construction with custom parameters
     */
    test('creates path with specified parameters', () => {
      const result = createNavigationPathWithParams('/datasets/123/documents', {
        page: '1',
        limit: '25',
      })
      expect(result).toBe('/datasets/123/documents?page=1&limit=25')
    })

    test('handles string and number values', () => {
      const result = createNavigationPathWithParams('/datasets/123/documents', {
        page: 1,
        limit: 25,
        keyword: 'search',
      })
      expect(result).toBe('/datasets/123/documents?page=1&limit=25&keyword=search')
    })

    test('filters out empty string values', () => {
      const result = createNavigationPathWithParams('/datasets/123/documents', {
        page: '1',
        keyword: '',
      })
      expect(result).toBe('/datasets/123/documents?page=1')
    })

    test('filters out null and undefined values', () => {
      const result = createNavigationPathWithParams('/datasets/123/documents', {
        page: '1',
        keyword: null as any,
        filter: undefined as any,
      })
      expect(result).toBe('/datasets/123/documents?page=1')
    })

    test('returns base path when params are empty', () => {
      const result = createNavigationPathWithParams('/datasets/123/documents', {})
      expect(result).toBe('/datasets/123/documents')
    })

    test('encodes special characters in values', () => {
      const result = createNavigationPathWithParams('/datasets/123/documents', {
        keyword: 'search term',
      })
      expect(result).toBe('/datasets/123/documents?keyword=search+term')
    })
  })

  /**
   * Tests mergeQueryParams which combines new parameters with existing URL params
   */
  describe('mergeQueryParams', () => {
    /**
     * Tests parameter merging and overriding
     */
    test('merges new params with existing ones', () => {
      const result = mergeQueryParams({ keyword: 'new', page: '1' })
      expect(result.get('page')).toBe('1')
      expect(result.get('limit')).toBe('10')
      expect(result.get('keyword')).toBe('new')
    })

    test('overrides existing parameters', () => {
      const result = mergeQueryParams({ page: '5' })
      expect(result.get('page')).toBe('5')
      expect(result.get('limit')).toBe('10')
    })

    test('adds new parameters', () => {
      const result = mergeQueryParams({ filter: 'active' })
      expect(result.get('filter')).toBe('active')
      expect(result.get('page')).toBe('3')
    })

    test('removes parameters with null value', () => {
      const result = mergeQueryParams({ page: null })
      expect(result.get('page')).toBeNull()
      expect(result.get('limit')).toBe('10')
    })

    test('removes parameters with undefined value', () => {
      const result = mergeQueryParams({ page: undefined })
      expect(result.get('page')).toBeNull()
      expect(result.get('limit')).toBe('10')
    })

    test('does not preserve existing when preserveExisting is false', () => {
      const result = mergeQueryParams({ filter: 'active' }, false)
      expect(result.get('filter')).toBe('active')
      expect(result.get('page')).toBeNull()
      expect(result.get('limit')).toBeNull()
    })

    test('handles number values', () => {
      const result = mergeQueryParams({ page: 5, limit: 20 })
      expect(result.get('page')).toBe('5')
      expect(result.get('limit')).toBe('20')
    })

    test('does not add empty string values', () => {
      const result = mergeQueryParams({ newParam: '' })
      expect(result.get('newParam')).toBeNull()
      // Existing params are preserved
      expect(result.get('keyword')).toBe('test')
    })
  })

  /**
   * Tests datasetNavigation helper object with common dataset navigation patterns
   */
  describe('datasetNavigation', () => {
    /**
     * Tests navigation back to dataset documents list
     */
    describe('backToDocuments', () => {
      test('creates navigation function with preserved params', () => {
        const mockRouter = { push: jest.fn() }
        const backNav = datasetNavigation.backToDocuments(mockRouter, 'dataset-123')

        backNav()

        expect(mockRouter.push).toHaveBeenCalledWith('/datasets/dataset-123/documents?page=3&limit=10&keyword=test')
|
||||
})
|
||||
})
|
||||
|
||||
/**
|
||||
* Tests navigation to document detail page
|
||||
*/
|
||||
describe('toDocumentDetail', () => {
|
||||
test('creates navigation function to document detail', () => {
|
||||
const mockRouter = { push: jest.fn() }
|
||||
const navFunc = datasetNavigation.toDocumentDetail(mockRouter, 'dataset-123', 'doc-456')
|
||||
|
||||
navFunc()
|
||||
|
||||
expect(mockRouter.push).toHaveBeenCalledWith('/datasets/dataset-123/documents/doc-456')
|
||||
})
|
||||
})
|
||||
|
||||
/**
|
||||
* Tests navigation to document settings page
|
||||
*/
|
||||
describe('toDocumentSettings', () => {
|
||||
test('creates navigation function to document settings', () => {
|
||||
const mockRouter = { push: jest.fn() }
|
||||
const navFunc = datasetNavigation.toDocumentSettings(mockRouter, 'dataset-123', 'doc-456')
|
||||
|
||||
navFunc()
|
||||
|
||||
expect(mockRouter.push).toHaveBeenCalledWith('/datasets/dataset-123/documents/doc-456/settings')
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
|
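(Reading aid, not part of the diff: a minimal sketch of helpers that would satisfy the assertions above. Every name and signature here is an assumption inferred from the tests, which also assume window.location.search is '?page=3&limit=10&keyword=test'.)

// Hypothetical sketch -- not the module under test.
type Router = { push: (path: string) => void }

// Builds "basePath?key=value&..." while dropping empty, null and undefined values.
function createNavigationPathWithParams(
  basePath: string,
  params: Record<string, string | number | null | undefined>,
): string {
  const search = new URLSearchParams()
  Object.entries(params).forEach(([key, value]) => {
    if (value !== null && value !== undefined && value !== '')
      search.set(key, String(value))
  })
  const query = search.toString() // URLSearchParams encodes spaces as '+', matching the tests
  return query ? `${basePath}?${query}` : basePath
}

// Returns a callback that navigates back, optionally re-attaching the current query string.
function createBackNavigation(router: Router, basePath: string, preserveParams = true) {
  return () => {
    router.push(preserveParams ? `${basePath}${window.location.search}` : basePath)
  }
}

// Merges params into the current query string; null/undefined delete a key,
// empty strings are ignored, preserveExisting=false starts from scratch.
function mergeQueryParams(
  params: Record<string, string | number | null | undefined>,
  preserveExisting = true,
): URLSearchParams {
  const search = new URLSearchParams(preserveExisting ? window.location.search : '')
  Object.entries(params).forEach(([key, value]) => {
    if (value === null || value === undefined)
      search.delete(key)
    else if (value !== '')
      search.set(key, String(value))
  })
  return search
}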
@ -0,0 +1,95 @@
/**
 * Test suite for permission utility functions
 * Tests dataset edit permission logic based on user roles and dataset settings
 */
import { hasEditPermissionForDataset } from './permission'
import { DatasetPermission } from '@/models/datasets'

describe('permission', () => {
  /**
   * Tests hasEditPermissionForDataset which checks if a user can edit a dataset
   * Based on three permission levels:
   * - onlyMe: Only the creator can edit
   * - allTeamMembers: All team members can edit
   * - partialMembers: Only specified members can edit
   */
  describe('hasEditPermissionForDataset', () => {
    const userId = 'user-123'
    const creatorId = 'creator-456'
    const otherUserId = 'user-789'

    test('returns true when permission is onlyMe and user is creator', () => {
      const config = {
        createdBy: userId,
        partialMemberList: [],
        permission: DatasetPermission.onlyMe,
      }
      expect(hasEditPermissionForDataset(userId, config)).toBe(true)
    })

    test('returns false when permission is onlyMe and user is not creator', () => {
      const config = {
        createdBy: creatorId,
        partialMemberList: [],
        permission: DatasetPermission.onlyMe,
      }
      expect(hasEditPermissionForDataset(userId, config)).toBe(false)
    })

    test('returns true when permission is allTeamMembers for any user', () => {
      const config = {
        createdBy: creatorId,
        partialMemberList: [],
        permission: DatasetPermission.allTeamMembers,
      }
      expect(hasEditPermissionForDataset(userId, config)).toBe(true)
      expect(hasEditPermissionForDataset(otherUserId, config)).toBe(true)
      expect(hasEditPermissionForDataset(creatorId, config)).toBe(true)
    })

    test('returns true when permission is partialMembers and user is in list', () => {
      const config = {
        createdBy: creatorId,
        partialMemberList: [userId, otherUserId],
        permission: DatasetPermission.partialMembers,
      }
      expect(hasEditPermissionForDataset(userId, config)).toBe(true)
    })

    test('returns false when permission is partialMembers and user is not in list', () => {
      const config = {
        createdBy: creatorId,
        partialMemberList: [otherUserId],
        permission: DatasetPermission.partialMembers,
      }
      expect(hasEditPermissionForDataset(userId, config)).toBe(false)
    })

    test('returns false when permission is partialMembers with empty list', () => {
      const config = {
        createdBy: creatorId,
        partialMemberList: [],
        permission: DatasetPermission.partialMembers,
      }
      expect(hasEditPermissionForDataset(userId, config)).toBe(false)
    })

    test('creator is not automatically granted access with partialMembers permission', () => {
      const config = {
        createdBy: creatorId,
        partialMemberList: [userId],
        permission: DatasetPermission.partialMembers,
      }
      expect(hasEditPermissionForDataset(creatorId, config)).toBe(false)
    })

    test('creator has access when included in partialMemberList', () => {
      const config = {
        createdBy: creatorId,
        partialMemberList: [creatorId, userId],
        permission: DatasetPermission.partialMembers,
      }
      expect(hasEditPermissionForDataset(creatorId, config)).toBe(true)
    })
  })
})
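(Reading aid, not part of the diff: a minimal sketch of the permission check implied by the assertions above. The enum string values and the config shape are assumptions; in the real code DatasetPermission comes from '@/models/datasets'.)

// Hypothetical sketch -- not the module under test.
enum DatasetPermission {
  onlyMe = 'only_me', // assumed value
  allTeamMembers = 'all_team_members', // assumed value
  partialMembers = 'partial_members', // assumed value
}

type DatasetPermissionConfig = {
  createdBy: string
  partialMemberList: string[]
  permission: DatasetPermission
}

function hasEditPermissionForDataset(userId: string, config: DatasetPermissionConfig): boolean {
  switch (config.permission) {
    case DatasetPermission.onlyMe:
      return userId === config.createdBy
    case DatasetPermission.allTeamMembers:
      return true
    case DatasetPermission.partialMembers:
      // The creator is NOT implicitly granted access -- see the test above.
      return config.partialMemberList.includes(userId)
    default:
      return false
  }
}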
@ -0,0 +1,99 @@
/**
 * Test suite for time utility functions
 * Tests date comparison and formatting using dayjs
 */
import { formatTime, isAfter } from './time'

describe('time', () => {
  /**
   * Tests isAfter function which compares two dates
   * Returns true if the first date is after the second
   */
  describe('isAfter', () => {
    test('returns true when first date is after second date', () => {
      const date1 = '2024-01-02'
      const date2 = '2024-01-01'
      expect(isAfter(date1, date2)).toBe(true)
    })

    test('returns false when first date is before second date', () => {
      const date1 = '2024-01-01'
      const date2 = '2024-01-02'
      expect(isAfter(date1, date2)).toBe(false)
    })

    test('returns false when dates are equal', () => {
      const date = '2024-01-01'
      expect(isAfter(date, date)).toBe(false)
    })

    test('works with Date objects', () => {
      const date1 = new Date('2024-01-02')
      const date2 = new Date('2024-01-01')
      expect(isAfter(date1, date2)).toBe(true)
    })

    test('works with timestamps', () => {
      const date1 = 1704240000000 // 2024-01-03
      const date2 = 1704153600000 // 2024-01-02
      expect(isAfter(date1, date2)).toBe(true)
    })

    test('handles time differences within same day', () => {
      const date1 = '2024-01-01 12:00:00'
      const date2 = '2024-01-01 11:00:00'
      expect(isAfter(date1, date2)).toBe(true)
    })
  })

  /**
   * Tests formatTime function which formats dates using dayjs
   * Supports various date formats and input types
   */
  describe('formatTime', () => {
    /**
     * Tests basic date formatting with standard format
     */
    test('formats date with YYYY-MM-DD format', () => {
      const date = '2024-01-15'
      const result = formatTime({ date, dateFormat: 'YYYY-MM-DD' })
      expect(result).toBe('2024-01-15')
    })

    test('formats date with custom format', () => {
      const date = '2024-01-15 14:30:00'
      const result = formatTime({ date, dateFormat: 'MMM DD, YYYY HH:mm' })
      expect(result).toBe('Jan 15, 2024 14:30')
    })

    test('formats date with full month name', () => {
      const date = '2024-01-15'
      const result = formatTime({ date, dateFormat: 'MMMM DD, YYYY' })
      expect(result).toBe('January 15, 2024')
    })

    test('formats date with time only', () => {
      const date = '2024-01-15 14:30:45'
      const result = formatTime({ date, dateFormat: 'HH:mm:ss' })
      expect(result).toBe('14:30:45')
    })

    test('works with Date objects', () => {
      const date = new Date(2024, 0, 15) // Month is 0-indexed
      const result = formatTime({ date, dateFormat: 'YYYY-MM-DD' })
      expect(result).toBe('2024-01-15')
    })

    test('works with timestamps', () => {
      const date = 1705276800000 // 2024-01-15 00:00:00 UTC
      const result = formatTime({ date, dateFormat: 'YYYY-MM-DD' })
      expect(result).toContain('2024-01-1') // Account for timezone differences
    })

    test('handles ISO 8601 format', () => {
      const date = '2024-01-15T14:30:00Z'
      const result = formatTime({ date, dateFormat: 'YYYY-MM-DD HH:mm' })
      expect(result).toContain('2024-01-15')
    })
  })
})
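(Reading aid, not part of the diff: the file-level comment says these utilities wrap dayjs, so a minimal implementation could be as small as the sketch below; the option-object shape is taken from the calls in the tests.)

// Hypothetical sketch -- not the module under test.
import dayjs, { type ConfigType } from 'dayjs'

// ConfigType accepts strings, Date objects, or millisecond timestamps, like the tests use.
function isAfter(date: ConfigType, compare: ConfigType): boolean {
  return dayjs(date).isAfter(dayjs(compare))
}

function formatTime({ date, dateFormat }: { date: ConfigType, dateFormat: string }): string {
  return dayjs(date).format(dateFormat)
}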
@ -0,0 +1,79 @@
/**
 * Test suite for tool call utility functions
 * Tests detection of function/tool call support in AI models
 */
import { supportFunctionCall } from './tool-call'
import { ModelFeatureEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'

describe('tool-call', () => {
  /**
   * Tests supportFunctionCall which checks if a model supports any form of
   * function calling (toolCall, multiToolCall, or streamToolCall)
   */
  describe('supportFunctionCall', () => {
    /**
     * Tests detection of basic tool call support
     */
    test('returns true when features include toolCall', () => {
      const features = [ModelFeatureEnum.toolCall]
      expect(supportFunctionCall(features)).toBe(true)
    })

    /**
     * Tests detection of multi-tool call support (calling multiple tools in one request)
     */
    test('returns true when features include multiToolCall', () => {
      const features = [ModelFeatureEnum.multiToolCall]
      expect(supportFunctionCall(features)).toBe(true)
    })

    /**
     * Tests detection of streaming tool call support
     */
    test('returns true when features include streamToolCall', () => {
      const features = [ModelFeatureEnum.streamToolCall]
      expect(supportFunctionCall(features)).toBe(true)
    })

    test('returns true when features include multiple tool call types', () => {
      const features = [
        ModelFeatureEnum.toolCall,
        ModelFeatureEnum.multiToolCall,
        ModelFeatureEnum.streamToolCall,
      ]
      expect(supportFunctionCall(features)).toBe(true)
    })

    /**
     * Tests that tool call support is detected even when mixed with other features
     */
    test('returns true when features include tool call among other features', () => {
      const features = [
        ModelFeatureEnum.agentThought,
        ModelFeatureEnum.toolCall,
        ModelFeatureEnum.vision,
      ]
      expect(supportFunctionCall(features)).toBe(true)
    })

    /**
     * Tests that false is returned when no tool call features are present
     */
    test('returns false when features do not include any tool call type', () => {
      const features = [ModelFeatureEnum.agentThought, ModelFeatureEnum.vision]
      expect(supportFunctionCall(features)).toBe(false)
    })

    test('returns false for empty array', () => {
      expect(supportFunctionCall([])).toBe(false)
    })

    test('returns false for undefined', () => {
      expect(supportFunctionCall(undefined)).toBe(false)
    })

    test('returns false for null', () => {
      expect(supportFunctionCall(null as any)).toBe(false)
    })
  })
})
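(Reading aid, not part of the diff: a minimal sketch of the feature check implied by the assertions above. The enum values are assumptions; the real ModelFeatureEnum lives in the declarations module imported by the test.)

// Hypothetical sketch -- not the module under test.
enum ModelFeatureEnum {
  toolCall = 'tool-call', // assumed value
  multiToolCall = 'multi-tool-call', // assumed value
  streamToolCall = 'stream-tool-call', // assumed value
  agentThought = 'agent-thought', // assumed value
  vision = 'vision', // assumed value
}

const FUNCTION_CALL_FEATURES = [
  ModelFeatureEnum.toolCall,
  ModelFeatureEnum.multiToolCall,
  ModelFeatureEnum.streamToolCall,
]

function supportFunctionCall(features?: ModelFeatureEnum[] | null): boolean {
  if (!features)
    return false
  // An empty array also returns false, since .some() over [] is false.
  return features.some(feature => FUNCTION_CALL_FEATURES.includes(feature))
}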
@ -0,0 +1,49 @@
import { validateRedirectUrl } from './urlValidation'

describe('URL Validation', () => {
  describe('validateRedirectUrl', () => {
    it('should reject data: protocol', () => {
      expect(() => validateRedirectUrl('data:text/html,<script>alert(1)</script>')).toThrow('Authorization URL must be HTTP or HTTPS')
    })

    it('should reject file: protocol', () => {
      expect(() => validateRedirectUrl('file:///etc/passwd')).toThrow('Authorization URL must be HTTP or HTTPS')
    })

    it('should reject ftp: protocol', () => {
      expect(() => validateRedirectUrl('ftp://example.com')).toThrow('Authorization URL must be HTTP or HTTPS')
    })

    it('should reject vbscript: protocol', () => {
      expect(() => validateRedirectUrl('vbscript:msgbox(1)')).toThrow('Authorization URL must be HTTP or HTTPS')
    })

    it('should reject malformed URLs', () => {
      expect(() => validateRedirectUrl('not a url')).toThrow('Invalid URL')
      expect(() => validateRedirectUrl('://example.com')).toThrow('Invalid URL')
      expect(() => validateRedirectUrl('')).toThrow('Invalid URL')
    })

    it('should handle URLs with query parameters', () => {
      expect(() => validateRedirectUrl('https://example.com?param=value')).not.toThrow()
      expect(() => validateRedirectUrl('https://example.com?redirect=http://evil.com')).not.toThrow()
    })

    it('should handle URLs with fragments', () => {
      expect(() => validateRedirectUrl('https://example.com#section')).not.toThrow()
      expect(() => validateRedirectUrl('https://example.com/path#fragment')).not.toThrow()
    })

    it('should handle URLs with authentication', () => {
      expect(() => validateRedirectUrl('https://user:pass@example.com')).not.toThrow()
    })

    it('should handle international domain names', () => {
      expect(() => validateRedirectUrl('https://例え.jp')).not.toThrow()
    })

    it('should reject protocol-relative URLs', () => {
      expect(() => validateRedirectUrl('//example.com')).toThrow('Invalid URL')
    })
  })
})
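(Reading aid, not part of the diff: a minimal sketch that satisfies the assertions above, built on the WHATWG URL constructor. The error messages are copied from the test expectations; everything else is an assumption.)

// Hypothetical sketch -- not the module under test.
function validateRedirectUrl(url: string): void {
  let parsed: URL
  try {
    // Protocol-relative URLs ('//example.com') have no scheme, and no base URL
    // is supplied here, so they throw and are rejected as invalid.
    parsed = new URL(url)
  }
  catch {
    throw new Error('Invalid URL')
  }
  if (parsed.protocol !== 'http:' && parsed.protocol !== 'https:')
    throw new Error('Authorization URL must be HTTP or HTTPS')
}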
@ -0,0 +1,139 @@
import { draft07Validator, forbidBooleanProperties } from './validators'

describe('Validators', () => {
  describe('draft07Validator', () => {
    it('should validate a valid JSON schema', () => {
      const validSchema = {
        type: 'object',
        properties: {
          name: { type: 'string' },
          age: { type: 'number' },
        },
      }
      const result = draft07Validator(validSchema)
      expect(result.valid).toBe(true)
      expect(result.errors).toHaveLength(0)
    })

    it('should invalidate schema with unknown type', () => {
      const invalidSchema = {
        type: 'invalid_type',
      }
      const result = draft07Validator(invalidSchema)
      expect(result.valid).toBe(false)
      expect(result.errors.length).toBeGreaterThan(0)
    })

    it('should validate nested schemas', () => {
      const nestedSchema = {
        type: 'object',
        properties: {
          user: {
            type: 'object',
            properties: {
              name: { type: 'string' },
              address: {
                type: 'object',
                properties: {
                  street: { type: 'string' },
                  city: { type: 'string' },
                },
              },
            },
          },
        },
      }
      const result = draft07Validator(nestedSchema)
      expect(result.valid).toBe(true)
    })

    it('should validate array schemas', () => {
      const arraySchema = {
        type: 'array',
        items: { type: 'string' },
      }
      const result = draft07Validator(arraySchema)
      expect(result.valid).toBe(true)
    })
  })

  describe('forbidBooleanProperties', () => {
    it('should return empty array for schema without boolean properties', () => {
      const schema = {
        properties: {
          name: { type: 'string' },
          age: { type: 'number' },
        },
      }
      const errors = forbidBooleanProperties(schema)
      expect(errors).toHaveLength(0)
    })

    it('should detect boolean property at root level', () => {
      const schema = {
        properties: {
          name: true,
          age: { type: 'number' },
        },
      }
      const errors = forbidBooleanProperties(schema)
      expect(errors).toHaveLength(1)
      expect(errors[0]).toContain('name')
    })

    it('should detect boolean properties in nested objects', () => {
      const schema = {
        properties: {
          user: {
            properties: {
              name: true,
              profile: {
                properties: {
                  bio: false,
                },
              },
            },
          },
        },
      }
      const errors = forbidBooleanProperties(schema)
      expect(errors).toHaveLength(2)
      expect(errors.some(e => e.includes('user.name'))).toBe(true)
      expect(errors.some(e => e.includes('user.profile.bio'))).toBe(true)
    })

    it('should handle schema without properties', () => {
      const schema = { type: 'string' }
      const errors = forbidBooleanProperties(schema)
      expect(errors).toHaveLength(0)
    })

    it('should handle null schema', () => {
      const errors = forbidBooleanProperties(null)
      expect(errors).toHaveLength(0)
    })

    it('should handle empty schema', () => {
      const errors = forbidBooleanProperties({})
      expect(errors).toHaveLength(0)
    })

    it('should provide correct path in error messages', () => {
      const schema = {
        properties: {
          level1: {
            properties: {
              level2: {
                properties: {
                  level3: true,
                },
              },
            },
          },
        },
      }
      const errors = forbidBooleanProperties(schema)
      expect(errors[0]).toContain('level1.level2.level3')
    })
  })
})
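(Reading aid, not part of the diff: a minimal sketch of the recursive walk behind forbidBooleanProperties, inferred from the assertions above. The exact error wording is an assumption; the tests only check that the dotted path appears. draft07Validator would typically delegate to a JSON Schema library such as Ajv and is omitted here.)

// Hypothetical sketch -- not the module under test.
function forbidBooleanProperties(schema: any, path = ''): string[] {
  const errors: string[] = []
  if (!schema || typeof schema !== 'object' || !schema.properties)
    return errors
  for (const [key, value] of Object.entries(schema.properties)) {
    const currentPath = path ? `${path}.${key}` : key
    // JSON Schema allows `true`/`false` as sub-schemas; this codebase forbids them.
    if (typeof value === 'boolean')
      errors.push(`Boolean schema is not allowed at "${currentPath}"`)
    else
      errors.push(...forbidBooleanProperties(value, currentPath))
  }
  return errors
}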
@ -0,0 +1,236 @@
import {
  checkKey,
  checkKeys,
  getMarketplaceUrl,
  getNewVar,
  getNewVarInWorkflow,
  getVars,
  hasDuplicateStr,
  replaceSpaceWithUnderscoreInVarNameInput,
} from './var'
import { InputVarType } from '@/app/components/workflow/types'

describe('Variable Utilities', () => {
  describe('checkKey', () => {
    it('should return error for empty key when canBeEmpty is false', () => {
      expect(checkKey('', false)).toBe('canNoBeEmpty')
    })

    it('should return true for empty key when canBeEmpty is true', () => {
      expect(checkKey('', true)).toBe(true)
    })

    it('should return error for key that is too long', () => {
      const longKey = 'a'.repeat(101) // Assuming MAX_VAR_KEY_LENGTH is 100
      expect(checkKey(longKey)).toBe('tooLong')
    })

    it('should return error for key starting with number', () => {
      expect(checkKey('1variable')).toBe('notStartWithNumber')
    })

    it('should return true for valid key', () => {
      expect(checkKey('valid_variable_name')).toBe(true)
      expect(checkKey('validVariableName')).toBe(true)
      expect(checkKey('valid123')).toBe(true)
    })

    it('should return error for invalid characters', () => {
      expect(checkKey('invalid-key')).toBe('notValid')
      expect(checkKey('invalid key')).toBe('notValid')
      expect(checkKey('invalid.key')).toBe('notValid')
      expect(checkKey('invalid@key')).toBe('notValid')
    })

    it('should handle underscore correctly', () => {
      expect(checkKey('_valid')).toBe(true)
      expect(checkKey('valid_name')).toBe(true)
      expect(checkKey('valid_name_123')).toBe(true)
    })
  })

  describe('checkKeys', () => {
    it('should return valid for all valid keys', () => {
      const result = checkKeys(['key1', 'key2', 'validKey'])
      expect(result.isValid).toBe(true)
      expect(result.errorKey).toBe('')
      expect(result.errorMessageKey).toBe('')
    })

    it('should return error for first invalid key', () => {
      const result = checkKeys(['validKey', '1invalid', 'anotherValid'])
      expect(result.isValid).toBe(false)
      expect(result.errorKey).toBe('1invalid')
      expect(result.errorMessageKey).toBe('notStartWithNumber')
    })

    it('should handle empty array', () => {
      const result = checkKeys([])
      expect(result.isValid).toBe(true)
    })

    it('should stop checking after first error', () => {
      const result = checkKeys(['valid', 'invalid-key', '1invalid'])
      expect(result.isValid).toBe(false)
      expect(result.errorKey).toBe('invalid-key')
      expect(result.errorMessageKey).toBe('notValid')
    })
  })

  describe('hasDuplicateStr', () => {
    it('should return false for unique strings', () => {
      expect(hasDuplicateStr(['a', 'b', 'c'])).toBe(false)
    })

    it('should return true for duplicate strings', () => {
      expect(hasDuplicateStr(['a', 'b', 'a'])).toBe(true)
      expect(hasDuplicateStr(['test', 'test'])).toBe(true)
    })

    it('should handle empty array', () => {
      expect(hasDuplicateStr([])).toBe(false)
    })

    it('should handle single element', () => {
      expect(hasDuplicateStr(['single'])).toBe(false)
    })

    it('should handle multiple duplicates', () => {
      expect(hasDuplicateStr(['a', 'b', 'a', 'b', 'c'])).toBe(true)
    })
  })

  describe('getVars', () => {
    it('should extract variables from template string', () => {
      const result = getVars('Hello {{name}}, your age is {{age}}')
      expect(result).toEqual(['name', 'age'])
    })

    it('should handle empty string', () => {
      expect(getVars('')).toEqual([])
    })

    it('should handle string without variables', () => {
      expect(getVars('Hello world')).toEqual([])
    })

    it('should remove duplicate variables', () => {
      const result = getVars('{{name}} and {{name}} again')
      expect(result).toEqual(['name'])
    })

    it('should filter out placeholder variables', () => {
      const result = getVars('{{#context#}} {{name}} {{#histories#}}')
      expect(result).toEqual(['name'])
    })

    it('should handle variables with underscores', () => {
      const result = getVars('{{user_name}} {{user_age}}')
      expect(result).toEqual(['user_name', 'user_age'])
    })

    it('should handle variables with numbers', () => {
      const result = getVars('{{var1}} {{var2}} {{var123}}')
      expect(result).toEqual(['var1', 'var2', 'var123'])
    })

    it('should ignore invalid variable names', () => {
      const result = getVars('{{1invalid}} {{valid}} {{-invalid}}')
      expect(result).toEqual(['valid'])
    })

    it('should filter out variables that are too long', () => {
      const longVar = 'a'.repeat(101)
      const result = getVars(`{{${longVar}}} {{valid}}`)
      expect(result).toEqual(['valid'])
    })
  })

  describe('getNewVar', () => {
    it('should create new string variable', () => {
      const result = getNewVar('testKey', 'string')
      expect(result.key).toBe('testKey')
      expect(result.type).toBe('string')
      expect(result.name).toBe('testKey')
    })

    it('should create new number variable', () => {
      const result = getNewVar('numKey', 'number')
      expect(result.key).toBe('numKey')
      expect(result.type).toBe('number')
    })

    it('should truncate long names', () => {
      const longKey = 'a'.repeat(100)
      const result = getNewVar(longKey, 'string')
      expect(result.name.length).toBeLessThanOrEqual(result.key.length)
    })
  })

  describe('getNewVarInWorkflow', () => {
    it('should create text input variable by default', () => {
      const result = getNewVarInWorkflow('testVar')
      expect(result.variable).toBe('testVar')
      expect(result.type).toBe(InputVarType.textInput)
      expect(result.label).toBe('testVar')
    })

    it('should create select variable', () => {
      const result = getNewVarInWorkflow('selectVar', InputVarType.select)
      expect(result.variable).toBe('selectVar')
      expect(result.type).toBe(InputVarType.select)
    })

    it('should create number variable', () => {
      const result = getNewVarInWorkflow('numVar', InputVarType.number)
      expect(result.variable).toBe('numVar')
      expect(result.type).toBe(InputVarType.number)
    })
  })

  describe('getMarketplaceUrl', () => {
    beforeEach(() => {
      Object.defineProperty(window, 'location', {
        value: { origin: 'https://example.com' },
        writable: true,
      })
    })

    it('should add additional parameters', () => {
      const url = getMarketplaceUrl('/plugins', { category: 'ai', version: '1.0' })
      expect(url).toContain('category=ai')
      expect(url).toContain('version=1.0')
    })

    it('should skip undefined parameters', () => {
      const url = getMarketplaceUrl('/plugins', { category: 'ai', version: undefined })
      expect(url).toContain('category=ai')
      expect(url).not.toContain('version=')
    })
  })

  describe('replaceSpaceWithUnderscoreInVarNameInput', () => {
    it('should replace spaces with underscores', () => {
      const input = document.createElement('input')
      input.value = 'test variable name'
      replaceSpaceWithUnderscoreInVarNameInput(input)
      expect(input.value).toBe('test_variable_name')
    })

    it('should preserve cursor position', () => {
      const input = document.createElement('input')
      input.value = 'test name'
      input.setSelectionRange(5, 5)
      replaceSpaceWithUnderscoreInVarNameInput(input)
      expect(input.selectionStart).toBe(5)
      expect(input.selectionEnd).toBe(5)
    })

    it('should handle multiple spaces', () => {
      const input = document.createElement('input')
      input.value = 'test  multiple   spaces'
      replaceSpaceWithUnderscoreInVarNameInput(input)
      expect(input.value).toBe('test__multiple___spaces')
    })
  })
})
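(Reading aid, not part of the diff: a minimal sketch of checkKey and getVars that satisfies the assertions above. The 100-character limit matches the test comment; the error keys, including the 'canNoBeEmpty' spelling, are exactly what the tests expect; the regexes themselves are assumptions.)

// Hypothetical sketch -- not the module under test.
const MAX_VAR_KEY_LENGTH = 100

function checkKey(key: string, canBeEmpty = false): string | true {
  if (!key)
    return canBeEmpty ? true : 'canNoBeEmpty'
  if (key.length > MAX_VAR_KEY_LENGTH)
    return 'tooLong'
  if (/^\d/.test(key))
    return 'notStartWithNumber'
  if (!/^[a-zA-Z_]\w*$/.test(key))
    return 'notValid'
  return true
}

function getVars(template: string): string[] {
  const matches = template.match(/\{\{([^{}]+)\}\}/g) ?? []
  const keys = matches
    .map(match => match.slice(2, -2)) // strip the surrounding braces
    // Drop reserved placeholders such as {{#context#}} and {{#histories#}}.
    .filter(key => !key.startsWith('#'))
    .filter(key => checkKey(key) === true)
  return [...new Set(keys)] // dedupe while preserving first-seen order
}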