Merge branch 'main' into feat/plugin-auto-upgrade-fe

This commit is contained in:
Joel 2025-06-30 14:01:29 +08:00
commit 63a1a1077e
56 changed files with 3371 additions and 2577 deletions

View File

@ -223,6 +223,10 @@ class CeleryConfig(DatabaseConfig):
default=None,
)
CELERY_SENTINEL_PASSWORD: Optional[str] = Field(
description="Password of the Redis Sentinel master.",
default=None,
)
CELERY_SENTINEL_SOCKET_TIMEOUT: Optional[PositiveFloat] = Field(
description="Timeout for Redis Sentinel socket operations in seconds.",
default=0.1,

View File

@ -13,6 +13,7 @@ from core.model_runtime.utils.encoders import jsonable_encoder
from core.plugin.impl.exc import PluginDaemonClientSideError
from libs.login import login_required
from models.account import TenantPluginPermission
from services.plugin.plugin_parameter_service import PluginParameterService
from services.plugin.plugin_permission_service import PluginPermissionService
from services.plugin.plugin_service import PluginService
@ -497,6 +498,42 @@ class PluginFetchPermissionApi(Resource):
)
class PluginFetchDynamicSelectOptionsApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
# check if the user is admin or owner
if not current_user.is_admin_or_owner:
raise Forbidden()
tenant_id = current_user.current_tenant_id
user_id = current_user.id
parser = reqparse.RequestParser()
parser.add_argument("plugin_id", type=str, required=True, location="args")
parser.add_argument("provider", type=str, required=True, location="args")
parser.add_argument("action", type=str, required=True, location="args")
parser.add_argument("parameter", type=str, required=True, location="args")
parser.add_argument("provider_type", type=str, required=True, location="args")
args = parser.parse_args()
try:
options = PluginParameterService.get_dynamic_select_options(
tenant_id,
user_id,
args["plugin_id"],
args["provider"],
args["action"],
args["parameter"],
args["provider_type"],
)
except PluginDaemonClientSideError as e:
raise ValueError(e)
return jsonable_encoder({"options": options})
api.add_resource(PluginDebuggingKeyApi, "/workspaces/current/plugin/debugging-key")
api.add_resource(PluginListApi, "/workspaces/current/plugin/list")
api.add_resource(PluginListLatestVersionsApi, "/workspaces/current/plugin/list/latest-versions")
@ -521,3 +558,5 @@ api.add_resource(PluginFetchMarketplacePkgApi, "/workspaces/current/plugin/marke
api.add_resource(PluginChangePermissionApi, "/workspaces/current/plugin/permission/change")
api.add_resource(PluginFetchPermissionApi, "/workspaces/current/plugin/permission/fetch")
api.add_resource(PluginFetchDynamicSelectOptionsApi, "/workspaces/current/plugin/parameters/dynamic-options")
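
The new dynamic-options route takes all of its inputs as query-string arguments. A minimal client sketch (the base URL, plugin identifiers, and session token below are hypothetical; only the route itself comes from the registration above):

import requests

resp = requests.get(
    "https://dify.example.com/console/api/workspaces/current/plugin/parameters/dynamic-options",
    params={
        "plugin_id": "langgenius/slack",  # hypothetical plugin
        "provider": "slack",
        "action": "send_message",
        "parameter": "channel",
        "provider_type": "tool",
    },
    headers={"Authorization": "Bearer <console-session-token>"},
)
resp.raise_for_status()
print(resp.json()["options"])  # e.g. [{"value": "...", "label": {...}, "icon": None}, ...]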

View File

@ -17,6 +17,7 @@ from core.plugin.entities.request import (
RequestInvokeApp,
RequestInvokeEncrypt,
RequestInvokeLLM,
RequestInvokeLLMWithStructuredOutput,
RequestInvokeModeration,
RequestInvokeParameterExtractorNode,
RequestInvokeQuestionClassifierNode,
@ -47,6 +48,21 @@ class PluginInvokeLLMApi(Resource):
return length_prefixed_response(0xF, generator())
class PluginInvokeLLMWithStructuredOutputApi(Resource):
@setup_required
@plugin_inner_api_only
@get_user_tenant
@plugin_data(payload_type=RequestInvokeLLMWithStructuredOutput)
def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestInvokeLLMWithStructuredOutput):
def generator():
response = PluginModelBackwardsInvocation.invoke_llm_with_structured_output(
user_model.id, tenant_model, payload
)
return PluginModelBackwardsInvocation.convert_to_event_stream(response)
return length_prefixed_response(0xF, generator())
class PluginInvokeTextEmbeddingApi(Resource):
@setup_required
@plugin_inner_api_only
@ -291,6 +307,7 @@ class PluginFetchAppInfoApi(Resource):
api.add_resource(PluginInvokeLLMApi, "/invoke/llm")
api.add_resource(PluginInvokeLLMWithStructuredOutputApi, "/invoke/llm/structured-output")
api.add_resource(PluginInvokeTextEmbeddingApi, "/invoke/text-embedding")
api.add_resource(PluginInvokeRerankApi, "/invoke/rerank")
api.add_resource(PluginInvokeTTSApi, "/invoke/tts")

View File

@ -29,7 +29,19 @@ class EnterpriseWorkspace(Resource):
tenant_was_created.send(tenant)
return {"message": "enterprise workspace created."}
resp = {
"id": tenant.id,
"name": tenant.name,
"plan": tenant.plan,
"status": tenant.status,
"created_at": tenant.created_at.isoformat() + "Z" if tenant.created_at else None,
"updated_at": tenant.updated_at.isoformat() + "Z" if tenant.updated_at else None,
}
return {
"message": "enterprise workspace created.",
"tenant": resp,
}
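
For illustration, the enriched response now carries the tenant details alongside the message (all values below are made up):

{
    "message": "enterprise workspace created.",
    "tenant": {
        "id": "b8f3c2e1-...",
        "name": "Acme Inc.",
        "plan": "enterprise",
        "status": "normal",
        "created_at": "2025-06-30T06:01:29Z",
        "updated_at": "2025-06-30T06:01:29Z"
    }
}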
class EnterpriseWorkspaceNoOwnerEmail(Resource):

View File

@ -133,6 +133,22 @@ class DatasetListApi(DatasetApiResource):
parser.add_argument("embedding_model_provider", type=str, required=False, nullable=True, location="json")
args = parser.parse_args()
if args.get("embedding_model_provider"):
DatasetService.check_embedding_model_setting(
tenant_id, args.get("embedding_model_provider"), args.get("embedding_model")
)
if (
args.get("retrieval_model")
and args.get("retrieval_model").get("reranking_model")
and args.get("retrieval_model").get("reranking_model").get("reranking_provider_name")
):
DatasetService.check_reranking_model_setting(
tenant_id,
args.get("retrieval_model").get("reranking_model").get("reranking_provider_name"),
args.get("retrieval_model").get("reranking_model").get("reranking_model_name"),
)
try:
dataset = DatasetService.create_empty_dataset(
tenant_id=tenant_id,
@ -265,10 +281,20 @@ class DatasetApi(DatasetApiResource):
data = request.get_json()
# check embedding model setting
if data.get("indexing_technique") == "high_quality":
if data.get("indexing_technique") == "high_quality" or data.get("embedding_model_provider"):
DatasetService.check_embedding_model_setting(
dataset.tenant_id, data.get("embedding_model_provider"), data.get("embedding_model")
)
if (
data.get("retrieval_model")
and data.get("retrieval_model").get("reranking_model")
and data.get("retrieval_model").get("reranking_model").get("reranking_provider_name")
):
DatasetService.check_reranking_model_setting(
dataset.tenant_id,
data.get("retrieval_model").get("reranking_model").get("reranking_provider_name"),
data.get("retrieval_model").get("reranking_model").get("reranking_model_name"),
)
# The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
DatasetPermissionService.check_permission(

View File

@ -29,7 +29,7 @@ from extensions.ext_database import db
from fields.document_fields import document_fields, document_status_fields
from libs.login import current_user
from models.dataset import Dataset, Document, DocumentSegment
from services.dataset_service import DocumentService
from services.dataset_service import DatasetService, DocumentService
from services.entities.knowledge_entities.knowledge_entities import KnowledgeConfig
from services.file_service import FileService
@ -59,6 +59,7 @@ class DocumentAddByTextApi(DatasetApiResource):
parser.add_argument("embedding_model_provider", type=str, required=False, nullable=True, location="json")
args = parser.parse_args()
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
dataset = db.session.query(Dataset).filter(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first()
@ -74,6 +75,21 @@ class DocumentAddByTextApi(DatasetApiResource):
if text is None or name is None:
raise ValueError("Both 'text' and 'name' must be non-null values.")
if args.get("embedding_model_provider"):
DatasetService.check_embedding_model_setting(
tenant_id, args.get("embedding_model_provider"), args.get("embedding_model")
)
if (
args.get("retrieval_model")
and args.get("retrieval_model").get("reranking_model")
and args.get("retrieval_model").get("reranking_model").get("reranking_provider_name")
):
DatasetService.check_reranking_model_setting(
tenant_id,
args.get("retrieval_model").get("reranking_model").get("reranking_provider_name"),
args.get("retrieval_model").get("reranking_model").get("reranking_model_name"),
)
upload_file = FileService.upload_text(text=str(text), text_name=str(name))
data_source = {
"type": "upload_file",
@ -124,6 +140,17 @@ class DocumentUpdateByTextApi(DatasetApiResource):
if not dataset:
raise ValueError("Dataset does not exist.")
if (
args.get("retrieval_model")
and args.get("retrieval_model").get("reranking_model")
and args.get("retrieval_model").get("reranking_model").get("reranking_provider_name")
):
DatasetService.check_reranking_model_setting(
tenant_id,
args.get("retrieval_model").get("reranking_model").get("reranking_provider_name"),
args.get("retrieval_model").get("reranking_model").get("reranking_model_name"),
)
# indexing_technique is already set in dataset since this is an update
args["indexing_technique"] = dataset.indexing_technique
@ -188,6 +215,21 @@ class DocumentAddByFileApi(DatasetApiResource):
raise ValueError("indexing_technique is required.")
args["indexing_technique"] = indexing_technique
if "embedding_model_provider" in args:
DatasetService.check_embedding_model_setting(
tenant_id, args["embedding_model_provider"], args["embedding_model"]
)
if (
"retrieval_model" in args
and args["retrieval_model"].get("reranking_model")
and args["retrieval_model"].get("reranking_model").get("reranking_provider_name")
):
DatasetService.check_reranking_model_setting(
tenant_id,
args["retrieval_model"].get("reranking_model").get("reranking_provider_name"),
args["retrieval_model"].get("reranking_model").get("reranking_model_name"),
)
# save file info
file = request.files["file"]
# check file

View File

@ -15,6 +15,11 @@ class CommonParameterType(StrEnum):
MODEL_SELECTOR = "model-selector"
TOOLS_SELECTOR = "array[tools]"
# Dynamic select parameter
# Used when the available options are not known until authorization completes
# e.g. selecting a Slack channel from a Slack workspace
DYNAMIC_SELECT = "dynamic-select"
# TOOL_SELECTOR = "tool-selector"

View File

@ -0,0 +1,374 @@
import json
from collections.abc import Generator, Mapping, Sequence
from copy import deepcopy
from enum import StrEnum
from typing import Any, Literal, Optional, cast, overload
import json_repair
from pydantic import TypeAdapter, ValidationError
from core.llm_generator.output_parser.errors import OutputParserError
from core.llm_generator.prompts import STRUCTURED_OUTPUT_PROMPT
from core.model_manager import ModelInstance
from core.model_runtime.callbacks.base_callback import Callback
from core.model_runtime.entities.llm_entities import (
LLMResult,
LLMResultChunk,
LLMResultChunkDelta,
LLMResultChunkWithStructuredOutput,
LLMResultWithStructuredOutput,
)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessage,
PromptMessageTool,
SystemPromptMessage,
)
from core.model_runtime.entities.model_entities import AIModelEntity, ParameterRule
class ResponseFormat(StrEnum):
"""Constants for model response formats"""
JSON_SCHEMA = "json_schema" # model's structured output mode. some model like gemini, gpt-4o, support this mode.
JSON = "JSON" # model's json mode. some model like claude support this mode.
JSON_OBJECT = "json_object" # json mode's another alias. some model like deepseek-chat, qwen use this alias.
class SpecialModelType(StrEnum):
"""Constants for identifying model types"""
GEMINI = "gemini"
OLLAMA = "ollama"
@overload
def invoke_llm_with_structured_output(
provider: str,
model_schema: AIModelEntity,
model_instance: ModelInstance,
prompt_messages: Sequence[PromptMessage],
json_schema: Mapping[str, Any],
model_parameters: Optional[Mapping] = None,
tools: Sequence[PromptMessageTool] | None = None,
stop: Optional[list[str]] = None,
stream: Literal[True] = True,
user: Optional[str] = None,
callbacks: Optional[list[Callback]] = None,
) -> Generator[LLMResultChunkWithStructuredOutput, None, None]: ...
@overload
def invoke_llm_with_structured_output(
provider: str,
model_schema: AIModelEntity,
model_instance: ModelInstance,
prompt_messages: Sequence[PromptMessage],
json_schema: Mapping[str, Any],
model_parameters: Optional[Mapping] = None,
tools: Sequence[PromptMessageTool] | None = None,
stop: Optional[list[str]] = None,
stream: Literal[False] = False,
user: Optional[str] = None,
callbacks: Optional[list[Callback]] = None,
) -> LLMResultWithStructuredOutput: ...
@overload
def invoke_llm_with_structured_output(
provider: str,
model_schema: AIModelEntity,
model_instance: ModelInstance,
prompt_messages: Sequence[PromptMessage],
json_schema: Mapping[str, Any],
model_parameters: Optional[Mapping] = None,
tools: Sequence[PromptMessageTool] | None = None,
stop: Optional[list[str]] = None,
stream: bool = True,
user: Optional[str] = None,
callbacks: Optional[list[Callback]] = None,
) -> LLMResultWithStructuredOutput | Generator[LLMResultChunkWithStructuredOutput, None, None]: ...
def invoke_llm_with_structured_output(
provider: str,
model_schema: AIModelEntity,
model_instance: ModelInstance,
prompt_messages: Sequence[PromptMessage],
json_schema: Mapping[str, Any],
model_parameters: Optional[Mapping] = None,
tools: Sequence[PromptMessageTool] | None = None,
stop: Optional[list[str]] = None,
stream: bool = True,
user: Optional[str] = None,
callbacks: Optional[list[Callback]] = None,
) -> LLMResultWithStructuredOutput | Generator[LLMResultChunkWithStructuredOutput, None, None]:
"""
Invoke large language model with structured output
1. This method invokes model_instance.invoke_llm with json_schema
2. Try to parse the result as structured output
:param prompt_messages: prompt messages
:param json_schema: json schema
:param model_parameters: model parameters
:param tools: tools for tool calling
:param stop: stop words
:param stream: is stream response
:param user: unique user id
:param callbacks: callbacks
:return: full response or stream response chunk generator result
"""
# handle native json schema
model_parameters_with_json_schema: dict[str, Any] = {
**(model_parameters or {}),
}
if model_schema.support_structure_output:
model_parameters_with_json_schema = _handle_native_json_schema(
provider, model_schema, json_schema, model_parameters_with_json_schema, model_schema.parameter_rules
)
else:
# Set appropriate response format based on model capabilities
_set_response_format(model_parameters_with_json_schema, model_schema.parameter_rules)
# handle prompt based schema
prompt_messages = _handle_prompt_based_schema(
prompt_messages=prompt_messages,
structured_output_schema=json_schema,
)
llm_result = model_instance.invoke_llm(
prompt_messages=list(prompt_messages),
model_parameters=model_parameters_with_json_schema,
tools=tools,
stop=stop,
stream=stream,
user=user,
callbacks=callbacks,
)
if isinstance(llm_result, LLMResult):
if not isinstance(llm_result.message.content, str):
raise OutputParserError(
f"Failed to parse structured output, LLM result is not a string: {llm_result.message.content}"
)
return LLMResultWithStructuredOutput(
structured_output=_parse_structured_output(llm_result.message.content),
model=llm_result.model,
message=llm_result.message,
usage=llm_result.usage,
system_fingerprint=llm_result.system_fingerprint,
prompt_messages=llm_result.prompt_messages,
)
else:
def generator() -> Generator[LLMResultChunkWithStructuredOutput, None, None]:
result_text: str = ""
prompt_messages: Sequence[PromptMessage] = []
system_fingerprint: Optional[str] = None
for event in llm_result:
if isinstance(event, LLMResultChunk):
if isinstance(event.delta.message.content, str):
result_text += event.delta.message.content
prompt_messages = event.prompt_messages
system_fingerprint = event.system_fingerprint
yield LLMResultChunkWithStructuredOutput(
model=model_schema.model,
prompt_messages=prompt_messages,
system_fingerprint=system_fingerprint,
delta=event.delta,
)
yield LLMResultChunkWithStructuredOutput(
structured_output=_parse_structured_output(result_text),
model=model_schema.model,
prompt_messages=prompt_messages,
system_fingerprint=system_fingerprint,
delta=LLMResultChunkDelta(
index=0,
message=AssistantPromptMessage(content=""),
usage=None,
finish_reason=None,
),
)
return generator()
def _handle_native_json_schema(
provider: str,
model_schema: AIModelEntity,
structured_output_schema: Mapping,
model_parameters: dict,
rules: list[ParameterRule],
) -> dict:
"""
Handle structured output for models with native JSON schema support.
:param model_parameters: Model parameters to update
:param rules: Model parameter rules
:return: Updated model parameters with JSON schema configuration
"""
# Process schema according to model requirements
schema_json = _prepare_schema_for_model(provider, model_schema, structured_output_schema)
# Set JSON schema in parameters
model_parameters["json_schema"] = json.dumps(schema_json, ensure_ascii=False)
# Set appropriate response format if required by the model
for rule in rules:
if rule.name == "response_format" and ResponseFormat.JSON_SCHEMA.value in rule.options:
model_parameters["response_format"] = ResponseFormat.JSON_SCHEMA.value
return model_parameters
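# Illustration (hypothetical values): for the default case (neither Gemini nor Ollama),
# given a schema {"type": "object", ...} and a model whose "response_format" rule offers
# a "json_schema" option, the returned parameters end up containing both keys, e.g.:
#   {"json_schema": '{"schema": {...}, "name": "llm_response"}', "response_format": "json_schema"}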
def _set_response_format(model_parameters: dict, rules: list) -> None:
"""
Set the appropriate response format parameter based on model rules.
:param model_parameters: Model parameters to update
:param rules: Model parameter rules
"""
for rule in rules:
if rule.name == "response_format":
if ResponseFormat.JSON.value in rule.options:
model_parameters["response_format"] = ResponseFormat.JSON.value
elif ResponseFormat.JSON_OBJECT.value in rule.options:
model_parameters["response_format"] = ResponseFormat.JSON_OBJECT.value
def _handle_prompt_based_schema(
prompt_messages: Sequence[PromptMessage], structured_output_schema: Mapping
) -> list[PromptMessage]:
"""
Handle structured output for models without native JSON schema support.
This function modifies the prompt messages to include schema-based output requirements.
Args:
prompt_messages: Original sequence of prompt messages
Returns:
list[PromptMessage]: Updated prompt messages with structured output requirements
"""
# Convert schema to string format
schema_str = json.dumps(structured_output_schema, ensure_ascii=False)
# Find existing system prompt with schema placeholder
system_prompt = next(
(prompt for prompt in prompt_messages if isinstance(prompt, SystemPromptMessage)),
None,
)
structured_output_prompt = STRUCTURED_OUTPUT_PROMPT.replace("{{schema}}", schema_str)
# Prepare system prompt content
system_prompt_content = (
structured_output_prompt + "\n\n" + system_prompt.content
if system_prompt and isinstance(system_prompt.content, str)
else structured_output_prompt
)
system_prompt = SystemPromptMessage(content=system_prompt_content)
# Extract content from the last user message
filtered_prompts = [prompt for prompt in prompt_messages if not isinstance(prompt, SystemPromptMessage)]
updated_prompt = [system_prompt] + filtered_prompts
return updated_prompt
def _parse_structured_output(result_text: str) -> Mapping[str, Any]:
structured_output: Mapping[str, Any] = {}
parsed: Mapping[str, Any] = {}
try:
parsed = TypeAdapter(Mapping).validate_json(result_text)
if not isinstance(parsed, dict):
raise OutputParserError(f"Failed to parse structured output: {result_text}")
structured_output = parsed
except ValidationError:
# if result_text is not valid JSON, try to repair it
temp_parsed = json_repair.loads(result_text)
if not isinstance(temp_parsed, dict):
# handle reasoning models like deepseek-r1 that emit a '<think>\n\n</think>\n' prefix
if isinstance(temp_parsed, list):
temp_parsed = next((item for item in temp_parsed if isinstance(item, dict)), {})
else:
raise OutputParserError(f"Failed to parse structured output: {result_text}")
structured_output = cast(dict, temp_parsed)
return structured_output
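# Illustration (inputs mirror the unit tests further below): json_repair lets slightly
# malformed model output still parse, e.g.
#   _parse_structured_output('{"name":"test",age:123}')  # -> {"name": "test", "age": 123}
# and the repair path also strips reasoning-model prefixes, e.g.
#   _parse_structured_output('<think>\n\n</think>{"name": "test", "age": 123')  # -> same result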
def _prepare_schema_for_model(provider: str, model_schema: AIModelEntity, schema: Mapping) -> dict:
"""
Prepare JSON schema based on model requirements.
Different models have different requirements for JSON schema formatting.
This function handles these differences.
:param schema: The original JSON schema
:return: Processed schema compatible with the current model
"""
# Deep copy to avoid modifying the original schema
processed_schema = dict(deepcopy(schema))
# Convert boolean types to string types (common requirement)
convert_boolean_to_string(processed_schema)
# Apply model-specific transformations
if SpecialModelType.GEMINI in model_schema.model:
remove_additional_properties(processed_schema)
return processed_schema
elif SpecialModelType.OLLAMA in provider:
return processed_schema
else:
# Default format with name field
return {"schema": processed_schema, "name": "llm_response"}
def remove_additional_properties(schema: dict) -> None:
"""
Remove additionalProperties fields from JSON schema.
Used for models like Gemini that don't support this property.
:param schema: JSON schema to modify in-place
"""
if not isinstance(schema, dict):
return
# Remove additionalProperties at current level
schema.pop("additionalProperties", None)
# Process nested structures recursively
for value in schema.values():
if isinstance(value, dict):
remove_additional_properties(value)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
remove_additional_properties(item)
def convert_boolean_to_string(schema: dict) -> None:
"""
Convert boolean type specifications to string in JSON schema.
:param schema: JSON schema to modify in-place
"""
if not isinstance(schema, dict):
return
# Check for boolean type at current level
if schema.get("type") == "boolean":
schema["type"] = "string"
# Process nested dictionaries and lists recursively
for value in schema.values():
if isinstance(value, dict):
convert_boolean_to_string(value)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
convert_boolean_to_string(item)
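
A minimal blocking-mode sketch of the new helper (tenant, provider, and model identifiers are illustrative; it assumes credentials are already configured for the tenant):

from core.llm_generator.output_parser.structured_output import invoke_llm_with_structured_output
from core.model_manager import ModelManager
from core.model_runtime.entities.message_entities import UserPromptMessage
from core.model_runtime.entities.model_entities import ModelType

model_instance = ModelManager().get_model_instance(
    tenant_id="<tenant-id>", provider="openai", model_type=ModelType.LLM, model="gpt-4o"
)
model_schema = model_instance.model_type_instance.get_model_schema("gpt-4o", model_instance.credentials)
assert model_schema is not None
result = invoke_llm_with_structured_output(
    provider="openai",
    model_schema=model_schema,
    model_instance=model_instance,
    prompt_messages=[UserPromptMessage(content="My name is John Doe and I am 30 years old.")],
    json_schema={
        "type": "object",
        "properties": {"name": {"type": "string"}, "age": {"type": "number"}},
        "required": ["name", "age"],
    },
    stream=False,  # blocking mode returns an LLMResultWithStructuredOutput
)
print(result.structured_output)  # e.g. {"name": "John Doe", "age": 30}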

View File

@ -291,3 +291,21 @@ Your task is to convert simple user descriptions into properly formatted JSON Sc
Now, generate a JSON Schema based on my description
""" # noqa: E501
STRUCTURED_OUTPUT_PROMPT = """Youre a helpful AI assistant. You could answer questions and output in JSON format.
constraints:
- You must output in JSON format.
- Do not output boolean value, use string type instead.
- Do not output integer or float value, use number type instead.
eg:
Here is the JSON schema:
{"additionalProperties": false, "properties": {"age": {"type": "number"}, "name": {"type": "string"}}, "required": ["name", "age"], "type": "object"}
Here is the user's question:
My name is John Doe and I am 30 years old.
output:
{"name": "John Doe", "age": 30}
Here is the JSON schema:
{{schema}}
""" # noqa: E501

View File

@ -1,7 +1,7 @@
from collections.abc import Sequence
from collections.abc import Mapping, Sequence
from decimal import Decimal
from enum import StrEnum
from typing import Optional
from typing import Any, Optional
from pydantic import BaseModel, Field
@ -101,6 +101,20 @@ class LLMResult(BaseModel):
system_fingerprint: Optional[str] = None
class LLMStructuredOutput(BaseModel):
"""
Model class for llm structured output.
"""
structured_output: Optional[Mapping[str, Any]] = None
class LLMResultWithStructuredOutput(LLMResult, LLMStructuredOutput):
"""
Model class for llm result with structured output.
"""
class LLMResultChunkDelta(BaseModel):
"""
Model class for llm result chunk delta.
@ -123,6 +137,12 @@ class LLMResultChunk(BaseModel):
delta: LLMResultChunkDelta
class LLMResultChunkWithStructuredOutput(LLMResultChunk, LLMStructuredOutput):
"""
Model class for llm result chunk with structured output.
"""
class NumTokensResult(PriceInfo):
"""
Model class for number of tokens result.

View File

@ -2,8 +2,15 @@ import tempfile
from binascii import hexlify, unhexlify
from collections.abc import Generator
from core.llm_generator.output_parser.structured_output import invoke_llm_with_structured_output
from core.model_manager import ModelManager
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.llm_entities import (
LLMResult,
LLMResultChunk,
LLMResultChunkDelta,
LLMResultChunkWithStructuredOutput,
LLMResultWithStructuredOutput,
)
from core.model_runtime.entities.message_entities import (
PromptMessage,
SystemPromptMessage,
@ -12,6 +19,7 @@ from core.model_runtime.entities.message_entities import (
from core.plugin.backwards_invocation.base import BaseBackwardsInvocation
from core.plugin.entities.request import (
RequestInvokeLLM,
RequestInvokeLLMWithStructuredOutput,
RequestInvokeModeration,
RequestInvokeRerank,
RequestInvokeSpeech2Text,
@ -81,6 +89,72 @@ class PluginModelBackwardsInvocation(BaseBackwardsInvocation):
return handle_non_streaming(response)
@classmethod
def invoke_llm_with_structured_output(
cls, user_id: str, tenant: Tenant, payload: RequestInvokeLLMWithStructuredOutput
):
"""
Invoke an LLM with structured output on behalf of a plugin.
"""
model_instance = ModelManager().get_model_instance(
tenant_id=tenant.id,
provider=payload.provider,
model_type=payload.model_type,
model=payload.model,
)
model_schema = model_instance.model_type_instance.get_model_schema(payload.model, model_instance.credentials)
if not model_schema:
raise ValueError(f"Model schema not found for {payload.model}")
response = invoke_llm_with_structured_output(
provider=payload.provider,
model_schema=model_schema,
model_instance=model_instance,
prompt_messages=payload.prompt_messages,
json_schema=payload.structured_output_schema,
tools=payload.tools,
stop=payload.stop,
stream=True if payload.stream is None else payload.stream,
user=user_id,
model_parameters=payload.completion_params,
)
if isinstance(response, Generator):
def handle() -> Generator[LLMResultChunkWithStructuredOutput, None, None]:
for chunk in response:
if chunk.delta.usage:
llm_utils.deduct_llm_quota(
tenant_id=tenant.id, model_instance=model_instance, usage=chunk.delta.usage
)
chunk.prompt_messages = []
yield chunk
return handle()
else:
if response.usage:
llm_utils.deduct_llm_quota(tenant_id=tenant.id, model_instance=model_instance, usage=response.usage)
def handle_non_streaming(
response: LLMResultWithStructuredOutput,
) -> Generator[LLMResultChunkWithStructuredOutput, None, None]:
yield LLMResultChunkWithStructuredOutput(
model=response.model,
prompt_messages=[],
system_fingerprint=response.system_fingerprint,
structured_output=response.structured_output,
delta=LLMResultChunkDelta(
index=0,
message=response.message,
usage=response.usage,
finish_reason="",
),
)
return handle_non_streaming(response)
@classmethod
def invoke_text_embedding(cls, user_id: str, tenant: Tenant, payload: RequestInvokeTextEmbedding):
"""

View File

@ -10,6 +10,9 @@ from core.tools.entities.common_entities import I18nObject
class PluginParameterOption(BaseModel):
value: str = Field(..., description="The value of the option")
label: I18nObject = Field(..., description="The label of the option")
icon: Optional[str] = Field(
default=None, description="The icon of the option, can be a url or a base64 encoded image"
)
@field_validator("value", mode="before")
@classmethod
@ -35,6 +38,7 @@ class PluginParameterType(enum.StrEnum):
APP_SELECTOR = CommonParameterType.APP_SELECTOR.value
MODEL_SELECTOR = CommonParameterType.MODEL_SELECTOR.value
TOOLS_SELECTOR = CommonParameterType.TOOLS_SELECTOR.value
DYNAMIC_SELECT = CommonParameterType.DYNAMIC_SELECT.value
# deprecated, should not use.
SYSTEM_FILES = CommonParameterType.SYSTEM_FILES.value

View File

@ -1,4 +1,4 @@
from collections.abc import Mapping
from collections.abc import Mapping, Sequence
from datetime import datetime
from enum import StrEnum
from typing import Any, Generic, Optional, TypeVar
@ -9,6 +9,7 @@ from core.agent.plugin_entities import AgentProviderEntityWithPlugin
from core.model_runtime.entities.model_entities import AIModelEntity
from core.model_runtime.entities.provider_entities import ProviderEntity
from core.plugin.entities.base import BasePluginEntity
from core.plugin.entities.parameters import PluginParameterOption
from core.plugin.entities.plugin import PluginDeclaration, PluginEntity
from core.tools.entities.common_entities import I18nObject
from core.tools.entities.tool_entities import ToolProviderEntityWithPlugin
@ -186,3 +187,7 @@ class PluginOAuthCredentialsResponse(BaseModel):
class PluginListResponse(BaseModel):
list: list[PluginEntity]
total: int
class PluginDynamicSelectOptionsResponse(BaseModel):
options: Sequence[PluginParameterOption] = Field(description="The options of the dynamic select.")

View File

@ -82,6 +82,16 @@ class RequestInvokeLLM(BaseRequestInvokeModel):
return v
class RequestInvokeLLMWithStructuredOutput(RequestInvokeLLM):
"""
Request to invoke LLM with structured output
"""
structured_output_schema: dict[str, Any] = Field(
default_factory=dict, description="The schema of the structured output in JSON schema format"
)
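# Illustration: a caller sets the usual RequestInvokeLLM fields plus, e.g.
#   structured_output_schema={"type": "object", "properties": {"name": {"type": "string"}}}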
class RequestInvokeTextEmbedding(BaseRequestInvokeModel):
"""
Request to invoke text embedding

View File

@ -0,0 +1,45 @@
from collections.abc import Mapping
from typing import Any
from core.plugin.entities.plugin import GenericProviderID
from core.plugin.entities.plugin_daemon import PluginDynamicSelectOptionsResponse
from core.plugin.impl.base import BasePluginClient
class DynamicSelectClient(BasePluginClient):
def fetch_dynamic_select_options(
self,
tenant_id: str,
user_id: str,
plugin_id: str,
provider: str,
action: str,
credentials: Mapping[str, Any],
parameter: str,
) -> PluginDynamicSelectOptionsResponse:
"""
Fetch dynamic select options for a plugin parameter.
"""
response = self._request_with_plugin_daemon_response_stream(
"POST",
f"plugin/{tenant_id}/dispatch/dynamic_select/fetch_parameter_options",
PluginDynamicSelectOptionsResponse,
data={
"user_id": user_id,
"data": {
"provider": GenericProviderID(provider).provider_name,
"credentials": credentials,
"provider_action": action,
"parameter": parameter,
},
},
headers={
"X-Plugin-ID": plugin_id,
"Content-Type": "application/json",
},
)
for options in response:
return options
raise ValueError("Plugin service returned no options")

View File

@ -240,6 +240,7 @@ class ToolParameter(PluginParameter):
FILES = PluginParameterType.FILES.value
APP_SELECTOR = PluginParameterType.APP_SELECTOR.value
MODEL_SELECTOR = PluginParameterType.MODEL_SELECTOR.value
DYNAMIC_SELECT = PluginParameterType.DYNAMIC_SELECT.value
# deprecated, should not use.
SYSTEM_FILES = PluginParameterType.SYSTEM_FILES.value

View File

@ -86,6 +86,7 @@ class ProviderConfigEncrypter(BaseModel):
cached_credentials = cache.get()
if cached_credentials:
return cached_credentials
data = self._deep_copy(data)
# get fields need to be decrypted
fields = dict[str, BasicProviderConfig]()

View File

@ -5,11 +5,11 @@ import logging
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any, Optional, cast
import json_repair
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.file import FileType, file_manager
from core.helper.code_executor import CodeExecutor, CodeLanguage
from core.llm_generator.output_parser.errors import OutputParserError
from core.llm_generator.output_parser.structured_output import invoke_llm_with_structured_output
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities import (
@ -18,7 +18,13 @@ from core.model_runtime.entities import (
PromptMessageContentType,
TextPromptMessageContent,
)
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMUsage
from core.model_runtime.entities.llm_entities import (
LLMResult,
LLMResultChunk,
LLMResultChunkWithStructuredOutput,
LLMStructuredOutput,
LLMUsage,
)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessageContentUnionTypes,
@ -31,7 +37,6 @@ from core.model_runtime.entities.model_entities import (
ModelFeature,
ModelPropertyKey,
ModelType,
ParameterRule,
)
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
@ -62,11 +67,6 @@ from core.workflow.nodes.event import (
RunRetrieverResourceEvent,
RunStreamChunkEvent,
)
from core.workflow.utils.structured_output.entities import (
ResponseFormat,
SpecialModelType,
)
from core.workflow.utils.structured_output.prompt import STRUCTURED_OUTPUT_PROMPT
from core.workflow.utils.variable_template_parser import VariableTemplateParser
from . import llm_utils
@ -143,12 +143,6 @@ class LLMNode(BaseNode[LLMNodeData]):
return "1"
def _run(self) -> Generator[NodeEvent | InNodeEvent, None, None]:
def process_structured_output(text: str) -> Optional[dict[str, Any]]:
"""Process structured output if enabled"""
if not self.node_data.structured_output_enabled or not self.node_data.structured_output:
return None
return self._parse_structured_output(text)
node_inputs: Optional[dict[str, Any]] = None
process_data = None
result_text = ""
@ -244,6 +238,8 @@ class LLMNode(BaseNode[LLMNodeData]):
stop=stop,
)
structured_output: LLMStructuredOutput | None = None
for event in generator:
if isinstance(event, RunStreamChunkEvent):
yield event
@ -254,10 +250,12 @@ class LLMNode(BaseNode[LLMNodeData]):
# deduct quota
llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
break
elif isinstance(event, LLMStructuredOutput):
structured_output = event
outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason}
structured_output = process_structured_output(result_text)
if structured_output:
outputs["structured_output"] = structured_output
outputs["structured_output"] = structured_output.structured_output
if self._file_outputs is not None:
outputs["files"] = ArrayFileSegment(value=self._file_outputs)
@ -302,20 +300,40 @@ class LLMNode(BaseNode[LLMNodeData]):
model_instance: ModelInstance,
prompt_messages: Sequence[PromptMessage],
stop: Optional[Sequence[str]] = None,
) -> Generator[NodeEvent, None, None]:
invoke_result = model_instance.invoke_llm(
prompt_messages=list(prompt_messages),
model_parameters=node_data_model.completion_params,
stop=list(stop or []),
stream=True,
user=self.user_id,
) -> Generator[NodeEvent | LLMStructuredOutput, None, None]:
model_schema = model_instance.model_type_instance.get_model_schema(
node_data_model.name, model_instance.credentials
)
if not model_schema:
raise ValueError(f"Model schema not found for {node_data_model.name}")
if self.node_data.structured_output_enabled:
output_schema = self._fetch_structured_output_schema()
invoke_result = invoke_llm_with_structured_output(
provider=model_instance.provider,
model_schema=model_schema,
model_instance=model_instance,
prompt_messages=prompt_messages,
json_schema=output_schema,
model_parameters=node_data_model.completion_params,
stop=list(stop or []),
stream=True,
user=self.user_id,
)
else:
invoke_result = model_instance.invoke_llm(
prompt_messages=list(prompt_messages),
model_parameters=node_data_model.completion_params,
stop=list(stop or []),
stream=True,
user=self.user_id,
)
return self._handle_invoke_result(invoke_result=invoke_result)
def _handle_invoke_result(
self, invoke_result: LLMResult | Generator[LLMResultChunk, None, None]
) -> Generator[NodeEvent, None, None]:
self, invoke_result: LLMResult | Generator[LLMResultChunk | LLMStructuredOutput, None, None]
) -> Generator[NodeEvent | LLMStructuredOutput, None, None]:
# For blocking mode
if isinstance(invoke_result, LLMResult):
event = self._handle_blocking_result(invoke_result=invoke_result)
@ -329,23 +347,32 @@ class LLMNode(BaseNode[LLMNodeData]):
usage = LLMUsage.empty_usage()
finish_reason = None
full_text_buffer = io.StringIO()
for result in invoke_result:
contents = result.delta.message.content
for text_part in self._save_multimodal_output_and_convert_result_to_markdown(contents):
full_text_buffer.write(text_part)
yield RunStreamChunkEvent(chunk_content=text_part, from_variable_selector=[self.node_id, "text"])
# Consume the invoke result and handle generator exception
try:
for result in invoke_result:
if isinstance(result, LLMResultChunkWithStructuredOutput):
yield result
if isinstance(result, LLMResultChunk):
contents = result.delta.message.content
for text_part in self._save_multimodal_output_and_convert_result_to_markdown(contents):
full_text_buffer.write(text_part)
yield RunStreamChunkEvent(
chunk_content=text_part, from_variable_selector=[self.node_id, "text"]
)
# Update the whole metadata
if not model and result.model:
model = result.model
if len(prompt_messages) == 0:
# TODO(QuantumGhost): it seems that this update has no visible effect.
# What's the purpose of the line below?
prompt_messages = list(result.prompt_messages)
if usage.prompt_tokens == 0 and result.delta.usage:
usage = result.delta.usage
if finish_reason is None and result.delta.finish_reason:
finish_reason = result.delta.finish_reason
# Update the whole metadata
if not model and result.model:
model = result.model
if len(prompt_messages) == 0:
# TODO(QuantumGhost): it seems that this update has no visible effect.
# What's the purpose of the line below?
prompt_messages = list(result.prompt_messages)
if usage.prompt_tokens == 0 and result.delta.usage:
usage = result.delta.usage
if finish_reason is None and result.delta.finish_reason:
finish_reason = result.delta.finish_reason
except OutputParserError as e:
raise LLMNodeError(f"Failed to parse structured output: {e}")
yield ModelInvokeCompletedEvent(text=full_text_buffer.getvalue(), usage=usage, finish_reason=finish_reason)
@ -522,12 +549,6 @@ class LLMNode(BaseNode[LLMNodeData]):
if not model_schema:
raise ModelNotExistError(f"Model {node_data_model.name} not exist.")
if self.node_data.structured_output_enabled:
if model_schema.support_structure_output:
completion_params = self._handle_native_json_schema(completion_params, model_schema.parameter_rules)
else:
# Set appropriate response format based on model capabilities
self._set_response_format(completion_params, model_schema.parameter_rules)
model_config_with_cred.parameters = completion_params
# NOTE(-LAN-): This line modify the `self.node_data.model`, which is used in `_invoke_llm()`.
node_data_model.completion_params = completion_params
@ -719,32 +740,8 @@ class LLMNode(BaseNode[LLMNodeData]):
)
if not model_schema:
raise ModelNotExistError(f"Model {model_config.model} not exist.")
if self.node_data.structured_output_enabled:
if not model_schema.support_structure_output:
filtered_prompt_messages = self._handle_prompt_based_schema(
prompt_messages=filtered_prompt_messages,
)
return filtered_prompt_messages, model_config.stop
def _parse_structured_output(self, result_text: str) -> dict[str, Any]:
structured_output: dict[str, Any] = {}
try:
parsed = json.loads(result_text)
if not isinstance(parsed, dict):
raise LLMNodeError(f"Failed to parse structured output: {result_text}")
structured_output = parsed
except json.JSONDecodeError as e:
# if the result_text is not a valid json, try to repair it
parsed = json_repair.loads(result_text)
if not isinstance(parsed, dict):
# handle reasoning model like deepseek-r1 got '<think>\n\n</think>\n' prefix
if isinstance(parsed, list):
parsed = next((item for item in parsed if isinstance(item, dict)), {})
else:
raise LLMNodeError(f"Failed to parse structured output: {result_text}")
structured_output = parsed
return structured_output
@classmethod
def _extract_variable_selector_to_variable_mapping(
cls,
@ -934,104 +931,6 @@ class LLMNode(BaseNode[LLMNodeData]):
self._file_outputs.append(saved_file)
return saved_file
def _handle_native_json_schema(self, model_parameters: dict, rules: list[ParameterRule]) -> dict:
"""
Handle structured output for models with native JSON schema support.
:param model_parameters: Model parameters to update
:param rules: Model parameter rules
:return: Updated model parameters with JSON schema configuration
"""
# Process schema according to model requirements
schema = self._fetch_structured_output_schema()
schema_json = self._prepare_schema_for_model(schema)
# Set JSON schema in parameters
model_parameters["json_schema"] = json.dumps(schema_json, ensure_ascii=False)
# Set appropriate response format if required by the model
for rule in rules:
if rule.name == "response_format" and ResponseFormat.JSON_SCHEMA.value in rule.options:
model_parameters["response_format"] = ResponseFormat.JSON_SCHEMA.value
return model_parameters
def _handle_prompt_based_schema(self, prompt_messages: Sequence[PromptMessage]) -> list[PromptMessage]:
"""
Handle structured output for models without native JSON schema support.
This function modifies the prompt messages to include schema-based output requirements.
Args:
prompt_messages: Original sequence of prompt messages
Returns:
list[PromptMessage]: Updated prompt messages with structured output requirements
"""
# Convert schema to string format
schema_str = json.dumps(self._fetch_structured_output_schema(), ensure_ascii=False)
# Find existing system prompt with schema placeholder
system_prompt = next(
(prompt for prompt in prompt_messages if isinstance(prompt, SystemPromptMessage)),
None,
)
structured_output_prompt = STRUCTURED_OUTPUT_PROMPT.replace("{{schema}}", schema_str)
# Prepare system prompt content
system_prompt_content = (
structured_output_prompt + "\n\n" + system_prompt.content
if system_prompt and isinstance(system_prompt.content, str)
else structured_output_prompt
)
system_prompt = SystemPromptMessage(content=system_prompt_content)
# Extract content from the last user message
filtered_prompts = [prompt for prompt in prompt_messages if not isinstance(prompt, SystemPromptMessage)]
updated_prompt = [system_prompt] + filtered_prompts
return updated_prompt
def _set_response_format(self, model_parameters: dict, rules: list) -> None:
"""
Set the appropriate response format parameter based on model rules.
:param model_parameters: Model parameters to update
:param rules: Model parameter rules
"""
for rule in rules:
if rule.name == "response_format":
if ResponseFormat.JSON.value in rule.options:
model_parameters["response_format"] = ResponseFormat.JSON.value
elif ResponseFormat.JSON_OBJECT.value in rule.options:
model_parameters["response_format"] = ResponseFormat.JSON_OBJECT.value
def _prepare_schema_for_model(self, schema: dict) -> dict:
"""
Prepare JSON schema based on model requirements.
Different models have different requirements for JSON schema formatting.
This function handles these differences.
:param schema: The original JSON schema
:return: Processed schema compatible with the current model
"""
# Deep copy to avoid modifying the original schema
processed_schema = schema.copy()
# Convert boolean types to string types (common requirement)
convert_boolean_to_string(processed_schema)
# Apply model-specific transformations
if SpecialModelType.GEMINI in self.node_data.model.name:
remove_additional_properties(processed_schema)
return processed_schema
elif SpecialModelType.OLLAMA in self.node_data.model.provider:
return processed_schema
else:
# Default format with name field
return {"schema": processed_schema, "name": "llm_response"}
def _fetch_model_schema(self, provider: str) -> AIModelEntity | None:
"""
Fetch model schema
@ -1243,49 +1142,3 @@ def _handle_completion_template(
)
prompt_messages.append(prompt_message)
return prompt_messages
def remove_additional_properties(schema: dict) -> None:
"""
Remove additionalProperties fields from JSON schema.
Used for models like Gemini that don't support this property.
:param schema: JSON schema to modify in-place
"""
if not isinstance(schema, dict):
return
# Remove additionalProperties at current level
schema.pop("additionalProperties", None)
# Process nested structures recursively
for value in schema.values():
if isinstance(value, dict):
remove_additional_properties(value)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
remove_additional_properties(item)
def convert_boolean_to_string(schema: dict) -> None:
"""
Convert boolean type specifications to string in JSON schema.
:param schema: JSON schema to modify in-place
"""
if not isinstance(schema, dict):
return
# Check for boolean type at current level
if schema.get("type") == "boolean":
schema["type"] = "string"
# Process nested dictionaries and lists recursively
for value in schema.values():
if isinstance(value, dict):
convert_boolean_to_string(value)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
convert_boolean_to_string(item)

View File

@ -1,16 +0,0 @@
from enum import StrEnum
class ResponseFormat(StrEnum):
"""Constants for model response formats"""
JSON_SCHEMA = "json_schema" # model's structured output mode. some model like gemini, gpt-4o, support this mode.
JSON = "JSON" # model's json mode. some model like claude support this mode.
JSON_OBJECT = "json_object" # json mode's another alias. some model like deepseek-chat, qwen use this alias.
class SpecialModelType(StrEnum):
"""Constants for identifying model types"""
GEMINI = "gemini"
OLLAMA = "ollama"

View File

@ -1,17 +0,0 @@
STRUCTURED_OUTPUT_PROMPT = """Youre a helpful AI assistant. You could answer questions and output in JSON format.
constraints:
- You must output in JSON format.
- Do not output boolean value, use string type instead.
- Do not output integer or float value, use number type instead.
eg:
Here is the JSON schema:
{"additionalProperties": false, "properties": {"age": {"type": "number"}, "name": {"type": "string"}}, "required": ["name", "age"], "type": "object"}
Here is the user's question:
My name is John Doe and I am 30 years old.
output:
{"name": "John Doe", "age": 30}
Here is the JSON schema:
{{schema}}
""" # noqa: E501

View File

@ -21,6 +21,7 @@ def init_app(app: DifyApp) -> Celery:
"master_name": dify_config.CELERY_SENTINEL_MASTER_NAME,
"sentinel_kwargs": {
"socket_timeout": dify_config.CELERY_SENTINEL_SOCKET_TIMEOUT,
"password": dify_config.CELERY_SENTINEL_PASSWORD,
},
}

View File

@ -198,7 +198,7 @@ vdb = [
"pymochow==1.3.1",
"pyobvector~=0.1.6",
"qdrant-client==1.9.0",
"tablestore==6.1.0",
"tablestore==6.2.0",
"tcvectordb~=1.6.4",
"tidb-vector==0.0.9",
"upstash-vector==0.6.0",

View File

@ -278,6 +278,23 @@ class DatasetService:
except ProviderTokenNotInitError as ex:
raise ValueError(ex.description)
@staticmethod
def check_reranking_model_setting(tenant_id: str, reranking_model_provider: str, reranking_model: str):
try:
model_manager = ModelManager()
model_manager.get_model_instance(
tenant_id=tenant_id,
provider=reranking_model_provider,
model_type=ModelType.RERANK,
model=reranking_model,
)
except LLMBadRequestError:
raise ValueError(
"No Rerank Model available. Please configure a valid provider in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ValueError(ex.description)
@staticmethod
def update_dataset(dataset_id, data, user):
"""
@ -2207,6 +2224,7 @@ class SegmentService:
# calc embedding use tokens
if document.doc_form == "qa_model":
segment.answer = args.answer
tokens = embedding_model.get_text_embedding_num_tokens(texts=[content + segment.answer])[0]
else:
tokens = embedding_model.get_text_embedding_num_tokens(texts=[content])[0]

View File

@ -0,0 +1,74 @@
from collections.abc import Mapping, Sequence
from typing import Any, Literal
from sqlalchemy.orm import Session
from core.plugin.entities.parameters import PluginParameterOption
from core.plugin.impl.dynamic_select import DynamicSelectClient
from core.tools.tool_manager import ToolManager
from core.tools.utils.configuration import ProviderConfigEncrypter
from extensions.ext_database import db
from models.tools import BuiltinToolProvider
class PluginParameterService:
@staticmethod
def get_dynamic_select_options(
tenant_id: str,
user_id: str,
plugin_id: str,
provider: str,
action: str,
parameter: str,
provider_type: Literal["tool"],
) -> Sequence[PluginParameterOption]:
"""
Get dynamic select options for a plugin parameter.
Args:
tenant_id: The tenant ID.
plugin_id: The plugin ID.
provider: The provider name.
action: The action name.
parameter: The parameter name.
"""
credentials: Mapping[str, Any] = {}
match provider_type:
case "tool":
provider_controller = ToolManager.get_builtin_provider(provider, tenant_id)
# init tool configuration
tool_configuration = ProviderConfigEncrypter(
tenant_id=tenant_id,
config=[x.to_basic_provider_config() for x in provider_controller.get_credentials_schema()],
provider_type=provider_controller.provider_type.value,
provider_identity=provider_controller.entity.identity.name,
)
# check if credentials are required
if not provider_controller.need_credentials:
credentials = {}
else:
# fetch credentials from db
with Session(db.engine) as session:
db_record = (
session.query(BuiltinToolProvider)
.filter(
BuiltinToolProvider.tenant_id == tenant_id,
BuiltinToolProvider.provider == provider,
)
.first()
)
if db_record is None:
raise ValueError(f"Builtin provider {provider} not found when fetching credentials")
credentials = tool_configuration.decrypt(db_record.credentials)
case _:
raise ValueError(f"Invalid provider type: {provider_type}")
return (
DynamicSelectClient()
.fetch_dynamic_select_options(tenant_id, user_id, plugin_id, provider, action, credentials, parameter)
.options
)

View File

@ -9,6 +9,7 @@ from unittest.mock import MagicMock, patch
import pytest
from core.app.entities.app_invoke_entities import InvokeFrom
from core.llm_generator.output_parser.structured_output import _parse_structured_output
from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
from core.model_runtime.entities.message_entities import AssistantPromptMessage
from core.workflow.entities.variable_pool import VariablePool
@ -277,29 +278,6 @@ def test_execute_llm_with_jinja2(flask_req_ctx, setup_code_executor_mock):
def test_extract_json():
node = init_llm_node(
config={
"id": "llm",
"data": {
"title": "123",
"type": "llm",
"model": {"provider": "openai", "name": "gpt-3.5-turbo", "mode": "chat", "completion_params": {}},
"prompt_config": {
"structured_output": {
"enabled": True,
"schema": {
"type": "object",
"properties": {"name": {"type": "string"}, "age": {"type": "number"}},
},
}
},
"prompt_template": [{"role": "user", "text": "{{#sys.query#}}"}],
"memory": None,
"context": {"enabled": False},
"vision": {"enabled": False},
},
},
)
llm_texts = [
'<think>\n\n</think>{"name": "test", "age": 123', # resoning model (deepseek-r1)
'{"name":"test","age":123}', # json schema model (gpt-4o)
@ -308,4 +286,4 @@ def test_extract_json():
'{"name":"test",age:123}', # without quotes (qwen-2.5-0.5b)
]
result = {"name": "test", "age": 123}
assert all(node._parse_structured_output(item) == result for item in llm_texts)
assert all(_parse_structured_output(item) == result for item in llm_texts)

File diff suppressed because it is too large

View File

@ -285,6 +285,7 @@ BROKER_USE_SSL=false
# If you are using Redis Sentinel for high availability, configure the following settings.
CELERY_USE_SENTINEL=false
CELERY_SENTINEL_MASTER_NAME=
CELERY_SENTINEL_PASSWORD=
CELERY_SENTINEL_SOCKET_TIMEOUT=0.1
# ------------------------------

View File

@ -79,6 +79,7 @@ x-shared-env: &shared-api-worker-env
BROKER_USE_SSL: ${BROKER_USE_SSL:-false}
CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false}
CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-}
CELERY_SENTINEL_PASSWORD: ${CELERY_SENTINEL_PASSWORD:-}
CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1}
WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*}
CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*}

View File

@ -191,6 +191,7 @@ function DetailPanel({ detail, onFeedback }: IDetailPanel) {
const { userProfile: { timezone } } = useAppContext()
const { formatTime } = useTimestamp()
const { onClose, appDetail } = useContext(DrawerContext)
const { notify } = useContext(ToastContext)
const { currentLogItem, setCurrentLogItem, showMessageLogModal, setShowMessageLogModal, showPromptLogModal, setShowPromptLogModal, currentLogModalActiveTab } = useAppStore(useShallow(state => ({
currentLogItem: state.currentLogItem,
setCurrentLogItem: state.setCurrentLogItem,
@ -312,18 +313,34 @@ function DetailPanel({ detail, onFeedback }: IDetailPanel) {
return item
}))
}, [allChatItems])
const handleAnnotationRemoved = useCallback((index: number) => {
setAllChatItems(allChatItems.map((item, i) => {
if (i === index) {
return {
...item,
content: item.content,
annotation: undefined,
}
const handleAnnotationRemoved = useCallback(async (index: number): Promise<boolean> => {
const annotation = allChatItems[index]?.annotation
try {
if (annotation?.id) {
const { delAnnotation } = await import('@/service/annotation')
await delAnnotation(appDetail?.id || '', annotation.id)
}
return item
}))
}, [allChatItems])
setAllChatItems(allChatItems.map((item, i) => {
if (i === index) {
return {
...item,
content: item.content,
annotation: undefined,
}
}
return item
}))
notify({ type: 'success', message: t('common.actionMsg.modifiedSuccessfully') })
return true
}
catch {
notify({ type: 'error', message: t('common.actionMsg.modifiedUnsuccessfully') })
return false
}
}, [allChatItems, appDetail?.id, t])
const fetchInitiated = useRef(false)

File diff suppressed because one or more lines are too long

(binary image changed; size: 6.9 KiB)

File diff suppressed because one or more lines are too long

(binary image changed; size: 6.9 KiB)

View File

@ -18,7 +18,7 @@ describe('InputNumber Component', () => {
it('renders input with default values', () => {
render(<InputNumber {...defaultProps} />)
const input = screen.getByRole('textbox')
const input = screen.getByRole('spinbutton')
expect(input).toBeInTheDocument()
})
@ -56,7 +56,7 @@ describe('InputNumber Component', () => {
it('handles direct input changes', () => {
render(<InputNumber {...defaultProps} />)
const input = screen.getByRole('textbox')
const input = screen.getByRole('spinbutton')
fireEvent.change(input, { target: { value: '42' } })
expect(defaultProps.onChange).toHaveBeenCalledWith(42)
@ -64,7 +64,7 @@ describe('InputNumber Component', () => {
it('handles empty input', () => {
render(<InputNumber {...defaultProps} value={0} />)
const input = screen.getByRole('textbox')
const input = screen.getByRole('spinbutton')
fireEvent.change(input, { target: { value: '' } })
expect(defaultProps.onChange).toHaveBeenCalledWith(undefined)
@ -72,7 +72,7 @@ describe('InputNumber Component', () => {
it('handles invalid input', () => {
render(<InputNumber {...defaultProps} />)
const input = screen.getByRole('textbox')
const input = screen.getByRole('spinbutton')
fireEvent.change(input, { target: { value: 'abc' } })
expect(defaultProps.onChange).not.toHaveBeenCalled()
@ -86,7 +86,7 @@ describe('InputNumber Component', () => {
it('disables controls when disabled prop is true', () => {
render(<InputNumber {...defaultProps} disabled />)
const input = screen.getByRole('textbox')
const input = screen.getByRole('spinbutton')
const incrementBtn = screen.getByRole('button', { name: /increment/i })
const decrementBtn = screen.getByRole('button', { name: /decrement/i })

View File

@ -55,8 +55,8 @@ export const InputNumber: FC<InputNumberProps> = (props) => {
return <div className={classNames('flex', wrapClassName)}>
<Input {...rest}
// disable default controller
type='text'
className={classNames('rounded-r-none', className)}
type='number'
className={classNames('no-spinner rounded-r-none', className)}
value={value}
max={max}
min={min}
@ -77,8 +77,8 @@ export const InputNumber: FC<InputNumberProps> = (props) => {
size={size}
/>
<div className={classNames(
'flex flex-col bg-components-input-bg-normal rounded-r-md border-l border-divider-subtle text-text-tertiary focus:shadow-xs',
disabled && 'opacity-50 cursor-not-allowed',
'flex flex-col rounded-r-md border-l border-divider-subtle bg-components-input-bg-normal text-text-tertiary focus:shadow-xs',
disabled && 'cursor-not-allowed opacity-50',
controlWrapClassName)}
>
<button

View File

@ -1,10 +1,10 @@
'use client'
import type { FC } from 'react'
import React, { useEffect, useState } from 'react'
import React, { useEffect, useRef, useState } from 'react'
import { Combobox, ComboboxButton, ComboboxInput, ComboboxOption, ComboboxOptions, Listbox, ListboxButton, ListboxOption, ListboxOptions } from '@headlessui/react'
import { ChevronDownIcon, ChevronUpIcon, XMarkIcon } from '@heroicons/react/20/solid'
import Badge from '../badge/index'
import { RiCheckLine } from '@remixicon/react'
import { RiCheckLine, RiLoader4Line } from '@remixicon/react'
import { useTranslation } from 'react-i18next'
import classNames from '@/utils/classnames'
import {
@ -51,6 +51,8 @@ export type ISelectProps = {
item: Item
selected: boolean
}) => React.ReactNode
isLoading?: boolean
onOpenChange?: (open: boolean) => void
}
const Select: FC<ISelectProps> = ({
className,
@ -178,17 +180,20 @@ const SimpleSelect: FC<ISelectProps> = ({
defaultValue = 1,
disabled = false,
onSelect,
onOpenChange,
placeholder,
optionWrapClassName,
optionClassName,
hideChecked,
notClearable,
renderOption,
isLoading = false,
}) => {
const { t } = useTranslation()
const localPlaceholder = placeholder || t('common.placeholder.select')
const [selectedItem, setSelectedItem] = useState<Item | null>(null)
useEffect(() => {
let defaultSelect = null
const existed = items.find((item: Item) => item.value === defaultValue)
@ -199,8 +204,10 @@ const SimpleSelect: FC<ISelectProps> = ({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [defaultValue])
const listboxRef = useRef<HTMLDivElement>(null)
return (
<Listbox
<Listbox ref={listboxRef}
value={selectedItem}
onChange={(value: Item) => {
if (!disabled) {
@ -212,10 +219,17 @@ const SimpleSelect: FC<ISelectProps> = ({
<div className={classNames('group/simple-select relative h-9', wrapperClassName)}>
{renderTrigger && <ListboxButton className='w-full'>{renderTrigger(selectedItem)}</ListboxButton>}
{!renderTrigger && (
<ListboxButton className={classNames(`flex items-center w-full h-full rounded-lg border-0 bg-components-input-bg-normal pl-3 pr-10 sm:text-sm sm:leading-6 focus-visible:outline-none focus-visible:bg-state-base-hover-alt group-hover/simple-select:bg-state-base-hover-alt ${disabled ? 'cursor-not-allowed' : 'cursor-pointer'}`, className)}>
<ListboxButton onClick={() => {
// read the data-open attribute; setTimeout defers until Headless UI has applied it
setTimeout(() => {
if (listboxRef.current)
onOpenChange?.(listboxRef.current.getAttribute('data-open') !== null)
})
}} className={classNames(`flex items-center w-full h-full rounded-lg border-0 bg-components-input-bg-normal pl-3 pr-10 sm:text-sm sm:leading-6 focus-visible:outline-none focus-visible:bg-state-base-hover-alt group-hover/simple-select:bg-state-base-hover-alt ${disabled ? 'cursor-not-allowed' : 'cursor-pointer'}`, className)}>
<span className={classNames('block truncate text-left system-sm-regular text-components-input-text-filled', !selectedItem?.name && 'text-components-input-text-placeholder')}>{selectedItem?.name ?? localPlaceholder}</span>
<span className="absolute inset-y-0 right-0 flex items-center pr-2">
{(selectedItem && !notClearable)
{isLoading ? <RiLoader4Line className='h-3.5 w-3.5 animate-spin text-text-secondary' />
: (selectedItem && !notClearable)
? (
<XMarkIcon
onClick={(e) => {
@ -237,7 +251,7 @@ const SimpleSelect: FC<ISelectProps> = ({
</ListboxButton>
)}
{!disabled && (
{(!disabled) && (
<ListboxOptions className={classNames('absolute z-10 mt-1 px-1 max-h-60 w-full overflow-auto rounded-xl bg-components-panel-bg-blur backdrop-blur-sm py-1 text-base shadow-lg border-components-panel-border border-[0.5px] focus:outline-none sm:text-sm', optionWrapClassName)}>
{items.map((item: Item) => (
<ListboxOption
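
A minimal consumer sketch of the two new SimpleSelect props above; refreshOptions is a hypothetical fetch used only for illustration:

const [loading, setLoading] = useState(false)
<SimpleSelect
  items={items}
  isLoading={loading}
  onOpenChange={(open) => {
    if (!open) return
    setLoading(true)
    refreshOptions() // hypothetical: reload options while the spinner shows
      .finally(() => setLoading(false))
  }}
  onSelect={item => console.log(item.value)}
/>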

View File

@ -1,6 +1,5 @@
import { useState } from 'react'
import { useCallback, useState } from 'react'
import type { ChangeEvent, FC, KeyboardEvent } from 'react'
import { } from 'use-context-selector'
import { useTranslation } from 'react-i18next'
import AutosizeInput from 'react-18-input-autosize'
import { RiAddLine, RiCloseLine } from '@remixicon/react'
@ -40,6 +39,29 @@ const TagInput: FC<TagInputProps> = ({
onChange(copyItems)
}
const handleNewTag = useCallback((value: string) => {
const valueTrimmed = value.trim()
if (!valueTrimmed) {
notify({ type: 'error', message: t('datasetDocuments.segment.keywordEmpty') })
return
}
if (items.find(item => item === valueTrimmed)) {
notify({ type: 'error', message: t('datasetDocuments.segment.keywordDuplicate') })
return
}
if (valueTrimmed.length > 20) {
notify({ type: 'error', message: t('datasetDocuments.segment.keywordError') })
return
}
onChange([...items, valueTrimmed])
setTimeout(() => {
setValue('')
})
}, [items, onChange, notify, t])
const handleKeyDown = (e: KeyboardEvent) => {
if (isSpecialMode && e.key === 'Enter')
setValue(`${value}`)
@ -48,24 +70,12 @@ const TagInput: FC<TagInputProps> = ({
if (isSpecialMode)
e.preventDefault()
const valueTrimmed = value.trim()
if (!valueTrimmed || (items.find(item => item === valueTrimmed)))
return
if (valueTrimmed.length > 20) {
notify({ type: 'error', message: t('datasetDocuments.segment.keywordError') })
return
}
onChange([...items, valueTrimmed])
setTimeout(() => {
setValue('')
})
handleNewTag(value)
}
}
const handleBlur = () => {
setValue('')
handleNewTag(value)
setFocused(false)
}
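
With validation extracted into handleNewTag, Enter and blur now share one code path and one feedback order: empty, duplicate, then length. For illustration, given items = ['foo']:

handleNewTag('') // notifies keywordEmpty, items unchanged
handleNewTag('foo') // notifies keywordDuplicate, items unchanged
handleNewTag('x'.repeat(21)) // notifies keywordError (max length 20), items unchanged
handleNewTag('bar') // onChange(['foo', 'bar']), then the input is cleared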

View File

@ -52,8 +52,8 @@ const StepThree = ({ datasetId, datasetName, indexingType, creationCache, retrie
datasetId={datasetId || creationCache?.dataset?.id || ''}
batchId={creationCache?.batch || ''}
documents={creationCache?.documents as FullDocumentDetail[]}
indexingType={indexingType || creationCache?.dataset?.indexing_technique}
retrievalMethod={retrievalMethod || creationCache?.dataset?.retrieval_model?.search_method}
indexingType={creationCache?.dataset?.indexing_technique || indexingType}
retrievalMethod={creationCache?.dataset?.retrieval_model_dict?.search_method || retrievalMethod}
/>
</div>
</div>

View File

@ -507,13 +507,15 @@ const StepTwo = ({
const separator = rules.segmentation.separator
const max = rules.segmentation.max_tokens
const overlap = rules.segmentation.chunk_overlap
const isHierarchicalDocument = documentDetail.doc_form === ChunkingMode.parentChild
|| (rules.parent_mode && rules.subchunk_segmentation)
setSegmentIdentifier(separator)
setMaxChunkLength(max)
setOverlap(overlap!)
setRules(rules.pre_processing_rules)
setDefaultConfig(rules)
if (documentDetail.dataset_process_rule.mode === 'hierarchical') {
if (isHierarchicalDocument) {
setParentChildConfig({
chunkForContext: rules.parent_mode || 'paragraph',
parent: {
@ -575,6 +577,7 @@ const StepTwo = ({
onSuccess(data) {
updateIndexingTypeCache && updateIndexingTypeCache(indexType as string)
updateResultCache && updateResultCache(data)
updateRetrievalMethodCache && updateRetrievalMethodCache(retrievalConfig.search_method as string)
},
})
}

View File

@ -19,12 +19,14 @@ export enum FormTypeEnum {
toolSelector = 'tool-selector',
multiToolSelector = 'array[tools]',
appSelector = 'app-selector',
dynamicSelect = 'dynamic-select',
}
export type FormOption = {
label: TypeWithI18N
value: string
show_on: FormShowOnObject[]
icon?: string
}
export enum ModelTypeEnum {
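
An illustrative FormOption for a dynamic-select parameter, exercising the new optional icon field (the values and the i18n shape are assumptions, not from this commit):

const option: FormOption = {
  label: { en_US: 'Sheet 1', zh_Hans: 'Sheet 1' }, // assumed TypeWithI18N shape
  value: 'sheet_1',
  show_on: [],
  icon: 'https://example.com/sheet.png', // new optional field
}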

View File

@ -30,7 +30,7 @@ const HeaderWrapper = ({
return (
<div className={classNames(
'sticky top-0 left-0 right-0 z-30 flex flex-col grow-0 shrink-0 basis-auto min-h-[56px]',
'sticky left-0 right-0 top-0 z-[15] flex min-h-[56px] shrink-0 grow-0 basis-auto flex-col',
s.header,
isBordered ? 'border-b border-divider-regular' : '',
)}

View File

@ -18,6 +18,15 @@ type Props = {
onSaved: (value: Record<string, any>) => void
}
const extractDefaultValues = (schemas: any[]) => {
const result: Record<string, any> = {}
for (const field of schemas) {
if (field.default !== undefined)
result[field.name] = field.default
}
return result
}
const EndpointModal: FC<Props> = ({
formSchemas,
defaultValues = {},
@ -26,7 +35,10 @@ const EndpointModal: FC<Props> = ({
}) => {
const getValueFromI18nObject = useRenderI18nObject()
const { t } = useTranslation()
const [tempCredential, setTempCredential] = React.useState<any>(defaultValues)
const initialValues = Object.keys(defaultValues).length > 0
? defaultValues
: extractDefaultValues(formSchemas)
const [tempCredential, setTempCredential] = React.useState<any>(initialValues)
const handleSave = () => {
for (const field of formSchemas) {
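
For illustration, the extractDefaultValues helper above only picks up fields that declare an explicit default (schema values here are hypothetical):

extractDefaultValues([
  { name: 'api_base', default: 'https://example.com' },
  { name: 'timeout', default: 30 },
  { name: 'api_key' }, // no default, omitted
])
// => { api_base: 'https://example.com', timeout: 30 }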

View File

@ -117,6 +117,7 @@ const MultipleToolSelector = ({
)}
{!disabled && (
<ActionButton className='mx-1' onClick={() => {
setCollapse(false)
setOpen(!open)
setPanelShowState(true)
}}>
@ -126,23 +127,6 @@ const MultipleToolSelector = ({
</div>
{!collapse && (
<>
<ToolSelector
nodeId={nodeId}
nodeOutputVars={nodeOutputVars}
availableNodes={availableNodes}
scope={scope}
value={undefined}
selectedTools={value}
onSelect={handleAdd}
controlledState={open}
onControlledStateChange={setOpen}
trigger={
<div className=''></div>
}
panelShowState={panelShowState}
onPanelShowStateChange={setPanelShowState}
isEdit={false}
/>
{value.length === 0 && (
<div className='system-xs-regular flex justify-center rounded-[10px] bg-background-section p-3 text-text-tertiary'>{t('plugin.detailPanel.toolSelector.empty')}</div>
)}
@ -164,6 +148,23 @@ const MultipleToolSelector = ({
))}
</>
)}
<ToolSelector
nodeId={nodeId}
nodeOutputVars={nodeOutputVars}
availableNodes={availableNodes}
scope={scope}
value={undefined}
selectedTools={value}
onSelect={handleAdd}
controlledState={open}
onControlledStateChange={setOpen}
trigger={
<div className=''></div>
}
panelShowState={panelShowState}
onPanelShowStateChange={setPanelShowState}
isEdit={false}
/>
</>
)
}

View File

@ -275,7 +275,7 @@ const ToolSelector: FC<Props> = ({
/>
)}
</PortalToFollowElemTrigger>
<PortalToFollowElemContent className='z-[1000]'>
<PortalToFollowElemContent>
<div className={cn('relative max-h-[642px] min-h-20 w-[361px] rounded-xl border-[0.5px] border-components-panel-border bg-components-panel-bg-blur pb-4 shadow-lg backdrop-blur-sm', !isShowSettingAuth && 'overflow-y-auto pb-2')}>
{!isShowSettingAuth && (
<>

View File

@ -36,7 +36,7 @@ export type ToolValue = {
provider_name: string
tool_name: string
tool_label: string
tool_description: string
tool_description?: string
settings?: Record<string, any>
parameters?: Record<string, any>
enabled?: boolean

View File

@ -33,6 +33,8 @@ const HeaderInNormal = ({
const setShowWorkflowVersionHistoryPanel = useStore(s => s.setShowWorkflowVersionHistoryPanel)
const setShowEnvPanel = useStore(s => s.setShowEnvPanel)
const setShowDebugAndPreviewPanel = useStore(s => s.setShowDebugAndPreviewPanel)
const setShowVariableInspectPanel = useStore(s => s.setShowVariableInspectPanel)
const setShowChatVariablePanel = useStore(s => s.setShowChatVariablePanel)
const nodes = useNodes<StartNodeType>()
const selectedNode = nodes.find(node => node.data.selected)
const { handleBackupDraft } = useWorkflowRun()
@ -46,8 +48,10 @@ const HeaderInNormal = ({
setShowWorkflowVersionHistoryPanel(true)
setShowEnvPanel(false)
setShowDebugAndPreviewPanel(false)
setShowVariableInspectPanel(false)
setShowChatVariablePanel(false)
}, [handleBackupDraft, workflowStore, handleNodeSelect, selectedNode,
setShowWorkflowVersionHistoryPanel, setShowEnvPanel, setShowDebugAndPreviewPanel])
setShowWorkflowVersionHistoryPanel, setShowEnvPanel, setShowDebugAndPreviewPanel, setShowVariableInspectPanel, setShowChatVariablePanel])
return (
<>

View File

@ -15,6 +15,7 @@ import {
import useToggleExpend from '@/app/components/workflow/nodes/_base/hooks/use-toggle-expend'
import type { FileEntity } from '@/app/components/base/file-uploader/types'
import FileListInLog from '@/app/components/base/file-uploader/file-list-in-log'
import ActionButton from '@/app/components/base/action-button'
type Props = {
className?: string
@ -88,15 +89,16 @@ const Base: FC<Props> = ({
<CodeGeneratorButton onGenerated={onGenerated} codeLanguages={codeLanguages} />
</div>
)}
{!isCopied
? (
<Clipboard className='mx-1 h-3.5 w-3.5 cursor-pointer text-text-tertiary' onClick={handleCopy} />
)
: (
<ClipboardCheck className='mx-1 h-3.5 w-3.5 text-text-tertiary' />
)
}
<ActionButton className='ml-1' onClick={handleCopy}>
{!isCopied
? (
<Clipboard className='h-4 w-4 cursor-pointer' />
)
: (
<ClipboardCheck className='h-4 w-4' />
)
}
</ActionButton>
<div className='ml-1'>
<ToggleExpandBtn isExpand={isExpand} onExpandChange={setIsExpand} />
</div>

View File

@ -13,6 +13,8 @@ type Props = {
readonly: boolean
value: string
onChange: (value: string | number, varKindType: VarKindType, varInfo?: Var) => void
onOpenChange?: (open: boolean) => void
isLoading?: boolean
}
const DEFAULT_SCHEMA = {} as CredentialFormSchema
@ -22,6 +24,8 @@ const ConstantField: FC<Props> = ({
readonly,
value,
onChange,
onOpenChange,
isLoading,
}) => {
const language = useLanguage()
const placeholder = (schema as CredentialFormSchemaSelect).placeholder
@ -36,7 +40,7 @@ const ConstantField: FC<Props> = ({
return (
<>
{schema.type === FormTypeEnum.select && (
{(schema.type === FormTypeEnum.select || schema.type === FormTypeEnum.dynamicSelect) && (
<SimpleSelect
wrapperClassName='w-full !h-8'
className='flex items-center'
@ -45,6 +49,8 @@ const ConstantField: FC<Props> = ({
items={(schema as CredentialFormSchemaSelect).options.map(option => ({ value: option.value, name: option.label[language] || option.label.en_US }))}
onSelect={item => handleSelectChange(item.value)}
placeholder={placeholder?.[language] || placeholder?.en_US}
onOpenChange={onOpenChange}
isLoading={isLoading}
/>
)}
{schema.type === FormTypeEnum.textNumber && (

View File

@ -1090,13 +1090,13 @@ export const getNodeUsedVarPassToServerKey = (node: Node, valueSelector: ValueSe
break
}
case BlockEnum.Code: {
const targetVar = (data as CodeNodeType).variables?.find(v => v.value_selector.join('.') === valueSelector.join('.'))
const targetVar = (data as CodeNodeType).variables?.find(v => Array.isArray(v.value_selector) && v.value_selector.join('.') === valueSelector.join('.'))
if (targetVar)
res = targetVar.variable
break
}
case BlockEnum.TemplateTransform: {
const targetVar = (data as TemplateTransformNodeType).variables?.find(v => v.value_selector.join('.') === valueSelector.join('.'))
const targetVar = (data as TemplateTransformNodeType).variables?.find(v => Array.isArray(v.value_selector) && v.value_selector.join('.') === valueSelector.join('.'))
if (targetVar)
res = targetVar.variable
break

View File

@ -6,6 +6,7 @@ import {
RiArrowDownSLine,
RiCloseLine,
RiErrorWarningFill,
RiLoader4Line,
RiMoreLine,
} from '@remixicon/react'
import produce from 'immer'
@ -16,8 +17,9 @@ import VarReferencePopup from './var-reference-popup'
import { getNodeInfoById, isConversationVar, isENV, isSystemVar, varTypeToStructType } from './utils'
import ConstantField from './constant-field'
import cn from '@/utils/classnames'
import type { Node, NodeOutPutVar, ValueSelector, Var } from '@/app/components/workflow/types'
import type { CredentialFormSchema } from '@/app/components/header/account-setting/model-provider-page/declarations'
import type { Node, NodeOutPutVar, ToolWithProvider, ValueSelector, Var } from '@/app/components/workflow/types'
import type { CredentialFormSchemaSelect } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { type CredentialFormSchema, type FormOption, FormTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { BlockEnum } from '@/app/components/workflow/types'
import { VarBlockIcon } from '@/app/components/workflow/block-icon'
import { Line3 } from '@/app/components/base/icons/src/public/common'
@ -40,6 +42,8 @@ import Tooltip from '@/app/components/base/tooltip'
import { isExceptionVariable } from '@/app/components/workflow/utils'
import VarFullPathPanel from './var-full-path-panel'
import { noop } from 'lodash-es'
import { useFetchDynamicOptions } from '@/service/use-plugins'
import type { Tool } from '@/app/components/tools/types'
const TRIGGER_DEFAULT_WIDTH = 227
@ -68,6 +72,8 @@ type Props = {
minWidth?: number
popupFor?: 'assigned' | 'toAssigned'
zIndex?: number
currentTool?: Tool
currentProvider?: ToolWithProvider
}
const DEFAULT_VALUE_SELECTOR: Props['value'] = []
@ -97,6 +103,8 @@ const VarReferencePicker: FC<Props> = ({
minWidth,
popupFor,
zIndex,
currentTool,
currentProvider,
}) => {
const { t } = useTranslation()
const store = useStoreApi()
@ -316,6 +324,42 @@ const VarReferencePicker: FC<Props> = ({
return null
}, [isValidVar, isShowAPart, hasValue, t, outputVarNode?.title, outputVarNode?.type, value, type])
const [dynamicOptions, setDynamicOptions] = useState<FormOption[] | null>(null)
const [isLoading, setIsLoading] = useState(false)
const { mutateAsync: fetchDynamicOptions } = useFetchDynamicOptions(
currentProvider?.plugin_id || '', currentProvider?.name || '', currentTool?.name || '', (schema as CredentialFormSchemaSelect)?.variable || '',
'tool',
)
const handleFetchDynamicOptions = async () => {
if (schema?.type !== FormTypeEnum.dynamicSelect || !currentTool || !currentProvider)
return
setIsLoading(true)
try {
const data = await fetchDynamicOptions()
setDynamicOptions(data?.options || [])
}
finally {
setIsLoading(false)
}
}
useEffect(() => {
handleFetchDynamicOptions()
}, [currentTool, currentProvider, schema])
const schemaWithDynamicSelect = useMemo(() => {
if (schema?.type !== FormTypeEnum.dynamicSelect)
return schema
// rewrite schema.options with dynamicOptions
if (dynamicOptions) {
return {
...schema,
options: dynamicOptions,
}
}
return schema
}, [dynamicOptions, schema])
return (
<div className={cn(className, !readonly && 'cursor-pointer')}>
<PortalToFollowElem
@ -366,8 +410,9 @@ const VarReferencePicker: FC<Props> = ({
<ConstantField
value={value as string}
onChange={onChange as ((value: string | number, varKindType: VarKindType, varInfo?: Var) => void)}
schema={schema as CredentialFormSchema}
schema={schemaWithDynamicSelect as CredentialFormSchema}
readonly={readonly}
isLoading={isLoading}
/>
)
: (
@ -412,6 +457,7 @@ const VarReferencePicker: FC<Props> = ({
)}
<div className='flex items-center text-text-accent'>
{!hasValue && <Variable02 className='h-3.5 w-3.5' />}
{isLoading && <RiLoader4Line className='h-3.5 w-3.5 animate-spin text-text-secondary' />}
{isEnv && <Env className='h-3.5 w-3.5 text-util-colors-violet-violet-600' />}
{isChatVar && <BubbleX className='h-3.5 w-3.5 text-util-colors-teal-teal-700' />}
<div className={cn('ml-0.5 truncate text-xs font-medium', isEnv && '!text-text-secondary', isChatVar && 'text-util-colors-teal-teal-700', isException && 'text-text-warning')} title={varName} style={{
@ -424,7 +470,16 @@ const VarReferencePicker: FC<Props> = ({
{!isValidVar && <RiErrorWarningFill className='ml-0.5 h-3 w-3 text-text-destructive' />}
</>
)
: <div className={`overflow-hidden ${readonly ? 'text-components-input-text-disabled' : 'text-components-input-text-placeholder'} system-sm-regular text-ellipsis`}>{placeholder ?? t('workflow.common.setVarValuePlaceholder')}</div>}
: <div className={`overflow-hidden ${readonly ? 'text-components-input-text-disabled' : 'text-components-input-text-placeholder'} system-sm-regular text-ellipsis`}>
{isLoading ? (
<div className='flex items-center'>
<RiLoader4Line className='mr-1 h-3.5 w-3.5 animate-spin text-text-secondary' />
<span>{placeholder ?? t('workflow.common.setVarValuePlaceholder')}</span>
</div>
) : (
placeholder ?? t('workflow.common.setVarValuePlaceholder')
)}
</div>}
</div>
</Tooltip>
</div>

View File

@ -6,7 +6,7 @@ import { useTranslation } from 'react-i18next'
import type { ToolVarInputs } from '../types'
import { VarType as VarKindType } from '../types'
import cn from '@/utils/classnames'
import type { ValueSelector, Var } from '@/app/components/workflow/types'
import type { ToolWithProvider, ValueSelector, Var } from '@/app/components/workflow/types'
import type { CredentialFormSchema } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { FormTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { useLanguage } from '@/app/components/header/account-setting/model-provider-page/hooks'
@ -17,6 +17,7 @@ import { VarType } from '@/app/components/workflow/types'
import AppSelector from '@/app/components/plugins/plugin-detail-panel/app-selector'
import ModelParameterModal from '@/app/components/plugins/plugin-detail-panel/model-selector'
import { noop } from 'lodash-es'
import type { Tool } from '@/app/components/tools/types'
type Props = {
readOnly: boolean
@ -27,6 +28,8 @@ type Props = {
onOpen?: (index: number) => void
isSupportConstantValue?: boolean
filterVar?: (payload: Var, valueSelector: ValueSelector) => boolean
currentTool?: Tool
currentProvider?: ToolWithProvider
}
const InputVarList: FC<Props> = ({
@ -38,6 +41,8 @@ const InputVarList: FC<Props> = ({
onOpen = noop,
isSupportConstantValue,
filterVar,
currentTool,
currentProvider,
}) => {
const language = useLanguage()
const { t } = useTranslation()
@ -58,6 +63,8 @@ const InputVarList: FC<Props> = ({
return 'ModelSelector'
else if (type === FormTypeEnum.toolSelector)
return 'ToolSelector'
else if (type === FormTypeEnum.dynamicSelect || type === FormTypeEnum.select)
return 'Select'
else
return 'String'
}
@ -149,6 +156,7 @@ const InputVarList: FC<Props> = ({
const handleOpen = useCallback((index: number) => {
return () => onOpen(index)
}, [onOpen])
return (
<div className='space-y-3'>
{
@ -163,7 +171,8 @@ const InputVarList: FC<Props> = ({
} = schema
const varInput = value[variable]
const isNumber = type === FormTypeEnum.textNumber
const isSelect = type === FormTypeEnum.select
const isDynamicSelect = type === FormTypeEnum.dynamicSelect
const isSelect = type === FormTypeEnum.select || type === FormTypeEnum.dynamicSelect
const isFile = type === FormTypeEnum.file || type === FormTypeEnum.files
const isAppSelector = type === FormTypeEnum.appSelector
const isModelSelector = type === FormTypeEnum.modelSelector
@ -198,11 +207,13 @@ const InputVarList: FC<Props> = ({
value={varInput?.type === VarKindType.constant ? (varInput?.value ?? '') : (varInput?.value ?? [])}
onChange={handleNotMixedTypeChange(variable)}
onOpen={handleOpen(index)}
defaultVarKindType={varInput?.type || (isNumber ? VarKindType.constant : VarKindType.variable)}
defaultVarKindType={varInput?.type || ((isNumber || isDynamicSelect) ? VarKindType.constant : VarKindType.variable)}
isSupportConstantValue={isSupportConstantValue}
filterVar={isNumber ? filterVar : undefined}
availableVars={isSelect ? availableVars : undefined}
schema={schema}
currentTool={currentTool}
currentProvider={currentProvider}
/>
)}
{isFile && (

View File

@ -42,6 +42,7 @@ const Panel: FC<NodePanelProps<ToolNodeType>> = ({
isLoading,
outputSchema,
hasObjectOutput,
currTool,
} = useConfig(id, data)
if (isLoading) {
@ -80,6 +81,8 @@ const Panel: FC<NodePanelProps<ToolNodeType>> = ({
filterVar={filterVar}
isSupportConstantValue
onOpen={handleOnVarOpen}
currentProvider={currCollection}
currentTool={currTool}
/>
</Field>
)}

View File

@ -1,5 +1,5 @@
import type { FC } from 'react'
import { memo, useEffect, useRef } from 'react'
import { memo, useCallback, useEffect, useRef } from 'react'
import { useNodes } from 'reactflow'
import type { CommonNodeType } from '../types'
import { Panel as NodePanel } from '../nodes'
@ -13,6 +13,51 @@ export type PanelProps = {
right?: React.ReactNode
}
}
/**
* Reference: MDN standard implementation, https://developer.mozilla.org/zh-CN/docs/Web/API/ResizeObserverEntry/borderBoxSize
*/
const getEntryWidth = (entry: ResizeObserverEntry, element: HTMLElement): number => {
if (entry.borderBoxSize?.length > 0)
return entry.borderBoxSize[0].inlineSize
if (entry.contentRect.width > 0)
return entry.contentRect.width
return element.getBoundingClientRect().width
}
const useResizeObserver = (
callback: (width: number) => void,
dependencies: React.DependencyList = [],
) => {
const elementRef = useRef<HTMLDivElement>(null)
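// note: useCallback here keys on callback identity, so callers should pass a stable function (the store setters used below are stable across renders)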
const stableCallback = useCallback(callback, [callback])
useEffect(() => {
const element = elementRef.current
if (!element) return
const resizeObserver = new ResizeObserver((entries) => {
for (const entry of entries) {
const width = getEntryWidth(entry, element)
stableCallback(width)
}
})
resizeObserver.observe(element)
const initialWidth = element.getBoundingClientRect().width
stableCallback(initialWidth)
return () => {
resizeObserver.disconnect()
}
}, [stableCallback, ...dependencies])
return elementRef
}
const Panel: FC<PanelProps> = ({
components,
}) => {
@ -20,44 +65,21 @@ const Panel: FC<PanelProps> = ({
const selectedNode = nodes.find(node => node.data.selected)
const showEnvPanel = useStore(s => s.showEnvPanel)
const isRestoring = useStore(s => s.isRestoring)
const showWorkflowVersionHistoryPanel = useStore(s => s.showWorkflowVersionHistoryPanel)
const rightPanelRef = useRef<HTMLDivElement>(null)
const setRightPanelWidth = useStore(s => s.setRightPanelWidth)
// get right panel width
useEffect(() => {
if (rightPanelRef.current) {
const resizeRightPanelObserver = new ResizeObserver((entries) => {
for (const entry of entries) {
const { inlineSize } = entry.borderBoxSize[0]
setRightPanelWidth(inlineSize)
}
})
resizeRightPanelObserver.observe(rightPanelRef.current)
return () => {
resizeRightPanelObserver.disconnect()
}
}
}, [setRightPanelWidth])
const otherPanelRef = useRef<HTMLDivElement>(null)
const setOtherPanelWidth = useStore(s => s.setOtherPanelWidth)
// get other panel width
useEffect(() => {
if (otherPanelRef.current) {
const resizeOtherPanelObserver = new ResizeObserver((entries) => {
for (const entry of entries) {
const { inlineSize } = entry.borderBoxSize[0]
setOtherPanelWidth(inlineSize)
}
})
resizeOtherPanelObserver.observe(otherPanelRef.current)
return () => {
resizeOtherPanelObserver.disconnect()
}
}
}, [setOtherPanelWidth])
const rightPanelRef = useResizeObserver(
setRightPanelWidth,
[setRightPanelWidth, selectedNode, showEnvPanel, showWorkflowVersionHistoryPanel],
)
const otherPanelRef = useResizeObserver(
setOtherPanelWidth,
[setOtherPanelWidth, showEnvPanel, showWorkflowVersionHistoryPanel],
)
return (
<div
ref={rightPanelRef}
@ -65,26 +87,14 @@ const Panel: FC<PanelProps> = ({
className={cn('absolute bottom-1 right-0 top-14 z-10 flex outline-none')}
key={`${isRestoring}`}
>
{
components?.left
}
{
!!selectedNode && (
<NodePanel {...selectedNode!} />
)
}
{components?.left}
{!!selectedNode && <NodePanel {...selectedNode} />}
<div
className='relative'
className="relative"
ref={otherPanelRef}
>
{
components?.right
}
{
showEnvPanel && (
<EnvPanel />
)
}
{components?.right}
{showEnvPanel && <EnvPanel />}
</div>
</div>
)
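
A minimal usage sketch of the extracted useResizeObserver hook; the logging callback is illustrative:

const Example: FC = () => {
  const ref = useResizeObserver((width) => {
    console.log('observed width:', width) // fired once on mount, then on every resize
  })
  return <div ref={ref} />
}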

View File

@ -29,6 +29,7 @@ import type {
import ErrorHandleTip from '@/app/components/workflow/nodes/_base/components/error-handle/error-handle-tip'
import { hasRetryNode } from '@/app/components/workflow/utils'
import { useDocLink } from '@/context/i18n'
import Tooltip from '@/app/components/base/tooltip'
type Props = {
className?: string
@ -129,10 +130,16 @@ const NodePanel: FC<Props> = ({
/>
)}
<BlockIcon size={inMessage ? 'xs' : 'sm'} className={cn('mr-2 shrink-0', inMessage && '!mr-1')} type={nodeInfo.node_type} toolIcon={nodeInfo.extras?.icon || nodeInfo.extras} />
<div className={cn(
'system-xs-semibold-uppercase grow truncate text-text-secondary',
hideInfo && '!text-xs',
)} title={nodeInfo.title}>{nodeInfo.title}</div>
<Tooltip
popupContent={
<div className='max-w-xs'>{nodeInfo.title}</div>
}
>
<div className={cn(
'system-xs-semibold-uppercase grow truncate text-text-secondary',
hideInfo && '!text-xs',
)}>{nodeInfo.title}</div>
</Tooltip>
{nodeInfo.status !== 'running' && !hideInfo && (
<div className='system-xs-regular shrink-0 text-text-tertiary'>{nodeInfo.execution_metadata?.total_tokens ? `${getTokenCount(nodeInfo.execution_metadata?.total_tokens || 0)} tokens · ` : ''}{`${getTime(nodeInfo.elapsed_time || 0)}`}</div>
)}

View File

@ -12,6 +12,7 @@ import type { CommonNodeType } from '@/app/components/workflow/types'
import { useEventEmitterContextContext } from '@/context/event-emitter'
import { EVENT_WORKFLOW_STOP } from '@/app/components/workflow/variable-inspect/types'
import cn from '@/utils/classnames'
import { useNodesReadOnly } from '../hooks/use-workflow'
const VariableInspectTrigger: FC = () => {
const { t } = useTranslation()
@ -32,7 +33,10 @@ const VariableInspectTrigger: FC = () => {
const allVars = [...environmentVariables, ...conversationVars, ...systemVars, ...nodesWithInspectVars]
return allVars
}, [environmentVariables, conversationVars, systemVars, nodesWithInspectVars])
const {
nodesReadOnly,
getNodesReadOnly,
} = useNodesReadOnly()
const workflowRunningData = useStore(s => s.workflowRunningData)
const nodes = useNodes<CommonNodeType>()
const isStepRunning = useMemo(() => nodes.some(node => node.data._singleRunningStatus === NodeRunningStatus.Running), [nodes])
@ -61,8 +65,14 @@ const VariableInspectTrigger: FC = () => {
<div className={cn('flex items-center gap-1')}>
{!isRunning && !currentVars.length && (
<div
className='system-2xs-semibold-uppercase flex h-5 cursor-pointer items-center gap-1 rounded-md border-[0.5px] border-effects-highlight bg-components-actionbar-bg px-2 text-text-tertiary shadow-lg backdrop-blur-sm hover:bg-background-default-hover'
onClick={() => setShowVariableInspectPanel(true)}
className={cn('system-2xs-semibold-uppercase flex h-5 cursor-pointer items-center gap-1 rounded-md border-[0.5px] border-effects-highlight bg-components-actionbar-bg px-2 text-text-tertiary shadow-lg backdrop-blur-sm hover:bg-background-default-hover',
nodesReadOnly && 'cursor-not-allowed text-text-disabled hover:bg-transparent hover:text-text-disabled',
)}
onClick={() => {
if (getNodesReadOnly())
return
setShowVariableInspectPanel(true)
}}
>
{t('workflow.debug.variableInspect.trigger.normal')}
</div>
@ -70,13 +80,21 @@ const VariableInspectTrigger: FC = () => {
{!isRunning && currentVars.length > 0 && (
<>
<div
className='system-xs-medium flex h-6 cursor-pointer items-center gap-1 rounded-md border-[0.5px] border-effects-highlight bg-components-actionbar-bg px-2 text-text-accent shadow-lg backdrop-blur-sm hover:bg-components-actionbar-bg-accent'
onClick={() => setShowVariableInspectPanel(true)}
className={cn('system-xs-medium flex h-6 cursor-pointer items-center gap-1 rounded-md border-[0.5px] border-effects-highlight bg-components-actionbar-bg px-2 text-text-accent shadow-lg backdrop-blur-sm hover:bg-components-actionbar-bg-accent',
nodesReadOnly && 'cursor-not-allowed text-text-disabled hover:bg-transparent hover:text-text-disabled',
)}
onClick={() => {
if (getNodesReadOnly())
return
setShowVariableInspectPanel(true)
}}
>
{t('workflow.debug.variableInspect.trigger.cached')}
</div>
<div
className='system-xs-medium flex h-6 cursor-pointer items-center rounded-md border-[0.5px] border-effects-highlight bg-components-actionbar-bg px-1 text-text-tertiary shadow-lg backdrop-blur-sm hover:bg-components-actionbar-bg-accent hover:text-text-accent'
className={cn('system-xs-medium flex h-6 cursor-pointer items-center rounded-md border-[0.5px] border-effects-highlight bg-components-actionbar-bg px-1 text-text-tertiary shadow-lg backdrop-blur-sm hover:bg-components-actionbar-bg-accent hover:text-text-accent',
nodesReadOnly && 'cursor-not-allowed text-text-disabled hover:bg-transparent hover:text-text-disabled',
)}
onClick={handleClearAll}
>
{t('workflow.debug.variableInspect.trigger.clear')}

View File

@ -697,4 +697,15 @@ button:focus-within {
-ms-overflow-style: none;
scrollbar-width: none;
}
/* Hide arrows from number input */
.no-spinner::-webkit-outer-spin-button,
.no-spinner::-webkit-inner-spin-button {
-webkit-appearance: none;
margin: 0;
}
.no-spinner {
-moz-appearance: textfield;
}
}
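
These rules pair with the InputNumber change earlier in this commit: the input keeps type='number' semantics (and the spinbutton role) while the native spinners stay hidden, so the custom increment/decrement buttons take over. Usage is just the class name:

<input type='number' className='no-spinner' />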

View File

@ -355,7 +355,9 @@ const translation = {
newChildChunk: 'New Child Chunk',
keywords: 'KEYWORDS',
addKeyWord: 'Add keyword',
keywordEmpty: 'The keyword cannot be empty',
keywordError: 'The maximum length of keyword is 20',
keywordDuplicate: 'The keyword already exists',
characters_one: 'character',
characters_other: 'characters',
hitCount: 'Retrieval count',

View File

@ -1,5 +1,6 @@
import { useCallback, useEffect } from 'react'
import type {
FormOption,
ModelProvider,
} from '@/app/components/header/account-setting/model-provider-page/declarations'
import { fetchModelProviderModelList } from '@/service/common'
@ -518,7 +519,7 @@ export const usePluginTaskList = (category?: PluginType) => {
refreshPluginList(category ? { category } as any : undefined, !category)
}
}
// eslint-disable-next-line react-hooks/exhaustive-deps
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isRefetching])
const handleRefetch = useCallback(() => {
@ -612,3 +613,17 @@ export const usePluginInfo = (providerName?: string) => {
enabled: !!providerName,
})
}
export const useFetchDynamicOptions = (plugin_id: string, provider: string, action: string, parameter: string, provider_type: 'tool') => {
return useMutation({
mutationFn: () => get<{ options: FormOption[] }>('/workspaces/current/plugin/parameters/dynamic-options', {
params: {
plugin_id,
provider,
action,
parameter,
provider_type,
},
}),
})
}
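
A minimal consumer sketch of the new hook, mirroring how VarReferencePicker drives it above (identifiers are illustrative, and the call must live inside a component or hook):

const { mutateAsync: fetchDynamicOptions } = useFetchDynamicOptions(
  'langgenius/example_plugin', 'example_provider', 'example_tool', 'target_parameter', 'tool',
)
const { options } = await fetchDynamicOptions() // { options: FormOption[] }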