mirror of https://github.com/langgenius/dify.git

merge main

commit 5dbda7f4c5
@@ -208,7 +208,7 @@ class AnnotationBatchImportApi(Resource):
         if len(request.files) > 1:
             raise TooManyFilesError()
         # check file type
-        if not file.filename.endswith(".csv"):
+        if not file.filename or not file.filename.endswith(".csv"):
             raise ValueError("Invalid file type. Only CSV files are allowed")
         return AppAnnotationService.batch_import_app_annotations(app_id, file)

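Note on the `.csv` checks (the same guard is added in the two dataset hunks below): a minimal, self-contained sketch, not taken from the repository, of what the extra `not file.filename` clause protects against, assuming Werkzeug's `FileStorage`, whose `filename` attribute may be `None`.

from io import BytesIO

from werkzeug.datastructures import FileStorage


def validate_csv_upload(file: FileStorage) -> None:
    # filename can be None when the client omits it, so the extra
    # `not file.filename` guard turns a would-be AttributeError
    # (None.endswith) into the intended validation error.
    if not file.filename or not file.filename.endswith(".csv"):
        raise ValueError("Invalid file type. Only CSV files are allowed")


# Upload with no filename: the old check would crash, the new one rejects cleanly.
try:
    validate_csv_upload(FileStorage(stream=BytesIO(b"q,a\n"), filename=None))
except ValueError as err:
    print(err)
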
@@ -374,7 +374,7 @@ class DatasetDocumentSegmentBatchImportApi(Resource):
         if len(request.files) > 1:
             raise TooManyFilesError()
         # check file type
-        if not file.filename.endswith(".csv"):
+        if not file.filename or not file.filename.endswith(".csv"):
            raise ValueError("Invalid file type. Only CSV files are allowed")

        try:

@@ -21,7 +21,7 @@ from core.plugin.entities.request import (
 )
 from core.tools.entities.tool_entities import ToolProviderType
 from core.tools.utils.model_invocation_utils import ModelInvocationUtils
-from core.workflow.nodes.llm.node import LLMNode
+from core.workflow.nodes.llm import llm_utils
 from models.account import Tenant

@@ -55,7 +55,7 @@ class PluginModelBackwardsInvocation(BaseBackwardsInvocation):
        def handle() -> Generator[LLMResultChunk, None, None]:
            for chunk in response:
                if chunk.delta.usage:
-                    LLMNode.deduct_llm_quota(
+                    llm_utils.deduct_llm_quota(
                        tenant_id=tenant.id, model_instance=model_instance, usage=chunk.delta.usage
                    )
                chunk.prompt_messages = []

@@ -64,7 +64,7 @@ class PluginModelBackwardsInvocation(BaseBackwardsInvocation):
            return handle()
        else:
            if response.usage:
-                LLMNode.deduct_llm_quota(tenant_id=tenant.id, model_instance=model_instance, usage=response.usage)
+                llm_utils.deduct_llm_quota(tenant_id=tenant.id, model_instance=model_instance, usage=response.usage)

            def handle_non_streaming(response: LLMResult) -> Generator[LLMResultChunk, None, None]:
                yield LLMResultChunk(

@@ -139,4 +139,4 @@ class CacheEmbedding(Embeddings):
            logging.exception(f"Failed to add embedding to redis for the text '{text[:10]}...({len(text)} chars)'")
            raise ex

-        return embedding_results
+        return embedding_results  # type: ignore

@@ -104,7 +104,7 @@ class QAIndexProcessor(BaseIndexProcessor):

    def format_by_template(self, file: FileStorage, **kwargs) -> list[Document]:
        # check file type
-        if not file.filename.endswith(".csv"):
+        if not file.filename or not file.filename.endswith(".csv"):
            raise ValueError("Invalid file type. Only CSV files are allowed")

        try:

@@ -9,7 +9,7 @@ from core.prompt.advanced_prompt_transform import AdvancedPromptTransform
 from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate
 from core.rag.retrieval.output_parser.react_output import ReactAction
 from core.rag.retrieval.output_parser.structured_chat import StructuredChatOutputParser
-from core.workflow.nodes.llm import LLMNode
+from core.workflow.nodes.llm import llm_utils

 PREFIX = """Respond to the human as helpfully and accurately as possible. You have access to the following tools:"""

@@ -165,7 +165,7 @@ class ReactMultiDatasetRouter:
        text, usage = self._handle_invoke_result(invoke_result=invoke_result)

        # deduct quota
-        LLMNode.deduct_llm_quota(tenant_id=tenant_id, model_instance=model_instance, usage=usage)
+        llm_utils.deduct_llm_quota(tenant_id=tenant_id, model_instance=model_instance, usage=usage)

        return text, usage

@@ -32,14 +32,14 @@ class ToolFileMessageTransformer:
                try:
                    assert isinstance(message.message, ToolInvokeMessage.TextMessage)
                    tool_file_manager = ToolFileManager()
-                    file = tool_file_manager.create_file_by_url(
+                    tool_file = tool_file_manager.create_file_by_url(
                        user_id=user_id,
                        tenant_id=tenant_id,
                        file_url=message.message.text,
                        conversation_id=conversation_id,
                    )

-                    url = f"/files/tools/{file.id}{guess_extension(file.mimetype) or '.png'}"
+                    url = f"/files/tools/{tool_file.id}{guess_extension(tool_file.mimetype) or '.png'}"

                    yield ToolInvokeMessage(
                        type=ToolInvokeMessage.MessageType.IMAGE_LINK,

@@ -68,7 +68,7 @@ class ToolFileMessageTransformer:

                assert isinstance(message.message.blob, bytes)
                tool_file_manager = ToolFileManager()
-                file = tool_file_manager.create_file_by_raw(
+                tool_file = tool_file_manager.create_file_by_raw(
                    user_id=user_id,
                    tenant_id=tenant_id,
                    conversation_id=conversation_id,

@@ -77,7 +77,7 @@ class ToolFileMessageTransformer:
                    filename=filename,
                )

-                url = cls.get_tool_file_url(tool_file_id=file.id, extension=guess_extension(file.mimetype))
+                url = cls.get_tool_file_url(tool_file_id=tool_file.id, extension=guess_extension(tool_file.mimetype))

                # check if file is image
                if "image" in mimetype:

@@ -0,0 +1,156 @@
+from collections.abc import Sequence
+from datetime import UTC, datetime
+from typing import Optional, cast
+
+from sqlalchemy import select, update
+from sqlalchemy.orm import Session
+
+from configs import dify_config
+from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
+from core.entities.provider_entities import QuotaUnit
+from core.file.models import File
+from core.memory.token_buffer_memory import TokenBufferMemory
+from core.model_manager import ModelInstance, ModelManager
+from core.model_runtime.entities.llm_entities import LLMUsage
+from core.model_runtime.entities.model_entities import ModelType
+from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
+from core.plugin.entities.plugin import ModelProviderID
+from core.prompt.entities.advanced_prompt_entities import MemoryConfig
+from core.variables.segments import ArrayAnySegment, ArrayFileSegment, FileSegment, NoneSegment, StringSegment
+from core.workflow.entities.variable_pool import VariablePool
+from core.workflow.enums import SystemVariableKey
+from core.workflow.nodes.llm.entities import ModelConfig
+from models import db
+from models.model import Conversation
+from models.provider import Provider, ProviderType
+
+from .exc import InvalidVariableTypeError, LLMModeRequiredError, ModelNotExistError
+
+
+def fetch_model_config(
+    tenant_id: str, node_data_model: ModelConfig
+) -> tuple[ModelInstance, ModelConfigWithCredentialsEntity]:
+    if not node_data_model.mode:
+        raise LLMModeRequiredError("LLM mode is required.")
+
+    model = ModelManager().get_model_instance(
+        tenant_id=tenant_id,
+        model_type=ModelType.LLM,
+        provider=node_data_model.provider,
+        model=node_data_model.name,
+    )
+
+    model.model_type_instance = cast(LargeLanguageModel, model.model_type_instance)
+
+    # check model
+    provider_model = model.provider_model_bundle.configuration.get_provider_model(
+        model=node_data_model.name, model_type=ModelType.LLM
+    )
+
+    if provider_model is None:
+        raise ModelNotExistError(f"Model {node_data_model.name} not exist.")
+    provider_model.raise_for_status()
+
+    # model config
+    stop: list[str] = []
+    if "stop" in node_data_model.completion_params:
+        stop = node_data_model.completion_params.pop("stop")
+
+    model_schema = model.model_type_instance.get_model_schema(node_data_model.name, model.credentials)
+    if not model_schema:
+        raise ModelNotExistError(f"Model {node_data_model.name} not exist.")
+
+    return model, ModelConfigWithCredentialsEntity(
+        provider=node_data_model.provider,
+        model=node_data_model.name,
+        model_schema=model_schema,
+        mode=node_data_model.mode,
+        provider_model_bundle=model.provider_model_bundle,
+        credentials=model.credentials,
+        parameters=node_data_model.completion_params,
+        stop=stop,
+    )
+
+
+def fetch_files(variable_pool: VariablePool, selector: Sequence[str]) -> Sequence["File"]:
+    variable = variable_pool.get(selector)
+    if variable is None:
+        return []
+    elif isinstance(variable, FileSegment):
+        return [variable.value]
+    elif isinstance(variable, ArrayFileSegment):
+        return variable.value
+    elif isinstance(variable, NoneSegment | ArrayAnySegment):
+        return []
+    raise InvalidVariableTypeError(f"Invalid variable type: {type(variable)}")
+
+
+def fetch_memory(
+    variable_pool: VariablePool, app_id: str, node_data_memory: Optional[MemoryConfig], model_instance: ModelInstance
+) -> Optional[TokenBufferMemory]:
+    if not node_data_memory:
+        return None
+
+    # get conversation id
+    conversation_id_variable = variable_pool.get(["sys", SystemVariableKey.CONVERSATION_ID.value])
+    if not isinstance(conversation_id_variable, StringSegment):
+        return None
+    conversation_id = conversation_id_variable.value
+
+    with Session(db.engine, expire_on_commit=False) as session:
+        stmt = select(Conversation).where(Conversation.app_id == app_id, Conversation.id == conversation_id)
+        conversation = session.scalar(stmt)
+        if not conversation:
+            return None
+
+    memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance)
+    return memory
+
+
+def deduct_llm_quota(tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None:
+    provider_model_bundle = model_instance.provider_model_bundle
+    provider_configuration = provider_model_bundle.configuration
+
+    if provider_configuration.using_provider_type != ProviderType.SYSTEM:
+        return
+
+    system_configuration = provider_configuration.system_configuration
+
+    quota_unit = None
+    for quota_configuration in system_configuration.quota_configurations:
+        if quota_configuration.quota_type == system_configuration.current_quota_type:
+            quota_unit = quota_configuration.quota_unit
+
+            if quota_configuration.quota_limit == -1:
+                return
+
+            break
+
+    used_quota = None
+    if quota_unit:
+        if quota_unit == QuotaUnit.TOKENS:
+            used_quota = usage.total_tokens
+        elif quota_unit == QuotaUnit.CREDITS:
+            used_quota = dify_config.get_model_credits(model_instance.model)
+        else:
+            used_quota = 1
+
+    if used_quota is not None and system_configuration.current_quota_type is not None:
+        with Session(db.engine) as session:
+            stmt = (
+                update(Provider)
+                .where(
+                    Provider.tenant_id == tenant_id,
+                    # TODO: Use provider name with prefix after the data migration.
+                    Provider.provider_name == ModelProviderID(model_instance.provider).provider_name,
+                    Provider.provider_type == ProviderType.SYSTEM.value,
+                    Provider.quota_type == system_configuration.current_quota_type.value,
+                    Provider.quota_limit > Provider.quota_used,
+                )
+                .values(
+                    quota_used=Provider.quota_used + used_quota,
+                    last_used=datetime.now(tz=UTC).replace(tzinfo=None),
+                )
+            )
+            session.execute(stmt)
+            session.commit()

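A hedged usage sketch for the new module (not part of the commit): the helpers are plain functions, so call sites pass the variable pool and tenant explicitly instead of going through `LLMNode` classmethods. Only the empty-pool call below is expected to run outside a full Dify environment; the commented calls mirror the call sites changed elsewhere in this diff.

from core.workflow.entities.variable_pool import VariablePool
from core.workflow.nodes.llm import llm_utils

# An empty variable pool: fetch_files returns [] when the selector is absent,
# mirroring the unit tests further down in this commit.
variable_pool = VariablePool()
print(llm_utils.fetch_files(variable_pool=variable_pool, selector=["sys", "files"]))  # -> []

# Call sites that previously used LLMNode classmethods now read, for example:
#   llm_utils.deduct_llm_quota(tenant_id=tenant_id, model_instance=model_instance, usage=usage)
#   llm_utils.fetch_memory(variable_pool=variable_pool, app_id=app_id,
#                          node_data_memory=node_data.memory, model_instance=model_instance)
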
@@ -3,16 +3,11 @@ import io
 import json
 import logging
 from collections.abc import Generator, Mapping, Sequence
-from datetime import UTC, datetime
 from typing import TYPE_CHECKING, Any, Optional, cast

 import json_repair
-from sqlalchemy import select, update
-from sqlalchemy.orm import Session

-from configs import dify_config
 from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
-from core.entities.provider_entities import QuotaUnit
 from core.file import FileType, file_manager
 from core.helper.code_executor import CodeExecutor, CodeLanguage
 from core.memory.token_buffer_memory import TokenBufferMemory

@@ -40,12 +35,10 @@ from core.model_runtime.entities.model_entities import (
 )
 from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
 from core.model_runtime.utils.encoders import jsonable_encoder
-from core.plugin.entities.plugin import ModelProviderID
 from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig
 from core.prompt.utils.prompt_message_util import PromptMessageUtil
 from core.rag.entities.citation_metadata import RetrievalSourceMetadata
 from core.variables import (
-    ArrayAnySegment,
     ArrayFileSegment,
     ArraySegment,
     FileSegment,

@@ -75,10 +68,8 @@ from core.workflow.utils.structured_output.entities import (
 )
 from core.workflow.utils.structured_output.prompt import STRUCTURED_OUTPUT_PROMPT
 from core.workflow.utils.variable_template_parser import VariableTemplateParser
-from extensions.ext_database import db
-from models.model import Conversation
-from models.provider import Provider, ProviderType

+from . import llm_utils
 from .entities import (
     LLMNodeChatModelMessage,
     LLMNodeCompletionModelPromptTemplate,

@@ -88,7 +79,6 @@ from .entities import (
 from .exc import (
     InvalidContextStructureError,
     InvalidVariableTypeError,
-    LLMModeRequiredError,
     LLMNodeError,
     MemoryRolePrefixRequiredError,
     ModelNotExistError,

@@ -160,6 +150,7 @@ class LLMNode(BaseNode[LLMNodeData]):
        result_text = ""
        usage = LLMUsage.empty_usage()
        finish_reason = None
+        variable_pool = self.graph_runtime_state.variable_pool

        try:
            # init messages template

@@ -178,7 +169,10 @@ class LLMNode(BaseNode[LLMNodeData]):

            # fetch files
            files = (
-                self._fetch_files(selector=self.node_data.vision.configs.variable_selector)
+                llm_utils.fetch_files(
+                    variable_pool=variable_pool,
+                    selector=self.node_data.vision.configs.variable_selector,
+                )
                if self.node_data.vision.enabled
                else []
            )

@@ -200,15 +194,18 @@ class LLMNode(BaseNode[LLMNodeData]):
            model_instance, model_config = self._fetch_model_config(self.node_data.model)

            # fetch memory
-            memory = self._fetch_memory(node_data_memory=self.node_data.memory, model_instance=model_instance)
+            memory = llm_utils.fetch_memory(
+                variable_pool=variable_pool,
+                app_id=self.app_id,
+                node_data_memory=self.node_data.memory,
+                model_instance=model_instance,
+            )

            query = None
            if self.node_data.memory:
                query = self.node_data.memory.query_prompt_template
                if not query and (
-                    query_variable := self.graph_runtime_state.variable_pool.get(
-                        (SYSTEM_VARIABLE_NODE_ID, SystemVariableKey.QUERY)
-                    )
+                    query_variable := variable_pool.get((SYSTEM_VARIABLE_NODE_ID, SystemVariableKey.QUERY))
                ):
                    query = query_variable.text

@@ -222,7 +219,7 @@ class LLMNode(BaseNode[LLMNodeData]):
                memory_config=self.node_data.memory,
                vision_enabled=self.node_data.vision.enabled,
                vision_detail=self.node_data.vision.configs.detail,
-                variable_pool=self.graph_runtime_state.variable_pool,
+                variable_pool=variable_pool,
                jinja2_variables=self.node_data.prompt_config.jinja2_variables,
            )

@@ -251,7 +248,7 @@ class LLMNode(BaseNode[LLMNodeData]):
                    usage = event.usage
                    finish_reason = event.finish_reason
                    # deduct quota
-                    self.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
+                    llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
                    break
            outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason}
            structured_output = process_structured_output(result_text)

@@ -447,18 +444,6 @@ class LLMNode(BaseNode[LLMNodeData]):

        return inputs

-    def _fetch_files(self, *, selector: Sequence[str]) -> Sequence["File"]:
-        variable = self.graph_runtime_state.variable_pool.get(selector)
-        if variable is None:
-            return []
-        elif isinstance(variable, FileSegment):
-            return [variable.value]
-        elif isinstance(variable, ArrayFileSegment):
-            return variable.value
-        elif isinstance(variable, NoneSegment | ArrayAnySegment):
-            return []
-        raise InvalidVariableTypeError(f"Invalid variable type: {type(variable)}")
-
    def _fetch_context(self, node_data: LLMNodeData):
        if not node_data.context.enabled:
            return

@@ -524,31 +509,10 @@ class LLMNode(BaseNode[LLMNodeData]):
    def _fetch_model_config(
        self, node_data_model: ModelConfig
    ) -> tuple[ModelInstance, ModelConfigWithCredentialsEntity]:
-        if not node_data_model.mode:
-            raise LLMModeRequiredError("LLM mode is required.")
-
-        model = ModelManager().get_model_instance(
-            tenant_id=self.tenant_id,
-            model_type=ModelType.LLM,
-            provider=node_data_model.provider,
-            model=node_data_model.name,
+        model, model_config_with_cred = llm_utils.fetch_model_config(
+            tenant_id=self.tenant_id, node_data_model=node_data_model
        )
-
-        model.model_type_instance = cast(LargeLanguageModel, model.model_type_instance)
-
-        # check model
-        provider_model = model.provider_model_bundle.configuration.get_provider_model(
-            model=node_data_model.name, model_type=ModelType.LLM
-        )
-
-        if provider_model is None:
-            raise ModelNotExistError(f"Model {node_data_model.name} not exist.")
-        provider_model.raise_for_status()
-
-        # model config
-        stop: list[str] = []
-        if "stop" in node_data_model.completion_params:
-            stop = node_data_model.completion_params.pop("stop")
+        completion_params = model_config_with_cred.parameters

        model_schema = model.model_type_instance.get_model_schema(node_data_model.name, model.credentials)
        if not model_schema:

@@ -556,47 +520,12 @@ class LLMNode(BaseNode[LLMNodeData]):

        if self.node_data.structured_output_enabled:
            if model_schema.support_structure_output:
-                node_data_model.completion_params = self._handle_native_json_schema(
-                    node_data_model.completion_params, model_schema.parameter_rules
-                )
+                completion_params = self._handle_native_json_schema(completion_params, model_schema.parameter_rules)
            else:
                # Set appropriate response format based on model capabilities
-                self._set_response_format(node_data_model.completion_params, model_schema.parameter_rules)
-
-        return model, ModelConfigWithCredentialsEntity(
-            provider=node_data_model.provider,
-            model=node_data_model.name,
-            model_schema=model_schema,
-            mode=node_data_model.mode,
-            provider_model_bundle=model.provider_model_bundle,
-            credentials=model.credentials,
-            parameters=node_data_model.completion_params,
-            stop=stop,
-        )
-
-    def _fetch_memory(
-        self, node_data_memory: Optional[MemoryConfig], model_instance: ModelInstance
-    ) -> Optional[TokenBufferMemory]:
-        if not node_data_memory:
-            return None
-
-        # get conversation id
-        conversation_id_variable = self.graph_runtime_state.variable_pool.get(
-            ["sys", SystemVariableKey.CONVERSATION_ID.value]
-        )
-        if not isinstance(conversation_id_variable, StringSegment):
-            return None
-        conversation_id = conversation_id_variable.value
-
-        with Session(db.engine, expire_on_commit=False) as session:
-            stmt = select(Conversation).where(Conversation.app_id == self.app_id, Conversation.id == conversation_id)
-            conversation = session.scalar(stmt)
-            if not conversation:
-                return None
-
-        memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance)
-
-        return memory
+                self._set_response_format(completion_params, model_schema.parameter_rules)
+        model_config_with_cred.parameters = completion_params
+        return model, model_config_with_cred

    def _fetch_prompt_messages(
        self,

@@ -775,15 +704,15 @@ class LLMNode(BaseNode[LLMNodeData]):
        model = ModelManager().get_model_instance(
            tenant_id=self.tenant_id,
            model_type=ModelType.LLM,
-            provider=self.node_data.model.provider,
-            model=self.node_data.model.name,
+            provider=model_config.provider,
+            model=model_config.model,
        )
        model_schema = model.model_type_instance.get_model_schema(
-            model=self.node_data.model.name,
+            model=model_config.model,
            credentials=model.credentials,
        )
        if not model_schema:
-            raise ModelNotExistError(f"Model {self.node_data.model.name} not exist.")
+            raise ModelNotExistError(f"Model {model_config.model} not exist.")
        if self.node_data.structured_output_enabled:
            if not model_schema.support_structure_output:
                filtered_prompt_messages = self._handle_prompt_based_schema(

@@ -810,55 +739,6 @@ class LLMNode(BaseNode[LLMNodeData]):
            structured_output = parsed
        return structured_output

-    @classmethod
-    def deduct_llm_quota(cls, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None:
-        provider_model_bundle = model_instance.provider_model_bundle
-        provider_configuration = provider_model_bundle.configuration
-
-        if provider_configuration.using_provider_type != ProviderType.SYSTEM:
-            return
-
-        system_configuration = provider_configuration.system_configuration
-
-        quota_unit = None
-        for quota_configuration in system_configuration.quota_configurations:
-            if quota_configuration.quota_type == system_configuration.current_quota_type:
-                quota_unit = quota_configuration.quota_unit
-
-                if quota_configuration.quota_limit == -1:
-                    return
-
-                break
-
-        used_quota = None
-        if quota_unit:
-            if quota_unit == QuotaUnit.TOKENS:
-                used_quota = usage.total_tokens
-            elif quota_unit == QuotaUnit.CREDITS:
-                used_quota = dify_config.get_model_credits(model_instance.model)
-            else:
-                used_quota = 1
-
-        if used_quota is not None and system_configuration.current_quota_type is not None:
-            with Session(db.engine) as session:
-                stmt = (
-                    update(Provider)
-                    .where(
-                        Provider.tenant_id == tenant_id,
-                        # TODO: Use provider name with prefix after the data migration.
-                        Provider.provider_name == ModelProviderID(model_instance.provider).provider_name,
-                        Provider.provider_type == ProviderType.SYSTEM.value,
-                        Provider.quota_type == system_configuration.current_quota_type.value,
-                        Provider.quota_limit > Provider.quota_used,
-                    )
-                    .values(
-                        quota_used=Provider.quota_used + used_quota,
-                        last_used=datetime.now(tz=UTC).replace(tzinfo=None),
-                    )
-                )
-                session.execute(stmt)
-                session.commit()
-
    @classmethod
    def _extract_variable_selector_to_variable_mapping(
        cls,

@@ -28,8 +28,9 @@ from core.prompt.utils.prompt_message_util import PromptMessageUtil
 from core.workflow.entities.node_entities import NodeRunResult
 from core.workflow.entities.variable_pool import VariablePool
 from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus
+from core.workflow.nodes.base.node import BaseNode
 from core.workflow.nodes.enums import NodeType
-from core.workflow.nodes.llm import LLMNode, ModelConfig
+from core.workflow.nodes.llm import ModelConfig, llm_utils
 from core.workflow.utils import variable_template_parser

 from .entities import ParameterExtractorNodeData

@@ -83,7 +84,7 @@ def extract_json(text):
    return None


-class ParameterExtractorNode(LLMNode):
+class ParameterExtractorNode(BaseNode):
    """
    Parameter Extractor Node.
    """

@@ -116,8 +117,11 @@ class ParameterExtractorNode(LLMNode):
        variable = self.graph_runtime_state.variable_pool.get(node_data.query)
        query = variable.text if variable else ""

+        variable_pool = self.graph_runtime_state.variable_pool
+
        files = (
-            self._fetch_files(
+            llm_utils.fetch_files(
+                variable_pool=variable_pool,
                selector=node_data.vision.configs.variable_selector,
            )
            if node_data.vision.enabled

@@ -137,7 +141,9 @@ class ParameterExtractorNode(LLMNode):
            raise ModelSchemaNotFoundError("Model schema not found")

        # fetch memory
-        memory = self._fetch_memory(
+        memory = llm_utils.fetch_memory(
+            variable_pool=variable_pool,
+            app_id=self.app_id,
            node_data_memory=node_data.memory,
            model_instance=model_instance,
        )

@@ -279,7 +285,7 @@ class ParameterExtractorNode(LLMNode):
        tool_call = invoke_result.message.tool_calls[0] if invoke_result.message.tool_calls else None

        # deduct quota
-        self.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
+        llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)

        if text is None:
            text = ""

@@ -794,7 +800,9 @@ class ParameterExtractorNode(LLMNode):
        Fetch model config.
        """
        if not self._model_instance or not self._model_config:
-            self._model_instance, self._model_config = super()._fetch_model_config(node_data_model)
+            self._model_instance, self._model_config = llm_utils.fetch_model_config(
+                tenant_id=self.tenant_id, node_data_model=node_data_model
+            )

        return self._model_instance, self._model_config

@@ -18,6 +18,7 @@ from core.workflow.nodes.llm import (
     LLMNode,
     LLMNodeChatModelMessage,
     LLMNodeCompletionModelPromptTemplate,
+    llm_utils,
 )
 from core.workflow.utils.variable_template_parser import VariableTemplateParser
 from libs.json_in_md_parser import parse_and_check_json_markdown

@@ -50,7 +51,9 @@ class QuestionClassifierNode(LLMNode):
        # fetch model config
        model_instance, model_config = self._fetch_model_config(node_data.model)
        # fetch memory
-        memory = self._fetch_memory(
+        memory = llm_utils.fetch_memory(
+            variable_pool=variable_pool,
+            app_id=self.app_id,
            node_data_memory=node_data.memory,
            model_instance=model_instance,
        )

@@ -59,7 +62,8 @@
        node_data.instruction = variable_pool.convert_template(node_data.instruction).text

        files = (
-            self._fetch_files(
+            llm_utils.fetch_files(
+                variable_pool=variable_pool,
                selector=node_data.vision.configs.variable_selector,
            )
            if node_data.vision.enabled

@@ -2,6 +2,8 @@
 warn_return_any = True
 warn_unused_configs = True
 check_untyped_defs = True
+cache_fine_grained = True
+sqlite_cache = True
 exclude = (?x)(
     core/model_runtime/model_providers/
     | tests/

@@ -56,7 +56,6 @@ dependencies = [
    "opentelemetry-sdk==1.27.0",
    "opentelemetry-semantic-conventions==0.48b0",
    "opentelemetry-util-http==0.48b0",
-    "pandas-stubs~=2.2.3.241009",
    "pandas[excel,output-formatting,performance]~=2.2.2",
    "pandoc~=2.4",
    "psycogreen~=1.0.2",

@@ -104,7 +103,7 @@ dev = [
    "dotenv-linter~=0.5.0",
    "faker~=32.1.0",
    "lxml-stubs~=0.5.1",
-    "mypy~=1.15.0",
+    "mypy~=1.16.0",
    "ruff~=0.11.5",
    "pytest~=8.3.2",
    "pytest-benchmark~=4.0.0",

@@ -152,6 +151,8 @@ dev = [
    "types_pyOpenSSL>=24.1.0",
    "types_cffi>=1.17.0",
    "types_setuptools>=80.9.0",
+    "pandas-stubs~=2.2.3",
+    "scipy-stubs>=1.15.3.0",
]

############################################################

@@ -353,7 +353,7 @@ def test_extract_json_from_tool_call():
    assert result["location"] == "kawaii"


-def test_chat_parameter_extractor_with_memory(setup_model_mock):
+def test_chat_parameter_extractor_with_memory(setup_model_mock, monkeypatch):
    """
    Test chat parameter extractor with memory.
    """

@@ -384,7 +384,8 @@ def test_chat_parameter_extractor_with_memory(setup_model_mock):
        mode="chat",
        credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
    )
-    node._fetch_memory = get_mocked_fetch_memory("customized memory")
+    # Test the mock before running the actual test
+    monkeypatch.setattr("core.workflow.nodes.llm.llm_utils.fetch_memory", get_mocked_fetch_memory("customized memory"))
    db.session.close = MagicMock()

    result = node._run()

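Because `fetch_memory` is now a module-level function rather than a method on the node instance, the test above patches it by module path instead of assigning to `node._fetch_memory`. A self-contained sketch of the same monkeypatching pattern, using stand-in names rather than the Dify modules:

import sys


def fetch_memory(conversation_id: str) -> str:
    # Stand-in for a module-level helper such as llm_utils.fetch_memory.
    return f"real memory for {conversation_id}"


def summarize(conversation_id: str) -> str:
    # Production code resolves the helper through its module at call time,
    # so patching the module attribute is what changes behaviour.
    return fetch_memory(conversation_id).upper()


def test_summarize_uses_patched_memory(monkeypatch):
    monkeypatch.setattr(sys.modules[__name__], "fetch_memory", lambda _cid: "customized memory")
    assert summarize("abc") == "CUSTOMIZED MEMORY"
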
@@ -25,6 +25,7 @@ from core.workflow.entities.variable_pool import VariablePool
 from core.workflow.graph_engine import Graph, GraphInitParams, GraphRuntimeState
 from core.workflow.nodes.answer import AnswerStreamGenerateRoute
 from core.workflow.nodes.end import EndStreamParam
+from core.workflow.nodes.llm import llm_utils
 from core.workflow.nodes.llm.entities import (
     ContextConfig,
     LLMNodeChatModelMessage,

@@ -170,7 +171,7 @@ def model_config():
 )


-def test_fetch_files_with_file_segment(llm_node):
+def test_fetch_files_with_file_segment():
    file = File(
        id="1",
        tenant_id="test",

@@ -180,13 +181,14 @@ def test_fetch_files_with_file_segment(llm_node):
        related_id="1",
        storage_key="",
    )
-    llm_node.graph_runtime_state.variable_pool.add(["sys", "files"], file)
+    variable_pool = VariablePool()
+    variable_pool.add(["sys", "files"], file)

-    result = llm_node._fetch_files(selector=["sys", "files"])
+    result = llm_utils.fetch_files(variable_pool=variable_pool, selector=["sys", "files"])
    assert result == [file]


-def test_fetch_files_with_array_file_segment(llm_node):
+def test_fetch_files_with_array_file_segment():
    files = [
        File(
            id="1",

@@ -207,28 +209,32 @@ def test_fetch_files_with_array_file_segment(llm_node):
            storage_key="",
        ),
    ]
-    llm_node.graph_runtime_state.variable_pool.add(["sys", "files"], ArrayFileSegment(value=files))
+    variable_pool = VariablePool()
+    variable_pool.add(["sys", "files"], ArrayFileSegment(value=files))

-    result = llm_node._fetch_files(selector=["sys", "files"])
+    result = llm_utils.fetch_files(variable_pool=variable_pool, selector=["sys", "files"])
    assert result == files


-def test_fetch_files_with_none_segment(llm_node):
-    llm_node.graph_runtime_state.variable_pool.add(["sys", "files"], NoneSegment())
+def test_fetch_files_with_none_segment():
+    variable_pool = VariablePool()
+    variable_pool.add(["sys", "files"], NoneSegment())

-    result = llm_node._fetch_files(selector=["sys", "files"])
+    result = llm_utils.fetch_files(variable_pool=variable_pool, selector=["sys", "files"])
    assert result == []


-def test_fetch_files_with_array_any_segment(llm_node):
-    llm_node.graph_runtime_state.variable_pool.add(["sys", "files"], ArrayAnySegment(value=[]))
+def test_fetch_files_with_array_any_segment():
+    variable_pool = VariablePool()
+    variable_pool.add(["sys", "files"], ArrayAnySegment(value=[]))

-    result = llm_node._fetch_files(selector=["sys", "files"])
+    result = llm_utils.fetch_files(variable_pool=variable_pool, selector=["sys", "files"])
    assert result == []


-def test_fetch_files_with_non_existent_variable(llm_node):
-    result = llm_node._fetch_files(selector=["sys", "files"])
+def test_fetch_files_with_non_existent_variable():
+    variable_pool = VariablePool()
+    result = llm_utils.fetch_files(variable_pool=variable_pool, selector=["sys", "files"])
    assert result == []

api/uv.lock (4370 lines changed)
File diff suppressed because it is too large

@@ -7,4 +7,4 @@ cd "$SCRIPT_DIR/.."

 # run mypy checks
 uv run --directory api --dev --with pip \
-  python -m mypy --install-types --non-interactive --cache-fine-grained --sqlite-cache .
+  python -m mypy --install-types --non-interactive ./

@@ -206,7 +206,7 @@ const PluginPage = ({
          variant='secondary-accent'
        >
          <RiBookOpenLine className='mr-1 h-4 w-4' />
-          {t('plugin.submitPlugin')}
+          {t('plugin.publishPlugins')}
        </Button>
      </Link>
      <div className='mx-1 h-3.5 w-[1px] shrink-0 bg-divider-regular'></div>

@@ -247,11 +247,11 @@ const useConfig = (id: string, payload: LLMNodeType) => {
  }, [inputs, setInputs])

  const handlePromptChange = useCallback((newPrompt: PromptItem[] | PromptItem) => {
-    const newInputs = produce(inputRef.current, (draft) => {
+    const newInputs = produce(inputs, (draft) => {
      draft.prompt_template = newPrompt
    })
    setInputs(newInputs)
-  }, [setInputs])
+  }, [inputs, setInputs])

  const handleMemoryChange = useCallback((newMemory?: Memory) => {
    const newInputs = produce(inputs, (draft) => {

@@ -209,7 +209,7 @@ export type InputVar = {
  value_selector?: ValueSelector
  placeholder?: string
  unit?: string
-  hide: boolean
+  hide?: boolean
} & Partial<UploadFileSetting>

export type ModelConfig = {

@@ -195,7 +195,6 @@ const translation = {
  allCategories: 'Alle Kategorien',
  install: '{{num}} Installationen',
  installAction: 'Installieren',
-  submitPlugin: 'Plugin einreichen',
  from: 'Von',
  fromMarketplace: 'Aus dem Marketplace',
  search: 'Suchen',

@@ -212,6 +211,7 @@
  },
  difyVersionNotCompatible: 'Die aktuelle Dify-Version ist mit diesem Plugin nicht kompatibel, bitte aktualisieren Sie auf die erforderliche Mindestversion: {{minimalDifyVersion}}',
  requestAPlugin: 'Ein Plugin anfordern',
+  publishPlugins: 'Plugins veröffentlichen',
}

export default translation

@@ -212,7 +212,7 @@
    clearAll: 'Clear all',
  },
  requestAPlugin: 'Request a plugin',
-  submitPlugin: 'Submit plugin',
+  publishPlugins: 'Publish plugins',
  difyVersionNotCompatible: 'The current Dify version is not compatible with this plugin, please upgrade to the minimum version required: {{minimalDifyVersion}}',
}

@@ -195,7 +195,6 @@ const translation = {
  fromMarketplace: 'De Marketplace',
  endpointsEnabled: '{{num}} conjuntos de puntos finales habilitados',
  from: 'De',
-  submitPlugin: 'Enviar plugin',
  installAction: 'Instalar',
  install: '{{num}} instalaciones',
  allCategories: 'Todas las categorías',

@@ -212,6 +211,7 @@
  },
  difyVersionNotCompatible: 'La versión actual de Dify no es compatible con este plugin, por favor actualiza a la versión mínima requerida: {{minimalDifyVersion}}',
  requestAPlugin: 'Solicitar un plugin',
+  publishPlugins: 'Publicar plugins',
}

export default translation

@@ -195,7 +195,6 @@ const translation = {
  searchTools: 'ابزارهای جستجو...',
  findMoreInMarketplace: 'اطلاعات بیشتر در Marketplace',
  searchInMarketplace: 'جستجو در Marketplace',
-  submitPlugin: 'ارسال افزونه',
  searchCategories: 'دسته بندی ها را جستجو کنید',
  fromMarketplace: 'از بازار',
  installPlugin: 'افزونه را نصب کنید',

@@ -212,6 +211,7 @@
  },
  difyVersionNotCompatible: 'نسخه فعلی دیفی با این پلاگین سازگار نیست، لطفاً به نسخه حداقل مورد نیاز بهروزرسانی کنید: {{minimalDifyVersion}}',
  requestAPlugin: 'درخواست یک افزونه',
+  publishPlugins: 'انتشار افزونه ها',
}

export default translation

@@ -193,7 +193,6 @@ const translation = {
    installing: 'Installation des plugins {{installingLength}}, 0 fait.',
  },
  search: 'Rechercher',
-  submitPlugin: 'Soumettre le plugin',
  installAction: 'Installer',
  from: 'De',
  searchCategories: 'Catégories de recherche',

@@ -212,6 +211,7 @@
  },
  difyVersionNotCompatible: 'La version actuelle de Dify n\'est pas compatible avec ce plugin, veuillez mettre à niveau vers la version minimale requise : {{minimalDifyVersion}}',
  requestAPlugin: 'Demander un plugin',
+  publishPlugins: 'Publier des plugins',
}

export default translation

@@ -464,6 +464,7 @@ const translation = {
      options: {
        disabled: {
          subTitle: 'Ne pas activer le filtrage des métadonnées',
+          title: 'Handicapé',
        },
        automatic: {
          subTitle: 'Générer automatiquement des conditions de filtrage des métadonnées en fonction de la requête de l\'utilisateur',

@@ -196,7 +196,6 @@ const translation = {
  fromMarketplace: 'मार्केटप्लेस से',
  searchPlugins: 'खोज प्लगइन्स',
  install: '{{num}} इंस्टॉलेशन',
-  submitPlugin: 'प्लगइन सबमिट करें',
  allCategories: 'सभी श्रेणियाँ',
  search: 'खोज',
  searchTools: 'खोज उपकरण...',

@@ -212,6 +211,7 @@
  },
  difyVersionNotCompatible: 'वर्तमान डिफाई संस्करण इस प्लगइन के साथ संगत नहीं है, कृपया आवश्यक न्यूनतम संस्करण में अपग्रेड करें: {{minimalDifyVersion}}',
  requestAPlugin: 'एक प्लगइन का अनुरोध करें',
+  publishPlugins: 'प्लगइन प्रकाशित करें',
}

export default translation

@@ -203,7 +203,6 @@ const translation = {
  install: '{{num}} installazioni',
  findMoreInMarketplace: 'Scopri di più su Marketplace',
  installPlugin: 'Installa il plugin',
-  submitPlugin: 'Invia plugin',
  searchPlugins: 'Plugin di ricerca',
  search: 'Ricerca',
  installFrom: 'INSTALLA DA',

@@ -212,6 +211,7 @@
  },
  difyVersionNotCompatible: 'L\'attuale versione di Dify non è compatibile con questo plugin, si prega di aggiornare alla versione minima richiesta: {{minimalDifyVersion}}',
  requestAPlugin: 'Richiedi un plugin',
+  publishPlugins: 'Pubblicare plugin',
}

export default translation

@@ -206,12 +206,12 @@ const translation = {
  searchTools: '検索ツール...',
  installPlugin: 'プラグインをインストールする',
  searchInMarketplace: 'マーケットプレイスで検索',
-  submitPlugin: 'プラグインを提出する',
  difyVersionNotCompatible: '現在の Dify バージョンはこのプラグインと互換性がありません。最小バージョンは{{minimalDifyVersion}}です。',
  metadata: {
    title: 'プラグイン',
  },
  requestAPlugin: 'プラグインをリクエストする',
+  publishPlugins: 'プラグインを公開する',
}

export default translation

@@ -198,7 +198,6 @@ const translation = {
  endpointsEnabled: '{{num}}개의 엔드포인트 집합이 활성화되었습니다.',
  installFrom: '에서 설치',
  allCategories: '모든 카테고리',
-  submitPlugin: '플러그인 제출',
  findMoreInMarketplace: 'Marketplace 에서 더 알아보기',
  searchCategories: '검색 카테고리',
  search: '검색',

@@ -212,6 +211,7 @@
  },
  difyVersionNotCompatible: '현재 Dify 버전이 이 플러그인과 호환되지 않습니다. 필요한 최소 버전으로 업그레이드하십시오: {{minimalDifyVersion}}',
  requestAPlugin: '플러그인을 요청하세요',
+  publishPlugins: '플러그인 게시',
}

export default translation

@@ -206,12 +206,12 @@ const translation = {
  fromMarketplace: 'Z Marketplace',
  searchPlugins: 'Wtyczki wyszukiwania',
  searchTools: 'Narzędzia wyszukiwania...',
-  submitPlugin: 'Prześlij wtyczkę',
  metadata: {
    title: 'Wtyczki',
  },
  difyVersionNotCompatible: 'Obecna wersja Dify nie jest kompatybilna z tym wtyczką, proszę zaktualizować do minimalnej wymaganej wersji: {{minimalDifyVersion}}',
  requestAPlugin: 'Poproś o wtyczkę',
+  publishPlugins: 'Publikowanie wtyczek',
}

export default translation

@@ -194,7 +194,6 @@ const translation = {
  },
  installAction: 'Instalar',
  endpointsEnabled: '{{num}} conjuntos de endpoints habilitados',
-  submitPlugin: 'Enviar plugin',
  searchPlugins: 'Pesquisar plugins',
  searchInMarketplace: 'Pesquisar no Marketplace',
  installPlugin: 'Instale o plugin',

@@ -212,6 +211,7 @@
  },
  difyVersionNotCompatible: 'A versão atual do Dify não é compatível com este plugin, por favor atualize para a versão mínima exigida: {{minimalDifyVersion}}',
  requestAPlugin: 'Solicitar um plugin',
+  publishPlugins: 'Publicar plugins',
}

export default translation

@@ -192,7 +192,6 @@ const translation = {
    installingWithSuccess: 'Instalarea pluginurilor {{installingLength}}, {{successLength}} succes.',
    installing: 'Instalarea pluginurilor {{installingLength}}, 0 terminat.',
  },
-  submitPlugin: 'Trimite plugin',
  fromMarketplace: 'Din Marketplace',
  from: 'Din',
  findMoreInMarketplace: 'Află mai multe în Marketplace',

@@ -212,6 +211,7 @@
  },
  difyVersionNotCompatible: 'Versiunea curentă Dify nu este compatibilă cu acest plugin, vă rugăm să faceți upgrade la versiunea minimă necesară: {{minimalDifyVersion}}',
  requestAPlugin: 'Solicitați un plugin',
+  publishPlugins: 'Publicați pluginuri',
}

export default translation

@@ -199,7 +199,6 @@ const translation = {
  searchTools: 'Инструменты поиска...',
  allCategories: 'Все категории',
  endpointsEnabled: '{{num}} наборы включенных конечных точек',
-  submitPlugin: 'Отправить плагин',
  installAction: 'Устанавливать',
  from: 'От',
  installFrom: 'УСТАНОВИТЬ С',

@@ -212,6 +211,7 @@
  },
  difyVersionNotCompatible: 'Текущая версия Dify не совместима с этим плагином, пожалуйста, обновите до минимально необходимой версии: {{minimalDifyVersion}}',
  requestAPlugin: 'Запросите плагин',
+  publishPlugins: 'Публикация плагинов',
}

export default translation

@@ -209,9 +209,9 @@ const translation = {
  findMoreInMarketplace: 'Poiščite več v Tržnici',
  install: '{{num}} namestitev',
  allCategories: 'Vse kategorije',
-  submitPlugin: 'Oddajte vtičnik',
  difyVersionNotCompatible: 'Trenutna različica Dify ni združljiva s to vtičnico, prosimo, posodobite na minimalno zahtevano različico: {{minimalDifyVersion}}',
  requestAPlugin: 'Zahtevajte vtičnik',
+  publishPlugins: 'Objavljanje vtičnikov',
}

export default translation

@@ -205,13 +205,13 @@ const translation = {
  searchTools: 'เครื่องมือค้นหา...',
  installFrom: 'ติดตั้งจาก',
  fromMarketplace: 'จาก Marketplace',
-  submitPlugin: 'ส่งปลั๊กอิน',
  allCategories: 'หมวดหมู่ทั้งหมด',
  metadata: {
    title: 'ปลั๊กอิน',
  },
  difyVersionNotCompatible: 'เวอร์ชั่นปัจจุบันของ Dify ไม่สามารถใช้งานร่วมกับปลั๊กอินนี้ได้ กรุณาอัปเกรดไปยังเวอร์ชั่นขั้นต่ำที่ต้องการ: {{minimalDifyVersion}}',
  requestAPlugin: 'ขอปลั๊กอิน',
+  publishPlugins: 'เผยแพร่ปลั๊กอิน',
}

export default translation

@@ -197,7 +197,6 @@ const translation = {
  search: 'Aramak',
  install: '{{num}} yükleme',
  searchPlugins: 'Eklentileri ara',
-  submitPlugin: 'Eklenti gönder',
  searchTools: 'Arama araçları...',
  fromMarketplace: 'Pazar Yerinden',
  installPlugin: 'Eklentiyi yükle',

@@ -212,6 +211,7 @@
  },
  difyVersionNotCompatible: 'Mevcut Dify sürümü bu eklentiyle uyumlu değil, lütfen gerekli minimum sürüme güncelleyin: {{minimalDifyVersion}}',
  requestAPlugin: 'Bir eklenti iste',
+  publishPlugins: 'Eklentileri yayınlayın',
}

export default translation

@@ -192,7 +192,6 @@ const translation = {
    installing: 'Встановлення плагінів {{installingLength}}, 0 виконано.',
    installingWithSuccess: 'Встановлення плагінів {{installingLength}}, успіх {{successLength}}.',
  },
-  submitPlugin: 'Надіслати плагін',
  from: 'Від',
  searchInMarketplace: 'Пошук у Marketplace',
  endpointsEnabled: '{{num}} наборів кінцевих точок увімкнено',

@@ -212,6 +211,7 @@
  },
  difyVersionNotCompatible: 'Поточна версія Dify не сумісна з цим плагіном, будь ласка, оновіть до мінімальної версії: {{minimalDifyVersion}}',
  requestAPlugin: 'Запросити плагін',
+  publishPlugins: 'Публікація плагінів',
}

export default translation

@@ -198,7 +198,6 @@ const translation = {
  endpointsEnabled: '{{num}} bộ điểm cuối được kích hoạt',
  install: '{{num}} lượt cài đặt',
  findMoreInMarketplace: 'Tìm thêm trong Marketplace',
-  submitPlugin: 'Gửi plugin',
  search: 'Tìm kiếm',
  searchCategories: 'Danh mục tìm kiếm',
  installPlugin: 'Cài đặt plugin',

@@ -212,6 +211,7 @@
  },
  difyVersionNotCompatible: 'Phiên bản Dify hiện tại không tương thích với plugin này, vui lòng nâng cấp lên phiên bản tối thiểu cần thiết: {{minimalDifyVersion}}',
  requestAPlugin: 'Yêu cầu một plugin',
+  publishPlugins: 'Xuất bản plugin',
}

export default translation

@@ -212,7 +212,7 @@ const translation = {
    clearAll: '清除所有',
  },
  requestAPlugin: '申请插件',
-  submitPlugin: '上传插件',
+  publishPlugins: '发布插件',
  difyVersionNotCompatible: '当前 Dify 版本不兼容该插件,其最低版本要求为 {{minimalDifyVersion}}',
}

@@ -20,8 +20,8 @@ const translation = {
      github: '從 GitHub 安裝',
      marketplace: '從 Marketplace 安裝',
    },
-    noInstalled: '未安裝外掛程式',
-    notFound: '未找到外掛程式',
+    noInstalled: '未安裝插件',
+    notFound: '未找到插件',
  },
  source: {
    marketplace: '市場',

@@ -31,12 +31,12 @@ const translation = {
  detailPanel: {
    categoryTip: {
      marketplace: '從 Marketplace 安裝',
-      debugging: '調試外掛程式',
+      debugging: '調試插件',
      github: '從 Github 安裝',
-      local: '本地外掛程式',
+      local: '本地插件',
    },
    operation: {
-      info: '外掛程式資訊',
+      info: '插件資訊',
      detail: '詳',
      remove: '刪除',
      install: '安裝',

@@ -45,7 +45,7 @@ const translation = {
    checkUpdate: '檢查更新',
  },
  toolSelector: {
-    uninstalledContent: '此外掛程式是從 local/GitHub 儲存庫安裝的。請在安裝後使用。',
+    uninstalledContent: '此插件是從 local/GitHub 儲存庫安裝的。請在安裝後使用。',
    descriptionLabel: '工具描述',
    params: '推理配置',
    paramsTip2: '當 \'Automatic\' 關閉時,使用預設值。',

@@ -56,9 +56,9 @@ const translation = {
    uninstalledTitle: '未安裝工具',
    auto: '自動',
    title: '添加工具',
-    unsupportedContent: '已安裝的外掛程式版本不提供此作。',
+    unsupportedContent: '已安裝的插件版本不提供此作。',
    settings: '用戶設置',
-    uninstalledLink: '在外掛程式中管理',
+    uninstalledLink: '在插件中管理',
    empty: '點擊 『+』 按鈕添加工具。您可以新增多個工具。',
    unsupportedContent2: '按兩下以切換版本。',
    paramsTip1: '控制 LLM 推理參數。',

@@ -69,14 +69,14 @@ const translation = {
    strategyNum: '{{num}}{{策略}}包括',
    endpoints: '端點',
    endpointDisableTip: '禁用端點',
-    endpointsTip: '此外掛程式通過終端節點提供特定功能,您可以為當前工作區配置多個終端節點集。',
+    endpointsTip: '此插件通過終端節點提供特定功能,您可以為當前工作區配置多個終端節點集。',
    modelNum: '{{num}}包含的型號',
    endpointsEmpty: '按兩下「+」按鈕添加端點',
    endpointDisableContent: '您想禁用 {{name}} 嗎?',
    configureApp: '配置 App',
    endpointDeleteContent: '您想刪除 {{name}} 嗎?',
    configureTool: '配置工具',
-    endpointModalDesc: '配置后,即可使用外掛程式通過 API 端點提供的功能。',
+    endpointModalDesc: '配置后,即可使用插件通過 API 端點提供的功能。',
    disabled: '禁用',
    serviceOk: '服務正常',
    endpointDeleteTip: '刪除端點',

@@ -89,26 +89,26 @@ const translation = {
    title: '調試',
  },
  privilege: {
-    whoCanDebug: '誰可以調試外掛程式?',
-    whoCanInstall: '誰可以安裝和管理外掛程式?',
+    whoCanDebug: '誰可以調試插件?',
+    whoCanInstall: '誰可以安裝和管理插件?',
    noone: '沒人',
-    title: '外掛程式首選項',
+    title: '插件首選項',
    everyone: '每個人 都',
    admins: '管理員',
  },
  pluginInfoModal: {
    repository: '存儲庫',
    release: '釋放',
-    title: '外掛程式資訊',
+    title: '插件資訊',
    packageName: '包',
  },
  action: {
-    deleteContentRight: '外掛程式?',
+    deleteContentRight: '插件?',
    deleteContentLeft: '是否要刪除',
-    usedInApps: '此外掛程式正在 {{num}} 個應用程式中使用。',
-    pluginInfo: '外掛程式資訊',
+    usedInApps: '此插件正在 {{num}} 個應用程式中使用。',
+    pluginInfo: '插件資訊',
    checkForUpdates: '檢查更新',
-    delete: '刪除外掛程式',
+    delete: '刪除插件',
  },
  installModal: {
    labels: {

@@ -116,26 +116,26 @@ const translation = {
      version: '版本',
      package: '包',
    },
-    readyToInstallPackage: '即將安裝以下外掛程式',
+    readyToInstallPackage: '即將安裝以下插件',
    back: '返回',
    installFailed: '安裝失敗',
-    readyToInstallPackages: '即將安裝以下 {{num}} 個外掛程式',
+    readyToInstallPackages: '即將安裝以下 {{num}} 個插件',
    next: '下一個',
-    dropPluginToInstall: '將外掛程式包拖放到此處進行安裝',
-    pluginLoadError: '外掛程式載入錯誤',
+    dropPluginToInstall: '將插件包拖放到此處進行安裝',
+    pluginLoadError: '插件載入錯誤',
    installedSuccessfully: '安裝成功',
    uploadFailed: '上傳失敗',
-    installFailedDesc: '外掛程式安裝失敗。',
-    fromTrustSource: '請確保您只從<trustSource>受信任的來源</trustSource>安裝外掛程式。',
-    pluginLoadErrorDesc: '此外掛程式將不會被安裝',
+    installFailedDesc: '插件安裝失敗。',
+    fromTrustSource: '請確保您只從<trustSource>受信任的來源</trustSource>安裝插件。',
+    pluginLoadErrorDesc: '此插件將不會被安裝',
    installComplete: '安裝完成',
    install: '安裝',
-    installedSuccessfullyDesc: '外掛程式已成功安裝。',
+    installedSuccessfullyDesc: '插件已成功安裝。',
    close: '關閉',
    uploadingPackage: '正在上傳 {{packageName}}...',
-    readyToInstall: '即將安裝以下外掛程式',
+    readyToInstall: '即將安裝以下插件',
    cancel: '取消',
-    installPlugin: '安裝外掛程式',
+    installPlugin: '安裝插件',
    installing: '安裝。。。',
  },
  installFromGitHub: {

@@ -145,18 +145,18 @@ const translation = {
    uploadFailed: '上傳失敗',
    selectVersion: '選擇版本',
    selectVersionPlaceholder: '請選擇一個版本',
-    updatePlugin: '從 GitHub 更新外掛程式',
-    installPlugin: '從 GitHub 安裝外掛程式',
+    updatePlugin: '從 GitHub 更新插件',
+    installPlugin: '從 GitHub 安裝插件',
    installedSuccessfully: '安裝成功',
    selectPackage: '選擇套餐',
-    installNote: '請確保您只從受信任的來源安裝外掛程式。',
+    installNote: '請確保您只從受信任的來源安裝插件。',
  },
  upgrade: {
    close: '關閉',
-    title: '安裝外掛程式',
+    title: '安裝插件',
    upgrade: '安裝',
    upgrading: '安裝。。。',
-    description: '即將安裝以下外掛程式',
+    description: '即將安裝以下插件',
    usedInApps: '用於 {{num}} 個應用',
    successfulTitle: '安裝成功',
  },

@@ -173,7 +173,7 @@ const translation = {
    mostPopular: '最受歡迎',
  },
  discover: '發現',
-  noPluginFound: '未找到外掛程式',
+  noPluginFound: '未找到插件',
  empower: '為您的 AI 開發提供支援',
  moreFrom: '來自 Marketplace 的更多內容',
  and: '和',

@@ -186,20 +186,20 @@ const translation = {
  },
  task: {
    installingWithError: '安裝 {{installingLength}} 個插件,{{successLength}} 成功,{{errorLength}} 失敗',
-    installedError: '{{errorLength}} 個外掛程式安裝失敗',
-    installError: '{{errorLength}} 個外掛程式安裝失敗,點擊查看',
+    installedError: '{{errorLength}} 個插件安裝失敗',
+    installError: '{{errorLength}} 個插件安裝失敗,點擊查看',
    installingWithSuccess: '安裝 {{installingLength}} 個插件,{{successLength}} 成功。',
    clearAll: '全部清除',
-    installing: '安裝 {{installingLength}} 個外掛程式,0 個完成。',
+    installing: '安裝 {{installingLength}} 個插件,0 個完成。',
  },
-  requestAPlugin: '申请外掛程式',
-  submitPlugin: '提交外掛程式',
+  requestAPlugin: '申请插件',
+  publishPlugins: '發佈插件',
  findMoreInMarketplace: '在 Marketplace 中查找更多內容',
-  installPlugin: '安裝外掛程式',
+  installPlugin: '安裝插件',
  search: '搜索',
  allCategories: '全部分類',
  from: '從',
-  searchPlugins: '搜索外掛程式',
+  searchPlugins: '搜索插件',
  searchTools: '搜尋工具...',
  installAction: '安裝',
  installFrom: '安裝起始位置',

@@ -231,7 +231,7 @@ const translation = {
    'noResult': '未找到匹配項',
    'searchTool': '搜索工具',
    'agent': '代理策略',
-    'plugin': '外掛程式',
+    'plugin': '插件',
  },
  blocks: {
    'start': '開始',

@@ -789,13 +789,13 @@ const translation = {
    },
    modelNotInMarketplace: {
      title: '未安裝模型',
-      manageInPlugins: '在外掛程式中管理',
+      manageInPlugins: '在插件中管理',
      desc: '此模型是從 Local 或 GitHub 儲存庫安裝的。請在安裝後使用。',
    },
    modelNotSupport: {
      title: '不支援的型號',
-      desc: '已安裝的外掛程式版本不提供此模型。',
-      descForVersionSwitch: '已安裝的外掛程式版本不提供此模型。按兩下以切換版本。',
+      desc: '已安裝的插件版本不提供此模型。',
+      descForVersionSwitch: '已安裝的插件版本不提供此模型。按兩下以切換版本。',
    },
    modelSelectorTooltips: {
      deprecated: '此模型已棄用',

@@ -815,18 +815,18 @@ const translation = {
      strategyNotSelected: '未選擇策略',
    },
    installPlugin: {
-      title: '安裝外掛程式',
+      title: '安裝插件',
      changelog: '更新日誌',
      cancel: '取消',
-      desc: '即將安裝以下外掛程式',
+      desc: '即將安裝以下插件',
      install: '安裝',
    },
-    pluginNotFoundDesc: '此外掛程式是從 GitHub 安裝的。請前往外掛程式 重新安裝',
+    pluginNotFoundDesc: '此插件是從 GitHub 安裝的。請前往插件 重新安裝',
    modelNotSelected: '未選擇模型',
    tools: '工具',
-    strategyNotFoundDesc: '已安裝的外掛程式版本不提供此策略。',
-    pluginNotInstalledDesc: '此外掛程式是從 GitHub 安裝的。請前往外掛程式 重新安裝',
-    strategyNotFoundDescAndSwitchVersion: '已安裝的外掛程式版本不提供此策略。按兩下以切換版本。',
+    strategyNotFoundDesc: '已安裝的插件版本不提供此策略。',
+    pluginNotInstalledDesc: '此插件是從 GitHub 安裝的。請前往插件 重新安裝',
+    strategyNotFoundDescAndSwitchVersion: '已安裝的插件版本不提供此策略。按兩下以切換版本。',
    strategyNotInstallTooltip: '{{strategy}} 未安裝',
    toolNotAuthorizedTooltip: '{{工具}}未授權',
    unsupportedStrategy: '不支援的策略',

@@ -838,8 +838,8 @@ const translation = {
    toolbox: '工具箱',
    configureModel: '配置模型',
    learnMore: '瞭解更多資訊',
-    linkToPlugin: '連結到外掛程式',
-    pluginNotInstalled: '此外掛程式未安裝',
+    linkToPlugin: '連結到插件',
+    pluginNotInstalled: '此插件未安裝',
    notAuthorized: '未授權',
  },
  loop: {