mirror of https://github.com/langgenius/dify.git

chore: adopt StrEnum and auto() for some string-typed enums (#25129)

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>

parent 635e7d3e70
commit a13d7987e0
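The changes below rely on two properties of Python's StrEnum (available since Python 3.11): members are real str instances, so most `.value` accesses become redundant, and `auto()` resolves to the lower-cased member name. A minimal sketch for illustration only — the members shown are an assumed subset, not the project's full AppMode:

    from enum import StrEnum, auto


    class AppMode(StrEnum):
        # Illustrative subset; values mirror the real enum's strings.
        CHAT = "chat"
        AGENT_CHAT = "agent-chat"
        WORKFLOW = auto()  # auto() lower-cases the member name -> "workflow"


    assert AppMode.WORKFLOW == "workflow"       # members compare equal to plain strings
    assert isinstance(AppMode.AGENT_CHAT, str)  # and are genuine str instances
    assert f"{AppMode.CHAT}" == "chat"          # a plain Enum would render as "AppMode.CHAT"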
@@ -477,12 +477,12 @@ def convert_to_agent_apps():
 click.echo(f"Converting app: {app.id}")

 try:
-app.mode = AppMode.AGENT_CHAT.value
+app.mode = AppMode.AGENT_CHAT
 db.session.commit()

 # update conversation mode to agent
 db.session.query(Conversation).where(Conversation.app_id == app.id).update(
-{Conversation.mode: AppMode.AGENT_CHAT.value}
+{Conversation.mode: AppMode.AGENT_CHAT}
 )

 db.session.commit()
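Dropping `.value` here is safe because the member is itself the string "agent-chat", so the value written to the string-typed mode column is unchanged. A reduced, hypothetical illustration (not the real model classes):

    from enum import StrEnum


    class Mode(StrEnum):
        AGENT_CHAT = "agent-chat"


    # A string-typed field accepts the member as-is: it already is that string.
    record = {"mode": Mode.AGENT_CHAT}
    assert record["mode"] == "agent-chat"
    assert str(record["mode"]) == "agent-chat"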
@@ -1,4 +1,4 @@
-import enum
+from enum import Enum
 from typing import Literal, Optional

 from pydantic import Field, PositiveInt
@@ -10,7 +10,7 @@ class OpenSearchConfig(BaseSettings):
 Configuration settings for OpenSearch
 """

-class AuthMethod(enum.StrEnum):
+class AuthMethod(Enum):
 """
 Authentication method for OpenSearch
 """
@@ -7,7 +7,7 @@ default_app_templates: Mapping[AppMode, Mapping] = {
 # workflow default mode
 AppMode.WORKFLOW: {
 "app": {
-"mode": AppMode.WORKFLOW.value,
+"mode": AppMode.WORKFLOW,
 "enable_site": True,
 "enable_api": True,
 }
@@ -15,7 +15,7 @@ default_app_templates: Mapping[AppMode, Mapping] = {
 # completion default mode
 AppMode.COMPLETION: {
 "app": {
-"mode": AppMode.COMPLETION.value,
+"mode": AppMode.COMPLETION,
 "enable_site": True,
 "enable_api": True,
 },
@@ -44,7 +44,7 @@ default_app_templates: Mapping[AppMode, Mapping] = {
 # chat default mode
 AppMode.CHAT: {
 "app": {
-"mode": AppMode.CHAT.value,
+"mode": AppMode.CHAT,
 "enable_site": True,
 "enable_api": True,
 },
@@ -60,7 +60,7 @@ default_app_templates: Mapping[AppMode, Mapping] = {
 # advanced-chat default mode
 AppMode.ADVANCED_CHAT: {
 "app": {
-"mode": AppMode.ADVANCED_CHAT.value,
+"mode": AppMode.ADVANCED_CHAT,
 "enable_site": True,
 "enable_api": True,
 },
@@ -68,7 +68,7 @@ default_app_templates: Mapping[AppMode, Mapping] = {
 # agent-chat default mode
 AppMode.AGENT_CHAT: {
 "app": {
-"mode": AppMode.AGENT_CHAT.value,
+"mode": AppMode.AGENT_CHAT,
 "enable_site": True,
 "enable_api": True,
 },
@@ -307,7 +307,7 @@ class ChatConversationApi(Resource):
 .having(func.count(Message.id) >= args["message_count_gte"])
 )

-if app_model.mode == AppMode.ADVANCED_CHAT.value:
+if app_model.mode == AppMode.ADVANCED_CHAT:
 query = query.where(Conversation.invoke_from != InvokeFrom.DEBUGGER.value)

 match args["sort_by"]:
@@ -74,7 +74,7 @@ class ModelConfigResource(Resource):
 )
 new_app_model_config = new_app_model_config.from_model_config_dict(model_configuration)

-if app_model.mode == AppMode.AGENT_CHAT.value or app_model.is_agent:
+if app_model.mode == AppMode.AGENT_CHAT or app_model.is_agent:
 # get original app model config
 original_app_model_config = (
 db.session.query(AppModelConfig).where(AppModelConfig.id == app_model.app_model_config_id).first()
@@ -20,7 +20,7 @@ class AppParameterApi(InstalledAppResource):
 if app_model is None:
 raise AppUnavailableError()

-if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
+if app_model.mode in {AppMode.ADVANCED_CHAT, AppMode.WORKFLOW}:
 workflow = app_model.workflow
 if workflow is None:
 raise AppUnavailableError()
@@ -150,7 +150,7 @@ class MCPAppApi(Resource):
 def _get_user_input_form(self, app: App) -> list[VariableEntity]:
 """Get and convert user input form"""
 # Get raw user input form based on app mode
-if app.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
+if app.mode in {AppMode.ADVANCED_CHAT, AppMode.WORKFLOW}:
 if not app.workflow:
 raise MCPRequestError(mcp_types.INVALID_REQUEST, "App is unavailable")
 raw_user_input_form = app.workflow.user_input_form(to_old_structure=True)
@@ -29,7 +29,7 @@ class AppParameterApi(Resource):

 Returns the input form parameters and configuration for the application.
 """
-if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
+if app_model.mode in {AppMode.ADVANCED_CHAT, AppMode.WORKFLOW}:
 workflow = app_model.workflow
 if workflow is None:
 raise AppUnavailableError()
@@ -38,7 +38,7 @@ class AppParameterApi(WebApiResource):
 @marshal_with(fields.parameters_fields)
 def get(self, app_model: App, end_user):
 """Retrieve app parameters."""
-if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
+if app_model.mode in {AppMode.ADVANCED_CHAT, AppMode.WORKFLOW}:
 workflow = app_model.workflow
 if workflow is None:
 raise AppUnavailableError()
@@ -1,4 +1,4 @@
-import enum
+from enum import StrEnum
 from typing import Any, Optional

 from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator
@@ -26,25 +26,25 @@ class AgentStrategyProviderIdentity(ToolProviderIdentity):


 class AgentStrategyParameter(PluginParameter):
-class AgentStrategyParameterType(enum.StrEnum):
+class AgentStrategyParameterType(StrEnum):
 """
 Keep all the types from PluginParameterType
 """

-STRING = CommonParameterType.STRING.value
+STRING = CommonParameterType.STRING
-NUMBER = CommonParameterType.NUMBER.value
+NUMBER = CommonParameterType.NUMBER
-BOOLEAN = CommonParameterType.BOOLEAN.value
+BOOLEAN = CommonParameterType.BOOLEAN
-SELECT = CommonParameterType.SELECT.value
+SELECT = CommonParameterType.SELECT
-SECRET_INPUT = CommonParameterType.SECRET_INPUT.value
+SECRET_INPUT = CommonParameterType.SECRET_INPUT
-FILE = CommonParameterType.FILE.value
+FILE = CommonParameterType.FILE
-FILES = CommonParameterType.FILES.value
+FILES = CommonParameterType.FILES
-APP_SELECTOR = CommonParameterType.APP_SELECTOR.value
+APP_SELECTOR = CommonParameterType.APP_SELECTOR
-MODEL_SELECTOR = CommonParameterType.MODEL_SELECTOR.value
+MODEL_SELECTOR = CommonParameterType.MODEL_SELECTOR
-TOOLS_SELECTOR = CommonParameterType.TOOLS_SELECTOR.value
+TOOLS_SELECTOR = CommonParameterType.TOOLS_SELECTOR
-ANY = CommonParameterType.ANY.value
+ANY = CommonParameterType.ANY

 # deprecated, should not use.
-SYSTEM_FILES = CommonParameterType.SYSTEM_FILES.value
+SYSTEM_FILES = CommonParameterType.SYSTEM_FILES

 def as_normal_type(self):
 return as_normal_type(self)
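Because CommonParameterType members are already strings, a second StrEnum can reuse them directly as values and the trailing `.value` is redundant. A reduced sketch with shortened classes, for illustration:

    from enum import StrEnum


    class CommonParameterType(StrEnum):
        STRING = "string"
        SECRET_INPUT = "secret-input"


    class AgentStrategyParameterType(StrEnum):
        # Referencing the member (a str) gives the new member the same string value.
        STRING = CommonParameterType.STRING
        SECRET_INPUT = CommonParameterType.SECRET_INPUT


    assert AgentStrategyParameterType.SECRET_INPUT == "secret-input"
    assert AgentStrategyParameterType.STRING == CommonParameterType.STRING  # equal strings, distinct enums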
@@ -72,7 +72,7 @@ class AgentStrategyIdentity(ToolIdentity):
 pass


-class AgentFeature(enum.StrEnum):
+class AgentFeature(StrEnum):
 """
 Agent Feature, used to describe the features of the agent strategy.
 """
@@ -70,7 +70,7 @@ class PromptTemplateConfigManager:
 :param config: app model config args
 """
 if not config.get("prompt_type"):
-config["prompt_type"] = PromptTemplateEntity.PromptType.SIMPLE.value
+config["prompt_type"] = PromptTemplateEntity.PromptType.SIMPLE

 prompt_type_vals = [typ.value for typ in PromptTemplateEntity.PromptType]
 if config["prompt_type"] not in prompt_type_vals:
@@ -90,7 +90,7 @@ class PromptTemplateConfigManager:
 if not isinstance(config["completion_prompt_config"], dict):
 raise ValueError("completion_prompt_config must be of object type")

-if config["prompt_type"] == PromptTemplateEntity.PromptType.ADVANCED.value:
+if config["prompt_type"] == PromptTemplateEntity.PromptType.ADVANCED:
 if not config["chat_prompt_config"] and not config["completion_prompt_config"]:
 raise ValueError(
 "chat_prompt_config or completion_prompt_config is required when prompt_type is advanced"
@@ -1,5 +1,5 @@
 from collections.abc import Sequence
-from enum import Enum, StrEnum
+from enum import StrEnum, auto
 from typing import Any, Literal, Optional

 from pydantic import BaseModel, Field, field_validator
@@ -61,14 +61,14 @@ class PromptTemplateEntity(BaseModel):
 Prompt Template Entity.
 """

-class PromptType(Enum):
+class PromptType(StrEnum):
 """
 Prompt Type.
 'simple', 'advanced'
 """

-SIMPLE = "simple"
+SIMPLE = auto()
-ADVANCED = "advanced"
+ADVANCED = auto()

 @classmethod
 def value_of(cls, value: str):
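`auto()` in a StrEnum resolves to the lower-cased member name, so the values above are unchanged: SIMPLE is still the string "simple". Members whose value differs from their name (such as the hyphenated "app-latest-config" kept explicit elsewhere in this commit) cannot use auto(). A small check, written for illustration:

    from enum import StrEnum, auto


    class PromptType(StrEnum):
        SIMPLE = auto()    # -> "simple"
        ADVANCED = auto()  # -> "advanced"


    assert PromptType.SIMPLE == "simple"
    assert PromptType("advanced") is PromptType.ADVANCED  # lookup by value still works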
@@ -195,14 +195,14 @@ class DatasetRetrieveConfigEntity(BaseModel):
 Dataset Retrieve Config Entity.
 """

-class RetrieveStrategy(Enum):
+class RetrieveStrategy(StrEnum):
 """
 Dataset Retrieve Strategy.
 'single' or 'multiple'
 """

-SINGLE = "single"
+SINGLE = auto()
-MULTIPLE = "multiple"
+MULTIPLE = auto()

 @classmethod
 def value_of(cls, value: str):
@@ -293,12 +293,12 @@ class AppConfig(BaseModel):
 sensitive_word_avoidance: Optional[SensitiveWordAvoidanceEntity] = None


-class EasyUIBasedAppModelConfigFrom(Enum):
+class EasyUIBasedAppModelConfigFrom(StrEnum):
 """
 App Model Config From.
 """

-ARGS = "args"
+ARGS = auto()
 APP_LATEST_CONFIG = "app-latest-config"
 CONVERSATION_SPECIFIC_CONFIG = "conversation-specific-config"
@@ -1,6 +1,6 @@
 from collections.abc import Mapping, Sequence
 from datetime import datetime
-from enum import Enum, StrEnum
+from enum import StrEnum, auto
 from typing import Any, Optional

 from pydantic import BaseModel
@@ -626,15 +626,15 @@ class QueueStopEvent(AppQueueEvent):
 QueueStopEvent entity
 """

-class StopBy(Enum):
+class StopBy(StrEnum):
 """
 Stop by enum
 """

-USER_MANUAL = "user-manual"
+USER_MANUAL = auto()
-ANNOTATION_REPLY = "annotation-reply"
+ANNOTATION_REPLY = auto()
-OUTPUT_MODERATION = "output-moderation"
+OUTPUT_MODERATION = auto()
-INPUT_MODERATION = "input-moderation"
+INPUT_MODERATION = auto()

 event: QueueEvent = QueueEvent.STOP
 stopped_by: StopBy
@@ -1,5 +1,5 @@
 from collections.abc import Mapping, Sequence
-from enum import Enum
+from enum import StrEnum, auto
 from typing import Any, Optional

 from pydantic import BaseModel, ConfigDict, Field
@@ -50,37 +50,37 @@ class WorkflowTaskState(TaskState):
 answer: str = ""


-class StreamEvent(Enum):
+class StreamEvent(StrEnum):
 """
 Stream event
 """

-PING = "ping"
+PING = auto()
-ERROR = "error"
+ERROR = auto()
-MESSAGE = "message"
+MESSAGE = auto()
-MESSAGE_END = "message_end"
+MESSAGE_END = auto()
-TTS_MESSAGE = "tts_message"
+TTS_MESSAGE = auto()
-TTS_MESSAGE_END = "tts_message_end"
+TTS_MESSAGE_END = auto()
-MESSAGE_FILE = "message_file"
+MESSAGE_FILE = auto()
-MESSAGE_REPLACE = "message_replace"
+MESSAGE_REPLACE = auto()
-AGENT_THOUGHT = "agent_thought"
+AGENT_THOUGHT = auto()
-AGENT_MESSAGE = "agent_message"
+AGENT_MESSAGE = auto()
-WORKFLOW_STARTED = "workflow_started"
+WORKFLOW_STARTED = auto()
-WORKFLOW_FINISHED = "workflow_finished"
+WORKFLOW_FINISHED = auto()
-NODE_STARTED = "node_started"
+NODE_STARTED = auto()
-NODE_FINISHED = "node_finished"
+NODE_FINISHED = auto()
-NODE_RETRY = "node_retry"
+NODE_RETRY = auto()
-PARALLEL_BRANCH_STARTED = "parallel_branch_started"
+PARALLEL_BRANCH_STARTED = auto()
-PARALLEL_BRANCH_FINISHED = "parallel_branch_finished"
+PARALLEL_BRANCH_FINISHED = auto()
-ITERATION_STARTED = "iteration_started"
+ITERATION_STARTED = auto()
-ITERATION_NEXT = "iteration_next"
+ITERATION_NEXT = auto()
-ITERATION_COMPLETED = "iteration_completed"
+ITERATION_COMPLETED = auto()
-LOOP_STARTED = "loop_started"
+LOOP_STARTED = auto()
-LOOP_NEXT = "loop_next"
+LOOP_NEXT = auto()
-LOOP_COMPLETED = "loop_completed"
+LOOP_COMPLETED = auto()
-TEXT_CHUNK = "text_chunk"
+TEXT_CHUNK = auto()
-TEXT_REPLACE = "text_replace"
+TEXT_REPLACE = auto()
-AGENT_LOG = "agent_log"
+AGENT_LOG = auto()


 class StreamResponse(BaseModel):
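Since StreamEvent members are now plain strings, they can be dropped into serialized payloads without `.value`. A rough sketch, not the pipeline's actual response code:

    import json
    from enum import StrEnum, auto


    class StreamEvent(StrEnum):
        MESSAGE = auto()
        MESSAGE_END = auto()  # -> "message_end"


    # json.dumps accepts the member directly because it is a str subclass.
    payload = json.dumps({"event": StreamEvent.MESSAGE_END})
    assert payload == '{"event": "message_end"}'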
@@ -145,7 +145,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline):
 if self._task_state.metadata:
 extras["metadata"] = self._task_state.metadata.model_dump()
 response: Union[ChatbotAppBlockingResponse, CompletionAppBlockingResponse]
-if self._conversation_mode == AppMode.COMPLETION.value:
+if self._conversation_mode == AppMode.COMPLETION:
 response = CompletionAppBlockingResponse(
 task_id=self._application_generate_entity.task_id,
 data=CompletionAppBlockingResponse.Data(
@@ -92,7 +92,7 @@ class MessageCycleManager:
 if not conversation:
 return

-if conversation.mode != AppMode.COMPLETION.value:
+if conversation.mode != AppMode.COMPLETION:
 app_model = conversation.app
 if not app_model:
 return
@@ -1,8 +1,8 @@
-from enum import Enum
+from enum import StrEnum, auto


-class PlanningStrategy(Enum):
+class PlanningStrategy(StrEnum):
-ROUTER = "router"
+ROUTER = auto()
-REACT_ROUTER = "react_router"
+REACT_ROUTER = auto()
-REACT = "react"
+REACT = auto()
-FUNCTION_CALL = "function_call"
+FUNCTION_CALL = auto()
@@ -1,10 +1,10 @@
-from enum import Enum
+from enum import StrEnum, auto


-class EmbeddingInputType(Enum):
+class EmbeddingInputType(StrEnum):
 """
 Enum for embedding input type.
 """

-DOCUMENT = "document"
+DOCUMENT = auto()
-QUERY = "query"
+QUERY = auto()
@@ -1,5 +1,5 @@
 from collections.abc import Sequence
-from enum import Enum
+from enum import StrEnum, auto
 from typing import Optional

 from pydantic import BaseModel, ConfigDict
@@ -9,16 +9,16 @@ from core.model_runtime.entities.model_entities import ModelType, ProviderModel
 from core.model_runtime.entities.provider_entities import ProviderEntity


-class ModelStatus(Enum):
+class ModelStatus(StrEnum):
 """
 Enum class for model status.
 """

-ACTIVE = "active"
+ACTIVE = auto()
 NO_CONFIGURE = "no-configure"
 QUOTA_EXCEEDED = "quota-exceeded"
 NO_PERMISSION = "no-permission"
-DISABLED = "disabled"
+DISABLED = auto()
 CREDENTIAL_REMOVED = "credential-removed"

@@ -1,20 +1,20 @@
-from enum import StrEnum
+from enum import StrEnum, auto


 class CommonParameterType(StrEnum):
 SECRET_INPUT = "secret-input"
 TEXT_INPUT = "text-input"
-SELECT = "select"
+SELECT = auto()
-STRING = "string"
+STRING = auto()
-NUMBER = "number"
+NUMBER = auto()
-FILE = "file"
+FILE = auto()
-FILES = "files"
+FILES = auto()
 SYSTEM_FILES = "system-files"
-BOOLEAN = "boolean"
+BOOLEAN = auto()
 APP_SELECTOR = "app-selector"
 MODEL_SELECTOR = "model-selector"
 TOOLS_SELECTOR = "array[tools]"
-ANY = "any"
+ANY = auto()

 # Dynamic select parameter
 # Once you are not sure about the available options until authorization is done
@@ -23,29 +23,29 @@ class CommonParameterType(StrEnum):

 # TOOL_SELECTOR = "tool-selector"
 # MCP object and array type parameters
-ARRAY = "array"
+ARRAY = auto()
-OBJECT = "object"
+OBJECT = auto()


 class AppSelectorScope(StrEnum):
-ALL = "all"
+ALL = auto()
-CHAT = "chat"
+CHAT = auto()
-WORKFLOW = "workflow"
+WORKFLOW = auto()
-COMPLETION = "completion"
+COMPLETION = auto()


 class ModelSelectorScope(StrEnum):
-LLM = "llm"
+LLM = auto()
 TEXT_EMBEDDING = "text-embedding"
-RERANK = "rerank"
+RERANK = auto()
-TTS = "tts"
+TTS = auto()
-SPEECH2TEXT = "speech2text"
+SPEECH2TEXT = auto()
-MODERATION = "moderation"
+MODERATION = auto()
-VISION = "vision"
+VISION = auto()


 class ToolSelectorScope(StrEnum):
-ALL = "all"
+ALL = auto()
-CUSTOM = "custom"
+CUSTOM = auto()
-BUILTIN = "builtin"
+BUILTIN = auto()
-WORKFLOW = "workflow"
+WORKFLOW = auto()
@@ -1,4 +1,4 @@
-from enum import Enum
+from enum import StrEnum, auto
 from typing import Optional, Union

 from pydantic import BaseModel, ConfigDict, Field
@@ -13,14 +13,14 @@ from core.model_runtime.entities.model_entities import ModelType
 from core.tools.entities.common_entities import I18nObject


-class ProviderQuotaType(Enum):
+class ProviderQuotaType(StrEnum):
-PAID = "paid"
+PAID = auto()
 """hosted paid quota"""

-FREE = "free"
+FREE = auto()
 """third-party free quota"""

-TRIAL = "trial"
+TRIAL = auto()
 """hosted trial quota"""

 @staticmethod
@@ -31,20 +31,20 @@ class ProviderQuotaType(Enum):
 raise ValueError(f"No matching enum found for value '{value}'")


-class QuotaUnit(Enum):
+class QuotaUnit(StrEnum):
-TIMES = "times"
+TIMES = auto()
-TOKENS = "tokens"
+TOKENS = auto()
-CREDITS = "credits"
+CREDITS = auto()


-class SystemConfigurationStatus(Enum):
+class SystemConfigurationStatus(StrEnum):
 """
 Enum class for system configuration status.
 """

-ACTIVE = "active"
+ACTIVE = auto()
 QUOTA_EXCEEDED = "quota-exceeded"
-UNSUPPORTED = "unsupported"
+UNSUPPORTED = auto()


 class RestrictModel(BaseModel):
@@ -168,14 +168,14 @@ class BasicProviderConfig(BaseModel):
 Base model class for common provider settings like credentials
 """

-class Type(Enum):
+class Type(StrEnum):
-SECRET_INPUT = CommonParameterType.SECRET_INPUT.value
+SECRET_INPUT = CommonParameterType.SECRET_INPUT
-TEXT_INPUT = CommonParameterType.TEXT_INPUT.value
+TEXT_INPUT = CommonParameterType.TEXT_INPUT
-SELECT = CommonParameterType.SELECT.value
+SELECT = CommonParameterType.SELECT
-BOOLEAN = CommonParameterType.BOOLEAN.value
+BOOLEAN = CommonParameterType.BOOLEAN
-APP_SELECTOR = CommonParameterType.APP_SELECTOR.value
+APP_SELECTOR = CommonParameterType.APP_SELECTOR
-MODEL_SELECTOR = CommonParameterType.MODEL_SELECTOR.value
+MODEL_SELECTOR = CommonParameterType.MODEL_SELECTOR
-TOOLS_SELECTOR = CommonParameterType.TOOLS_SELECTOR.value
+TOOLS_SELECTOR = CommonParameterType.TOOLS_SELECTOR

 @classmethod
 def value_of(cls, value: str) -> "ProviderConfig.Type":
@@ -1,8 +1,8 @@
-import enum
 import importlib.util
 import json
 import logging
 import os
+from enum import StrEnum, auto
 from pathlib import Path
 from typing import Any, Optional
@@ -13,9 +13,9 @@ from core.helper.position_helper import sort_to_dict_by_position_map
 logger = logging.getLogger(__name__)


-class ExtensionModule(enum.Enum):
+class ExtensionModule(StrEnum):
-MODERATION = "moderation"
+MODERATION = auto()
-EXTERNAL_DATA_TOOL = "external_data_tool"
+EXTERNAL_DATA_TOOL = auto()


 class ModuleExtension(BaseModel):
@@ -1,12 +1,12 @@
 import json
-from enum import Enum
+from enum import StrEnum
 from json import JSONDecodeError
 from typing import Optional

 from extensions.ext_redis import redis_client


-class ProviderCredentialsCacheType(Enum):
+class ProviderCredentialsCacheType(StrEnum):
 PROVIDER = "provider"
 MODEL = "provider_model"
 LOAD_BALANCING_MODEL = "load_balancing_provider_model"
@@ -14,7 +14,7 @@ class ProviderCredentialsCacheType(Enum):

 class ProviderCredentialsCache:
 def __init__(self, tenant_id: str, identity_id: str, cache_type: ProviderCredentialsCacheType):
-self.cache_key = f"{cache_type.value}_credentials:tenant_id:{tenant_id}:id:{identity_id}"
+self.cache_key = f"{cache_type}_credentials:tenant_id:{tenant_id}:id:{identity_id}"

 def get(self) -> Optional[dict]:
 """
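Removing `.value` inside the f-string works because a StrEnum member formats as its raw value; a plain Enum member would render as "ProviderCredentialsCacheType.PROVIDER" and corrupt the cache key. A reduced illustration with made-up class names:

    from enum import Enum, StrEnum


    class NewCacheType(StrEnum):
        PROVIDER = "provider"


    class OldCacheType(Enum):
        PROVIDER = "provider"


    assert f"{NewCacheType.PROVIDER}_credentials" == "provider_credentials"
    assert f"{OldCacheType.PROVIDER}_credentials" == "OldCacheType.PROVIDER_credentials"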
@@ -1,12 +1,12 @@
 import json
-from enum import Enum
+from enum import StrEnum
 from json import JSONDecodeError
 from typing import Optional

 from extensions.ext_redis import redis_client


-class ToolParameterCacheType(Enum):
+class ToolParameterCacheType(StrEnum):
 PARAMETER = "tool_parameter"

@@ -15,7 +15,7 @@ class ToolParameterCache:
 self, tenant_id: str, provider: str, tool_name: str, cache_type: ToolParameterCacheType, identity_id: str
 ):
 self.cache_key = (
-f"{cache_type.value}_secret:tenant_id:{tenant_id}:provider:{provider}:tool_name:{tool_name}"
+f"{cache_type}_secret:tenant_id:{tenant_id}:provider:{provider}:tool_name:{tool_name}"
 f":identity_id:{identity_id}"
 )

@@ -142,7 +142,7 @@ def handle_call_tool(
 end_user,
 args,
 InvokeFrom.SERVICE_API,
-streaming=app.mode == AppMode.AGENT_CHAT.value,
+streaming=app.mode == AppMode.AGENT_CHAT,
 )

 answer = extract_answer_from_response(app, response)
@@ -157,7 +157,7 @@ def build_parameter_schema(
 """Build parameter schema for the tool"""
 parameters, required = convert_input_form_to_parameters(user_input_form, parameters_dict)

-if app_mode in {AppMode.COMPLETION.value, AppMode.WORKFLOW.value}:
+if app_mode in {AppMode.COMPLETION, AppMode.WORKFLOW}:
 return {
 "type": "object",
 "properties": parameters,
@@ -175,9 +175,9 @@ def build_parameter_schema(

 def prepare_tool_arguments(app: App, arguments: dict[str, Any]) -> dict[str, Any]:
 """Prepare arguments based on app mode"""
-if app.mode == AppMode.WORKFLOW.value:
+if app.mode == AppMode.WORKFLOW:
 return {"inputs": arguments}
-elif app.mode == AppMode.COMPLETION.value:
+elif app.mode == AppMode.COMPLETION:
 return {"query": "", "inputs": arguments}
 else:
 # Chat modes - create a copy to avoid modifying original dict
@@ -218,13 +218,13 @@ def process_streaming_response(response: RateLimitGenerator) -> str:
 def process_mapping_response(app: App, response: Mapping) -> str:
 """Process mapping response based on app mode"""
 if app.mode in {
-AppMode.ADVANCED_CHAT.value,
+AppMode.ADVANCED_CHAT,
-AppMode.COMPLETION.value,
+AppMode.COMPLETION,
-AppMode.CHAT.value,
+AppMode.CHAT,
-AppMode.AGENT_CHAT.value,
+AppMode.AGENT_CHAT,
 }:
 return response.get("answer", "")
-elif app.mode == AppMode.WORKFLOW.value:
+elif app.mode == AppMode.WORKFLOW:
 return json.dumps(response["data"]["outputs"], ensure_ascii=False)
 else:
 raise ValueError("Invalid app mode: " + str(app.mode))
@@ -1,20 +1,20 @@
 from abc import ABC
 from collections.abc import Mapping, Sequence
-from enum import Enum, StrEnum
+from enum import StrEnum, auto
 from typing import Annotated, Any, Literal, Optional, Union

 from pydantic import BaseModel, Field, field_serializer, field_validator


-class PromptMessageRole(Enum):
+class PromptMessageRole(StrEnum):
 """
 Enum class for prompt message.
 """

-SYSTEM = "system"
+SYSTEM = auto()
-USER = "user"
+USER = auto()
-ASSISTANT = "assistant"
+ASSISTANT = auto()
-TOOL = "tool"
+TOOL = auto()

 @classmethod
 def value_of(cls, value: str) -> "PromptMessageRole":
@@ -54,11 +54,11 @@ class PromptMessageContentType(StrEnum):
 Enum class for prompt message content type.
 """

-TEXT = "text"
+TEXT = auto()
-IMAGE = "image"
+IMAGE = auto()
-AUDIO = "audio"
+AUDIO = auto()
-VIDEO = "video"
+VIDEO = auto()
-DOCUMENT = "document"
+DOCUMENT = auto()


 class PromptMessageContent(ABC, BaseModel):
@@ -108,8 +108,8 @@ class ImagePromptMessageContent(MultiModalPromptMessageContent):
 """

 class DETAIL(StrEnum):
-LOW = "low"
+LOW = auto()
-HIGH = "high"
+HIGH = auto()

 type: Literal[PromptMessageContentType.IMAGE] = PromptMessageContentType.IMAGE
 detail: DETAIL = DETAIL.LOW
@@ -1,5 +1,5 @@
 from decimal import Decimal
-from enum import Enum, StrEnum
+from enum import StrEnum, auto
 from typing import Any, Optional

 from pydantic import BaseModel, ConfigDict, model_validator
@@ -7,17 +7,17 @@ from pydantic import BaseModel, ConfigDict, model_validator
 from core.model_runtime.entities.common_entities import I18nObject


-class ModelType(Enum):
+class ModelType(StrEnum):
 """
 Enum class for model type.
 """

-LLM = "llm"
+LLM = auto()
 TEXT_EMBEDDING = "text-embedding"
-RERANK = "rerank"
+RERANK = auto()
-SPEECH2TEXT = "speech2text"
+SPEECH2TEXT = auto()
-MODERATION = "moderation"
+MODERATION = auto()
-TTS = "tts"
+TTS = auto()

 @classmethod
 def value_of(cls, origin_model_type: str) -> "ModelType":
@@ -26,17 +26,17 @@ class ModelType(Enum):

 :return: model type
 """
-if origin_model_type in {"text-generation", cls.LLM.value}:
+if origin_model_type in {"text-generation", cls.LLM}:
 return cls.LLM
-elif origin_model_type in {"embeddings", cls.TEXT_EMBEDDING.value}:
+elif origin_model_type in {"embeddings", cls.TEXT_EMBEDDING}:
 return cls.TEXT_EMBEDDING
-elif origin_model_type in {"reranking", cls.RERANK.value}:
+elif origin_model_type in {"reranking", cls.RERANK}:
 return cls.RERANK
-elif origin_model_type in {"speech2text", cls.SPEECH2TEXT.value}:
+elif origin_model_type in {"speech2text", cls.SPEECH2TEXT}:
 return cls.SPEECH2TEXT
-elif origin_model_type in {"tts", cls.TTS.value}:
+elif origin_model_type in {"tts", cls.TTS}:
 return cls.TTS
-elif origin_model_type == cls.MODERATION.value:
+elif origin_model_type == cls.MODERATION:
 return cls.MODERATION
 else:
 raise ValueError(f"invalid origin model type {origin_model_type}")
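The membership tests keep working after `.value` is removed because a StrEnum member hashes and compares like its string value, so "llm" and cls.LLM are interchangeable inside a set. A short check for illustration, assuming Python 3.11+ semantics:

    from enum import StrEnum, auto


    class ModelType(StrEnum):
        LLM = auto()
        TEXT_EMBEDDING = "text-embedding"


    assert "llm" in {"text-generation", ModelType.LLM}
    assert ModelType.TEXT_EMBEDDING in {"embeddings", "text-embedding"}
    assert ModelType.TEXT_EMBEDDING == "text-embedding"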
@@ -63,7 +63,7 @@ class ModelType(Enum):
 raise ValueError(f"invalid model type {self}")


-class FetchFrom(Enum):
+class FetchFrom(StrEnum):
 """
 Enum class for fetch from.
 """
@@ -72,7 +72,7 @@ class FetchFrom(Enum):
 CUSTOMIZABLE_MODEL = "customizable-model"


-class ModelFeature(Enum):
+class ModelFeature(StrEnum):
 """
 Enum class for llm feature.
 """
@@ -80,11 +80,11 @@ class ModelFeature(Enum):
 TOOL_CALL = "tool-call"
 MULTI_TOOL_CALL = "multi-tool-call"
 AGENT_THOUGHT = "agent-thought"
-VISION = "vision"
+VISION = auto()
 STREAM_TOOL_CALL = "stream-tool-call"
-DOCUMENT = "document"
+DOCUMENT = auto()
-VIDEO = "video"
+VIDEO = auto()
-AUDIO = "audio"
+AUDIO = auto()
 STRUCTURED_OUTPUT = "structured-output"

@@ -93,14 +93,14 @@ class DefaultParameterName(StrEnum):
 Enum class for parameter template variable.
 """

-TEMPERATURE = "temperature"
+TEMPERATURE = auto()
-TOP_P = "top_p"
+TOP_P = auto()
-TOP_K = "top_k"
+TOP_K = auto()
-PRESENCE_PENALTY = "presence_penalty"
+PRESENCE_PENALTY = auto()
-FREQUENCY_PENALTY = "frequency_penalty"
+FREQUENCY_PENALTY = auto()
-MAX_TOKENS = "max_tokens"
+MAX_TOKENS = auto()
-RESPONSE_FORMAT = "response_format"
+RESPONSE_FORMAT = auto()
-JSON_SCHEMA = "json_schema"
+JSON_SCHEMA = auto()

 @classmethod
 def value_of(cls, value: Any) -> "DefaultParameterName":
@@ -116,34 +116,34 @@ class DefaultParameterName(StrEnum):
 raise ValueError(f"invalid parameter name {value}")


-class ParameterType(Enum):
+class ParameterType(StrEnum):
 """
 Enum class for parameter type.
 """

-FLOAT = "float"
+FLOAT = auto()
-INT = "int"
+INT = auto()
-STRING = "string"
+STRING = auto()
-BOOLEAN = "boolean"
+BOOLEAN = auto()
-TEXT = "text"
+TEXT = auto()


-class ModelPropertyKey(Enum):
+class ModelPropertyKey(StrEnum):
 """
 Enum class for model property key.
 """

-MODE = "mode"
+MODE = auto()
-CONTEXT_SIZE = "context_size"
+CONTEXT_SIZE = auto()
-MAX_CHUNKS = "max_chunks"
+MAX_CHUNKS = auto()
-FILE_UPLOAD_LIMIT = "file_upload_limit"
+FILE_UPLOAD_LIMIT = auto()
-SUPPORTED_FILE_EXTENSIONS = "supported_file_extensions"
+SUPPORTED_FILE_EXTENSIONS = auto()
-MAX_CHARACTERS_PER_CHUNK = "max_characters_per_chunk"
+MAX_CHARACTERS_PER_CHUNK = auto()
-DEFAULT_VOICE = "default_voice"
+DEFAULT_VOICE = auto()
-VOICES = "voices"
+VOICES = auto()
-WORD_LIMIT = "word_limit"
+WORD_LIMIT = auto()
-AUDIO_TYPE = "audio_type"
+AUDIO_TYPE = auto()
-MAX_WORKERS = "max_workers"
+MAX_WORKERS = auto()


 class ProviderModel(BaseModel):
@@ -220,13 +220,13 @@ class ModelUsage(BaseModel):
 pass


-class PriceType(Enum):
+class PriceType(StrEnum):
 """
 Enum class for price type.
 """

-INPUT = "input"
+INPUT = auto()
-OUTPUT = "output"
+OUTPUT = auto()


 class PriceInfo(BaseModel):
@@ -1,5 +1,5 @@
 from collections.abc import Sequence
-from enum import Enum
+from enum import Enum, StrEnum, auto
 from typing import Optional

 from pydantic import BaseModel, ConfigDict, Field, field_validator
@@ -17,16 +17,16 @@ class ConfigurateMethod(Enum):
 CUSTOMIZABLE_MODEL = "customizable-model"


-class FormType(Enum):
+class FormType(StrEnum):
 """
 Enum class for form type.
 """

 TEXT_INPUT = "text-input"
 SECRET_INPUT = "secret-input"
-SELECT = "select"
+SELECT = auto()
-RADIO = "radio"
+RADIO = auto()
-SWITCH = "switch"
+SWITCH = auto()


 class FormShowOnObject(BaseModel):
@@ -47,7 +47,7 @@ class TextEmbeddingModel(AIModel):
 model=model,
 credentials=credentials,
 texts=texts,
-input_type=input_type.value,
+input_type=input_type,
 )
 except Exception as e:
 raise self._transform_invoke_error(e)
@@ -18,7 +18,7 @@ from pydantic_core import Url
 from pydantic_extra_types.color import Color


-def _model_dump(model: BaseModel, mode: Literal["json", "python"] = "json", **kwargs: Any):
+def _model_dump(model: BaseModel, mode: Literal["json", "python"] = "json", **kwargs: Any) -> Any:
 return model.model_dump(mode=mode, **kwargs)


@@ -100,7 +100,7 @@ def jsonable_encoder(
 exclude_none: bool = False,
 custom_encoder: Optional[dict[Any, Callable[[Any], Any]]] = None,
 sqlalchemy_safe: bool = True,
-):
+) -> Any:
 custom_encoder = custom_encoder or {}
 if custom_encoder:
 if type(obj) in custom_encoder:
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from enum import Enum
+from enum import StrEnum, auto
 from typing import Optional

 from pydantic import BaseModel, Field
@@ -7,9 +7,9 @@ from pydantic import BaseModel, Field
 from core.extension.extensible import Extensible, ExtensionModule


-class ModerationAction(Enum):
+class ModerationAction(StrEnum):
-DIRECT_OUTPUT = "direct_output"
+DIRECT_OUTPUT = auto()
-OVERRIDDEN = "overridden"
+OVERRIDDEN = auto()


 class ModerationInputsResult(BaseModel):
@@ -1,4 +1,4 @@
-from enum import Enum
+from enum import StrEnum

 # public
 GEN_AI_SESSION_ID = "gen_ai.session.id"
@@ -53,7 +53,7 @@ TOOL_DESCRIPTION = "tool.description"
 TOOL_PARAMETERS = "tool.parameters"


-class GenAISpanKind(Enum):
+class GenAISpanKind(StrEnum):
 CHAIN = "CHAIN"
 RETRIEVER = "RETRIEVER"
 RERANKER = "RERANKER"
@@ -27,7 +27,7 @@ class PluginAppBackwardsInvocation(BaseBackwardsInvocation):
 app = cls._get_app(app_id, tenant_id)

 """Retrieve app parameters."""
-if app.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
+if app.mode in {AppMode.ADVANCED_CHAT, AppMode.WORKFLOW}:
 workflow = app.workflow
 if workflow is None:
 raise ValueError("unexpected app type")
@@ -70,7 +70,7 @@ class PluginAppBackwardsInvocation(BaseBackwardsInvocation):

 conversation_id = conversation_id or ""

-if app.mode in {AppMode.ADVANCED_CHAT.value, AppMode.AGENT_CHAT.value, AppMode.CHAT.value}:
+if app.mode in {AppMode.ADVANCED_CHAT, AppMode.AGENT_CHAT, AppMode.CHAT}:
 if not query:
 raise ValueError("missing query")

@@ -96,7 +96,7 @@ class PluginAppBackwardsInvocation(BaseBackwardsInvocation):
 """
 invoke chat app
 """
-if app.mode == AppMode.ADVANCED_CHAT.value:
+if app.mode == AppMode.ADVANCED_CHAT:
 workflow = app.workflow
 if not workflow:
 raise ValueError("unexpected app type")
@@ -114,7 +114,7 @@ class PluginAppBackwardsInvocation(BaseBackwardsInvocation):
 invoke_from=InvokeFrom.SERVICE_API,
 streaming=stream,
 )
-elif app.mode == AppMode.AGENT_CHAT.value:
+elif app.mode == AppMode.AGENT_CHAT:
 return AgentChatAppGenerator().generate(
 app_model=app,
 user=user,
@@ -127,7 +127,7 @@ class PluginAppBackwardsInvocation(BaseBackwardsInvocation):
 invoke_from=InvokeFrom.SERVICE_API,
 streaming=stream,
 )
-elif app.mode == AppMode.CHAT.value:
+elif app.mode == AppMode.CHAT:
 return ChatAppGenerator().generate(
 app_model=app,
 user=user,
@@ -1,5 +1,5 @@
-import enum
 import json
+from enum import StrEnum, auto
 from typing import Any, Optional, Union

 from pydantic import BaseModel, Field, field_validator
@@ -25,44 +25,44 @@ class PluginParameterOption(BaseModel):
 return value


-class PluginParameterType(enum.StrEnum):
+class PluginParameterType(StrEnum):
 """
 all available parameter types
 """

-STRING = CommonParameterType.STRING.value
+STRING = CommonParameterType.STRING
-NUMBER = CommonParameterType.NUMBER.value
+NUMBER = CommonParameterType.NUMBER
-BOOLEAN = CommonParameterType.BOOLEAN.value
+BOOLEAN = CommonParameterType.BOOLEAN
-SELECT = CommonParameterType.SELECT.value
+SELECT = CommonParameterType.SELECT
-SECRET_INPUT = CommonParameterType.SECRET_INPUT.value
+SECRET_INPUT = CommonParameterType.SECRET_INPUT
-FILE = CommonParameterType.FILE.value
+FILE = CommonParameterType.FILE
-FILES = CommonParameterType.FILES.value
+FILES = CommonParameterType.FILES
-APP_SELECTOR = CommonParameterType.APP_SELECTOR.value
+APP_SELECTOR = CommonParameterType.APP_SELECTOR
-MODEL_SELECTOR = CommonParameterType.MODEL_SELECTOR.value
+MODEL_SELECTOR = CommonParameterType.MODEL_SELECTOR
-TOOLS_SELECTOR = CommonParameterType.TOOLS_SELECTOR.value
+TOOLS_SELECTOR = CommonParameterType.TOOLS_SELECTOR
-ANY = CommonParameterType.ANY.value
+ANY = CommonParameterType.ANY
-DYNAMIC_SELECT = CommonParameterType.DYNAMIC_SELECT.value
+DYNAMIC_SELECT = CommonParameterType.DYNAMIC_SELECT

 # deprecated, should not use.
-SYSTEM_FILES = CommonParameterType.SYSTEM_FILES.value
+SYSTEM_FILES = CommonParameterType.SYSTEM_FILES

 # MCP object and array type parameters
-ARRAY = CommonParameterType.ARRAY.value
+ARRAY = CommonParameterType.ARRAY
-OBJECT = CommonParameterType.OBJECT.value
+OBJECT = CommonParameterType.OBJECT


-class MCPServerParameterType(enum.StrEnum):
+class MCPServerParameterType(StrEnum):
 """
 MCP server got complex parameter types
 """

-ARRAY = "array"
+ARRAY = auto()
-OBJECT = "object"
+OBJECT = auto()


 class PluginParameterAutoGenerate(BaseModel):
-class Type(enum.StrEnum):
+class Type(StrEnum):
-PROMPT_INSTRUCTION = "prompt_instruction"
+PROMPT_INSTRUCTION = auto()

 type: Type

@@ -93,7 +93,7 @@ class PluginParameter(BaseModel):
 return v


-def as_normal_type(typ: enum.StrEnum):
+def as_normal_type(typ: StrEnum):
 if typ.value in {
 PluginParameterType.SECRET_INPUT,
 PluginParameterType.SELECT,
@@ -102,7 +102,7 @@ def as_normal_type(typ: enum.StrEnum):
 return typ.value


-def cast_parameter_value(typ: enum.StrEnum, value: Any, /):
+def cast_parameter_value(typ: StrEnum, value: Any, /):
 try:
 match typ.value:
 case PluginParameterType.STRING | PluginParameterType.SECRET_INPUT | PluginParameterType.SELECT:
@@ -190,7 +190,7 @@ def cast_parameter_value(typ: enum.StrEnum, value: Any, /):
 raise ValueError(f"The tool parameter value {value} is not in correct type of {as_normal_type(typ)}.")


-def init_frontend_parameter(rule: PluginParameter, type: enum.StrEnum, value: Any):
+def init_frontend_parameter(rule: PluginParameter, type: StrEnum, value: Any):
 """
 init frontend parameter by rule
 """
@ -1,7 +1,7 @@
|
||||||
import datetime
|
import datetime
|
||||||
import enum
|
|
||||||
import re
|
import re
|
||||||
from collections.abc import Mapping
|
from collections.abc import Mapping
|
||||||
|
from enum import StrEnum, auto
|
||||||
from typing import Any, Optional
|
from typing import Any, Optional
|
||||||
|
|
||||||
from packaging.version import InvalidVersion, Version
|
from packaging.version import InvalidVersion, Version
|
||||||
|
|
@ -16,11 +16,11 @@ from core.tools.entities.common_entities import I18nObject
|
||||||
from core.tools.entities.tool_entities import ToolProviderEntity
|
from core.tools.entities.tool_entities import ToolProviderEntity
|
||||||
|
|
||||||
|
|
||||||
class PluginInstallationSource(enum.StrEnum):
|
class PluginInstallationSource(StrEnum):
|
||||||
Github = "github"
|
Github = auto()
|
||||||
Marketplace = "marketplace"
|
Marketplace = auto()
|
||||||
Package = "package"
|
Package = auto()
|
||||||
Remote = "remote"
|
Remote = auto()
|
||||||
|
|
||||||
|
|
||||||
class PluginResourceRequirements(BaseModel):
|
class PluginResourceRequirements(BaseModel):
|
||||||
|
|
@ -58,10 +58,10 @@ class PluginResourceRequirements(BaseModel):
|
||||||
permission: Optional[Permission] = Field(default=None)
|
permission: Optional[Permission] = Field(default=None)
|
||||||
|
|
||||||
|
|
||||||
class PluginCategory(enum.StrEnum):
|
class PluginCategory(StrEnum):
|
||||||
Tool = "tool"
|
Tool = auto()
|
||||||
Model = "model"
|
Model = auto()
|
||||||
Extension = "extension"
|
Extension = auto()
|
||||||
AgentStrategy = "agent-strategy"
|
AgentStrategy = "agent-strategy"
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -206,10 +206,10 @@ class ToolProviderID(GenericProviderID):
|
||||||
|
|
||||||
|
|
||||||
class PluginDependency(BaseModel):
|
class PluginDependency(BaseModel):
|
||||||
class Type(enum.StrEnum):
|
class Type(StrEnum):
|
||||||
Github = PluginInstallationSource.Github.value
|
Github = PluginInstallationSource.Github
|
||||||
Marketplace = PluginInstallationSource.Marketplace.value
|
Marketplace = PluginInstallationSource.Marketplace
|
||||||
Package = PluginInstallationSource.Package.value
|
Package = PluginInstallationSource.Package
|
||||||
|
|
||||||
class Github(BaseModel):
|
class Github(BaseModel):
|
||||||
repo: str
|
repo: str
|
||||||
|
|
|
||||||
|
|
@@ -1,7 +1,7 @@
-import enum
 import json
 import os
 from collections.abc import Mapping, Sequence
+from enum import StrEnum, auto
 from typing import TYPE_CHECKING, Any, Optional, cast

 from core.app.app_config.entities import PromptTemplateEntity
@@ -25,9 +25,9 @@ if TYPE_CHECKING:
    from core.file.models import File


-class ModelMode(enum.StrEnum):
-    COMPLETION = "completion"
-    CHAT = "chat"
+class ModelMode(StrEnum):
+    COMPLETION = auto()
+    CHAT = auto()


 prompt_file_contents: dict[str, Any] = {}
@@ -1,13 +1,13 @@
-from enum import Enum
+from enum import StrEnum, auto


-class Field(Enum):
+class Field(StrEnum):
    CONTENT_KEY = "page_content"
    METADATA_KEY = "metadata"
    GROUP_KEY = "group_id"
-    VECTOR = "vector"
+    VECTOR = auto()
    # Sparse Vector aims to support full text search
-    SPARSE_VECTOR = "sparse_vector"
+    SPARSE_VECTOR = auto()
    TEXT_KEY = "text"
    PRIMARY_KEY = "id"
    DOC_ID = "metadata.doc_id"
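As the Field enum above shows, auto() and explicit string values can be mixed freely in one StrEnum: auto() only replaces members whose value already equals the lower-cased member name ("vector", "sparse_vector"), while keys like "page_content" keep their literals. A short sketch under that assumption:

from enum import StrEnum, auto


class Field(StrEnum):
    CONTENT_KEY = "page_content"  # value differs from the member name, so it stays explicit
    VECTOR = auto()               # "vector"
    SPARSE_VECTOR = auto()        # "sparse_vector"


assert Field.VECTOR == "vector"
assert Field.CONTENT_KEY == "page_content"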
@@ -1,7 +1,7 @@
 import json
 import logging
 import uuid
-from enum import Enum
+from enum import StrEnum
 from typing import Any

 from clickhouse_connect import get_client
@@ -27,7 +27,7 @@ class MyScaleConfig(BaseModel):
    fts_params: str


-class SortOrder(Enum):
+class SortOrder(StrEnum):
    ASC = "ASC"
    DESC = "DESC"

@@ -1,7 +1,7 @@
-from enum import Enum
+from enum import StrEnum


-class DatasourceType(Enum):
+class DatasourceType(StrEnum):
    FILE = "upload_file"
    NOTION = "notion_import"
    WEBSITE = "website_crawl"
@@ -1,15 +1,15 @@
-from enum import Enum, StrEnum
+from enum import StrEnum, auto


 class BuiltInField(StrEnum):
-    document_name = "document_name"
-    uploader = "uploader"
-    upload_date = "upload_date"
-    last_update_date = "last_update_date"
-    source = "source"
+    document_name = auto()
+    uploader = auto()
+    upload_date = auto()
+    last_update_date = auto()
+    source = auto()


-class MetadataDataSource(Enum):
+class MetadataDataSource(StrEnum):
    upload_file = "file_upload"
    website_crawl = "website"
    notion_import = "notion"
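MetadataDataSource keeps its literals because its values are not the lower-cased member names (upload_file maps to "file_upload"), whereas BuiltInField's values coincide with the member names, so auto() is a safe drop-in there. A quick illustration of the rule (illustrative sketch, not project code):

from enum import StrEnum, auto


class BuiltIn(StrEnum):
    document_name = auto()  # auto() == "document_name", identical to the old literal


class DataSource(StrEnum):
    upload_file = "file_upload"  # auto() would yield "upload_file" and silently change the value


assert BuiltIn.document_name == "document_name"
assert DataSource.upload_file == "file_upload"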
@@ -1,8 +1,7 @@
 import base64
 import contextlib
-import enum
 from collections.abc import Mapping
-from enum import Enum
+from enum import StrEnum, auto
 from typing import Any, Optional, Union

 from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_serializer, field_validator, model_validator
@@ -22,37 +21,37 @@ from core.tools.entities.common_entities import I18nObject
 from core.tools.entities.constants import TOOL_SELECTOR_MODEL_IDENTITY


-class ToolLabelEnum(Enum):
-    SEARCH = "search"
-    IMAGE = "image"
-    VIDEOS = "videos"
-    WEATHER = "weather"
-    FINANCE = "finance"
-    DESIGN = "design"
-    TRAVEL = "travel"
-    SOCIAL = "social"
-    NEWS = "news"
-    MEDICAL = "medical"
-    PRODUCTIVITY = "productivity"
-    EDUCATION = "education"
-    BUSINESS = "business"
-    ENTERTAINMENT = "entertainment"
-    UTILITIES = "utilities"
-    OTHER = "other"
+class ToolLabelEnum(StrEnum):
+    SEARCH = auto()
+    IMAGE = auto()
+    VIDEOS = auto()
+    WEATHER = auto()
+    FINANCE = auto()
+    DESIGN = auto()
+    TRAVEL = auto()
+    SOCIAL = auto()
+    NEWS = auto()
+    MEDICAL = auto()
+    PRODUCTIVITY = auto()
+    EDUCATION = auto()
+    BUSINESS = auto()
+    ENTERTAINMENT = auto()
+    UTILITIES = auto()
+    OTHER = auto()


-class ToolProviderType(enum.StrEnum):
+class ToolProviderType(StrEnum):
    """
    Enum class for tool provider
    """

-    PLUGIN = "plugin"
+    PLUGIN = auto()
    BUILT_IN = "builtin"
-    WORKFLOW = "workflow"
-    API = "api"
-    APP = "app"
+    WORKFLOW = auto()
+    API = auto()
+    APP = auto()
    DATASET_RETRIEVAL = "dataset-retrieval"
-    MCP = "mcp"
+    MCP = auto()

    @classmethod
    def value_of(cls, value: str) -> "ToolProviderType":
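In ToolProviderType, members whose values match their lower-cased names move to auto(), while BUILT_IN ("builtin", not "built_in") and DATASET_RETRIEVAL ("dataset-retrieval") keep explicit literals. Either way the members remain plain strings, so value_of-style helpers and direct string comparisons behave as before. A hedged sketch with stand-in names:

from enum import StrEnum, auto


class ProviderKind(StrEnum):  # hypothetical stand-in for ToolProviderType
    PLUGIN = auto()                          # "plugin"
    BUILT_IN = "builtin"                     # auto() would produce "built_in", so keep the literal
    DATASET_RETRIEVAL = "dataset-retrieval"  # hyphens can't come from auto() either


def value_of(value: str) -> "ProviderKind":
    # mirrors the value_of helpers in the diff: match on the stored string value
    for member in ProviderKind:
        if member.value == value:
            return member
    raise ValueError(f"invalid mode value {value}")


assert value_of("builtin") is ProviderKind.BUILT_IN
assert ProviderKind.PLUGIN == "plugin"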
@@ -68,15 +67,15 @@ class ToolProviderType(enum.StrEnum):
        raise ValueError(f"invalid mode value {value}")


-class ApiProviderSchemaType(Enum):
+class ApiProviderSchemaType(StrEnum):
    """
    Enum class for api provider schema type.
    """

-    OPENAPI = "openapi"
-    SWAGGER = "swagger"
-    OPENAI_PLUGIN = "openai_plugin"
-    OPENAI_ACTIONS = "openai_actions"
+    OPENAPI = auto()
+    SWAGGER = auto()
+    OPENAI_PLUGIN = auto()
+    OPENAI_ACTIONS = auto()

    @classmethod
    def value_of(cls, value: str) -> "ApiProviderSchemaType":
@@ -92,14 +91,14 @@ class ApiProviderSchemaType(Enum):
        raise ValueError(f"invalid mode value {value}")


-class ApiProviderAuthType(Enum):
+class ApiProviderAuthType(StrEnum):
    """
    Enum class for api provider auth type.
    """

-    NONE = "none"
-    API_KEY_HEADER = "api_key_header"
-    API_KEY_QUERY = "api_key_query"
+    NONE = auto()
+    API_KEY_HEADER = auto()
+    API_KEY_QUERY = auto()

    @classmethod
    def value_of(cls, value: str) -> "ApiProviderAuthType":
@@ -176,10 +175,10 @@ class ToolInvokeMessage(BaseModel):
            return value

    class LogMessage(BaseModel):
-        class LogStatus(Enum):
-            START = "start"
-            ERROR = "error"
-            SUCCESS = "success"
+        class LogStatus(StrEnum):
+            START = auto()
+            ERROR = auto()
+            SUCCESS = auto()

        id: str
        label: str = Field(..., description="The label of the log")
@@ -193,19 +192,19 @@ class ToolInvokeMessage(BaseModel):
        retriever_resources: list[RetrievalSourceMetadata] = Field(..., description="retriever resources")
        context: str = Field(..., description="context")

-    class MessageType(Enum):
-        TEXT = "text"
-        IMAGE = "image"
-        LINK = "link"
-        BLOB = "blob"
-        JSON = "json"
-        IMAGE_LINK = "image_link"
-        BINARY_LINK = "binary_link"
-        VARIABLE = "variable"
-        FILE = "file"
-        LOG = "log"
-        BLOB_CHUNK = "blob_chunk"
-        RETRIEVER_RESOURCES = "retriever_resources"
+    class MessageType(StrEnum):
+        TEXT = auto()
+        IMAGE = auto()
+        LINK = auto()
+        BLOB = auto()
+        JSON = auto()
+        IMAGE_LINK = auto()
+        BINARY_LINK = auto()
+        VARIABLE = auto()
+        FILE = auto()
+        LOG = auto()
+        BLOB_CHUNK = auto()
+        RETRIEVER_RESOURCES = auto()

    type: MessageType = MessageType.TEXT
    """
@@ -250,29 +249,29 @@ class ToolParameter(PluginParameter):
    Overrides type
    """

-    class ToolParameterType(enum.StrEnum):
+    class ToolParameterType(StrEnum):
        """
        removes TOOLS_SELECTOR from PluginParameterType
        """

-        STRING = PluginParameterType.STRING.value
-        NUMBER = PluginParameterType.NUMBER.value
-        BOOLEAN = PluginParameterType.BOOLEAN.value
-        SELECT = PluginParameterType.SELECT.value
-        SECRET_INPUT = PluginParameterType.SECRET_INPUT.value
-        FILE = PluginParameterType.FILE.value
-        FILES = PluginParameterType.FILES.value
-        APP_SELECTOR = PluginParameterType.APP_SELECTOR.value
-        MODEL_SELECTOR = PluginParameterType.MODEL_SELECTOR.value
-        ANY = PluginParameterType.ANY.value
-        DYNAMIC_SELECT = PluginParameterType.DYNAMIC_SELECT.value
+        STRING = PluginParameterType.STRING
+        NUMBER = PluginParameterType.NUMBER
+        BOOLEAN = PluginParameterType.BOOLEAN
+        SELECT = PluginParameterType.SELECT
+        SECRET_INPUT = PluginParameterType.SECRET_INPUT
+        FILE = PluginParameterType.FILE
+        FILES = PluginParameterType.FILES
+        APP_SELECTOR = PluginParameterType.APP_SELECTOR
+        MODEL_SELECTOR = PluginParameterType.MODEL_SELECTOR
+        ANY = PluginParameterType.ANY
+        DYNAMIC_SELECT = PluginParameterType.DYNAMIC_SELECT

        # MCP object and array type parameters
-        ARRAY = MCPServerParameterType.ARRAY.value
-        OBJECT = MCPServerParameterType.OBJECT.value
+        ARRAY = MCPServerParameterType.ARRAY
+        OBJECT = MCPServerParameterType.OBJECT

        # deprecated, should not use.
-        SYSTEM_FILES = PluginParameterType.SYSTEM_FILES.value
+        SYSTEM_FILES = PluginParameterType.SYSTEM_FILES

        def as_normal_type(self):
            return as_normal_type(self)
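Dropping .value when aliasing PluginParameterType members works because a StrEnum member is itself a str: assigning it as another StrEnum's value stores the same plain string. A small sketch with stand-in names, assuming the source enum is string-valued as the diff implies:

from enum import StrEnum, auto


class SourceType(StrEnum):    # stand-in for PluginParameterType
    STRING = auto()
    NUMBER = auto()


class NarrowedType(StrEnum):  # stand-in for ToolParameterType
    STRING = SourceType.STRING  # no .value needed; the member is already a str
    NUMBER = SourceType.NUMBER


assert NarrowedType.STRING.value == "string"
assert NarrowedType.STRING == SourceType.STRING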
@@ -280,10 +279,10 @@ class ToolParameter(PluginParameter):
        def cast_value(self, value: Any):
            return cast_parameter_value(self, value)

-    class ToolParameterForm(Enum):
-        SCHEMA = "schema"  # should be set while adding tool
-        FORM = "form"  # should be set before invoking tool
-        LLM = "llm"  # will be set by LLM
+    class ToolParameterForm(StrEnum):
+        SCHEMA = auto()  # should be set while adding tool
+        FORM = auto()  # should be set before invoking tool
+        LLM = auto()  # will be set by LLM

    type: ToolParameterType = Field(..., description="The type of the parameter")
    human_description: Optional[I18nObject] = Field(default=None, description="The description presented to the user")
@@ -446,14 +445,14 @@ class ToolLabel(BaseModel):
    icon: str = Field(..., description="The icon of the tool")


-class ToolInvokeFrom(Enum):
+class ToolInvokeFrom(StrEnum):
    """
    Enum class for tool invoke
    """

-    WORKFLOW = "workflow"
-    AGENT = "agent"
-    PLUGIN = "plugin"
+    WORKFLOW = auto()
+    AGENT = auto()
+    PLUGIN = auto()


 class ToolSelector(BaseModel):
@@ -478,9 +477,9 @@ class ToolSelector(BaseModel):
        return self.model_dump()


-class CredentialType(enum.StrEnum):
+class CredentialType(StrEnum):
    API_KEY = "api-key"
-    OAUTH2 = "oauth2"
+    OAUTH2 = auto()

    def get_name(self):
        if self == CredentialType.API_KEY:
@@ -1,6 +1,6 @@
 import uuid
 from datetime import datetime
-from enum import Enum
+from enum import StrEnum, auto
 from typing import Optional

 from pydantic import BaseModel, Field
@@ -11,12 +11,12 @@ from libs.datetime_utils import naive_utc_now


 class RouteNodeState(BaseModel):
-    class Status(Enum):
-        RUNNING = "running"
-        SUCCESS = "success"
-        FAILED = "failed"
-        PAUSED = "paused"
-        EXCEPTION = "exception"
+    class Status(StrEnum):
+        RUNNING = auto()
+        SUCCESS = auto()
+        FAILED = auto()
+        PAUSED = auto()
+        EXCEPTION = auto()

    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    """node state id"""
@@ -1,4 +1,4 @@
-from enum import Enum, StrEnum
+from enum import IntEnum, StrEnum, auto
 from typing import Any, Literal, Union

 from pydantic import BaseModel
@@ -25,9 +25,9 @@ class AgentNodeData(BaseNodeData):
    agent_parameters: dict[str, AgentInput]


-class ParamsAutoGenerated(Enum):
-    CLOSE = 0
-    OPEN = 1
+class ParamsAutoGenerated(IntEnum):
+    CLOSE = auto()
+    OPEN = auto()


 class AgentOldVersionModelFeatures(StrEnum):
@@ -38,8 +38,8 @@ class AgentOldVersionModelFeatures(StrEnum):
    TOOL_CALL = "tool-call"
    MULTI_TOOL_CALL = "multi-tool-call"
    AGENT_THOUGHT = "agent-thought"
-    VISION = "vision"
+    VISION = auto()
    STREAM_TOOL_CALL = "stream-tool-call"
-    DOCUMENT = "document"
-    VIDEO = "video"
-    AUDIO = "audio"
+    DOCUMENT = auto()
+    VIDEO = auto()
+    AUDIO = auto()
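One language detail worth keeping in mind with the ParamsAutoGenerated change: for an IntEnum, auto() numbers members starting at 1, so CLOSE and OPEN take the values 1 and 2 rather than the previous 0 and 1. Comparisons against the members themselves keep working, but code that relied on the raw 0/1 literals would see different values. A minimal sketch of the rule (hypothetical example, not project code):

from enum import IntEnum, auto


class Toggle(IntEnum):
    CLOSE = auto()  # auto() starts counting at 1
    OPEN = auto()   # 2


assert Toggle.CLOSE == 1 and Toggle.OPEN == 2
assert Toggle(1) is Toggle.CLOSE  # lookup by value still works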
@@ -1,5 +1,5 @@
 from collections.abc import Sequence
-from enum import Enum
+from enum import StrEnum, auto

 from pydantic import BaseModel, Field

@@ -19,9 +19,9 @@ class GenerateRouteChunk(BaseModel):
    Generate Route Chunk.
    """

-    class ChunkType(Enum):
-        VAR = "var"
-        TEXT = "text"
+    class ChunkType(StrEnum):
+        VAR = auto()
+        TEXT = auto()

    type: ChunkType = Field(..., description="generate route chunk type")

@@ -259,7 +259,7 @@ class KnowledgeRetrievalNode(BaseNode):
        )
        all_documents = []
        dataset_retrieval = DatasetRetrieval()
-        if node_data.retrieval_mode == DatasetRetrieveConfigEntity.RetrieveStrategy.SINGLE.value:
+        if node_data.retrieval_mode == DatasetRetrieveConfigEntity.RetrieveStrategy.SINGLE:
            # fetch model config
            if node_data.single_retrieval_config is None:
                raise ValueError("single_retrieval_config is required")
@@ -291,7 +291,7 @@ class KnowledgeRetrievalNode(BaseNode):
                metadata_filter_document_ids=metadata_filter_document_ids,
                metadata_condition=metadata_condition,
            )
-        elif node_data.retrieval_mode == DatasetRetrieveConfigEntity.RetrieveStrategy.MULTIPLE.value:
+        elif node_data.retrieval_mode == DatasetRetrieveConfigEntity.RetrieveStrategy.MULTIPLE:
            if node_data.multiple_retrieval_config is None:
                raise ValueError("multiple_retrieval_config is required")
            if node_data.multiple_retrieval_config.reranking_mode == "reranking_model":
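The comparison changes in this node (and throughout the services below) rely on a StrEnum member comparing equal to its plain-string value, so `retrieval_mode == RetrieveStrategy.SINGLE` matches exactly the strings that `== RetrieveStrategy.SINGLE.value` used to. A small sketch, with a hypothetical enum standing in for the real RetrieveStrategy:

from enum import StrEnum, auto


class RetrieveStrategy(StrEnum):  # stand-in; the real enum lives in the app config entities
    SINGLE = auto()
    MULTIPLE = auto()


retrieval_mode = "single"  # e.g. a value deserialized from node config
assert retrieval_mode == RetrieveStrategy.SINGLE        # no .value needed
assert retrieval_mode == RetrieveStrategy.SINGLE.value  # the old spelling is equivalent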
@@ -9,19 +9,19 @@ import json
 import logging
 from dataclasses import asdict, dataclass
 from datetime import datetime
-from enum import Enum
+from enum import StrEnum, auto
 from typing import Any, Optional

 logger = logging.getLogger(__name__)


-class FileStatus(Enum):
+class FileStatus(StrEnum):
    """File status enumeration"""

-    ACTIVE = "active"  # Active status
-    ARCHIVED = "archived"  # Archived
-    DELETED = "deleted"  # Deleted (soft delete)
-    BACKUP = "backup"  # Backup file
+    ACTIVE = auto()  # Active status
+    ARCHIVED = auto()  # Archived
+    DELETED = auto()  # Deleted (soft delete)
+    BACKUP = auto()  # Backup file


 @dataclass
@@ -5,13 +5,13 @@ According to ClickZetta's permission model, different Volume types have differen
 """

 import logging
-from enum import Enum
+from enum import StrEnum
 from typing import Optional

 logger = logging.getLogger(__name__)


-class VolumePermission(Enum):
+class VolumePermission(StrEnum):
    """Volume permission type enumeration"""

    READ = "SELECT"  # Corresponds to ClickZetta's SELECT permission
@@ -7,7 +7,7 @@ eliminates the need for repetitive language switching logic.
 """

 from dataclasses import dataclass
-from enum import Enum
+from enum import StrEnum, auto
 from typing import Any, Optional, Protocol

 from flask import render_template
@@ -17,30 +17,30 @@ from extensions.ext_mail import mail
 from services.feature_service import BrandingModel, FeatureService


-class EmailType(Enum):
+class EmailType(StrEnum):
    """Enumeration of supported email types."""

-    RESET_PASSWORD = "reset_password"
-    RESET_PASSWORD_WHEN_ACCOUNT_NOT_EXIST = "reset_password_when_account_not_exist"
-    INVITE_MEMBER = "invite_member"
-    EMAIL_CODE_LOGIN = "email_code_login"
-    CHANGE_EMAIL_OLD = "change_email_old"
-    CHANGE_EMAIL_NEW = "change_email_new"
-    CHANGE_EMAIL_COMPLETED = "change_email_completed"
-    OWNER_TRANSFER_CONFIRM = "owner_transfer_confirm"
-    OWNER_TRANSFER_OLD_NOTIFY = "owner_transfer_old_notify"
-    OWNER_TRANSFER_NEW_NOTIFY = "owner_transfer_new_notify"
-    ACCOUNT_DELETION_SUCCESS = "account_deletion_success"
-    ACCOUNT_DELETION_VERIFICATION = "account_deletion_verification"
-    ENTERPRISE_CUSTOM = "enterprise_custom"
-    QUEUE_MONITOR_ALERT = "queue_monitor_alert"
-    DOCUMENT_CLEAN_NOTIFY = "document_clean_notify"
-    EMAIL_REGISTER = "email_register"
-    EMAIL_REGISTER_WHEN_ACCOUNT_EXIST = "email_register_when_account_exist"
-    RESET_PASSWORD_WHEN_ACCOUNT_NOT_EXIST_NO_REGISTER = "reset_password_when_account_not_exist_no_register"
+    RESET_PASSWORD = auto()
+    RESET_PASSWORD_WHEN_ACCOUNT_NOT_EXIST = auto()
+    INVITE_MEMBER = auto()
+    EMAIL_CODE_LOGIN = auto()
+    CHANGE_EMAIL_OLD = auto()
+    CHANGE_EMAIL_NEW = auto()
+    CHANGE_EMAIL_COMPLETED = auto()
+    OWNER_TRANSFER_CONFIRM = auto()
+    OWNER_TRANSFER_OLD_NOTIFY = auto()
+    OWNER_TRANSFER_NEW_NOTIFY = auto()
+    ACCOUNT_DELETION_SUCCESS = auto()
+    ACCOUNT_DELETION_VERIFICATION = auto()
+    ENTERPRISE_CUSTOM = auto()
+    QUEUE_MONITOR_ALERT = auto()
+    DOCUMENT_CLEAN_NOTIFY = auto()
+    EMAIL_REGISTER = auto()
+    EMAIL_REGISTER_WHEN_ACCOUNT_EXIST = auto()
+    RESET_PASSWORD_WHEN_ACCOUNT_NOT_EXIST_NO_REGISTER = auto()


-class EmailLanguage(Enum):
+class EmailLanguage(StrEnum):
    """Supported email languages with fallback handling."""

    EN_US = "en-US"
@@ -68,7 +68,7 @@ class AppIconUrlField(fields.Raw):
        if isinstance(obj, dict) and "app" in obj:
            obj = obj["app"]

-        if isinstance(obj, App | Site) and obj.icon_type == IconType.IMAGE.value:
+        if isinstance(obj, App | Site) and obj.icon_type == IconType.IMAGE:
            return file_helpers.get_signed_file_url(obj.icon)
        return None

@@ -224,35 +224,35 @@ class Dataset(Base):
            doc_metadata.append(
                {
                    "id": "built-in",
-                    "name": BuiltInField.document_name.value,
+                    "name": BuiltInField.document_name,
                    "type": "string",
                }
            )
            doc_metadata.append(
                {
                    "id": "built-in",
-                    "name": BuiltInField.uploader.value,
+                    "name": BuiltInField.uploader,
                    "type": "string",
                }
            )
            doc_metadata.append(
                {
                    "id": "built-in",
-                    "name": BuiltInField.upload_date.value,
+                    "name": BuiltInField.upload_date,
                    "type": "time",
                }
            )
            doc_metadata.append(
                {
                    "id": "built-in",
-                    "name": BuiltInField.last_update_date.value,
+                    "name": BuiltInField.last_update_date,
                    "type": "time",
                }
            )
            doc_metadata.append(
                {
                    "id": "built-in",
-                    "name": BuiltInField.source.value,
+                    "name": BuiltInField.source,
                    "type": "string",
                }
            )
@@ -544,7 +544,7 @@ class Document(Base):
                    "id": "built-in",
                    "name": BuiltInField.source,
                    "type": "string",
-                    "value": MetadataDataSource[self.data_source_type].value,
+                    "value": MetadataDataSource[self.data_source_type],
                }
            )
            return built_in_fields
@@ -3,7 +3,7 @@ import re
 import uuid
 from collections.abc import Mapping
 from datetime import datetime
-from enum import Enum, StrEnum
+from enum import StrEnum, auto
 from typing import TYPE_CHECKING, Any, Literal, Optional, cast

 from core.plugin.entities.plugin import GenericProviderID
@@ -62,9 +62,9 @@ class AppMode(StrEnum):
        raise ValueError(f"invalid mode value {value}")


-class IconType(Enum):
-    IMAGE = "image"
-    EMOJI = "emoji"
+class IconType(StrEnum):
+    IMAGE = auto()
+    EMOJI = auto()


 class App(Base):
@@ -149,15 +149,15 @@ class App(Base):
            if app_model_config.agent_mode_dict.get("enabled", False) and app_model_config.agent_mode_dict.get(
                "strategy", ""
            ) in {"function_call", "react"}:
-                self.mode = AppMode.AGENT_CHAT.value
+                self.mode = AppMode.AGENT_CHAT
                db.session.commit()
                return True
        return False

    @property
    def mode_compatible_with_agent(self) -> str:
-        if self.mode == AppMode.CHAT.value and self.is_agent:
-            return AppMode.AGENT_CHAT.value
+        if self.mode == AppMode.CHAT and self.is_agent:
+            return AppMode.AGENT_CHAT

        return str(self.mode)

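Assignments such as `self.mode = AppMode.AGENT_CHAT`, and returning a member from a method annotated `-> str`, stay value-compatible because every StrEnum member is a real str instance, and `str(member)` yields the value rather than the `ClassName.MEMBER` text that the older `class X(str, Enum)` mixin produces. A sketch of the distinction, with hypothetical names:

from enum import Enum, StrEnum


class NewMode(StrEnum):
    AGENT_CHAT = "agent-chat"


class OldMode(str, Enum):  # the pre-3.11 mixin pattern, for contrast
    AGENT_CHAT = "agent-chat"


assert isinstance(NewMode.AGENT_CHAT, str)
assert str(NewMode.AGENT_CHAT) == "agent-chat"          # StrEnum: str() gives the value
assert f"{NewMode.AGENT_CHAT}" == "agent-chat"
assert str(OldMode.AGENT_CHAT) == "OldMode.AGENT_CHAT"  # old mixin: str() gives the qualified name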
@@ -713,7 +713,7 @@ class Conversation(Base):
        model_config = {}
        app_model_config: Optional[AppModelConfig] = None

-        if self.mode == AppMode.ADVANCED_CHAT.value:
+        if self.mode == AppMode.ADVANCED_CHAT:
            if self.override_model_configs:
                override_model_configs = json.loads(self.override_model_configs)
                model_config = override_model_configs
@@ -1,5 +1,5 @@
 from datetime import datetime
-from enum import Enum
+from enum import StrEnum, auto
 from functools import cached_property
 from typing import Optional

@@ -12,9 +12,9 @@ from .engine import db
 from .types import StringUUID


-class ProviderType(Enum):
-    CUSTOM = "custom"
-    SYSTEM = "system"
+class ProviderType(StrEnum):
+    CUSTOM = auto()
+    SYSTEM = auto()

    @staticmethod
    def value_of(value: str) -> "ProviderType":
@@ -24,14 +24,14 @@ class ProviderType(Enum):
        raise ValueError(f"No matching enum found for value '{value}'")


-class ProviderQuotaType(Enum):
-    PAID = "paid"
+class ProviderQuotaType(StrEnum):
+    PAID = auto()
    """hosted paid quota"""

-    FREE = "free"
+    FREE = auto()
    """third-party free quota"""

-    TRIAL = "trial"
+    TRIAL = auto()
    """hosted trial quota"""

    @staticmethod
@@ -2,7 +2,7 @@ import json
 import logging
 from collections.abc import Mapping, Sequence
 from datetime import datetime
-from enum import Enum, StrEnum
+from enum import StrEnum, auto
 from typing import TYPE_CHECKING, Any, Optional, Union, cast
 from uuid import uuid4

@@ -41,13 +41,13 @@ from .types import EnumText, StringUUID
 logger = logging.getLogger(__name__)


-class WorkflowType(Enum):
+class WorkflowType(StrEnum):
    """
    Workflow Type Enum
    """

-    WORKFLOW = "workflow"
-    CHAT = "chat"
+    WORKFLOW = auto()
+    CHAT = auto()

    @classmethod
    def value_of(cls, value: str) -> "WorkflowType":
@@ -777,7 +777,7 @@ class WorkflowNodeExecutionModel(Base):
        return extras


-class WorkflowAppLogCreatedFrom(Enum):
+class WorkflowAppLogCreatedFrom(StrEnum):
    """
    Workflow App Log Created From Enum
    """
@@ -32,14 +32,14 @@ class AdvancedPromptTemplateService:
    def get_common_prompt(cls, app_mode: str, model_mode: str, has_context: str):
        context_prompt = copy.deepcopy(CONTEXT)

-        if app_mode == AppMode.CHAT.value:
+        if app_mode == AppMode.CHAT:
            if model_mode == "completion":
                return cls.get_completion_prompt(
                    copy.deepcopy(CHAT_APP_COMPLETION_PROMPT_CONFIG), has_context, context_prompt
                )
            elif model_mode == "chat":
                return cls.get_chat_prompt(copy.deepcopy(CHAT_APP_CHAT_PROMPT_CONFIG), has_context, context_prompt)
-        elif app_mode == AppMode.COMPLETION.value:
+        elif app_mode == AppMode.COMPLETION:
            if model_mode == "completion":
                return cls.get_completion_prompt(
                    copy.deepcopy(COMPLETION_APP_COMPLETION_PROMPT_CONFIG), has_context, context_prompt
@@ -73,7 +73,7 @@ class AdvancedPromptTemplateService:
    def get_baichuan_prompt(cls, app_mode: str, model_mode: str, has_context: str):
        baichuan_context_prompt = copy.deepcopy(BAICHUAN_CONTEXT)

-        if app_mode == AppMode.CHAT.value:
+        if app_mode == AppMode.CHAT:
            if model_mode == "completion":
                return cls.get_completion_prompt(
                    copy.deepcopy(BAICHUAN_CHAT_APP_COMPLETION_PROMPT_CONFIG), has_context, baichuan_context_prompt
@@ -82,7 +82,7 @@ class AdvancedPromptTemplateService:
                return cls.get_chat_prompt(
                    copy.deepcopy(BAICHUAN_CHAT_APP_CHAT_PROMPT_CONFIG), has_context, baichuan_context_prompt
                )
-        elif app_mode == AppMode.COMPLETION.value:
+        elif app_mode == AppMode.COMPLETION:
            if model_mode == "completion":
                return cls.get_completion_prompt(
                    copy.deepcopy(BAICHUAN_COMPLETION_APP_COMPLETION_PROMPT_CONFIG),
@@ -60,7 +60,7 @@ class AppGenerateService:
        request_id = RateLimit.gen_request_key()
        try:
            request_id = rate_limit.enter(request_id)
-            if app_model.mode == AppMode.COMPLETION.value:
+            if app_model.mode == AppMode.COMPLETION:
                return rate_limit.generate(
                    CompletionAppGenerator.convert_to_event_stream(
                        CompletionAppGenerator().generate(
@@ -69,7 +69,7 @@ class AppGenerateService:
                    ),
                    request_id=request_id,
                )
-            elif app_model.mode == AppMode.AGENT_CHAT.value or app_model.is_agent:
+            elif app_model.mode == AppMode.AGENT_CHAT or app_model.is_agent:
                return rate_limit.generate(
                    AgentChatAppGenerator.convert_to_event_stream(
                        AgentChatAppGenerator().generate(
@@ -78,7 +78,7 @@ class AppGenerateService:
                    ),
                    request_id,
                )
-            elif app_model.mode == AppMode.CHAT.value:
+            elif app_model.mode == AppMode.CHAT:
                return rate_limit.generate(
                    ChatAppGenerator.convert_to_event_stream(
                        ChatAppGenerator().generate(
@@ -87,7 +87,7 @@ class AppGenerateService:
                    ),
                    request_id=request_id,
                )
-            elif app_model.mode == AppMode.ADVANCED_CHAT.value:
+            elif app_model.mode == AppMode.ADVANCED_CHAT:
                workflow_id = args.get("workflow_id")
                workflow = cls._get_workflow(app_model, invoke_from, workflow_id)
                return rate_limit.generate(
@@ -103,7 +103,7 @@ class AppGenerateService:
                    ),
                    request_id=request_id,
                )
-            elif app_model.mode == AppMode.WORKFLOW.value:
+            elif app_model.mode == AppMode.WORKFLOW:
                workflow_id = args.get("workflow_id")
                workflow = cls._get_workflow(app_model, invoke_from, workflow_id)
                return rate_limit.generate(
@@ -155,14 +155,14 @@ class AppGenerateService:

    @classmethod
    def generate_single_iteration(cls, app_model: App, user: Account, node_id: str, args: Any, streaming: bool = True):
-        if app_model.mode == AppMode.ADVANCED_CHAT.value:
+        if app_model.mode == AppMode.ADVANCED_CHAT:
            workflow = cls._get_workflow(app_model, InvokeFrom.DEBUGGER)
            return AdvancedChatAppGenerator.convert_to_event_stream(
                AdvancedChatAppGenerator().single_iteration_generate(
                    app_model=app_model, workflow=workflow, node_id=node_id, user=user, args=args, streaming=streaming
                )
            )
-        elif app_model.mode == AppMode.WORKFLOW.value:
+        elif app_model.mode == AppMode.WORKFLOW:
            workflow = cls._get_workflow(app_model, InvokeFrom.DEBUGGER)
            return AdvancedChatAppGenerator.convert_to_event_stream(
                WorkflowAppGenerator().single_iteration_generate(
@@ -174,14 +174,14 @@ class AppGenerateService:

    @classmethod
    def generate_single_loop(cls, app_model: App, user: Account, node_id: str, args: Any, streaming: bool = True):
-        if app_model.mode == AppMode.ADVANCED_CHAT.value:
+        if app_model.mode == AppMode.ADVANCED_CHAT:
            workflow = cls._get_workflow(app_model, InvokeFrom.DEBUGGER)
            return AdvancedChatAppGenerator.convert_to_event_stream(
                AdvancedChatAppGenerator().single_loop_generate(
                    app_model=app_model, workflow=workflow, node_id=node_id, user=user, args=args, streaming=streaming
                )
            )
-        elif app_model.mode == AppMode.WORKFLOW.value:
+        elif app_model.mode == AppMode.WORKFLOW:
            workflow = cls._get_workflow(app_model, InvokeFrom.DEBUGGER)
            return AdvancedChatAppGenerator.convert_to_event_stream(
                WorkflowAppGenerator().single_loop_generate(
|
||||||
|
|
@ -40,15 +40,15 @@ class AppService:
|
||||||
filters = [App.tenant_id == tenant_id, App.is_universal == False]
|
filters = [App.tenant_id == tenant_id, App.is_universal == False]
|
||||||
|
|
||||||
if args["mode"] == "workflow":
|
if args["mode"] == "workflow":
|
||||||
filters.append(App.mode == AppMode.WORKFLOW.value)
|
filters.append(App.mode == AppMode.WORKFLOW)
|
||||||
elif args["mode"] == "completion":
|
elif args["mode"] == "completion":
|
||||||
filters.append(App.mode == AppMode.COMPLETION.value)
|
filters.append(App.mode == AppMode.COMPLETION)
|
||||||
elif args["mode"] == "chat":
|
elif args["mode"] == "chat":
|
||||||
filters.append(App.mode == AppMode.CHAT.value)
|
filters.append(App.mode == AppMode.CHAT)
|
||||||
elif args["mode"] == "advanced-chat":
|
elif args["mode"] == "advanced-chat":
|
||||||
filters.append(App.mode == AppMode.ADVANCED_CHAT.value)
|
filters.append(App.mode == AppMode.ADVANCED_CHAT)
|
||||||
elif args["mode"] == "agent-chat":
|
elif args["mode"] == "agent-chat":
|
||||||
filters.append(App.mode == AppMode.AGENT_CHAT.value)
|
filters.append(App.mode == AppMode.AGENT_CHAT)
|
||||||
|
|
||||||
if args.get("is_created_by_me", False):
|
if args.get("is_created_by_me", False):
|
||||||
filters.append(App.created_by == user_id)
|
filters.append(App.created_by == user_id)
|
||||||
|
|
@ -171,7 +171,7 @@ class AppService:
|
||||||
assert isinstance(current_user, Account)
|
assert isinstance(current_user, Account)
|
||||||
assert current_user.current_tenant_id is not None
|
assert current_user.current_tenant_id is not None
|
||||||
# get original app model config
|
# get original app model config
|
||||||
if app.mode == AppMode.AGENT_CHAT.value or app.is_agent:
|
if app.mode == AppMode.AGENT_CHAT or app.is_agent:
|
||||||
model_config = app.app_model_config
|
model_config = app.app_model_config
|
||||||
if not model_config:
|
if not model_config:
|
||||||
return app
|
return app
|
||||||
|
|
|
||||||
|
|
@@ -31,7 +31,7 @@ logger = logging.getLogger(__name__)
 class AudioService:
    @classmethod
    def transcript_asr(cls, app_model: App, file: FileStorage, end_user: Optional[str] = None):
-        if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
+        if app_model.mode in {AppMode.ADVANCED_CHAT, AppMode.WORKFLOW}:
            workflow = app_model.workflow
            if workflow is None:
                raise ValueError("Speech to text is not enabled")
@@ -88,7 +88,7 @@ class AudioService:
        def invoke_tts(text_content: str, app_model: App, voice: Optional[str] = None, is_draft: bool = False):
            with app.app_context():
                if voice is None:
-                    if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
+                    if app_model.mode in {AppMode.ADVANCED_CHAT, AppMode.WORKFLOW}:
                        if is_draft:
                            workflow = WorkflowService().get_draft_workflow(app_model=app_model)
                        else:
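The membership tests above work even though `app_model.mode` is a plain database string: StrEnum members are str instances, so they compare equal to, and hash like, their values, and a set lookup keyed by the raw string still hits the member. An illustrative sketch with a hypothetical mode enum:

from enum import StrEnum, auto


class Mode(StrEnum):  # stand-in for AppMode
    ADVANCED_CHAT = "advanced-chat"
    WORKFLOW = auto()


stored_mode = "workflow"  # what a VARCHAR column would hand back
assert stored_mode in {Mode.ADVANCED_CHAT, Mode.WORKFLOW}
assert "advanced-chat" in {Mode.ADVANCED_CHAT, Mode.WORKFLOW}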
@@ -1004,7 +1004,7 @@ class DocumentService:
        if dataset.built_in_field_enabled:
            if document.doc_metadata:
                doc_metadata = copy.deepcopy(document.doc_metadata)
-                doc_metadata[BuiltInField.document_name.value] = name
+                doc_metadata[BuiltInField.document_name] = name
                document.doc_metadata = doc_metadata

        document.name = name
@@ -229,7 +229,7 @@ class MessageService:

        model_manager = ModelManager()

-        if app_model.mode == AppMode.ADVANCED_CHAT.value:
+        if app_model.mode == AppMode.ADVANCED_CHAT:
            workflow_service = WorkflowService()
            if invoke_from == InvokeFrom.DEBUGGER:
                workflow = workflow_service.get_draft_workflow(app_model=app_model)
@@ -131,11 +131,11 @@ class MetadataService:
    @staticmethod
    def get_built_in_fields():
        return [
-            {"name": BuiltInField.document_name.value, "type": "string"},
-            {"name": BuiltInField.uploader.value, "type": "string"},
-            {"name": BuiltInField.upload_date.value, "type": "time"},
-            {"name": BuiltInField.last_update_date.value, "type": "time"},
-            {"name": BuiltInField.source.value, "type": "string"},
+            {"name": BuiltInField.document_name, "type": "string"},
+            {"name": BuiltInField.uploader, "type": "string"},
+            {"name": BuiltInField.upload_date, "type": "time"},
+            {"name": BuiltInField.last_update_date, "type": "time"},
+            {"name": BuiltInField.source, "type": "string"},
        ]

    @staticmethod
@@ -153,11 +153,11 @@ class MetadataService:
                doc_metadata = {}
            else:
                doc_metadata = copy.deepcopy(document.doc_metadata)
-            doc_metadata[BuiltInField.document_name.value] = document.name
-            doc_metadata[BuiltInField.uploader.value] = document.uploader
-            doc_metadata[BuiltInField.upload_date.value] = document.upload_date.timestamp()
-            doc_metadata[BuiltInField.last_update_date.value] = document.last_update_date.timestamp()
-            doc_metadata[BuiltInField.source.value] = MetadataDataSource[document.data_source_type].value
+            doc_metadata[BuiltInField.document_name] = document.name
+            doc_metadata[BuiltInField.uploader] = document.uploader
+            doc_metadata[BuiltInField.upload_date] = document.upload_date.timestamp()
+            doc_metadata[BuiltInField.last_update_date] = document.last_update_date.timestamp()
+            doc_metadata[BuiltInField.source] = MetadataDataSource[document.data_source_type]
            document.doc_metadata = doc_metadata
            db.session.add(document)
        dataset.built_in_field_enabled = True
@@ -183,11 +183,11 @@ class MetadataService:
                doc_metadata = {}
            else:
                doc_metadata = copy.deepcopy(document.doc_metadata)
-            doc_metadata.pop(BuiltInField.document_name.value, None)
-            doc_metadata.pop(BuiltInField.uploader.value, None)
-            doc_metadata.pop(BuiltInField.upload_date.value, None)
-            doc_metadata.pop(BuiltInField.last_update_date.value, None)
-            doc_metadata.pop(BuiltInField.source.value, None)
+            doc_metadata.pop(BuiltInField.document_name, None)
+            doc_metadata.pop(BuiltInField.uploader, None)
+            doc_metadata.pop(BuiltInField.upload_date, None)
+            doc_metadata.pop(BuiltInField.last_update_date, None)
+            doc_metadata.pop(BuiltInField.source, None)
            document.doc_metadata = doc_metadata
            db.session.add(document)
            document_ids.append(document.id)
@@ -211,11 +211,11 @@ class MetadataService:
            for metadata_value in operation.metadata_list:
                doc_metadata[metadata_value.name] = metadata_value.value
            if dataset.built_in_field_enabled:
-                doc_metadata[BuiltInField.document_name.value] = document.name
-                doc_metadata[BuiltInField.uploader.value] = document.uploader
-                doc_metadata[BuiltInField.upload_date.value] = document.upload_date.timestamp()
-                doc_metadata[BuiltInField.last_update_date.value] = document.last_update_date.timestamp()
-                doc_metadata[BuiltInField.source.value] = MetadataDataSource[document.data_source_type].value
+                doc_metadata[BuiltInField.document_name] = document.name
+                doc_metadata[BuiltInField.uploader] = document.uploader
+                doc_metadata[BuiltInField.upload_date] = document.upload_date.timestamp()
+                doc_metadata[BuiltInField.last_update_date] = document.last_update_date.timestamp()
+                doc_metadata[BuiltInField.source] = MetadataDataSource[document.data_source_type]
            document.doc_metadata = doc_metadata
            db.session.add(document)
            db.session.commit()
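Using BuiltInField members directly as doc_metadata keys keeps the stored JSON unchanged: since the members are str instances, dict writes, pops and json serialization all see the plain key string, whether the caller passes the member or the raw string. A short sketch under that assumption (names are stand-ins):

import json
from enum import StrEnum, auto


class BuiltIn(StrEnum):  # stand-in for BuiltInField
    document_name = auto()
    uploader = auto()


doc_metadata = {}
doc_metadata[BuiltIn.document_name] = "handbook.pdf"
doc_metadata[BuiltIn.uploader] = "alice"

# serializes with plain string keys, exactly as the old .value spelling did
assert json.dumps(doc_metadata, sort_keys=True) == '{"document_name": "handbook.pdf", "uploader": "alice"}'
doc_metadata.pop("uploader", None)  # popping by the raw string removes the enum-keyed entry
assert BuiltIn.uploader not in doc_metadata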
@@ -256,7 +256,7 @@ class PluginMigration:
                return []

            agent_app_model_config_ids = [
-                app.app_model_config_id for app in apps if app.is_agent or app.mode == AppMode.AGENT_CHAT.value
+                app.app_model_config_id for app in apps if app.is_agent or app.mode == AppMode.AGENT_CHAT
            ]

            rs = session.query(AppModelConfig).where(AppModelConfig.id.in_(agent_app_model_config_ids)).all()
@@ -65,7 +65,7 @@ class WorkflowConverter:
        new_app = App()
        new_app.tenant_id = app_model.tenant_id
        new_app.name = name or app_model.name + "(workflow)"
-        new_app.mode = AppMode.ADVANCED_CHAT.value if app_model.mode == AppMode.CHAT.value else AppMode.WORKFLOW.value
+        new_app.mode = AppMode.ADVANCED_CHAT if app_model.mode == AppMode.CHAT else AppMode.WORKFLOW
        new_app.icon_type = icon_type or app_model.icon_type
        new_app.icon = icon or app_model.icon
        new_app.icon_background = icon_background or app_model.icon_background
@@ -203,7 +203,7 @@ class WorkflowConverter:
        app_mode_enum = AppMode.value_of(app_model.mode)
        app_config: EasyUIBasedAppConfig
        if app_mode_enum == AppMode.AGENT_CHAT or app_model.is_agent:
-            app_model.mode = AppMode.AGENT_CHAT.value
+            app_model.mode = AppMode.AGENT_CHAT
            app_config = AgentChatAppConfigManager.get_app_config(
                app_model=app_model, app_model_config=app_model_config
            )
@@ -279,7 +279,7 @@ class WorkflowConverter:
                    "app_id": app_model.id,
                    "tool_variable": tool_variable,
                    "inputs": inputs,
-                    "query": "{{#sys.query#}}" if app_model.mode == AppMode.CHAT.value else "",
+                    "query": "{{#sys.query#}}" if app_model.mode == AppMode.CHAT else "",
                },
            }

@@ -618,7 +618,7 @@ class WorkflowConverter:
        :param app_model: App instance
        :return: AppMode
        """
-        if app_model.mode == AppMode.COMPLETION.value:
+        if app_model.mode == AppMode.COMPLETION:
            return AppMode.WORKFLOW
        else:
            return AppMode.ADVANCED_CHAT
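The converter now stores enum members on the model (`new_app.mode = AppMode.ADVANCED_CHAT if ... else AppMode.WORKFLOW`); when those values are later read back as plain strings and fed through value-based lookup or equality, the round trip is lossless because member and value are the same string. A sketch with a trimmed stand-in for the real AppMode:

from enum import StrEnum


class AppMode(StrEnum):  # trimmed stand-in
    CHAT = "chat"
    ADVANCED_CHAT = "advanced-chat"
    WORKFLOW = "workflow"


current_mode = "chat"  # plain string, e.g. loaded from the apps table
new_mode = AppMode.ADVANCED_CHAT if current_mode == AppMode.CHAT else AppMode.WORKFLOW
assert new_mode == "advanced-chat"                 # what actually lands in the VARCHAR column
assert AppMode(new_mode) is AppMode.ADVANCED_CHAT  # lookup by value round-trips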
@@ -828,7 +828,7 @@ class WorkflowService:
        # chatbot convert to workflow mode
        workflow_converter = WorkflowConverter()

-        if app_model.mode not in {AppMode.CHAT.value, AppMode.COMPLETION.value}:
+        if app_model.mode not in {AppMode.CHAT, AppMode.COMPLETION}:
            raise ValueError(f"Current App mode: {app_model.mode} is not supported convert to workflow.")

        # convert to workflow
@@ -844,11 +844,11 @@ class WorkflowService:
        return new_app

    def validate_features_structure(self, app_model: App, features: dict):
-        if app_model.mode == AppMode.ADVANCED_CHAT.value:
+        if app_model.mode == AppMode.ADVANCED_CHAT:
            return AdvancedChatAppConfigManager.config_validate(
                tenant_id=app_model.tenant_id, config=features, only_structure_validate=True
            )
-        elif app_model.mode == AppMode.WORKFLOW.value:
+        elif app_model.mode == AppMode.WORKFLOW:
            return WorkflowAppConfigManager.config_validate(
                tenant_id=app_model.tenant_id, config=features, only_structure_validate=True
            )
@ -42,7 +42,7 @@ class TestAdvancedPromptTemplateService:
|
||||||
|
|
||||||
# Test data for Baichuan model
|
# Test data for Baichuan model
|
||||||
args = {
|
args = {
|
||||||
"app_mode": AppMode.CHAT.value,
|
"app_mode": AppMode.CHAT,
|
||||||
"model_mode": "completion",
|
"model_mode": "completion",
|
||||||
"model_name": "baichuan-13b-chat",
|
"model_name": "baichuan-13b-chat",
|
||||||
"has_context": "true",
|
"has_context": "true",
|
||||||
|
|
@ -77,7 +77,7 @@ class TestAdvancedPromptTemplateService:
|
||||||
|
|
||||||
# Test data for common model
|
# Test data for common model
|
||||||
args = {
|
args = {
|
||||||
"app_mode": AppMode.CHAT.value,
|
"app_mode": AppMode.CHAT,
|
||||||
"model_mode": "completion",
|
"model_mode": "completion",
|
||||||
"model_name": "gpt-3.5-turbo",
|
"model_name": "gpt-3.5-turbo",
|
||||||
"has_context": "true",
|
"has_context": "true",
|
||||||
|
|
@ -116,7 +116,7 @@ class TestAdvancedPromptTemplateService:
|
||||||
|
|
||||||
for model_name in test_cases:
|
for model_name in test_cases:
|
||||||
args = {
|
args = {
|
||||||
"app_mode": AppMode.CHAT.value,
|
"app_mode": AppMode.CHAT,
|
||||||
"model_mode": "completion",
|
"model_mode": "completion",
|
||||||
"model_name": model_name,
|
"model_name": model_name,
|
||||||
"has_context": "true",
|
"has_context": "true",
|
||||||
|
|
@@ -144,7 +144,7 @@ class TestAdvancedPromptTemplateService:
         fake = Faker()

         # Act: Execute the method under test
-        result = AdvancedPromptTemplateService.get_common_prompt(AppMode.CHAT.value, "completion", "true")
+        result = AdvancedPromptTemplateService.get_common_prompt(AppMode.CHAT, "completion", "true")

         # Assert: Verify the expected outcomes
         assert result is not None

@@ -173,7 +173,7 @@ class TestAdvancedPromptTemplateService:
         fake = Faker()

         # Act: Execute the method under test
-        result = AdvancedPromptTemplateService.get_common_prompt(AppMode.CHAT.value, "chat", "true")
+        result = AdvancedPromptTemplateService.get_common_prompt(AppMode.CHAT, "chat", "true")

         # Assert: Verify the expected outcomes
         assert result is not None

@@ -202,7 +202,7 @@ class TestAdvancedPromptTemplateService:
         fake = Faker()

         # Act: Execute the method under test
-        result = AdvancedPromptTemplateService.get_common_prompt(AppMode.COMPLETION.value, "completion", "true")
+        result = AdvancedPromptTemplateService.get_common_prompt(AppMode.COMPLETION, "completion", "true")

         # Assert: Verify the expected outcomes
         assert result is not None

@@ -230,7 +230,7 @@ class TestAdvancedPromptTemplateService:
         fake = Faker()

         # Act: Execute the method under test
-        result = AdvancedPromptTemplateService.get_common_prompt(AppMode.COMPLETION.value, "chat", "true")
+        result = AdvancedPromptTemplateService.get_common_prompt(AppMode.COMPLETION, "chat", "true")

         # Assert: Verify the expected outcomes
         assert result is not None

@@ -257,7 +257,7 @@ class TestAdvancedPromptTemplateService:
         fake = Faker()

         # Act: Execute the method under test
-        result = AdvancedPromptTemplateService.get_common_prompt(AppMode.CHAT.value, "completion", "false")
+        result = AdvancedPromptTemplateService.get_common_prompt(AppMode.CHAT, "completion", "false")

         # Assert: Verify the expected outcomes
         assert result is not None

@@ -303,7 +303,7 @@ class TestAdvancedPromptTemplateService:
         fake = Faker()

         # Act: Execute the method under test
-        result = AdvancedPromptTemplateService.get_common_prompt(AppMode.CHAT.value, "unsupported_mode", "true")
+        result = AdvancedPromptTemplateService.get_common_prompt(AppMode.CHAT, "unsupported_mode", "true")

         # Assert: Verify empty dict is returned
         assert result == {}
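The calls above now pass the enum member straight into get_common_prompt, whose app_mode parameter previously received a plain string. Because a StrEnum member is a str subclass, the call sites keep working and still satisfy a str annotation. A small illustration with a generic function (not the service's real signature):

from enum import StrEnum, auto

class AppMode(StrEnum):
    CHAT = auto()
    COMPLETION = auto()

def pick_template(app_mode: str, model_mode: str) -> str:
    # String operations behave exactly as they would with a plain str argument.
    return f"{app_mode}-{model_mode}"

assert pick_template(AppMode.CHAT, "completion") == "chat-completion"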
@@ -442,7 +442,7 @@ class TestAdvancedPromptTemplateService:
         fake = Faker()

         # Act: Execute the method under test
-        result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.CHAT.value, "completion", "true")
+        result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.CHAT, "completion", "true")

         # Assert: Verify the expected outcomes
         assert result is not None

@@ -473,7 +473,7 @@ class TestAdvancedPromptTemplateService:
         fake = Faker()

         # Act: Execute the method under test
-        result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.CHAT.value, "chat", "true")
+        result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.CHAT, "chat", "true")

         # Assert: Verify the expected outcomes
         assert result is not None

@@ -502,7 +502,7 @@ class TestAdvancedPromptTemplateService:
         fake = Faker()

         # Act: Execute the method under test
-        result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.COMPLETION.value, "completion", "true")
+        result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.COMPLETION, "completion", "true")

         # Assert: Verify the expected outcomes
         assert result is not None

@@ -530,7 +530,7 @@ class TestAdvancedPromptTemplateService:
         fake = Faker()

         # Act: Execute the method under test
-        result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.COMPLETION.value, "chat", "true")
+        result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.COMPLETION, "chat", "true")

         # Assert: Verify the expected outcomes
         assert result is not None

@@ -557,7 +557,7 @@ class TestAdvancedPromptTemplateService:
         fake = Faker()

         # Act: Execute the method under test
-        result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.CHAT.value, "completion", "false")
+        result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.CHAT, "completion", "false")

         # Assert: Verify the expected outcomes
         assert result is not None

@@ -603,7 +603,7 @@ class TestAdvancedPromptTemplateService:
         fake = Faker()

         # Act: Execute the method under test
-        result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.CHAT.value, "unsupported_mode", "true")
+        result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.CHAT, "unsupported_mode", "true")

         # Assert: Verify empty dict is returned
         assert result == {}
@@ -621,7 +621,7 @@ class TestAdvancedPromptTemplateService:
         fake = Faker()

         # Test all app modes
-        app_modes = [AppMode.CHAT.value, AppMode.COMPLETION.value]
+        app_modes = [AppMode.CHAT, AppMode.COMPLETION]
         model_modes = ["completion", "chat"]

         for app_mode in app_modes:

@@ -653,7 +653,7 @@ class TestAdvancedPromptTemplateService:
         fake = Faker()

         # Test all app modes
-        app_modes = [AppMode.CHAT.value, AppMode.COMPLETION.value]
+        app_modes = [AppMode.CHAT, AppMode.COMPLETION]
         model_modes = ["completion", "chat"]

         for app_mode in app_modes:
@@ -686,10 +686,10 @@ class TestAdvancedPromptTemplateService:
         # Test edge cases
         edge_cases = [
             {"app_mode": "", "model_mode": "completion", "model_name": "gpt-3.5-turbo", "has_context": "true"},
-            {"app_mode": AppMode.CHAT.value, "model_mode": "", "model_name": "gpt-3.5-turbo", "has_context": "true"},
-            {"app_mode": AppMode.CHAT.value, "model_mode": "completion", "model_name": "", "has_context": "true"},
+            {"app_mode": AppMode.CHAT, "model_mode": "", "model_name": "gpt-3.5-turbo", "has_context": "true"},
+            {"app_mode": AppMode.CHAT, "model_mode": "completion", "model_name": "", "has_context": "true"},
             {
-                "app_mode": AppMode.CHAT.value,
+                "app_mode": AppMode.CHAT,
                 "model_mode": "completion",
                 "model_name": "gpt-3.5-turbo",
                 "has_context": "",
@@ -723,7 +723,7 @@ class TestAdvancedPromptTemplateService:

         # Test with context
         args = {
-            "app_mode": AppMode.CHAT.value,
+            "app_mode": AppMode.CHAT,
             "model_mode": "completion",
             "model_name": "gpt-3.5-turbo",
             "has_context": "true",

@@ -757,7 +757,7 @@ class TestAdvancedPromptTemplateService:

         # Test with context
         args = {
-            "app_mode": AppMode.CHAT.value,
+            "app_mode": AppMode.CHAT,
             "model_mode": "completion",
             "model_name": "baichuan-13b-chat",
             "has_context": "true",
@@ -786,25 +786,25 @@ class TestAdvancedPromptTemplateService:
         # Test different scenarios
         test_scenarios = [
             {
-                "app_mode": AppMode.CHAT.value,
+                "app_mode": AppMode.CHAT,
                 "model_mode": "completion",
                 "model_name": "gpt-3.5-turbo",
                 "has_context": "true",
             },
             {
-                "app_mode": AppMode.CHAT.value,
+                "app_mode": AppMode.CHAT,
                 "model_mode": "chat",
                 "model_name": "gpt-3.5-turbo",
                 "has_context": "true",
             },
             {
-                "app_mode": AppMode.COMPLETION.value,
+                "app_mode": AppMode.COMPLETION,
                 "model_mode": "completion",
                 "model_name": "gpt-3.5-turbo",
                 "has_context": "true",
             },
             {
-                "app_mode": AppMode.COMPLETION.value,
+                "app_mode": AppMode.COMPLETION,
                 "model_mode": "chat",
                 "model_name": "gpt-3.5-turbo",
                 "has_context": "true",
@@ -843,25 +843,25 @@ class TestAdvancedPromptTemplateService:
         # Test different scenarios
         test_scenarios = [
             {
-                "app_mode": AppMode.CHAT.value,
+                "app_mode": AppMode.CHAT,
                 "model_mode": "completion",
                 "model_name": "baichuan-13b-chat",
                 "has_context": "true",
             },
             {
-                "app_mode": AppMode.CHAT.value,
+                "app_mode": AppMode.CHAT,
                 "model_mode": "chat",
                 "model_name": "baichuan-13b-chat",
                 "has_context": "true",
             },
             {
-                "app_mode": AppMode.COMPLETION.value,
+                "app_mode": AppMode.COMPLETION,
                 "model_mode": "completion",
                 "model_name": "baichuan-13b-chat",
                 "has_context": "true",
             },
             {
-                "app_mode": AppMode.COMPLETION.value,
+                "app_mode": AppMode.COMPLETION,
                 "model_mode": "chat",
                 "model_name": "baichuan-13b-chat",
                 "has_context": "true",
@@ -255,7 +255,7 @@ class TestMetadataService:
         mock_external_service_dependencies["current_user"].id = account.id

         # Try to create metadata with built-in field name
-        built_in_field_name = BuiltInField.document_name.value
+        built_in_field_name = BuiltInField.document_name
         metadata_args = MetadataArgs(type="string", name=built_in_field_name)

         # Act & Assert: Verify proper error handling

@@ -375,7 +375,7 @@ class TestMetadataService:
         metadata = MetadataService.create_metadata(dataset.id, metadata_args)

         # Try to update with built-in field name
-        built_in_field_name = BuiltInField.document_name.value
+        built_in_field_name = BuiltInField.document_name

         with pytest.raises(ValueError, match="Metadata name already exists in Built-in fields."):
             MetadataService.update_metadata_name(dataset.id, metadata.id, built_in_field_name)
@@ -540,11 +540,11 @@ class TestMetadataService:
         field_names = [field["name"] for field in result]
         field_types = [field["type"] for field in result]

-        assert BuiltInField.document_name.value in field_names
-        assert BuiltInField.uploader.value in field_names
-        assert BuiltInField.upload_date.value in field_names
-        assert BuiltInField.last_update_date.value in field_names
-        assert BuiltInField.source.value in field_names
+        assert BuiltInField.document_name in field_names
+        assert BuiltInField.uploader in field_names
+        assert BuiltInField.upload_date in field_names
+        assert BuiltInField.last_update_date in field_names
+        assert BuiltInField.source in field_names

         # Verify field types
         assert "string" in field_types

@@ -682,11 +682,11 @@ class TestMetadataService:

         # Set document metadata with built-in fields
         document.doc_metadata = {
-            BuiltInField.document_name.value: document.name,
-            BuiltInField.uploader.value: "test_uploader",
-            BuiltInField.upload_date.value: 1234567890.0,
-            BuiltInField.last_update_date.value: 1234567890.0,
-            BuiltInField.source.value: "test_source",
+            BuiltInField.document_name: document.name,
+            BuiltInField.uploader: "test_uploader",
+            BuiltInField.upload_date: 1234567890.0,
+            BuiltInField.last_update_date: 1234567890.0,
+            BuiltInField.source: "test_source",
         }
         db.session.add(document)
         db.session.commit()
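The metadata assertions above check enum members directly against lists of plain strings and use them as dict keys. Both patterns work because StrEnum members hash and compare like their underlying strings. A sketch under the assumption that BuiltInField is a StrEnum with lower-cased auto() values (only two members shown):

from enum import StrEnum, auto

class BuiltInField(StrEnum):
    document_name = auto()
    uploader = auto()

# Membership against plain strings passes, because each member equals its value.
field_names = ["document_name", "uploader", "upload_date"]
assert BuiltInField.document_name in field_names

# As dict keys, members and their raw string values address the same entry.
doc_metadata = {BuiltInField.uploader: "test_uploader"}
assert doc_metadata["uploader"] == "test_uploader"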
@@ -96,7 +96,7 @@ class TestWorkflowService:
         app.tenant_id = fake.uuid4()
         app.name = fake.company()
         app.description = fake.text()
-        app.mode = AppMode.WORKFLOW.value
+        app.mode = AppMode.WORKFLOW
         app.icon_type = "emoji"
         app.icon = "🤖"
         app.icon_background = "#FFEAD5"

@@ -883,7 +883,7 @@ class TestWorkflowService:

         # Create chat mode app
         app = self._create_test_app(db_session_with_containers, fake)
-        app.mode = AppMode.CHAT.value
+        app.mode = AppMode.CHAT

         # Create app model config (required for conversion)
         from models.model import AppModelConfig

@@ -926,7 +926,7 @@ class TestWorkflowService:

         # Assert
         assert result is not None
-        assert result.mode == AppMode.ADVANCED_CHAT.value  # CHAT mode converts to ADVANCED_CHAT, not WORKFLOW
+        assert result.mode == AppMode.ADVANCED_CHAT  # CHAT mode converts to ADVANCED_CHAT, not WORKFLOW
         assert result.name == conversion_args["name"]
         assert result.icon == conversion_args["icon"]
         assert result.icon_type == conversion_args["icon_type"]
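In these WorkflowService tests, the member is assigned to the string-typed app.mode attribute and later compared against the mode read back from the conversion result. With StrEnum, the member is stored as its plain value and the comparison with that stored string still holds. A hedged sketch, with a plain class standing in for the ORM model and "advanced-chat" assumed as the member's value:

from enum import StrEnum, auto

class AppMode(StrEnum):
    CHAT = auto()
    ADVANCED_CHAT = "advanced-chat"  # assumed explicit value

class FakeApp:
    mode: str = ""

app = FakeApp()
app.mode = AppMode.ADVANCED_CHAT          # replaces app.mode = AppMode.ADVANCED_CHAT.value
stored = str(app.mode)                    # the value a text column would persist
assert stored == "advanced-chat"
assert stored == AppMode.ADVANCED_CHAT    # the shape of the assertions above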
@@ -945,7 +945,7 @@ class TestWorkflowService:

         # Create completion mode app
         app = self._create_test_app(db_session_with_containers, fake)
-        app.mode = AppMode.COMPLETION.value
+        app.mode = AppMode.COMPLETION

         # Create app model config (required for conversion)
         from models.model import AppModelConfig

@@ -988,7 +988,7 @@ class TestWorkflowService:

         # Assert
         assert result is not None
-        assert result.mode == AppMode.WORKFLOW.value
+        assert result.mode == AppMode.WORKFLOW
         assert result.name == conversion_args["name"]
         assert result.icon == conversion_args["icon"]
         assert result.icon_type == conversion_args["icon_type"]

@@ -1007,7 +1007,7 @@ class TestWorkflowService:

         # Create workflow mode app (already in workflow mode)
         app = self._create_test_app(db_session_with_containers, fake)
-        app.mode = AppMode.WORKFLOW.value
+        app.mode = AppMode.WORKFLOW

         from extensions.ext_database import db
@@ -1030,7 +1030,7 @@ class TestWorkflowService:
         # Arrange
         fake = Faker()
         app = self._create_test_app(db_session_with_containers, fake)
-        app.mode = AppMode.ADVANCED_CHAT.value
+        app.mode = AppMode.ADVANCED_CHAT

         from extensions.ext_database import db

@@ -1061,7 +1061,7 @@ class TestWorkflowService:
         # Arrange
         fake = Faker()
         app = self._create_test_app(db_session_with_containers, fake)
-        app.mode = AppMode.WORKFLOW.value
+        app.mode = AppMode.WORKFLOW

         from extensions.ext_database import db
@@ -29,7 +29,7 @@ class TestHandleMCPRequest:
         """Setup test fixtures"""
         self.app = Mock(spec=App)
         self.app.name = "test_app"
-        self.app.mode = AppMode.CHAT.value
+        self.app.mode = AppMode.CHAT

         self.mcp_server = Mock(spec=AppMCPServer)
         self.mcp_server.description = "Test server"

@@ -196,7 +196,7 @@ class TestIndividualHandlers:
     def test_handle_list_tools(self):
         """Test list tools handler"""
         app_name = "test_app"
-        app_mode = AppMode.CHAT.value
+        app_mode = AppMode.CHAT
         description = "Test server"
         parameters_dict: dict[str, str] = {}
         user_input_form: list[VariableEntity] = []

@@ -212,7 +212,7 @@ class TestIndividualHandlers:
     def test_handle_call_tool(self, mock_app_generate):
         """Test call tool handler"""
         app = Mock(spec=App)
-        app.mode = AppMode.CHAT.value
+        app.mode = AppMode.CHAT

         # Create mock request
         mock_request = Mock()
@@ -252,7 +252,7 @@ class TestUtilityFunctions:

     def test_build_parameter_schema_chat_mode(self):
         """Test building parameter schema for chat mode"""
-        app_mode = AppMode.CHAT.value
+        app_mode = AppMode.CHAT
         parameters_dict: dict[str, str] = {"name": "Enter your name"}

         user_input_form = [

@@ -275,7 +275,7 @@ class TestUtilityFunctions:

     def test_build_parameter_schema_workflow_mode(self):
         """Test building parameter schema for workflow mode"""
-        app_mode = AppMode.WORKFLOW.value
+        app_mode = AppMode.WORKFLOW
         parameters_dict: dict[str, str] = {"input_text": "Enter text"}

         user_input_form = [

@@ -298,7 +298,7 @@ class TestUtilityFunctions:
     def test_prepare_tool_arguments_chat_mode(self):
         """Test preparing tool arguments for chat mode"""
         app = Mock(spec=App)
-        app.mode = AppMode.CHAT.value
+        app.mode = AppMode.CHAT

         arguments = {"query": "test question", "name": "John"}
@@ -312,7 +312,7 @@ class TestUtilityFunctions:
     def test_prepare_tool_arguments_workflow_mode(self):
         """Test preparing tool arguments for workflow mode"""
         app = Mock(spec=App)
-        app.mode = AppMode.WORKFLOW.value
+        app.mode = AppMode.WORKFLOW

         arguments = {"input_text": "test input"}

@@ -324,7 +324,7 @@ class TestUtilityFunctions:
     def test_prepare_tool_arguments_completion_mode(self):
         """Test preparing tool arguments for completion mode"""
         app = Mock(spec=App)
-        app.mode = AppMode.COMPLETION.value
+        app.mode = AppMode.COMPLETION

         arguments = {"name": "John"}

@@ -336,7 +336,7 @@ class TestUtilityFunctions:
     def test_extract_answer_from_mapping_response_chat(self):
         """Test extracting answer from mapping response for chat mode"""
         app = Mock(spec=App)
-        app.mode = AppMode.CHAT.value
+        app.mode = AppMode.CHAT

         response = {"answer": "test answer", "other": "data"}

@@ -347,7 +347,7 @@ class TestUtilityFunctions:
     def test_extract_answer_from_mapping_response_workflow(self):
         """Test extracting answer from mapping response for workflow mode"""
         app = Mock(spec=App)
-        app.mode = AppMode.WORKFLOW.value
+        app.mode = AppMode.WORKFLOW

         response = {"data": {"outputs": {"result": "test result"}}}
@@ -66,7 +66,7 @@ def test__convert_to_http_request_node_for_chatbot(default_variables):
     app_model = MagicMock()
     app_model.id = "app_id"
     app_model.tenant_id = "tenant_id"
-    app_model.mode = AppMode.CHAT.value
+    app_model.mode = AppMode.CHAT

     api_based_extension_id = "api_based_extension_id"
     mock_api_based_extension = APIBasedExtension(

@@ -127,7 +127,7 @@ def test__convert_to_http_request_node_for_workflow_app(default_variables):
     app_model = MagicMock()
     app_model.id = "app_id"
     app_model.tenant_id = "tenant_id"
-    app_model.mode = AppMode.WORKFLOW.value
+    app_model.mode = AppMode.WORKFLOW

     api_based_extension_id = "api_based_extension_id"
     mock_api_based_extension = APIBasedExtension(
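One caveat worth keeping in mind when reviewing changes like these: StrEnum (via ReprEnum) keeps the enum-style repr() while str() and f-strings produce the bare value, so logging that relies on repr still shows the member name. A quick illustration, assuming the AppMode sketch above:

from enum import StrEnum, auto

class AppMode(StrEnum):
    CHAT = auto()

print(str(AppMode.CHAT))    # chat
print(f"{AppMode.CHAT}")    # chat
print(repr(AppMode.CHAT))   # <AppMode.CHAT: 'chat'>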