diff --git a/api/.env.example b/api/.env.example index 1ff6b3be8b..74f83aa06c 100644 --- a/api/.env.example +++ b/api/.env.example @@ -433,3 +433,5 @@ RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 CREATE_TIDB_SERVICE_JOB_ENABLED=false +# Maximum number of tasks submitted to a ThreadPool for parallel node execution +MAX_SUBMIT_COUNT=100 diff --git a/api/configs/feature/__init__.py b/api/configs/feature/__init__.py index f1cb3efda7..e79401bdfd 100644 --- a/api/configs/feature/__init__.py +++ b/api/configs/feature/__init__.py @@ -439,6 +439,17 @@ class WorkflowConfig(BaseSettings): ) +class WorkflowNodeExecutionConfig(BaseSettings): + """ + Configuration for workflow node execution + """ + + MAX_SUBMIT_COUNT: PositiveInt = Field( + description="Maximum number of tasks submitted to a ThreadPool for parallel node execution", + default=100, + ) + + class AuthConfig(BaseSettings): """ Configuration for authentication and OAuth """ @@ -775,6 +786,7 @@ class FeatureConfig( ToolConfig, UpdateConfig, WorkflowConfig, + WorkflowNodeExecutionConfig, WorkspaceConfig, LoginConfig, # hosted services config diff --git a/api/configs/packaging/__init__.py b/api/configs/packaging/__init__.py index 0c2ccd826e..51db50ec3d 100644 --- a/api/configs/packaging/__init__.py +++ b/api/configs/packaging/__init__.py @@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings): CURRENT_VERSION: str = Field( description="Dify version", - default="0.13.2", + default="0.14.0", ) COMMIT_SHA: str = Field( diff --git a/api/core/model_runtime/model_providers/baichuan/llm/llm.py b/api/core/model_runtime/model_providers/baichuan/llm/llm.py index 91a14bf100..e8d18cfff1 100644 --- a/api/core/model_runtime/model_providers/baichuan/llm/llm.py +++ b/api/core/model_runtime/model_providers/baichuan/llm/llm.py @@ -10,6 +10,7 @@ from core.model_runtime.entities.llm_entities import ( from core.model_runtime.entities.message_entities import ( AssistantPromptMessage, PromptMessage, + PromptMessageContentType, 
PromptMessageTool, SystemPromptMessage, ToolPromptMessage, @@ -105,7 +106,11 @@ class BaichuanLanguageModel(LargeLanguageModel): if isinstance(message.content, str): message_dict = {"role": "user", "content": message.content} else: - raise ValueError("User message content must be str") + for message_content in message.content: + if message_content.type == PromptMessageContentType.TEXT: + message_dict = {"role": "user", "content": message_content.data} + elif message_content.type == PromptMessageContentType.IMAGE: + raise ValueError("Content object type not support image_url") elif isinstance(message, AssistantPromptMessage): message = cast(AssistantPromptMessage, message) message_dict = {"role": "assistant", "content": message.content} diff --git a/api/core/model_runtime/model_providers/deepseek/llm/llm.py b/api/core/model_runtime/model_providers/deepseek/llm/llm.py index 610dc7b458..0a81f0c094 100644 --- a/api/core/model_runtime/model_providers/deepseek/llm/llm.py +++ b/api/core/model_runtime/model_providers/deepseek/llm/llm.py @@ -24,6 +24,9 @@ class DeepseekLargeLanguageModel(OAIAPICompatLargeLanguageModel): user: Optional[str] = None, ) -> Union[LLMResult, Generator]: self._add_custom_parameters(credentials) + # {"response_format": "xx"} needs to be converted to {"response_format": {"type": "xx"}} + if "response_format" in model_parameters: + model_parameters["response_format"] = {"type": model_parameters.get("response_format")} return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream) def validate_credentials(self, model: str, credentials: dict) -> None: diff --git a/api/core/model_runtime/model_providers/volcengine_maas/legacy/client.py b/api/core/model_runtime/model_providers/volcengine_maas/legacy/client.py index 266f1216f8..0c61e19f06 100644 --- a/api/core/model_runtime/model_providers/volcengine_maas/legacy/client.py +++ b/api/core/model_runtime/model_providers/volcengine_maas/legacy/client.py @@ -68,7 +68,12 @@ class 
MaaSClient(MaasService): content = [] for message_content in message.content: if message_content.type == PromptMessageContentType.TEXT: - raise ValueError("Content object type only support image_url") + content.append( + { + "type": "text", + "text": message_content.data, + } + ) elif message_content.type == PromptMessageContentType.IMAGE: message_content = cast(ImagePromptMessageContent, message_content) image_data = re.sub(r"^data:image\/[a-zA-Z]+;base64,", "", message_content.data) diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index e03d4a7194..034b4bd399 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -9,6 +9,7 @@ from typing import Any, Optional, cast from flask import Flask, current_app +from configs import dify_config from core.app.apps.base_app_queue_manager import GenerateTaskStoppedError from core.app.entities.app_invoke_entities import InvokeFrom from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult @@ -52,7 +53,12 @@ logger = logging.getLogger(__name__) class GraphEngineThreadPool(ThreadPoolExecutor): def __init__( - self, max_workers=None, thread_name_prefix="", initializer=None, initargs=(), max_submit_count=100 + self, + max_workers=None, + thread_name_prefix="", + initializer=None, + initargs=(), + max_submit_count=dify_config.MAX_SUBMIT_COUNT, ) -> None: super().__init__(max_workers, thread_name_prefix, initializer, initargs) self.max_submit_count = max_submit_count @@ -92,7 +98,7 @@ class GraphEngine: max_execution_time: int, thread_pool_id: Optional[str] = None, ) -> None: - thread_pool_max_submit_count = 100 + thread_pool_max_submit_count = dify_config.MAX_SUBMIT_COUNT thread_pool_max_workers = 10 # init thread pool diff --git a/api/core/workflow/nodes/iteration/iteration_node.py b/api/core/workflow/nodes/iteration/iteration_node.py index cbabe7a3c5..d935228c16 100644 --- 
a/api/core/workflow/nodes/iteration/iteration_node.py +++ b/api/core/workflow/nodes/iteration/iteration_node.py @@ -163,7 +163,9 @@ class IterationNode(BaseNode[IterationNodeData]): if self.node_data.is_parallel: futures: list[Future] = [] q: Queue = Queue() - thread_pool = GraphEngineThreadPool(max_workers=self.node_data.parallel_nums, max_submit_count=100) + thread_pool = GraphEngineThreadPool( + max_workers=self.node_data.parallel_nums, max_submit_count=dify_config.MAX_SUBMIT_COUNT + ) for index, item in enumerate(iterator_list_value): future: Future = thread_pool.submit( self._run_single_iter_parallel, diff --git a/api/models/workflow.py b/api/models/workflow.py index 1b0af85f08..09e3728d7c 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -225,8 +225,10 @@ class Workflow(db.Model): from models.tools import WorkflowToolProvider return ( - db.session.query(WorkflowToolProvider).filter(WorkflowToolProvider.app_id == self.app_id).first() - is not None + db.session.query(WorkflowToolProvider) + .filter(WorkflowToolProvider.tenant_id == self.tenant_id, WorkflowToolProvider.app_id == self.app_id) + .count() + > 0 ) @property diff --git a/docker-legacy/docker-compose.yaml b/docker-legacy/docker-compose.yaml index 4392407a73..6c38b5c4f9 100644 --- a/docker-legacy/docker-compose.yaml +++ b/docker-legacy/docker-compose.yaml @@ -2,7 +2,7 @@ version: '3' services: # API service api: - image: langgenius/dify-api:0.13.2 + image: langgenius/dify-api:0.14.0 restart: always environment: # Startup mode, 'api' starts the API server. @@ -227,7 +227,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.13.2 + image: langgenius/dify-api:0.14.0 restart: always environment: CONSOLE_WEB_URL: '' @@ -397,7 +397,7 @@ services: # Frontend web application. 
web: - image: langgenius/dify-web:0.13.2 + image: langgenius/dify-web:0.14.0 restart: always environment: # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 56effa6293..669f6eb4dd 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -292,7 +292,7 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:0.13.2 + image: langgenius/dify-api:0.14.0 restart: always environment: # Use the shared environment variables. @@ -312,7 +312,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.13.2 + image: langgenius/dify-api:0.14.0 restart: always environment: # Use the shared environment variables. @@ -331,7 +331,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.13.2 + image: langgenius/dify-web:0.14.0 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} diff --git a/web/app/components/workflow/hooks/use-workflow-run.ts b/web/app/components/workflow/hooks/use-workflow-run.ts index f6a9d24cd3..a01b2d3154 100644 --- a/web/app/components/workflow/hooks/use-workflow-run.ts +++ b/web/app/components/workflow/hooks/use-workflow-run.ts @@ -192,6 +192,7 @@ export const useWorkflowRun = () => { const newNodes = produce(nodes, (draft) => { draft.forEach((node) => { node.data._waitingRun = true + node.data._runningBranchId = undefined }) }) setNodes(newNodes) diff --git a/web/app/components/workflow/nodes/iteration/use-config.ts b/web/app/components/workflow/nodes/iteration/use-config.ts index 6fb8797dcd..fd69fecaf0 100644 --- a/web/app/components/workflow/nodes/iteration/use-config.ts +++ b/web/app/components/workflow/nodes/iteration/use-config.ts @@ -52,6 +52,12 @@ const useConfig = (id: string, payload: IterationNodeType) => { [VarType.number]: VarType.arrayNumber, 
[VarType.object]: VarType.arrayObject, [VarType.file]: VarType.arrayFile, + // list operator node can output array + [VarType.array]: VarType.array, + [VarType.arrayFile]: VarType.arrayFile, + [VarType.arrayString]: VarType.arrayString, + [VarType.arrayNumber]: VarType.arrayNumber, + [VarType.arrayObject]: VarType.arrayObject, } as Record)[outputItemType] || VarType.arrayString }) setInputs(newInputs) diff --git a/web/app/components/workflow/nodes/tool/components/input-var-list.tsx b/web/app/components/workflow/nodes/tool/components/input-var-list.tsx index 2cc1a0ee50..de3bb2d2fb 100644 --- a/web/app/components/workflow/nodes/tool/components/input-var-list.tsx +++ b/web/app/components/workflow/nodes/tool/components/input-var-list.tsx @@ -59,20 +59,12 @@ const InputVarList: FC = ({ const newValue = produce(value, (draft: ToolVarInputs) => { const target = draft[variable] if (target) { - if (!isSupportConstantValue || varKindType === VarKindType.variable) { - if (isSupportConstantValue) - target.type = VarKindType.variable - - target.value = varValue as ValueSelector - } - else { - target.type = VarKindType.constant - target.value = varValue as string - } + target.type = varKindType + target.value = varValue } else { draft[variable] = { - type: VarKindType.variable, + type: varKindType, value: varValue, } } @@ -170,7 +162,7 @@ const InputVarList: FC = ({ value={varInput?.type === VarKindType.constant ? (varInput?.value || '') : (varInput?.value || [])} onChange={handleNotMixedTypeChange(variable)} onOpen={handleOpen(index)} - defaultVarKindType={isNumber ? VarKindType.constant : VarKindType.variable} + defaultVarKindType={varInput?.type || (isNumber ? VarKindType.constant : VarKindType.variable)} isSupportConstantValue={isSupportConstantValue} filterVar={isNumber ? filterVar : undefined} availableVars={isSelect ? 
availableVars : undefined} diff --git a/web/app/components/workflow/nodes/tool/use-config.ts b/web/app/components/workflow/nodes/tool/use-config.ts index 2d603c99e5..be40a7fd1a 100644 --- a/web/app/components/workflow/nodes/tool/use-config.ts +++ b/web/app/components/workflow/nodes/tool/use-config.ts @@ -132,7 +132,7 @@ const useConfig = (id: string, payload: ToolNodeType) => { draft.tool_parameters = {} }) setInputs(inputsWithDefaultValue) - // eslint-disable-next-line react-hooks/exhaustive-deps + // eslint-disable-next-line react-hooks/exhaustive-deps }, [currTool]) // setting when call @@ -214,8 +214,13 @@ const useConfig = (id: string, payload: ToolNodeType) => { .map(k => inputs.tool_parameters[k]) const varInputs = getInputVars(hadVarParams.map((p) => { - if (p.type === VarType.variable) + if (p.type === VarType.variable) { + // handle legacy invalid values (plain string instead of a selector array) so they don't crash the page + if (!(p.value as any).join) + return `{{#${p.value}#}}` + return `{{#${(p.value as ValueSelector).join('.')}#}}` + } return p.value as string })) diff --git a/web/package.json b/web/package.json index 32ee6cbc26..2859406b21 100644 --- a/web/package.json +++ b/web/package.json @@ -1,6 +1,6 @@ { "name": "dify-web", - "version": "0.13.2", + "version": "0.14.0", "private": true, "engines": { "node": ">=18.17.0"