From 79801f5c306b68f31966674063c520812a0e5118 Mon Sep 17 00:00:00 2001
From: zhaobingshuang <1475195565@qq.com>
Date: Mon, 16 Dec 2024 12:58:03 +0800
Subject: [PATCH 1/8] fix: deepseek reports an error when using Response Format #11677 (#11678)

Co-authored-by: zhaobs
---
 api/core/model_runtime/model_providers/deepseek/llm/llm.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/api/core/model_runtime/model_providers/deepseek/llm/llm.py b/api/core/model_runtime/model_providers/deepseek/llm/llm.py
index 610dc7b458..0a81f0c094 100644
--- a/api/core/model_runtime/model_providers/deepseek/llm/llm.py
+++ b/api/core/model_runtime/model_providers/deepseek/llm/llm.py
@@ -24,6 +24,9 @@ class DeepseekLargeLanguageModel(OAIAPICompatLargeLanguageModel):
         user: Optional[str] = None,
     ) -> Union[LLMResult, Generator]:
         self._add_custom_parameters(credentials)
+        # {"response_format": "xx"} needs to be converted to {"response_format": {"type": "xx"}}
+        if "response_format" in model_parameters:
+            model_parameters["response_format"] = {"type": model_parameters.get("response_format")}
         return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream)
 
     def validate_credentials(self, model: str, credentials: dict) -> None:

From 63f1dd78775e52128dfe52c4e514ed7607f241ac Mon Sep 17 00:00:00 2001
From: Kazuhisa Wada <153587838+kazuhisa-wada@users.noreply.github.com>
Date: Mon, 16 Dec 2024 13:59:37 +0900
Subject: [PATCH 2/8] Make max_submit_count configurable via Config (#11673)

---
 api/.env.example                                     |  2 ++
 api/configs/feature/__init__.py                      | 12 ++++++++++++
 api/core/workflow/graph_engine/graph_engine.py       | 10 ++++++++--
 api/core/workflow/nodes/iteration/iteration_node.py  |  4 +++-
 4 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/api/.env.example b/api/.env.example
index 1ff6b3be8b..74f83aa06c 100644
--- a/api/.env.example
+++ b/api/.env.example
@@ -433,3 +433,5 @@ RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5
 
 CREATE_TIDB_SERVICE_JOB_ENABLED=false
 
+# Maximum number of tasks that can be submitted to the ThreadPool used for parallel node execution
+MAX_SUBMIT_COUNT=100
diff --git a/api/configs/feature/__init__.py b/api/configs/feature/__init__.py
index f1cb3efda7..e79401bdfd 100644
--- a/api/configs/feature/__init__.py
+++ b/api/configs/feature/__init__.py
@@ -439,6 +439,17 @@ class WorkflowConfig(BaseSettings):
     )
 
 
+class WorkflowNodeExecutionConfig(BaseSettings):
+    """
+    Configuration for workflow node execution
+    """
+
+    MAX_SUBMIT_COUNT: PositiveInt = Field(
+        description="Maximum number of tasks that can be submitted to the ThreadPool used for parallel node execution",
+        default=100,
+    )
+
+
 class AuthConfig(BaseSettings):
     """
     Configuration for authentication and OAuth
@@ -775,6 +786,7 @@ class FeatureConfig(
     ToolConfig,
     UpdateConfig,
     WorkflowConfig,
+    WorkflowNodeExecutionConfig,
     WorkspaceConfig,
     LoginConfig,
     # hosted services config
diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py
index e03d4a7194..034b4bd399 100644
--- a/api/core/workflow/graph_engine/graph_engine.py
+++ b/api/core/workflow/graph_engine/graph_engine.py
@@ -9,6 +9,7 @@ from typing import Any, Optional, cast
 
 from flask import Flask, current_app
 
+from configs import dify_config
 from core.app.apps.base_app_queue_manager import GenerateTaskStoppedError
 from core.app.entities.app_invoke_entities import InvokeFrom
 from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult
@@ -52,7 +53,12 @@ logger = logging.getLogger(__name__)
 
 class GraphEngineThreadPool(ThreadPoolExecutor):
     def __init__(
-        self, max_workers=None, thread_name_prefix="", initializer=None, initargs=(), max_submit_count=100
+        self,
+        max_workers=None,
+        thread_name_prefix="",
+        initializer=None,
+        initargs=(),
+        max_submit_count=dify_config.MAX_SUBMIT_COUNT,
     ) -> None:
         super().__init__(max_workers, thread_name_prefix, initializer, initargs)
         self.max_submit_count = max_submit_count
@@ -92,7 +98,7 @@ class GraphEngine:
         max_execution_time: int,
         thread_pool_id: Optional[str] = None,
     ) -> None:
-        thread_pool_max_submit_count = 100
+        thread_pool_max_submit_count = dify_config.MAX_SUBMIT_COUNT
         thread_pool_max_workers = 10
 
         # init thread pool
diff --git a/api/core/workflow/nodes/iteration/iteration_node.py b/api/core/workflow/nodes/iteration/iteration_node.py
index cbabe7a3c5..d935228c16 100644
--- a/api/core/workflow/nodes/iteration/iteration_node.py
+++ b/api/core/workflow/nodes/iteration/iteration_node.py
@@ -163,7 +163,9 @@ class IterationNode(BaseNode[IterationNodeData]):
         if self.node_data.is_parallel:
             futures: list[Future] = []
             q: Queue = Queue()
-            thread_pool = GraphEngineThreadPool(max_workers=self.node_data.parallel_nums, max_submit_count=100)
+            thread_pool = GraphEngineThreadPool(
+                max_workers=self.node_data.parallel_nums, max_submit_count=dify_config.MAX_SUBMIT_COUNT
+            )
             for index, item in enumerate(iterator_list_value):
                 future: Future = thread_pool.submit(
                     self._run_single_iter_parallel,
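
Note on PATCH 2/8: the diff above only threads dify_config.MAX_SUBMIT_COUNT through the
constructor defaults; the guard that actually enforces the limit is not part of these hunks.
As a rough, hypothetical illustration of the pattern (the submit() override below is an
assumption for illustration, not code taken from this series), a bounded executor can simply
count submissions and fail fast once the configured cap is exceeded:

    # Hypothetical sketch: a ThreadPoolExecutor that rejects work past a configured cap,
    # mirroring how GraphEngineThreadPool consumes MAX_SUBMIT_COUNT in the patch above.
    from concurrent.futures import ThreadPoolExecutor

    class BoundedThreadPool(ThreadPoolExecutor):
        def __init__(self, max_workers=None, max_submit_count=100):
            super().__init__(max_workers)
            self.max_submit_count = max_submit_count
            self.submit_count = 0

        def submit(self, fn, /, *args, **kwargs):
            # Count every submission and fail fast once the cap is exceeded.
            self.submit_count += 1
            if self.submit_count > self.max_submit_count:
                raise ValueError("Max submit count reached.")
            return super().submit(fn, *args, **kwargs)

Failing fast at submit time keeps a runaway parallel branch from queueing unbounded work,
which is presumably why the cap is now configurable per deployment.
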
From daccb10d8ce74a0dd2f7328fd14ed8ace3acc682 Mon Sep 17 00:00:00 2001
From: zhongliliu-butterfly <157568943+zhongliliu-butterfly@users.noreply.github.com>
Date: Mon, 16 Dec 2024 13:05:27 +0800
Subject: [PATCH 3/8] fix: volcengine_maas and baichuan message error (#11625)

Co-authored-by: zhongliliu
---
 api/core/model_runtime/model_providers/baichuan/llm/llm.py | 7 ++++++-
 .../model_providers/volcengine_maas/legacy/client.py       | 7 ++++++-
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/api/core/model_runtime/model_providers/baichuan/llm/llm.py b/api/core/model_runtime/model_providers/baichuan/llm/llm.py
index 91a14bf100..e8d18cfff1 100644
--- a/api/core/model_runtime/model_providers/baichuan/llm/llm.py
+++ b/api/core/model_runtime/model_providers/baichuan/llm/llm.py
@@ -10,6 +10,7 @@ from core.model_runtime.entities.llm_entities import (
 from core.model_runtime.entities.message_entities import (
     AssistantPromptMessage,
     PromptMessage,
+    PromptMessageContentType,
     PromptMessageTool,
     SystemPromptMessage,
     ToolPromptMessage,
@@ -105,7 +106,11 @@ class BaichuanLanguageModel(LargeLanguageModel):
             if isinstance(message.content, str):
                 message_dict = {"role": "user", "content": message.content}
             else:
-                raise ValueError("User message content must be str")
+                for message_content in message.content:
+                    if message_content.type == PromptMessageContentType.TEXT:
+                        message_dict = {"role": "user", "content": message_content.data}
+                    elif message_content.type == PromptMessageContentType.IMAGE:
+                        raise ValueError("Content object type image_url is not supported")
         elif isinstance(message, AssistantPromptMessage):
             message = cast(AssistantPromptMessage, message)
             message_dict = {"role": "assistant", "content": message.content}
diff --git a/api/core/model_runtime/model_providers/volcengine_maas/legacy/client.py b/api/core/model_runtime/model_providers/volcengine_maas/legacy/client.py
index 266f1216f8..0c61e19f06 100644
--- a/api/core/model_runtime/model_providers/volcengine_maas/legacy/client.py
+++ b/api/core/model_runtime/model_providers/volcengine_maas/legacy/client.py
@@ -68,7 +68,12 @@ class MaaSClient(MaasService):
             content = []
             for message_content in message.content:
                 if message_content.type == PromptMessageContentType.TEXT:
-                    raise ValueError("Content object type only support image_url")
+                    content.append(
+                        {
+                            "type": "text",
+                            "text": message_content.data,
+                        }
+                    )
                 elif message_content.type == PromptMessageContentType.IMAGE:
                     message_content = cast(ImagePromptMessageContent, message_content)
                     image_data = re.sub(r"^data:image\/[a-zA-Z]+;base64,", "", message_content.data)
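
Note on PATCH 3/8: both hunks above apply the same rule. When message.content is a list of
typed parts rather than a plain string, each part has to be mapped to the provider's expected
dict shape, and unsupported part types should raise instead of being silently dropped. A rough
standalone sketch of that rule (the plain dicts here are simplified stand-ins for the runtime's
prompt-message entities, not its real API):

    # Hypothetical sketch: flatten typed content parts into an OpenAI-style content list.
    # Unknown part types are rejected, mirroring the error paths in the patch above.
    def convert_content_parts(parts: list[dict]) -> list[dict]:
        converted = []
        for part in parts:
            if part["type"] == "text":
                converted.append({"type": "text", "text": part["data"]})
            elif part["type"] == "image_url":
                converted.append({"type": "image_url", "image_url": {"url": part["data"]}})
            else:
                raise ValueError(f"Content type {part['type']} is not supported")
        return converted
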
From 4b402c4041e553a01629c57c295e6517a5ea9f62 Mon Sep 17 00:00:00 2001
From: wangbin77
Date: Mon, 16 Dec 2024 13:05:38 +0800
Subject: [PATCH 4/8] fix: enhance workflow.tool_published performance (#11640)

Co-authored-by: wangbin
---
 api/models/workflow.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/api/models/workflow.py b/api/models/workflow.py
index 1b0af85f08..09e3728d7c 100644
--- a/api/models/workflow.py
+++ b/api/models/workflow.py
@@ -225,8 +225,10 @@ class Workflow(db.Model):
         from models.tools import WorkflowToolProvider
 
         return (
-            db.session.query(WorkflowToolProvider).filter(WorkflowToolProvider.app_id == self.app_id).first()
-            is not None
+            db.session.query(WorkflowToolProvider)
+            .filter(WorkflowToolProvider.tenant_id == self.tenant_id, WorkflowToolProvider.app_id == self.app_id)
+            .count()
+            > 0
         )
 
     @property

From 607a22ad1221ecc51a23831d17f76f39886a5b8b Mon Sep 17 00:00:00 2001
From: Joel
Date: Mon, 16 Dec 2024 14:33:00 +0800
Subject: [PATCH 5/8] fix: tool constant params change cause page crashed (#11682)

---
 .../nodes/tool/components/input-var-list.tsx     | 16 ++++------------
 .../components/workflow/nodes/tool/use-config.ts |  9 +++++++--
 2 files changed, 11 insertions(+), 14 deletions(-)

diff --git a/web/app/components/workflow/nodes/tool/components/input-var-list.tsx b/web/app/components/workflow/nodes/tool/components/input-var-list.tsx
index 5d29b767ad..db1a32e319 100644
--- a/web/app/components/workflow/nodes/tool/components/input-var-list.tsx
+++ b/web/app/components/workflow/nodes/tool/components/input-var-list.tsx
@@ -61,20 +61,12 @@ const InputVarList: FC = ({
     const newValue = produce(value, (draft: ToolVarInputs) => {
       const target = draft[variable]
       if (target) {
-        if (!isSupportConstantValue || varKindType === VarKindType.variable) {
-          if (isSupportConstantValue)
-            target.type = VarKindType.variable
-
-          target.value = varValue as ValueSelector
-        }
-        else {
-          target.type = VarKindType.constant
-          target.value = varValue as string
-        }
+        target.type = varKindType
+        target.value = varValue
       }
       else {
         draft[variable] = {
-          type: VarKindType.variable,
+          type: varKindType,
           value: varValue,
         }
       }
@@ -173,7 +165,7 @@ const InputVarList: FC = ({
                     value={varInput?.type === VarKindType.constant ? (varInput?.value || '') : (varInput?.value || [])}
                     onChange={handleNotMixedTypeChange(variable)}
                     onOpen={handleOpen(index)}
-                    defaultVarKindType={isNumber ? VarKindType.constant : VarKindType.variable}
+                    defaultVarKindType={varInput?.type || (isNumber ? VarKindType.constant : VarKindType.variable)}
                     isSupportConstantValue={isSupportConstantValue}
                     filterVar={isNumber ? filterVar : undefined}
                     availableVars={isSelect ? availableVars : undefined}
diff --git a/web/app/components/workflow/nodes/tool/use-config.ts b/web/app/components/workflow/nodes/tool/use-config.ts
index df8ad47985..94046ba4fa 100644
--- a/web/app/components/workflow/nodes/tool/use-config.ts
+++ b/web/app/components/workflow/nodes/tool/use-config.ts
@@ -132,7 +132,7 @@ const useConfig = (id: string, payload: ToolNodeType) => {
       draft.tool_parameters = {}
     })
     setInputs(inputsWithDefaultValue)
-  // eslint-disable-next-line react-hooks/exhaustive-deps
+    // eslint-disable-next-line react-hooks/exhaustive-deps
   }, [currTool])
 
   // setting when call
@@ -214,8 +214,13 @@ const useConfig = (id: string, payload: ToolNodeType) => {
       .map(k => inputs.tool_parameters[k])
 
     const varInputs = getInputVars(hadVarParams.map((p) => {
-      if (p.type === VarType.variable)
+      if (p.type === VarType.variable) {
+        // handle invalid values saved by the old behavior so they don't crash the page
+        if (!(p.value as any).join)
+          return `{{#${p.value}#}}`
+
         return `{{#${(p.value as ValueSelector).join('.')}#}}`
+      }
       return p.value as string
     }))
 
From 41de7e76ecafddd61028d53dd7f5ea045d7e0ae1 Mon Sep 17 00:00:00 2001
From: Joel
Date: Mon, 16 Dec 2024 15:06:03 +0800
Subject: [PATCH 6/8] fix: iteration output array type causes always outputting string array (#11686)

---
 web/app/components/workflow/nodes/iteration/use-config.ts | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/web/app/components/workflow/nodes/iteration/use-config.ts b/web/app/components/workflow/nodes/iteration/use-config.ts
index 6fb8797dcd..fd69fecaf0 100644
--- a/web/app/components/workflow/nodes/iteration/use-config.ts
+++ b/web/app/components/workflow/nodes/iteration/use-config.ts
@@ -52,6 +52,12 @@ const useConfig = (id: string, payload: IterationNodeType) => {
       [VarType.number]: VarType.arrayNumber,
       [VarType.object]: VarType.arrayObject,
       [VarType.file]: VarType.arrayFile,
+      // list operator node can output array
+      [VarType.array]: VarType.array,
+      [VarType.arrayFile]: VarType.arrayFile,
+      [VarType.arrayString]: VarType.arrayString,
+      [VarType.arrayNumber]: VarType.arrayNumber,
+      [VarType.arrayObject]: VarType.arrayObject,
     } as Record)[outputItemType] || VarType.arrayString
   })
   setInputs(newInputs)

From 9f602f73ebdc455b7efe0991cdc5d9c4549f2e25 Mon Sep 17 00:00:00 2001
From: zxhlyh
Date: Mon, 16 Dec 2024 15:39:53 +0800
Subject: [PATCH 7/8] fix: workflow continue on error edge color (#11689)

---
 web/app/components/workflow/hooks/use-workflow-run.ts | 1 +
 1 file changed, 1 insertion(+)

diff --git a/web/app/components/workflow/hooks/use-workflow-run.ts b/web/app/components/workflow/hooks/use-workflow-run.ts
index f6a9d24cd3..a01b2d3154 100644
--- a/web/app/components/workflow/hooks/use-workflow-run.ts
+++ b/web/app/components/workflow/hooks/use-workflow-run.ts
@@ -192,6 +192,7 @@ export const useWorkflowRun = () => {
         const newNodes = produce(nodes, (draft) => {
           draft.forEach((node) => {
             node.data._waitingRun = true
+            node.data._runningBranchId = undefined
           })
         })
         setNodes(newNodes)
From 967eb811120417a57f95eb72e3d9ee9086a929af Mon Sep 17 00:00:00 2001
From: -LAN-
Date: Mon, 16 Dec 2024 15:49:17 +0800
Subject: [PATCH 8/8] chore: bump version to 0.14.0 (#11679)

Signed-off-by: -LAN-
---
 api/configs/packaging/__init__.py | 2 +-
 docker-legacy/docker-compose.yaml | 6 +++---
 docker/docker-compose.yaml        | 6 +++---
 web/package.json                  | 2 +-
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/api/configs/packaging/__init__.py b/api/configs/packaging/__init__.py
index 0c2ccd826e..51db50ec3d 100644
--- a/api/configs/packaging/__init__.py
+++ b/api/configs/packaging/__init__.py
@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
 
     CURRENT_VERSION: str = Field(
         description="Dify version",
-        default="0.13.2",
+        default="0.14.0",
     )
 
     COMMIT_SHA: str = Field(
diff --git a/docker-legacy/docker-compose.yaml b/docker-legacy/docker-compose.yaml
index 4392407a73..6c38b5c4f9 100644
--- a/docker-legacy/docker-compose.yaml
+++ b/docker-legacy/docker-compose.yaml
@@ -2,7 +2,7 @@ version: '3'
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.13.2
+    image: langgenius/dify-api:0.14.0
     restart: always
     environment:
       # Startup mode, 'api' starts the API server.
@@ -227,7 +227,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.13.2
+    image: langgenius/dify-api:0.14.0
     restart: always
     environment:
       CONSOLE_WEB_URL: ''
@@ -397,7 +397,7 @@ services:
 
   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.13.2
+    image: langgenius/dify-web:0.14.0
     restart: always
     environment:
       # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml
index cfc3d750c9..eece49b113 100644
--- a/docker/docker-compose.yaml
+++ b/docker/docker-compose.yaml
@@ -292,7 +292,7 @@ x-shared-env: &shared-api-worker-env
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.13.2
+    image: langgenius/dify-api:0.14.0
     restart: always
     environment:
       # Use the shared environment variables.
@@ -312,7 +312,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.13.2
+    image: langgenius/dify-api:0.14.0
     restart: always
     environment:
       # Use the shared environment variables.
@@ -331,7 +331,7 @@ services:
 
   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.13.2
+    image: langgenius/dify-web:0.14.0
     restart: always
     environment:
       CONSOLE_API_URL: ${CONSOLE_API_URL:-}
diff --git a/web/package.json b/web/package.json
index 44e92806d3..c2ed7502f1 100644
--- a/web/package.json
+++ b/web/package.json
@@ -1,6 +1,6 @@
 {
   "name": "dify-web",
-  "version": "0.13.2",
+  "version": "0.14.0",
   "private": true,
   "engines": {
     "node": ">=18.17.0"