From 6563cb6ec637a3bb0659007656a0ad204876ef63 Mon Sep 17 00:00:00 2001 From: Pascal M <11357019+perzeuss@users.noreply.github.com> Date: Tue, 7 May 2024 04:08:18 +0200 Subject: [PATCH 001/267] fix: prevent http node overwrite on open (#4127) --- web/app/components/workflow/nodes/http/use-config.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/app/components/workflow/nodes/http/use-config.ts b/web/app/components/workflow/nodes/http/use-config.ts index 768a67617a..8be66a7c35 100644 --- a/web/app/components/workflow/nodes/http/use-config.ts +++ b/web/app/components/workflow/nodes/http/use-config.ts @@ -29,8 +29,8 @@ const useConfig = (id: string, payload: HttpNodeType) => { const isReady = defaultConfig && Object.keys(defaultConfig).length > 0 if (isReady) { setInputs({ - ...inputs, ...defaultConfig, + ...inputs, }) } // eslint-disable-next-line react-hooks/exhaustive-deps From 76bec6ce7fb51e57ce62b65f5f1933e254dc0a40 Mon Sep 17 00:00:00 2001 From: Yeuoly <45712896+Yeuoly@users.noreply.github.com> Date: Tue, 7 May 2024 12:07:56 +0800 Subject: [PATCH 002/267] feat: add http node max size env (#4137) --- api/.env.example | 2 ++ api/core/workflow/nodes/http_request/http_executor.py | 9 +++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/api/.env.example b/api/.env.example index 30bbf331a4..01326a0cc8 100644 --- a/api/.env.example +++ b/api/.env.example @@ -163,6 +163,8 @@ API_TOOL_DEFAULT_READ_TIMEOUT=60 HTTP_REQUEST_MAX_CONNECT_TIMEOUT=300 HTTP_REQUEST_MAX_READ_TIMEOUT=600 HTTP_REQUEST_MAX_WRITE_TIMEOUT=600 +HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760 # 10MB +HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576 # 1MB # Log file path LOG_FILE= diff --git a/api/core/workflow/nodes/http_request/http_executor.py b/api/core/workflow/nodes/http_request/http_executor.py index c2beb7a383..1fb73afd12 100644 --- a/api/core/workflow/nodes/http_request/http_executor.py +++ b/api/core/workflow/nodes/http_request/http_executor.py @@ -1,4 +1,5 @@ import json +import os from copy import deepcopy from random import randint from typing import Any, Optional, Union @@ -13,10 +14,10 @@ from core.workflow.entities.variable_pool import ValueType, VariablePool from core.workflow.nodes.http_request.entities import HttpRequestNodeData from core.workflow.utils.variable_template_parser import VariableTemplateParser -MAX_BINARY_SIZE = 1024 * 1024 * 10 # 10MB -READABLE_MAX_BINARY_SIZE = '10MB' -MAX_TEXT_SIZE = 1024 * 1024 // 10 # 0.1MB -READABLE_MAX_TEXT_SIZE = '0.1MB' +MAX_BINARY_SIZE = int(os.environ.get('HTTP_REQUEST_NODE_MAX_BINARY_SIZE', str(1024 * 1024 * 10))) # 10MB +READABLE_MAX_BINARY_SIZE = f'{MAX_BINARY_SIZE / 1024 / 1024:.2f}MB' +MAX_TEXT_SIZE = int(os.environ.get('HTTP_REQUEST_NODE_MAX_TEXT_SIZE', str(1024 * 1024))) # 10MB # 1MB +READABLE_MAX_TEXT_SIZE = f'{MAX_TEXT_SIZE / 1024 / 1024:.2f}MB' class HttpExecutorResponse: From 45d21677a0d152fac1a37225dd3bfe54b5cbec72 Mon Sep 17 00:00:00 2001 From: Fyphen Date: Tue, 7 May 2024 13:25:01 +0900 Subject: [PATCH 003/267] Improved Japanese translation (#4119) --- README_JA.md | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/README_JA.md b/README_JA.md index af97252eae..ea1c717272 100644 --- a/README_JA.md +++ b/README_JA.md @@ -2,7 +2,7 @@

Dify Cloud · - 自己ホスティング · + セルフホスト · ドキュメント · デモのスケジュール

@@ -54,7 +54,7 @@ DifyはオープンソースのLLMアプリケーション開発プラットフ -**2. 網羅的なモデルサポート**: +**2. 包括的なモデルサポート**: 数百のプロプライエタリ/オープンソースのLLMと、数十の推論プロバイダーおよびセルフホスティングソリューションとのシームレスな統合を提供します。GPT、Mistral、Llama3、およびOpenAI API互換のモデルをカバーします。サポートされているモデルプロバイダーの完全なリストは[こちら](https://docs.dify.ai/getting-started/readme/model-providers)をご覧ください。 ![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3) @@ -94,9 +94,9 @@ DifyはオープンソースのLLMアプリケーション開発プラットフ サポートされているLLM - 豊富なバリエーション - 豊富なバリエーション - 豊富なバリエーション + バリエーション豊富 + バリエーション豊富 + バリエーション豊富 OpenAIのみ @@ -146,34 +146,34 @@ DifyはオープンソースのLLMアプリケーション開発プラットフ ## Difyの使用方法 - **クラウド
** -[こちら](https://dify.ai)のDify Cloudサービスを利用して、セットアップが不要で誰でも試すことができます。サンドボックスプランでは、200回の無料のGPT-4呼び出しが含まれています。 +[こちら](https://dify.ai)のDify Cloudサービスを利用して、セットアップ不要で試すことができます。サンドボックスプランには、200回の無料のGPT-4呼び出しが含まれています。 - **Dify Community Editionのセルフホスティング
** -この[スターターガイド](#quick-start)を使用して、環境でDifyをすばやく実行できます。 -さらなる参照や詳細な手順については、[ドキュメント](https://docs.dify.ai)をご覧ください。 +この[スターターガイド](#quick-start)を使用して、ローカル環境でDifyを簡単に実行できます。 +さらなる参考資料や詳細な手順については、[ドキュメント](https://docs.dify.ai)をご覧ください。 - **エンタープライズ/組織向けのDify
** 追加のエンタープライズ向け機能を提供しています。[こちらからミーティングを予約](https://cal.com/guchenhe/30min)したり、[メールを送信](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)してエンタープライズのニーズについて相談してください。
> AWSを使用しているスタートアップや中小企業の場合は、[AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6)のDify Premiumをチェックして、ワンクリックで独自のAWS VPCにデプロイできます。カスタムロゴとブランディングでアプリを作成するオプションを備えた手頃な価格のAMIオファリングです。 -## 先を見る +## 最新の情報を入手 -GitHubでDifyにスターを付け、新しいリリースをすぐに通知されます。 +GitHub上でDifyにスターを付けることで、Difyに関する新しいニュースを受け取れます。 ![star-us](https://github.com/langgenius/dify/assets/13230914/b823edc1-6388-4e25-ad45-2f6b187adbb4) ## クイックスタート -> Difyをインストールする前に、マシンが以下の最小システム要件を満たしていることを確認してください: +> Difyをインストールする前に、お使いのマシンが以下の最小システム要件を満たしていることを確認してください: > >- CPU >= 2コア >- RAM >= 4GB
-Difyサーバーを起動する最も簡単な方法は、当社の[docker-compose.yml](docker/docker-compose.yaml)ファイルを実行することです。インストールコマンドを実行する前に、マシンに[Docker](https://docs.docker.com/get-docker/)と[Docker Compose](https://docs.docker.com/compose/install/)がインストールされていることを確認してください。 +Difyサーバーを起動する最も簡単な方法は、[docker-compose.yml](docker/docker-compose.yaml)ファイルを実行することです。インストールコマンドを実行する前に、マシンに[Docker](https://docs.docker.com/get-docker/)と[Docker Compose](https://docs.docker.com/compose/install/)がインストールされていることを確認してください。 ```bash cd docker @@ -216,7 +216,7 @@ docker compose up -d * [Discord](https://discord.gg/FngNHpbcY7). 主に: アプリケーションの共有やコミュニティとの交流。 * [Twitter](https://twitter.com/dify_ai). 主に: アプリケーションの共有やコミュニティとの交流。 -または、直接チームメンバーとミーティングをスケジュールします: +または、直接チームメンバーとミーティングをスケジュール: @@ -227,7 +227,7 @@ docker compose up -d - + @@ -242,4 +242,4 @@ docker compose up -d ## ライセンス -このリポジトリは、Dify Open Source License にいくつかの追加制限を加えた[Difyオープンソースライセンス](LICENSE)の下で利用可能です。 \ No newline at end of file +このリポジトリは、Dify Open Source License にいくつかの追加制限を加えた[Difyオープンソースライセンス](LICENSE)の下で利用可能です。 From 049abd698fbe3be8704e2ea8496ca06b777d2294 Mon Sep 17 00:00:00 2001 From: Bowen Liang Date: Tue, 7 May 2024 12:37:18 +0800 Subject: [PATCH 004/267] improve: test CodeExecutor with code templates and extract CodeLanguage enum (#4098) --- .../helper/code_executor/code_executor.py | 35 ++++++++++++------- .../builtin/code/tools/simple_code.py | 6 ++-- api/core/workflow/nodes/code/code_node.py | 8 ++--- .../nodes/code_executor/test_code_executor.py | 11 ++++++ .../code_executor/test_code_javascript.py | 21 +++++++---- ...test_code_jina2.py => test_code_jinja2.py} | 10 ++++-- .../nodes/code_executor/test_code_python3.py | 21 +++++++---- 7 files changed, 79 insertions(+), 33 deletions(-) create mode 100644 api/tests/integration_tests/workflow/nodes/code_executor/test_code_executor.py rename api/tests/integration_tests/workflow/nodes/code_executor/{test_code_jina2.py => test_code_jinja2.py} (65%) diff --git a/api/core/helper/code_executor/code_executor.py b/api/core/helper/code_executor/code_executor.py index 063a21b192..ec685ae814 100644 --- a/api/core/helper/code_executor/code_executor.py +++ b/api/core/helper/code_executor/code_executor.py @@ -1,3 +1,4 @@ +from enum import Enum from typing import Literal, Optional from httpx import post @@ -28,7 +29,25 @@ class CodeExecutionResponse(BaseModel): data: Data +class CodeLanguage(str, Enum): + PYTHON3 = 'python3' + JINJA2 = 'jinja2' + JAVASCRIPT = 'javascript' + + class CodeExecutor: + code_template_transformers = { + CodeLanguage.PYTHON3: PythonTemplateTransformer, + CodeLanguage.JINJA2: Jinja2TemplateTransformer, + CodeLanguage.JAVASCRIPT: NodeJsTemplateTransformer, + } + + code_language_to_running_language = { + CodeLanguage.JAVASCRIPT: 'nodejs', + CodeLanguage.JINJA2: CodeLanguage.PYTHON3, + CodeLanguage.PYTHON3: CodeLanguage.PYTHON3, + } + @classmethod def execute_code(cls, language: Literal['python3', 'javascript', 'jinja2'], preload: str, code: str) -> str: """ @@ -44,9 +63,7 @@ class CodeExecutor: } data = { - 'language': 'python3' if language == 'jinja2' else - 'nodejs' if language == 'javascript' else - 'python3' if language == 'python3' else None, + 'language': cls.code_language_to_running_language.get(language), 'code': code, 'preload': preload } @@ -86,15 +103,9 @@ class CodeExecutor: :param inputs: inputs :return: """ - template_transformer = None - if language == 'python3': - template_transformer = PythonTemplateTransformer - elif language == 'jinja2': - template_transformer = Jinja2TemplateTransformer - elif 
language == 'javascript': - template_transformer = NodeJsTemplateTransformer - else: - raise CodeExecutionException('Unsupported language') + template_transformer = cls.code_template_transformers.get(language) + if not template_transformer: + raise CodeExecutionException(f'Unsupported language {language}') runner, preload = template_transformer.transform_caller(code, inputs) diff --git a/api/core/tools/provider/builtin/code/tools/simple_code.py b/api/core/tools/provider/builtin/code/tools/simple_code.py index ae9b1cb612..37645bf0d0 100644 --- a/api/core/tools/provider/builtin/code/tools/simple_code.py +++ b/api/core/tools/provider/builtin/code/tools/simple_code.py @@ -1,6 +1,6 @@ from typing import Any -from core.helper.code_executor.code_executor import CodeExecutor +from core.helper.code_executor.code_executor import CodeExecutor, CodeLanguage from core.tools.entities.tool_entities import ToolInvokeMessage from core.tools.tool.builtin_tool import BuiltinTool @@ -11,10 +11,10 @@ class SimpleCode(BuiltinTool): invoke simple code """ - language = tool_parameters.get('language', 'python3') + language = tool_parameters.get('language', CodeLanguage.PYTHON3) code = tool_parameters.get('code', '') - if language not in ['python3', 'javascript']: + if language not in [CodeLanguage.PYTHON3, CodeLanguage.JAVASCRIPT]: raise ValueError(f'Only python3 and javascript are supported, not {language}') result = CodeExecutor.execute_code(language, '', code) diff --git a/api/core/workflow/nodes/code/code_node.py b/api/core/workflow/nodes/code/code_node.py index 2c1529f492..12e7ae940f 100644 --- a/api/core/workflow/nodes/code/code_node.py +++ b/api/core/workflow/nodes/code/code_node.py @@ -1,7 +1,7 @@ import os from typing import Optional, Union, cast -from core.helper.code_executor.code_executor import CodeExecutionException, CodeExecutor +from core.helper.code_executor.code_executor import CodeExecutionException, CodeExecutor, CodeLanguage from core.workflow.entities.node_entities import NodeRunResult, NodeType from core.workflow.entities.variable_pool import VariablePool from core.workflow.nodes.base_node import BaseNode @@ -39,7 +39,7 @@ class CodeNode(BaseNode): :param filters: filter by node config parameters. 
:return: """ - if filters and filters.get("code_language") == "javascript": + if filters and filters.get("code_language") == CodeLanguage.JAVASCRIPT: return { "type": "code", "config": { @@ -53,7 +53,7 @@ class CodeNode(BaseNode): "value_selector": [] } ], - "code_language": "javascript", + "code_language": CodeLanguage.JAVASCRIPT, "code": JAVASCRIPT_DEFAULT_CODE, "outputs": { "result": { @@ -77,7 +77,7 @@ class CodeNode(BaseNode): "value_selector": [] } ], - "code_language": "python3", + "code_language": CodeLanguage.PYTHON3, "code": PYTHON_DEFAULT_CODE, "outputs": { "result": { diff --git a/api/tests/integration_tests/workflow/nodes/code_executor/test_code_executor.py b/api/tests/integration_tests/workflow/nodes/code_executor/test_code_executor.py new file mode 100644 index 0000000000..ae6e7ceaa7 --- /dev/null +++ b/api/tests/integration_tests/workflow/nodes/code_executor/test_code_executor.py @@ -0,0 +1,11 @@ +import pytest + +from core.helper.code_executor.code_executor import CodeExecutionException, CodeExecutor + +CODE_LANGUAGE = 'unsupported_language' + + +def test_unsupported_with_code_template(): + with pytest.raises(CodeExecutionException) as e: + CodeExecutor.execute_workflow_code_template(language=CODE_LANGUAGE, code='', inputs={}) + assert str(e.value) == f'Unsupported language {CODE_LANGUAGE}' diff --git a/api/tests/integration_tests/workflow/nodes/code_executor/test_code_javascript.py b/api/tests/integration_tests/workflow/nodes/code_executor/test_code_javascript.py index c794ae8e4b..19c9d18307 100644 --- a/api/tests/integration_tests/workflow/nodes/code_executor/test_code_javascript.py +++ b/api/tests/integration_tests/workflow/nodes/code_executor/test_code_javascript.py @@ -1,6 +1,9 @@ -from core.helper.code_executor.code_executor import CodeExecutor +from textwrap import dedent -CODE_LANGUAGE = 'javascript' +from core.helper.code_executor.code_executor import CodeExecutor, CodeLanguage +from core.workflow.nodes.code.code_node import JAVASCRIPT_DEFAULT_CODE + +CODE_LANGUAGE = CodeLanguage.JAVASCRIPT def test_javascript_plain(): @@ -10,9 +13,15 @@ def test_javascript_plain(): def test_javascript_json(): - code = """ -obj = {'Hello': 'World'} -console.log(JSON.stringify(obj)) - """ + code = dedent(""" + obj = {'Hello': 'World'} + console.log(JSON.stringify(obj)) + """) result = CodeExecutor.execute_code(language=CODE_LANGUAGE, preload='', code=code) assert result == '{"Hello":"World"}\n' + + +def test_javascript_with_code_template(): + result = CodeExecutor.execute_workflow_code_template( + language=CODE_LANGUAGE, code=JAVASCRIPT_DEFAULT_CODE, inputs={'arg1': 'Hello', 'arg2': 'World'}) + assert result == {'result': 'HelloWorld'} diff --git a/api/tests/integration_tests/workflow/nodes/code_executor/test_code_jina2.py b/api/tests/integration_tests/workflow/nodes/code_executor/test_code_jinja2.py similarity index 65% rename from api/tests/integration_tests/workflow/nodes/code_executor/test_code_jina2.py rename to api/tests/integration_tests/workflow/nodes/code_executor/test_code_jinja2.py index aae3c7acec..6793cd3cc2 100644 --- a/api/tests/integration_tests/workflow/nodes/code_executor/test_code_jina2.py +++ b/api/tests/integration_tests/workflow/nodes/code_executor/test_code_jinja2.py @@ -1,9 +1,9 @@ import base64 -from core.helper.code_executor.code_executor import CodeExecutor +from core.helper.code_executor.code_executor import CodeExecutor, CodeLanguage from core.helper.code_executor.jinja2_transformer import JINJA2_PRELOAD, PYTHON_RUNNER -CODE_LANGUAGE = 'jinja2' 
+CODE_LANGUAGE = CodeLanguage.JINJA2 def test_jinja2(): @@ -12,3 +12,9 @@ def test_jinja2(): code = PYTHON_RUNNER.replace('{{code}}', template).replace('{{inputs}}', inputs) result = CodeExecutor.execute_code(language=CODE_LANGUAGE, preload=JINJA2_PRELOAD, code=code) assert result == '<>Hello World<>\n' + + +def test_jinja2_with_code_template(): + result = CodeExecutor.execute_workflow_code_template( + language=CODE_LANGUAGE, code='Hello {{template}}', inputs={'template': 'World'}) + assert result == {'result': 'Hello World'} diff --git a/api/tests/integration_tests/workflow/nodes/code_executor/test_code_python3.py b/api/tests/integration_tests/workflow/nodes/code_executor/test_code_python3.py index 1983bc5e6b..b5c59c93fc 100644 --- a/api/tests/integration_tests/workflow/nodes/code_executor/test_code_python3.py +++ b/api/tests/integration_tests/workflow/nodes/code_executor/test_code_python3.py @@ -1,6 +1,9 @@ -from core.helper.code_executor.code_executor import CodeExecutor +from textwrap import dedent -CODE_LANGUAGE = 'python3' +from core.helper.code_executor.code_executor import CodeExecutor, CodeLanguage +from core.workflow.nodes.code.code_node import PYTHON_DEFAULT_CODE + +CODE_LANGUAGE = CodeLanguage.PYTHON3 def test_python3_plain(): @@ -10,9 +13,15 @@ def test_python3_plain(): def test_python3_json(): - code = """ -import json -print(json.dumps({'Hello': 'World'})) - """ + code = dedent(""" + import json + print(json.dumps({'Hello': 'World'})) + """) result = CodeExecutor.execute_code(language=CODE_LANGUAGE, preload='', code=code) assert result == '{"Hello": "World"}\n' + + +def test_python3_with_code_template(): + result = CodeExecutor.execute_workflow_code_template( + language=CODE_LANGUAGE, code=PYTHON_DEFAULT_CODE, inputs={'arg1': 'Hello', 'arg2': 'World'}) + assert result == {'result': 'HelloWorld'} From e7fe7ec0f65d67a012db0b91d303bb0fbc2f004a Mon Sep 17 00:00:00 2001 From: Yeuoly <45712896+Yeuoly@users.noreply.github.com> Date: Tue, 7 May 2024 13:02:00 +0800 Subject: [PATCH 005/267] feat: support time format (#4138) --- .../provider/builtin/time/tools/current_time.py | 7 ++++--- .../provider/builtin/time/tools/current_time.yaml | 13 +++++++++++++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/api/core/tools/provider/builtin/time/tools/current_time.py b/api/core/tools/provider/builtin/time/tools/current_time.py index 8722274565..90c01665e6 100644 --- a/api/core/tools/provider/builtin/time/tools/current_time.py +++ b/api/core/tools/provider/builtin/time/tools/current_time.py @@ -17,11 +17,12 @@ class CurrentTimeTool(BuiltinTool): """ # get timezone tz = tool_parameters.get('timezone', 'UTC') + fm = tool_parameters.get('format') or '%Y-%m-%d %H:%M:%S %Z' if tz == 'UTC': - return self.create_text_message(f'{datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S %Z")}') - + return self.create_text_message(f'{datetime.now(timezone.utc).strftime(fm)}') + try: tz = pytz_timezone(tz) except: return self.create_text_message(f'Invalid timezone: {tz}') - return self.create_text_message(f'{datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S %Z")}') \ No newline at end of file + return self.create_text_message(f'{datetime.now(tz).strftime(fm)}') \ No newline at end of file diff --git a/api/core/tools/provider/builtin/time/tools/current_time.yaml b/api/core/tools/provider/builtin/time/tools/current_time.yaml index f0b5f53bd8..d85d81ad54 100644 --- a/api/core/tools/provider/builtin/time/tools/current_time.yaml +++ b/api/core/tools/provider/builtin/time/tools/current_time.yaml @@ 
-12,6 +12,19 @@ description: pt_BR: A tool for getting the current time. llm: A tool for getting the current time. parameters: + - name: format + type: string + required: false + label: + en_US: Format + zh_Hans: 格式 + pt_BR: Format + human_description: + en_US: Time format in strftime standard. + zh_Hans: strftime 标准的时间格式。 + pt_BR: Time format in strftime standard. + form: form + default: "%Y-%m-%d %H:%M:%S" - name: timezone type: select required: false From d5d8b98d8245a968316479af17c0e41a7a453104 Mon Sep 17 00:00:00 2001 From: Yeuoly <45712896+Yeuoly@users.noreply.github.com> Date: Tue, 7 May 2024 13:49:45 +0800 Subject: [PATCH 006/267] feat: support openai stream usage (#4140) --- .../model_providers/openai/llm/llm.py | 105 +++++++++++++----- api/requirements.txt | 2 +- 2 files changed, 77 insertions(+), 30 deletions(-) diff --git a/api/core/model_runtime/model_providers/openai/llm/llm.py b/api/core/model_runtime/model_providers/openai/llm/llm.py index b7db39376c..69afabadb3 100644 --- a/api/core/model_runtime/model_providers/openai/llm/llm.py +++ b/api/core/model_runtime/model_providers/openai/llm/llm.py @@ -378,6 +378,11 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): if user: extra_model_kwargs['user'] = user + if stream: + extra_model_kwargs['stream_options'] = { + "include_usage": True + } + # text completion model response = client.completions.create( prompt=prompt_messages[0].content, @@ -446,8 +451,24 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): :return: llm response chunk generator result """ full_text = '' + prompt_tokens = 0 + completion_tokens = 0 + + final_chunk = LLMResultChunk( + model=model, + prompt_messages=prompt_messages, + delta=LLMResultChunkDelta( + index=0, + message=AssistantPromptMessage(content=''), + ) + ) + for chunk in response: if len(chunk.choices) == 0: + if chunk.usage: + # calculate num tokens + prompt_tokens = chunk.usage.prompt_tokens + completion_tokens = chunk.usage.completion_tokens continue delta = chunk.choices[0] @@ -464,20 +485,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): full_text += text if delta.finish_reason is not None: - # calculate num tokens - if chunk.usage: - # transform usage - prompt_tokens = chunk.usage.prompt_tokens - completion_tokens = chunk.usage.completion_tokens - else: - # calculate num tokens - prompt_tokens = self._num_tokens_from_string(model, prompt_messages[0].content) - completion_tokens = self._num_tokens_from_string(model, full_text) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - yield LLMResultChunk( + final_chunk = LLMResultChunk( model=chunk.model, prompt_messages=prompt_messages, system_fingerprint=chunk.system_fingerprint, @@ -485,7 +493,6 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): index=delta.index, message=assistant_prompt_message, finish_reason=delta.finish_reason, - usage=usage ) ) else: @@ -499,6 +506,19 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): ) ) + if not prompt_tokens: + prompt_tokens = self._num_tokens_from_string(model, prompt_messages[0].content) + + if not completion_tokens: + completion_tokens = self._num_tokens_from_string(model, full_text) + + # transform usage + usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) + + final_chunk.delta.usage = usage + + yield final_chunk + def _chat_generate(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], 
model_parameters: dict, tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None, @@ -531,6 +551,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): model_parameters["response_format"] = response_format + extra_model_kwargs = {} if tools: @@ -547,6 +568,11 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): if user: extra_model_kwargs['user'] = user + if stream: + extra_model_kwargs['stream_options'] = { + 'include_usage': True + } + # clear illegal prompt messages prompt_messages = self._clear_illegal_prompt_messages(model, prompt_messages) @@ -630,8 +656,24 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): """ full_assistant_content = '' delta_assistant_message_function_call_storage: ChoiceDeltaFunctionCall = None + prompt_tokens = 0 + completion_tokens = 0 + final_tool_calls = [] + final_chunk = LLMResultChunk( + model=model, + prompt_messages=prompt_messages, + delta=LLMResultChunkDelta( + index=0, + message=AssistantPromptMessage(content=''), + ) + ) + for chunk in response: if len(chunk.choices) == 0: + if chunk.usage: + # calculate num tokens + prompt_tokens = chunk.usage.prompt_tokens + completion_tokens = chunk.usage.completion_tokens continue delta = chunk.choices[0] @@ -667,6 +709,8 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): # tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls) function_call = self._extract_response_function_call(assistant_message_function_call) tool_calls = [function_call] if function_call else [] + if tool_calls: + final_tool_calls.extend(tool_calls) # transform assistant message to prompt message assistant_prompt_message = AssistantPromptMessage( @@ -677,19 +721,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): full_assistant_content += delta.delta.content if delta.delta.content else '' if has_finish_reason: - # calculate num tokens - prompt_tokens = self._num_tokens_from_messages(model, prompt_messages, tools) - - full_assistant_prompt_message = AssistantPromptMessage( - content=full_assistant_content, - tool_calls=tool_calls - ) - completion_tokens = self._num_tokens_from_messages(model, [full_assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - yield LLMResultChunk( + final_chunk = LLMResultChunk( model=chunk.model, prompt_messages=prompt_messages, system_fingerprint=chunk.system_fingerprint, @@ -697,7 +729,6 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): index=delta.index, message=assistant_prompt_message, finish_reason=delta.finish_reason, - usage=usage ) ) else: @@ -711,6 +742,22 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): ) ) + if not prompt_tokens: + prompt_tokens = self._num_tokens_from_messages(model, prompt_messages, tools) + + if not completion_tokens: + full_assistant_prompt_message = AssistantPromptMessage( + content=full_assistant_content, + tool_calls=final_tool_calls + ) + completion_tokens = self._num_tokens_from_messages(model, [full_assistant_prompt_message]) + + # transform usage + usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) + final_chunk.delta.usage = usage + + yield final_chunk + def _extract_response_tool_calls(self, response_tool_calls: list[ChatCompletionMessageToolCall | ChoiceDeltaToolCall]) \ -> list[AssistantPromptMessage.ToolCall]: diff --git a/api/requirements.txt b/api/requirements.txt index 
9d79afa4ec..e2c430c9d6 100644 --- a/api/requirements.txt +++ b/api/requirements.txt @@ -9,7 +9,7 @@ flask-restful~=0.3.10 flask-cors~=4.0.0 gunicorn~=22.0.0 gevent~=23.9.1 -openai~=1.13.3 +openai~=1.26.0 tiktoken~=0.6.0 psycopg2-binary~=2.9.6 pycryptodome==3.19.1 From 6f1911533c514a3655de140f3c9e101234f390ff Mon Sep 17 00:00:00 2001 From: Weaxs <459312872@qq.com> Date: Mon, 6 May 2024 23:40:24 -0700 Subject: [PATCH 007/267] bug fix: update minimax model_apis (#4116) --- api/core/model_runtime/model_providers/minimax/llm/llm.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/api/core/model_runtime/model_providers/minimax/llm/llm.py b/api/core/model_runtime/model_providers/minimax/llm/llm.py index cc88d15736..1fab20ebbc 100644 --- a/api/core/model_runtime/model_providers/minimax/llm/llm.py +++ b/api/core/model_runtime/model_providers/minimax/llm/llm.py @@ -34,6 +34,8 @@ from core.model_runtime.model_providers.minimax.llm.types import MinimaxMessage class MinimaxLargeLanguageModel(LargeLanguageModel): model_apis = { + 'abab6.5s-chat': MinimaxChatCompletionPro, + 'abab6.5-chat': MinimaxChatCompletionPro, 'abab6-chat': MinimaxChatCompletionPro, 'abab5.5s-chat': MinimaxChatCompletionPro, 'abab5.5-chat': MinimaxChatCompletionPro, From 6271463240da1e0bbb693e8dc18227ccc703fb96 Mon Sep 17 00:00:00 2001 From: Patryk Garstecki Date: Tue, 7 May 2024 09:41:57 +0200 Subject: [PATCH 008/267] feat(Languages): :alien: add pl-PL language (#4128) --- api/constants/languages.py | 3 +- web/i18n/language.ts | 9 + web/i18n/pl-PL/app-annotation.ts | 89 +++++ web/i18n/pl-PL/app-api.ts | 102 +++++ web/i18n/pl-PL/app-debug.ts | 463 ++++++++++++++++++++++ web/i18n/pl-PL/app-log.ts | 95 +++++ web/i18n/pl-PL/app-overview.ts | 162 ++++++++ web/i18n/pl-PL/app.ts | 97 +++++ web/i18n/pl-PL/billing.ts | 127 ++++++ web/i18n/pl-PL/common.ts | 547 ++++++++++++++++++++++++++ web/i18n/pl-PL/custom.ts | 31 ++ web/i18n/pl-PL/dataset-creation.ts | 146 +++++++ web/i18n/pl-PL/dataset-documents.ts | 350 ++++++++++++++++ web/i18n/pl-PL/dataset-hit-testing.ts | 28 ++ web/i18n/pl-PL/dataset-settings.ts | 38 ++ web/i18n/pl-PL/dataset.ts | 55 +++ web/i18n/pl-PL/explore.ts | 42 ++ web/i18n/pl-PL/layout.ts | 4 + web/i18n/pl-PL/login.ts | 66 ++++ web/i18n/pl-PL/register.ts | 4 + web/i18n/pl-PL/run-log.ts | 29 ++ web/i18n/pl-PL/share-app.ts | 75 ++++ web/i18n/pl-PL/tools.ts | 119 ++++++ web/i18n/pl-PL/workflow.ts | 354 +++++++++++++++++ 24 files changed, 3034 insertions(+), 1 deletion(-) create mode 100644 web/i18n/pl-PL/app-annotation.ts create mode 100644 web/i18n/pl-PL/app-api.ts create mode 100644 web/i18n/pl-PL/app-debug.ts create mode 100644 web/i18n/pl-PL/app-log.ts create mode 100644 web/i18n/pl-PL/app-overview.ts create mode 100644 web/i18n/pl-PL/app.ts create mode 100644 web/i18n/pl-PL/billing.ts create mode 100644 web/i18n/pl-PL/common.ts create mode 100644 web/i18n/pl-PL/custom.ts create mode 100644 web/i18n/pl-PL/dataset-creation.ts create mode 100644 web/i18n/pl-PL/dataset-documents.ts create mode 100644 web/i18n/pl-PL/dataset-hit-testing.ts create mode 100644 web/i18n/pl-PL/dataset-settings.ts create mode 100644 web/i18n/pl-PL/dataset.ts create mode 100644 web/i18n/pl-PL/explore.ts create mode 100644 web/i18n/pl-PL/layout.ts create mode 100644 web/i18n/pl-PL/login.ts create mode 100644 web/i18n/pl-PL/register.ts create mode 100644 web/i18n/pl-PL/run-log.ts create mode 100644 web/i18n/pl-PL/share-app.ts create mode 100644 web/i18n/pl-PL/tools.ts create mode 100644 web/i18n/pl-PL/workflow.ts diff --git 
a/api/constants/languages.py b/api/constants/languages.py index bdfd8022a3..7eb00816ab 100644 --- a/api/constants/languages.py +++ b/api/constants/languages.py @@ -1,6 +1,6 @@ -languages = ['en-US', 'zh-Hans', 'zh-Hant', 'pt-BR', 'es-ES', 'fr-FR', 'de-DE', 'ja-JP', 'ko-KR', 'ru-RU', 'it-IT', 'uk-UA', 'vi-VN'] +languages = ['en-US', 'zh-Hans', 'zh-Hant', 'pt-BR', 'es-ES', 'fr-FR', 'de-DE', 'ja-JP', 'ko-KR', 'ru-RU', 'it-IT', 'uk-UA', 'vi-VN', 'pl-PL'] language_timezone_mapping = { 'en-US': 'America/New_York', @@ -16,6 +16,7 @@ language_timezone_mapping = { 'it-IT': 'Europe/Rome', 'uk-UA': 'Europe/Kyiv', 'vi-VN': 'Asia/Ho_Chi_Minh', + 'pl-PL': 'Europe/Warsaw', } diff --git a/web/i18n/language.ts b/web/i18n/language.ts index 16ddae6dbd..4a8b03f00a 100644 --- a/web/i18n/language.ts +++ b/web/i18n/language.ts @@ -19,6 +19,7 @@ export type I18nText = { 'vi-VN': string 'de_DE': string 'zh_Hant': string + 'pl-PL': string } export const languages = [ @@ -112,6 +113,12 @@ export const languages = [ example: 'Xin chào, Dify!', supported: true, }, + { + value: 'pl-PL', + name: 'Polski (Polish)', + example: 'Cześć, Dify!', + supported: true, + }, ] export const LanguagesSupported = languages.filter(item => item.supported).map(item => item.value) @@ -133,6 +140,7 @@ export const NOTICE_I18N = { de_DE: 'Wichtiger Hinweis', ja_JP: '重要なお知らせ', ko_KR: '중요 공지', + pl_PL: 'Ważne ogłoszenie', uk_UA: 'Важливе повідомлення', vi_VN: 'Thông báo quan trọng', }, @@ -145,6 +153,7 @@ export const NOTICE_I18N = { de_DE: 'Our system will be unavailable from 19:00 to 24:00 UTC on August 28 for an upgrade. For questions, kindly contact our support team (support@dify.ai). We value your patience.', ja_JP: 'Our system will be unavailable from 19:00 to 24:00 UTC on August 28 for an upgrade. For questions, kindly contact our support team (support@dify.ai). We value your patience.', ko_KR: 'Our system will be unavailable from 19:00 to 24:00 UTC on August 28 for an upgrade. For questions, kindly contact our support team (support@dify.ai). We value your patience.', + pl_PL: 'Nasz system będzie niedostępny od 19:00 do 24:00 UTC 28 sierpnia w celu aktualizacji. W przypadku pytań prosimy o kontakt z naszym zespołem wsparcia (support@dify.ai). Doceniamy Twoją cierpliwość.', uk_UA: 'Наша система буде недоступна з 19:00 до 24:00 UTC 28 серпня для оновлення. Якщо у вас виникнуть запитання, будь ласка, зв’яжіться з нашою службою підтримки (support@dify.ai). Дякуємо за терпіння.', vi_VN: 'Hệ thống của chúng tôi sẽ ngừng hoạt động từ 19:00 đến 24:00 UTC vào ngày 28 tháng 8 để nâng cấp. Nếu có thắc mắc, vui lòng liên hệ với nhóm hỗ trợ của chúng tôi (support@dify.ai). 
Chúng tôi đánh giá cao sự kiên nhẫn của bạn.', }, diff --git a/web/i18n/pl-PL/app-annotation.ts b/web/i18n/pl-PL/app-annotation.ts new file mode 100644 index 0000000000..81a525935e --- /dev/null +++ b/web/i18n/pl-PL/app-annotation.ts @@ -0,0 +1,89 @@ +const translation = { + title: 'Adnotacje', + name: 'Odpowiedź adnotacji', + editBy: 'Odpowiedź edytowana przez {{author}}', + noData: { + title: 'Brak adnotacji', + description: + 'Możesz edytować adnotacje podczas debugowania aplikacji lub importować adnotacje tutaj w celu uzyskania wysokiej jakości odpowiedzi.', + }, + table: { + header: { + question: 'pytanie', + answer: 'odpowiedź', + createdAt: 'utworzono', + hits: 'trafienia', + actions: 'akcje', + addAnnotation: 'Dodaj adnotację', + bulkImport: 'Masowy import', + bulkExport: 'Masowy eksport', + clearAll: 'Wyczyść wszystkie adnotacje', + }, + }, + editModal: { + title: 'Edytuj odpowiedź adnotacji', + queryName: 'Zapytanie użytkownika', + answerName: 'Bot opowiadający historie', + yourAnswer: 'Twoja odpowiedź', + answerPlaceholder: 'Wpisz tutaj swoją odpowiedź', + yourQuery: 'Twoje zapytanie', + queryPlaceholder: 'Wpisz tutaj swoje zapytanie', + removeThisCache: 'Usuń tę adnotację', + createdAt: 'Utworzono', + }, + addModal: { + title: 'Dodaj odpowiedź adnotacji', + queryName: 'Pytanie', + answerName: 'Odpowiedź', + answerPlaceholder: 'Wpisz tutaj odpowiedź', + queryPlaceholder: 'Wpisz tutaj zapytanie', + createNext: 'Dodaj kolejną odpowiedź adnotacji', + }, + batchModal: { + title: 'Masowy import', + csvUploadTitle: 'Przeciągnij i upuść tutaj swój plik CSV, lub ', + browse: 'przeglądaj', + tip: 'Plik CSV musi spełniać następującą strukturę:', + question: 'pytanie', + answer: 'odpowiedź', + contentTitle: 'zawartość fragmentu', + content: 'zawartość', + template: 'Pobierz szablon tutaj', + cancel: 'Anuluj', + run: 'Uruchom batch', + runError: 'Uruchomienie batcha nie powiodło się', + processing: 'Przetwarzanie batcha', + completed: 'Import zakończony', + error: 'Błąd importu', + ok: 'OK', + }, + errorMessage: { + answerRequired: 'Odpowiedź jest wymagana', + queryRequired: 'Pytanie jest wymagane', + }, + viewModal: { + annotatedResponse: 'Odpowiedź adnotacji', + hitHistory: 'Historia trafień', + hit: 'Trafienie', + hits: 'Trafienia', + noHitHistory: 'Brak historii trafień', + }, + hitHistoryTable: { + query: 'Zapytanie', + match: 'Dopasowanie', + response: 'Odpowiedź', + source: 'Źródło', + score: 'Wynik', + time: 'Czas', + }, + initSetup: { + title: 'Początkowa konfiguracja odpowiedzi adnotacji', + configTitle: 'Konfiguracja odpowiedzi adnotacji', + confirmBtn: 'Zapisz i włącz', + configConfirmBtn: 'Zapisz', + }, + embeddingModelSwitchTip: + 'Model wektoryzacji tekstu adnotacji, przełączanie modeli spowoduje ponowne osadzenie, co wiąże się z dodatkowymi kosztami.', +} + +export default translation diff --git a/web/i18n/pl-PL/app-api.ts b/web/i18n/pl-PL/app-api.ts new file mode 100644 index 0000000000..46f9cbb454 --- /dev/null +++ b/web/i18n/pl-PL/app-api.ts @@ -0,0 +1,102 @@ +const translation = { + apiServer: 'Serwer API', + apiKey: 'Klucz API', + status: 'Status', + disabled: 'Wyłączony', + ok: 'W usłudze', + copy: 'Kopiuj', + copied: 'Skopiowane', + play: 'Graj', + pause: 'Pauza', + playing: 'Gra', + loading: 'Ładowanie', + merMaind: { + rerender: 'Przerób Renderowanie', + }, + never: 'Nigdy', + apiKeyModal: { + apiSecretKey: 'Tajny klucz API', + apiSecretKeyTips: + 'Aby zapobiec nadużyciom API, chron swój klucz API. Unikaj używania go jako zwykłego tekstu w kodzie front-end. 
:)', + createNewSecretKey: 'Utwórz nowy tajny klucz', + secretKey: 'Tajny Klucz', + created: 'UTWORZONY', + lastUsed: 'OSTATNIO UŻYWANY', + generateTips: 'Przechowuj ten klucz w bezpiecznym i dostępnym miejscu.', + }, + actionMsg: { + deleteConfirmTitle: 'Usunąć ten tajny klucz?', + deleteConfirmTips: 'Tej akcji nie można cofnąć.', + ok: 'OK', + }, + completionMode: { + title: 'Zakończenie App API', + info: 'Do generowania tekstu wysokiej jakości, takiego jak artykuły, podsumowania i tłumaczenia, użyj API completion-messages z danymi wejściowymi użytkownika. Generowanie tekstu zależy od parametrów modelu i szablonów promptów ustawionych w Dify Prompt Engineering.', + createCompletionApi: 'Utwórz Wiadomość Zakończenia', + createCompletionApiTip: + 'Utwórz Wiadomość Zakończenia, aby obsługiwać tryb pytanie-odpowiedź.', + inputsTips: + '(Opcjonalnie) Podaj pola wejściowe użytkownika jako pary klucz-wartość, odpowiadające zmiennym w Prompt Eng. Klucz to nazwa zmiennej, Wartość to wartość parametru. Jeśli typ pola to Wybierz, przesłana Wartość musi być jednym z predefiniowanych wyborów.', + queryTips: 'Treść tekstu wprowadzanego przez użytkownika.', + blocking: + 'Typ blokujący, czekanie na zakończenie wykonania i zwrócenie wyników. (Żądania mogą być przerywane, jeśli proces jest długi)', + streaming: + 'zwraca strumieniowo. Implementacja strumieniowego zwrotu na podstawie SSE (Server-Sent Events).', + messageFeedbackApi: 'Informacje zwrotne o wiadomości (lubię)', + messageFeedbackApiTip: + 'Oceniaj otrzymane wiadomości w imieniu użytkowników końcowych na podstawie polubień lub niepolubień. Te dane są widoczne na stronie Logi i adnotacje i są używane do przyszłego dostrojenia modelu.', + messageIDTip: 'ID wiadomości', + ratingTip: 'lubię lub nie lubię, null to cofnięcie', + parametersApi: 'Uzyskaj informacje o parametrach aplikacji', + parametersApiTip: + 'Pobierz skonfigurowane parametry wejściowe, w tym nazwy zmiennych, nazwy pól, typy i domyślne wartości. Zwykle używane do wyświetlania tych pól w formularzu lub wypełniania domyślnych wartości po załadowaniu klienta.', + }, + chatMode: { + title: 'Chat App API', + info: 'Do wszechstronnych aplikacji konwersacyjnych w formacie Q&A, wywołaj API chat-messages, aby rozpocząć dialog. Utrzymuj trwające rozmowy, przekazując zwrócone conversation_id. Parametry odpowiedzi i szablony zależą od ustawień Dify Prompt Eng.', + createChatApi: 'Utwórz wiadomość czatu', + createChatApiTip: + 'Utwórz nową wiadomość konwersacji lub kontynuuj istniejący dialog.', + inputsTips: + '(Opcjonalnie) Podaj pola wejściowe użytkownika jako pary klucz-wartość, odpowiadające zmiennym w Prompt Eng. Klucz to nazwa zmiennej, Wartość to wartość parametru. Jeśli typ pola to Wybierz, przesłana Wartość musi być jednym z predefiniowanych wyborów.', + queryTips: 'Treść pytania/wprowadzanej przez użytkownika', + blocking: + 'Typ blokujący, czekanie na zakończenie wykonania i zwrócenie wyników. (Żądania mogą być przerywane, jeśli proces jest długi)', + streaming: + 'zwraca strumieniowo. Implementacja strumieniowego zwrotu na podstawie SSE (Server-Sent Events).', + conversationIdTip: + '(Opcjonalnie) ID rozmowy: pozostaw puste dla pierwszej rozmowy; przekaż conversation_id z kontekstu, aby kontynuować dialog.', + messageFeedbackApi: 'Informacje zwrotne od użytkownika terminala, lubię', + messageFeedbackApiTip: + 'Oceniaj otrzymane wiadomości w imieniu użytkowników końcowych na podstawie polubień lub niepolubień. 
Te dane są widoczne na stronie Logi i adnotacje i są używane do przyszłego dostrojenia modelu.', + messageIDTip: 'ID wiadomości', + ratingTip: 'lubię lub nie lubię, null to cofnięcie', + chatMsgHistoryApi: 'Pobierz historię wiadomości czatu', + chatMsgHistoryApiTip: + 'Pierwsza strona zwraca najnowsze `limit` wiadomości, które są w odwrotnej kolejności.', + chatMsgHistoryConversationIdTip: 'ID rozmowy', + chatMsgHistoryFirstId: + 'ID pierwszego rekordu czatu na bieżącej stronie. Domyślnie brak.', + chatMsgHistoryLimit: 'Ile czatów jest zwracanych w jednym żądaniu', + conversationsListApi: 'Pobierz listę rozmów', + conversationsListApiTip: + 'Pobiera listę sesji bieżącego użytkownika. Domyślnie zwraca ostatnie 20 sesji.', + conversationsListFirstIdTip: + 'ID ostatniego rekordu na bieżącej stronie, domyślnie brak.', + conversationsListLimitTip: 'Ile czatów jest zwracanych w jednym żądaniu', + conversationRenamingApi: 'Zmiana nazwy rozmowy', + conversationRenamingApiTip: + 'Zmień nazwy rozmów; nazwa jest wyświetlana w interfejsach klienta wielosesyjnego.', + conversationRenamingNameTip: 'Nowa nazwa', + parametersApi: 'Uzyskaj informacje o parametrach aplikacji', + parametersApiTip: + 'Pobierz skonfigurowane parametry wejściowe, w tym nazwy zmiennych, nazwy pól, typy i domyślne wartości. Zwykle używane do wyświetlania tych pól w formularzu lub wypełniania domyślnych wartości po załadowaniu klienta.', + }, + develop: { + requestBody: 'Ciało żądania', + pathParams: 'Parametry ścieżki', + query: 'Zapytanie', + }, +} + +export default translation diff --git a/web/i18n/pl-PL/app-debug.ts b/web/i18n/pl-PL/app-debug.ts new file mode 100644 index 0000000000..9d51b9ee46 --- /dev/null +++ b/web/i18n/pl-PL/app-debug.ts @@ -0,0 +1,463 @@ +const translation = { + pageTitle: { + line1: 'MONIT', + line2: 'Inżynieria', + }, + orchestrate: 'Orkiestracja', + promptMode: { + simple: 'Przełącz na tryb Ekspert, aby edytować cały MONIT', + advanced: 'Tryb Ekspert', + switchBack: 'Przełącz z powrotem', + advancedWarning: { + title: + 'Przełączyłeś się na Tryb Ekspert, i po modyfikacji MONITU, NIE można powrócić do trybu podstawowego.', + description: 'W Trybie Ekspert, możesz edytować cały MONIT.', + learnMore: 'Dowiedz się więcej', + ok: 'OK', + }, + operation: { + addMessage: 'Dodaj Wiadomość', + }, + contextMissing: + 'Brak komponentu kontekstowego, skuteczność monitu może być niewystarczająca.', + }, + operation: { + applyConfig: 'Publikuj', + resetConfig: 'Resetuj', + debugConfig: 'Debuguj', + addFeature: 'Dodaj funkcję', + automatic: 'Automatyczny', + stopResponding: 'Przestaje odpowiadać', + agree: 'lubię', + disagree: 'nie lubię', + cancelAgree: 'Anuluj polubienie', + cancelDisagree: 'Anuluj niepolubienie', + userAction: 'Akcja użytkownika ', + }, + notSetAPIKey: { + title: 'Klucz dostawcy LLM nie został ustawiony', + trailFinished: 'Ścieżka zakończona', + description: + 'Klucz dostawcy LLM nie został ustawiony, musi zostać ustawiony przed debugowaniem.', + settingBtn: 'Przejdź do ustawień', + }, + trailUseGPT4Info: { + title: 'Obecnie nie obsługuje GPT-4', + description: 'Użyj GPT-4, proszę ustawić klucz API.', + }, + feature: { + groupChat: { + title: 'Rozmowy grupowe', + description: + 'Dodanie ustawień przedkonwersacyjnych dla aplikacji może poprawić doświadczenia użytkownika.', + }, + groupExperience: { + title: 'Poprawa doświadczenia', + }, + conversationOpener: { + title: 'Otwieracze do rozmów', + description: + 'W aplikacji czatowej pierwsze zdanie, które AI aktywnie wypowiada do użytkownika, 
zazwyczaj służy jako powitanie.', + }, + suggestedQuestionsAfterAnswer: { + title: 'Nawiązanie', + description: 'Ustawienie kolejnych pytań może poprawić czat.', + resDes: '3 sugestie dla kolejnego pytania użytkownika.', + tryToAsk: 'Spróbuj zapytać', + }, + moreLikeThis: { + title: 'Więcej takich jak ten', + description: + 'Generuj wiele tekstów na raz, a następnie edytuj i kontynuuj generowanie', + generateNumTip: 'Liczba generowanych razów', + tip: 'Korzystanie z tej funkcji spowoduje dodatkowe zużycie tokenów', + }, + speechToText: { + title: 'Mowa na tekst', + description: 'Po włączeniu można używać wprowadzania głosowego.', + resDes: 'Wprowadzanie głosowe jest włączone', + }, + textToSpeech: { + title: 'Tekst na mowę', + description: 'Po włączeniu tekst można przekształcić w mowę.', + resDes: 'Tekst na audio jest włączony', + }, + citation: { + title: 'Cytaty i odniesienia', + description: + 'Po włączeniu, pokaż dokument źródłowy i przypisaną sekcję wygenerowanej treści.', + resDes: 'Cytaty i odniesienia są włączone', + }, + annotation: { + title: 'Odpowiedź z adnotacją', + description: + 'Możesz ręcznie dodać odpowiedź wysokiej jakości do pamięci podręcznej dla priorytetowego dopasowania do podobnych pytań użytkownika.', + resDes: 'Odpowiedź z adnotacją jest włączona', + scoreThreshold: { + title: 'Próg wyników', + description: + 'Służy do ustawienia progu podobieństwa dla odpowiedzi z adnotacją.', + easyMatch: 'Łatwe dopasowanie', + accurateMatch: 'Dokładne dopasowanie', + }, + matchVariable: { + title: 'Zmienna dopasowania', + choosePlaceholder: 'Wybierz zmienną do dopasowania', + }, + cacheManagement: 'Adnotacje', + cached: 'Zanotowano', + remove: 'Usuń', + removeConfirm: 'Usunąć tę adnotację?', + add: 'Dodaj adnotację', + edit: 'Edytuj adnotację', + }, + dataSet: { + title: 'Kontekst', + noData: 'Możesz importować wiedzę jako kontekst', + words: 'Słowa', + textBlocks: 'Bloki tekstu', + selectTitle: 'Wybierz odniesienie do wiedzy', + selected: 'Wiedza wybrana', + noDataSet: 'Nie znaleziono wiedzy', + toCreate: 'Przejdź do tworzenia', + notSupportSelectMulti: 'Obecnie obsługiwana jest tylko jedna wiedza', + queryVariable: { + title: 'Zmienna zapytania', + tip: 'Ta zmienna będzie używana jako dane wejściowe zapytania do odzyskiwania kontekstu, uzyskując informacje kontekstowe związane z wprowadzonymi danymi.', + choosePlaceholder: 'Wybierz zmienną zapytania', + noVar: 'Brak zmiennych', + noVarTip: 'proszę stworzyć zmienną w sekcji Zmienne', + unableToQueryDataSet: 'Nie można odzyskać wiedzy', + unableToQueryDataSetTip: + 'Nie udało się pomyślnie odzyskać wiedzy, proszę wybrać zmienną zapytania kontekstowego w sekcji kontekstowej.', + ok: 'OK', + contextVarNotEmpty: + 'zmienna zapytania kontekstowego nie może być pusta', + deleteContextVarTitle: 'Usunąć zmienną „{{varName}}”?', + deleteContextVarTip: + 'Ta zmienna została ustawiona jako zmienna zapytania kontekstowego, a jej usunięcie wpłynie na normalne korzystanie z wiedzy. 
Jeśli nadal potrzebujesz jej usunąć, wybierz ją ponownie w sekcji kontekstowej.', + }, + }, + tools: { + title: 'Narzędzia', + tips: 'Narzędzia zapewniają standardową metodę wywołania API, przyjmując dane wejściowe użytkownika lub zmienne jako parametry żądania do zapytania o dane zewnętrzne jako kontekst.', + toolsInUse: '{{count}} narzędzi w użyciu', + modal: { + title: 'Narzędzie', + toolType: { + title: 'Typ narzędzia', + placeholder: 'Wybierz typ narzędzia', + }, + name: { + title: 'Nazwa', + placeholder: 'Wprowadź nazwę', + }, + variableName: { + title: 'Nazwa zmiennej', + placeholder: 'Wprowadź nazwę zmiennej', + }, + }, + }, + conversationHistory: { + title: 'Historia konwersacji', + description: 'Ustaw prefixy dla ról w rozmowie', + tip: 'Historia konwersacji nie jest włączona, proszę dodać w monicie powyżej.', + learnMore: 'Dowiedz się więcej', + editModal: { + title: 'Edycja nazw ról konwersacyjnych', + userPrefix: 'Prefix użytkownika', + assistantPrefix: 'Prefix asystenta', + }, + }, + toolbox: { + title: 'SKRZYNKA NARZĘDZIOWA', + }, + moderation: { + title: 'Moderacja treści', + description: + 'Zabezpiecz wyjście modelu, używając API moderacji lub utrzymując listę wrażliwych słów.', + allEnabled: 'Treść WEJŚCIOWA/WYJŚCIOWA Włączona', + inputEnabled: 'Treść WEJŚCIOWA Włączona', + outputEnabled: 'Treść WYJŚCIOWA Włączona', + modal: { + title: 'Ustawienia moderacji treści', + provider: { + title: 'Dostawca', + openai: 'Moderacja OpenAI', + openaiTip: { + prefix: + 'Moderacja OpenAI wymaga skonfigurowanego klucza API OpenAI w ', + suffix: '.', + }, + keywords: 'Słowa kluczowe', + }, + keywords: { + tip: 'Po jednym w wierszu, oddzielone znakiem nowej linii. Maksymalnie 100 znaków na wiersz.', + placeholder: 'Po jednym w wierszu, oddzielone znakiem nowej linii', + line: 'Linia', + }, + content: { + input: 'Moderuj treść WEJŚCIOWĄ', + output: 'Moderuj treść WYJŚCIOWĄ', + preset: 'Ustawione odpowiedzi', + placeholder: 'Tutaj wprowadź ustawione odpowiedzi', + condition: + 'Treść WEJŚCIA i WYJŚCIA musi być włączona przynajmniej jedna', + fromApi: 'Ustawione odpowiedzi zwracane przez API', + errorMessage: 'Ustawione odpowiedzi nie mogą być puste', + supportMarkdown: 'Obsługuje Markdown', + }, + openaiNotConfig: { + before: + 'Moderacja OpenAI wymaga skonfigurowanego klucza API OpenAI w', + after: '', + }, + }, + }, + }, + automatic: { + title: 'Zautomatyzowana orkiestracja aplikacji', + description: + 'Opisz swój scenariusz, Dify zorkiestruje aplikację dla Ciebie.', + intendedAudience: 'Dla kogo jest przeznaczona ta aplikacja?', + intendedAudiencePlaceHolder: 'np. Uczeń', + solveProblem: + 'Jakie problemy mają nadzieję, że AI może rozwiązać dla nich?', + solveProblemPlaceHolder: + 'np. 
Wyciąganie wniosków i podsumowanie informacji z długich raportów i artykułów', + generate: 'Generuj', + audiencesRequired: 'Wymagana publiczności', + problemRequired: 'Wymagany problem', + resTitle: 'Stworzyliśmy następującą aplikację dla Ciebie.', + apply: 'Zastosuj tę orkiestrację', + noData: + 'Opisz swój przypadek po lewej, podgląd orkiestracji pojawi się tutaj.', + loading: 'Orkiestracja aplikacji dla Ciebie...', + overwriteTitle: 'Zastąpić istniejącą konfigurację?', + overwriteMessage: + 'Zastosowanie tej orkiestracji zastąpi istniejącą konfigurację.', + }, + resetConfig: { + title: 'Potwierdź reset?', + message: + 'Reset odrzuca zmiany, przywracając ostatnią opublikowaną konfigurację.', + }, + errorMessage: { + nameOfKeyRequired: 'nazwa klucza: {{key}} wymagana', + valueOfVarRequired: '{{key}} wartość nie może być pusta', + queryRequired: 'Tekst żądania jest wymagany.', + waitForResponse: 'Proszę czekać na odpowiedź na poprzednią wiadomość.', + waitForBatchResponse: 'Proszę czekać na odpowiedź na zadanie wsadowe.', + notSelectModel: 'Proszę wybrać model', + waitForImgUpload: 'Proszę czekać na przesłanie obrazu', + }, + chatSubTitle: 'Instrukcje', + completionSubTitle: 'Prefix Monitu', + promptTip: + 'Monity kierują odpowiedziami AI za pomocą instrukcji i ograniczeń. Wstaw zmienne takie jak {{input}}. Ten monit nie będzie widoczny dla użytkowników.', + formattingChangedTitle: 'Zmiana formatowania', + formattingChangedText: + 'Modyfikacja formatowania zresetuje obszar debugowania, czy jesteś pewien?', + variableTitle: 'Zmienne', + variableTip: + 'Użytkownicy wypełniają zmienne w formularzu, automatycznie zastępując zmienne w monicie.', + notSetVar: + 'Zmienne pozwalają użytkownikom wprowadzać słowa wstępujące lub otwierające uwagi podczas wypełniania formularzy. Możesz spróbować wpisać "{{input}}" w słowach monitu.', + autoAddVar: + 'Niezdefiniowane zmienne odwołują się w pre-monicie, czy chcesz je dodać do formularza wejściowego użytkownika?', + variableTable: { + key: 'Klucz Zmiennej', + name: 'Nazwa Pola Wejściowego Użytkownika', + optional: 'Opcjonalnie', + type: 'Typ Wejścia', + action: 'Akcje', + typeString: 'String', + typeSelect: 'Wybierz', + }, + varKeyError: { + canNoBeEmpty: 'Klucz zmiennej nie może być pusty', + tooLong: + 'Klucz zmiennej: {{key}} za długi. Nie może być dłuższy niż 30 znaków', + notValid: + 'Klucz zmiennej: {{key}} jest nieprawidłowy. 
Może zawierać tylko litery, cyfry i podkreślenia', + notStartWithNumber: + 'Klucz zmiennej: {{key}} nie może zaczynać się od cyfry', + keyAlreadyExists: 'Klucz zmiennej: :{{key}} już istnieje', + }, + otherError: { + promptNoBeEmpty: 'Monit nie może być pusty', + historyNoBeEmpty: 'Historia konwersacji musi być ustawiona w monicie', + queryNoBeEmpty: 'Zapytanie musi być ustawione w monicie', + }, + variableConig: { + 'addModalTitle': 'Dodaj Pole Wejściowe', + 'editModalTitle': 'Edytuj Pole Wejściowe', + 'description': 'Ustawienia dla zmiennej {{varName}}', + 'fieldType': 'Typ pola', + 'string': 'Krótki tekst', + 'text-input': 'Krótki tekst', + 'paragraph': 'Akapit', + 'select': 'Wybierz', + 'number': 'Numer', + 'notSet': 'Nie ustawione, spróbuj wpisać {{input}} w monicie wstępnym', + 'stringTitle': 'Opcje pola tekstowego formularza', + 'maxLength': 'Maksymalna długość', + 'options': 'Opcje', + 'addOption': 'Dodaj opcję', + 'apiBasedVar': 'Zmienna oparta na API', + 'varName': 'Nazwa zmiennej', + 'labelName': 'Nazwa etykiety', + 'inputPlaceholder': 'Proszę wpisać', + 'required': 'Wymagane', + 'errorMsg': { + varNameRequired: 'Wymagana nazwa zmiennej', + labelNameRequired: 'Wymagana nazwa etykiety', + varNameCanBeRepeat: 'Nazwa zmiennej nie może się powtarzać', + atLeastOneOption: 'Wymagana jest co najmniej jedna opcja', + optionRepeat: 'Powtarzają się opcje', + }, + }, + vision: { + name: 'Wizja', + description: + 'Włączenie Wizji pozwoli modelowi przyjmować obrazy i odpowiadać na pytania o nich.', + settings: 'Ustawienia', + visionSettings: { + title: 'Ustawienia Wizji', + resolution: 'Rozdzielczość', + resolutionTooltip: `niska rozdzielczość pozwoli modelowi odbierać obrazy o rozdzielczości 512 x 512 i reprezentować obraz z limitem 65 tokenów. Pozwala to API na szybsze odpowiedzi i zużywa mniej tokenów wejściowych dla przypadków, które nie wymagają wysokiego szczegółu. + \n + wysoka rozdzielczość pozwala najpierw modelowi zobaczyć obraz niskiej rozdzielczości, a następnie tworzy szczegółowe przycięcia obrazów wejściowych jako 512px kwadratów w oparciu o rozmiar obrazu wejściowego. Każde z tych szczegółowych przycięć używa dwukrotności budżetu tokenów, co daje razem 129 tokenów.`, + high: 'Wysoka', + low: 'Niska', + uploadMethod: 'Metoda przesyłania', + both: 'Obie', + localUpload: 'Przesyłanie lokalne', + url: 'URL', + uploadLimit: 'Limit przesyłania', + }, + }, + voice: { + name: 'Głos', + defaultDisplay: 'Domyślny Głos', + description: 'Ustawienia głosu tekstu na mowę', + settings: 'Ustawienia', + voiceSettings: { + title: 'Ustawienia Głosu', + language: 'Język', + resolutionTooltip: 'Wsparcie językowe głosu tekstu na mowę.', + voice: 'Głos', + }, + }, + openingStatement: { + title: 'Wstęp do rozmowy', + add: 'Dodaj', + writeOpner: 'Napisz wstęp', + placeholder: + 'Tutaj napisz swoją wiadomość wprowadzającą, możesz użyć zmiennych, spróbuj wpisać {{variable}}.', + openingQuestion: 'Pytania otwierające', + noDataPlaceHolder: + 'Rozpoczynanie rozmowy z użytkownikiem może pomóc AI nawiązać bliższe połączenie z nim w aplikacjach konwersacyjnych.', + varTip: 'Możesz używać zmiennych, spróbuj wpisać {{variable}}', + tooShort: + 'Wymagane jest co najmniej 20 słów wstępnego monitu, aby wygenerować uwagi wstępne do rozmowy.', + notIncludeKey: + 'Wstępny monit nie zawiera zmiennej: {{key}}. 
Proszę dodać ją do wstępnego monitu.', + }, + modelConfig: { + model: 'Model', + setTone: 'Ustaw ton odpowiedzi', + title: 'Model i parametry', + modeType: { + chat: 'Czat', + completion: 'Uzupełnienie', + }, + }, + inputs: { + title: 'Debugowanie i podgląd', + noPrompt: 'Spróbuj wpisać jakiś monit w polu przedmonitu', + userInputField: 'Pole wejściowe użytkownika', + noVar: + 'Wypełnij wartość zmiennej, która będzie automatycznie zastępowana w monicie za każdym razem, gdy rozpocznie się nowa sesja.', + chatVarTip: + 'Wypełnij wartość zmiennej, która będzie automatycznie zastępowana w monicie za każdym razem, gdy rozpocznie się nowa sesja', + completionVarTip: + 'Wypełnij wartość zmiennej, która będzie automatycznie zastępowana w słowach monitu za każdym razem, gdy zostanie przesłane pytanie.', + previewTitle: 'Podgląd monitu', + queryTitle: 'Treść zapytania', + queryPlaceholder: 'Proszę wprowadzić tekst żądania.', + run: 'URUCHOM', + }, + result: 'Tekst wyjściowy', + datasetConfig: { + settingTitle: 'Ustawienia odzyskiwania', + knowledgeTip: 'Kliknij przycisk „+”, aby dodać wiedzę', + retrieveOneWay: { + title: 'Odzyskiwanie N-do-1', + description: + 'Na podstawie zamiaru użytkownika i opisów Wiedzy, Agent samodzielnie wybiera najlepszą Wiedzę do zapytania. Najlepiej sprawdza się w aplikacjach o wyraźnej, ograniczonej Wiedzy.', + }, + retrieveMultiWay: { + title: 'Odzyskiwanie wielościeżkowe', + description: + 'Na podstawie zamiaru użytkownika, zapytania obejmują wszystkie Wiedze, pobierają odpowiedni tekst z wielu źródeł i wybierają najlepsze wyniki dopasowane do zapytań użytkownika po ponownym rankingu. Wymagana jest konfiguracja API modelu Przerankowania.', + }, + rerankModelRequired: 'Wymagany model Przerankowania', + params: 'Parametry', + top_k: 'Najlepsze K', + top_kTip: + 'Używane do filtrowania fragmentów najbardziej podobnych do pytań użytkownika. 
System również dynamicznie dostosowuje wartość Najlepszych K, zgodnie z maksymalną liczbą tokenów wybranego modelu.', + score_threshold: 'Próg punktacji', + score_thresholdTip: + 'Używany do ustawienia progu podobieństwa dla filtrowania fragmentów.', + retrieveChangeTip: + 'Modyfikacja trybu indeksowania i odzyskiwania może wpłynąć na aplikacje powiązane z tą Wiedzą.', + }, + debugAsSingleModel: 'Debuguj jako pojedynczy model', + debugAsMultipleModel: 'Debuguj jako wiele modeli', + duplicateModel: 'Duplikuj', + publishAs: 'Opublikuj jako', + assistantType: { + name: 'Typ asystenta', + chatAssistant: { + name: 'Podstawowy Asystent', + description: + 'Buduj asystenta opartego na czacie, korzystając z dużego modelu językowego', + }, + agentAssistant: { + name: 'Asystent Agent', + description: + 'Buduj inteligentnego agenta, który może autonomicznie wybierać narzędzia do wykonywania zadań', + }, + }, + agent: { + agentMode: 'Tryb Agenta', + agentModeDes: 'Ustaw rodzaj trybu wnioskowania dla agenta', + agentModeType: { + ReACT: 'ReAct', + functionCall: 'Wywołanie funkcji', + }, + setting: { + name: 'Ustawienia Agenta', + description: + 'Ustawienia Asystenta Agenta pozwalają ustawić tryb agenta i zaawansowane funkcje, takie jak wbudowane monity, dostępne tylko w typie Agent.', + maximumIterations: { + name: 'Maksymalna liczba iteracji', + description: + 'Ogranicz liczbę iteracji, które asystent agenta może wykonać', + }, + }, + buildInPrompt: 'Wbudowany Monit', + firstPrompt: 'Pierwszy Monit', + nextIteration: 'Następna Iteracja', + promptPlaceholder: 'Napisz tutaj swój monit', + tools: { + name: 'Narzędzia', + description: + 'Używanie narzędzi może rozszerzyć możliwości LLM, takie jak wyszukiwanie w internecie lub wykonywanie obliczeń naukowych', + enabled: 'Włączone', + }, + }, +} + +export default translation diff --git a/web/i18n/pl-PL/app-log.ts b/web/i18n/pl-PL/app-log.ts new file mode 100644 index 0000000000..048958b110 --- /dev/null +++ b/web/i18n/pl-PL/app-log.ts @@ -0,0 +1,95 @@ +const translation = { + title: 'Dzienniki', + description: + 'Dzienniki rejestrują stan działania aplikacji, w tym dane wejściowe użytkowników i odpowiedzi AI.', + dateTimeFormat: 'DD/MM/YYYY HH:mm', + table: { + header: { + time: 'Czas', + endUser: 'Użytkownik końcowy', + input: 'Wejście', + output: 'Wyjście', + summary: 'Tytuł', + messageCount: 'Liczba wiadomości', + userRate: 'Ocena użytkownika', + adminRate: 'Ocena operatora', + startTime: 'CZAS STARTU', + status: 'STATUS', + runtime: 'CZAS DZIAŁANIA', + tokens: 'TOKENY', + user: 'UŻYTKOWNIK KOŃCOWY', + version: 'WERSJA', + }, + pagination: { + previous: 'Poprzedni', + next: 'Następny', + }, + empty: { + noChat: 'Brak rozmowy', + noOutput: 'Brak wyników', + element: { + title: 'Czy ktoś jest?', + content: + 'Obserwuj i adnotuj interakcje między użytkownikami końcowymi a aplikacjami AI tutaj, aby ciągle poprawiać dokładność AI. 
Możesz spróbować udostępnić lub przetestować aplikację internetową samodzielnie, a następnie wrócić na tę stronę.', + }, + }, + }, + detail: { + time: 'Czas', + conversationId: 'ID rozmowy', + promptTemplate: 'Szablon monitu', + promptTemplateBeforeChat: + 'Szablon monitu przed rozmową · Jako wiadomość systemowa', + annotationTip: 'Usprawnienia oznaczone przez {{user}}', + timeConsuming: '', + second: 's', + tokenCost: 'Wydatkowane tokeny', + loading: 'ładowanie', + operation: { + like: 'lubię', + dislike: 'nie lubię', + addAnnotation: 'Dodaj usprawnienie', + editAnnotation: 'Edytuj usprawnienie', + annotationPlaceholder: + 'Wprowadź oczekiwaną odpowiedź, którą chcesz, aby AI odpowiedziało, co może być używane do dokładnego dostrojenia modelu i ciągłej poprawy jakości generacji tekstu w przyszłości.', + }, + variables: 'Zmienne', + uploadImages: 'Przesłane obrazy', + }, + filter: { + period: { + today: 'Dzisiaj', + last7days: 'Ostatnie 7 dni', + last4weeks: 'Ostatnie 4 tygodnie', + last3months: 'Ostatnie 3 miesiące', + last12months: 'Ostatnie 12 miesięcy', + monthToDate: 'Od początku miesiąca', + quarterToDate: 'Od początku kwartału', + yearToDate: 'Od początku roku', + allTime: 'Cały czas', + }, + annotation: { + all: 'Wszystkie', + annotated: 'Zanotowane usprawnienia ({{count}} elementów)', + not_annotated: 'Nie zanotowane', + }, + }, + workflowTitle: 'Dzienniki przepływu pracy', + workflowSubtitle: 'Dziennik zarejestrował operację Automatyzacji.', + runDetail: { + title: 'Dziennik rozmowy', + workflowTitle: 'Szczegół dziennika', + }, + promptLog: 'Dziennik monitów', + agentLog: 'Dziennik agenta', + viewLog: 'Zobacz dziennik', + agentLogDetail: { + agentMode: 'Tryb agenta', + toolUsed: 'Użyte narzędzia', + iterations: 'Iteracje', + iteration: 'Iteracja', + finalProcessing: 'Końcowa obróbka', + }, +} + +export default translation diff --git a/web/i18n/pl-PL/app-overview.ts b/web/i18n/pl-PL/app-overview.ts new file mode 100644 index 0000000000..347c4599ea --- /dev/null +++ b/web/i18n/pl-PL/app-overview.ts @@ -0,0 +1,162 @@ +const translation = { + welcome: { + firstStepTip: 'Aby rozpocząć,', + enterKeyTip: 'wprowadź poniżej swój klucz API OpenAI', + getKeyTip: 'Pobierz swój klucz API z pulpitu nawigacyjnego OpenAI', + placeholder: 'Twój klucz API OpenAI (np. sk-xxxx)', + }, + apiKeyInfo: { + cloud: { + trial: { + title: 'Korzystasz z limitu próbnego {{providerName}}.', + description: + 'Limit próbny jest dostarczany do użytku testowego. Zanim wykorzystasz dozwolone wywołania limitu próbnego, skonfiguruj swojego własnego dostawcę modelu lub zakup dodatkowy limit.', + }, + exhausted: { + title: + 'Twój limit próbny został wyczerpany, proszę skonfiguruj swój klucz API.', + description: + 'Twój limit próbny został wyczerpany. 
Skonfiguruj swojego własnego dostawcę modelu lub zakup dodatkowy limit.', + }, + }, + selfHost: { + title: { + row1: 'Aby rozpocząć,', + row2: 'najpierw skonfiguruj swojego dostawcę modelu.', + }, + }, + callTimes: 'Liczba wywołań', + usedToken: 'Zużyty token', + setAPIBtn: 'Przejdź do konfiguracji dostawcy modelu', + tryCloud: 'Lub wypróbuj wersję chmurową Dify z darmowym limitem', + }, + overview: { + title: 'Przegląd', + appInfo: { + explanation: 'Gotowa do użycia aplikacja internetowa AI', + accessibleAddress: 'Publiczny adres URL', + preview: 'Podgląd', + regenerate: 'Wygeneruj ponownie', + preUseReminder: 'Przed kontynuowaniem włącz aplikację WebApp.', + settings: { + entry: 'Ustawienia', + title: 'Ustawienia WebApp', + webName: 'Nazwa WebApp', + webDesc: 'Opis WebApp', + webDescTip: + 'Ten tekst będzie wyświetlany po stronie klienta, zapewniając podstawowe wskazówki, jak korzystać z aplikacji', + webDescPlaceholder: 'Wpisz opis WebApp', + language: 'Język', + more: { + entry: 'Pokaż więcej ustawień', + copyright: 'Prawa autorskie', + copyRightPlaceholder: 'Wprowadź nazwę autora lub organizacji', + privacyPolicy: 'Polityka prywatności', + privacyPolicyPlaceholder: 'Wprowadź link do polityki prywatności', + privacyPolicyTip: + 'Pomaga odwiedzającym zrozumieć, jakie dane zbiera aplikacja, zobacz Politykę prywatności Dify.', + }, + }, + embedded: { + entry: 'Osadzone', + title: 'Osadź na stronie internetowej', + explanation: + 'Wybierz sposób osadzenia aplikacji czatu na swojej stronie internetowej', + iframe: + 'Aby dodać aplikację czatu w dowolnym miejscu na swojej stronie internetowej, dodaj ten kod iframe do swojego kodu HTML.', + scripts: + 'Aby dodać aplikację czatu w prawym dolnym rogu swojej strony internetowej, dodaj ten kod do swojego HTML.', + chromePlugin: 'Zainstaluj rozszerzenie Chrome Dify Chatbot', + copied: 'Skopiowane', + copy: 'Kopiuj', + }, + qrcode: { + title: 'Kod QR do udostępniania', + scan: 'Skanuj aplikację udostępniania', + download: 'Pobierz kod QR', + }, + customize: { + way: 'sposób', + entry: 'Dostosuj', + title: 'Dostosuj aplikację internetową AI', + explanation: + 'Możesz dostosować front aplikacji internetowej do swoich scenariuszy i potrzeb stylowych.', + way1: { + name: 'Skopiuj kod klienta, zmodyfikuj go i wdroż na Vercel (zalecane)', + step1: 'Skopiuj kod klienta i zmodyfikuj go', + step1Tip: + 'Kliknij tutaj, aby skopiować kod źródłowy na swoje konto GitHub i zmodyfikować kod', + step1Operation: 'Dify-WebClient', + step2: 'Wdroż na Vercel', + step2Tip: + 'Kliknij tutaj, aby zaimportować repozytorium do Vercel i wdrożyć', + step2Operation: 'Import repozytorium', + step3: 'Konfiguracja zmiennych środowiskowych', + step3Tip: 'Dodaj następujące zmienne środowiskowe w Vercel', + }, + way2: { + name: 'Napisz kod po stronie klienta, aby wywołać API i wdrożyć go na serwerze', + operation: 'Dokumentacja', + }, + }, + }, + apiInfo: { + title: 'API usługi w tle', + explanation: 'Łatwe do zintegrowania z twoją aplikacją', + accessibleAddress: 'Punkt końcowy API usługi', + doc: 'Dokumentacja API', + }, + status: { + running: 'W usłudze', + disable: 'Wyłącz', + }, + }, + analysis: { + title: 'Analiza', + ms: 'ms', + tokenPS: 'Tokeny/s', + totalMessages: { + title: 'Łączna liczba wiadomości', + explanation: + 'Dzienna liczba interakcji z AI; inżynieria i debugowanie monitów wykluczone.', + }, + activeUsers: { + title: 'Aktywni użytkownicy', + explanation: + 'Unikalni użytkownicy uczestniczący w pytaniach i odpowiedziach z AI; inżynieria i debugowanie monitów 
wykluczone.', + }, + tokenUsage: { + title: 'Zużycie tokenów', + explanation: + 'Odbija dziennie używane tokeny modelu językowego dla aplikacji, przydatne do kontroli kosztów.', + consumed: 'Zużyte', + }, + avgSessionInteractions: { + title: 'Śr. interakcji w sesji', + explanation: + 'Liczba ciągłych komunikacji użytkownik-AI; dla aplikacji opartych na rozmowach.', + }, + avgUserInteractions: { + title: 'Śr. interakcji użytkownika', + explanation: + 'Odbija dzienną częstotliwość użytkowania przez użytkowników. Ta metryka odzwierciedla przywiązanie użytkowników.', + }, + userSatisfactionRate: { + title: 'Wskaźnik zadowolenia użytkowników', + explanation: + 'Liczba polubień na 1000 wiadomości. Wskazuje to proporcję odpowiedzi, z których użytkownicy są bardzo zadowoleni.', + }, + avgResponseTime: { + title: 'Śr. czas odpowiedzi', + explanation: + 'Czas (ms) potrzebny AI na przetworzenie/odpowiedź; dla aplikacji opartych na tekście.', + }, + tps: { + title: 'Szybkość wydajności tokenów', + explanation: + 'Mierzy wydajność LLM. Liczy szybkość wydajności tokenów LLM od początku żądania do zakończenia wyjścia.', + }, + }, +} + +export default translation diff --git a/web/i18n/pl-PL/app.ts b/web/i18n/pl-PL/app.ts new file mode 100644 index 0000000000..1bf6e2f376 --- /dev/null +++ b/web/i18n/pl-PL/app.ts @@ -0,0 +1,97 @@ +const translation = { + createApp: 'UTWÓRZ APLIKACJĘ', + types: { + all: 'Wszystkie', + chatbot: 'Chatbot', + agent: 'Agent', + workflow: 'Przepływ pracy', + completion: 'Zakończenie', + }, + duplicate: 'Duplikuj', + duplicateTitle: 'Duplikuj aplikację', + export: 'Eksportuj DSL', + exportFailed: 'Eksport DSL nie powiódł się.', + importDSL: 'Importuj plik DSL', + createFromConfigFile: 'Utwórz z pliku DSL', + deleteAppConfirmTitle: 'Usunąć tę aplikację?', + deleteAppConfirmContent: + 'Usunięcie aplikacji jest nieodwracalne. Użytkownicy nie będą mieli już dostępu do twojej aplikacji, a wszystkie konfiguracje monitów i dzienniki zostaną trwale usunięte.', + appDeleted: 'Aplikacja usunięta', + appDeleteFailed: 'Nie udało się usunąć aplikacji', + join: 'Dołącz do społeczności', + communityIntro: + 'Dyskutuj z członkami zespołu, współtwórcami i deweloperami na różnych kanałach.', + roadmap: 'Zobacz naszą mapę drogową', + newApp: { + startFromBlank: 'Utwórz od podstaw', + startFromTemplate: 'Utwórz z szablonu', + captionAppType: 'Jaki typ aplikacji chcesz stworzyć?', + chatbotDescription: + 'Zbuduj aplikację opartą na czacie. Ta aplikacja używa formatu pytań i odpowiedzi, umożliwiając wielokrotne rundy ciągłej konwersacji.', + completionDescription: + 'Zbuduj aplikację generującą teksty wysokiej jakości na podstawie monitów, takich jak generowanie artykułów, streszczeń, tłumaczeń i innych.', + completionWarning: 'Ten typ aplikacji nie będzie już obsługiwany.', + agentDescription: + 'Zbuduj inteligentnego agenta, który może autonomicznie wybierać narzędzia do wykonywania zadań', + workflowDescription: + 'Zbuduj aplikację, która w oparciu o przepływ pracy generuje teksty wysokiej jakości z dużą możliwością dostosowania. Jest odpowiednia dla doświadczonych użytkowników.', + workflowWarning: 'Obecnie w fazie beta', + chatbotType: 'Metoda orkiestracji chatbota', + basic: 'Podstawowy', + basicTip: 'Dla początkujących, można przełączyć się później na Chatflow', + basicFor: 'Dla początkujących', + basicDescription: + 'Podstawowa orkiestracja pozwala na skonfigurowanie aplikacji Chatbot za pomocą prostych ustawień, bez możliwości modyfikacji wbudowanych monitów. 
Jest odpowiednia dla początkujących.', + advanced: 'Chatflow', + advancedFor: 'Dla zaawansowanych użytkowników', + advancedDescription: + 'Orkiestracja przepływu pracy organizuje Chatboty w formie przepływów pracy, oferując wysoki stopień dostosowania, w tym możliwość edycji wbudowanych monitów. Jest odpowiednia dla doświadczonych użytkowników.', + captionName: 'Ikona i nazwa aplikacji', + appNamePlaceholder: 'Podaj nazwę swojej aplikacji', + captionDescription: 'Opis', + appDescriptionPlaceholder: 'Wprowadź opis aplikacji', + useTemplate: 'Użyj tego szablonu', + previewDemo: 'Podgląd demo', + chatApp: 'Asystent', + chatAppIntro: + 'Chcę zbudować aplikację opartą na czacie. Ta aplikacja używa formatu pytań i odpowiedzi, umożliwiając wielokrotne rundy ciągłej konwersacji.', + agentAssistant: 'Nowy asystent agenta', + completeApp: 'Generator tekstu', + completeAppIntro: + 'Chcę stworzyć aplikację, która generuje teksty wysokiej jakości na podstawie monitów, takich jak generowanie artykułów, streszczeń, tłumaczeń i innych.', + showTemplates: 'Chcę wybrać z szablonu', + hideTemplates: 'Wróć do wyboru trybu', + Create: 'Utwórz', + Cancel: 'Anuluj', + nameNotEmpty: 'Nazwa nie może być pusta', + appTemplateNotSelected: 'Proszę wybrać szablon', + appTypeRequired: 'Proszę wybrać typ aplikacji', + appCreated: 'Aplikacja utworzona', + appCreateFailed: 'Nie udało się utworzyć aplikacji', + }, + editApp: 'Edytuj informacje', + editAppTitle: 'Edytuj informacje o aplikacji', + editDone: 'Informacje o aplikacji zaktualizowane', + editFailed: 'Nie udało się zaktualizować informacji o aplikacji', + emoji: { + ok: 'OK', + cancel: 'Anuluj', + }, + switch: 'Przełącz na Orkiestrację Przepływu Pracy', + switchTipStart: + 'Dla ciebie zostanie utworzona nowa kopia aplikacji, a nowa kopia przełączy się na Orkiestrację Przepływu Pracy. 
Nowa kopia będzie ', + switchTip: 'nie pozwoli', + switchTipEnd: ' na powrót do Podstawowej Orkiestracji.', + switchLabel: 'Kopia aplikacji do utworzenia', + removeOriginal: 'Usuń oryginalną aplikację', + switchStart: 'Rozpocznij przełączanie', + typeSelector: { + all: 'WSZYSTKIE Typy', + chatbot: 'Chatbot', + agent: 'Agent', + workflow: 'Przepływ pracy', + completion: 'Zakończenie', + }, +} + +export default translation diff --git a/web/i18n/pl-PL/billing.ts b/web/i18n/pl-PL/billing.ts new file mode 100644 index 0000000000..40ddc1f732 --- /dev/null +++ b/web/i18n/pl-PL/billing.ts @@ -0,0 +1,127 @@ +const translation = { + currentPlan: 'Obecny plan', + upgradeBtn: { + plain: 'Ulepsz plan', + encourage: 'Ulepsz teraz', + encourageShort: 'Ulepsz', + }, + viewBilling: 'Zarządzaj rozliczeniami i subskrypcjami', + buyPermissionDeniedTip: + 'Skontaktuj się z administratorem swojej firmy, aby zasubskrybować', + plansCommon: { + title: 'Wybierz plan odpowiedni dla siebie', + yearlyTip: 'Otrzymaj 2 miesiące za darmo, subskrybując rocznie!', + mostPopular: 'Najpopularniejszy', + planRange: { + monthly: 'Miesięczny', + yearly: 'Roczny', + }, + month: 'miesiąc', + year: 'rok', + save: 'Oszczędź ', + free: 'Darmowy', + currentPlan: 'Obecny plan', + contractSales: 'Skontaktuj się z działem sprzedaży', + contractOwner: 'Skontaktuj się z zarządcą zespołu', + startForFree: 'Zacznij za darmo', + getStartedWith: 'Rozpocznij z ', + contactSales: 'Kontakt z działem sprzedaży', + talkToSales: 'Porozmawiaj z działem sprzedaży', + modelProviders: 'Dostawcy modeli', + teamMembers: 'Członkowie zespołu', + buildApps: 'Twórz aplikacje', + vectorSpace: 'Przestrzeń wektorowa', + vectorSpaceBillingTooltip: + 'Każdy 1MB może przechowywać około 1,2 miliona znaków z wektoryzowanych danych (szacowane na podstawie OpenAI Embeddings, różni się w zależności od modelu).', + vectorSpaceTooltip: + 'Przestrzeń wektorowa jest systemem pamięci długoterminowej wymaganym dla LLM, aby zrozumieć Twoje dane.', + documentsUploadQuota: 'Limit przesyłanych dokumentów', + documentProcessingPriority: 'Priorytet przetwarzania dokumentów', + documentProcessingPriorityTip: + 'Dla wyższego priorytetu przetwarzania dokumentów, ulepsz swój plan.', + documentProcessingPriorityUpgrade: + 'Przetwarzaj więcej danych z większą dokładnością i w szybszym tempie.', + priority: { + 'standard': 'Standardowy', + 'priority': 'Priorytetowy', + 'top-priority': 'Najwyższy priorytet', + }, + logsHistory: 'Historia logów', + customTools: 'Niestandardowe narzędzia', + unavailable: 'Niedostępne', + days: 'dni', + unlimited: 'Nieograniczony', + support: 'Wsparcie', + supportItems: { + communityForums: 'Forum społecznościowe', + emailSupport: 'Wsparcie mailowe', + priorityEmail: 'Priorytetowa pomoc mailowa i czat', + logoChange: 'Zmiana logo', + SSOAuthentication: 'Uwierzytelnianie SSO', + personalizedSupport: 'Personalizowane wsparcie', + dedicatedAPISupport: 'Dedykowane wsparcie API', + customIntegration: 'Niestandardowa integracja i wsparcie', + ragAPIRequest: 'Żądania API RAG', + bulkUpload: 'Masowe przesyłanie dokumentów', + agentMode: 'Tryb agenta', + workflow: 'Przepływ pracy', + }, + comingSoon: 'Wkrótce dostępne', + member: 'Członek', + memberAfter: 'Członek', + messageRequest: { + title: 'Limity kredytów wiadomości', + tooltip: + 'Limity wywołań wiadomości dla różnych planów używających modeli OpenAI (z wyjątkiem gpt4). 
Wiadomości przekraczające limit będą korzystać z twojego klucza API OpenAI.', + }, + annotatedResponse: { + title: 'Limity kredytów na adnotacje', + tooltip: + 'Ręczna edycja i adnotacja odpowiedzi zapewniają możliwość dostosowania wysokiej jakości odpowiedzi na pytania dla aplikacji. (Stosowane tylko w aplikacjach czatowych)', + }, + ragAPIRequestTooltip: + 'Odnosi się do liczby wywołań API wykorzystujących tylko zdolności przetwarzania bazy wiedzy Dify.', + receiptInfo: + 'Tylko właściciel zespołu i administrator zespołu mogą subskrybować i przeglądać informacje o rozliczeniach', + }, + plans: { + sandbox: { + name: 'Sandbox', + description: '200 razy darmowa próba GPT', + includesTitle: 'Zawiera:', + }, + professional: { + name: 'Profesjonalny', + description: + 'Dla osób fizycznych i małych zespołów, aby odblokować więcej mocy w przystępnej cenie.', + includesTitle: 'Wszystko w darmowym planie, plus:', + }, + team: { + name: 'Zespół', + description: + 'Współpracuj bez ograniczeń i ciesz się najwyższą wydajnością.', + includesTitle: 'Wszystko w planie Profesjonalnym, plus:', + }, + enterprise: { + name: 'Przedsiębiorstwo', + description: + 'Uzyskaj pełne możliwości i wsparcie dla systemów o kluczowym znaczeniu dla misji.', + includesTitle: 'Wszystko w planie Zespołowym, plus:', + }, + }, + vectorSpace: { + fullTip: 'Przestrzeń wektorowa jest pełna.', + fullSolution: 'Ulepsz swój plan, aby uzyskać więcej miejsca.', + }, + apps: { + fullTipLine1: 'Ulepsz swój plan, aby', + fullTipLine2: 'tworzyć więcej aplikacji.', + }, + annotatedResponse: { + fullTipLine1: 'Ulepsz swój plan, aby', + fullTipLine2: 'adnotować więcej rozmów.', + quotaTitle: 'Limit adnotacji odpowiedzi', + }, +} + +export default translation diff --git a/web/i18n/pl-PL/common.ts b/web/i18n/pl-PL/common.ts new file mode 100644 index 0000000000..108935a125 --- /dev/null +++ b/web/i18n/pl-PL/common.ts @@ -0,0 +1,547 @@ +const translation = { + api: { + success: 'Sukces', + actionSuccess: 'Akcja powiodła się', + saved: 'Zapisane', + create: 'Utworzono', + remove: 'Usunięto', + }, + operation: { + create: 'Utwórz', + confirm: 'Potwierdź', + cancel: 'Anuluj', + clear: 'Wyczyść', + save: 'Zapisz', + edit: 'Edytuj', + add: 'Dodaj', + added: 'Dodano', + refresh: 'Odśwież', + reset: 'Resetuj', + search: 'Szukaj', + change: 'Zmień', + remove: 'Usuń', + send: 'Wyślij', + copy: 'Kopiuj', + lineBreak: 'Złamanie linii', + sure: 'Jestem pewien', + download: 'Pobierz', + delete: 'Usuń', + settings: 'Ustawienia', + setup: 'Konfiguruj', + getForFree: 'Zdobądź za darmo', + reload: 'Przeładuj', + ok: 'OK', + log: 'Dziennik', + learnMore: 'Dowiedz się więcej', + params: 'Parametry', + duplicate: 'Duplikuj', + rename: 'Zmień nazwę', + }, + placeholder: { + input: 'Proszę wprowadzić', + select: 'Proszę wybrać', + }, + voice: { + language: { + zhHans: 'Chiński', + zhHant: 'Chiński tradycyjny', + enUS: 'Angielski', + deDE: 'Niemiecki', + frFR: 'Francuski', + esES: 'Hiszpański', + itIT: 'Włoski', + thTH: 'Tajski', + idID: 'Indonezyjski', + jaJP: 'Japoński', + koKR: 'Koreański', + ptBR: 'Portugalski', + ruRU: 'Rosyjski', + ukUA: 'Ukraiński', + viVN: 'Wietnamski', + }, + }, + unit: { + char: 'znaki', + }, + actionMsg: { + noModification: 'W tej chwili brak zmian.', + modifiedSuccessfully: 'Zmodyfikowano pomyślnie', + modifiedUnsuccessfully: 'Nie udało się zmodyfikować', + copySuccessfully: 'Skopiowano pomyślnie', + paySucceeded: 'Płatność zakończona sukcesem', + payCancelled: 'Płatność anulowana', + generatedSuccessfully: 'Wygenerowano pomyślnie', + 
generatedUnsuccessfully: 'Nie udało się wygenerować', + }, + model: { + params: { + temperature: 'Temperatura', + temperatureTip: + 'Kontroluje przypadkowość: obniżenie powoduje mniej przypadkowych uzupełnień. Gdy temperatura zbliża się do zera, model staje się deterministyczny i powtarzalny.', + top_p: 'Top P', + top_pTip: + 'Kontroluje różnorodność poprzez próbkowanie jądra: 0,5 oznacza, że rozważane są połowa wszystkich opcji ważonych prawdopodobieństwem.', + presence_penalty: 'Kara za obecność', + presence_penaltyTip: + 'Jak bardzo karać nowe tokeny w zależności od tego, czy pojawiły się już w tekście.\nZwiększa prawdopodobieństwo, że model zacznie rozmawiać o nowych tematach.', + frequency_penalty: 'Kara za częstotliwość', + frequency_penaltyTip: + 'Jak bardzo karać nowe tokeny bazując na ich dotychczasowej częstotliwości w tekście.\nZmniejsza prawdopodobieństwo, że model będzie powtarzał tę samą linię dosłownie.', + max_tokens: 'Maksymalna liczba tokenów', + max_tokensTip: + 'Służy do ograniczania maksymalnej długości odpowiedzi w tokenach. \nWiększe wartości mogą ograniczyć miejsce na słowa wstępne, dzienniki rozmów i Wiedzę. \nZaleca się ustawienie go poniżej dwóch trzecich\ngpt-4-1106-preview, gpt-4-vision-preview maksymalna liczba tokenów (input 128k output 4k)', + maxTokenSettingTip: + 'Twoje ustawienie maksymalnej liczby tokenów jest wysokie, potencjalnie ograniczając miejsce na monity, zapytania i dane. Rozważ ustawienie go poniżej 2/3.', + setToCurrentModelMaxTokenTip: + 'Maksymalna liczba tokenów została zaktualizowana do 80% maksymalnej liczby tokenów obecnego modelu {{maxToken}}.', + stop_sequences: 'Sekwencje zatrzymujące', + stop_sequencesTip: + 'Do czterech sekwencji, w których API przestanie generować dalsze tokeny. Zwrócony tekst nie będzie zawierał sekwencji zatrzymującej.', + stop_sequencesPlaceholder: 'Wpisz sekwencję i naciśnij Tab', + }, + tone: { + Creative: 'Kreatywny', + Balanced: 'Zrównoważony', + Precise: 'Precyzyjny', + Custom: 'Niestandardowy', + }, + addMoreModel: 'Przejdź do ustawień, aby dodać więcej modeli', + }, + menus: { + status: 'beta', + explore: 'Eksploruj', + apps: 'Studio', + plugins: 'Pluginy', + pluginsTips: + 'Integruj pluginy stron trzecich lub twórz pluginy AI kompatybilne z ChatGPT.', + datasets: 'Wiedza', + datasetsTips: + 'NADCHODZI: Importuj swoje własne dane tekstowe lub wpisuj dane w czasie rzeczywistym przez Webhook, aby wzmocnić kontekst LLM.', + newApp: 'Nowa aplikacja', + newDataset: 'Utwórz Wiedzę', + tools: 'Narzędzia', + }, + userProfile: { + settings: 'Ustawienia', + workspace: 'Przestrzeń robocza', + createWorkspace: 'Utwórz przestrzeń roboczą', + helpCenter: 'Pomoc', + roadmapAndFeedback: 'Opinie', + community: 'Społeczność', + about: 'O', + logout: 'Wyloguj się', + }, + settings: { + accountGroup: 'KONTO', + workplaceGroup: 'PRZESTRZEŃ ROBOCZA', + account: 'Moje konto', + members: 'Członkowie', + billing: 'Rozliczenia', + integrations: 'Integracje', + language: 'Język', + provider: 'Dostawca modelu', + dataSource: 'Źródło danych', + plugin: 'Pluginy', + apiBasedExtension: 'Rozszerzenie API', + }, + account: { + avatar: 'Awatar', + name: 'Nazwa', + email: 'Email', + password: 'Hasło', + passwordTip: + 'Możesz ustawić stałe hasło, jeśli nie chcesz używać tymczasowych kodów logowania', + setPassword: 'Ustaw hasło', + resetPassword: 'Zresetuj hasło', + currentPassword: 'Obecne hasło', + newPassword: 'Nowe hasło', + confirmPassword: 'Potwierdź hasło', + notEqual: 'Dwa hasła są różne.', + langGeniusAccount: 'Konto Dify', + 
langGeniusAccountTip: 'Twoje konto Dify i powiązane dane użytkownika.', + editName: 'Edytuj nazwę', + showAppLength: 'Pokaż {{length}} aplikacje', + }, + members: { + team: 'Zespół', + invite: 'Dodaj', + name: 'NAZWA', + lastActive: 'OSTATNIA AKTYWNOŚĆ', + role: 'ROLE', + pending: 'Oczekujący...', + owner: 'Właściciel', + admin: 'Admin', + adminTip: 'Może tworzyć aplikacje i zarządzać ustawieniami zespołu', + normal: 'Normalny', + normalTip: 'Może tylko korzystać z aplikacji, nie może tworzyć aplikacji', + inviteTeamMember: 'Dodaj członka zespołu', + inviteTeamMemberTip: + 'Mogą uzyskać bezpośredni dostęp do danych Twojego zespołu po zalogowaniu.', + email: 'Email', + emailInvalid: 'Nieprawidłowy format e-maila', + emailPlaceholder: 'Proszę podać adresy e-mail', + sendInvite: 'Wyślij zaproszenie', + invitedAsRole: 'Zaproszony jako użytkownik typu {{role}}', + invitationSent: 'Zaproszenie wysłane', + invitationSentTip: + 'Zaproszenie zostało wysłane, a oni mogą zalogować się do Dify, aby uzyskać dostęp do danych Twojego zespołu.', + invitationLink: 'Link zaproszenia', + failedinvitationEmails: 'Poniższe osoby nie zostały pomyślnie zaproszone', + ok: 'OK', + removeFromTeam: 'Usuń z zespołu', + removeFromTeamTip: 'Usunie dostęp do zespołu', + setAdmin: 'Ustaw jako administratora', + setMember: 'Ustaw jako zwykłego członka', + disinvite: 'Anuluj zaproszenie', + deleteMember: 'Usuń członka', + you: '(Ty)', + }, + integrations: { + connected: 'Połączony', + google: 'Google', + googleAccount: 'Zaloguj się przy użyciu konta Google', + github: 'GitHub', + githubAccount: 'Zaloguj się przy użyciu konta GitHub', + connect: 'Połącz', + }, + language: { + displayLanguage: 'Język interfejsu', + timezone: 'Strefa czasowa', + }, + provider: { + apiKey: 'Klucz API', + enterYourKey: 'Wprowadź tutaj swój klucz API', + invalidKey: 'Nieprawidłowy klucz API OpenAI', + validatedError: 'Weryfikacja nie powiodła się: ', + validating: 'Weryfikowanie klucza...', + saveFailed: 'Zapis klucza API nie powiódł się', + apiKeyExceedBill: 'Ten KLUCZ API nie ma dostępnych limitów, przeczytaj', + addKey: 'Dodaj klucz', + comingSoon: 'Już wkrótce', + editKey: 'Edytuj', + invalidApiKey: 'Nieprawidłowy klucz API', + azure: { + apiBase: 'Podstawa API', + apiBasePlaceholder: + 'Adres URL podstawowy Twojego końcowego punktu Azure OpenAI.', + apiKey: 'Klucz API', + apiKeyPlaceholder: 'Wprowadź tutaj swój klucz API', + helpTip: 'Dowiedz się więcej o usłudze Azure OpenAI', + }, + openaiHosted: { + openaiHosted: 'Hostowany OpenAI', + onTrial: 'NA PROBĘ', + exhausted: 'WYCZERPANY LIMIT', + desc: 'Usługa hostowania OpenAI dostarczana przez Dify pozwala korzystać z modeli takich jak GPT-3.5. Przed wyczerpaniem limitu próbnego należy skonfigurować inne dostawców modeli.', + callTimes: 'Czasy wywołań', + usedUp: 'Limit próbny został wyczerpany. Dodaj własnego dostawcę modeli.', + useYourModel: 'Aktualnie używany jest własny dostawca modeli.', + close: 'Zamknij', + }, + anthropicHosted: { + anthropicHosted: 'Anthropic Claude', + onTrial: 'NA PROBĘ', + exhausted: 'WYCZERPANY LIMIT', + desc: 'Potężny model, który doskonale sprawdza się w szerokim spektrum zadań, od zaawansowanego dialogu i generowania treści twórczych po szczegółowe instrukcje.', + callTimes: 'Czasy wywołań', + usedUp: 'Limit próbny został wyczerpany. 
Dodaj własnego dostawcę modeli.', + useYourModel: 'Aktualnie używany jest własny dostawca modeli.', + close: 'Zamknij', + }, + anthropic: { + using: 'Zdolność do osadzania jest używana', + enableTip: + 'Aby włączyć model Anthropica, musisz najpierw powiązać się z usługą OpenAI lub Azure OpenAI.', + notEnabled: 'Nie włączono', + keyFrom: 'Pobierz swój klucz API od Anthropic', + }, + encrypted: { + front: 'Twój KLUCZ API będzie szyfrowany i przechowywany za pomocą', + back: ' technologii.', + }, + }, + modelProvider: { + notConfigured: + 'Systemowy model nie został jeszcze w pełni skonfigurowany, co może skutkować niedostępnością niektórych funkcji.', + systemModelSettings: 'Ustawienia modelu systemowego', + systemModelSettingsLink: + 'Dlaczego konieczne jest skonfigurowanie modelu systemowego?', + selectModel: 'Wybierz swój model', + setupModelFirst: 'Proszę najpierw skonfigurować swój model', + systemReasoningModel: { + key: 'Model wnioskowania systemowego', + tip: 'Ustaw domyślny model wnioskowania do użytku przy tworzeniu aplikacji, a także cechy takie jak generowanie nazw dialogów i sugestie następnego pytania będą również korzystać z domyślnego modelu wnioskowania.', + }, + embeddingModel: { + key: 'Model osadzania', + tip: 'Ustaw domyślny model do przetwarzania osadzania dokumentów wiedzy; zarówno pozyskiwanie, jak i importowanie wiedzy wykorzystują ten model osadzania do przetwarzania wektorowego. Zmiana spowoduje niezgodność wymiarów wektorów między importowaną wiedzą a pytaniem, co skutkować będzie niepowodzeniem w pozyskiwaniu. Aby uniknąć niepowodzeń, prosimy nie zmieniać tego modelu dowolnie.', + required: 'Model osadzania jest wymagany', + }, + speechToTextModel: { + key: 'Model mowy na tekst', + tip: 'Ustaw domyślny model do przetwarzania mowy na tekst w rozmowach.', + }, + ttsModel: { + key: 'Model tekstu na mowę', + tip: 'Ustaw domyślny model dla konwersji tekstu na mowę w rozmowach.', + }, + rerankModel: { + key: 'Model ponownego rankingu', + tip: 'Model ponownego rankingu zmieni kolejność listy dokumentów kandydatów na podstawie semantycznego dopasowania z zapytaniem użytkownika, poprawiając wyniki rankingu semantycznego', + }, + quota: 'Limit', + searchModel: 'Model wyszukiwania', + noModelFound: 'Nie znaleziono modelu dla {{model}}', + models: 'Modele', + showMoreModelProvider: 'Pokaż więcej dostawców modeli', + selector: { + tip: 'Ten model został usunięty. Proszę dodać model lub wybrać inny model.', + emptyTip: 'Brak dostępnych modeli', + emptySetting: 'Przejdź do ustawień, aby skonfigurować', + rerankTip: 'Proszę skonfigurować model ponownego rankingu', + }, + card: { + quota: 'LIMIT', + onTrial: 'Na próbę', + paid: 'Płatny', + quotaExhausted: 'Wyczerpany limit', + callTimes: 'Czasy wywołań', + tokens: 'Tokeny', + buyQuota: 'Kup limit', + priorityUse: 'Używanie z priorytetem', + removeKey: 'Usuń klucz API', + tip: 'Priorytet zostanie nadany płatnemu limitowi. Po wyczerpaniu limitu próbnego zostanie użyty limit płatny.', + }, + item: { + deleteDesc: + '{{modelName}} są używane jako modele wnioskowania systemowego. Niektóre funkcje mogą nie być dostępne po usunięciu. 
Proszę potwierdź.', + freeQuota: 'LIMIT GRATIS', + }, + addApiKey: 'Dodaj swój klucz API', + invalidApiKey: 'Nieprawidłowy klucz API', + encrypted: { + front: 'Twój KLUCZ API będzie szyfrowany i przechowywany za pomocą', + back: ' technologii.', + }, + freeQuota: { + howToEarn: 'Jak zdobyć', + }, + addMoreModelProvider: 'DODAJ WIĘCEJ DOSTAWCÓW MODELI', + addModel: 'Dodaj model', + modelsNum: '{{num}} Modele', + showModels: 'Pokaż modele', + showModelsNum: 'Pokaż {{num}} modele', + collapse: 'Zwiń', + config: 'Konfiguracja', + modelAndParameters: 'Model i parametry', + model: 'Model', + featureSupported: '{{feature}} obsługiwane', + callTimes: 'Czasy wywołań', + credits: 'Kredyty wiadomości', + buyQuota: 'Kup limit', + getFreeTokens: 'Odbierz darmowe tokeny', + priorityUsing: 'Priorytetyzacja użycia', + deprecated: 'Przestarzałe', + confirmDelete: 'potwierdzić usunięcie?', + quotaTip: 'Pozostałe dostępne darmowe tokeny', + loadPresets: 'Załaduj ustawienia wstępne', + parameters: 'PARAMETRY', + }, + dataSource: { + add: 'Dodaj źródło danych', + connect: 'Połącz', + notion: { + title: 'Notion', + description: 'Korzystanie z Notion jako źródła danych dla Wiedzy.', + connectedWorkspace: 'Połączona przestrzeń robocza', + addWorkspace: 'Dodaj przestrzeń roboczą', + connected: 'Połączono', + disconnected: 'Rozłączono', + changeAuthorizedPages: 'Zmień uprawnione strony', + pagesAuthorized: 'Strony autoryzowane', + sync: 'Synchronizuj', + remove: 'Usuń', + selector: { + pageSelected: 'Zaznaczone strony', + searchPages: 'Szukaj stron...', + noSearchResult: 'Brak wyników wyszukiwania', + addPages: 'Dodaj strony', + preview: 'PODGLĄD', + }, + }, + }, + plugin: { + serpapi: { + apiKey: 'Klucz API', + apiKeyPlaceholder: 'Wprowadź swój klucz API', + keyFrom: 'Pobierz swój klucz SerpAPI ze strony konta SerpAPI', + }, + }, + apiBasedExtension: { + title: + 'Rozszerzenia oparte na interfejsie API zapewniają scentralizowane zarządzanie interfejsami API, upraszczając konfigurację dla łatwego użytkowania w aplikacjach Dify.', + link: 'Dowiedz się, jak opracować własne rozszerzenie interfejsu API.', + linkUrl: 'https://docs.dify.ai/features/extension/api_based_extension', + add: 'Dodaj rozszerzenie interfejsu API', + selector: { + title: 'Rozszerzenie interfejsu API', + placeholder: 'Wybierz rozszerzenie interfejsu API', + manage: 'Zarządzaj rozszerzeniem interfejsu API', + }, + modal: { + title: 'Dodaj rozszerzenie interfejsu API', + editTitle: 'Edytuj rozszerzenie interfejsu API', + name: { + title: 'Nazwa', + placeholder: 'Proszę wprowadź nazwę', + }, + apiEndpoint: { + title: 'Koniec API', + placeholder: 'Proszę wprowadź koniec API', + }, + apiKey: { + title: 'Klucz API', + placeholder: 'Proszę wprowadź klucz API', + lengthError: 'Długość klucza API nie może być mniejsza niż 5 znaków', + }, + }, + type: 'Typ', + }, + about: { + changeLog: 'Dziennik zmian', + updateNow: 'Aktualizuj teraz', + nowAvailable: 'Dify {{version}} jest teraz dostępny.', + latestAvailable: 'Dify {{version}} jest najnowszą dostępną wersją.', + }, + appMenus: { + overview: 'Przegląd', + promptEng: 'Orkiestracja', + apiAccess: 'Dostęp API', + logAndAnn: 'Logi i ogł.', + logs: 'Logi', + }, + environment: { + testing: 'TESTOWANIE', + development: 'ROZWOJOWA', + }, + appModes: { + completionApp: 'Generator tekstu', + chatApp: 'Aplikacja czatowa', + }, + datasetMenus: { + documents: 'Dokumenty', + hitTesting: 'Testowanie poboru', + settings: 'Ustawienia', + emptyTip: + 'Wiedza nie została powiązana, przejdź do aplikacji lub wtyczki, aby 
ukończyć powiązanie.', + viewDoc: 'Zobacz dokumentację', + relatedApp: 'powiązane aplikacje', + }, + voiceInput: { + speaking: 'Mów teraz...', + converting: 'Konwertowanie na tekst...', + notAllow: 'mikrofon nieautoryzowany', + }, + modelName: { + 'gpt-3.5-turbo': 'GPT-3.5-Turbo', + 'gpt-3.5-turbo-16k': 'GPT-3.5-Turbo-16K', + 'gpt-4': 'GPT-4', + 'gpt-4-32k': 'GPT-4-32K', + 'text-davinci-003': 'Tekst-Davinci-003', + 'text-embedding-ada-002': 'Tekst-Wan-Ada-002', + 'whisper-1': 'Szept-1', + 'claude-instant-1': 'Claude-Natychmiastowy', + 'claude-2': 'Claude-2', + }, + chat: { + renameConversation: 'Zmień nazwę rozmowy', + conversationName: 'Nazwa rozmowy', + conversationNamePlaceholder: 'Proszę wprowadź nazwę rozmowy', + conversationNameCanNotEmpty: 'Nazwa rozmowy wymagana', + citation: { + title: 'Cytaty', + linkToDataset: 'Link do Wiedzy', + characters: 'Postacie:', + hitCount: 'Liczba trafień:', + vectorHash: 'Wektor hash:', + hitScore: 'Wynik trafień:', + }, + }, + promptEditor: { + placeholder: + 'Wpisz swoje słowo kluczowe tutaj, wprowadź \'{\' aby wstawić zmienną, wprowadź \'/\' aby wstawić blok treści słownika', + context: { + item: { + title: 'Kontekst', + desc: 'Wstaw szablon kontekstu', + }, + modal: { + title: '{{num}} Wiedzy w Kontekście', + add: 'Dodaj Kontekst ', + footer: 'Możesz zarządzać kontekstami poniżej w sekcji Kontekstów.', + }, + }, + history: { + item: { + title: 'Historia rozmów', + desc: 'Wstaw szablon historycznej wiadomości', + }, + modal: { + title: 'PRZYKŁAD', + user: 'Cześć', + assistant: 'Cześć! W czym mogę pomóc?', + edit: 'Edytuj nazwy ról rozmów', + }, + }, + variable: { + item: { + title: 'Zmienne i Narzędzia Zewnętrzne', + desc: 'Wstaw Zmienne i Narzędzia Zewnętrzne', + }, + outputToolDisabledItem: { + title: 'Zmienne', + desc: 'Wstaw Zmienne', + }, + modal: { + add: 'Nowa zmienna', + addTool: 'Nowe narzędzie', + }, + }, + query: { + item: { + title: 'Zapytanie', + desc: 'Wstaw szablon zapytania użytkownika', + }, + }, + existed: 'Już istnieje w poleceniu', + }, + imageUploader: { + uploadFromComputer: 'Załaduj z komputera', + uploadFromComputerReadError: 'Błąd odczytu obrazu, spróbuj ponownie.', + uploadFromComputerUploadError: + 'Błąd przesyłania obrazu, prześlij go ponownie.', + uploadFromComputerLimit: + 'Obrazy do przesłania nie mogą przekroczyć {{size}} MB', + pasteImageLink: 'Wklej link do obrazu', + pasteImageLinkInputPlaceholder: 'Wklej tutaj link do obrazu', + pasteImageLinkInvalid: 'Nieprawidłowy link obrazu', + imageUpload: 'Przesyłanie obrazu', + }, + tag: { + placeholder: 'Wszystkie tagi', + addNew: 'Dodaj nowy tag', + noTag: 'Brak tagów', + noTagYet: 'Brak tagów jeszcze', + addTag: 'Dodaj tagi', + editTag: 'Edytuj tagi', + manageTags: 'Zarządzaj Tagami', + selectorPlaceholder: 'Wpisz, aby wyszukać lub utworzyć', + create: 'Utwórz', + delete: 'Usuń tag', + deleteTip: 'Ten tag jest używany, czy chcesz go usunąć?', + created: 'Tag został pomyślnie utworzony', + failed: 'Nie udało się utworzyć tagu', + }, +} + +export default translation diff --git a/web/i18n/pl-PL/custom.ts b/web/i18n/pl-PL/custom.ts new file mode 100644 index 0000000000..15d71cceea --- /dev/null +++ b/web/i18n/pl-PL/custom.ts @@ -0,0 +1,31 @@ +const translation = { + custom: 'Dostosowanie', + upgradeTip: { + prefix: 'Zaktualizuj swój plan, aby', + suffix: 'dostosować swoją markę.', + }, + webapp: { + title: 'Dostosuj markę aplikacji internetowej', + removeBrand: 'Usuń zasilane przez Dify', + changeLogo: 'Zmień obraz marki zasilany przez Brand', + changeLogoTip: 'Format SVG 
lub PNG o minimalnym rozmiarze 40x40px', + }, + app: { + title: 'Dostosuj markę nagłówka aplikacji', + changeLogoTip: 'Format SVG lub PNG o minimalnym rozmiarze 80x80px', + }, + upload: 'Prześlij', + uploading: 'Przesyłanie', + uploadedFail: + 'Wystąpił problem podczas przesyłania obrazu, proszę spróbować ponownie.', + change: 'Zmień', + apply: 'Zastosuj', + restore: 'Przywróć domyślne', + customize: { + contactUs: ' skontaktuj się z nami ', + prefix: 'Aby dostosować logo marki w aplikacji, proszę', + suffix: 'dla aktualizacji do wersji Enterprise.', + }, +} + +export default translation diff --git a/web/i18n/pl-PL/dataset-creation.ts b/web/i18n/pl-PL/dataset-creation.ts new file mode 100644 index 0000000000..1b12e51b05 --- /dev/null +++ b/web/i18n/pl-PL/dataset-creation.ts @@ -0,0 +1,146 @@ +const translation = { + steps: { + header: { + creation: 'Utwórz Wiedzę', + update: 'Dodaj dane', + }, + one: 'Wybierz źródło danych', + two: 'Przetwarzanie i Czyszczenie Tekstu', + three: 'Wykonaj i zakończ', + }, + error: { + unavailable: 'Ta Wiedza nie jest dostępna', + }, + stepOne: { + filePreview: 'Podgląd pliku', + pagePreview: 'Podgląd strony', + dataSourceType: { + file: 'Importuj z pliku tekstowego', + notion: 'Synchronizuj z Notion', + web: 'Synchronizuj z witryny', + }, + uploader: { + title: 'Prześlij plik tekstowy', + button: 'Przeciągnij i upuść plik lub', + browse: 'Przeglądaj', + tip: 'Obsługuje {{supportTypes}}. Maksymalnie {{size}}MB każdy.', + validation: { + typeError: 'Nieobsługiwany typ pliku', + size: 'Plik jest za duży. Maksymalnie {{size}}MB', + count: 'Nieobsługiwane przesyłanie wielu plików', + filesNumber: 'Osiągnąłeś limit przesłania partii {{filesNumber}}.', + }, + cancel: 'Anuluj', + change: 'Zmień', + failed: 'Przesyłanie nie powiodło się', + }, + notionSyncTitle: 'Notion nie jest podłączony', + notionSyncTip: + 'Aby synchronizować z Notion, najpierw trzeba ustanowić połączenie z Notion.', + connect: 'Przejdź do połączenia', + button: 'dalej', + emptyDatasetCreation: 'Chcę utworzyć pustą Wiedzę', + modal: { + title: 'Utwórz pustą Wiedzę', + tip: 'Pusta Wiedza nie będzie zawierała żadnych dokumentów, a można przesyłać dokumenty w dowolnym momencie.', + input: 'Nazwa Wiedzy', + placeholder: 'Proszę wpisz', + nameNotEmpty: 'Nazwa nie może być pusta', + nameLengthInvaild: 'Nazwa musi zawierać od 1 do 40 znaków', + cancelButton: 'Anuluj', + confirmButton: 'Utwórz', + failed: 'Utworzenie nie powiodło się', + }, + }, + stepTwo: { + segmentation: 'Ustawienia bloków tekstu', + auto: 'Automatycznie', + autoDescription: + 'Automatyczne ustawianie bloków i reguł preprocessingu. Nieużytkownicy są zaleceni do wyboru tej opcji.', + custom: 'Niestandardowo', + customDescription: + 'Dostosuj reguły bloków, długość bloków i reguły preprocessingu itp.', + separator: 'Separator bloków', + separatorPlaceholder: + 'Na przykład nowa linia (\\n) lub specjalny separator (np. "***")', + maxLength: 'Maksymalna długość bloku', + overlap: 'Nakładka bloków', + overlapTip: + 'Ustawienie nakładki bloków pozwala zachować semantyczną zgodność między nimi, poprawiając efekt pobierania. 
Zaleca się ustawienie 10%-25% maksymalnej długości bloku.', + overlapCheck: + 'nakładka bloków nie powinna być większa niż maksymalna długość bloku', + rules: 'Reguły preprocessingu tekstu', + removeExtraSpaces: 'Zastąp kolejne spacje, nowe linie i tabulatory', + removeUrlEmails: 'Usuń wszystkie adresy URL i e-maile', + removeStopwords: 'Usuń słowa powszechne takie jak "a", "an", "the"', + preview: 'Potwierdź i Podgląd', + reset: 'Reset', + indexMode: 'Tryb indeksowania', + qualified: 'Wysoka jakość', + recommend: 'Polecać', + qualifiedTip: + 'Wywołaj domyślne interfejsy wbudowania systemu do przetwarzania, zapewniając wyższą dokładność podczas zapytań przez użytkowników.', + warning: 'Proszę najpierw skonfigurować klucz API dostawcy modelu.', + click: 'Przejdź do ustawień', + economical: 'Ekonomiczny', + economicalTip: + 'Użyj offline\'owych silników wektorowych, indeksów słów kluczowych itp., aby zmniejszyć dokładność bez wydawania tokenów', + QATitle: 'Segmentacja w formacie pytania i odpowiedzi', + QATip: 'Włączenie tej opcji spowoduje zużycie większej liczby tokenów', + QALanguage: 'Segmentacja przy użyciu', + emstimateCost: 'Oszacowanie', + emstimateSegment: 'Oszacowane bloki', + segmentCount: 'bloki', + calculating: 'Obliczanie...', + fileSource: 'Przetwarzaj dokumenty', + notionSource: 'Przetwarzaj strony', + other: 'i inne ', + fileUnit: ' plików', + notionUnit: ' stron', + previousStep: 'Poprzedni krok', + nextStep: 'Zapisz & Przetwarzaj', + save: 'Zapisz & Przetwarzaj', + cancel: 'Anuluj', + sideTipTitle: 'Dlaczego blok i preprocess?', + sideTipP1: + 'Podczas przetwarzania danych tekstowych, blok i czyszczenie są dwoma ważnymi krokami preprocessingu.', + sideTipP2: + 'Segmentacja dzieli długi tekst na akapity, dzięki czemu modele są w stanie lepiej zrozumieć. Poprawia to jakość i trafność wyników modelu.', + sideTipP3: + 'Czyszczenie usuwa zbędne znaki i formatowanie, sprawiając, że Wiedza jest czystsza i łatwiejsza do analizy.', + sideTipP4: + 'Odpowiednie blok i czyszczenie poprawiają wydajność modelu, zapewniając bardziej dokładne i wartościowe wyniki.', + previewTitle: 'Podgląd', + previewTitleButton: 'Podgląd', + previewButton: 'Przełącz do formatu pytania i odpowiedzi', + previewSwitchTipStart: + 'Aktulany podgląd bloku jest w formacie tekstu, przełączenie na podgląd w formacie pytania i odpowiedzi spowoduje', + previewSwitchTipEnd: ' dodatkowe zużycie tokenów', + characters: 'znaki', + indexSettedTip: 'Aby zmienić metodę indeksowania, przejdź do ', + retrivalSettedTip: 'Aby zmienić metodę indeksowania, przejdź do ', + datasetSettingLink: 'ustawień Wiedzy.', + }, + stepThree: { + creationTitle: '🎉 Utworzono Wiedzę', + creationContent: + 'Automatycznie nadaliśmy nazwę Wiedzy, możesz ją dowolnie zmienić w każdej chwili', + label: 'Nazwa Wiedzy', + additionTitle: '🎉 Przesłano dokument', + additionP1: 'Dokument został przesłany do Wiedzy', + additionP2: ', możesz go znaleźć na liście dokumentów Wiedzy.', + stop: 'Zatrzymaj przetwarzanie', + resume: 'Wznów przetwarzanie', + navTo: 'Przejdź do dokumentu', + sideTipTitle: 'Co dalej', + sideTipContent: + 'Po zakończeniu indeksowania dokumentu, Wiedza może być zintegrowana z aplikacją jako kontekst, można znaleźć ustawienie kontekstu na stronie orkiestracji. 
Można również stworzyć ją jako niezależny plugin indeksowania ChatGPT do wydania.', + modelTitle: 'Czy na pewno chcesz zatrzymać embedded?', + modelContent: + 'Jeśli będziesz potrzebować wznowić przetwarzanie później, będziesz kontynuować od miejsca, w którym przerwałeś.', + modelButtonConfirm: 'Potwierdź', + modelButtonCancel: 'Anuluj', + }, +} + +export default translation diff --git a/web/i18n/pl-PL/dataset-documents.ts b/web/i18n/pl-PL/dataset-documents.ts new file mode 100644 index 0000000000..f8617a29cf --- /dev/null +++ b/web/i18n/pl-PL/dataset-documents.ts @@ -0,0 +1,350 @@ +const translation = { + list: { + title: 'Dokumenty', + desc: 'Wszystkie pliki wiedzy są tutaj pokazane, a cała wiedza może być powiązana z odnośnikami Dify lub zindeksowana za pomocą wtyczki Chat.', + addFile: 'dodaj plik', + addPages: 'Dodaj strony', + table: { + header: { + fileName: 'NAZWA PLIKU', + words: 'SŁOWA', + hitCount: 'LICZBA ZNALEZIEŃ', + uploadTime: 'CZAS WGRANIA', + status: 'STATUS', + action: 'AKCJA', + }, + }, + action: { + uploadFile: 'Wgraj nowy plik', + settings: 'Ustawienia segmentacji', + addButton: 'Dodaj fragment', + add: 'Dodaj fragment', + batchAdd: 'Dodaj partię', + archive: 'Archiwum', + unarchive: 'Usuń z archiwum', + delete: 'Usuń', + enableWarning: 'Zarchiwizowany plik nie może zostać włączony', + sync: 'Synchronizuj', + }, + index: { + enable: 'Włącz', + disable: 'Wyłącz', + all: 'Wszystkie', + enableTip: 'Plik może być zindeksowany', + disableTip: 'Plik nie może być zindeksowany', + }, + status: { + queuing: 'Oczekiwanie', + indexing: 'Indeksowanie', + paused: 'Wstrzymane', + error: 'Błąd', + available: 'Dostępny', + enabled: 'Włączony', + disabled: 'Wyłączony', + archived: 'Zaarchiwizowany', + }, + empty: { + title: 'Nie ma jeszcze dokumentacji', + upload: { + tip: 'Możesz wgrać pliki, synchronizować z witryny lub z aplikacji internetowych takich jak Notion, GitHub, itp.', + }, + sync: { + tip: 'Dify regularnie pobiera pliki z Twojego Notion i dokonuje ich przetwarzania.', + }, + }, + delete: { + title: 'Czy na pewno chcesz usunąć?', + content: + 'Jeśli będziesz musiał wznowić przetwarzanie później, będziesz kontynuować tam, gdzie przerwałeś', + }, + batchModal: { + title: 'Dodaj partię fragmentów', + csvUploadTitle: 'Przeciągnij i upuść swój plik CSV tutaj, lub ', + browse: 'wybierz', + tip: 'Plik CSV musi być zgodny z następującą strukturą:', + question: 'pytanie', + answer: 'odpowiedź', + contentTitle: 'treść fragmentu', + content: 'treść', + template: 'Pobierz szablon tutaj', + cancel: 'Anuluj', + run: 'Uruchom partię', + runError: 'Błąd uruchomienia partii', + processing: 'Przetwarzanie partii', + completed: 'Import zakończony', + error: 'Błąd importu', + ok: 'OK', + }, + }, + metadata: { + title: 'Metadane', + desc: 'Etykietowanie metadanych dla dokumentów pozwala sztucznej inteligencji na dostęp do nich w odpowiednim czasie i ujawnia źródło odniesień dla użytkowników.', + dateTimeFormat: 'D MMMM YYYY, HH:mm', + docTypeSelectTitle: 'Wybierz rodzaj dokumentu', + docTypeChangeTitle: 'Zmień rodzaj dokumentu', + docTypeSelectWarning: + 'Jeśli zmieniony zostanie rodzaj dokumentu, teraz wypełnione metadane nie zostaną zachowane', + firstMetaAction: 'Zacznijmy', + placeholder: { + add: 'Dodaj ', + select: 'Wybierz ', + }, + source: { + upload_file: 'Wgraj plik', + notion: 'Synchronizuj z Notion', + github: 'Synchronizuj z Github', + }, + type: { + book: 'Książka', + webPage: 'Strona internetowa', + paper: 'Artykuł', + socialMediaPost: 'Post w mediach społecznościowych', + 
personalDocument: 'Dokument osobisty', + businessDocument: 'Dokument biznesowy', + IMChat: 'Czat na komunikatorze', + wikipediaEntry: 'Artykuł w Wikipedii', + notion: 'Synchronizuj z Notion', + github: 'Synchronizuj z Github', + technicalParameters: 'Parametry techniczne', + }, + field: { + processRule: { + processDoc: 'Przetwórz dokument', + segmentRule: 'Reguła fragmentacji', + segmentLength: 'Długość fragmentów', + processClean: 'Oczyszczanie tekstu', + }, + book: { + title: 'Tytuł', + language: 'Język', + author: 'Autor', + publisher: 'Wydawca', + publicationDate: 'Data publikacji', + ISBN: 'ISBN', + category: 'Kategoria', + }, + webPage: { + title: 'Tytuł', + url: 'URL', + language: 'Język', + authorPublisher: 'Autor/Wydawca', + publishDate: 'Data publikacji', + topicsKeywords: 'Tematy/Słowa kluczowe', + description: 'Opis', + }, + paper: { + title: 'Tytuł', + language: 'Język', + author: 'Autor', + publishDate: 'Data publikacji', + journalConferenceName: 'Nazwa czasopisma/konferencji', + volumeIssuePage: 'Tom/Wydanie/Strona', + DOI: 'DOI', + topicsKeywords: 'Tematy/Słowa kluczowe', + abstract: 'Abstrakt', + }, + socialMediaPost: { + platform: 'Platforma', + authorUsername: 'Autor/Nazwa użytkownika', + publishDate: 'Data publikacji', + postURL: 'Adres URL posta', + topicsTags: 'Tematy/Tagi', + }, + personalDocument: { + title: 'Tytuł', + author: 'Autor', + creationDate: 'Data utworzenia', + lastModifiedDate: 'Data ostatniej modyfikacji', + documentType: 'Typ dokumentu', + tagsCategory: 'Tagi/Kategoria', + }, + businessDocument: { + title: 'Tytuł', + author: 'Autor', + creationDate: 'Data utworzenia', + lastModifiedDate: 'Data ostatniej modyfikacji', + documentType: 'Typ dokumentu', + departmentTeam: 'Dział/Zespół', + }, + IMChat: { + chatPlatform: 'Platforma czatu', + chatPartiesGroupName: 'Podmioty czatu/Nazwa grupy', + participants: 'Uczestnicy', + startDate: 'Data rozpoczęcia', + endDate: 'Data zakończenia', + topicsKeywords: 'Tematy/Słowa kluczowe', + fileType: 'Typ pliku', + }, + wikipediaEntry: { + title: 'Tytuł', + language: 'Język', + webpageURL: 'Adres URL strony internetowej', + editorContributor: 'Edytor/Współtwórca', + lastEditDate: 'Data ostatniej edycji', + summaryIntroduction: 'Podsumowanie/Wstęp', + }, + notion: { + title: 'Tytuł', + language: 'Język', + author: 'Autor', + createdTime: 'Czas utworzenia', + lastModifiedTime: 'Czas ostatniej modyfikacji', + url: 'URL', + tag: 'Tag', + description: 'Opis', + }, + github: { + repoName: 'Nazwa repozytorium', + repoDesc: 'Opis repozytorium', + repoOwner: 'Właściciel repozytorium', + fileName: 'Nazwa pliku', + filePath: 'Ścieżka pliku', + programmingLang: 'Język programowania', + url: 'URL', + license: 'Licencja', + lastCommitTime: 'Czas ostatniego zobowiązania', + lastCommitAuthor: 'Autor ostatniego zobowiązania', + }, + originInfo: { + originalFilename: 'Oryginalna nazwa pliku', + originalFileSize: 'Oryginalny rozmiar pliku', + uploadDate: 'Data wgrywania', + lastUpdateDate: 'Data ostatniej aktualizacji', + source: 'Źródło', + }, + technicalParameters: { + segmentSpecification: 'Specyfikacja fragmentów', + segmentLength: 'Długość fragmentów', + avgParagraphLength: 'Średnia długość akapitu', + paragraphs: 'Akapity', + hitCount: 'Liczba odwołań', + embeddingTime: 'Czas embedowania', + embeddedSpend: 'Wydatki związane z embedowaniem', + }, + }, + languageMap: { + zh: 'Chiński', + en: 'Angielski', + es: 'Hiszpański', + fr: 'Francuski', + de: 'Niemiecki', + ja: 'Japoński', + ko: 'Koreański', + ru: 'Rosyjski', + ar: 'Arabski', + 
pt: 'Portugalski', + it: 'Włoski', + nl: 'Holenderski', + pl: 'Polski', + sv: 'Szwedzki', + tr: 'Turecki', + he: 'Hebrajski', + hi: 'Hinduski', + da: 'Duński', + fi: 'Fiński', + no: 'Norweski', + hu: 'Węgierski', + el: 'Grecki', + cs: 'Czeski', + th: 'Tajski', + id: 'Indonezyjski', + }, + categoryMap: { + book: { + fiction: 'Literatura piękna', + biography: 'Biografia', + history: 'Historia', + science: 'Nauka', + technology: 'Technologia', + education: 'Edukacja', + philosophy: 'Filozofia', + religion: 'Religia', + socialSciences: 'Nauki społeczne', + art: 'Sztuka', + travel: 'Podróże', + health: 'Zdrowie', + selfHelp: 'Samorozwój', + businessEconomics: 'Biznes/ekonomia', + cooking: 'Gotowanie', + childrenYoungAdults: 'Dzieci/Młodzież', + comicsGraphicNovels: 'Komiksy/Graphic Novels', + poetry: 'Poezja', + drama: 'Dramat', + other: 'Inne', + }, + personalDoc: { + notes: 'Notatki', + blogDraft: 'Wersja robocza bloga', + diary: 'Dziennik', + researchReport: 'Raport badawczy', + bookExcerpt: 'Fragment książki', + schedule: 'Harmonogram', + list: 'Lista', + projectOverview: 'Przegląd projektu', + photoCollection: 'Kolekcja zdjęć', + creativeWriting: 'Twórcze pisanie', + codeSnippet: 'Fragment kodu', + designDraft: 'Projekt/wersja robocza', + personalResume: 'CV', + other: 'Inne', + }, + businessDoc: { + meetingMinutes: 'Protokoły zebrań', + researchReport: 'Raport badawczy', + proposal: 'Propozycja', + employeeHandbook: 'Podręcznik pracownika', + trainingMaterials: 'Materiały szkoleniowe', + requirementsDocument: 'Dokument wymagań', + designDocument: 'Dokument projektowy', + productSpecification: 'Specyfikacja produktu', + financialReport: 'Raport finansowy', + marketAnalysis: 'Analiza rynku', + projectPlan: 'Plan projektu', + teamStructure: 'Struktura zespołu', + policiesProcedures: 'Zasady i procedury', + contractsAgreements: 'Umowy', + emailCorrespondence: 'Korespondencja e-mailowa', + other: 'Inne', + }, + }, + }, + embedding: { + processing: 'Przetwarzanie osadzania...', + paused: 'Osadzanie wstrzymane', + completed: 'Osadzanie zakończone', + error: 'Błąd osadzania', + docName: 'Przetwarzanie wstępne dokumentu', + mode: 'Reguła segmentacji', + segmentLength: 'Długość fragmentów', + textCleaning: 'Predefinicja tekstu i czyszczenie', + segments: 'Akapity', + highQuality: 'Tryb wysokiej jakości', + economy: 'Tryb ekonomiczny', + estimate: 'Szacowany czas', + stop: 'Zatrzymaj przetwarzanie', + resume: 'Wznów przetwarzanie', + automatic: 'Automatyczny', + custom: 'Niestandardowy', + previewTip: 'Podgląd akapitu będzie dostępny po zakończeniu osadzania', + }, + segment: { + paragraphs: 'Akapity', + keywords: 'Słowa kluczowe', + addKeyWord: 'Dodaj słowo kluczowe', + keywordError: 'Maksymalna długość słowa kluczowego wynosi 20', + characters: 'znaków', + hitCount: 'Liczba odwołań', + vectorHash: 'Wektor hash: ', + questionPlaceholder: 'dodaj pytanie tutaj', + questionEmpty: 'Pytanie nie może być puste', + answerPlaceholder: 'dodaj odpowiedź tutaj', + answerEmpty: 'Odpowiedź nie może być pusta', + contentPlaceholder: 'dodaj treść tutaj', + contentEmpty: 'Treść nie może być pusta', + newTextSegment: 'Nowy segment tekstowy', + newQaSegment: 'Nowy segment Q&A', + delete: 'Usunąć ten fragment?', + }, +} + +export default translation diff --git a/web/i18n/pl-PL/dataset-hit-testing.ts b/web/i18n/pl-PL/dataset-hit-testing.ts new file mode 100644 index 0000000000..25c6babc55 --- /dev/null +++ b/web/i18n/pl-PL/dataset-hit-testing.ts @@ -0,0 +1,28 @@ +const translation = { + title: 'Testowanie 
odzyskiwania', + desc: 'Przetestuj efekt uderzenia wiedzy na podstawie podanego tekstu zapytania.', + dateTimeFormat: 'MM/DD/YYYY hh:mm A', + recents: 'Ostatnie', + table: { + header: { + source: 'Źródło', + text: 'Tekst', + time: 'Czas', + }, + }, + input: { + title: 'Tekst źródłowy', + placeholder: 'Proszę wpisać tekst, zaleca się krótkie zdanie deklaratywne.', + countWarning: 'Do 200 znaków.', + indexWarning: 'Tylko wiedza wysokiej jakości.', + testing: 'Testowanie', + }, + hit: { + title: 'AKAPITY ODZYSKIWANIA', + emptyTip: 'Wyniki testowania odzyskiwania będą tu pokazane', + }, + noRecentTip: 'Brak ostatnich wyników zapytań tutaj', + viewChart: 'Zobacz WYKRES WEKTOROWY', +} + +export default translation diff --git a/web/i18n/pl-PL/dataset-settings.ts b/web/i18n/pl-PL/dataset-settings.ts new file mode 100644 index 0000000000..06788f9332 --- /dev/null +++ b/web/i18n/pl-PL/dataset-settings.ts @@ -0,0 +1,38 @@ +const translation = { + title: 'Ustawienia wiedzy', + desc: 'Tutaj możesz modyfikować właściwości i metody działania Wiedzy.', + form: { + name: 'Nazwa wiedzy', + namePlaceholder: 'Proszę wprowadzić nazwę wiedzy', + nameError: 'Nazwa nie może być pusta', + desc: 'Opis wiedzy', + descInfo: + 'Proszę napisać klarowny opis tekstowy, aby zarysować zawartość Wiedzy. Ten opis będzie wykorzystywany jako podstawa do dopasowywania podczas wyboru z wielu wiedz dla wnioskowania.', + descPlaceholder: + 'Opisz, co znajduje się w tej Wiedzy. Szczegółowy opis pozwala sztucznej inteligencji na dostęp do treści Wiedzy w odpowiednim czasie. Jeśli jest pusty, Dify użyje domyślnej strategii trafień.', + descWrite: 'Dowiedz się, jak napisać dobry opis Wiedzy.', + permissions: 'Uprawnienia', + permissionsOnlyMe: 'Tylko ja', + permissionsAllMember: 'Wszyscy członkowie zespołu', + indexMethod: 'Metoda indeksowania', + indexMethodHighQuality: 'Wysoka jakość', + indexMethodHighQualityTip: + 'Wywołaj interfejs wbudowywanie OpenAI do przetwarzania, aby zapewnić wyższą dokładność przy zapytaniach użytkowników.', + indexMethodEconomy: 'Ekonomiczna', + indexMethodEconomyTip: + 'Użyj silników wektorów offline, indeksów słów kluczowych itp., aby zmniejszyć dokładność bez wydawania tokenów', + embeddingModel: 'Model wbudowywania', + embeddingModelTip: 'Aby zmienić model wbudowywania, przejdź do ', + embeddingModelTipLink: 'Ustawienia', + retrievalSetting: { + title: 'Ustawienia doboru', + learnMore: 'Dowiedz się więcej', + description: ' dotyczące metody doboru.', + longDescription: + ' dotyczące metody doboru, możesz to zmienić w dowolnym momencie w ustawieniach wiedzy.', + }, + save: 'Zapisz', + }, +} + +export default translation diff --git a/web/i18n/pl-PL/dataset.ts b/web/i18n/pl-PL/dataset.ts new file mode 100644 index 0000000000..2401004a22 --- /dev/null +++ b/web/i18n/pl-PL/dataset.ts @@ -0,0 +1,55 @@ +const translation = { + knowledge: 'Wiedza', + documentCount: ' dokumenty', + wordCount: ' k słów', + appCount: ' powiązane aplikacje', + createDataset: 'Utwórz Wiedzę', + createDatasetIntro: + 'Zaimportuj własne dane tekstowe lub zapisuj dane w czasie rzeczywistym za pomocą Webhooka w celu wzmocnienia kontekstu LLM.', + deleteDatasetConfirmTitle: 'Czy na pewno usunąć tę Wiedzę?', + deleteDatasetConfirmContent: + 'Usunięcie Wiedzy jest nieodwracalne. 
Użytkownicy nie będą już mieli dostępu do Twojej Wiedzy, a wszystkie konfiguracje i logi zostaną trwale usunięte.', + datasetDeleted: 'Wiedza usunięta', + datasetDeleteFailed: 'Nie udało się usunąć Wiedzy', + didYouKnow: 'Czy wiedziałeś?', + intro1: 'Wiedzę można zintegrować z aplikacją Dify ', + intro2: 'jako kontekst', + intro3: ',', + intro4: 'lub ', + intro5: 'może być utworzona', + intro6: ' jako samodzielny wtyczka indeksująca ChatGPT do publikacji', + unavailable: 'Niedostępny', + unavailableTip: + 'Model osadzający jest niedostępny, domyślny model osadzający musi być skonfigurowany', + datasets: 'WIEDZA', + datasetsApi: 'DOSTĘP DO API', + retrieval: { + semantic_search: { + title: 'Wyszukiwanie wektorowe', + description: + 'Generowanie osadzeń zapytań i wyszukiwanie fragmentów tekstu najbardziej podobnych do ich wektorowej reprezentacji.', + }, + full_text_search: { + title: 'Wyszukiwanie pełnotekstowe', + description: + 'Indeksowanie wszystkich terminów w dokumencie, umożliwiając użytkownikom wyszukiwanie dowolnego terminu i odzyskiwanie odpowiedniego fragmentu tekstu zawierającego te terminy.', + }, + hybrid_search: { + title: 'Wyszukiwanie hybrydowe', + description: + 'Wykonaj jednocześnie pełnotekstowe wyszukiwanie i wyszukiwanie wektorowe, ponownie porządkuj, aby wybrać najlepsze dopasowanie dla zapytania użytkownika. Konieczna jest konfiguracja API Rerank model.', + recommend: 'Polecany', + }, + invertedIndex: { + title: 'Indeks odwrócony', + description: + 'Indeks odwrócony to struktura używana do efektywnego odzyskiwania informacji. Zorganizowane według terminów, każdy termin wskazuje na dokumenty lub strony internetowe zawierające go.', + }, + change: 'Zmień', + changeRetrievalMethod: 'Zmień metodę odzyskiwania', + }, + docsFailedNotice: 'nie udało się zindeksować dokumentów', + retry: 'Ponów', +} + +export default translation diff --git a/web/i18n/pl-PL/explore.ts b/web/i18n/pl-PL/explore.ts new file mode 100644 index 0000000000..052eb303bd --- /dev/null +++ b/web/i18n/pl-PL/explore.ts @@ -0,0 +1,42 @@ +const translation = { + title: 'Odkryj', + sidebar: { + discovery: 'Odkrywanie', + chat: 'Czat', + workspace: 'Przestrzeń robocza', + action: { + pin: 'Przypnij', + unpin: 'Odepnij', + rename: 'Zmień nazwę', + delete: 'Usuń', + }, + delete: { + title: 'Usuń aplikację', + content: 'Czy na pewno chcesz usunąć tę aplikację?', + }, + }, + apps: { + title: 'Odkrywaj aplikacje stworzone przez Dify', + description: + 'Wykorzystaj te aplikacje szablonowe natychmiast lub dostosuj własne aplikacje na podstawie szablonów.', + allCategories: 'Polecane', + }, + appCard: { + addToWorkspace: 'Dodaj do przestrzeni roboczej', + customize: 'Dostosuj', + }, + appCustomize: { + title: 'Utwórz aplikację z {{name}}', + subTitle: 'Ikona i nazwa aplikacji', + nameRequired: 'Nazwa aplikacji jest wymagana', + }, + category: { + Assistant: 'Asystent', + Writing: 'Pisanie', + Translate: 'Tłumaczenie', + Programming: 'Programowanie', + HR: 'HR', + }, +} + +export default translation diff --git a/web/i18n/pl-PL/layout.ts b/web/i18n/pl-PL/layout.ts new file mode 100644 index 0000000000..928649474b --- /dev/null +++ b/web/i18n/pl-PL/layout.ts @@ -0,0 +1,4 @@ +const translation = { +} + +export default translation diff --git a/web/i18n/pl-PL/login.ts b/web/i18n/pl-PL/login.ts new file mode 100644 index 0000000000..b629ee598a --- /dev/null +++ b/web/i18n/pl-PL/login.ts @@ -0,0 +1,66 @@ +const translation = { + pageTitle: 'Cześć, zaczynajmy!👋', + welcome: 'Witaj w Dify, zaloguj się, aby kontynuować.', + 
email: 'Adres e-mail', + emailPlaceholder: 'Twój adres e-mail', + password: 'Hasło', + passwordPlaceholder: 'Twoje hasło', + name: 'Nazwa użytkownika', + namePlaceholder: 'Twoja nazwa użytkownika', + forget: 'Zapomniałeś hasła?', + signBtn: 'Zaloguj się', + sso: 'Kontynuuj za pomocą SSO', + installBtn: 'Ustaw', + setAdminAccount: 'Ustawianie konta administratora', + setAdminAccountDesc: + 'Maksymalne uprawnienia dla konta administratora, które można użyć do tworzenia aplikacji i zarządzania dostawcami LLM, itp.', + createAndSignIn: 'Utwórz i zaloguj się', + oneMoreStep: 'Jeszcze jeden krok', + createSample: + 'Na podstawie tych informacji, utworzymy dla Ciebie przykładową aplikację', + invitationCode: 'Kod zaproszenia', + invitationCodePlaceholder: 'Twój kod zaproszenia', + interfaceLanguage: 'Język interfejsu', + timezone: 'Strefa czasowa', + go: 'Przejdź do Dify', + sendUsMail: + 'Wyślij nam e-mail z swoim wstępem, a my zajmiemy się prośbą o zaproszenie.', + acceptPP: 'Przeczytałem/am i akceptuję politykę prywatności', + reset: 'Uruchom poniższą komendę, aby zresetować swoje hasło', + withGitHub: 'Kontynuuj za pomocą GitHub', + withGoogle: 'Kontynuuj za pomocą Google', + rightTitle: 'Odblokuj pełny potencjał LLM', + rightDesc: + 'Łatwo buduj wizualnie atrakcyjne, działające i udoskonalane aplikacje AI.', + tos: 'Warunki świadczenia usług', + pp: 'Polityka prywatności', + tosDesc: 'Założeniem konta zgadzasz się z naszymi', + goToInit: 'Jeśli nie zainicjowałeś konta, przejdź do strony inicjalizacji', + donthave: 'Nie masz?', + invalidInvitationCode: 'Niewłaściwy kod zaproszenia', + accountAlreadyInited: 'Konto już zainicjowane', + error: { + emailEmpty: 'Adres e-mail jest wymagany', + emailInValid: 'Proszę wpisać prawidłowy adres e-mail', + nameEmpty: 'Nazwa jest wymagana', + passwordEmpty: 'Hasło jest wymagane', + passwordInvalid: + 'Hasło musi zawierać litery i cyfry, a jego długość musi być większa niż 8', + }, + license: { + tip: 'Przed rozpoczęciem wersji społecznościowej Dify, przeczytaj GitHub', + link: 'Licencję open-source', + }, + join: 'Dołącz', + joinTipStart: 'Zapraszam Cię do dołączenia do', + joinTipEnd: 'zespołu na Dify', + invalid: 'Link wygasł', + explore: 'Odkryj Dify', + activatedTipStart: 'Dołączyłeś do', + activatedTipEnd: 'zespołu', + activated: 'Zaloguj się teraz', + adminInitPassword: 'Hasło inicjalizacyjne administratora', + validate: 'Sprawdź', +} + +export default translation diff --git a/web/i18n/pl-PL/register.ts b/web/i18n/pl-PL/register.ts new file mode 100644 index 0000000000..928649474b --- /dev/null +++ b/web/i18n/pl-PL/register.ts @@ -0,0 +1,4 @@ +const translation = { +} + +export default translation diff --git a/web/i18n/pl-PL/run-log.ts b/web/i18n/pl-PL/run-log.ts new file mode 100644 index 0000000000..a134057530 --- /dev/null +++ b/web/i18n/pl-PL/run-log.ts @@ -0,0 +1,29 @@ +const translation = { + input: 'WEJŚCIE', + result: 'WYNIK', + detail: 'SZCZEGÓŁY', + tracing: 'ŚLEDZENIE', + resultPanel: { + status: 'STATUS', + time: 'CZAS WYKONANIA', + tokens: 'CAŁKOWITA LICZBA TOKENÓW', + }, + meta: { + title: 'METADANE', + status: 'Status', + version: 'Wersja', + executor: 'Wykonawca', + startTime: 'Czas rozpoczęcia', + time: 'Czas trwania', + tokens: 'Liczba tokenów', + steps: 'Kroki wykonania', + }, + resultEmpty: { + title: 'To wykonanie generuje tylko format JSON,', + tipLeft: 'proszę przejdź do ', + link: 'panelu szczegółów', + tipRight: ' aby je zobaczyć.', + }, +} + +export default translation diff --git a/web/i18n/pl-PL/share-app.ts 
b/web/i18n/pl-PL/share-app.ts new file mode 100644 index 0000000000..d286a9c900 --- /dev/null +++ b/web/i18n/pl-PL/share-app.ts @@ -0,0 +1,75 @@ +const translation = { + common: { + welcome: 'Witaj w użyciu', + appUnavailable: 'Aplikacja jest niedostępna', + appUnkonwError: 'Aplikacja jest niedostępna', + }, + chat: { + newChat: 'Nowy czat', + pinnedTitle: 'Przypięte', + unpinnedTitle: 'Czaty', + newChatDefaultName: 'Nowa rozmowa', + resetChat: 'Resetuj rozmowę', + powerBy: 'Działany przez', + prompt: 'Podpowiedź', + privatePromptConfigTitle: 'Ustawienia rozmowy', + publicPromptConfigTitle: 'Początkowa podpowiedź', + configStatusDes: + 'Przed rozpoczęciem możesz zmodyfikować ustawienia rozmowy', + configDisabled: 'Ustawienia poprzedniej sesji zostały użyte w tej sesji.', + startChat: 'Zacznij czat', + privacyPolicyLeft: 'Proszę przeczytać ', + privacyPolicyMiddle: 'politykę prywatności', + privacyPolicyRight: ' dostarczoną przez dewelopera aplikacji.', + deleteConversation: { + title: 'Usuń rozmowę', + content: 'Czy na pewno chcesz usunąć tę rozmowę?', + }, + tryToSolve: 'Spróbuj rozwiązać', + temporarySystemIssue: 'Przepraszamy, tymczasowy problem systemowy.', + }, + generation: { + tabs: { + create: 'Uruchom raz', + batch: 'Uruchom partię', + saved: 'Zapisane', + }, + savedNoData: { + title: 'Nie zapisałeś jeszcze wyniku!', + description: + 'Zacznij generować treść i znajdź swoje zapisane wyniki tutaj.', + startCreateContent: 'Zacznij tworzyć treść', + }, + title: 'Uzupełnianie AI', + queryTitle: 'Zapytaj o treść', + completionResult: 'Wynik uzupełnienia', + queryPlaceholder: 'Wpisz swoją treść zapytania...', + run: 'Wykonaj', + copy: 'Kopiuj', + resultTitle: 'Uzupełnianie AI', + noData: 'AI poda Ci to, czego chcesz tutaj.', + csvUploadTitle: 'Przeciągnij i upuść plik CSV tutaj lub ', + browse: 'przeglądaj', + csvStructureTitle: 'Plik CSV musi być zgodny z następującą strukturą:', + downloadTemplate: 'Pobierz szablon tutaj', + field: 'Pole', + batchFailed: { + info: '{{num}} nieudanych wykonan', + retry: 'Powtórz', + outputPlaceholder: 'Brak treści wyjściowej', + }, + errorMsg: { + empty: 'Proszę wprowadź treść w załadowanym pliku.', + fileStructNotMatch: 'Załadowany plik CSV nie pasuje do struktury.', + emptyLine: 'Wiersz {{rowIndex}} jest pusty', + invalidLine: + 'Wiersz {{rowIndex}}: wartość {{varName}} nie może być pusta', + moreThanMaxLengthLine: + 'Wiersz {{rowIndex}}: wartość {{varName}} nie może mieć więcej niż {{maxLength}} znaków', + atLeastOne: + 'Proszę wprowadź co najmniej jeden wiersz w załadowanym pliku.', + }, + }, +} + +export default translation diff --git a/web/i18n/pl-PL/tools.ts b/web/i18n/pl-PL/tools.ts new file mode 100644 index 0000000000..6de77304dd --- /dev/null +++ b/web/i18n/pl-PL/tools.ts @@ -0,0 +1,119 @@ +const translation = { + title: 'Narzędzia', + createCustomTool: 'Utwórz niestandardowe narzędzie', + type: { + all: 'Wszystkie', + builtIn: 'Wbudowane', + custom: 'Niestandardowe', + }, + contribute: { + line1: 'Interesuje mnie ', + line2: 'współtworzenie narzędzi dla Dify.', + viewGuide: 'Zobacz przewodnik', + }, + author: 'Przez', + auth: { + unauthorized: 'Autoryzacja', + authorized: 'Zautoryzowane', + setup: 'Skonfiguruj autoryzację aby użyć', + setupModalTitle: 'Konfiguruj autoryzację', + setupModalTitleDescription: + 'Po skonfigurowaniu poświadczeń wszyscy członkowie w przestrzeni roboczej mogą używać tego narzędzia podczas projektowania aplikacji.', + }, + includeToolNum: '{{num}} narzędzi zawarte', + addTool: 'Dodaj narzędzie', + createTool: { + 
title: 'Utwórz niestandardowe narzędzie', + editAction: 'Konfiguruj', + editTitle: 'Edytuj niestandardowe narzędzie', + name: 'Nazwa', + toolNamePlaceHolder: 'Wprowadź nazwę narzędzia', + schema: 'Schemat', + schemaPlaceHolder: 'Wprowadź tutaj swój schemat OpenAPI', + viewSchemaSpec: 'Zobacz specyfikację OpenAPI-Swagger', + importFromUrl: 'Importuj z adresu URL', + importFromUrlPlaceHolder: 'https://...', + urlError: 'Proszę podać prawidłowy URL', + examples: 'Przykłady', + exampleOptions: { + json: 'Pogoda (JSON)', + yaml: 'Sklep Zoologiczny (YAML)', + blankTemplate: 'Pusty szablon', + }, + availableTools: { + title: 'Dostępne narzędzia', + name: 'Nazwa', + description: 'Opis', + method: 'Metoda', + path: 'Ścieżka', + action: 'Akcje', + test: 'Test', + }, + authMethod: { + title: 'Metoda autoryzacji', + type: 'Typ autoryzacji', + keyTooltip: + 'Klucz nagłówka HTTP, Możesz pozostawić go z "Autoryzacja" jeśli nie wiesz co to jest lub ustaw go na niestandardową wartość', + types: { + none: 'Brak', + api_key: 'Klucz API', + apiKeyPlaceholder: 'Nazwa nagłówka HTTP dla Klucza API', + apiValuePlaceholder: 'Wprowadź Klucz API', + }, + key: 'Klucz', + value: 'Wartość', + }, + authHeaderPrefix: { + title: 'Typ autoryzacji', + types: { + basic: 'Podstawowa', + bearer: 'Bearer', + custom: 'Niestandardowa', + }, + }, + privacyPolicy: 'Polityka prywatności', + privacyPolicyPlaceholder: 'Proszę wprowadzić politykę prywatności', + }, + test: { + title: 'Test', + parametersValue: 'Parametry i Wartość', + parameters: 'Parametry', + value: 'Wartość', + testResult: 'Wyniki testu', + testResultPlaceholder: 'Wynik testu pojawi się tutaj', + }, + thought: { + using: 'Używanie', + used: 'Użyty', + requestTitle: 'Żądanie do', + responseTitle: 'Odpowiedź od', + }, + setBuiltInTools: { + info: 'Informacje', + setting: 'Ustawienia', + toolDescription: 'Opis narzędzia', + parameters: 'parametry', + string: 'ciąg znaków', + number: 'liczba', + required: 'Wymagane', + infoAndSetting: 'Informacje i Ustawienia', + }, + noCustomTool: { + title: 'Brak niestandardowych narzędzi!', + content: + 'Dodaj i zarządzaj niestandardowymi narzędziami tutaj, aby budować aplikacje AI.', + createTool: 'Utwórz Narzędzie', + }, + noSearchRes: { + title: 'Przykro nam, brak wyników!', + content: + 'Nie znaleźliśmy żadnych narzędzi pasujących do Twojego wyszukiwania.', + reset: 'Resetuj Wyszukiwanie', + }, + builtInPromptTitle: 'Komunikat', + toolRemoved: 'Narzędzie usunięte', + notAuthorized: 'Narzędzie nieautoryzowane', + howToGet: 'Jak uzyskać', +} + +export default translation diff --git a/web/i18n/pl-PL/workflow.ts b/web/i18n/pl-PL/workflow.ts new file mode 100644 index 0000000000..c408575840 --- /dev/null +++ b/web/i18n/pl-PL/workflow.ts @@ -0,0 +1,354 @@ +const translation = { + common: { + editing: 'Edytowanie', + autoSaved: 'Autozapisano', + unpublished: 'Nieopublikowany', + published: 'Opublikowany', + publish: 'Opublikuj', + update: 'Aktualizuj', + run: 'Uruchom', + running: 'Uruchamianie', + inRunMode: 'W trybie Uruchom', + inPreview: 'Podgląd', + inPreviewMode: 'W trybie Podgląd', + preview: 'Podgląd', + viewRunHistory: 'Zobacz historię uruchomień', + runHistory: 'Historia Uruchomień', + goBackToEdit: 'Wróć do edytora', + conversationLog: 'Dziennik Konwersacji', + features: 'Funkcje', + debugAndPreview: 'Debugowanie i Podgląd', + restart: 'Uruchom Ponownie', + currentDraft: 'Aktualny Szkic', + currentDraftUnpublished: 'Aktualny Szkic Nieopublikowany', + latestPublished: 'Ostatnio Opublikowany', + publishedAt: 'Opublikowany', + 
restore: 'Przywróć', + runApp: 'Uruchom Aplikację', + batchRunApp: 'Uruchom Aplikację Partiami', + accessAPIReference: 'Dostęp do Referencji API', + embedIntoSite: 'Osadź na Stronie', + addTitle: 'Dodaj tytuł...', + addDescription: 'Dodaj opis...', + noVar: 'Brak zmiennej', + searchVar: 'Szukaj zmiennej', + variableNamePlaceholder: 'Nazwa zmiennej', + setVarValuePlaceholder: 'Ustaw zmienną', + needConnecttip: 'Ten krok nie jest połączony z niczym', + maxTreeDepth: 'Maksymalny limit {{depth}} węzłów na gałąź', + needEndNode: 'Należy dodać Blok Końcowy', + needAnswerNode: 'Należy dodać Blok Odpowiedzi', + workflowProcess: 'Proces Przepływu', + notRunning: 'Nie uruchamiany jeszcze', + previewPlaceholder: 'Wprowadź treść w poniższym polu, aby rozpocząć debugowanie Chatbota', + effectVarConfirm: { + title: 'Usuń Zmienną', + content: 'Zmienna jest używana w innych węzłach. Czy nadal chcesz ją usunąć?', + }, + insertVarTip: 'Naciśnij klawisz \'/\' aby szybko wstawić', + processData: 'Przetwarzaj Dane', + input: 'Wejście', + output: 'Wyjście', + jinjaEditorPlaceholder: 'Wprowadź \'/\' lub \'{\' aby wstawić zmienną', + viewOnly: 'Tylko Podgląd', + showRunHistory: 'Pokaż Historię Uruchomień', + enableJinja: 'Włącz wsparcie dla szablonów Jinja', + learnMore: 'Czytaj więcej', + }, + errorMsg: { + fieldRequired: '{{field}} jest wymagane', + authRequired: 'Autoryzacja jest wymagana', + invalidJson: '{{field}} jest nieprawidłowy JSON', + fields: { + variable: 'Nazwa Zmiennej', + variableValue: 'Wartość Zmiennej', + code: 'Kod', + model: 'Model', + rerankModel: 'Model Ponownej Klasyfikacji', + }, + invalidVariable: 'Nieprawidłowa zmienna', + }, + singleRun: { + testRun: 'Testuj Uruchomienie ', + startRun: 'Uruchomienie', + running: 'Uruchamianie', + }, + tabs: { + 'searchBlock': 'Szukaj Bloku', + 'blocks': 'Bloki', + 'builtInTool': 'Wbudowany Narzędzie', + 'customTool': 'Niestandardowe Narzędzie', + 'question-understand': 'Zrozumienie Pytania', + 'logic': 'Logika', + 'transform': 'Transformacja', + 'utilities': 'Użyteczność', + 'noResult': 'Brak pasujących wyników', + }, + blocks: { + 'start': 'Start', + 'end': 'Koniec', + 'answer': 'Odpowiedź', + 'llm': 'LLM', + 'knowledge-retrieval': 'Odzyskiwanie Wiedzy', + 'question-classifier': 'Klasyfikator Pytań', + 'if-else': 'IF/ELSE', + 'code': 'Kod', + 'template-transform': 'Szablon', + 'http-request': 'Żądanie HTTP', + 'variable-assigner': 'Przypisywacz Zmiennych', + }, + blocksAbout: { + 'start': 'Definiuje początkowe parametry uruchamiania przepływu', + 'end': 'Definiuje koniec i typ wyniku przepływu', + 'answer': 'Definiuje treść odpowiedzi w rozmowie czatowej', + 'llm': 'Wywołuje duże modele językowe do odpowiedzi na pytania lub przetwarzania języka naturalnego', + 'knowledge-retrieval': 'Pozwala na wyszukiwanie treści tekstowych związanych z pytaniami użytkowników z Wiedzy', + 'question-classifier': 'Definiuje warunki klasyfikacji pytań użytkowników, LLM może określić, jak postępuje rozmowa na podstawie opisu klasyfikacji', + 'if-else': 'Pozwala na podział przepływu na dwie gałęzie na podstawie warunków if/else', + 'code': 'Wykonuje fragment kodu Pythona lub NodeJS w celu zastosowania niestandardowej logiki', + 'template-transform': 'Konwertuje dane na ciąg za pomocą składni szablonu Jinja', + 'http-request': 'Pozwala na wysyłanie żądań serwera za pośrednictwem protokołu HTTP', + 'variable-assigner': 'Przypisuje zmienne w różnych gałęziach do tej samej zmiennej w celu uzyskania zharmonizowanej konfiguracji post-węzłów', + }, + operator: { + zoomIn: 
'Powiększ', + zoomOut: 'Pomniejsz', + zoomTo50: 'Powiększ do 50%', + zoomTo100: 'Powiększ do 100%', + zoomToFit: 'Dopasuj do rozmiaru', + }, + panel: { + userInputField: 'Pole Wejściowe Użytkownika', + changeBlock: 'Zmień Blok', + helpLink: 'Link Pomocniczy', + about: 'O Autorze', + createdBy: 'Utworzone przez', + nextStep: 'Następny Krok', + addNextStep: 'Dodaj następny blok w tym przepływie', + selectNextStep: 'Wybierz Następny Blok', + runThisStep: 'Uruchom ten krok', + checklist: 'Lista kontrolna', + checklistTip: 'Upewnij się, że wszystkie problemy są rozwiązane przed opublikowaniem', + checklistResolved: 'Wszystkie problemy zostały rozwiązane', + organizeBlocks: 'Organizuj bloki', + change: 'Zmień', + }, + nodes: { + common: { + outputVars: 'Zmienne Wyjściowe', + insertVarTip: 'Wstaw Zmienną', + memory: { + memory: 'Pamięć', + memoryTip: 'Ustawienia pamięci czatu', + windowSize: 'Rozmiar Okna', + conversationRoleName: 'Nazwa Roli Konwersacji', + user: 'Prefiks użytkownika', + assistant: 'Prefiks asystenta', + }, + memories: { + title: 'Wspomnienia', + tip: 'Pamięć czatu', + builtIn: 'Wbudowane', + }, + }, + start: { + required: 'wymagane', + inputField: 'Pole Wejściowe', + builtInVar: 'Zmienne Wbudowane', + outputVars: { + query: 'Wejście użytkownika', + memories: { + des: 'Historia rozmowy', + type: 'typ wiadomości', + content: 'treść wiadomości', + }, + files: 'Lista plików', + }, + noVarTip: 'Ustaw wejścia, które można użyć w Przepływie', + }, + end: { + outputs: 'Wyjścia', + output: { + type: 'typ wyjścia', + variable: 'zmienna wyjścia', + }, + type: { + 'none': 'Brak', + 'plain-text': 'Tekst Prosty', + 'structured': 'Strukturyzowany', + }, + }, + answer: { + answer: 'Odpowiedź', + outputVars: 'Zmienne Wyjściowe', + }, + llm: { + model: 'model', + variables: 'zmienne', + context: 'kontekst', + contextTooltip: 'Możesz zaimportować Wiedzę jako kontekst', + notSetContextInPromptTip: 'Aby włączyć funkcję kontekstu, proszę wypełnić zmienną kontekstu w PROMPT.', + prompt: 'prompt', + roleDescription: { + system: 'Daj instrukcje na wysokim poziomie dla rozmowy', + user: 'Dostarcz instrukcje, zapytania lub jakiekolwiek wejście oparte na tekście do modelu', + assistant: 'Odpowiedzi modelu na wiadomości użytkownika', + }, + addMessage: 'Dodaj Wiadomość', + vision: 'wizja', + files: 'Pliki', + resolution: { + name: 'Rozdzielczość', + high: 'Wysoka', + low: 'Niska', + }, + outputVars: { + output: 'Generuj zawartość', + usage: 'Informacje o użyciu modelu', + }, + singleRun: { + variable: 'Zmienna', + }, + sysQueryInUser: 'sys.query w wiadomości użytkownika jest wymagane', + }, + knowledgeRetrieval: { + queryVariable: 'Zmienna Zapytania', + knowledge: 'Wiedza', + outputVars: { + output: 'Odzyskane dane podzielone', + content: 'Zawartość podzielona', + title: 'Tytuł podzielony', + icon: 'Ikona podzielona', + url: 'URL podzielony', + metadata: 'Inne metadane', + }, + }, + http: { + inputVars: 'Zmienne Wejściowe', + api: 'API', + apiPlaceholder: 'Wprowadź URL, wpisz \'/\' aby wstawić zmienną', + notStartWithHttp: 'API powinno rozpoczynać się od http:// lub https://', + key: 'Klucz', + value: 'Wartość', + bulkEdit: 'Edycja Masowa', + keyValueEdit: 'Edycja Klucz-Wartość', + headers: 'Nagłówki', + params: 'Parametry', + body: 'Treść', + outputVars: { + body: 'Zawartość Odpowiedzi', + statusCode: 'Kod Stanu Odpowiedzi', + headers: 'Lista Nagłówków Odpowiedzi w formacie JSON', + files: 'Lista plików', + }, + authorization: { + 'authorization': 'Autoryzacja', + 'authorizationType': 'Typ Autoryzacji', + 
'no-auth': 'Brak', + 'api-key': 'Klucz API', + 'auth-type': 'Typ Autoryzacji', + 'basic': 'Podstawowa', + 'bearer': 'Bearer', + 'custom': 'Niestandardowa', + 'api-key-title': 'Klucz API', + 'header': 'Nagłówek', + }, + insertVarPlaceholder: 'wpisz \'/\' aby wstawić zmienną', + timeout: { + title: 'Limit czasu', + connectLabel: 'Limit czasu połączenia', + connectPlaceholder: 'Wprowadź limit czasu połączenia w sekundach', + readLabel: 'Limit czasu odczytu', + readPlaceholder: 'Wprowadź limit czasu odczytu w sekundach', + writeLabel: 'Limit czasu zapisu', + writePlaceholder: 'Wprowadź limit czasu zapisu w sekundach', + }, + }, + code: { + inputVars: 'Zmienne Wejściowe', + outputVars: 'Zmienne Wyjściowe', + }, + templateTransform: { + inputVars: 'Zmienne Wejściowe', + code: 'Kod', + codeSupportTip: 'Obsługuje tylko Jinja2', + outputVars: { + output: 'Przekształcona zawartość', + }, + }, + ifElse: { + if: 'Jeśli', + else: 'W przeciwnym razie', + elseDescription: 'Służy do zdefiniowania logiki, która powinna być wykonana, gdy warunek if nie jest spełniony.', + and: 'i', + or: 'lub', + operator: 'Operator', + notSetVariable: 'Najpierw ustaw zmienną', + comparisonOperator: { + 'contains': 'zawiera', + 'not contains': 'nie zawiera', + 'start with': 'zaczyna się od', + 'end with': 'kończy się na', + 'is': 'jest', + 'is not': 'nie jest', + 'empty': 'jest pusty', + 'not empty': 'nie jest pusty', + 'null': 'jest pusty', + 'not null': 'nie jest pusty', + }, + enterValue: 'Wprowadź wartość', + addCondition: 'Dodaj Warunek', + conditionNotSetup: 'Warunek NIE jest ustawiony', + }, + variableAssigner: { + title: 'Przypisz zmienne', + outputType: 'Typ Wyjścia', + outputVarType: 'Typ Zmiennej Wyjściowej', + varNotSet: 'Zmienna nieustawiona', + noVarTip: 'Dodaj zmienne do przypisania', + type: { + string: 'Tekst', + number: 'Liczba', + object: 'Obiekt', + array: 'Tablica', + }, + outputVars: { + output: 'Wartość zmiennej przypisanej', + }, + }, + tool: { + toAuthorize: 'Aby autoryzować', + inputVars: 'Zmienne Wejściowe', + outputVars: { + text: 'wygenerowana zawartość narzędzia', + files: { + title: 'wygenerowane pliki narzędzia', + type: 'Typ wsparcia. Obecnie obsługuje tylko obraz', + transfer_method: 'Metoda transferu. 
Wartość to remote_url lub local_file', + url: 'URL obrazu', + upload_file_id: 'Identyfikator przesyłanego pliku', + }, + }, + }, + questionClassifiers: { + model: 'model', + inputVars: 'Zmienne Wejściowe', + outputVars: { + className: 'Nazwa Klasy', + }, + class: 'Klasa', + classNamePlaceholder: 'Wpisz nazwę swojej klasy', + advancedSetting: 'Ustawienia Zaawansowane', + topicName: 'Nazwa Tematu', + topicPlaceholder: 'Wpisz nazwę swojego tematu', + addClass: 'Dodaj Klasę', + instruction: 'Instrukcja', + instructionPlaceholder: 'Wpisz swoją instrukcję', + }, + }, + tracing: { + stopBy: 'Zatrzymano przez {{user}}', + }, +} + +export default translation From 087b7a66070d80fe4298846fe46b389e632ecbd3 Mon Sep 17 00:00:00 2001 From: "Charlie.Wei" Date: Tue, 7 May 2024 15:55:23 +0800 Subject: [PATCH 009/267] azure_openai add gpt-4-turbo-2024-04-09 model (#4144) Co-authored-by: luowei Co-authored-by: crazywoola <427733928@qq.com> Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- .../model_providers/azure_openai/_constant.py | 76 +++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/api/core/model_runtime/model_providers/azure_openai/_constant.py b/api/core/model_runtime/model_providers/azure_openai/_constant.py index e81a120fa0..99378f3aab 100644 --- a/api/core/model_runtime/model_providers/azure_openai/_constant.py +++ b/api/core/model_runtime/model_providers/azure_openai/_constant.py @@ -482,6 +482,82 @@ LLM_BASE_MODELS = [ ) ) ), + AzureBaseModel( + base_model_name='gpt-4-turbo-2024-04-09', + entity=AIModelEntity( + model='fake-deployment-name', + label=I18nObject( + en_US='fake-deployment-name-label', + ), + model_type=ModelType.LLM, + features=[ + ModelFeature.AGENT_THOUGHT, + ModelFeature.VISION, + ModelFeature.MULTI_TOOL_CALL, + ModelFeature.STREAM_TOOL_CALL, + ], + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_properties={ + ModelPropertyKey.MODE: LLMMode.CHAT.value, + ModelPropertyKey.CONTEXT_SIZE: 128000, + }, + parameter_rules=[ + ParameterRule( + name='temperature', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], + ), + ParameterRule( + name='top_p', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], + ), + ParameterRule( + name='presence_penalty', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], + ), + ParameterRule( + name='frequency_penalty', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], + ), + _get_max_tokens(default=512, min_val=1, max_val=4096), + ParameterRule( + name='seed', + label=I18nObject( + zh_Hans='种子', + en_US='Seed' + ), + type='int', + help=I18nObject( + zh_Hans='如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。', + en_US='If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.' 
+ ), + required=False, + precision=2, + min=0, + max=1, + ), + ParameterRule( + name='response_format', + label=I18nObject( + zh_Hans='回复格式', + en_US='response_format' + ), + type='string', + help=I18nObject( + zh_Hans='指定模型必须输出的格式', + en_US='specifying the format that the model must output' + ), + required=False, + options=['text', 'json_object'] + ), + ], + pricing=PriceConfig( + input=0.001, + output=0.003, + unit=0.001, + currency='USD', + ) + ) + ), AzureBaseModel( base_model_name='gpt-4-vision-preview', entity=AIModelEntity( From c2f0f958efe76c9f8d9e2abb87bef5560bcce481 Mon Sep 17 00:00:00 2001 From: takatost Date: Tue, 7 May 2024 16:38:23 +0800 Subject: [PATCH 010/267] fix: passing in 0 as a numeric variable will be converted to null (#4148) --- api/core/app/apps/base_app_generator.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/api/core/app/apps/base_app_generator.py b/api/core/app/apps/base_app_generator.py index 9d88c834e6..20ae6ff676 100644 --- a/api/core/app/apps/base_app_generator.py +++ b/api/core/app/apps/base_app_generator.py @@ -13,7 +13,9 @@ class BaseAppGenerator: for variable_config in variables: variable = variable_config.variable - if variable not in user_inputs or not user_inputs[variable]: + if (variable not in user_inputs + or user_inputs[variable] is None + or (isinstance(user_inputs[variable], str) and user_inputs[variable] == '')): if variable_config.required: raise ValueError(f"{variable} is required in input form") else: @@ -22,7 +24,7 @@ class BaseAppGenerator: value = user_inputs[variable] - if value: + if value is not None: if variable_config.type != VariableEntity.Type.NUMBER and not isinstance(value, str): raise ValueError(f"{variable} in input form must be a string") elif variable_config.type == VariableEntity.Type.NUMBER and isinstance(value, str): @@ -44,7 +46,7 @@ class BaseAppGenerator: if value and isinstance(value, str): filtered_inputs[variable] = value.replace('\x00', '') else: - filtered_inputs[variable] = value if value else None + filtered_inputs[variable] = value if value is not None else None return filtered_inputs From e3538096803311f7864c8ae9641d316ccc112c2d Mon Sep 17 00:00:00 2001 From: Jyong <76649700+JohnJyong@users.noreply.github.com> Date: Tue, 7 May 2024 16:44:27 +0800 Subject: [PATCH 011/267] question classifier optimize (#4147) --- .../question_classifier_node.py | 37 ++++++++++++------- .../question_classifier/template_prompts.py | 28 +++++++------- 2 files changed, 38 insertions(+), 27 deletions(-) diff --git a/api/core/workflow/nodes/question_classifier/question_classifier_node.py b/api/core/workflow/nodes/question_classifier/question_classifier_node.py index 9ec0df721c..770027d06c 100644 --- a/api/core/workflow/nodes/question_classifier/question_classifier_node.py +++ b/api/core/workflow/nodes/question_classifier/question_classifier_node.py @@ -1,3 +1,4 @@ +import json import logging from typing import Optional, Union, cast @@ -62,13 +63,20 @@ class QuestionClassifierNode(LLMNode): prompt_messages=prompt_messages, stop=stop ) - categories = [_class.name for _class in node_data.classes] + category_name = node_data.classes[0].name + category_id = node_data.classes[0].id try: result_text_json = parse_and_check_json_markdown(result_text, []) - #result_text_json = json.loads(result_text.strip('```JSON\n')) - categories_result = result_text_json.get('categories', []) - if categories_result: - categories = categories_result + # result_text_json = json.loads(result_text.strip('```JSON\n')) + if 
'category_name' in result_text_json and 'category_id' in result_text_json: + category_id_result = result_text_json['category_id'] + classes = node_data.classes + classes_map = {class_.id: class_.name for class_ in classes} + category_ids = [_class.id for _class in classes] + if category_id_result in category_ids: + category_name = classes_map[category_id_result] + category_id = category_id_result + except Exception: logging.error(f"Failed to parse result text: {result_text}") try: @@ -81,17 +89,15 @@ class QuestionClassifierNode(LLMNode): 'usage': jsonable_encoder(usage), } outputs = { - 'class_name': categories[0] if categories else '' + 'class_name': category_name } - classes = node_data.classes - classes_map = {class_.name: class_.id for class_ in classes} return NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, inputs=variables, process_data=process_data, outputs=outputs, - edge_source_handle=classes_map.get(categories[0], None), + edge_source_handle=category_id, metadata={ NodeRunMetadataKey.TOTAL_TOKENS: usage.total_tokens, NodeRunMetadataKey.TOTAL_PRICE: usage.total_price, @@ -210,8 +216,13 @@ class QuestionClassifierNode(LLMNode): -> Union[list[ChatModelMessage], CompletionModelPromptTemplate]: model_mode = ModelMode.value_of(node_data.model.mode) classes = node_data.classes - class_names = [class_.name for class_ in classes] - class_names_str = ','.join(f'"{name}"' for name in class_names) + categories = [] + for class_ in classes: + category = { + 'category_id': class_.id, + 'category_name': class_.name + } + categories.append(category) instruction = node_data.instruction if node_data.instruction else '' input_text = query memory_str = '' @@ -248,7 +259,7 @@ class QuestionClassifierNode(LLMNode): user_prompt_message_3 = ChatModelMessage( role=PromptMessageRole.USER, text=QUESTION_CLASSIFIER_USER_PROMPT_3.format(input_text=input_text, - categories=class_names_str, + categories=json.dumps(categories), classification_instructions=instruction) ) prompt_messages.append(user_prompt_message_3) @@ -257,7 +268,7 @@ class QuestionClassifierNode(LLMNode): return CompletionModelPromptTemplate( text=QUESTION_CLASSIFIER_COMPLETION_PROMPT.format(histories=memory_str, input_text=input_text, - categories=class_names_str, + categories=json.dumps(categories), classification_instructions=instruction) ) diff --git a/api/core/workflow/nodes/question_classifier/template_prompts.py b/api/core/workflow/nodes/question_classifier/template_prompts.py index 5bef0250e3..ea24baa522 100644 --- a/api/core/workflow/nodes/question_classifier/template_prompts.py +++ b/api/core/workflow/nodes/question_classifier/template_prompts.py @@ -6,7 +6,7 @@ QUESTION_CLASSIFIER_SYSTEM_PROMPT = """ ### Task Your task is to assign one categories ONLY to the input text and only one category may be assigned returned in the output.Additionally, you need to extract the key words from the text that are related to the classification. ### Format - The input text is in the variable text_field.Categories are specified as a comma-separated list in the variable categories or left empty for automatic determination.Classification instructions may be included to improve the classification accuracy. + The input text is in the variable text_field.Categories are specified as a category list in the variable categories or left empty for automatic determination.Classification instructions may be included to improve the classification accuracy. ### Constraint DO NOT include anything other than the JSON array in your response. 
### Memory @@ -18,33 +18,33 @@ QUESTION_CLASSIFIER_SYSTEM_PROMPT = """ QUESTION_CLASSIFIER_USER_PROMPT_1 = """ { "input_text": ["I recently had a great experience with your company. The service was prompt and the staff was very friendly."], - "categories": ["Customer Service", "Satisfaction", "Sales", "Product"], + "categories": [{"category_id":"f5660049-284f-41a7-b301-fd24176a711c","category_name":"Customer Service"},{"category_id":"8d007d06-f2c9-4be5-8ff6-cd4381c13c60","category_name":"Satisfaction"},{"category_id":"5fbbbb18-9843-466d-9b8e-b9bfbb9482c8","category_name":"Sales"},{"category_id":"23623c75-7184-4a2e-8226-466c2e4631e4","category_name":"Product"}], "classification_instructions": ["classify the text based on the feedback provided by customer"]} """ QUESTION_CLASSIFIER_ASSISTANT_PROMPT_1 = """ ```json - {"keywords": ["recently", "great experience", "company", "service", "prompt", "staff", "friendly"], - "categories": ["Customer Service"]} + {"category_id": "f5660049-284f-41a7-b301-fd24176a711c", + "category_name": "Customer Service"} ``` """ QUESTION_CLASSIFIER_USER_PROMPT_2 = """ {"input_text": ["bad service, slow to bring the food"], - "categories": ["Food Quality", "Experience", "Price" ], + "categories": [{"category_id":"80fb86a0-4454-4bf5-924c-f253fdd83c02","category_name":"Food Quality"},{"category_id":"f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name":"Experience"},{"category_id":"cc771f63-74e7-4c61-882e-3eda9d8ba5d7","category_name":"Price"}], "classification_instructions": []} """ QUESTION_CLASSIFIER_ASSISTANT_PROMPT_2 = """ ```json - {"keywords": ["bad service", "slow", "food", "tip", "terrible", "waitresses"], - "categories": ["Experience"]} + {"category_id": "f6ff5bc3-aca0-4e4a-8627-e760d0aca78f", + "category_name": "Experience"} ``` """ QUESTION_CLASSIFIER_USER_PROMPT_3 = """ '{{"input_text": ["{input_text}"],', - '"categories": ["{categories}" ], ', + '"categories": {categories}, ', '"classification_instructions": ["{classification_instructions}"]}}' """ @@ -54,16 +54,16 @@ You are a text classification engine that analyzes text data and assigns categor ### Task Your task is to assign one categories ONLY to the input text and only one category may be assigned returned in the output. Additionally, you need to extract the key words from the text that are related to the classification. ### Format -The input text is in the variable text_field. Categories are specified as a comma-separated list in the variable categories or left empty for automatic determination. Classification instructions may be included to improve the classification accuracy. +The input text is in the variable text_field. Categories are specified as a category list in the variable categories or left empty for automatic determination. Classification instructions may be included to improve the classification accuracy. ### Constraint DO NOT include anything other than the JSON array in your response. ### Example Here is the chat example between human and assistant, inside XML tags. -User:{{"input_text": ["I recently had a great experience with your company. 
The service was prompt and the staff was very friendly."],"categories": ["Customer Service, Satisfaction, Sales, Product"], "classification_instructions": ["classify the text based on the feedback provided by customer"]}} -Assistant:{{"keywords": ["recently", "great experience", "company", "service", "prompt", "staff", "friendly"],"categories": ["Customer Service"]}} -User:{{"input_text": ["bad service, slow to bring the food"],"categories": ["Food Quality, Experience, Price" ], "classification_instructions": []}} -Assistant:{{"keywords": ["recently", "great experience", "company", "service", "prompt", "staff", "friendly"],"categories": ["Customer Service"]}}{{"keywords": ["bad service", "slow", "food", "tip", "terrible", "waitresses"],"categories": ["Experience""]}} +User:{{"input_text": ["I recently had a great experience with your company. The service was prompt and the staff was very friendly."], "categories": [{{"category_id":"f5660049-284f-41a7-b301-fd24176a711c","category_name":"Customer Service"}},{{"category_id":"8d007d06-f2c9-4be5-8ff6-cd4381c13c60","category_name":"Satisfaction"}},{{"category_id":"5fbbbb18-9843-466d-9b8e-b9bfbb9482c8","category_name":"Sales"}},{{"category_id":"23623c75-7184-4a2e-8226-466c2e4631e4","category_name":"Product"}}], "classification_instructions": ["classify the text based on the feedback provided by customer"]}} +Assistant:{{"category_id": "f5660049-284f-41a7-b301-fd24176a711c","category_name": "Customer Service"}} +User:{{"input_text": ["bad service, slow to bring the food"], "categories": [{{"category_id":"80fb86a0-4454-4bf5-924c-f253fdd83c02","category_name":"Food Quality"}},{{"category_id":"f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name":"Experience"}},{{"category_id":"cc771f63-74e7-4c61-882e-3eda9d8ba5d7","category_name":"Price"}}], "classification_instructions": []}} +Assistant:{{"category_id": "f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name": "Customer Service"}} ### Memory Here is the chat histories between human and assistant, inside XML tags. 
@@ -71,6 +71,6 @@ Here is the chat histories between human and assistant, inside ### User Input -{{"input_text" : ["{input_text}"], "categories" : ["{categories}"],"classification_instruction" : ["{classification_instructions}"]}} +{{"input_text" : ["{input_text}"], "categories" : {categories},"classification_instruction" : ["{classification_instructions}"]}} ### Assistant Output """ \ No newline at end of file From d51f52a649bc165a6299c2e0ca2123040e3133a8 Mon Sep 17 00:00:00 2001 From: Yeuoly <45712896+Yeuoly@users.noreply.github.com> Date: Tue, 7 May 2024 16:56:25 +0800 Subject: [PATCH 012/267] fix: http authorization leakage (#4146) --- .../workflow/nodes/http_request/entities.py | 13 ++++++++---- .../nodes/http_request/http_executor.py | 17 ++++++++++++++-- .../nodes/http_request/http_request_node.py | 20 +++++++++++-------- .../workflow/nodes/test_http.py | 7 +++++++ 4 files changed, 43 insertions(+), 14 deletions(-) diff --git a/api/core/workflow/nodes/http_request/entities.py b/api/core/workflow/nodes/http_request/entities.py index d88ad999b7..31d5a679b0 100644 --- a/api/core/workflow/nodes/http_request/entities.py +++ b/api/core/workflow/nodes/http_request/entities.py @@ -1,9 +1,13 @@ +import os from typing import Literal, Optional, Union from pydantic import BaseModel, validator from core.workflow.entities.base_node_data_entities import BaseNodeData +MAX_CONNECT_TIMEOUT = int(os.environ.get('HTTP_REQUEST_MAX_CONNECT_TIMEOUT', '300')) +MAX_READ_TIMEOUT = int(os.environ.get('HTTP_REQUEST_MAX_READ_TIMEOUT', '600')) +MAX_WRITE_TIMEOUT = int(os.environ.get('HTTP_REQUEST_MAX_WRITE_TIMEOUT', '600')) class HttpRequestNodeData(BaseNodeData): """ @@ -36,9 +40,9 @@ class HttpRequestNodeData(BaseNodeData): data: Union[None, str] class Timeout(BaseModel): - connect: int - read: int - write: int + connect: int = MAX_CONNECT_TIMEOUT + read: int = MAX_READ_TIMEOUT + write: int = MAX_WRITE_TIMEOUT method: Literal['get', 'post', 'put', 'patch', 'delete', 'head'] url: str @@ -46,4 +50,5 @@ class HttpRequestNodeData(BaseNodeData): headers: str params: str body: Optional[Body] - timeout: Optional[Timeout] \ No newline at end of file + timeout: Optional[Timeout] + mask_authorization_header: Optional[bool] = True diff --git a/api/core/workflow/nodes/http_request/http_executor.py b/api/core/workflow/nodes/http_request/http_executor.py index 1fb73afd12..4ca8a81d8c 100644 --- a/api/core/workflow/nodes/http_request/http_executor.py +++ b/api/core/workflow/nodes/http_request/http_executor.py @@ -19,7 +19,6 @@ READABLE_MAX_BINARY_SIZE = f'{MAX_BINARY_SIZE / 1024 / 1024:.2f}MB' MAX_TEXT_SIZE = int(os.environ.get('HTTP_REQUEST_NODE_MAX_TEXT_SIZE', str(1024 * 1024))) # 10MB # 1MB READABLE_MAX_TEXT_SIZE = f'{MAX_TEXT_SIZE / 1024 / 1024:.2f}MB' - class HttpExecutorResponse: headers: dict[str, str] response: Union[httpx.Response, requests.Response] @@ -345,10 +344,13 @@ class HttpExecutor: # validate response return self._validate_and_parse_response(response) - def to_raw_request(self) -> str: + def to_raw_request(self, mask_authorization_header: Optional[bool] = True) -> str: """ convert to raw request """ + if mask_authorization_header == None: + mask_authorization_header = True + server_url = self.server_url if self.params: server_url += f'?{urlencode(self.params)}' @@ -357,6 +359,17 @@ class HttpExecutor: headers = self._assembling_headers() for k, v in headers.items(): + if mask_authorization_header: + # get authorization header + if self.authorization.type == 'api-key': + authorization_header = 
'Authorization' + if self.authorization.config and self.authorization.config.header: + authorization_header = self.authorization.config.header + + if k.lower() == authorization_header.lower(): + raw_request += f'{k}: {"*" * len(v)}\n' + continue + raw_request += f'{k}: {v}\n' raw_request += '\n' diff --git a/api/core/workflow/nodes/http_request/http_request_node.py b/api/core/workflow/nodes/http_request/http_request_node.py index cba1a11a8a..bfd686175a 100644 --- a/api/core/workflow/nodes/http_request/http_request_node.py +++ b/api/core/workflow/nodes/http_request/http_request_node.py @@ -1,5 +1,4 @@ import logging -import os from mimetypes import guess_extension from os import path from typing import cast @@ -9,14 +8,15 @@ from core.tools.tool_file_manager import ToolFileManager from core.workflow.entities.node_entities import NodeRunResult, NodeType from core.workflow.entities.variable_pool import VariablePool from core.workflow.nodes.base_node import BaseNode -from core.workflow.nodes.http_request.entities import HttpRequestNodeData +from core.workflow.nodes.http_request.entities import ( + MAX_CONNECT_TIMEOUT, + MAX_READ_TIMEOUT, + MAX_WRITE_TIMEOUT, + HttpRequestNodeData, +) from core.workflow.nodes.http_request.http_executor import HttpExecutor, HttpExecutorResponse from models.workflow import WorkflowNodeExecutionStatus -MAX_CONNECT_TIMEOUT = int(os.environ.get('HTTP_REQUEST_MAX_CONNECT_TIMEOUT', '300')) -MAX_READ_TIMEOUT = int(os.environ.get('HTTP_REQUEST_MAX_READ_TIMEOUT', '600')) -MAX_WRITE_TIMEOUT = int(os.environ.get('HTTP_REQUEST_MAX_WRITE_TIMEOUT', '600')) - HTTP_REQUEST_DEFAULT_TIMEOUT = HttpRequestNodeData.Timeout(connect=min(10, MAX_CONNECT_TIMEOUT), read=min(60, MAX_READ_TIMEOUT), write=min(20, MAX_WRITE_TIMEOUT)) @@ -63,7 +63,9 @@ class HttpRequestNode(BaseNode): process_data = {} if http_executor: process_data = { - 'request': http_executor.to_raw_request(), + 'request': http_executor.to_raw_request( + mask_authorization_header=node_data.mask_authorization_header + ), } return NodeRunResult( status=WorkflowNodeExecutionStatus.FAILED, @@ -82,7 +84,9 @@ class HttpRequestNode(BaseNode): 'files': files, }, process_data={ - 'request': http_executor.to_raw_request(), + 'request': http_executor.to_raw_request( + mask_authorization_header=node_data.mask_authorization_header + ), } ) diff --git a/api/tests/integration_tests/workflow/nodes/test_http.py b/api/tests/integration_tests/workflow/nodes/test_http.py index 63b6b7d962..10e3d53608 100644 --- a/api/tests/integration_tests/workflow/nodes/test_http.py +++ b/api/tests/integration_tests/workflow/nodes/test_http.py @@ -38,6 +38,7 @@ def test_get(setup_http_mock): 'headers': 'X-Header:123', 'params': 'A:b', 'body': None, + 'mask_authorization_header': False, } }, **BASIC_NODE_DATA) @@ -95,6 +96,7 @@ def test_custom_authorization_header(setup_http_mock): 'headers': 'X-Header:123', 'params': 'A:b', 'body': None, + 'mask_authorization_header': False, } }, **BASIC_NODE_DATA) @@ -126,6 +128,7 @@ def test_template(setup_http_mock): 'headers': 'X-Header:123\nX-Header2:{{#a.b123.args2#}}', 'params': 'A:b\nTemplate:{{#a.b123.args2#}}', 'body': None, + 'mask_authorization_header': False, } }, **BASIC_NODE_DATA) @@ -161,6 +164,7 @@ def test_json(setup_http_mock): 'type': 'json', 'data': '{"a": "{{#a.b123.args1#}}"}' }, + 'mask_authorization_header': False, } }, **BASIC_NODE_DATA) @@ -193,6 +197,7 @@ def test_x_www_form_urlencoded(setup_http_mock): 'type': 'x-www-form-urlencoded', 'data': 'a:{{#a.b123.args1#}}\nb:{{#a.b123.args2#}}' }, + 
'mask_authorization_header': False, } }, **BASIC_NODE_DATA) @@ -225,6 +230,7 @@ def test_form_data(setup_http_mock): 'type': 'form-data', 'data': 'a:{{#a.b123.args1#}}\nb:{{#a.b123.args2#}}' }, + 'mask_authorization_header': False, } }, **BASIC_NODE_DATA) @@ -260,6 +266,7 @@ def test_none_data(setup_http_mock): 'type': 'none', 'data': '123123123' }, + 'mask_authorization_header': False, } }, **BASIC_NODE_DATA) From bb7c62777d07b388f089b9a67e7d341b2250cbcb Mon Sep 17 00:00:00 2001 From: Tomy <103342866+Tomywang999@users.noreply.github.com> Date: Tue, 7 May 2024 17:14:24 +0800 Subject: [PATCH 013/267] Add support for local ai speech to text (#3921) Co-authored-by: Yeuoly --- .../model_providers/localai/localai.yaml | 4 + .../localai/speech2text/__init__.py | 0 .../localai/speech2text/speech2text.py | 101 ++++++++++++++++++ .../model_runtime/localai/test_speech2text.py | 54 ++++++++++ 4 files changed, 159 insertions(+) create mode 100644 api/core/model_runtime/model_providers/localai/speech2text/__init__.py create mode 100644 api/core/model_runtime/model_providers/localai/speech2text/speech2text.py create mode 100644 api/tests/integration_tests/model_runtime/localai/test_speech2text.py diff --git a/api/core/model_runtime/model_providers/localai/localai.yaml b/api/core/model_runtime/model_providers/localai/localai.yaml index a870914632..151f02ee6f 100644 --- a/api/core/model_runtime/model_providers/localai/localai.yaml +++ b/api/core/model_runtime/model_providers/localai/localai.yaml @@ -15,6 +15,7 @@ help: supported_model_types: - llm - text-embedding + - speech2text configurate_methods: - customizable-model model_credential_schema: @@ -57,6 +58,9 @@ model_credential_schema: zh_Hans: 在此输入LocalAI的服务器地址,如 http://192.168.1.100:8080 en_US: Enter the url of your LocalAI, e.g. http://192.168.1.100:8080 - variable: context_size + show_on: + - variable: __model_type + value: llm label: zh_Hans: 上下文大小 en_US: Context size diff --git a/api/core/model_runtime/model_providers/localai/speech2text/__init__.py b/api/core/model_runtime/model_providers/localai/speech2text/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/core/model_runtime/model_providers/localai/speech2text/speech2text.py b/api/core/model_runtime/model_providers/localai/speech2text/speech2text.py new file mode 100644 index 0000000000..d7403aff4f --- /dev/null +++ b/api/core/model_runtime/model_providers/localai/speech2text/speech2text.py @@ -0,0 +1,101 @@ +from typing import IO, Optional + +from requests import Request, Session +from yarl import URL + +from core.model_runtime.entities.common_entities import I18nObject +from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType +from core.model_runtime.errors.invoke import ( + InvokeAuthorizationError, + InvokeBadRequestError, + InvokeConnectionError, + InvokeError, + InvokeRateLimitError, + InvokeServerUnavailableError, +) +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel + + +class LocalAISpeech2text(Speech2TextModel): + """ + Model class for Local AI Text to speech model. 
+ """ + + def _invoke(self, model: str, credentials: dict, file: IO[bytes], user: Optional[str] = None) -> str: + """ + Invoke large language model + + :param model: model name + :param credentials: model credentials + :param file: audio file + :param user: unique user id + :return: text for given audio file + """ + + url = str(URL(credentials['server_url']) / "v1/audio/transcriptions") + data = {"model": model} + files = {"file": file} + + session = Session() + request = Request("POST", url, data=data, files=files) + prepared_request = session.prepare_request(request) + response = session.send(prepared_request) + + if 'error' in response.json(): + raise InvokeServerUnavailableError("Empty response") + + return response.json()["text"] + + def validate_credentials(self, model: str, credentials: dict) -> None: + """ + Validate model credentials + + :param model: model name + :param credentials: model credentials + :return: + """ + try: + audio_file_path = self._get_demo_file_path() + + with open(audio_file_path, 'rb') as audio_file: + self._invoke(model, credentials, audio_file) + except Exception as ex: + raise CredentialsValidateFailedError(str(ex)) + + @property + def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: + return { + InvokeConnectionError: [ + InvokeConnectionError + ], + InvokeServerUnavailableError: [ + InvokeServerUnavailableError + ], + InvokeRateLimitError: [ + InvokeRateLimitError + ], + InvokeAuthorizationError: [ + InvokeAuthorizationError + ], + InvokeBadRequestError: [ + InvokeBadRequestError + ], + } + + def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: + """ + used to define customizable model schema + """ + entity = AIModelEntity( + model=model, + label=I18nObject( + en_US=model + ), + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_type=ModelType.SPEECH2TEXT, + model_properties={}, + parameter_rules=[] + ) + + return entity \ No newline at end of file diff --git a/api/tests/integration_tests/model_runtime/localai/test_speech2text.py b/api/tests/integration_tests/model_runtime/localai/test_speech2text.py new file mode 100644 index 0000000000..3fd2ebed4f --- /dev/null +++ b/api/tests/integration_tests/model_runtime/localai/test_speech2text.py @@ -0,0 +1,54 @@ +import os + +import pytest + +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.localai.speech2text.speech2text import LocalAISpeech2text + + +def test_validate_credentials(): + model = LocalAISpeech2text() + + with pytest.raises(CredentialsValidateFailedError): + model.validate_credentials( + model='whisper-1', + credentials={ + 'server_url': 'invalid_url' + } + ) + + model.validate_credentials( + model='whisper-1', + credentials={ + 'server_url': os.environ.get('LOCALAI_SERVER_URL') + } + ) + + +def test_invoke_model(): + model = LocalAISpeech2text() + + # Get the directory of the current file + current_dir = os.path.dirname(os.path.abspath(__file__)) + + # Get assets directory + assets_dir = os.path.join(os.path.dirname(current_dir), 'assets') + + # Construct the path to the audio file + audio_file_path = os.path.join(assets_dir, 'audio.mp3') + + # Open the file and get the file object + with open(audio_file_path, 'rb') as audio_file: + file = audio_file + + result = model.invoke( + model='whisper-1', + credentials={ + 'server_url': os.environ.get('LOCALAI_SERVER_URL') + }, + file=file, + user="abc-123" + ) + + assert isinstance(result, str) + assert result == 
'1, 2, 3, 4, 5, 6, 7, 8, 9, 10' \ No newline at end of file From f361c7004dbcaadb9b65d27965511566df8c33bb Mon Sep 17 00:00:00 2001 From: Minamiyama Date: Tue, 7 May 2024 17:37:36 +0800 Subject: [PATCH 014/267] feat: support vision models from xinference (#4094) Co-authored-by: Yeuoly --- .../model_providers/xinference/llm/llm.py | 93 +++++++++++++------ .../xinference/xinference_helper.py | 14 ++- 2 files changed, 72 insertions(+), 35 deletions(-) diff --git a/api/core/model_runtime/model_providers/xinference/llm/llm.py b/api/core/model_runtime/model_providers/xinference/llm/llm.py index 602d0b749f..cc3ce17975 100644 --- a/api/core/model_runtime/model_providers/xinference/llm/llm.py +++ b/api/core/model_runtime/model_providers/xinference/llm/llm.py @@ -28,7 +28,10 @@ from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta from core.model_runtime.entities.message_entities import ( AssistantPromptMessage, + ImagePromptMessageContent, PromptMessage, + PromptMessageContent, + PromptMessageContentType, PromptMessageTool, SystemPromptMessage, ToolPromptMessage, @@ -61,8 +64,8 @@ from core.model_runtime.utils import helper class XinferenceAILargeLanguageModel(LargeLanguageModel): - def _invoke(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], - model_parameters: dict, tools: list[PromptMessageTool] | None = None, + def _invoke(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], + model_parameters: dict, tools: list[PromptMessageTool] | None = None, stop: list[str] | None = None, stream: bool = True, user: str | None = None) \ -> LLMResult | Generator: """ @@ -99,7 +102,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): try: if "/" in credentials['model_uid'] or "?" 
in credentials['model_uid'] or "#" in credentials['model_uid']: raise CredentialsValidateFailedError("model_uid should not contain /, ?, or #") - + extra_param = XinferenceHelper.get_xinference_extra_parameter( server_url=credentials['server_url'], model_uid=credentials['model_uid'] @@ -111,10 +114,13 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): credentials['completion_type'] = 'completion' else: raise ValueError(f'xinference model ability {extra_param.model_ability} is not supported, check if you have the right model type') - + if extra_param.support_function_call: credentials['support_function_call'] = True + if extra_param.support_vision: + credentials['support_vision'] = True + if extra_param.context_length: credentials['context_length'] = extra_param.context_length @@ -135,7 +141,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): """ return self._num_tokens_from_messages(prompt_messages, tools) - def _num_tokens_from_messages(self, messages: list[PromptMessage], tools: list[PromptMessageTool], + def _num_tokens_from_messages(self, messages: list[PromptMessage], tools: list[PromptMessageTool], is_completion_model: bool = False) -> int: def tokens(text: str): return self._get_num_tokens_by_gpt2(text) @@ -155,7 +161,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): text = '' for item in value: if isinstance(item, dict) and item['type'] == 'text': - text += item.text + text += item['text'] value = text @@ -191,7 +197,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): num_tokens += self._num_tokens_for_tools(tools) return num_tokens - + def _num_tokens_for_tools(self, tools: list[PromptMessageTool]) -> int: """ Calculate num tokens for tool calling @@ -234,7 +240,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): num_tokens += tokens(required_field) return num_tokens - + def _convert_prompt_message_to_text(self, message: list[PromptMessage]) -> str: """ convert prompt message to text @@ -260,7 +266,26 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): if isinstance(message.content, str): message_dict = {"role": "user", "content": message.content} else: - raise ValueError("User message content must be str") + sub_messages = [] + for message_content in message.content: + if message_content.type == PromptMessageContentType.TEXT: + message_content = cast(PromptMessageContent, message_content) + sub_message_dict = { + "type": "text", + "text": message_content.data + } + sub_messages.append(sub_message_dict) + elif message_content.type == PromptMessageContentType.IMAGE: + message_content = cast(ImagePromptMessageContent, message_content) + sub_message_dict = { + "type": "image_url", + "image_url": { + "url": message_content.data, + "detail": message_content.detail.value + } + } + sub_messages.append(sub_message_dict) + message_dict = {"role": "user", "content": sub_messages} elif isinstance(message, AssistantPromptMessage): message = cast(AssistantPromptMessage, message) message_dict = {"role": "assistant", "content": message.content} @@ -277,7 +302,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): message_dict = {"tool_call_id": message.tool_call_id, "role": "tool", "content": message.content} else: raise ValueError(f"Unknown message type {type(message)}") - + return message_dict def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: @@ -338,8 +363,18 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): completion_type = LLMMode.COMPLETION.value else: raise 
ValueError(f'xinference model ability {extra_args.model_ability} is not supported') - + + + features = [] + support_function_call = credentials.get('support_function_call', False) + if support_function_call: + features.append(ModelFeature.TOOL_CALL) + + support_vision = credentials.get('support_vision', False) + if support_vision: + features.append(ModelFeature.VISION) + context_length = credentials.get('context_length', 2048) entity = AIModelEntity( @@ -349,10 +384,8 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): ), fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, model_type=ModelType.LLM, - features=[ - ModelFeature.TOOL_CALL - ] if support_function_call else [], - model_properties={ + features=features, + model_properties={ ModelPropertyKey.MODE: completion_type, ModelPropertyKey.CONTEXT_SIZE: context_length }, @@ -360,22 +393,22 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): ) return entity - - def _generate(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], + + def _generate(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], model_parameters: dict, extra_model_kwargs: XinferenceModelExtraParameter, - tools: list[PromptMessageTool] | None = None, + tools: list[PromptMessageTool] | None = None, stop: list[str] | None = None, stream: bool = True, user: str | None = None) \ -> LLMResult | Generator: """ generate text from LLM see `core.model_runtime.model_providers.__base.large_language_model.LargeLanguageModel._generate` - + extra_model_kwargs can be got by `XinferenceHelper.get_xinference_extra_parameter` """ if 'server_url' not in credentials: raise CredentialsValidateFailedError('server_url is required in credentials') - + if credentials['server_url'].endswith('/'): credentials['server_url'] = credentials['server_url'][:-1] @@ -408,11 +441,11 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): 'function': helper.dump_model(tool) } for tool in tools ] - + vision = credentials.get('support_vision', False) if isinstance(xinference_model, RESTfulChatModelHandle | RESTfulChatglmCppChatModelHandle): resp = client.chat.completions.create( model=credentials['model_uid'], - messages=[self._convert_prompt_message_to_dict(message) for message in prompt_messages], + messages=[self._convert_prompt_message_to_dict(message) for message in prompt_messages], stream=stream, user=user, **generate_config, @@ -497,7 +530,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): """ if len(resp.choices) == 0: raise InvokeServerUnavailableError("Empty response") - + assistant_message = resp.choices[0].message # convert tool call to assistant message tool call @@ -527,7 +560,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): ) return response - + def _handle_chat_stream_response(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], tools: list[PromptMessageTool], resp: Iterator[ChatCompletionChunk]) -> Generator: @@ -544,7 +577,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): if delta.finish_reason is None and (delta.delta.content is None or delta.delta.content == ''): continue - + # check if there is a tool call in the response function_call = None tool_calls = [] @@ -573,9 +606,9 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): prompt_tokens = self._num_tokens_from_messages(messages=prompt_messages, tools=tools) completion_tokens = self._num_tokens_from_messages(messages=[temp_assistant_prompt_message], tools=[]) - usage = self._calc_response_usage(model=model, 
credentials=credentials, + usage = self._calc_response_usage(model=model, credentials=credentials, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens) - + yield LLMResultChunk( model=model, prompt_messages=prompt_messages, @@ -608,7 +641,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): """ if len(resp.choices) == 0: raise InvokeServerUnavailableError("Empty response") - + assistant_message = resp.choices[0].text # transform assistant message to prompt message @@ -670,9 +703,9 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): completion_tokens = self._num_tokens_from_messages( messages=[temp_assistant_prompt_message], tools=[], is_completion_model=True ) - usage = self._calc_response_usage(model=model, credentials=credentials, + usage = self._calc_response_usage(model=model, credentials=credentials, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens) - + yield LLMResultChunk( model=model, prompt_messages=prompt_messages, diff --git a/api/core/model_runtime/model_providers/xinference/xinference_helper.py b/api/core/model_runtime/model_providers/xinference/xinference_helper.py index 66dab65804..9a3fc9b193 100644 --- a/api/core/model_runtime/model_providers/xinference/xinference_helper.py +++ b/api/core/model_runtime/model_providers/xinference/xinference_helper.py @@ -14,13 +14,15 @@ class XinferenceModelExtraParameter: max_tokens: int = 512 context_length: int = 2048 support_function_call: bool = False + support_vision: bool = False - def __init__(self, model_format: str, model_handle_type: str, model_ability: list[str], - support_function_call: bool, max_tokens: int, context_length: int) -> None: + def __init__(self, model_format: str, model_handle_type: str, model_ability: list[str], + support_function_call: bool, support_vision: bool, max_tokens: int, context_length: int) -> None: self.model_format = model_format self.model_handle_type = model_handle_type self.model_ability = model_ability self.support_function_call = support_function_call + self.support_vision = support_vision self.max_tokens = max_tokens self.context_length = context_length @@ -71,7 +73,7 @@ class XinferenceHelper: raise RuntimeError(f'get xinference model extra parameter failed, url: {url}, error: {e}') if response.status_code != 200: raise RuntimeError(f'get xinference model extra parameter failed, status code: {response.status_code}, response: {response.text}') - + response_json = response.json() model_format = response_json.get('model_format', 'ggmlv3') @@ -87,17 +89,19 @@ class XinferenceHelper: model_handle_type = 'chat' else: raise NotImplementedError(f'xinference model handle type {model_handle_type} is not supported') - + support_function_call = 'tools' in model_ability + support_vision = 'vision' in model_ability max_tokens = response_json.get('max_tokens', 512) context_length = response_json.get('context_length', 2048) - + return XinferenceModelExtraParameter( model_format=model_format, model_handle_type=model_handle_type, model_ability=model_ability, support_function_call=support_function_call, + support_vision=support_vision, max_tokens=max_tokens, context_length=context_length ) \ No newline at end of file From 543a00e5977b663df1d06481e289ff9a5c53de5b Mon Sep 17 00:00:00 2001 From: VoidIsVoid <343750470@qq.com> Date: Tue, 7 May 2024 17:43:24 +0800 Subject: [PATCH 015/267] feat: update model_provider jina to support custom url and model (#4110) Co-authored-by: Gimling Co-authored-by: takatost --- .../model_providers/jina/jina.yaml | 38 ++++++++++++++++++ 
.../model_providers/jina/rerank/rerank.py | 24 ++++++++++- .../jina/text_embedding/text_embedding.py | 40 ++++++++++++++----- 3 files changed, 91 insertions(+), 11 deletions(-) diff --git a/api/core/model_runtime/model_providers/jina/jina.yaml b/api/core/model_runtime/model_providers/jina/jina.yaml index 935546234b..23e18ad75f 100644 --- a/api/core/model_runtime/model_providers/jina/jina.yaml +++ b/api/core/model_runtime/model_providers/jina/jina.yaml @@ -19,6 +19,7 @@ supported_model_types: - rerank configurate_methods: - predefined-model + - customizable-model provider_credential_schema: credential_form_schemas: - variable: api_key @@ -29,3 +30,40 @@ provider_credential_schema: placeholder: zh_Hans: 在此输入您的 API Key en_US: Enter your API Key +model_credential_schema: + model: + label: + en_US: Model Name + zh_Hans: 模型名称 + placeholder: + en_US: Enter your model name + zh_Hans: 输入模型名称 + credential_form_schemas: + - variable: api_key + label: + en_US: API Key + type: secret-input + required: true + placeholder: + zh_Hans: 在此输入您的 API Key + en_US: Enter your API Key + - variable: base_url + label: + zh_Hans: 服务器 URL + en_US: Base URL + type: text-input + required: true + placeholder: + zh_Hans: Base URL, e.g. https://api.jina.ai/v1 + en_US: Base URL, e.g. https://api.jina.ai/v1 + default: 'https://api.jina.ai/v1' + - variable: context_size + label: + zh_Hans: 上下文大小 + en_US: Context size + placeholder: + zh_Hans: 输入上下文大小 + en_US: Enter context size + required: false + type: text-input + default: '8192' diff --git a/api/core/model_runtime/model_providers/jina/rerank/rerank.py b/api/core/model_runtime/model_providers/jina/rerank/rerank.py index f644ea6512..de7e038b9f 100644 --- a/api/core/model_runtime/model_providers/jina/rerank/rerank.py +++ b/api/core/model_runtime/model_providers/jina/rerank/rerank.py @@ -2,6 +2,8 @@ from typing import Optional import httpx +from core.model_runtime.entities.common_entities import I18nObject +from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult from core.model_runtime.errors.invoke import ( InvokeAuthorizationError, @@ -38,9 +40,13 @@ class JinaRerankModel(RerankModel): if len(docs) == 0: return RerankResult(model=model, docs=[]) + base_url = credentials.get('base_url', 'https://api.jina.ai/v1') + if base_url.endswith('/'): + base_url = base_url[:-1] + try: response = httpx.post( - "https://api.jina.ai/v1/rerank", + base_url + '/rerank', json={ "model": model, "query": query, @@ -103,3 +109,19 @@ class JinaRerankModel(RerankModel): InvokeAuthorizationError: [httpx.HTTPStatusError], InvokeBadRequestError: [httpx.RequestError] } + + def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: + """ + generate custom model entities from credentials + """ + entity = AIModelEntity( + model=model, + label=I18nObject(en_US=model), + model_type=ModelType.RERANK, + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_properties={ + ModelPropertyKey.CONTEXT_SIZE: int(credentials.get('context_size')) + } + ) + + return entity \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py index da922232c0..74a1aabf7a 100644 --- a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py +++ 
b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py @@ -4,7 +4,8 @@ from typing import Optional from requests import post -from core.model_runtime.entities.model_entities import PriceType +from core.model_runtime.entities.common_entities import I18nObject +from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult from core.model_runtime.errors.invoke import ( InvokeAuthorizationError, @@ -23,8 +24,7 @@ class JinaTextEmbeddingModel(TextEmbeddingModel): """ Model class for Jina text embedding model. """ - api_base: str = 'https://api.jina.ai/v1/embeddings' - models: list[str] = ['jina-embeddings-v2-base-en', 'jina-embeddings-v2-small-en', 'jina-embeddings-v2-base-zh', 'jina-embeddings-v2-base-de'] + api_base: str = 'https://api.jina.ai/v1' def _invoke(self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None) \ @@ -39,11 +39,14 @@ class JinaTextEmbeddingModel(TextEmbeddingModel): :return: embeddings result """ api_key = credentials['api_key'] - if model not in self.models: - raise InvokeBadRequestError('Invalid model name') if not api_key: raise CredentialsValidateFailedError('api_key is required') - url = self.api_base + + base_url = credentials.get('base_url', self.api_base) + if base_url.endswith('/'): + base_url = base_url[:-1] + + url = base_url + '/embeddings' headers = { 'Authorization': 'Bearer ' + api_key, 'Content-Type': 'application/json' @@ -70,7 +73,7 @@ class JinaTextEmbeddingModel(TextEmbeddingModel): elif response.status_code == 500: raise InvokeServerUnavailableError(msg) else: - raise InvokeError(msg) + raise InvokeBadRequestError(msg) except JSONDecodeError as e: raise InvokeServerUnavailableError(f"Failed to convert response to json: {e} with text: {response.text}") @@ -118,8 +121,8 @@ class JinaTextEmbeddingModel(TextEmbeddingModel): """ try: self._invoke(model=model, credentials=credentials, texts=['ping']) - except InvokeAuthorizationError: - raise CredentialsValidateFailedError('Invalid api key') + except Exception as e: + raise CredentialsValidateFailedError(f'Credentials validation failed: {e}') @property def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: @@ -137,7 +140,8 @@ class JinaTextEmbeddingModel(TextEmbeddingModel): InvokeAuthorizationError ], InvokeBadRequestError: [ - KeyError + KeyError, + InvokeBadRequestError ] } @@ -170,3 +174,19 @@ class JinaTextEmbeddingModel(TextEmbeddingModel): ) return usage + + def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: + """ + generate custom model entities from credentials + """ + entity = AIModelEntity( + model=model, + label=I18nObject(en_US=model), + model_type=ModelType.TEXT_EMBEDDING, + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_properties={ + ModelPropertyKey.CONTEXT_SIZE: int(credentials.get('context_size')) + } + ) + + return entity From 591b9936855bba0734caf20a3c3220d95c7d35c4 Mon Sep 17 00:00:00 2001 From: Jyong <76649700+JohnJyong@users.noreply.github.com> Date: Tue, 7 May 2024 17:47:20 +0800 Subject: [PATCH 016/267] fix dataset segment update api not effect issue (#4151) --- api/services/dataset_service.py | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 17399c8ac8..ab8b8487af 100644 --- 
a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -43,6 +43,7 @@ from services.vector_service import VectorService from tasks.clean_notion_document_task import clean_notion_document_task from tasks.deal_dataset_vector_index_task import deal_dataset_vector_index_task from tasks.delete_segment_from_index_task import delete_segment_from_index_task +from tasks.disable_segment_from_index_task import disable_segment_from_index_task from tasks.document_indexing_task import document_indexing_task from tasks.document_indexing_update_task import document_indexing_update_task from tasks.duplicate_document_indexing_task import duplicate_document_indexing_task @@ -1241,6 +1242,25 @@ class SegmentService: cache_result = redis_client.get(indexing_cache_key) if cache_result is not None: raise ValueError("Segment is indexing, please try again later") + if 'enabled' in args and args['enabled'] is not None: + action = args['enabled'] + if segment.enabled != action: + if not action: + segment.enabled = action + segment.disabled_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) + segment.disabled_by = current_user.id + db.session.add(segment) + db.session.commit() + # Set cache to prevent indexing the same segment multiple times + redis_client.setex(indexing_cache_key, 600, 1) + disable_segment_from_index_task.delay(segment.id) + return segment + if not segment.enabled: + if 'enabled' in args and args['enabled'] is not None: + if not args['enabled']: + raise ValueError("Can't update disabled segment") + else: + raise ValueError("Can't update disabled segment") try: content = args['content'] if segment.content == content: @@ -1248,8 +1268,9 @@ class SegmentService: segment.answer = args['answer'] if 'keywords' in args and args['keywords']: segment.keywords = args['keywords'] - if 'enabled' in args and args['enabled'] is not None: - segment.enabled = args['enabled'] + segment.enabled = True + segment.disabled_at = None + segment.disabled_by = None db.session.add(segment) db.session.commit() # update segment index task @@ -1294,12 +1315,16 @@ class SegmentService: segment.completed_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) segment.updated_by = current_user.id segment.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) + segment.enabled = True + segment.disabled_at = None + segment.disabled_by = None if document.doc_form == 'qa_model': segment.answer = args['answer'] db.session.add(segment) db.session.commit() # update segment vector index VectorService.update_segment_vector(args['keywords'], segment, dataset) + except Exception as e: logging.exception("update segment index failed") segment.enabled = False From 2fdd64c1b570cf25c1101c51fb012d3cdf3e645c Mon Sep 17 00:00:00 2001 From: Moonlit Date: Tue, 7 May 2024 18:12:13 +0800 Subject: [PATCH 017/267] feat: add proxy configuration for Cohere model (#4152) --- .../model_providers/cohere/cohere.yaml | 18 ++++++++++++++++++ .../model_providers/cohere/llm/llm.py | 9 +++++---- .../model_providers/cohere/rerank/rerank.py | 2 +- .../cohere/text_embedding/text_embedding.py | 4 ++-- 4 files changed, 26 insertions(+), 7 deletions(-) diff --git a/api/core/model_runtime/model_providers/cohere/cohere.yaml b/api/core/model_runtime/model_providers/cohere/cohere.yaml index c889a6bfe0..bd40057fe9 100644 --- a/api/core/model_runtime/model_providers/cohere/cohere.yaml +++ b/api/core/model_runtime/model_providers/cohere/cohere.yaml @@ -32,6 +32,15 @@ provider_credential_schema: 
zh_Hans: 在此输入您的 API Key en_US: Enter your API Key show_on: [ ] + - variable: base_url + label: + zh_Hans: API Base + en_US: API Base + type: text-input + required: false + placeholder: + zh_Hans: 在此输入您的 API Base,如 https://api.cohere.ai/v1 + en_US: Enter your API Base, e.g. https://api.cohere.ai/v1 model_credential_schema: model: label: @@ -70,3 +79,12 @@ model_credential_schema: placeholder: zh_Hans: 在此输入您的 API Key en_US: Enter your API Key + - variable: base_url + label: + zh_Hans: API Base + en_US: API Base + type: text-input + required: false + placeholder: + zh_Hans: 在此输入您的 API Base,如 https://api.cohere.ai/v1 + en_US: Enter your API Base, e.g. https://api.cohere.ai/v1 diff --git a/api/core/model_runtime/model_providers/cohere/llm/llm.py b/api/core/model_runtime/model_providers/cohere/llm/llm.py index 6ace77b813..f9fae5e8ca 100644 --- a/api/core/model_runtime/model_providers/cohere/llm/llm.py +++ b/api/core/model_runtime/model_providers/cohere/llm/llm.py @@ -173,7 +173,7 @@ class CohereLargeLanguageModel(LargeLanguageModel): :return: full response or stream response chunk generator result """ # initialize client - client = cohere.Client(credentials.get('api_key')) + client = cohere.Client(credentials.get('api_key'), base_url=credentials.get('base_url')) if stop: model_parameters['end_sequences'] = stop @@ -233,7 +233,8 @@ class CohereLargeLanguageModel(LargeLanguageModel): return response - def _handle_generate_stream_response(self, model: str, credentials: dict, response: Iterator[GenerateStreamedResponse], + def _handle_generate_stream_response(self, model: str, credentials: dict, + response: Iterator[GenerateStreamedResponse], prompt_messages: list[PromptMessage]) -> Generator: """ Handle llm stream response @@ -317,7 +318,7 @@ class CohereLargeLanguageModel(LargeLanguageModel): :return: full response or stream response chunk generator result """ # initialize client - client = cohere.Client(credentials.get('api_key')) + client = cohere.Client(credentials.get('api_key'), base_url=credentials.get('base_url')) if stop: model_parameters['stop_sequences'] = stop @@ -636,7 +637,7 @@ class CohereLargeLanguageModel(LargeLanguageModel): :return: number of tokens """ # initialize client - client = cohere.Client(credentials.get('api_key')) + client = cohere.Client(credentials.get('api_key'), base_url=credentials.get('base_url')) response = client.tokenize( text=text, diff --git a/api/core/model_runtime/model_providers/cohere/rerank/rerank.py b/api/core/model_runtime/model_providers/cohere/rerank/rerank.py index 4194f27eb9..d2fdb30c6f 100644 --- a/api/core/model_runtime/model_providers/cohere/rerank/rerank.py +++ b/api/core/model_runtime/model_providers/cohere/rerank/rerank.py @@ -44,7 +44,7 @@ class CohereRerankModel(RerankModel): ) # initialize client - client = cohere.Client(credentials.get('api_key')) + client = cohere.Client(credentials.get('api_key'), base_url=credentials.get('base_url')) response = client.rerank( query=query, documents=docs, diff --git a/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py index 8269a41810..0540fb740f 100644 --- a/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py @@ -141,7 +141,7 @@ class CohereTextEmbeddingModel(TextEmbeddingModel): return [] # initialize client - client = cohere.Client(credentials.get('api_key')) + client = 
cohere.Client(credentials.get('api_key'), base_url=credentials.get('base_url')) response = client.tokenize( text=text, @@ -180,7 +180,7 @@ class CohereTextEmbeddingModel(TextEmbeddingModel): :return: embeddings and used tokens """ # initialize client - client = cohere.Client(credentials.get('api_key')) + client = cohere.Client(credentials.get('api_key'), base_url=credentials.get('base_url')) # call embedding model response = client.embed( From 97dcb8977ac0eef0e0eafedd0480ca1be77f5d58 Mon Sep 17 00:00:00 2001 From: Whitewater Date: Tue, 7 May 2024 21:00:43 +0800 Subject: [PATCH 018/267] fix: stop event propagation when deleting selected workflow var node (#4158) --- web/app/components/base/prompt-editor/hooks.ts | 1 + .../plugins/workflow-variable-block/component.tsx | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/web/app/components/base/prompt-editor/hooks.ts b/web/app/components/base/prompt-editor/hooks.ts index 79fe07f0c2..03074b9b17 100644 --- a/web/app/components/base/prompt-editor/hooks.ts +++ b/web/app/components/base/prompt-editor/hooks.ts @@ -64,6 +64,7 @@ export const useSelectOrDelete: UseSelectOrDeleteHanlder = (nodeKey: string, com editor.dispatchCommand(command, undefined) node.remove() + return true } } diff --git a/web/app/components/base/prompt-editor/plugins/workflow-variable-block/component.tsx b/web/app/components/base/prompt-editor/plugins/workflow-variable-block/component.tsx index 779c601a54..eb2aa0be6c 100644 --- a/web/app/components/base/prompt-editor/plugins/workflow-variable-block/component.tsx +++ b/web/app/components/base/prompt-editor/plugins/workflow-variable-block/component.tsx @@ -9,6 +9,7 @@ import { } from 'lexical' import { mergeRegister } from '@lexical/utils' import { useLexicalComposerContext } from '@lexical/react/LexicalComposerContext' +import cn from 'classnames' import { useSelectOrDelete } from '../../hooks' import type { WorkflowNodesMap } from './node' import { WorkflowVariableBlockNode } from './node' @@ -61,11 +62,11 @@ const WorkflowVariableBlockComponent = ({ const Item = (
From 58bd5627bfc6dd59e92d55d2938038f648fb5ff5 Mon Sep 17 00:00:00 2001 From: Joshua <138381132+joshua20231026@users.noreply.github.com> Date: Tue, 7 May 2024 22:45:38 +0800 Subject: [PATCH 019/267] Add-Deepseek (#4157) --- .../model_providers/deepseek/__init__.py | 0 .../deepseek/_assets/icon_l_en.png | Bin 0 -> 8232 bytes .../deepseek/_assets/icon_s_en.png | Bin 0 -> 11573 bytes .../model_providers/deepseek/deepseek.py | 30 ++++++++++++++++++ .../model_providers/deepseek/deepseek.yaml | 28 ++++++++++++++++ .../model_providers/deepseek/llm/__init__.py | 0 .../deepseek/llm/_position.yaml | 2 ++ .../deepseek/llm/deepseek-chat.yaml | 26 +++++++++++++++ .../deepseek/llm/deepseek-coder.yaml | 26 +++++++++++++++ .../model_providers/deepseek/llm/llm.py | 27 ++++++++++++++++ 10 files changed, 139 insertions(+) create mode 100644 api/core/model_runtime/model_providers/deepseek/__init__.py create mode 100644 api/core/model_runtime/model_providers/deepseek/_assets/icon_l_en.png create mode 100644 api/core/model_runtime/model_providers/deepseek/_assets/icon_s_en.png create mode 100644 api/core/model_runtime/model_providers/deepseek/deepseek.py create mode 100644 api/core/model_runtime/model_providers/deepseek/deepseek.yaml create mode 100644 api/core/model_runtime/model_providers/deepseek/llm/__init__.py create mode 100644 api/core/model_runtime/model_providers/deepseek/llm/_position.yaml create mode 100644 api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml create mode 100644 api/core/model_runtime/model_providers/deepseek/llm/deepseek-coder.yaml create mode 100644 api/core/model_runtime/model_providers/deepseek/llm/llm.py diff --git a/api/core/model_runtime/model_providers/deepseek/__init__.py b/api/core/model_runtime/model_providers/deepseek/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/core/model_runtime/model_providers/deepseek/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/deepseek/_assets/icon_l_en.png new file mode 100644 index 0000000000000000000000000000000000000000..25254d7f533b431b5235f90b6b468f6049046d85 GIT binary patch literal 8232 zcmV+@AlKhgNk&E>AOHYYMM6+kP&il$0000G0000B0|2A|06|PpNW2UH00Bp!GyoDd zN~PhfdI}K{uzz_9_*QMpZm!$5BGgguzLX3GUCIK3F8kpH`Xk5&gPx#2ha?Dy=l+_a z)>2ET^}n{ei2g&AZL4atu_aeY=5p=@#XLSBT&s*z_#-GsZvO=6a-RPLSiDU?O|N?d z*mBN}p6{lVPo)Kp{LpxrDGNm>`ajWi<_7CO5Bl_KMlRTH_uQ@r#9@L(@*K+ znP-;E8g%v(jYBWNgz~*dX)pZjaCU5PCC`-2B?1(#Mqi$I%nSi%_T&{9ffhk<$&yu` zy35#|_7ZFq>r;Mu%%{lt;4!WyAI`H6St z(OAg?RJ(~WUx*yhJ~bX{lmzOm7Byy#XPFc(YQ$ur)f&L#7Fz;ny6!wIzjre z#&1(4b*=<6%A0l04|Dd+*xGaTQO(|Eb{t!K&Q7dtE5cpR4k)1p2iEpbZaPpx-CR`H z-nHnFJ`h?D)irie638{LWw`B8_XRr#vSaY0$r-fHAys2$&!&TmTGxNdMxgbNU9TeN zP~#kO&C+t+cdMQANU$G%FnIqyiySE)wNtWy5@pHump(Y{P|IL7|(S^9q%?aCF; zY@I{O*QvVtcK1D%_OF;OS7&9J1M@2J_E*eOpWKp8ZH;r-^R+5dYv+*iZ&Ec*z;xvI zsD|JKtVgc@B5UvtTfSQLM(^UfhrQt93->0tha_<2yI6DZ4>@12N;*^U58Y*0qk$$k zh@}FAOJKEG6FkIz9dg7*^%O&JHgh?uwRbX{M7j{U1r|Q7$M{au^!--erH|^yPn2`k zy*!&dQ}q{2SETFvX}b5e-%~q#7J*+JKb3R;yXlwnX5XiY(LI7^Qy_x zbnjGTm*E;iNZD6ZSc-m#-zr-l5skn{NZZ$42vSu6s4U=}*1leYhxW{tM0fsOK%BR- z<{^AR{Dm^g>R^FIFY65g+!+E;flmPMiVqh$&+>j3%o|w$n>Cs-MlDGhU-U0FvH(w2 z06XKiC>wqn?ozcUBqqhTPmR3c3vrug5J!eK<<2L%)fT|aQTkT`&JDE+k0RPu7AP!) zTR(%&JXt@7vKzj%SzCY^0Ccq3Mfk1mp?{kp{F&(z{6-b+2L57L+Uoc4;2f$ifQ?P? 
zU!ca;z!N6*X@{M|5Ef&6X9BrJ;$|DCJD5TY0c@nK&eKCMZ00N;`JX>iBsDWKx|7h@ z5}0kbv!x+%0Rb$GlK08_nSeKAmV48YCAG5?#?J`HmOyS)-;FcfUN1jp6|8kAnFGEt z)H~{ZTjd~7^P?t_z##d~NZwRa0Gv?>paG>n_o)2({wn$|-<5hweTe{*Ft@9h9>9Q_bh6aF8Umm!vfPx>@;1F- z)z1NT^8BOUztY0;05mSgKQJLo!Yoz-#P+G?=7y}}4sW_Hzhh@$KqP33umH1i_!Qh!9!NT?L_`yn*&slN2=Fdd(*OoO2m6Z9m>;c;U79Xo zHt80IF4|3_L2`9aQ%pI2>QEXTY#>;`riZRPQ(*a_I1^8x z*^ONR-axJ)fDAAJQRy&ZSR@5OBBLuq01-W#!9z+km9t;Ti^CxyxafGAT#z#y*~8}VNN z7mG#;*9hnX0-*slx}8CjOu?oSL4bxK0K!o)2uQ=6{**^Jq~t#MZ0E#>kp~JWX)Age z1>|rJXHeJ9x~MONvk(O63Qz%Lv>-M_Ea6T07Z!6+0F`NLaEPEuwgIeYRF3P-)?VxG}ORn%EKNMR&x(0Z+0rwq7r`FBY%_7>xqxbV3abtw5+y zP`8i)29E+5aj=<2*oM*p3~1z{df_=EpEF|#u)qSKY{G~Gh=j_@23J$qV_JdV;8k8(5ZF2NOG00clluf%pCRJE#`DLJBz%w@@90RUD~05=##d+>_} zmk=PUj7VHIDU5^)#d07J7#KtXEiAf`GUI_>sV66b0CNQn!D9(Y&Pvs6@oKG3GKzjF zLdMEmftJLm&U#9BW*eqlMe<0)F6g8Q2>&1@t@>Eg!7 zbNcA>$W9PoF$$o7nme~)7@0&X=xn)*?1al zV!PI$J3Gu!H?oHMcou+zW=i^y4{SVEdm4cQHHmCC|78^;1ODJma(EV^02DM6n%ly) z%z;Fst5_gLjzLp_-Oi4X8MG8&Q@kF$mf4!wu&|6WBjdbQbg=@2sedU2z(*L`5el%G z*#{P0OA@n9bcPz~JcaXF2wBrX3=9DnNMl^{^k5(8$VUiqi3*33hhPuBr}9!x8&TnN zb}d>sWOvKega#-8*1aN{HUSi1a~BU>$sFvA&^YL_LV_mfQAyO@{o zEV%$K>05K9D>a1ThVxXX2_av>rzrxobp#kRa8qKwkhwD5k5Y$Jx~y++-4jVM{f(jq zp#Le;l=NvLX`-t=<_VAt0^D@eq(h<^YcEw;qgkJU@8!)@z-{rP82TNJQgrEyNab&a z7)a<%gzEBUiN5!~*KVZV07bs9?-!aZHs#B?UwIO+GXSo? zBJjp*s3gGWD<_!-Zf;jJ*DCHqEQaJFT(1uV8Yv9QPZoZZ;9fxh3|WS@oY^jD4y0RQ zCajXKj1+3@tel0{&F&jT02PCoFK9N^ljv4TN=U8+We#>8(p*5%Z-=`_5dddYN8C}t zB-kwX2`q%<>R`t)NO41*8!HN+1DVK_6i8-WTa?{`F+pZR3at@?6*p~0VhXUM0$A9! zu`=@Kut2bB6@gK}aMDa**f8+8EvDKiKtmBg$71FRW>woJ>H00O_3(jC2#R=SWcXb>#sR-sBZVqk8nSS6Ov zC#^`Y5YLRfkOUtMBdodr2o`fR$L`td>^fThP&dV40fVM5%=wQR6@>C_hSxMja8mi` z1MC6K{{(d>RR9Q+DRV=l$@v@2CVQ;|{+Rd_P=fd~NImNO^N^wz0|aGq|0s{pAMcg~ zxWfW`WC1Wt=3zXi@6fnEOs(mqo4!9~isElAx4Nj)=a*}}DR^hkeg%aI(7SW5?_&Qd z2+&CaDE#kK{*L~#G&H1I1Y}!Gt$*)=^MA4^Gc5q-j-|D)FimU?NPcLhp|$52Z2{bJ zv2hwVt|Qp4u10hb3)jBmkXfS!x&%OL=P}x@uTXyy_{tu`GO;zVPp$bqy4HOx`e_A? zbF{u65Zy@yQ{PFD&l7UFplr&a*Zv4MBi<1;5eQ)pXfavc`yYJmUpdFPyhV> zI;4C;*LiD3`aZXoapSJ!*o(%k!&#CdZ$V4OUP%AU+cS0PtY|odGHg1El~ykwTnG zC8MIDr}5g*uo4MmZsBKsVEzI40qf3yyaD=W{LT;u@DIQb#a|!~pbl^kzz@I+U;Lmi zz&`*kyo%4%e^ULj@G8l^7yEzhr}-b%Tw#fj>`wTz|>-2>##b8}=*q1KyAQujyyIPwF3!9{?ZG|ImN>|Hs-V=mGre{FnXT zXRrBxyI%$W)PL;$=70b9q5uE?%iIJ1=cot#zyJUL`)~RI{2%ZG@E`Yozrzgkx3oK0 zda3P%&tS1bt&b5H64;kCjA-LuMrL0x2qJKpU<=E_2%IJu0@?gO5?}<(G;ur8TJL-h zL`y*kg<>KK zP7@3Pd3Zq+gu?({LrIvbo5qOtPVv{#=(yA586%9 z0)uqwyjan?C#@G_Z~b=g=A)0ZpxOz;*mRH;bczedMZNQqPVI_J4XV~_aSGj*beB2Q z7%>0{GU|70l$aRbfcqnuLx01{vZJm%uD)z~Un9%;=`KGChN-|7s+=5&`9#I&1!G-` zK{e|yw@3?1@0rv7xh+MBM>O3Obdex_%IW1aNtvEtvtnu&b$+n7SJ*Q^R)uajiAQI& zk$Fv>HQ)9mji%R|u=JVV^mN=Z0~JglQN{sh1- zafkk+4W|{^yR|d&m%<*5r^@q&L!${S#Vhv=etpKx1Izyd217{s8B06EYyrQHsRT|F z3;}bD7`}|mzFrVS;V{4#lO(dvKhJ03?VKk0ctI0{!vJ1h5JcfHz!#T<5jadHfB^ll zFiy+phwFmn*jBEKAV>aEsyRp`Z6fGKmt*TKIh5`{Vc^IKfJ>B&w2zrE&(^l#fclsz zDJ9C2)6s%c1DI?9pqQ{bOWa$8Q-0WoakRsxFF=xIiA+JTL3``7Tk?b!cNuqtE=*~_ zxpFFRgY?ik4`;>0w<*LBRj7IGQUgGN@)5oob%<-IFtD&RfItisSRBmr&7;8Btn{nk zGWO%IRD#jAZsO>{tu>$k000x9;1qwaJR8h6G~{jY%s=DO^Z*0{azj_kNI-h@C5Jcp z*?GLh7q0AIsZ#?029Q!?)Pb=`1?Rn&Bv?GSjn~Gmtd+i7ECP6>JjH_MS{-~OfR(! 
zuhR~i-;ykO)2-}D9d7w@xVQ9%AOHXW1I#At)mF~#=cjQpgj9+?@)8y8*QI2k%o*3| z001bFh;N+Rtq285>-AAHeT<4`jjwHgB0e0^aSQ^n=j>s$n6Ic=Kk7#Ci~*i>MfOLY z7&i1}bqS5$^D3FU$Jt1)qr}~I!k6PU&_BHIu3?oaD0C1NE6U8&{T$=eGs+jLKu}o+ zRN(&d0Gvf*00004YHTRENs5VR@t|M)emn6i4yAk<2uFPvu*g z;mM|lX-6sKza`MhYHaIcuxQv$3#b#6JO$%}ctR>47RLj|X}@=aC+&#tgHyn+g^S zRQSm{R1c(3G?|J#`NJ6847VV*Bgh9z8q2&>3aN-bZz|Mw=&EE4~Gf&^ab%(gJd}2$m#%kQamSW`Cln*H{ zDk=SNg+41gBun2rcVBU9(^@(l=8mJ^#JcqWfD_Mr8@H69Whld9yyW}A;aqg$WvB*y zZyi`QK*K%}=fBy5*vqyckz~fma3Y#?eKlr7bB_%b7d62=P>aq!rf0eRa=rd1q>SLJ z^u4W1PS%#NemnM$wT5^R!!KfQ`~TpyHvXXgmDF!kkkit~bwBFa@t&F?nyi)Q`W><& z)nMam`pIn>F%Dv^kcMJ-sQ>@k43s=8jVFaO+-SON(yWgi%u)k32wytsuwPHtC9^aa z@;|WoegEdzx1~t8J95G4%U~md_!m|Q%^k}q*okx}Xo&+H`h$ZT zFl^%@WNSN(w;vDj60kJBfu0e0PWkAOHJ7mayKP)^p%M z>OXag+dM0Z#eq)Y?;Umr-nfs$C8vCA%i~`TH4V zE~tK}`YYo{f#Q+~u!1&A1QM9doZ~WS!6sipQywaFifT#{xwJ^AL?sV9`5ZuXZaHaB z>2EBZ)-D(7?Stz$9zXp*xg?SVjv)FQxj!sLX4*=mObW+Cv&u_#{rV{FWlPp=$x??= zz;8wq%>ge5#_n0@+zfbJ-Ho9 zW;*HQShEHyj^J;Rak}qCPLdlvI6T}p;mkGc^cRl~i5S^^LJk-5hHz3vHv)+GH?mBVrge9!dEG7`t+frX33-@FjGRL(Ti_{u zM6j8Onv`&ujB>eRL;X}vQi?(8esEjjWgYUDc5t%e>oi5Z;0Fp~EZ+FBlRO9Vso{caFagWebrI zo%aEXU85ktzQRbPFL=-6QmkZNyWSn23brfx+8p`Y5$W7d43*0b8oAGGuB#RMCf4LV$p;0k|hMv-z z^k(qFyAMy%MgyrhaqfOt%Sm7I;Gz;$2n&WF5VwrbuEE@M_GvTLIkOP|CGxw@3;RW1 z<6!3_3kw0uPVHhkLe=E2oa0%&GFs39D$x z0ohUDl(3+RyP<8!~r3MGio?r?07Dtno@S|*?PkG4?{j6pnlt-OLM32LS$pYli(DH_ezn= zo*^$8^g$Nm2lIsRrQFZ84m@mVHhJ;3L89Wn$!g(a-kKz65}OD64c9m}FOp9vyOOPg zY#-mOl40=tH@!A%ygx^M;)y5Mf=xlp3mIpxO07mMO$oPp^HAN+lVU%kVw_v}*ZYA- zwYyF4YRV+V8Tplj+F*SyKlSwNr#OBw7tr41d2jbitg^ zCxd2-4)aCO|JREPpCP)8;g6oyw(IytSrStj1Qzs(4tOR2S!S-M_5nb(oh^3GH6}DV zmp;GjeAFfPGJ5kkSTyssRE9=Y%Pwhp#@f_w4D31THzaV{Px%_Z&I{~sGYk5H-xl5ldSIv+Ma|ha%!qr?zBv7YhzolJsp8l* z6*it0Xp35Eq94|_072RfZ%%?-zH`vKWUGO`Q1t}(GO3Hh>*301t2CVG48l&tp1mxI z*3&2ja(h=UR-kLFWl`^cawzeX6^;FyH46~)l%JEN{$AYOo5r>#EFK2SLQ^LV+7MWV z@x2Q(I*r~!rz;)H-$+)8Y}Ls?BN{tqhUV1p#auVgcLDJ)%Xql zMwS&S_0ie5l$Vti0oC=bKJ7pwzfO`e=rmUGhamf0pYFkgA6`tWL6A|FQT1XbC@1K= zSgCIQ16bGrooE>cb3I!BaCNxL9*;T69wYCbo&@}?#?iN$sRr46Ke*Vpb{p=4kxD2Q z2UT#W4Y(Eh&2vbj?(MRkfa1`Gvit`?tUE8qMYvMd1B%M&vZe&L)@Nij$C=96My<{) z;#HLHx*K*0VK)-a^0a2X2Ij7QTlaY-WQ9)^sW<5)Du-bOVNftQ)_x33%^f+AylgLV zLDS^;jh;Ht=m`=}m0gcld3g;ej`AbSEG`^55OgBM06Nj_-}C*zl3smjr)BJ`xT7$+ z-p>;V;S8iIxYD@777OQ&;QV54olZ6TSpQ8tkC^j?;9hVOv7OS(GjnLVB6Dk_t|Wvbif6hAw11OHEnJ5bp!Gu89g zNHC#5;BVoGZ=Bny*+N#RVKkTngT^KB2aX9NTE>w@bZ-z5T98i5lEO)l@qf~O+xz^N zO2O0TG&xuG7e>&s9-;vVCAob(=))o}M>uk6&?r8ae1!TlabQtt5( z00000003A=(7)5eR$R|+&qrE<@<{T*?rlGjQ#*(`=#sYx5ctFoPyhe`0000002J?+ z?!?vmq{yHOv<`w>i%87o2IDUeL6%nMm)jd%Gi9+orO8xEb&vq}6q?@(s(NdCogB@S ztpAGvNZ&M_Cciw{Uu3!6lYqPgBdz-Eag6c!dKv0JN8`jIdCsr?tEV`e{wl8|6Bqyh z000000WC}~KviXPaT22!S)KH_!J*2n{noj0sC7Z;a^(B+$|GsjR*dX9%&h@l&9e9Z a1Y}P)>K&h2S*%OWz$)MX0000000007YNdz( literal 0 HcmV?d00001 diff --git a/api/core/model_runtime/model_providers/deepseek/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/deepseek/_assets/icon_s_en.png new file mode 100644 index 0000000000000000000000000000000000000000..3271f5cfe48fd9a4609bf11f1a2838812bf3769d GIT binary patch literal 11573 zcmcI~1ymeQv+pcU&_x3*4ncxD!6CT2>*8*K1PKrb?i$=J1PBg6f=h6R;O+z`0RnHy zKi~cCz2}|t&O7(Lp4sW&RCU!<_w>|M*X+aG!zzF+CnYNdfIt8Mf^ERVI$VydgoLq* znzEFvg5)0tT&z9R!3}{80300MUDc$;DRgx8D1iS|OrdVhVk#;M|B3Y9;ML5db^w@U ze$@5fnE$6!G;<3#D9k_UyKlPkb)licF0626Qrm*@4Gd*InfAC{pD4^D^4zM=%FihiM{|aWqpYqrf zbPGpKbyyAg^K%2#0BJxRpa9H(SAY#*54Z!&u-XweX8(IRjz4lrfD=px3OhOjZva=A zgcV>7lVyTYUjc4_1+2D&(Jf$`J*)++{Cn*GJ*IA!oR9ioT}GD!0K|=lhjT^%Ku!mM 
z-yshVw|Nf_zw-eAehvWI6aOvmln5KnV;DX5-!z&`0Kf_afcmz7)65D1paC|z8E=^GNBuzlEC6VD1AxLX0Hme?z%$tO zunZm+00{sQ9svOX9uamxL_|bFMn^%06)ZF~RCF*F4h|R#48|pZ5aQwyRv7lIZ5ETFi2El_t z58VI>Y@)*h@Sw*Y{4FpGLGTE0FeWkVL4*4*JOBX^4ju{g@Et&h(c!S+v0-;{Ui4q0 z|37Q^(jD7MgUm8H=v&?I^~WxmMg%Lb0~#?I>W3U>gi}n0YUh3hG3Wd>0koYjGwWF~ z_TmRrcOy@W*ms3YYE)>s(NcIW=SEbf|^;a8MFB5|&AOu<&c3 za&d8>3f>uT0b__YM%DXvjA%NcKTcQ)pRKF7+dC^&6wVXaDe~-KZ#ie!GJk4=He_-D zt|sjum0S^l1={>#nLEtBFF!T?Rd69w?IM+_nltH{^K?Pu&6)!|xhQ}fvXCl2jkP5} zd@|bp$6z#;n6S|a*(pQAmc&vZw$obCDnz}<893YzLeI<1?d zS8Bc?LgHzZQ6*J{_Wa2cSDZd&cn^w4bqLD|Q8$d9N8q{#fR-uEY{SlJG()4W1 zN7S8Mlf0{FtBWTkP@e*DgL*BZ3$G!*p^uvAQ@&^%af!{%E?3M4XZOz`daMHQ>rp;V zUDzAqqhIDAs9x>IEo0_64*PS}%pQ5ty~d?KDee0*TMeULTdZR6a?Y1*-bnLvJu1VG z5HvdEU|*c^IBcwlk$yJ8Kac}^z8wY{(1q+vuQb=F0fASCZY#RYrkZWP=3_7Bt+R?y z6lM@+%9R?=I6IqPKWgFIgbM3hDA#>bkf{IFaBf)TLLe3_3IL?K!p&e2C579c2M1j+ zzX9LyafeW#xM2NL!0w|Q2qV$qm&2h9<@n3EnJRe)rYLkbb$=KmUyWy|EdM<=D>Y0g zVY;Gou8CnyzvWQ|fDbLTG%^$6P2;uF!};8{SVhvu&vHz^6<|DksgRKW-t`NVtmSTA zbC8gq^TQs2@cI(*TZ`G*yRaL7(Cd@kmb(f`g!Vt00N~fK`$&`CDfhdtv$Mg6_FiP1 zdnGG1CFfH(+ivo81dbnn?wh(fl;rQ!dzgRGe*Qwd%vNd?dqZx-YnRl_u~-o8GN0(M zFs`1nEQzL+ozUd3+E+W@8Kqi;^2WSMgy0-M!{)n_I)1$;x3=T-dd>0?zt83F?`^ZD z*A@rm#m$^&&D`uZ0bexS)J6ZwBgf3W$)}{6MWZQa~%O>NoB66S~`Y}S=4l=x!*3DNWDXFBp9gQ;bCGO{S z)A45U&Aqaxx?g@tGA%~Z40dvQ1v5A-(YB&&vr3p^PL(5pj#E=_zbl`Gxm9MR&ZC!q z=#}nUosT78oWN{fdkW7nK{e{JA|$G0Dne(Qc&rE>{aS;pw z&`={h=BRZoU;b30Mll(~7wAu)EUj_=*2jG~(~I?VL^y-|SetY$OH8<2F)+x-$v$%8 zrwA1${oEe85yul={BdEnx2=+aLEezr2KkTEwkO>9BgGY`>Mc@?7tLuaQ2oFQ zLrP5W##5k$Y;-N<$PZRqWySIR zEkZugoc&S%mMbTv0ov!H6MwAm;cPp&9Bj|U<&h{j{)K@j&!^<{DraY3>Ur5k*a4)f zl_nKJ*z2ifBG9g8Xr*{*iMtT-o= za%`W&UWxGM8(QgkI=Oc~&^I(tbBoXl>@1xRdlqERy0G%6KL36)6In^2Brk?tl`(DC z^upda#Y-Fh7jR%8AM}j3?r?J-y9ps+GgeSBm|W@57&2_Fd?XMzEX^~;jULTt(xT%l z&#qEs5I$=sZ*jfk`+OaaVLME`+0`7UkIO5I6OHV3HbM)Y=KWU{`1AUrg^PKsC$X4; zV73Wuy_ZUZ{@9Hf;j@l~6+fD1jb~Zx?yL?!)y3MAy)KT?UlTw$&~@BBjj>xO9CnHg z509u7C=rfT#v8s|?rlw;9s%DXXX(%oUsKnh{=CIbOF{k>GjHV6(a4d@D8yVwEGN?^ z_4||ehc%Ys7mdbF&ut|kOUQZ)WkA_ufoQQ=PP=Xa}^_ za=Zg`DvV$N4g?1a`2Hhqg838xjDU?pLCMB0hKt9cDsD<8YU2FIrNDzgBA^E#i0j+) z#ru!^+&@P=Czu$YO|@#pskib_WKEevx9f;!R@@D;e$hA8flTdB$r)vzkTE>dny0yW zjaRgOTbQB@7xq=0|3}9twn(y>Ghx<+6;w(~*jj~v#KIRYMu@(s0hu8|?Kps&`&!|9 zdMWzM>J}53RR21&T&`sB3=iRe^Jc_s{qOOv&v^x&yM63N7+hsV8#II_^iuh~zuJ&# zen$4U3PZ@I+M_Z1b%E_j>y2;=Mtcu&mW^EA_ASsC2X}P-V(}0z3t&-;q^eY1XFeFR zlC!?=+SS0bxc^jVl0hAsp=Qm!&$=WdW*51t&nh`9uNP3&%Fx)hmp{Xsx6iX@hI{r6 zmAPX@&|}jvScp0+j*y^O5t-idWPY86I00MQOAF(BXsaw)U^U8D>Zv&RM+;n|U0aB8 z7|zv`p2s(-_ej%wS)eo7%6x;&!rIGfeu;VNGAwAp8#bq1p&y~$U}qGmB&c1qq6uO4 z&Z=4KI8ww7uQ!!Fk`I9{$I~kB>(#PO+mIiAh22FiFaUywgT?Exfc>A)7!LNL01&}A zlq7o*~0e|i%+&}j-a4m}4A$G@J#EXQ|lQ9)}?ei(L zcN))#n`)kMwUgs4&tbbS^zi)&EvKcuAJ`=)xZR$?Aou}LWU|zDk#po!+^6o?=>MKl zM--QAwi1=f1|=_-55|#(E9KdN9(-{ZuPe(>9cCEow-sK}y7l_HA)*MNJcFMpjZ&;j zc*|3##G}?zNN%xO8Xcje7n!X}ij-d4hc~N-0`71T%hSN?u^wb+aPJJShIBP}0ip_} zTL#P}e(bFWw)L`7WV06gUk0}7;&_e9^dUtq$88q9OGCe+esr$5b?`g>zM);zH)T`Q z%6gq8*fY`Z1`)e1P{4ii_JlbtG@4J%Sg7k)(GPUOGC7CsC1tpc&Ea~qdx~+J63p{% z^w+Z7NTDIs>IuefQ-&T>!!)nu6v$|9PH|KXRBf3TyQE(gWg<(nu}b!&IuZ_kPfMp_ zSoO0gIfE0mB#VgKiZqg}a*h{dDc_9!YV_euiQn=`1QSg#L$5k(YIwa4mFB^z^i>H* zvcQL{A2N+a480fI0Oh8nU<|77YopX|aQD$v*LKGaarDnN6lm`T^13v5>G^8*aIj_x zbGm&_)}3RF8;$AFX18Zrf6|NZd{5ApGi~&^;f|)AQcjNTswY^4)K;X3)eAK>dwkNl zBJuT%2z7iqwuAR(ZBkSDX}>y_C62t=B2D-PuR=ppQmrQOCgFYWLdcNaQSXSq1AVz% zZNUCagi@^ofw~;5&uTeCM3k4iO`{J0uHWtU%S6=}yzi;6$siq_hM!1Jks_4DeOr6W z6W5`#phK^xcSGP z{0*s)L|`FRu2DXnE!&T$LRPgBekb}$9a3jz@kfe+EZ|&AomJ}EO!lS*Q6m1gpVut$ 
zEVvGMwp)5TBo(-Nd!o!bcopz|{`@xlp>%}dnZCk?e-sdQ^2s=nw{!O^L*{Qw;Zxq5 zA;ZLx`x>&vEVC+knHKS$(u$GS8SNh#Wpl#Kc3S2~{k#J6@;2ZM&Tt&h18u()rx(^A zQsra3YO27K%=qxiZlABG*sSl(%s|8Yx89tSzejV2Kt>Zcdh}e+yp-O=CGcdY29Jbk zZrmI=O?+_HY2Yqvv?o6KW-YmnxT;9HHBwpFF^m};$;L)L!eWZgF8R##jy*pCE28hS z+<64;AadK0VJ z4oy`AH&vk4q%r6`IGt_ztr50drdGl|8xablGk*-Lg>6e+@SEeouB@6e_4&a@7h&ST zsR6#IF;W;`NXQm*!g$EGlcJ2flR%|!$TL$nchVRClz^;Awl4@-U6YS2A#w8{KceFJ zMJyhU65NaVwz&tu@pI1cVDrYhS5p#)@;7)&L?z^ck%0<_($70rHj>wTTTbc0de^}4?L$psdEoxjmrWdo23nKq z?EK`D?Q)7o#=n1E3~XJF9$CEeJc`ied=)t1^6t}$5!*hot*v zb4Ro{!NZ!BatW&(e%m+h?=RTNjVp>0w0=jjgN^RKs!X9)M`0Zhzu^;1r(%M-U+O6 zrM$G(rBXa2{~8smF>WFs8u6k_gpRa0)mK&F_@nuR7LE4I3q5&<_G7k*Mb*Ty9Tjo8 z3wkoRu`@opLQ8aw8T?r!eB!Reeqv}#^ZATPkK#+z>fyeviB!>Z1GmCsL+XxJv_i-|TDuMdo!K?YK_TM8_M zbb+r(wr|sR4qlmt4WwloT_Nu|?m91b5Oq`{-H;R$c~X+17uPM~QB4@Bs<&U9v0E|f#pv}k zDUrUmBvL#Ph~z4L7x^VFNr`Or=BHoXH?p5M+hIiNu5OpZ=}b_$=ycbtkW2rV`QfF{ z+j`3OjV^1CnoosYub`m)Qz84@x$L81oNdi5pKkrcwO}i~%zY))fTJavo!;dqrNcH)8+|iWxtfZ1k zHrkpd2xs=~;u0y%_abIFN2vAQfrYA-Un1VC>Mqlf+DMHbU`wydvaj%@2`hViKV3h8 zkWB-5=@1K#49hASn8V|0CFv+qyA7an-`&91@1ywl$)PBe27Jq$TRK0_PM=vSzf5g5 zjlk6?b2$OgmRbrRyh_;PusnLlh&Lz_EFhSx%r4U?@2E)DcN=6>((+wEc3)^*M@WY@ zn#tuEvJt8Kim}UdCOCbb4q`TxRCHeZ03g)x=(T9QoXEgf$FiEi>NHCL zCoJQQtdO!Mzkh{e{ z>t)yq7?%A*hOLDESucaY042MaDg~RVOF##V`8N7~{VcT#PmXzGx5juzpOE-On zj$|hmik7iAXv-odad5dR8*8pmf)-)oi@A?0+Kt1)jHCK8tWC!%U0#q){!m@ZKuy~_ zxuW|-gnq%}6$xU&GUeST{HF~~6^py1pAG29!!<1|3X@CWV*|avv9}$+=k>k^oL1g# za@cmk-#~a$PEiF8n+W>NR5_3}?Pdx9BnPG&?aLy9Or@!TD0oI!8Wvv9Q?Os&@9X44 z!dpQG1W0jQBP1v&&)?*!bWJ8oJcm8Bdp^qXs4n7gLyB~7(je*AtQ{(iSFI>E0mgUk z9Ls7(C9GfxiEm6#x{Qn<^b?-~txQ)}FVT79w7T6y5?Z91+r-Ygh?J{UPj%7nRIJRs zihUPI&5>Rq_kS%BXyLUY#6a3ncDEBtf8N72ivBYRsVIovtgSdZ9f8Zb3LDBi@!p(wlcUQ$)|aa@q-!j2q6u<& zcA)!azc~Md!X)nCIdiKLovc}CJ?^)z7kXyM?52WX>>%n){Se%+Mm-&kpGTXt-@Foi zUk@?SHU3;CAqf>)+ykqkhE>PK1(mz$0xgTXXb4$6r8n?ejG)?gIG+%~3X87-`{CL* z(K1INv~TvbKeAFTBRK|{PHi*diG8V9S8&^OwZcVuH=0EN+yXA^2CD^nW@FWVi(hw zx4``~WZ#|8Z{GJbf+inzX}E7d+#YMKz{}n^Hkv=7wJIS#lb>sekebfB+oo>#=wNd- z@uuT0Gfc+T-(44d_(wz7QOna5$KBzaUJ5d`ZbMG>t7b4*J=0=fIT<8QASZ{o%gmpi zqXDN4SH8}YlB5rQGZ6t&fnNPjmH5m4-r6DJQ(y%tcHhj2+uM^H81ZYfKN zJ3{+T{YJ8kaM{uH(Z+cRh>po^S*xPSQ{_V<*Q|)#@EyEjOpmHi{GB>ql$tPtyFE^O zE4Od*mJL8S!-A*AS{e1P!O~rqFEcI%v(mw0I92=htO4joj)jEcaWp`_wMFS0K8vph~Y6atYl5qHw`_PdXkKED%rqZ9@W z)89Y$wMvdMG}c<|>R}5cTN_axLV3sDY_i-#0f0v ze%PGX&Cl;HOoR%e`vuLe!Dl*?w{ag?dldDN;BMjfT0c40Loe;#+)`^rB^zex6%}_v zTay8;Rc`uDc?{y*_1(!NaK!J>F{Vl0a{ej>kojAyN_A@ub9q03nE7YgzU!e_5AILt zu|TKqnfq#?qM_iC;`fJgy?~nX2p*06OIh(P zQ~Z6sLIIqQ`8_ie|f=Ba|=$2y_DLE17PBcsvl2 zm0zil^{^u-j(PQnS+{cpsl1pVIPa!XI3h2;1UU~85m%yPC`-e;b=;S1obIxma07l5 zBpp-?U9)TKDIqG%S?<_@w17kc`%QNV30;5+9F-CCdzI+ur5K~N<{Y>^8d4jj_kk)B zVckx8h=QjmTHH47bK<9x;PY~^X?fF*PKi?VCQ|5sww9$5*A{$>dedSuYc(WN4U(Is; zAxoxx0pWdJgjp-fkVlKlCOZ=EhnZv&qDrT%$w!LSnXqo?JC2R;eGRd^K}B;XG+Po2 z4Z?Ipvd$aP@9})T`A9IAov9Em`1OtVcqx1%i>KpLtEhshYL&0;YocT$`3X%lL(%p= zAl4Ww^9U4Sf%KY5b!5F>qBmi;$o7>z#RnI*Y?5tL&&fJ=)FR`h?QOh9#~*;N2m$_E zT%5YK68XP>w3s@ua&p*_M@K8oAw_)3A}b|Xkmq00e%ox~>+pdP<+iWVDab8;$PV`YLhg;36A&bie#p!T{QGtkq zb$e4H`ns!UipDY%HQ>d|S|$RAb=oZJYnhJAn{Se!-yZJ1Ys`$|b54T0$GYWZ9Xlxl zeim<1e=s6Vh~#cm&GvoSDply(*K^M;r}|E@b}hh+eQZ6|AbFictfSOIoS{SbA!|5{ zo3U`{TJBS7g!0bHfWV;>o1c|d_ET7Paj(T5M~y?aBRV8P;Mc}oS zmrPc!tQVZ2MZLbTu&OL-n5t^i;*E7W#pe9-NUcFBkF41IzIa4f8v$fRqkz8O@b?1% z=9iGe?8>;-zWH5SbHDMg1p++$W9}RFw}Hou(BHXkO5qZK%@&s{3Y+dHDyROjYyYu8 zfKB!>DTWJM#e)Dfr37hJ)X6jkDukW|TQ`}gG#Z+&028qWZu0DBm(MQgioHg)03&_1 zE-=!%bGe?%qFfawVO{>Ur_4@vC?hs4ga~@`QX? 
zTpaxgIhrcTYT8+pBf6MK)ur+&LMRDtU|>Fz6)HAza@6ey)>?OBGB>e_O3V#i#y40w zBsgPv>jLAf*hNTIB!MYJfqWHxdc8J;=BpGaBIRb0`>}Bl>e3&2lj>V@Xq5=HBWh#K za5v4ZrQ9tIi>$?dGJ92HBT=;?HWAtK((?W$DOy?Y6?l$gpvDMgpYT?2J*7Ks?WV^3 zXo8|kD>;vXWu!DM^!RtTT8LKR``A>yv|tX%_J!sCdpwYMh{v>4T`NA$Lob`9R7$SGX~%n<}U05bE&5*BQlm_YR`hS8`T_dG`77RtAZ-eXu298V_4UBXO^d8te{ z2amG-HxpE~zzQu-$I{!q?Qj!N8a?f8O+DYf4E&tlOn5lkit&k)VvSs?Qc!fsmYimi z@5AgI*SdKyz%kAi^lrcZm0lZ$wz7jx)n0AIJ@ib1K#8TY%#Z39f2gI{luc2f9>k9G zw}PPERPFmg!L^a6SnSfw??8MngqJR^CNAU$xVpozJ~~Drl|DZk51$F}r?nf{OH@My z<5G`1bzpPiI|&YiL<*XlhQJMPV*P9=Fz9rIB}k?FL~REk2;~)5<=B+7?{64X<7j?# z-O-=;`=h`wsq*E13{r^^-N)$me=-y_*eVX!E|BB9Wj$8FMUnUzigdzBNM?02MxbezA|2l0L2Ixu-eCt9#ml*tuz1W6YIL9Mjg<(^ z8gT*XJ0VR)VKzTfb>&rScr-D;r|Z!^Kg=;nY##umj8?hvYF?pK>*!ChCes?2=Y+pyk(H%-ap?X zE|G56i&#qtcTKR?hc7$G6bjfRLTI2Y={TXG6=kMLX+`u=)YI5Rvy8WOVW&kz5{-j| zsV(VKB)t%sJBo&7U*4_Uu4<5U-rg^lP@?YVUIlzYv4J<2rbYDjq;qS00E(tUyDYyN z3VK)A7eW-#P_dfnNpOh~*uTe?bZ}u&s1H9a@SrHbh~s9EEK;>{5wzjwm7XLOA513Q zSV!Ag;gqo+G?EkXQ%il)T6_!IE}plg4}~Nko%mJy)X<1yM=w!ign@&s@x&3q-3Y&U zXKd+AP#4W_u~ci{nIwL`dMnFaD#T0SzBs?{5C|k0;FFh>M8NrU--CvQzL7V~)G)Hr zD^GKs=~9X+7cbZjkVq0Se@V+o*|##&Sj4@QaQZ=8n>`tHDJBM;Dn2B`FP1A{T4hh! zds88F6|VS!S|9ftWs@`YLgXBL;jm4~q$0i3zQ%D3`bIX*?^)mBLqA6;HgJTv?~VhR z2AM+{?~z(q)P0 z7sU7TG~OeL8w45@oq{oulZCQ;FAGiCdEOMszo9>7`<16RBueJoMqQ)x z487mXl!KRPu@Zmrq7BhrPKUf;N>6m640E6{g*4k@^Cx*!>b+gf&#K*FYJ`3rI>ao> zaXVvGl<;RbIb)`S_KM`UjyW>@7fKR%XrIn-1ShD%P8wPP2bkU3j2^l&s2?T2C z&DqA{jXDz)_&7+C@Kl;uZ~hrkdRs_i+LLW^Vi(DfP*&7IxE4fdu>3q$+D)EH@!c?$ zV!j1?;IkOE1a1r{oV)z3>gcjrijZ z4_yp1kdwfl4<2E3bC_`oc)!Tf%++nfw94mKxK_-AeZx<30WBUaQ9(37q^B7SyYcR- z5swe#PlI_%)X#qHNlr2!scaYbSzZ=oX(M;@AbVM4neo=VU(Fvah{UZX)p~MPI>(Sh z6o<9n_N)$U(-(>r?2kt#%+Y7|WTKM%WT%6KW`ZuVx}|$^NcI8H#lzt%!*aR*r3a@T Xz`p$~0$v9W>*v7k*Ly9$hxz{ncCI&w literal 0 HcmV?d00001 diff --git a/api/core/model_runtime/model_providers/deepseek/deepseek.py b/api/core/model_runtime/model_providers/deepseek/deepseek.py new file mode 100644 index 0000000000..5fb821ed45 --- /dev/null +++ b/api/core/model_runtime/model_providers/deepseek/deepseek.py @@ -0,0 +1,30 @@ +import logging + +from core.model_runtime.entities.model_entities import ModelType +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.__base.model_provider import ModelProvider + +logger = logging.getLogger(__name__) + + +class DeepseekProvider(ModelProvider): + + def validate_provider_credentials(self, credentials: dict) -> None: + """ + Validate provider credentials + if validate failed, raise exception + + :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
+ """ + try: + model_instance = self.get_model_instance(ModelType.LLM) + + model_instance.validate_credentials( + model='deepseek-chat', + credentials=credentials + ) + except CredentialsValidateFailedError as ex: + raise ex + except Exception as ex: + logger.exception(f'{self.get_provider_schema().provider} credentials validate failed') + raise ex diff --git a/api/core/model_runtime/model_providers/deepseek/deepseek.yaml b/api/core/model_runtime/model_providers/deepseek/deepseek.yaml new file mode 100644 index 0000000000..b535053c36 --- /dev/null +++ b/api/core/model_runtime/model_providers/deepseek/deepseek.yaml @@ -0,0 +1,28 @@ +provider: deepseek +label: + en_US: Deepseek +icon_small: + en_US: icon_s_en.png +icon_large: + en_US: icon_l_en.png +background: "#FFFFFF" +help: + title: + en_US: Get your API Key from Deepseek + zh_Hans: 从 Deepseek 获取 API Key + url: + en_US: https://platform.deepseek.com/api_keys +supported_model_types: + - llm +configurate_methods: + - predefined-model +provider_credential_schema: + credential_form_schemas: + - variable: api_key + label: + en_US: API Key + type: secret-input + required: true + placeholder: + zh_Hans: 在此输入您的 API Key + en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/deepseek/llm/__init__.py b/api/core/model_runtime/model_providers/deepseek/llm/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/core/model_runtime/model_providers/deepseek/llm/_position.yaml b/api/core/model_runtime/model_providers/deepseek/llm/_position.yaml new file mode 100644 index 0000000000..43d03f2ee9 --- /dev/null +++ b/api/core/model_runtime/model_providers/deepseek/llm/_position.yaml @@ -0,0 +1,2 @@ +- deepseek-chat +- deepseek-coder diff --git a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml new file mode 100644 index 0000000000..a766a09a7b --- /dev/null +++ b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml @@ -0,0 +1,26 @@ +model: deepseek-chat +label: + zh_Hans: deepseek-chat + en_US: deepseek-chat +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 32000 +parameter_rules: + - name: temperature + use_template: temperature + min: 0 + max: 1 + default: 0.5 + - name: top_p + use_template: top_p + min: 0 + max: 1 + default: 1 + - name: max_tokens + use_template: max_tokens + min: 1 + max: 32000 + default: 1024 diff --git a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-coder.yaml b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-coder.yaml new file mode 100644 index 0000000000..8f156be101 --- /dev/null +++ b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-coder.yaml @@ -0,0 +1,26 @@ +model: deepseek-coder +label: + zh_Hans: deepseek-coder + en_US: deepseek-coder +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 16000 +parameter_rules: + - name: temperature + use_template: temperature + min: 0 + max: 1 + default: 0.5 + - name: top_p + use_template: top_p + min: 0 + max: 1 + default: 1 + - name: max_tokens + use_template: max_tokens + min: 1 + max: 32000 + default: 1024 diff --git a/api/core/model_runtime/model_providers/deepseek/llm/llm.py b/api/core/model_runtime/model_providers/deepseek/llm/llm.py new file mode 100644 index 0000000000..4d20a07447 --- /dev/null +++ b/api/core/model_runtime/model_providers/deepseek/llm/llm.py @@ -0,0 +1,27 @@ +from collections.abc import 
Generator +from typing import Optional, Union + +from core.model_runtime.entities.llm_entities import LLMResult +from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool +from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel + + +class DeepseekLargeLanguageModel(OAIAPICompatLargeLanguageModel): + def _invoke(self, model: str, credentials: dict, + prompt_messages: list[PromptMessage], model_parameters: dict, + tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None, + stream: bool = True, user: Optional[str] = None) \ + -> Union[LLMResult, Generator]: + + self._add_custom_parameters(credentials) + + return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) + + def validate_credentials(self, model: str, credentials: dict) -> None: + self._add_custom_parameters(credentials) + super().validate_credentials(model, credentials) + + @staticmethod + def _add_custom_parameters(credentials: dict) -> None: + credentials['mode'] = 'chat' + credentials['endpoint_url'] = 'https://api.deepseek.com/' From 9f440c11e002b0b39ddbcaff798b3a58a732e3e7 Mon Sep 17 00:00:00 2001 From: Su Yang Date: Wed, 8 May 2024 00:28:16 +0800 Subject: [PATCH 020/267] feat: DeepSeek (#4162) --- .../model_providers/_position.yaml | 1 + .../deepseek/_assets/icon_l_en.png | Bin 8232 -> 0 bytes .../deepseek/_assets/icon_l_en.svg | 22 ++++ .../deepseek/_assets/icon_s_en.png | Bin 11573 -> 0 bytes .../deepseek/_assets/icon_s_en.svg | 3 + .../model_providers/deepseek/deepseek.py | 5 +- .../model_providers/deepseek/deepseek.yaml | 25 +++-- .../deepseek/llm/deepseek-chat.yaml | 54 ++++++++-- .../model_providers/deepseek/llm/llm.py | 96 +++++++++++++++++- 9 files changed, 186 insertions(+), 20 deletions(-) delete mode 100644 api/core/model_runtime/model_providers/deepseek/_assets/icon_l_en.png create mode 100644 api/core/model_runtime/model_providers/deepseek/_assets/icon_l_en.svg delete mode 100644 api/core/model_runtime/model_providers/deepseek/_assets/icon_s_en.png create mode 100644 api/core/model_runtime/model_providers/deepseek/_assets/icon_s_en.svg diff --git a/api/core/model_runtime/model_providers/_position.yaml b/api/core/model_runtime/model_providers/_position.yaml index c06f122984..bda53e4394 100644 --- a/api/core/model_runtime/model_providers/_position.yaml +++ b/api/core/model_runtime/model_providers/_position.yaml @@ -27,3 +27,4 @@ - openllm - localai - openai_api_compatible +- deepseek diff --git a/api/core/model_runtime/model_providers/deepseek/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/deepseek/_assets/icon_l_en.png deleted file mode 100644 index 25254d7f533b431b5235f90b6b468f6049046d85..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 8232 zcmV+@AlKhgNk&E>AOHYYMM6+kP&il$0000G0000B0|2A|06|PpNW2UH00Bp!GyoDd zN~PhfdI}K{uzz_9_*QMpZm!$5BGgguzLX3GUCIK3F8kpH`Xk5&gPx#2ha?Dy=l+_a z)>2ET^}n{ei2g&AZL4atu_aeY=5p=@#XLSBT&s*z_#-GsZvO=6a-RPLSiDU?O|N?d z*mBN}p6{lVPo)Kp{LpxrDGNm>`ajWi<_7CO5Bl_KMlRTH_uQ@r#9@L(@*K+ znP-;E8g%v(jYBWNgz~*dX)pZjaCU5PCC`-2B?1(#Mqi$I%nSi%_T&{9ffhk<$&yu` zy35#|_7ZFq>r;Mu%%{lt;4!WyAI`H6St z(OAg?RJ(~WUx*yhJ~bX{lmzOm7Byy#XPFc(YQ$ur)f&L#7Fz;ny6!wIzjre z#&1(4b*=<6%A0l04|Dd+*xGaTQO(|Eb{t!K&Q7dtE5cpR4k)1p2iEpbZaPpx-CR`H z-nHnFJ`h?D)irie638{LWw`B8_XRr#vSaY0$r-fHAys2$&!&TmTGxNdMxgbNU9TeN zP~#kO&C+t+cdMQANU$G%FnIqyiySE)wNtWy5@pHump(Y{P|IL7|(S^9q%?aCF; 
zY@I{O*QvVtcK1D%_OF;OS7&9J1M@2J_E*eOpWKp8ZH;r-^R+5dYv+*iZ&Ec*z;xvI zsD|JKtVgc@B5UvtTfSQLM(^UfhrQt93->0tha_<2yI6DZ4>@12N;*^U58Y*0qk$$k zh@}FAOJKEG6FkIz9dg7*^%O&JHgh?uwRbX{M7j{U1r|Q7$M{au^!--erH|^yPn2`k zy*!&dQ}q{2SETFvX}b5e-%~q#7J*+JKb3R;yXlwnX5XiY(LI7^Qy_x zbnjGTm*E;iNZD6ZSc-m#-zr-l5skn{NZZ$42vSu6s4U=}*1leYhxW{tM0fsOK%BR- z<{^AR{Dm^g>R^FIFY65g+!+E;flmPMiVqh$&+>j3%o|w$n>Cs-MlDGhU-U0FvH(w2 z06XKiC>wqn?ozcUBqqhTPmR3c3vrug5J!eK<<2L%)fT|aQTkT`&JDE+k0RPu7AP!) zTR(%&JXt@7vKzj%SzCY^0Ccq3Mfk1mp?{kp{F&(z{6-b+2L57L+Uoc4;2f$ifQ?P? zU!ca;z!N6*X@{M|5Ef&6X9BrJ;$|DCJD5TY0c@nK&eKCMZ00N;`JX>iBsDWKx|7h@ z5}0kbv!x+%0Rb$GlK08_nSeKAmV48YCAG5?#?J`HmOyS)-;FcfUN1jp6|8kAnFGEt z)H~{ZTjd~7^P?t_z##d~NZwRa0Gv?>paG>n_o)2({wn$|-<5hweTe{*Ft@9h9>9Q_bh6aF8Umm!vfPx>@;1F- z)z1NT^8BOUztY0;05mSgKQJLo!Yoz-#P+G?=7y}}4sW_Hzhh@$KqP33umH1i_!Qh!9!NT?L_`yn*&slN2=Fdd(*OoO2m6Z9m>;c;U79Xo zHt80IF4|3_L2`9aQ%pI2>QEXTY#>;`riZRPQ(*a_I1^8x z*^ONR-axJ)fDAAJQRy&ZSR@5OBBLuq01-W#!9z+km9t;Ti^CxyxafGAT#z#y*~8}VNN z7mG#;*9hnX0-*slx}8CjOu?oSL4bxK0K!o)2uQ=6{**^Jq~t#MZ0E#>kp~JWX)Age z1>|rJXHeJ9x~MONvk(O63Qz%Lv>-M_Ea6T07Z!6+0F`NLaEPEuwgIeYRF3P-)?VxG}ORn%EKNMR&x(0Z+0rwq7r`FBY%_7>xqxbV3abtw5+y zP`8i)29E+5aj=<2*oM*p3~1z{df_=EpEF|#u)qSKY{G~Gh=j_@23J$qV_JdV;8k8(5ZF2NOG00clluf%pCRJE#`DLJBz%w@@90RUD~05=##d+>_} zmk=PUj7VHIDU5^)#d07J7#KtXEiAf`GUI_>sV66b0CNQn!D9(Y&Pvs6@oKG3GKzjF zLdMEmftJLm&U#9BW*eqlMe<0)F6g8Q2>&1@t@>Eg!7 zbNcA>$W9PoF$$o7nme~)7@0&X=xn)*?1al zV!PI$J3Gu!H?oHMcou+zW=i^y4{SVEdm4cQHHmCC|78^;1ODJma(EV^02DM6n%ly) z%z;Fst5_gLjzLp_-Oi4X8MG8&Q@kF$mf4!wu&|6WBjdbQbg=@2sedU2z(*L`5el%G z*#{P0OA@n9bcPz~JcaXF2wBrX3=9DnNMl^{^k5(8$VUiqi3*33hhPuBr}9!x8&TnN zb}d>sWOvKega#-8*1aN{HUSi1a~BU>$sFvA&^YL_LV_mfQAyO@{o zEV%$K>05K9D>a1ThVxXX2_av>rzrxobp#kRa8qKwkhwD5k5Y$Jx~y++-4jVM{f(jq zp#Le;l=NvLX`-t=<_VAt0^D@eq(h<^YcEw;qgkJU@8!)@z-{rP82TNJQgrEyNab&a z7)a<%gzEBUiN5!~*KVZV07bs9?-!aZHs#B?UwIO+GXSo? zBJjp*s3gGWD<_!-Zf;jJ*DCHqEQaJFT(1uV8Yv9QPZoZZ;9fxh3|WS@oY^jD4y0RQ zCajXKj1+3@tel0{&F&jT02PCoFK9N^ljv4TN=U8+We#>8(p*5%Z-=`_5dddYN8C}t zB-kwX2`q%<>R`t)NO41*8!HN+1DVK_6i8-WTa?{`F+pZR3at@?6*p~0VhXUM0$A9! zu`=@Kut2bB6@gK}aMDa**f8+8EvDKiKtmBg$71FRW>woJ>H00O_3(jC2#R=SWcXb>#sR-sBZVqk8nSS6Ov zC#^`Y5YLRfkOUtMBdodr2o`fR$L`td>^fThP&dV40fVM5%=wQR6@>C_hSxMja8mi` z1MC6K{{(d>RR9Q+DRV=l$@v@2CVQ;|{+Rd_P=fd~NImNO^N^wz0|aGq|0s{pAMcg~ zxWfW`WC1Wt=3zXi@6fnEOs(mqo4!9~isElAx4Nj)=a*}}DR^hkeg%aI(7SW5?_&Qd z2+&CaDE#kK{*L~#G&H1I1Y}!Gt$*)=^MA4^Gc5q-j-|D)FimU?NPcLhp|$52Z2{bJ zv2hwVt|Qp4u10hb3)jBmkXfS!x&%OL=P}x@uTXyy_{tu`GO;zVPp$bqy4HOx`e_A? 
zbF{u65Zy@yQ{PFD&l7UFplr&a*Zv4MBi<1;5eQ)pXfavc`yYJmUpdFPyhV> zI;4C;*LiD3`aZXoapSJ!*o(%k!&#CdZ$V4OUP%AU+cS0PtY|odGHg1El~ykwTnG zC8MIDr}5g*uo4MmZsBKsVEzI40qf3yyaD=W{LT;u@DIQb#a|!~pbl^kzz@I+U;Lmi zz&`*kyo%4%e^ULj@G8l^7yEzhr}-b%Tw#fj>`wTz|>-2>##b8}=*q1KyAQujyyIPwF3!9{?ZG|ImN>|Hs-V=mGre{FnXT zXRrBxyI%$W)PL;$=70b9q5uE?%iIJ1=cot#zyJUL`)~RI{2%ZG@E`Yozrzgkx3oK0 zda3P%&tS1bt&b5H64;kCjA-LuMrL0x2qJKpU<=E_2%IJu0@?gO5?}<(G;ur8TJL-h zL`y*kg<>KK zP7@3Pd3Zq+gu?({LrIvbo5qOtPVv{#=(yA586%9 z0)uqwyjan?C#@G_Z~b=g=A)0ZpxOz;*mRH;bczedMZNQqPVI_J4XV~_aSGj*beB2Q z7%>0{GU|70l$aRbfcqnuLx01{vZJm%uD)z~Un9%;=`KGChN-|7s+=5&`9#I&1!G-` zK{e|yw@3?1@0rv7xh+MBM>O3Obdex_%IW1aNtvEtvtnu&b$+n7SJ*Q^R)uajiAQI& zk$Fv>HQ)9mji%R|u=JVV^mN=Z0~JglQN{sh1- zafkk+4W|{^yR|d&m%<*5r^@q&L!${S#Vhv=etpKx1Izyd217{s8B06EYyrQHsRT|F z3;}bD7`}|mzFrVS;V{4#lO(dvKhJ03?VKk0ctI0{!vJ1h5JcfHz!#T<5jadHfB^ll zFiy+phwFmn*jBEKAV>aEsyRp`Z6fGKmt*TKIh5`{Vc^IKfJ>B&w2zrE&(^l#fclsz zDJ9C2)6s%c1DI?9pqQ{bOWa$8Q-0WoakRsxFF=xIiA+JTL3``7Tk?b!cNuqtE=*~_ zxpFFRgY?ik4`;>0w<*LBRj7IGQUgGN@)5oob%<-IFtD&RfItisSRBmr&7;8Btn{nk zGWO%IRD#jAZsO>{tu>$k000x9;1qwaJR8h6G~{jY%s=DO^Z*0{azj_kNI-h@C5Jcp z*?GLh7q0AIsZ#?029Q!?)Pb=`1?Rn&Bv?GSjn~Gmtd+i7ECP6>JjH_MS{-~OfR(! zuhR~i-;ykO)2-}D9d7w@xVQ9%AOHXW1I#At)mF~#=cjQpgj9+?@)8y8*QI2k%o*3| z001bFh;N+Rtq285>-AAHeT<4`jjwHgB0e0^aSQ^n=j>s$n6Ic=Kk7#Ci~*i>MfOLY z7&i1}bqS5$^D3FU$Jt1)qr}~I!k6PU&_BHIu3?oaD0C1NE6U8&{T$=eGs+jLKu}o+ zRN(&d0Gvf*00004YHTRENs5VR@t|M)emn6i4yAk<2uFPvu*g z;mM|lX-6sKza`MhYHaIcuxQv$3#b#6JO$%}ctR>47RLj|X}@=aC+&#tgHyn+g^S zRQSm{R1c(3G?|J#`NJ6847VV*Bgh9z8q2&>3aN-bZz|Mw=&EE4~Gf&^ab%(gJd}2$m#%kQamSW`Cln*H{ zDk=SNg+41gBun2rcVBU9(^@(l=8mJ^#JcqWfD_Mr8@H69Whld9yyW}A;aqg$WvB*y zZyi`QK*K%}=fBy5*vqyckz~fma3Y#?eKlr7bB_%b7d62=P>aq!rf0eRa=rd1q>SLJ z^u4W1PS%#NemnM$wT5^R!!KfQ`~TpyHvXXgmDF!kkkit~bwBFa@t&F?nyi)Q`W><& z)nMam`pIn>F%Dv^kcMJ-sQ>@k43s=8jVFaO+-SON(yWgi%u)k32wytsuwPHtC9^aa z@;|WoegEdzx1~t8J95G4%U~md_!m|Q%^k}q*okx}Xo&+H`h$ZT zFl^%@WNSN(w;vDj60kJBfu0e0PWkAOHJ7mayKP)^p%M z>OXag+dM0Z#eq)Y?;Umr-nfs$C8vCA%i~`TH4V zE~tK}`YYo{f#Q+~u!1&A1QM9doZ~WS!6sipQywaFifT#{xwJ^AL?sV9`5ZuXZaHaB z>2EBZ)-D(7?Stz$9zXp*xg?SVjv)FQxj!sLX4*=mObW+Cv&u_#{rV{FWlPp=$x??= zz;8wq%>ge5#_n0@+zfbJ-Ho9 zW;*HQShEHyj^J;Rak}qCPLdlvI6T}p;mkGc^cRl~i5S^^LJk-5hHz3vHv)+GH?mBVrge9!dEG7`t+frX33-@FjGRL(Ti_{u zM6j8Onv`&ujB>eRL;X}vQi?(8esEjjWgYUDc5t%e>oi5Z;0Fp~EZ+FBlRO9Vso{caFagWebrI zo%aEXU85ktzQRbPFL=-6QmkZNyWSn23brfx+8p`Y5$W7d43*0b8oAGGuB#RMCf4LV$p;0k|hMv-z z^k(qFyAMy%MgyrhaqfOt%Sm7I;Gz;$2n&WF5VwrbuEE@M_GvTLIkOP|CGxw@3;RW1 z<6!3_3kw0uPVHhkLe=E2oa0%&GFs39D$x z0ohUDl(3+RyP<8!~r3MGio?r?07Dtno@S|*?PkG4?{j6pnlt-OLM32LS$pYli(DH_ezn= zo*^$8^g$Nm2lIsRrQFZ84m@mVHhJ;3L89Wn$!g(a-kKz65}OD64c9m}FOp9vyOOPg zY#-mOl40=tH@!A%ygx^M;)y5Mf=xlp3mIpxO07mMO$oPp^HAN+lVU%kVw_v}*ZYA- zwYyF4YRV+V8Tplj+F*SyKlSwNr#OBw7tr41d2jbitg^ zCxd2-4)aCO|JREPpCP)8;g6oyw(IytSrStj1Qzs(4tOR2S!S-M_5nb(oh^3GH6}DV zmp;GjeAFfPGJ5kkSTyssRE9=Y%Pwhp#@f_w4D31THzaV{Px%_Z&I{~sGYk5H-xl5ldSIv+Ma|ha%!qr?zBv7YhzolJsp8l* z6*it0Xp35Eq94|_072RfZ%%?-zH`vKWUGO`Q1t}(GO3Hh>*301t2CVG48l&tp1mxI z*3&2ja(h=UR-kLFWl`^cawzeX6^;FyH46~)l%JEN{$AYOo5r>#EFK2SLQ^LV+7MWV z@x2Q(I*r~!rz;)H-$+)8Y}Ls?BN{tqhUV1p#auVgcLDJ)%Xql zMwS&S_0ie5l$Vti0oC=bKJ7pwzfO`e=rmUGhamf0pYFkgA6`tWL6A|FQT1XbC@1K= zSgCIQ16bGrooE>cb3I!BaCNxL9*;T69wYCbo&@}?#?iN$sRr46Ke*Vpb{p=4kxD2Q z2UT#W4Y(Eh&2vbj?(MRkfa1`Gvit`?tUE8qMYvMd1B%M&vZe&L)@Nij$C=96My<{) z;#HLHx*K*0VK)-a^0a2X2Ij7QTlaY-WQ9)^sW<5)Du-bOVNftQ)_x33%^f+AylgLV zLDS^;jh;Ht=m`=}m0gcld3g;ej`AbSEG`^55OgBM06Nj_-}C*zl3smjr)BJ`xT7$+ z-p>;V;S8iIxYD@777OQ&;QV54olZ6TSpQ8tkC^j?;9hVOv7OS(GjnLVB6Dk_t|Wvbif6hAw11OHEnJ5bp!Gu89g 
zNHC#5;BVoGZ=Bny*+N#RVKkTngT^KB2aX9NTE>w@bZ-z5T98i5lEO)l@qf~O+xz^N zO2O0TG&xuG7e>&s9-;vVCAob(=))o}M>uk6&?r8ae1!TlabQtt5( z00000003A=(7)5eR$R|+&qrE<@<{T*?rlGjQ#*(`=#sYx5ctFoPyhe`0000002J?+ z?!?vmq{yHOv<`w>i%87o2IDUeL6%nMm)jd%Gi9+orO8xEb&vq}6q?@(s(NdCogB@S ztpAGvNZ&M_Cciw{Uu3!6lYqPgBdz-Eag6c!dKv0JN8`jIdCsr?tEV`e{wl8|6Bqyh z000000WC}~KviXPaT22!S)KH_!J*2n{noj0sC7Z;a^(B+$|GsjR*dX9%&h@l&9e9Z a1Y}P)>K&h2S*%OWz$)MX0000000007YNdz( diff --git a/api/core/model_runtime/model_providers/deepseek/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/deepseek/_assets/icon_l_en.svg new file mode 100644 index 0000000000..425494404f --- /dev/null +++ b/api/core/model_runtime/model_providers/deepseek/_assets/icon_l_en.svg @@ -0,0 +1,22 @@ + + + Created with Pixso. + + + + + + + + + + + + + + + + + + + diff --git a/api/core/model_runtime/model_providers/deepseek/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/deepseek/_assets/icon_s_en.png deleted file mode 100644 index 3271f5cfe48fd9a4609bf11f1a2838812bf3769d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11573 zcmcI~1ymeQv+pcU&_x3*4ncxD!6CT2>*8*K1PKrb?i$=J1PBg6f=h6R;O+z`0RnHy zKi~cCz2}|t&O7(Lp4sW&RCU!<_w>|M*X+aG!zzF+CnYNdfIt8Mf^ERVI$VydgoLq* znzEFvg5)0tT&z9R!3}{80300MUDc$;DRgx8D1iS|OrdVhVk#;M|B3Y9;ML5db^w@U ze$@5fnE$6!G;<3#D9k_UyKlPkb)licF0626Qrm*@4Gd*InfAC{pD4^D^4zM=%FihiM{|aWqpYqrf zbPGpKbyyAg^K%2#0BJxRpa9H(SAY#*54Z!&u-XweX8(IRjz4lrfD=px3OhOjZva=A zgcV>7lVyTYUjc4_1+2D&(Jf$`J*)++{Cn*GJ*IA!oR9ioT}GD!0K|=lhjT^%Ku!mM z-yshVw|Nf_zw-eAehvWI6aOvmln5KnV;DX5-!z&`0Kf_afcmz7)65D1paC|z8E=^GNBuzlEC6VD1AxLX0Hme?z%$tO zunZm+00{sQ9svOX9uamxL_|bFMn^%06)ZF~RCF*F4h|R#48|pZ5aQwyRv7lIZ5ETFi2El_t z58VI>Y@)*h@Sw*Y{4FpGLGTE0FeWkVL4*4*JOBX^4ju{g@Et&h(c!S+v0-;{Ui4q0 z|37Q^(jD7MgUm8H=v&?I^~WxmMg%Lb0~#?I>W3U>gi}n0YUh3hG3Wd>0koYjGwWF~ z_TmRrcOy@W*ms3YYE)>s(NcIW=SEbf|^;a8MFB5|&AOu<&c3 za&d8>3f>uT0b__YM%DXvjA%NcKTcQ)pRKF7+dC^&6wVXaDe~-KZ#ie!GJk4=He_-D zt|sjum0S^l1={>#nLEtBFF!T?Rd69w?IM+_nltH{^K?Pu&6)!|xhQ}fvXCl2jkP5} zd@|bp$6z#;n6S|a*(pQAmc&vZw$obCDnz}<893YzLeI<1?d zS8Bc?LgHzZQ6*J{_Wa2cSDZd&cn^w4bqLD|Q8$d9N8q{#fR-uEY{SlJG()4W1 zN7S8Mlf0{FtBWTkP@e*DgL*BZ3$G!*p^uvAQ@&^%af!{%E?3M4XZOz`daMHQ>rp;V zUDzAqqhIDAs9x>IEo0_64*PS}%pQ5ty~d?KDee0*TMeULTdZR6a?Y1*-bnLvJu1VG z5HvdEU|*c^IBcwlk$yJ8Kac}^z8wY{(1q+vuQb=F0fASCZY#RYrkZWP=3_7Bt+R?y z6lM@+%9R?=I6IqPKWgFIgbM3hDA#>bkf{IFaBf)TLLe3_3IL?K!p&e2C579c2M1j+ zzX9LyafeW#xM2NL!0w|Q2qV$qm&2h9<@n3EnJRe)rYLkbb$=KmUyWy|EdM<=D>Y0g zVY;Gou8CnyzvWQ|fDbLTG%^$6P2;uF!};8{SVhvu&vHz^6<|DksgRKW-t`NVtmSTA zbC8gq^TQs2@cI(*TZ`G*yRaL7(Cd@kmb(f`g!Vt00N~fK`$&`CDfhdtv$Mg6_FiP1 zdnGG1CFfH(+ivo81dbnn?wh(fl;rQ!dzgRGe*Qwd%vNd?dqZx-YnRl_u~-o8GN0(M zFs`1nEQzL+ozUd3+E+W@8Kqi;^2WSMgy0-M!{)n_I)1$;x3=T-dd>0?zt83F?`^ZD z*A@rm#m$^&&D`uZ0bexS)J6ZwBgf3W$)}{6MWZQa~%O>NoB66S~`Y}S=4l=x!*3DNWDXFBp9gQ;bCGO{S z)A45U&Aqaxx?g@tGA%~Z40dvQ1v5A-(YB&&vr3p^PL(5pj#E=_zbl`Gxm9MR&ZC!q z=#}nUosT78oWN{fdkW7nK{e{JA|$G0Dne(Qc&rE>{aS;pw z&`={h=BRZoU;b30Mll(~7wAu)EUj_=*2jG~(~I?VL^y-|SetY$OH8<2F)+x-$v$%8 zrwA1${oEe85yul={BdEnx2=+aLEezr2KkTEwkO>9BgGY`>Mc@?7tLuaQ2oFQ zLrP5W##5k$Y;-N<$PZRqWySIR zEkZugoc&S%mMbTv0ov!H6MwAm;cPp&9Bj|U<&h{j{)K@j&!^<{DraY3>Ur5k*a4)f zl_nKJ*z2ifBG9g8Xr*{*iMtT-o= za%`W&UWxGM8(QgkI=Oc~&^I(tbBoXl>@1xRdlqERy0G%6KL36)6In^2Brk?tl`(DC z^upda#Y-Fh7jR%8AM}j3?r?J-y9ps+GgeSBm|W@57&2_Fd?XMzEX^~;jULTt(xT%l z&#qEs5I$=sZ*jfk`+OaaVLME`+0`7UkIO5I6OHV3HbM)Y=KWU{`1AUrg^PKsC$X4; zV73Wuy_ZUZ{@9Hf;j@l~6+fD1jb~Zx?yL?!)y3MAy)KT?UlTw$&~@BBjj>xO9CnHg z509u7C=rfT#v8s|?rlw;9s%DXXX(%oUsKnh{=CIbOF{k>GjHV6(a4d@D8yVwEGN?^ 
zds88F6|VS!S|9ftWs@`YLgXBL;jm4~q$0i3zQ%D3`bIX*?^)mBLqA6;HgJTv?~VhR z2AM+{?~z(q)P0 z7sU7TG~OeL8w45@oq{oulZCQ;FAGiCdEOMszo9>7`<16RBueJoMqQ)x z487mXl!KRPu@Zmrq7BhrPKUf;N>6m640E6{g*4k@^Cx*!>b+gf&#K*FYJ`3rI>ao> zaXVvGl<;RbIb)`S_KM`UjyW>@7fKR%XrIn-1ShD%P8wPP2bkU3j2^l&s2?T2C z&DqA{jXDz)_&7+C@Kl;uZ~hrkdRs_i+LLW^Vi(DfP*&7IxE4fdu>3q$+D)EH@!c?$ zV!j1?;IkOE1a1r{oV)z3>gcjrijZ z4_yp1kdwfl4<2E3bC_`oc)!Tf%++nfw94mKxK_-AeZx<30WBUaQ9(37q^B7SyYcR- z5swe#PlI_%)X#qHNlr2!scaYbSzZ=oX(M;@AbVM4neo=VU(Fvah{UZX)p~MPI>(Sh z6o<9n_N)$U(-(>r?2kt#%+Y7|WTKM%WT%6KW`ZuVx}|$^NcI8H#lzt%!*aR*r3a@T Xz`p$~0$v9W>*v7k*Ly9$hxz{ncCI&w diff --git a/api/core/model_runtime/model_providers/deepseek/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/deepseek/_assets/icon_s_en.svg new file mode 100644 index 0000000000..aa854a7504 --- /dev/null +++ b/api/core/model_runtime/model_providers/deepseek/_assets/icon_s_en.svg @@ -0,0 +1,3 @@ + + + diff --git a/api/core/model_runtime/model_providers/deepseek/deepseek.py b/api/core/model_runtime/model_providers/deepseek/deepseek.py index 5fb821ed45..d61fd4ddc8 100644 --- a/api/core/model_runtime/model_providers/deepseek/deepseek.py +++ b/api/core/model_runtime/model_providers/deepseek/deepseek.py @@ -7,7 +7,8 @@ from core.model_runtime.model_providers.__base.model_provider import ModelProvid logger = logging.getLogger(__name__) -class DeepseekProvider(ModelProvider): + +class DeepSeekProvider(ModelProvider): def validate_provider_credentials(self, credentials: dict) -> None: """ @@ -19,6 +20,8 @@ class DeepseekProvider(ModelProvider): try: model_instance = self.get_model_instance(ModelType.LLM) + # Use `deepseek-chat` model for validate, + # no matter what model you pass in, text completion model or chat model model_instance.validate_credentials( model='deepseek-chat', credentials=credentials diff --git a/api/core/model_runtime/model_providers/deepseek/deepseek.yaml b/api/core/model_runtime/model_providers/deepseek/deepseek.yaml index b535053c36..dacb20ab18 100644 --- a/api/core/model_runtime/model_providers/deepseek/deepseek.yaml +++ b/api/core/model_runtime/model_providers/deepseek/deepseek.yaml @@ -1,15 +1,19 @@ provider: deepseek label: - en_US: Deepseek + en_US: deepseek + zh_Hans: 深度求索 +description: + en_US: Models provided by deepseek, such as deepseek-chat、deepseek-coder. + zh_Hans: 深度求索提供的模型,例如 deepseek-chat、deepseek-coder 。 icon_small: - en_US: icon_s_en.png + en_US: icon_s_en.svg icon_large: - en_US: icon_l_en.png -background: "#FFFFFF" + en_US: icon_l_en.svg +background: "#c0cdff" help: title: - en_US: Get your API Key from Deepseek - zh_Hans: 从 Deepseek 获取 API Key + en_US: Get your API Key from deepseek + zh_Hans: 从深度求索获取 API Key url: en_US: https://platform.deepseek.com/api_keys supported_model_types: @@ -26,3 +30,12 @@ provider_credential_schema: placeholder: zh_Hans: 在此输入您的 API Key en_US: Enter your API Key + - variable: endpoint_url + label: + zh_Hans: 自定义 API endpoint 地址 + en_US: CUstom API endpoint URL + type: text-input + required: false + placeholder: + zh_Hans: Base URL, e.g. https://api.deepseek.com/v1 or https://api.deepseek.com + en_US: Base URL, e.g. 
https://api.deepseek.com/v1 or https://api.deepseek.com diff --git a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml index a766a09a7b..3a5a63fa61 100644 --- a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml +++ b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml @@ -11,16 +11,54 @@ model_properties: parameter_rules: - name: temperature use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 + type: float default: 1 + min: 0.0 + max: 2.0 + help: + zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 + en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. - name: max_tokens use_template: max_tokens + type: int + default: 4096 min: 1 max: 32000 - default: 1024 + help: + zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 + en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. + - name: top_p + use_template: top_p + type: float + default: 1 + min: 0.01 + max: 1.00 + help: + zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 + en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. + - name: logprobs + help: + zh_Hans: 是否返回所输出 token 的对数概率。如果为 true,则在 message 的 content 中返回每个输出 token 的对数概率。 + en_US: Whether to return the log probability of the output token. If true, returns the log probability of each output token in the content of message . + type: boolean + - name: top_logprobs + type: int + default: 0 + min: 0 + max: 20 + help: + zh_Hans: 一个介于 0 到 20 之间的整数 N,指定每个输出位置返回输出概率 top N 的 token,且返回这些 token 的对数概率。指定此参数时,logprobs 必须为 true。 + en_US: An integer N between 0 and 20, specifying that each output position returns the top N tokens with output probability, and returns the logarithmic probability of these tokens. When specifying this parameter, logprobs must be true. + - name: frequency_penalty + use_template: frequency_penalty + default: 0 + min: -2.0 + max: 2.0 + help: + zh_Hans: 介于 -2.0 和 2.0 之间的数字。如果该值为正,那么新 token 会根据其在已有文本中的出现频率受到相应的惩罚,降低模型重复相同内容的可能性。 + en_US: A number between -2.0 and 2.0. If the value is positive, new tokens are penalized based on their frequency of occurrence in existing text, reducing the likelihood that the model will repeat the same content. 
+pricing: + input: '1' + output: '2' + unit: '0.000001' + currency: RMB diff --git a/api/core/model_runtime/model_providers/deepseek/llm/llm.py b/api/core/model_runtime/model_providers/deepseek/llm/llm.py index 4d20a07447..bdb3823b60 100644 --- a/api/core/model_runtime/model_providers/deepseek/llm/llm.py +++ b/api/core/model_runtime/model_providers/deepseek/llm/llm.py @@ -1,18 +1,24 @@ from collections.abc import Generator from typing import Optional, Union +from urllib.parse import urlparse + +import tiktoken from core.model_runtime.entities.llm_entities import LLMResult -from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool -from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel +from core.model_runtime.entities.message_entities import ( + PromptMessage, + PromptMessageTool, +) +from core.model_runtime.model_providers.openai.llm.llm import OpenAILargeLanguageModel -class DeepseekLargeLanguageModel(OAIAPICompatLargeLanguageModel): +class DeepSeekLargeLanguageModel(OpenAILargeLanguageModel): + def _invoke(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], model_parameters: dict, tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None, stream: bool = True, user: Optional[str] = None) \ -> Union[LLMResult, Generator]: - self._add_custom_parameters(credentials) return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) @@ -21,7 +27,87 @@ class DeepseekLargeLanguageModel(OAIAPICompatLargeLanguageModel): self._add_custom_parameters(credentials) super().validate_credentials(model, credentials) + + # refactored from openai model runtime, use cl100k_base for calculate token number + def _num_tokens_from_string(self, model: str, text: str, + tools: Optional[list[PromptMessageTool]] = None) -> int: + """ + Calculate num tokens for text completion model with tiktoken package. + + :param model: model name + :param text: prompt text + :param tools: tools for tool calling + :return: number of tokens + """ + encoding = tiktoken.get_encoding("cl100k_base") + num_tokens = len(encoding.encode(text)) + + if tools: + num_tokens += self._num_tokens_for_tools(encoding, tools) + + return num_tokens + + # refactored from openai model runtime, use cl100k_base for calculate token number + def _num_tokens_from_messages(self, model: str, messages: list[PromptMessage], + tools: Optional[list[PromptMessageTool]] = None) -> int: + """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package. 
+ + Official documentation: https://github.com/openai/openai-cookbook/blob/ + main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb""" + encoding = tiktoken.get_encoding("cl100k_base") + tokens_per_message = 3 + tokens_per_name = 1 + + num_tokens = 0 + messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages] + for message in messages_dict: + num_tokens += tokens_per_message + for key, value in message.items(): + # Cast str(value) in case the message value is not a string + # This occurs with function messages + # TODO: The current token calculation method for the image type is not implemented, + # which need to download the image and then get the resolution for calculation, + # and will increase the request delay + if isinstance(value, list): + text = '' + for item in value: + if isinstance(item, dict) and item['type'] == 'text': + text += item['text'] + + value = text + + if key == "tool_calls": + for tool_call in value: + for t_key, t_value in tool_call.items(): + num_tokens += len(encoding.encode(t_key)) + if t_key == "function": + for f_key, f_value in t_value.items(): + num_tokens += len(encoding.encode(f_key)) + num_tokens += len(encoding.encode(f_value)) + else: + num_tokens += len(encoding.encode(t_key)) + num_tokens += len(encoding.encode(t_value)) + else: + num_tokens += len(encoding.encode(str(value))) + + if key == "name": + num_tokens += tokens_per_name + + # every reply is primed with assistant + num_tokens += 3 + + if tools: + num_tokens += self._num_tokens_for_tools(encoding, tools) + + return num_tokens + @staticmethod def _add_custom_parameters(credentials: dict) -> None: credentials['mode'] = 'chat' - credentials['endpoint_url'] = 'https://api.deepseek.com/' + credentials['openai_api_key']=credentials['api_key'] + if 'endpoint_url' not in credentials or credentials['endpoint_url'] == "": + credentials['openai_api_base']='https://api.deepseek.com' + else: + parsed_url = urlparse(credentials['endpoint_url']) + credentials['openai_api_base']=f"{parsed_url.scheme}://{parsed_url.netloc}" + From 903ece6160cb999a1f0ee306332fd3233d495c90 Mon Sep 17 00:00:00 2001 From: SASAKI Haruki Date: Wed, 8 May 2024 10:04:37 +0900 Subject: [PATCH 021/267] Fix:typo Incorrect Japanese 2 (#4167) --- web/i18n/ja-JP/app.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/i18n/ja-JP/app.ts b/web/i18n/ja-JP/app.ts index b2d1d255f6..795fc26624 100644 --- a/web/i18n/ja-JP/app.ts +++ b/web/i18n/ja-JP/app.ts @@ -23,7 +23,8 @@ const translation = { 'さまざまなチャンネルでチームメンバーや貢献者、開発者と議論します。', roadmap: 'ロードマップを見る', newApp: { - startFromBlank: 'から作成', + // this comment is to recreate PR + startFromBlank: '最初から作成', startFromTemplate: 'テンプレートから作成', captionAppType: 'どのタイプのアプリを作成しますか?', chatbotDescription: 'チャット形式のアプリケーションを構築します。このアプリは質問と回答の形式を使用し、複数のラウンドの継続的な会話を可能にします。', From 8ce93faf08ed5adea299951e0a42cf013a33d537 Mon Sep 17 00:00:00 2001 From: Yong723 <50616781+Yongtae723@users.noreply.github.com> Date: Wed, 8 May 2024 11:52:04 +0900 Subject: [PATCH 022/267] Typo on deepseek.yaml and yi.yaml (#4170) --- api/core/model_runtime/model_providers/deepseek/deepseek.yaml | 2 +- api/core/model_runtime/model_providers/yi/yi.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/api/core/model_runtime/model_providers/deepseek/deepseek.yaml b/api/core/model_runtime/model_providers/deepseek/deepseek.yaml index dacb20ab18..16abd358d6 100644 --- a/api/core/model_runtime/model_providers/deepseek/deepseek.yaml +++ 
b/api/core/model_runtime/model_providers/deepseek/deepseek.yaml @@ -33,7 +33,7 @@ provider_credential_schema: - variable: endpoint_url label: zh_Hans: 自定义 API endpoint 地址 - en_US: CUstom API endpoint URL + en_US: Custom API endpoint URL type: text-input required: false placeholder: diff --git a/api/core/model_runtime/model_providers/yi/yi.yaml b/api/core/model_runtime/model_providers/yi/yi.yaml index a8c0d857b6..de741afb10 100644 --- a/api/core/model_runtime/model_providers/yi/yi.yaml +++ b/api/core/model_runtime/model_providers/yi/yi.yaml @@ -33,7 +33,7 @@ provider_credential_schema: - variable: endpoint_url label: zh_Hans: 自定义 API endpoint 地址 - en_US: CUstom API endpoint URL + en_US: Custom API endpoint URL type: text-input required: false placeholder: From 4aa21242b6608bfe3e1ce4bb4755bcd7945d32a2 Mon Sep 17 00:00:00 2001 From: sino Date: Wed, 8 May 2024 12:45:53 +0800 Subject: [PATCH 023/267] feat: add volcengine maas model provider (#4142) --- .../model_providers/_position.yaml | 1 + .../volcengine_maas/__init__.py | 0 .../volcengine_maas/_assets/icon_l_en.svg | 23 ++ .../volcengine_maas/_assets/icon_l_zh.svg | 39 +++ .../volcengine_maas/_assets/icon_s_en.svg | 8 + .../model_providers/volcengine_maas/client.py | 108 +++++++ .../model_providers/volcengine_maas/errors.py | 156 ++++++++++ .../volcengine_maas/llm/__init__.py | 0 .../volcengine_maas/llm/llm.py | 284 ++++++++++++++++++ .../volcengine_maas/llm/models.py | 12 + .../text_embedding/__init__.py | 0 .../text_embedding/text_embedding.py | 132 ++++++++ .../volcengine_maas/volc_sdk/__init__.py | 4 + .../volcengine_maas/volc_sdk/base/__init__.py | 1 + .../volcengine_maas/volc_sdk/base/auth.py | 144 +++++++++ .../volcengine_maas/volc_sdk/base/service.py | 207 +++++++++++++ .../volcengine_maas/volc_sdk/base/util.py | 43 +++ .../volcengine_maas/volc_sdk/common.py | 79 +++++ .../volcengine_maas/volc_sdk/maas.py | 213 +++++++++++++ .../volcengine_maas/volcengine_maas.py | 10 + .../volcengine_maas/volcengine_maas.yaml | 151 ++++++++++ api/tests/integration_tests/.env.example | 8 +- .../model_runtime/volcengine_maas/__init__.py | 0 .../volcengine_maas/test_embedding.py | 81 +++++ .../model_runtime/volcengine_maas/test_llm.py | 131 ++++++++ 25 files changed, 1834 insertions(+), 1 deletion(-) create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/__init__.py create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_l_en.svg create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_l_zh.svg create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_s_en.svg create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/client.py create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/errors.py create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/llm/__init__.py create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/llm/models.py create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/text_embedding/__init__.py create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/__init__.py create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/__init__.py create mode 100644 
api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/auth.py create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/service.py create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/util.py create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/common.py create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/maas.py create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.py create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.yaml create mode 100644 api/tests/integration_tests/model_runtime/volcengine_maas/__init__.py create mode 100644 api/tests/integration_tests/model_runtime/volcengine_maas/test_embedding.py create mode 100644 api/tests/integration_tests/model_runtime/volcengine_maas/test_llm.py diff --git a/api/core/model_runtime/model_providers/_position.yaml b/api/core/model_runtime/model_providers/_position.yaml index bda53e4394..1c27b2b4aa 100644 --- a/api/core/model_runtime/model_providers/_position.yaml +++ b/api/core/model_runtime/model_providers/_position.yaml @@ -26,5 +26,6 @@ - yi - openllm - localai +- volcengine_maas - openai_api_compatible - deepseek diff --git a/api/core/model_runtime/model_providers/volcengine_maas/__init__.py b/api/core/model_runtime/model_providers/volcengine_maas/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_l_en.svg new file mode 100644 index 0000000000..616e90916b --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_l_en.svg @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_l_zh.svg b/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_l_zh.svg new file mode 100644 index 0000000000..24b92195bd --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_l_zh.svg @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_s_en.svg new file mode 100644 index 0000000000..e6454a89b7 --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_s_en.svg @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/volcengine_maas/client.py b/api/core/model_runtime/model_providers/volcengine_maas/client.py new file mode 100644 index 0000000000..c7bf4fde8c --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/client.py @@ -0,0 +1,108 @@ +import re +from collections.abc import Callable, Generator +from typing import cast + +from core.model_runtime.entities.message_entities import ( + AssistantPromptMessage, + ImagePromptMessageContent, + PromptMessage, + PromptMessageContentType, + SystemPromptMessage, + UserPromptMessage, +) +from core.model_runtime.model_providers.volcengine_maas.errors import wrap_error +from core.model_runtime.model_providers.volcengine_maas.volc_sdk import ChatRole, MaasException, MaasService + + +class MaaSClient(MaasService): + def __init__(self, 
host: str, region: str): + self.endpoint_id = None + super().__init__(host, region) + + def set_endpoint_id(self, endpoint_id: str): + self.endpoint_id = endpoint_id + + @classmethod + def from_credential(cls, credentials: dict) -> 'MaaSClient': + host = credentials['api_endpoint_host'] + region = credentials['volc_region'] + ak = credentials['volc_access_key_id'] + sk = credentials['volc_secret_access_key'] + endpoint_id = credentials['endpoint_id'] + + client = cls(host, region) + client.set_endpoint_id(endpoint_id) + client.set_ak(ak) + client.set_sk(sk) + return client + + def chat(self, params: dict, messages: list[PromptMessage], stream=False) -> Generator | dict: + req = { + 'parameters': params, + 'messages': [self.convert_prompt_message_to_maas_message(prompt) for prompt in messages] + } + if not stream: + return super().chat( + self.endpoint_id, + req, + ) + return super().stream_chat( + self.endpoint_id, + req, + ) + + def embeddings(self, texts: list[str]) -> dict: + req = { + 'input': texts + } + return super().embeddings(self.endpoint_id, req) + + @staticmethod + def convert_prompt_message_to_maas_message(message: PromptMessage) -> dict: + if isinstance(message, UserPromptMessage): + message = cast(UserPromptMessage, message) + if isinstance(message.content, str): + message_dict = {"role": ChatRole.USER, + "content": message.content} + else: + content = [] + for message_content in message.content: + if message_content.type == PromptMessageContentType.TEXT: + raise ValueError( + 'Content object type only support image_url') + elif message_content.type == PromptMessageContentType.IMAGE: + message_content = cast( + ImagePromptMessageContent, message_content) + image_data = re.sub( + r'^data:image\/[a-zA-Z]+;base64,', '', message_content.data) + content.append({ + 'type': 'image_url', + 'image_url': { + 'url': '', + 'image_bytes': image_data, + 'detail': message_content.detail, + } + }) + + message_dict = {'role': ChatRole.USER, 'content': content} + elif isinstance(message, AssistantPromptMessage): + message = cast(AssistantPromptMessage, message) + message_dict = {'role': ChatRole.ASSISTANT, + 'content': message.content} + elif isinstance(message, SystemPromptMessage): + message = cast(SystemPromptMessage, message) + message_dict = {'role': ChatRole.SYSTEM, + 'content': message.content} + else: + raise ValueError(f"Got unknown PromptMessage type {message}") + + return message_dict + + @staticmethod + def wrap_exception(fn: Callable[[], dict | Generator]) -> dict | Generator: + try: + resp = fn() + except MaasException as e: + raise wrap_error(e) + + return resp diff --git a/api/core/model_runtime/model_providers/volcengine_maas/errors.py b/api/core/model_runtime/model_providers/volcengine_maas/errors.py new file mode 100644 index 0000000000..63397a456e --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/errors.py @@ -0,0 +1,156 @@ +from core.model_runtime.model_providers.volcengine_maas.volc_sdk import MaasException + + +class ClientSDKRequestError(MaasException): + pass + + +class SignatureDoesNotMatch(MaasException): + pass + + +class RequestTimeout(MaasException): + pass + + +class ServiceConnectionTimeout(MaasException): + pass + + +class MissingAuthenticationHeader(MaasException): + pass + + +class AuthenticationHeaderIsInvalid(MaasException): + pass + + +class InternalServiceError(MaasException): + pass + + +class MissingParameter(MaasException): + pass + + +class InvalidParameter(MaasException): + pass + + +class 
AuthenticationExpire(MaasException): + pass + + +class EndpointIsInvalid(MaasException): + pass + + +class EndpointIsNotEnable(MaasException): + pass + + +class ModelNotSupportStreamMode(MaasException): + pass + + +class ReqTextExistRisk(MaasException): + pass + + +class RespTextExistRisk(MaasException): + pass + + +class EndpointRateLimitExceeded(MaasException): + pass + + +class ServiceConnectionRefused(MaasException): + pass + + +class ServiceConnectionClosed(MaasException): + pass + + +class UnauthorizedUserForEndpoint(MaasException): + pass + + +class InvalidEndpointWithNoURL(MaasException): + pass + + +class EndpointAccountRpmRateLimitExceeded(MaasException): + pass + + +class EndpointAccountTpmRateLimitExceeded(MaasException): + pass + + +class ServiceResourceWaitQueueFull(MaasException): + pass + + +class EndpointIsPending(MaasException): + pass + + +class ServiceNotOpen(MaasException): + pass + + +AuthErrors = { + 'SignatureDoesNotMatch': SignatureDoesNotMatch, + 'MissingAuthenticationHeader': MissingAuthenticationHeader, + 'AuthenticationHeaderIsInvalid': AuthenticationHeaderIsInvalid, + 'AuthenticationExpire': AuthenticationExpire, + 'UnauthorizedUserForEndpoint': UnauthorizedUserForEndpoint, +} + +BadRequestErrors = { + 'MissingParameter': MissingParameter, + 'InvalidParameter': InvalidParameter, + 'EndpointIsInvalid': EndpointIsInvalid, + 'EndpointIsNotEnable': EndpointIsNotEnable, + 'ModelNotSupportStreamMode': ModelNotSupportStreamMode, + 'ReqTextExistRisk': ReqTextExistRisk, + 'RespTextExistRisk': RespTextExistRisk, + 'InvalidEndpointWithNoURL': InvalidEndpointWithNoURL, + 'ServiceNotOpen': ServiceNotOpen, +} + +RateLimitErrors = { + 'EndpointRateLimitExceeded': EndpointRateLimitExceeded, + 'EndpointAccountRpmRateLimitExceeded': EndpointAccountRpmRateLimitExceeded, + 'EndpointAccountTpmRateLimitExceeded': EndpointAccountTpmRateLimitExceeded, +} + +ServerUnavailableErrors = { + 'InternalServiceError': InternalServiceError, + 'EndpointIsPending': EndpointIsPending, + 'ServiceResourceWaitQueueFull': ServiceResourceWaitQueueFull, +} + +ConnectionErrors = { + 'ClientSDKRequestError': ClientSDKRequestError, + 'RequestTimeout': RequestTimeout, + 'ServiceConnectionTimeout': ServiceConnectionTimeout, + 'ServiceConnectionRefused': ServiceConnectionRefused, + 'ServiceConnectionClosed': ServiceConnectionClosed, +} + +ErrorCodeMap = { + **AuthErrors, + **BadRequestErrors, + **RateLimitErrors, + **ServerUnavailableErrors, + **ConnectionErrors, +} + + +def wrap_error(e: MaasException) -> Exception: + if ErrorCodeMap.get(e.code): + return ErrorCodeMap.get(e.code)(e.code_n, e.code, e.message, e.req_id) + return e diff --git a/api/core/model_runtime/model_providers/volcengine_maas/llm/__init__.py b/api/core/model_runtime/model_providers/volcengine_maas/llm/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py b/api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py new file mode 100644 index 0000000000..7a36d019e2 --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py @@ -0,0 +1,284 @@ +import logging +from collections.abc import Generator + +from core.model_runtime.entities.common_entities import I18nObject +from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage +from core.model_runtime.entities.message_entities import ( + AssistantPromptMessage, + PromptMessage, + PromptMessageTool, + UserPromptMessage, +) 
+from core.model_runtime.entities.model_entities import ( + AIModelEntity, + FetchFrom, + ModelPropertyKey, + ModelType, + ParameterRule, + ParameterType, +) +from core.model_runtime.errors.invoke import ( + InvokeAuthorizationError, + InvokeBadRequestError, + InvokeConnectionError, + InvokeError, + InvokeRateLimitError, + InvokeServerUnavailableError, +) +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel +from core.model_runtime.model_providers.volcengine_maas.client import MaaSClient +from core.model_runtime.model_providers.volcengine_maas.errors import ( + AuthErrors, + BadRequestErrors, + ConnectionErrors, + RateLimitErrors, + ServerUnavailableErrors, +) +from core.model_runtime.model_providers.volcengine_maas.llm.models import ModelConfigs +from core.model_runtime.model_providers.volcengine_maas.volc_sdk import MaasException + +logger = logging.getLogger(__name__) + + +class VolcengineMaaSLargeLanguageModel(LargeLanguageModel): + def _invoke(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], + model_parameters: dict, tools: list[PromptMessageTool] | None = None, + stop: list[str] | None = None, stream: bool = True, user: str | None = None) \ + -> LLMResult | Generator: + return self._generate(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) + + def validate_credentials(self, model: str, credentials: dict) -> None: + """ + Validate credentials + """ + # ping + client = MaaSClient.from_credential(credentials) + try: + client.chat( + { + 'max_new_tokens': 16, + 'temperature': 0.7, + 'top_p': 0.9, + 'top_k': 15, + }, + [UserPromptMessage(content='ping\nAnswer: ')], + ) + except MaasException as e: + raise CredentialsValidateFailedError(e.message) + + def get_num_tokens(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], + tools: list[PromptMessageTool] | None = None) -> int: + if len(prompt_messages) == 0: + return 0 + return self._num_tokens_from_messages(prompt_messages) + + def _num_tokens_from_messages(self, messages: list[PromptMessage]) -> int: + """ + Calculate num tokens. 
+ + :param messages: messages + """ + num_tokens = 0 + messages_dict = [ + MaaSClient.convert_prompt_message_to_maas_message(m) for m in messages] + for message in messages_dict: + for key, value in message.items(): + num_tokens += self._get_num_tokens_by_gpt2(str(key)) + num_tokens += self._get_num_tokens_by_gpt2(str(value)) + + return num_tokens + + def _generate(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], + model_parameters: dict, tools: list[PromptMessageTool] | None = None, + stop: list[str] | None = None, stream: bool = True, user: str | None = None) \ + -> LLMResult | Generator: + + client = MaaSClient.from_credential(credentials) + + req_params = ModelConfigs.get( + credentials['base_model_name'], {}).get('req_params', {}).copy() + if credentials.get('context_size'): + req_params['max_prompt_tokens'] = credentials.get('context_size') + if credentials.get('max_tokens'): + req_params['max_new_tokens'] = credentials.get('max_tokens') + if model_parameters.get('max_tokens'): + req_params['max_new_tokens'] = model_parameters.get('max_tokens') + if model_parameters.get('temperature'): + req_params['temperature'] = model_parameters.get('temperature') + if model_parameters.get('top_p'): + req_params['top_p'] = model_parameters.get('top_p') + if model_parameters.get('top_k'): + req_params['top_k'] = model_parameters.get('top_k') + if model_parameters.get('presence_penalty'): + req_params['presence_penalty'] = model_parameters.get( + 'presence_penalty') + if model_parameters.get('frequency_penalty'): + req_params['frequency_penalty'] = model_parameters.get( + 'frequency_penalty') + if stop: + req_params['stop'] = stop + + resp = MaaSClient.wrap_exception( + lambda: client.chat(req_params, prompt_messages, stream)) + if not stream: + return self._handle_chat_response(model, credentials, prompt_messages, resp) + return self._handle_stream_chat_response(model, credentials, prompt_messages, resp) + + def _handle_stream_chat_response(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], resp: Generator) -> Generator: + for index, r in enumerate(resp): + choices = r['choices'] + if not choices: + continue + choice = choices[0] + message = choice['message'] + usage = None + if r.get('usage'): + usage = self._calc_usage(model, credentials, r['usage']) + yield LLMResultChunk( + model=model, + prompt_messages=prompt_messages, + delta=LLMResultChunkDelta( + index=index, + message=AssistantPromptMessage( + content=message['content'] if message['content'] else '', + tool_calls=[] + ), + usage=usage, + finish_reason=choice.get('finish_reason'), + ), + ) + + def _handle_chat_response(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], resp: dict) -> LLMResult: + choices = resp['choices'] + if not choices: + return + choice = choices[0] + message = choice['message'] + + return LLMResult( + model=model, + prompt_messages=prompt_messages, + message=AssistantPromptMessage( + content=message['content'] if message['content'] else '', + tool_calls=[], + ), + usage=self._calc_usage(model, credentials, resp['usage']), + ) + + def _calc_usage(self, model: str, credentials: dict, usage: dict) -> LLMUsage: + return self._calc_response_usage(model=model, credentials=credentials, + prompt_tokens=usage['prompt_tokens'], + completion_tokens=usage['completion_tokens'] + ) + + def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: + """ + used to define customizable model schema + """ + max_tokens = 
ModelConfigs.get( + credentials['base_model_name'], {}).get('req_params', {}).get('max_new_tokens') + if credentials.get('max_tokens'): + max_tokens = int(credentials.get('max_tokens')) + rules = [ + ParameterRule( + name='temperature', + type=ParameterType.FLOAT, + use_template='temperature', + label=I18nObject( + zh_Hans='温度', + en_US='Temperature' + ) + ), + ParameterRule( + name='top_p', + type=ParameterType.FLOAT, + use_template='top_p', + label=I18nObject( + zh_Hans='Top P', + en_US='Top P' + ) + ), + ParameterRule( + name='top_k', + type=ParameterType.INT, + min=1, + default=1, + label=I18nObject( + zh_Hans='Top K', + en_US='Top K' + ) + ), + ParameterRule( + name='presence_penalty', + type=ParameterType.FLOAT, + use_template='presence_penalty', + label={ + 'en_US': 'Presence Penalty', + 'zh_Hans': '存在惩罚', + }, + min=-2.0, + max=2.0, + ), + ParameterRule( + name='frequency_penalty', + type=ParameterType.FLOAT, + use_template='frequency_penalty', + label={ + 'en_US': 'Frequency Penalty', + 'zh_Hans': '频率惩罚', + }, + min=-2.0, + max=2.0, + ), + ParameterRule( + name='max_tokens', + type=ParameterType.INT, + use_template='max_tokens', + min=1, + max=max_tokens, + default=512, + label=I18nObject( + zh_Hans='最大生成长度', + en_US='Max Tokens' + ) + ), + ] + + model_properties = ModelConfigs.get( + credentials['base_model_name'], {}).get('model_properties', {}).copy() + if credentials.get('mode'): + model_properties[ModelPropertyKey.MODE] = credentials.get('mode') + if credentials.get('context_size'): + model_properties[ModelPropertyKey.CONTEXT_SIZE] = int( + credentials.get('context_size', 4096)) + entity = AIModelEntity( + model=model, + label=I18nObject( + en_US=model + ), + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_type=ModelType.LLM, + model_properties=model_properties, + parameter_rules=rules + ) + + return entity + + @property + def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: + """ + Map model invoke error to unified error + The key is the error type thrown to the caller + The value is the error type thrown by the model, + which needs to be converted into a unified error type for the caller. 
+ + :return: Invoke error mapping + """ + return { + InvokeConnectionError: ConnectionErrors.values(), + InvokeServerUnavailableError: ServerUnavailableErrors.values(), + InvokeRateLimitError: RateLimitErrors.values(), + InvokeAuthorizationError: AuthErrors.values(), + InvokeBadRequestError: BadRequestErrors.values(), + } diff --git a/api/core/model_runtime/model_providers/volcengine_maas/llm/models.py b/api/core/model_runtime/model_providers/volcengine_maas/llm/models.py new file mode 100644 index 0000000000..d022f0069b --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/llm/models.py @@ -0,0 +1,12 @@ +ModelConfigs = { + 'Skylark2-pro-4k': { + 'req_params': { + 'max_prompt_tokens': 4096, + 'max_new_tokens': 4000, + }, + 'model_properties': { + 'context_size': 4096, + 'mode': 'chat', + } + } +} diff --git a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/__init__.py b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py new file mode 100644 index 0000000000..d63399aec2 --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py @@ -0,0 +1,132 @@ +import time +from typing import Optional + +from core.model_runtime.entities.model_entities import PriceType +from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult +from core.model_runtime.errors.invoke import ( + InvokeAuthorizationError, + InvokeBadRequestError, + InvokeConnectionError, + InvokeError, + InvokeRateLimitError, + InvokeServerUnavailableError, +) +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel +from core.model_runtime.model_providers.volcengine_maas.client import MaaSClient +from core.model_runtime.model_providers.volcengine_maas.errors import ( + AuthErrors, + BadRequestErrors, + ConnectionErrors, + RateLimitErrors, + ServerUnavailableErrors, +) +from core.model_runtime.model_providers.volcengine_maas.volc_sdk import MaasException + + +class VolcengineMaaSTextEmbeddingModel(TextEmbeddingModel): + """ + Model class for VolcengineMaaS text embedding model. 
+ """ + + def _invoke(self, model: str, credentials: dict, + texts: list[str], user: Optional[str] = None) \ + -> TextEmbeddingResult: + """ + Invoke text embedding model + + :param model: model name + :param credentials: model credentials + :param texts: texts to embed + :param user: unique user id + :return: embeddings result + """ + client = MaaSClient.from_credential(credentials) + resp = MaaSClient.wrap_exception(lambda: client.embeddings(texts)) + + usage = self._calc_response_usage( + model=model, credentials=credentials, tokens=resp['total_tokens']) + + result = TextEmbeddingResult( + model=model, + embeddings=[v['embedding'] for v in resp['data']], + usage=usage + ) + + return result + + def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: + """ + Get number of tokens for given prompt messages + + :param model: model name + :param credentials: model credentials + :param texts: texts to embed + :return: + """ + num_tokens = 0 + for text in texts: + # use GPT2Tokenizer to get num tokens + num_tokens += self._get_num_tokens_by_gpt2(text) + return num_tokens + + def validate_credentials(self, model: str, credentials: dict) -> None: + """ + Validate model credentials + + :param model: model name + :param credentials: model credentials + :return: + """ + try: + self._invoke(model=model, credentials=credentials, texts=['ping']) + except MaasException as e: + raise CredentialsValidateFailedError(e.message) + + @property + def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: + """ + Map model invoke error to unified error + The key is the error type thrown to the caller + The value is the error type thrown by the model, + which needs to be converted into a unified error type for the caller. 
+ + :return: Invoke error mapping + """ + return { + InvokeConnectionError: ConnectionErrors.values(), + InvokeServerUnavailableError: ServerUnavailableErrors.values(), + InvokeRateLimitError: RateLimitErrors.values(), + InvokeAuthorizationError: AuthErrors.values(), + InvokeBadRequestError: BadRequestErrors.values(), + } + + def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: + """ + Calculate response usage + + :param model: model name + :param credentials: model credentials + :param tokens: input tokens + :return: usage + """ + # get input price info + input_price_info = self.get_price( + model=model, + credentials=credentials, + price_type=PriceType.INPUT, + tokens=tokens + ) + + # transform usage + usage = EmbeddingUsage( + tokens=tokens, + total_tokens=tokens, + unit_price=input_price_info.unit_price, + price_unit=input_price_info.unit, + total_price=input_price_info.total_amount, + currency=input_price_info.currency, + latency=time.perf_counter() - self.started_at + ) + + return usage diff --git a/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/__init__.py b/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/__init__.py new file mode 100644 index 0000000000..64f342f16e --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/__init__.py @@ -0,0 +1,4 @@ +from .common import ChatRole +from .maas import MaasException, MaasService + +__all__ = ['MaasService', 'ChatRole', 'MaasException'] diff --git a/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/__init__.py b/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/__init__.py new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/__init__.py @@ -0,0 +1 @@ + diff --git a/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/auth.py b/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/auth.py new file mode 100644 index 0000000000..48110f16d7 --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/auth.py @@ -0,0 +1,144 @@ +# coding : utf-8 +import datetime + +import pytz + +from .util import Util + + +class MetaData: + def __init__(self): + self.algorithm = '' + self.credential_scope = '' + self.signed_headers = '' + self.date = '' + self.region = '' + self.service = '' + + def set_date(self, date): + self.date = date + + def set_service(self, service): + self.service = service + + def set_region(self, region): + self.region = region + + def set_algorithm(self, algorithm): + self.algorithm = algorithm + + def set_credential_scope(self, credential_scope): + self.credential_scope = credential_scope + + def set_signed_headers(self, signed_headers): + self.signed_headers = signed_headers + + +class SignResult: + def __init__(self): + self.xdate = '' + self.xCredential = '' + self.xAlgorithm = '' + self.xSignedHeaders = '' + self.xSignedQueries = '' + self.xSignature = '' + self.xContextSha256 = '' + self.xSecurityToken = '' + + self.authorization = '' + + def __str__(self): + return '\n'.join(['{}:{}'.format(*item) for item in self.__dict__.items()]) + + +class Credentials: + def __init__(self, ak, sk, service, region, session_token=''): + self.ak = ak + self.sk = sk + self.service = service + self.region = region + self.session_token = session_token + + def set_ak(self, ak): + self.ak = ak + + def set_sk(self, sk): + self.sk = sk + + def set_session_token(self, 
session_token): + self.session_token = session_token + + +class Signer: + @staticmethod + def sign(request, credentials): + if request.path == '': + request.path = '/' + if request.method != 'GET' and not ('Content-Type' in request.headers): + request.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=utf-8' + + format_date = Signer.get_current_format_date() + request.headers['X-Date'] = format_date + if credentials.session_token != '': + request.headers['X-Security-Token'] = credentials.session_token + + md = MetaData() + md.set_algorithm('HMAC-SHA256') + md.set_service(credentials.service) + md.set_region(credentials.region) + md.set_date(format_date[:8]) + + hashed_canon_req = Signer.hashed_canonical_request_v4(request, md) + md.set_credential_scope('/'.join([md.date, md.region, md.service, 'request'])) + + signing_str = '\n'.join([md.algorithm, format_date, md.credential_scope, hashed_canon_req]) + signing_key = Signer.get_signing_secret_key_v4(credentials.sk, md.date, md.region, md.service) + sign = Util.to_hex(Util.hmac_sha256(signing_key, signing_str)) + request.headers['Authorization'] = Signer.build_auth_header_v4(sign, md, credentials) + return + + @staticmethod + def hashed_canonical_request_v4(request, meta): + body_hash = Util.sha256(request.body) + request.headers['X-Content-Sha256'] = body_hash + + signed_headers = dict() + for key in request.headers: + if key in ['Content-Type', 'Content-Md5', 'Host'] or key.startswith('X-'): + signed_headers[key.lower()] = request.headers[key] + + if 'host' in signed_headers: + v = signed_headers['host'] + if v.find(':') != -1: + split = v.split(':') + port = split[1] + if str(port) == '80' or str(port) == '443': + signed_headers['host'] = split[0] + + signed_str = '' + for key in sorted(signed_headers.keys()): + signed_str += key + ':' + signed_headers[key] + '\n' + + meta.set_signed_headers(';'.join(sorted(signed_headers.keys()))) + + canonical_request = '\n'.join( + [request.method, Util.norm_uri(request.path), Util.norm_query(request.query), signed_str, + meta.signed_headers, body_hash]) + + return Util.sha256(canonical_request) + + @staticmethod + def get_signing_secret_key_v4(sk, date, region, service): + date = Util.hmac_sha256(bytes(sk, encoding='utf-8'), date) + region = Util.hmac_sha256(date, region) + service = Util.hmac_sha256(region, service) + return Util.hmac_sha256(service, 'request') + + @staticmethod + def build_auth_header_v4(signature, meta, credentials): + credential = credentials.ak + '/' + meta.credential_scope + return meta.algorithm + ' Credential=' + credential + ', SignedHeaders=' + meta.signed_headers + ', Signature=' + signature + + @staticmethod + def get_current_format_date(): + return datetime.datetime.now(tz=pytz.timezone('UTC')).strftime("%Y%m%dT%H%M%SZ") diff --git a/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/service.py b/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/service.py new file mode 100644 index 0000000000..03734ec54f --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/service.py @@ -0,0 +1,207 @@ +import json +from collections import OrderedDict +from urllib.parse import urlencode + +import requests + +from .auth import Signer + +VERSION = 'v1.0.137' + + +class Service: + def __init__(self, service_info, api_info): + self.service_info = service_info + self.api_info = api_info + self.session = requests.session() + + def set_ak(self, ak): + self.service_info.credentials.set_ak(ak) + + def 
set_sk(self, sk): + self.service_info.credentials.set_sk(sk) + + def set_session_token(self, session_token): + self.service_info.credentials.set_session_token(session_token) + + def set_host(self, host): + self.service_info.host = host + + def set_scheme(self, scheme): + self.service_info.scheme = scheme + + def get(self, api, params, doseq=0): + if not (api in self.api_info): + raise Exception("no such api") + api_info = self.api_info[api] + + r = self.prepare_request(api_info, params, doseq) + + Signer.sign(r, self.service_info.credentials) + + url = r.build(doseq) + resp = self.session.get(url, headers=r.headers, + timeout=(self.service_info.connection_timeout, self.service_info.socket_timeout)) + if resp.status_code == 200: + return resp.text + else: + raise Exception(resp.text) + + def post(self, api, params, form): + if not (api in self.api_info): + raise Exception("no such api") + api_info = self.api_info[api] + r = self.prepare_request(api_info, params) + r.headers['Content-Type'] = 'application/x-www-form-urlencoded' + r.form = self.merge(api_info.form, form) + r.body = urlencode(r.form, True) + Signer.sign(r, self.service_info.credentials) + + url = r.build() + + resp = self.session.post(url, headers=r.headers, data=r.form, + timeout=(self.service_info.connection_timeout, self.service_info.socket_timeout)) + if resp.status_code == 200: + return resp.text + else: + raise Exception(resp.text) + + def json(self, api, params, body): + if not (api in self.api_info): + raise Exception("no such api") + api_info = self.api_info[api] + r = self.prepare_request(api_info, params) + r.headers['Content-Type'] = 'application/json' + r.body = body + + Signer.sign(r, self.service_info.credentials) + + url = r.build() + resp = self.session.post(url, headers=r.headers, data=r.body, + timeout=(self.service_info.connection_timeout, self.service_info.socket_timeout)) + if resp.status_code == 200: + return json.dumps(resp.json()) + else: + raise Exception(resp.text.encode("utf-8")) + + def put(self, url, file_path, headers): + with open(file_path, 'rb') as f: + resp = self.session.put(url, headers=headers, data=f) + if resp.status_code == 200: + return True, resp.text.encode("utf-8") + else: + return False, resp.text.encode("utf-8") + + def put_data(self, url, data, headers): + resp = self.session.put(url, headers=headers, data=data) + if resp.status_code == 200: + return True, resp.text.encode("utf-8") + else: + return False, resp.text.encode("utf-8") + + def prepare_request(self, api_info, params, doseq=0): + for key in params: + if type(params[key]) == int or type(params[key]) == float or type(params[key]) == bool: + params[key] = str(params[key]) + elif type(params[key]) == list: + if not doseq: + params[key] = ','.join(params[key]) + + connection_timeout = self.service_info.connection_timeout + socket_timeout = self.service_info.socket_timeout + + r = Request() + r.set_schema(self.service_info.scheme) + r.set_method(api_info.method) + r.set_connection_timeout(connection_timeout) + r.set_socket_timeout(socket_timeout) + + headers = self.merge(api_info.header, self.service_info.header) + headers['Host'] = self.service_info.host + headers['User-Agent'] = 'volc-sdk-python/' + VERSION + r.set_headers(headers) + + query = self.merge(api_info.query, params) + r.set_query(query) + + r.set_host(self.service_info.host) + r.set_path(api_info.path) + + return r + + @staticmethod + def merge(param1, param2): + od = OrderedDict() + for key in param1: + od[key] = param1[key] + + for key in param2: + od[key] = 
param2[key] + + return od + + +class Request: + def __init__(self): + self.schema = '' + self.method = '' + self.host = '' + self.path = '' + self.headers = OrderedDict() + self.query = OrderedDict() + self.body = '' + self.form = dict() + self.connection_timeout = 0 + self.socket_timeout = 0 + + def set_schema(self, schema): + self.schema = schema + + def set_method(self, method): + self.method = method + + def set_host(self, host): + self.host = host + + def set_path(self, path): + self.path = path + + def set_headers(self, headers): + self.headers = headers + + def set_query(self, query): + self.query = query + + def set_body(self, body): + self.body = body + + def set_connection_timeout(self, connection_timeout): + self.connection_timeout = connection_timeout + + def set_socket_timeout(self, socket_timeout): + self.socket_timeout = socket_timeout + + def build(self, doseq=0): + return self.schema + '://' + self.host + self.path + '?' + urlencode(self.query, doseq) + + +class ServiceInfo: + def __init__(self, host, header, credentials, connection_timeout, socket_timeout, scheme='http'): + self.host = host + self.header = header + self.credentials = credentials + self.connection_timeout = connection_timeout + self.socket_timeout = socket_timeout + self.scheme = scheme + + +class ApiInfo: + def __init__(self, method, path, query, form, header): + self.method = method + self.path = path + self.query = query + self.form = form + self.header = header + + def __str__(self): + return 'method: ' + self.method + ', path: ' + self.path diff --git a/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/util.py b/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/util.py new file mode 100644 index 0000000000..7eb5fdfa91 --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/base/util.py @@ -0,0 +1,43 @@ +import hashlib +import hmac +from functools import reduce +from urllib.parse import quote + + +class Util: + @staticmethod + def norm_uri(path): + return quote(path).replace('%2F', '/').replace('+', '%20') + + @staticmethod + def norm_query(params): + query = '' + for key in sorted(params.keys()): + if type(params[key]) == list: + for k in params[key]: + query = query + quote(key, safe='-_.~') + '=' + quote(k, safe='-_.~') + '&' + else: + query = query + quote(key, safe='-_.~') + '=' + quote(params[key], safe='-_.~') + '&' + query = query[:-1] + return query.replace('+', '%20') + + @staticmethod + def hmac_sha256(key, content): + return hmac.new(key, bytes(content, encoding='utf-8'), hashlib.sha256).digest() + + @staticmethod + def sha256(content): + if isinstance(content, str) is True: + return hashlib.sha256(content.encode('utf-8')).hexdigest() + else: + return hashlib.sha256(content).hexdigest() + + @staticmethod + def to_hex(content): + lst = [] + for ch in content: + hv = hex(ch).replace('0x', '') + if len(hv) == 1: + hv = '0' + hv + lst.append(hv) + return reduce(lambda x, y: x + y, lst) diff --git a/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/common.py b/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/common.py new file mode 100644 index 0000000000..8b14d026d9 --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/common.py @@ -0,0 +1,79 @@ +import json +import random +from datetime import datetime + + +class ChatRole: + USER = "user" + ASSISTANT = "assistant" + SYSTEM = "system" + FUNCTION = "function" + + +class _Dict(dict): + __setattr__ = dict.__setitem__ + 
__getattr__ = dict.__getitem__ + + def __missing__(self, key): + return None + + +def dict_to_object(dict_obj): + # 支持嵌套类型 + if isinstance(dict_obj, list): + insts = [] + for i in dict_obj: + insts.append(dict_to_object(i)) + return insts + + if isinstance(dict_obj, dict): + inst = _Dict() + for k, v in dict_obj.items(): + inst[k] = dict_to_object(v) + return inst + + return dict_obj + + +def json_to_object(json_str, req_id=None): + obj = dict_to_object(json.loads(json_str)) + if obj and isinstance(obj, dict) and req_id: + obj["req_id"] = req_id + return obj + + +def gen_req_id(): + return datetime.now().strftime("%Y%m%d%H%M%S") + format( + random.randint(0, 2 ** 64 - 1), "020X" + ) + + +class SSEDecoder: + def __init__(self, source): + self.source = source + + def _read(self): + data = b'' + for chunk in self.source: + for line in chunk.splitlines(True): + data += line + if data.endswith((b'\r\r', b'\n\n', b'\r\n\r\n')): + yield data + data = b'' + if data: + yield data + + def next(self): + for chunk in self._read(): + for line in chunk.splitlines(): + # skip comment + if line.startswith(b':'): + continue + + if b':' in line: + field, value = line.split(b':', 1) + else: + field, value = line, b'' + + if field == b'data' and len(value) > 0: + yield value diff --git a/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/maas.py b/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/maas.py new file mode 100644 index 0000000000..3cbe9d9f09 --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/volc_sdk/maas.py @@ -0,0 +1,213 @@ +import copy +import json +from collections.abc import Iterator + +from .base.auth import Credentials, Signer +from .base.service import ApiInfo, Service, ServiceInfo +from .common import SSEDecoder, dict_to_object, gen_req_id, json_to_object + + +class MaasService(Service): + def __init__(self, host, region, connection_timeout=60, socket_timeout=60): + service_info = self.get_service_info( + host, region, connection_timeout, socket_timeout + ) + self._apikey = None + api_info = self.get_api_info() + super().__init__(service_info, api_info) + + def set_apikey(self, apikey): + self._apikey = apikey + + @staticmethod + def get_service_info(host, region, connection_timeout, socket_timeout): + service_info = ServiceInfo( + host, + {"Accept": "application/json"}, + Credentials("", "", "ml_maas", region), + connection_timeout, + socket_timeout, + "https", + ) + return service_info + + @staticmethod + def get_api_info(): + api_info = { + "chat": ApiInfo("POST", "/api/v2/endpoint/{endpoint_id}/chat", {}, {}, {}), + "embeddings": ApiInfo( + "POST", "/api/v2/endpoint/{endpoint_id}/embeddings", {}, {}, {} + ), + } + return api_info + + def chat(self, endpoint_id, req): + req["stream"] = False + return self._request(endpoint_id, "chat", req) + + def stream_chat(self, endpoint_id, req): + req_id = gen_req_id() + self._validate("chat", req_id) + apikey = self._apikey + + try: + req["stream"] = True + res = self._call( + endpoint_id, "chat", req_id, {}, json.dumps(req).encode("utf-8"), apikey, stream=True + ) + + decoder = SSEDecoder(res) + + def iter_fn(): + for data in decoder.next(): + if data == b"[DONE]": + return + + try: + res = json_to_object( + str(data, encoding="utf-8"), req_id=req_id) + except Exception: + raise + + if res.error is not None and res.error.code_n != 0: + raise MaasException( + res.error.code_n, + res.error.code, + res.error.message, + req_id, + ) + yield res + + return iter_fn() + except MaasException: + raise + 
except Exception as e: + raise new_client_sdk_request_error(str(e)) + + def embeddings(self, endpoint_id, req): + return self._request(endpoint_id, "embeddings", req) + + def _request(self, endpoint_id, api, req, params={}): + req_id = gen_req_id() + + self._validate(api, req_id) + + apikey = self._apikey + + try: + res = self._call(endpoint_id, api, req_id, params, + json.dumps(req).encode("utf-8"), apikey) + resp = dict_to_object(res.json()) + if resp and isinstance(resp, dict): + resp["req_id"] = req_id + return resp + + except MaasException as e: + raise e + except Exception as e: + raise new_client_sdk_request_error(str(e), req_id) + + def _validate(self, api, req_id): + credentials_exist = ( + self.service_info.credentials is not None and + self.service_info.credentials.sk is not None and + self.service_info.credentials.ak is not None + ) + + if not self._apikey and not credentials_exist: + raise new_client_sdk_request_error("no valid credential", req_id) + + if not (api in self.api_info): + raise new_client_sdk_request_error("no such api", req_id) + + def _call(self, endpoint_id, api, req_id, params, body, apikey=None, stream=False): + api_info = copy.deepcopy(self.api_info[api]) + api_info.path = api_info.path.format(endpoint_id=endpoint_id) + + r = self.prepare_request(api_info, params) + r.headers["x-tt-logid"] = req_id + r.headers["Content-Type"] = "application/json" + r.body = body + + if apikey is None: + Signer.sign(r, self.service_info.credentials) + elif apikey is not None: + r.headers["Authorization"] = "Bearer " + apikey + + url = r.build() + res = self.session.post( + url, + headers=r.headers, + data=r.body, + timeout=( + self.service_info.connection_timeout, + self.service_info.socket_timeout, + ), + stream=stream, + ) + + if res.status_code != 200: + raw = res.text.encode() + res.close() + try: + resp = json_to_object( + str(raw, encoding="utf-8"), req_id=req_id) + except Exception: + raise new_client_sdk_request_error(raw, req_id) + + if resp.error: + raise MaasException( + resp.error.code_n, resp.error.code, resp.error.message, req_id + ) + else: + raise new_client_sdk_request_error(resp, req_id) + + return res + + +class MaasException(Exception): + def __init__(self, code_n, code, message, req_id): + self.code_n = code_n + self.code = code + self.message = message + self.req_id = req_id + + def __str__(self): + return ("Detailed exception information is listed below.\n" + + "req_id: {}\n" + + "code_n: {}\n" + + "code: {}\n" + + "message: {}").format(self.req_id, self.code_n, self.code, self.message) + + +def new_client_sdk_request_error(raw, req_id=""): + return MaasException(1709701, "ClientSDKRequestError", "MaaS SDK request error: {}".format(raw), req_id) + + +class BinaryResponseContent: + def __init__(self, response, request_id) -> None: + self.response = response + self.request_id = request_id + + def stream_to_file( + self, + file: str + ) -> None: + is_first = True + error_bytes = b'' + with open(file, mode="wb") as f: + for data in self.response: + if len(error_bytes) > 0 or (is_first and "\"error\":" in str(data)): + error_bytes += data + else: + f.write(data) + + if len(error_bytes) > 0: + resp = json_to_object( + str(error_bytes, encoding="utf-8"), req_id=self.request_id) + raise MaasException( + resp.error.code_n, resp.error.code, resp.error.message, self.request_id + ) + + def iter_bytes(self) -> Iterator[bytes]: + yield from self.response diff --git a/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.py 
b/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.py new file mode 100644 index 0000000000..10f9be2d08 --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.py @@ -0,0 +1,10 @@ +import logging + +from core.model_runtime.model_providers.__base.model_provider import ModelProvider + +logger = logging.getLogger(__name__) + + +class VolcengineMaaSProvider(ModelProvider): + def validate_provider_credentials(self, credentials: dict) -> None: + pass diff --git a/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.yaml b/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.yaml new file mode 100644 index 0000000000..4f299ecae0 --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.yaml @@ -0,0 +1,151 @@ +provider: volcengine_maas +label: + en_US: Volcengine +description: + en_US: Volcengine MaaS models. +icon_small: + en_US: icon_s_en.svg +icon_large: + en_US: icon_l_en.svg + zh_Hans: icon_l_zh.svg +background: "#F9FAFB" +help: + title: + en_US: Get your Access Key and Secret Access Key from Volcengine Console + url: + en_US: https://console.volcengine.com/iam/keymanage/ +supported_model_types: + - llm + - text-embedding +configurate_methods: + - customizable-model +model_credential_schema: + model: + label: + en_US: Model Name + zh_Hans: 模型名称 + placeholder: + en_US: Enter your Model Name + zh_Hans: 输入模型名称 + credential_form_schemas: + - variable: volc_access_key_id + required: true + label: + en_US: Access Key + zh_Hans: Access Key + type: secret-input + placeholder: + en_US: Enter your Access Key + zh_Hans: 输入您的 Access Key + - variable: volc_secret_access_key + required: true + label: + en_US: Secret Access Key + zh_Hans: Secret Access Key + type: secret-input + placeholder: + en_US: Enter your Secret Access Key + zh_Hans: 输入您的 Secret Access Key + - variable: volc_region + required: true + label: + en_US: Volcengine Region + zh_Hans: 火山引擎地区 + type: text-input + default: cn-beijing + placeholder: + en_US: Enter Volcengine Region + zh_Hans: 输入火山引擎地域 + - variable: api_endpoint_host + required: true + label: + en_US: API Endpoint Host + zh_Hans: API Endpoint Host + type: text-input + default: maas-api.ml-platform-cn-beijing.volces.com + placeholder: + en_US: Enter your API Endpoint Host + zh_Hans: 输入 API Endpoint Host + - variable: endpoint_id + required: true + label: + en_US: Endpoint ID + zh_Hans: Endpoint ID + type: text-input + placeholder: + en_US: Enter your Endpoint ID + zh_Hans: 输入您的 Endpoint ID + - variable: base_model_name + show_on: + - variable: __model_type + value: llm + label: + en_US: Base Model + zh_Hans: 基础模型 + type: select + required: true + options: + - label: + en_US: Skylark2-pro-4k + value: Skylark2-pro-4k + show_on: + - variable: __model_type + value: llm + - label: + en_US: Custom + zh_Hans: 自定义 + value: Custom + - variable: mode + required: true + show_on: + - variable: __model_type + value: llm + - variable: base_model_name + value: Custom + label: + zh_Hans: 模型类型 + en_US: Completion Mode + type: select + default: chat + placeholder: + zh_Hans: 选择对话类型 + en_US: Select Completion Mode + options: + - value: completion + label: + en_US: Completion + zh_Hans: 补全 + - value: chat + label: + en_US: Chat + zh_Hans: 对话 + - variable: context_size + required: true + show_on: + - variable: __model_type + value: llm + - variable: base_model_name + value: Custom + label: + zh_Hans: 模型上下文长度 + en_US: Model Context Size + type: text-input + default: '4096' + 
placeholder: + zh_Hans: 输入您的模型上下文长度 + en_US: Enter your Model Context Size + - variable: max_tokens + required: true + show_on: + - variable: __model_type + value: llm + - variable: base_model_name + value: Custom + label: + zh_Hans: 最大 token 上限 + en_US: Upper Bound for Max Tokens + default: '4096' + type: text-input + placeholder: + zh_Hans: 输入您的模型最大 token 上限 + en_US: Enter your model Upper Bound for Max Tokens diff --git a/api/tests/integration_tests/.env.example b/api/tests/integration_tests/.env.example index 9cd04b4764..f29e5ef4d6 100644 --- a/api/tests/integration_tests/.env.example +++ b/api/tests/integration_tests/.env.example @@ -73,4 +73,10 @@ MOCK_SWITCH=false # CODE EXECUTION CONFIGURATION CODE_EXECUTION_ENDPOINT= -CODE_EXECUTION_API_KEY= \ No newline at end of file +CODE_EXECUTION_API_KEY= + +# Volcengine MaaS Credentials +VOLC_API_KEY= +VOLC_SECRET_KEY= +VOLC_MODEL_ENDPOINT_ID= +VOLC_EMBEDDING_ENDPOINT_ID= \ No newline at end of file diff --git a/api/tests/integration_tests/model_runtime/volcengine_maas/__init__.py b/api/tests/integration_tests/model_runtime/volcengine_maas/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/integration_tests/model_runtime/volcengine_maas/test_embedding.py b/api/tests/integration_tests/model_runtime/volcengine_maas/test_embedding.py new file mode 100644 index 0000000000..61e9f704af --- /dev/null +++ b/api/tests/integration_tests/model_runtime/volcengine_maas/test_embedding.py @@ -0,0 +1,81 @@ +import os + +import pytest + +from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.volcengine_maas.text_embedding.text_embedding import ( + VolcengineMaaSTextEmbeddingModel, +) + + +def test_validate_credentials(): + model = VolcengineMaaSTextEmbeddingModel() + + with pytest.raises(CredentialsValidateFailedError): + model.validate_credentials( + model='NOT IMPORTANT', + credentials={ + 'api_endpoint_host': 'maas-api.ml-platform-cn-beijing.volces.com', + 'volc_region': 'cn-beijing', + 'volc_access_key_id': 'INVALID', + 'volc_secret_access_key': 'INVALID', + 'endpoint_id': 'INVALID', + } + ) + + model.validate_credentials( + model='NOT IMPORTANT', + credentials={ + 'api_endpoint_host': 'maas-api.ml-platform-cn-beijing.volces.com', + 'volc_region': 'cn-beijing', + 'volc_access_key_id': os.environ.get('VOLC_API_KEY'), + 'volc_secret_access_key': os.environ.get('VOLC_SECRET_KEY'), + 'endpoint_id': os.environ.get('VOLC_EMBEDDING_ENDPOINT_ID'), + }, + ) + + +def test_invoke_model(): + model = VolcengineMaaSTextEmbeddingModel() + + result = model.invoke( + model='NOT IMPORTANT', + credentials={ + 'api_endpoint_host': 'maas-api.ml-platform-cn-beijing.volces.com', + 'volc_region': 'cn-beijing', + 'volc_access_key_id': os.environ.get('VOLC_API_KEY'), + 'volc_secret_access_key': os.environ.get('VOLC_SECRET_KEY'), + 'endpoint_id': os.environ.get('VOLC_EMBEDDING_ENDPOINT_ID'), + }, + texts=[ + "hello", + "world" + ], + user="abc-123" + ) + + assert isinstance(result, TextEmbeddingResult) + assert len(result.embeddings) == 2 + assert result.usage.total_tokens > 0 + + +def test_get_num_tokens(): + model = VolcengineMaaSTextEmbeddingModel() + + num_tokens = model.get_num_tokens( + model='NOT IMPORTANT', + credentials={ + 'api_endpoint_host': 'maas-api.ml-platform-cn-beijing.volces.com', + 'volc_region': 'cn-beijing', + 'volc_access_key_id': os.environ.get('VOLC_API_KEY'), + 
'volc_secret_access_key': os.environ.get('VOLC_SECRET_KEY'), + 'endpoint_id': os.environ.get('VOLC_EMBEDDING_ENDPOINT_ID'), + }, + texts=[ + "hello", + "world" + ] + ) + + assert num_tokens == 2 diff --git a/api/tests/integration_tests/model_runtime/volcengine_maas/test_llm.py b/api/tests/integration_tests/model_runtime/volcengine_maas/test_llm.py new file mode 100644 index 0000000000..63835d0263 --- /dev/null +++ b/api/tests/integration_tests/model_runtime/volcengine_maas/test_llm.py @@ -0,0 +1,131 @@ +import os +from collections.abc import Generator + +import pytest + +from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta +from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.volcengine_maas.llm.llm import VolcengineMaaSLargeLanguageModel + + +def test_validate_credentials_for_chat_model(): + model = VolcengineMaaSLargeLanguageModel() + + with pytest.raises(CredentialsValidateFailedError): + model.validate_credentials( + model='NOT IMPORTANT', + credentials={ + 'api_endpoint_host': 'maas-api.ml-platform-cn-beijing.volces.com', + 'volc_region': 'cn-beijing', + 'volc_access_key_id': 'INVALID', + 'volc_secret_access_key': 'INVALID', + 'endpoint_id': 'INVALID', + } + ) + + model.validate_credentials( + model='NOT IMPORTANT', + credentials={ + 'api_endpoint_host': 'maas-api.ml-platform-cn-beijing.volces.com', + 'volc_region': 'cn-beijing', + 'volc_access_key_id': os.environ.get('VOLC_API_KEY'), + 'volc_secret_access_key': os.environ.get('VOLC_SECRET_KEY'), + 'endpoint_id': os.environ.get('VOLC_MODEL_ENDPOINT_ID'), + } + ) + + +def test_invoke_model(): + model = VolcengineMaaSLargeLanguageModel() + + response = model.invoke( + model='NOT IMPORTANT', + credentials={ + 'api_endpoint_host': 'maas-api.ml-platform-cn-beijing.volces.com', + 'volc_region': 'cn-beijing', + 'volc_access_key_id': os.environ.get('VOLC_API_KEY'), + 'volc_secret_access_key': os.environ.get('VOLC_SECRET_KEY'), + 'endpoint_id': os.environ.get('VOLC_MODEL_ENDPOINT_ID'), + 'base_model_name': 'Skylark2-pro-4k', + }, + prompt_messages=[ + UserPromptMessage( + content='Hello World!' + ) + ], + model_parameters={ + 'temperature': 0.7, + 'top_p': 1.0, + 'top_k': 1, + }, + stop=['you'], + user="abc-123", + stream=False + ) + + assert isinstance(response, LLMResult) + assert len(response.message.content) > 0 + assert response.usage.total_tokens > 0 + + +def test_invoke_stream_model(): + model = VolcengineMaaSLargeLanguageModel() + + response = model.invoke( + model='NOT IMPORTANT', + credentials={ + 'api_endpoint_host': 'maas-api.ml-platform-cn-beijing.volces.com', + 'volc_region': 'cn-beijing', + 'volc_access_key_id': os.environ.get('VOLC_API_KEY'), + 'volc_secret_access_key': os.environ.get('VOLC_SECRET_KEY'), + 'endpoint_id': os.environ.get('VOLC_MODEL_ENDPOINT_ID'), + 'base_model_name': 'Skylark2-pro-4k', + }, + prompt_messages=[ + UserPromptMessage( + content='Hello World!' 
+ ) + ], + model_parameters={ + 'temperature': 0.7, + 'top_p': 1.0, + 'top_k': 1, + }, + stop=['you'], + stream=True, + user="abc-123" + ) + + assert isinstance(response, Generator) + for chunk in response: + assert isinstance(chunk, LLMResultChunk) + assert isinstance(chunk.delta, LLMResultChunkDelta) + assert isinstance(chunk.delta.message, AssistantPromptMessage) + assert len( + chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True + + +def test_get_num_tokens(): + model = VolcengineMaaSLargeLanguageModel() + + response = model.get_num_tokens( + model='NOT IMPORTANT', + credentials={ + 'api_endpoint_host': 'maas-api.ml-platform-cn-beijing.volces.com', + 'volc_region': 'cn-beijing', + 'volc_access_key_id': os.environ.get('VOLC_API_KEY'), + 'volc_secret_access_key': os.environ.get('VOLC_SECRET_KEY'), + 'endpoint_id': os.environ.get('VOLC_MODEL_ENDPOINT_ID'), + 'base_model_name': 'Skylark2-pro-4k', + }, + prompt_messages=[ + UserPromptMessage( + content='Hello World!' + ) + ], + tools=[] + ) + + assert isinstance(response, int) + assert response == 6 From 8137d63000fea6496e6184e30604e6f3967141c5 Mon Sep 17 00:00:00 2001 From: zxhlyh Date: Wed, 8 May 2024 13:20:26 +0800 Subject: [PATCH 024/267] fix: workflow http node timeout & url check (#4175) --- .../workflow/nodes/http/components/timeout/index.tsx | 6 +++--- web/app/components/workflow/nodes/http/default.ts | 3 --- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/web/app/components/workflow/nodes/http/components/timeout/index.tsx b/web/app/components/workflow/nodes/http/components/timeout/index.tsx index 70e49a80d6..a7f9cab00e 100644 --- a/web/app/components/workflow/nodes/http/components/timeout/index.tsx +++ b/web/app/components/workflow/nodes/http/components/timeout/index.tsx @@ -68,7 +68,7 @@ const Timeout: FC = ({ readonly, payload, onChange }) => { value={connect} onChange={v => onChange?.({ ...payload, connect: v })} min={1} - max={max_connect_timeout ?? 300} + max={max_connect_timeout || 300} /> = ({ readonly, payload, onChange }) => { value={read} onChange={v => onChange?.({ ...payload, read: v })} min={1} - max={max_read_timeout ?? 600} + max={max_read_timeout || 600} /> = ({ readonly, payload, onChange }) => { value={write} onChange={v => onChange?.({ ...payload, write: v })} min={1} - max={max_write_timeout ?? 600} + max={max_write_timeout || 600} />
diff --git a/web/app/components/workflow/nodes/http/default.ts b/web/app/components/workflow/nodes/http/default.ts index 0fa72f970d..4e797ed14a 100644 --- a/web/app/components/workflow/nodes/http/default.ts +++ b/web/app/components/workflow/nodes/http/default.ts @@ -40,9 +40,6 @@ const nodeDefault: NodeDefault = { if (!errorMessages && !payload.url) errorMessages = t('workflow.errorMsg.fieldRequired', { field: t('workflow.nodes.http.api') }) - if (!errorMessages && !payload.url.startsWith('http://') && !payload.url.startsWith('https://')) - errorMessages = t('workflow.nodes.http.notStartWithHttp') - return { isValid: !errorMessages, errorMessage: errorMessages, From a79941df22eea39823f5fa7364af388ba6ee8c97 Mon Sep 17 00:00:00 2001 From: tomo Date: Thu, 9 May 2024 13:52:07 +0900 Subject: [PATCH 025/267] fix: button widths (#4145) --- web/app/components/base/button/index.css | 2 +- web/app/components/base/confirm-ui/index.tsx | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/web/app/components/base/button/index.css b/web/app/components/base/button/index.css index bffb19be94..81fb5fda45 100644 --- a/web/app/components/base/button/index.css +++ b/web/app/components/base/button/index.css @@ -2,7 +2,7 @@ @layer components { .btn { - @apply inline-flex justify-center items-center content-center h-9 leading-5 rounded-lg px-4 py-2 text-base cursor-pointer; + @apply inline-flex justify-center items-center content-center h-9 leading-5 rounded-lg px-4 py-2 text-base cursor-pointer whitespace-nowrap; } .btn-default { diff --git a/web/app/components/base/confirm-ui/index.tsx b/web/app/components/base/confirm-ui/index.tsx index 7120de862b..b43a363134 100644 --- a/web/app/components/base/confirm-ui/index.tsx +++ b/web/app/components/base/confirm-ui/index.tsx @@ -42,8 +42,8 @@ const ConfirmUI: FC = ({
- - + +
From ca5081e3272a2e9d3df669d4b9725dfaf40b6e90 Mon Sep 17 00:00:00 2001 From: Louie Long Date: Thu, 9 May 2024 12:53:06 +0800 Subject: [PATCH 026/267] fix delete log annotation (#4201) Co-authored-by: langyong --- web/app/components/app/chat/index.tsx | 6 +----- web/app/components/app/log/list.tsx | 4 ++-- web/models/log.ts | 1 + 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/web/app/components/app/chat/index.tsx b/web/app/components/app/chat/index.tsx index e1fe5589ed..6187ec684f 100644 --- a/web/app/components/app/chat/index.tsx +++ b/web/app/components/app/chat/index.tsx @@ -260,11 +260,7 @@ const Chat: FC = ({ return { ...item, content: item.content, - annotation: { - ...(item.annotation || {}), - id: '', - logAnnotation: undefined, // remove log - } as Annotation, + annotation: undefined, } } return item diff --git a/web/app/components/app/log/list.tsx b/web/app/components/app/log/list.tsx index 026db4da2c..3c420d342f 100644 --- a/web/app/components/app/log/list.tsx +++ b/web/app/components/app/log/list.tsx @@ -137,8 +137,8 @@ const getFormattedChatList = (messages: ChatMessage[], conversationId: string, t if (item.annotation) { return { - id: '', - authorName: '', + id: item.annotation.id, + authorName: item.annotation.account.name, logAnnotation: item.annotation, created_at: 0, } diff --git a/web/models/log.ts b/web/models/log.ts index 3b893e1e88..994da445dc 100644 --- a/web/models/log.ts +++ b/web/models/log.ts @@ -57,6 +57,7 @@ export type ModelConfigDetail = { } export type LogAnnotation = { + id: string content: string account: { id: string From 7405b2e8192fbc516545a501d6b573d99a271809 Mon Sep 17 00:00:00 2001 From: faukwaa <133618995+faukwaa@users.noreply.github.com> Date: Thu, 9 May 2024 13:49:19 +0800 Subject: [PATCH 027/267] modify spelling errors: bulild -> build (#4206) --- web/config/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/config/index.ts b/web/config/index.ts index 7d6e9a053e..933c479b7c 100644 --- a/web/config/index.ts +++ b/web/config/index.ts @@ -15,7 +15,7 @@ else if ( globalThis.document?.body?.getAttribute('data-api-prefix') && globalThis.document?.body?.getAttribute('data-pubic-api-prefix') ) { - // Not bulild can not get env from process.env.NEXT_PUBLIC_ in browser https://nextjs.org/docs/basic-features/environment-variables#exposing-environment-variables-to-the-browser + // Not build can not get env from process.env.NEXT_PUBLIC_ in browser https://nextjs.org/docs/basic-features/environment-variables#exposing-environment-variables-to-the-browser apiPrefix = globalThis.document.body.getAttribute('data-api-prefix') as string publicApiPrefix = globalThis.document.body.getAttribute('data-pubic-api-prefix') as string } From 64c3bc070a66dda9a3b6b732457067580a828acd Mon Sep 17 00:00:00 2001 From: takatost Date: Thu, 9 May 2024 13:58:25 +0800 Subject: [PATCH 028/267] version to 0.6.7 (#4208) --- api/config.py | 2 +- docker/docker-compose.yaml | 6 +++--- web/package.json | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/api/config.py b/api/config.py index d6c345c579..81fb866d27 100644 --- a/api/config.py +++ b/api/config.py @@ -107,7 +107,7 @@ class Config: # ------------------------ # General Configurations. 
# ------------------------ - self.CURRENT_VERSION = "0.6.6" + self.CURRENT_VERSION = "0.6.7" self.COMMIT_SHA = get_env('COMMIT_SHA') self.EDITION = get_env('EDITION') self.DEPLOY_ENV = get_env('DEPLOY_ENV') diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 706f7d70e1..432383b76d 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -2,7 +2,7 @@ version: '3' services: # API service api: - image: langgenius/dify-api:0.6.6 + image: langgenius/dify-api:0.6.7 restart: always environment: # Startup mode, 'api' starts the API server. @@ -168,7 +168,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.6.6 + image: langgenius/dify-api:0.6.7 restart: always environment: # Startup mode, 'worker' starts the Celery worker for processing the queue. @@ -265,7 +265,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.6.6 + image: langgenius/dify-web:0.6.7 restart: always environment: # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is diff --git a/web/package.json b/web/package.json index 79ca6e814f..1fff4eda65 100644 --- a/web/package.json +++ b/web/package.json @@ -1,6 +1,6 @@ { "name": "dify-web", - "version": "0.6.6", + "version": "0.6.7", "private": true, "scripts": { "dev": "next dev", From cc835d523ceb5a8e7399dcbf11ebf7b7019f9a05 Mon Sep 17 00:00:00 2001 From: TinsFox Date: Thu, 9 May 2024 15:38:09 +0800 Subject: [PATCH 029/267] refactor: install form (#4154) --- web/app/install/installForm.tsx | 115 ++++++++++++++------------------ web/i18n/en-US/login.ts | 1 + web/package.json | 3 + web/yarn.lock | 46 +++++-------- 4 files changed, 71 insertions(+), 94 deletions(-) diff --git a/web/app/install/installForm.tsx b/web/app/install/installForm.tsx index 631812613e..414bd9d6fd 100644 --- a/web/app/install/installForm.tsx +++ b/web/app/install/installForm.tsx @@ -1,73 +1,67 @@ 'use client' import React, { useEffect } from 'react' import { useTranslation } from 'react-i18next' + import Link from 'next/link' import { useRouter } from 'next/navigation' -// import { useContext } from 'use-context-selector' -import Toast from '../components/base/toast' + +import type { SubmitHandler } from 'react-hook-form' +import { useForm } from 'react-hook-form' +import { z } from 'zod' +import { zodResolver } from '@hookform/resolvers/zod' +import classNames from 'classnames' import Loading from '../components/base/loading' import Button from '@/app/components/base/button' -// import I18n from '@/context/i18n' import { fetchInitValidateStatus, fetchSetupStatus, setup } from '@/service/common' import type { InitValidateStatusResponse, SetupStatusResponse } from '@/models/common' -const validEmailReg = /^[\w\.-]+@([\w-]+\.)+[\w-]{2,}$/ const validPassword = /^(?=.*[a-zA-Z])(?=.*\d).{8,}$/ +const accountFormSchema = z.object({ + email: z + .string() + .min(1, { message: 'login.error.emailInValid' }) + .email('login.error.emailInValid'), + name: z.string().min(1, { message: 'login.error.nameEmpty' }), + password: z.string().min(8, { + message: 'login.error.passwordLengthInValid', + }).regex(validPassword, 'login.error.passwordInvalid'), +}) + +type AccountFormValues = z.infer + const InstallForm = () => { const { t } = useTranslation() const router = useRouter() - - const [email, setEmail] = React.useState('') - const [name, setName] = React.useState('') - const [password, setPassword] = React.useState('') const 
[showPassword, setShowPassword] = React.useState(false) const [loading, setLoading] = React.useState(true) + const { + register, + handleSubmit, + formState: { errors }, + } = useForm({ + resolver: zodResolver(accountFormSchema), + defaultValues: { + name: '', + password: '', + email: '', + }, + }) - const showErrorMessage = (message: string) => { - Toast.notify({ - type: 'error', - message, - }) - } - const valid = () => { - if (!email) { - showErrorMessage(t('login.error.emailEmpty')) - return false - } - if (!validEmailReg.test(email)) { - showErrorMessage(t('login.error.emailInValid')) - return false - } - if (!name.trim()) { - showErrorMessage(t('login.error.nameEmpty')) - return false - } - if (!password.trim()) { - showErrorMessage(t('login.error.passwordEmpty')) - return false - } - if (!validPassword.test(password)) { - showErrorMessage(t('login.error.passwordInvalid')) - return false - } - - return true - } - const handleSetting = async () => { - if (!valid()) - return + const onSubmit: SubmitHandler = async (data) => { await setup({ body: { - email, - name, - password, + ...data, }, }) router.push('/signin') } + const handleSetting = async () => { + handleSubmit(onSubmit)() + } + useEffect(() => { fetchSetupStatus().then((res: SetupStatusResponse) => { if (res.step === 'finished') { @@ -93,24 +87,22 @@ const InstallForm = () => { mt-1 text-sm text-gray-600 '>{t('login.setAdminAccountDesc')}

-
-
{ }}> +
setEmail(e.target.value)} + {...register('email')} placeholder={t('login.emailPlaceholder') || ''} className={'appearance-none block w-full rounded-lg pl-[14px] px-3 py-2 border border-gray-200 hover:border-gray-300 hover:shadow-sm focus:outline-none focus:ring-primary-500 focus:border-primary-500 placeholder-gray-400 caret-primary-600 sm:text-sm'} /> + {errors.email && {t(`${errors.email?.message}`)}}
+
@@ -119,14 +111,12 @@ const InstallForm = () => {
setName(e.target.value)} + {...register('name')} placeholder={t('login.namePlaceholder') || ''} className={'appearance-none block w-full rounded-lg pl-[14px] px-3 py-2 border border-gray-200 hover:border-gray-300 hover:shadow-sm focus:outline-none focus:ring-primary-500 focus:border-primary-500 placeholder-gray-400 caret-primary-600 sm:text-sm pr-10'} />
+ {errors.name && {t(`${errors.name.message}`)}}
@@ -135,13 +125,12 @@ const InstallForm = () => {
setPassword(e.target.value)} placeholder={t('login.passwordPlaceholder') || ''} className={'appearance-none block w-full rounded-lg pl-[14px] px-3 py-2 border border-gray-200 hover:border-gray-300 hover:shadow-sm focus:outline-none focus:ring-primary-500 focus:border-primary-500 placeholder-gray-400 caret-primary-600 sm:text-sm pr-10'} /> +
-
{t('login.error.passwordInvalid')}
+
{t('login.error.passwordInvalid')}
- {/*
-
-
- - -
-
-
*/}
- + @@ -127,7 +127,7 @@ Dify is an open-source LLM app development platform. Its intuitive interface com - + diff --git a/README_CN.md b/README_CN.md index 6a7f178e63..dc4e564c50 100644 --- a/README_CN.md +++ b/README_CN.md @@ -111,7 +111,7 @@ Dify 是一个开源的 LLM 应用开发平台。其直观的界面结合了 AI - + diff --git a/README_ES.md b/README_ES.md index ae6ab4e382..4758a107e9 100644 --- a/README_ES.md +++ b/README_ES.md @@ -111,7 +111,7 @@ es basados en LLM Function Calling o ReAct, y agregar herramientas preconstruida - + diff --git a/README_FR.md b/README_FR.md index ae7df183e2..2b9b6b038e 100644 --- a/README_FR.md +++ b/README_FR.md @@ -111,7 +111,7 @@ ités d'agent**: - + diff --git a/README_JA.md b/README_JA.md index ea1c717272..47ecbe884c 100644 --- a/README_JA.md +++ b/README_JA.md @@ -110,7 +110,7 @@ DifyはオープンソースのLLMアプリケーション開発プラットフ - + diff --git a/README_KL.md b/README_KL.md index 600649c459..a52f859bab 100644 --- a/README_KL.md +++ b/README_KL.md @@ -111,7 +111,7 @@ Dify is an open-source LLM app development platform. Its intuitive interface com - + From 875249eb00ddfe5de669de6b7e7317d247a08509 Mon Sep 17 00:00:00 2001 From: LiuVaayne <10231735+vaayne@users.noreply.github.com> Date: Fri, 10 May 2024 17:20:30 +0800 Subject: [PATCH 037/267] Feat/vector db pgvector (#3879) --- .github/workflows/api-tests.yml | 4 +- api/.env.example | 9 +- api/commands.py | 8 + api/config.py | 9 +- api/controllers/console/datasets/datasets.py | 9 +- .../rag/datasource/vdb/pgvector/__init__.py | 0 .../rag/datasource/vdb/pgvector/pgvector.py | 169 ++++++++++++++++++ api/core/rag/datasource/vdb/vector_factory.py | 23 +++ api/requirements.txt | 1 + .../vdb/pgvector/__init__.py | 0 .../vdb/pgvector/test_pgvector.py | 30 ++++ docker/docker-compose.pgvector.yaml | 24 +++ docker/docker-compose.yaml | 39 +++- 13 files changed, 316 insertions(+), 9 deletions(-) create mode 100644 api/core/rag/datasource/vdb/pgvector/__init__.py create mode 100644 api/core/rag/datasource/vdb/pgvector/pgvector.py create mode 100644 api/tests/integration_tests/vdb/pgvector/__init__.py create mode 100644 api/tests/integration_tests/vdb/pgvector/test_pgvector.py create mode 100644 docker/docker-compose.pgvector.yaml diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml index a0407de843..7d24b15bdf 100644 --- a/.github/workflows/api-tests.yml +++ b/.github/workflows/api-tests.yml @@ -50,7 +50,7 @@ jobs: - name: Run Workflow run: dev/pytest/pytest_workflow.sh - - name: Set up Vector Stores (Weaviate, Qdrant, Milvus, PgVecto-RS) + - name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS) uses: hoverkraft-tech/compose-action@v2.0.0 with: compose-file: | @@ -58,6 +58,7 @@ jobs: docker/docker-compose.qdrant.yaml docker/docker-compose.milvus.yaml docker/docker-compose.pgvecto-rs.yaml + docker/docker-compose.pgvector.yaml services: | weaviate qdrant @@ -65,6 +66,7 @@ jobs: minio milvus-standalone pgvecto-rs + pgvector - name: Test Vector Stores run: dev/pytest/pytest_vdb.sh diff --git a/api/.env.example b/api/.env.example index 01326a0cc8..e0f87d471a 100644 --- a/api/.env.example +++ b/api/.env.example @@ -65,7 +65,7 @@ GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON=your-google-service-account-json-base64-stri WEB_API_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,* CONSOLE_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,* -# Vector database configuration, support: weaviate, qdrant, milvus, relyt, pgvecto_rs +# Vector database configuration, support: weaviate, qdrant, milvus, relyt, pgvecto_rs, pgvector VECTOR_STORE=weaviate # Weaviate 
configuration @@ -102,6 +102,13 @@ PGVECTO_RS_USER=postgres PGVECTO_RS_PASSWORD=difyai123456 PGVECTO_RS_DATABASE=postgres +# PGVector configuration +PGVECTOR_HOST=127.0.0.1 +PGVECTOR_PORT=5433 +PGVECTOR_USER=postgres +PGVECTOR_PASSWORD=postgres +PGVECTOR_DATABASE=postgres + # Upload configuration UPLOAD_FILE_SIZE_LIMIT=15 UPLOAD_FILE_BATCH_LIMIT=5 diff --git a/api/commands.py b/api/commands.py index b82f7ac3f8..75f2491421 100644 --- a/api/commands.py +++ b/api/commands.py @@ -305,6 +305,14 @@ def migrate_knowledge_vector_database(): "vector_store": {"class_prefix": collection_name} } dataset.index_struct = json.dumps(index_struct_dict) + elif vector_type == "pgvector": + dataset_id = dataset.id + collection_name = Dataset.gen_collection_name_by_id(dataset_id) + index_struct_dict = { + "type": 'pgvector', + "vector_store": {"class_prefix": collection_name} + } + dataset.index_struct = json.dumps(index_struct_dict) else: raise ValueError(f"Vector store {config.get('VECTOR_STORE')} is not supported.") diff --git a/api/config.py b/api/config.py index 81fb866d27..4dcd44237a 100644 --- a/api/config.py +++ b/api/config.py @@ -222,7 +222,7 @@ class Config: # ------------------------ # Vector Store Configurations. - # Currently, only support: qdrant, milvus, zilliz, weaviate, relyt + # Currently, only support: qdrant, milvus, zilliz, weaviate, relyt, pgvector # ------------------------ self.VECTOR_STORE = get_env('VECTOR_STORE') self.KEYWORD_STORE = get_env('KEYWORD_STORE') @@ -261,6 +261,13 @@ class Config: self.PGVECTO_RS_PASSWORD = get_env('PGVECTO_RS_PASSWORD') self.PGVECTO_RS_DATABASE = get_env('PGVECTO_RS_DATABASE') + # pgvector settings + self.PGVECTOR_HOST = get_env('PGVECTOR_HOST') + self.PGVECTOR_PORT = get_env('PGVECTOR_PORT') + self.PGVECTOR_USER = get_env('PGVECTOR_USER') + self.PGVECTOR_PASSWORD = get_env('PGVECTOR_PASSWORD') + self.PGVECTOR_DATABASE = get_env('PGVECTOR_DATABASE') + # ------------------------ # Mail Configurations. 
# ------------------------ diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index 40ded54120..30dc6ac845 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -476,13 +476,13 @@ class DatasetRetrievalSettingApi(Resource): @account_initialization_required def get(self): vector_type = current_app.config['VECTOR_STORE'] - if vector_type == 'milvus' or vector_type == 'pgvecto_rs' or vector_type == 'relyt': + if vector_type in {"milvus", "relyt", "pgvector", "pgvecto_rs"}: return { 'retrieval_method': [ 'semantic_search' ] } - elif vector_type == 'qdrant' or vector_type == 'weaviate': + elif vector_type in {"qdrant", "weaviate"}: return { 'retrieval_method': [ 'semantic_search', 'full_text_search', 'hybrid_search' @@ -497,14 +497,13 @@ class DatasetRetrievalSettingMockApi(Resource): @login_required @account_initialization_required def get(self, vector_type): - - if vector_type == 'milvus' or vector_type == 'relyt': + if vector_type in {'milvus', 'relyt', 'pgvector'}: return { 'retrieval_method': [ 'semantic_search' ] } - elif vector_type == 'qdrant' or vector_type == 'weaviate': + elif vector_type in {'qdrant', 'weaviate'}: return { 'retrieval_method': [ 'semantic_search', 'full_text_search', 'hybrid_search' diff --git a/api/core/rag/datasource/vdb/pgvector/__init__.py b/api/core/rag/datasource/vdb/pgvector/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/core/rag/datasource/vdb/pgvector/pgvector.py b/api/core/rag/datasource/vdb/pgvector/pgvector.py new file mode 100644 index 0000000000..22cf790bfa --- /dev/null +++ b/api/core/rag/datasource/vdb/pgvector/pgvector.py @@ -0,0 +1,169 @@ +import json +import uuid +from contextlib import contextmanager +from typing import Any + +import psycopg2.extras +import psycopg2.pool +from pydantic import BaseModel, root_validator + +from core.rag.datasource.vdb.vector_base import BaseVector +from core.rag.models.document import Document +from extensions.ext_redis import redis_client + + +class PGVectorConfig(BaseModel): + host: str + port: int + user: str + password: str + database: str + + @root_validator() + def validate_config(cls, values: dict) -> dict: + if not values["host"]: + raise ValueError("config PGVECTOR_HOST is required") + if not values["port"]: + raise ValueError("config PGVECTOR_PORT is required") + if not values["user"]: + raise ValueError("config PGVECTOR_USER is required") + if not values["password"]: + raise ValueError("config PGVECTOR_PASSWORD is required") + if not values["database"]: + raise ValueError("config PGVECTOR_DATABASE is required") + return values + + +SQL_CREATE_TABLE = """ +CREATE TABLE IF NOT EXISTS {table_name} ( + id UUID PRIMARY KEY, + text TEXT NOT NULL, + meta JSONB NOT NULL, + embedding vector({dimension}) NOT NULL +) using heap; +""" + + +class PGVector(BaseVector): + def __init__(self, collection_name: str, config: PGVectorConfig): + super().__init__(collection_name) + self.pool = self._create_connection_pool(config) + self.table_name = f"embedding_{collection_name}" + + def get_type(self) -> str: + return "pgvector" + + def _create_connection_pool(self, config: PGVectorConfig): + return psycopg2.pool.SimpleConnectionPool( + 1, + 5, + host=config.host, + port=config.port, + user=config.user, + password=config.password, + database=config.database, + ) + + @contextmanager + def _get_cursor(self): + conn = self.pool.getconn() + cur = conn.cursor() + try: + yield cur + finally: 
+ cur.close() + conn.commit() + self.pool.putconn(conn) + + def create(self, texts: list[Document], embeddings: list[list[float]], **kwargs): + dimension = len(embeddings[0]) + self._create_collection(dimension) + return self.add_texts(texts, embeddings) + + def add_texts(self, documents: list[Document], embeddings: list[list[float]], **kwargs): + values = [] + pks = [] + for i, doc in enumerate(documents): + doc_id = doc.metadata.get("doc_id", str(uuid.uuid4())) + pks.append(doc_id) + values.append( + ( + doc_id, + doc.page_content, + json.dumps(doc.metadata), + embeddings[i], + ) + ) + with self._get_cursor() as cur: + psycopg2.extras.execute_values( + cur, f"INSERT INTO {self.table_name} (id, text, meta, embedding) VALUES %s", values + ) + return pks + + def text_exists(self, id: str) -> bool: + with self._get_cursor() as cur: + cur.execute(f"SELECT id FROM {self.table_name} WHERE id = %s", (id,)) + return cur.fetchone() is not None + + def get_by_ids(self, ids: list[str]) -> list[Document]: + with self._get_cursor() as cur: + cur.execute(f"SELECT meta, text FROM {self.table_name} WHERE id IN %s", (tuple(ids),)) + docs = [] + for record in cur: + docs.append(Document(page_content=record[1], metadata=record[0])) + return docs + + def delete_by_ids(self, ids: list[str]) -> None: + with self._get_cursor() as cur: + cur.execute(f"DELETE FROM {self.table_name} WHERE id IN %s", (tuple(ids),)) + + def delete_by_metadata_field(self, key: str, value: str) -> None: + with self._get_cursor() as cur: + cur.execute(f"DELETE FROM {self.table_name} WHERE meta->>%s = %s", (key, value)) + + def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]: + """ + Search the nearest neighbors to a vector. + + :param query_vector: The input vector to search for similar items. + :param top_k: The number of nearest neighbors to return, default is 5. + :return: List of Documents that are nearest to the query vector. 
+ """ + top_k = kwargs.get("top_k", 5) + + with self._get_cursor() as cur: + cur.execute( + f"SELECT meta, text, embedding <=> %s AS distance FROM {self.table_name} ORDER BY distance LIMIT {top_k}", + (json.dumps(query_vector),), + ) + docs = [] + score_threshold = kwargs.get("score_threshold") if kwargs.get("score_threshold") else 0.0 + for record in cur: + metadata, text, distance = record + score = 1 - distance + metadata["score"] = score + if score > score_threshold: + docs.append(Document(page_content=text, metadata=metadata)) + return docs + + def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: + # do not support bm25 search + return [] + + def delete(self) -> None: + with self._get_cursor() as cur: + cur.execute(f"DROP TABLE IF EXISTS {self.table_name}") + + def _create_collection(self, dimension: int): + cache_key = f"vector_indexing_{self._collection_name}" + lock_name = f"{cache_key}_lock" + with redis_client.lock(lock_name, timeout=20): + collection_exist_cache_key = f"vector_indexing_{self._collection_name}" + if redis_client.get(collection_exist_cache_key): + return + + with self._get_cursor() as cur: + cur.execute("CREATE EXTENSION IF NOT EXISTS vector") + cur.execute(SQL_CREATE_TABLE.format(table_name=self.table_name, dimension=dimension)) + # TODO: create index https://github.com/pgvector/pgvector?tab=readme-ov-file#indexing + redis_client.set(collection_exist_cache_key, 1, ex=3600) diff --git a/api/core/rag/datasource/vdb/vector_factory.py b/api/core/rag/datasource/vdb/vector_factory.py index 2405d16b1d..82ba6139e1 100644 --- a/api/core/rag/datasource/vdb/vector_factory.py +++ b/api/core/rag/datasource/vdb/vector_factory.py @@ -164,6 +164,29 @@ class Vector: ), dim=dim ) + elif vector_type == "pgvector": + from core.rag.datasource.vdb.pgvector.pgvector import PGVector, PGVectorConfig + + if self._dataset.index_struct_dict: + class_prefix: str = self._dataset.index_struct_dict["vector_store"]["class_prefix"] + collection_name = class_prefix + else: + dataset_id = self._dataset.id + collection_name = Dataset.gen_collection_name_by_id(dataset_id) + index_struct_dict = { + "type": "pgvector", + "vector_store": {"class_prefix": collection_name}} + self._dataset.index_struct = json.dumps(index_struct_dict) + return PGVector( + collection_name=collection_name, + config=PGVectorConfig( + host=config.get("PGVECTOR_HOST"), + port=config.get("PGVECTOR_PORT"), + user=config.get("PGVECTOR_USER"), + password=config.get("PGVECTOR_PASSWORD"), + database=config.get("PGVECTOR_DATABASE"), + ), + ) else: raise ValueError(f"Vector store {config.get('VECTOR_STORE')} is not supported.") diff --git a/api/requirements.txt b/api/requirements.txt index e2c430c9d6..6d08202527 100644 --- a/api/requirements.txt +++ b/api/requirements.txt @@ -83,3 +83,4 @@ pydantic~=1.10.0 pgvecto-rs==0.1.4 firecrawl-py==0.0.5 oss2==2.15.0 +pgvector==0.2.5 diff --git a/api/tests/integration_tests/vdb/pgvector/__init__.py b/api/tests/integration_tests/vdb/pgvector/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/integration_tests/vdb/pgvector/test_pgvector.py b/api/tests/integration_tests/vdb/pgvector/test_pgvector.py new file mode 100644 index 0000000000..915bb5e837 --- /dev/null +++ b/api/tests/integration_tests/vdb/pgvector/test_pgvector.py @@ -0,0 +1,30 @@ +from core.rag.datasource.vdb.pgvector.pgvector import PGVector, PGVectorConfig +from core.rag.models.document import Document +from tests.integration_tests.vdb.test_vector_store import ( + 
AbstractVectorTest, + get_example_text, + setup_mock_redis, +) + + +class TestPGVector(AbstractVectorTest): + def __init__(self): + super().__init__() + self.vector = PGVector( + collection_name=self.collection_name, + config=PGVectorConfig( + host="localhost", + port=5433, + user="postgres", + password="difyai123456", + database="dify", + ), + ) + + def search_by_full_text(self): + hits_by_full_text: list[Document] = self.vector.search_by_full_text(query=get_example_text()) + assert len(hits_by_full_text) == 0 + + +def test_pgvector(setup_mock_redis): + TestPGVector().run_all_tests() diff --git a/docker/docker-compose.pgvector.yaml b/docker/docker-compose.pgvector.yaml new file mode 100644 index 0000000000..b584880abf --- /dev/null +++ b/docker/docker-compose.pgvector.yaml @@ -0,0 +1,24 @@ +version: '3' +services: + # Qdrant vector store. + pgvector: + image: pgvector/pgvector:pg16 + restart: always + environment: + PGUSER: postgres + # The password for the default postgres user. + POSTGRES_PASSWORD: difyai123456 + # The name of the default postgres database. + POSTGRES_DB: dify + # postgres data directory + PGDATA: /var/lib/postgresql/data/pgdata + volumes: + - ./volumes/pgvector/data:/var/lib/postgresql/data + # uncomment to expose db(postgresql) port to host + ports: + - "5433:5432" + healthcheck: + test: [ "CMD", "pg_isready" ] + interval: 1s + timeout: 3s + retries: 30 diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 432383b76d..b2a3353641 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -122,6 +122,12 @@ services: RELYT_USER: postgres RELYT_PASSWORD: difyai123456 RELYT_DATABASE: postgres + # pgvector configurations + PGVECTOR_HOST: pgvector + PGVECTOR_PORT: 5432 + PGVECTOR_USER: postgres + PGVECTOR_PASSWORD: difyai123456 + PGVECTOR_DATABASE: dify # Mail configuration, support: resend, smtp MAIL_TYPE: '' # default send from email address, if not specified @@ -211,7 +217,7 @@ services: AZURE_BLOB_ACCOUNT_KEY: 'difyai' AZURE_BLOB_CONTAINER_NAME: 'difyai-container' AZURE_BLOB_ACCOUNT_URL: 'https://.blob.core.windows.net' - # The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`. + # The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`, `pgvector`. VECTOR_STORE: weaviate # The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`. WEAVIATE_ENDPOINT: http://weaviate:8080 @@ -251,6 +257,12 @@ services: RELYT_USER: postgres RELYT_PASSWORD: difyai123456 RELYT_DATABASE: postgres + # pgvector configurations + PGVECTOR_HOST: pgvector + PGVECTOR_PORT: 5432 + PGVECTOR_USER: postgres + PGVECTOR_PASSWORD: difyai123456 + PGVECTOR_DATABASE: dify # Notion import configuration, support public and internal NOTION_INTEGRATION_TYPE: public NOTION_CLIENT_SECRET: you-client-secret @@ -374,6 +386,31 @@ services: # # - "6333:6333" # # - "6334:6334" + # The pgvector vector database. + # Uncomment to use qdrant as vector store. + # pgvector: + # image: pgvector/pgvector:pg16 + # restart: always + # environment: + # PGUSER: postgres + # # The password for the default postgres user. + # POSTGRES_PASSWORD: difyai123456 + # # The name of the default postgres database. 
+ # POSTGRES_DB: dify + # # postgres data directory + # PGDATA: /var/lib/postgresql/data/pgdata + # volumes: + # - ./volumes/pgvector/data:/var/lib/postgresql/data + # # uncomment to expose db(postgresql) port to host + # # ports: + # # - "5433:5432" + # healthcheck: + # test: [ "CMD", "pg_isready" ] + # interval: 1s + # timeout: 3s + # retries: 30 + + # The nginx reverse proxy. # used for reverse proxying the API service and Web service. nginx: From 897e07f63954d39a260d771a2d01b7932d005645 Mon Sep 17 00:00:00 2001 From: Jyong <76649700+JohnJyong@users.noreply.github.com> Date: Fri, 10 May 2024 17:22:46 +0800 Subject: [PATCH 038/267] question classifier prompt optimize (#4262) --- .../question_classifier_node.py | 4 ++-- .../nodes/question_classifier/template_prompts.py | 14 ++++++++------ 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/api/core/workflow/nodes/question_classifier/question_classifier_node.py b/api/core/workflow/nodes/question_classifier/question_classifier_node.py index 770027d06c..af1e68b92a 100644 --- a/api/core/workflow/nodes/question_classifier/question_classifier_node.py +++ b/api/core/workflow/nodes/question_classifier/question_classifier_node.py @@ -259,7 +259,7 @@ class QuestionClassifierNode(LLMNode): user_prompt_message_3 = ChatModelMessage( role=PromptMessageRole.USER, text=QUESTION_CLASSIFIER_USER_PROMPT_3.format(input_text=input_text, - categories=json.dumps(categories), + categories=json.dumps(categories, ensure_ascii=False), classification_instructions=instruction) ) prompt_messages.append(user_prompt_message_3) @@ -269,7 +269,7 @@ class QuestionClassifierNode(LLMNode): text=QUESTION_CLASSIFIER_COMPLETION_PROMPT.format(histories=memory_str, input_text=input_text, categories=json.dumps(categories), - classification_instructions=instruction) + classification_instructions=instruction, ensure_ascii=False) ) else: diff --git a/api/core/workflow/nodes/question_classifier/template_prompts.py b/api/core/workflow/nodes/question_classifier/template_prompts.py index ea24baa522..1af171762f 100644 --- a/api/core/workflow/nodes/question_classifier/template_prompts.py +++ b/api/core/workflow/nodes/question_classifier/template_prompts.py @@ -6,7 +6,7 @@ QUESTION_CLASSIFIER_SYSTEM_PROMPT = """ ### Task Your task is to assign one categories ONLY to the input text and only one category may be assigned returned in the output.Additionally, you need to extract the key words from the text that are related to the classification. ### Format - The input text is in the variable text_field.Categories are specified as a category list in the variable categories or left empty for automatic determination.Classification instructions may be included to improve the classification accuracy. + The input text is in the variable text_field.Categories are specified as a category list with two filed category_id and category_name in the variable categories .Classification instructions may be included to improve the classification accuracy. ### Constraint DO NOT include anything other than the JSON array in your response. 
### Memory @@ -24,7 +24,8 @@ QUESTION_CLASSIFIER_USER_PROMPT_1 = """ QUESTION_CLASSIFIER_ASSISTANT_PROMPT_1 = """ ```json - {"category_id": "f5660049-284f-41a7-b301-fd24176a711c", + {"keywords": ["recently", "great experience", "company", "service", "prompt", "staff", "friendly"], + "category_id": "f5660049-284f-41a7-b301-fd24176a711c", "category_name": "Customer Service"} ``` """ @@ -37,7 +38,8 @@ QUESTION_CLASSIFIER_USER_PROMPT_2 = """ QUESTION_CLASSIFIER_ASSISTANT_PROMPT_2 = """ ```json - {"category_id": "f6ff5bc3-aca0-4e4a-8627-e760d0aca78f", + {"keywords": ["bad service", "slow", "food", "tip", "terrible", "waitresses"], + "category_id": "f6ff5bc3-aca0-4e4a-8627-e760d0aca78f", "category_name": "Experience"} ``` """ @@ -54,16 +56,16 @@ You are a text classification engine that analyzes text data and assigns categor ### Task Your task is to assign one categories ONLY to the input text and only one category may be assigned returned in the output. Additionally, you need to extract the key words from the text that are related to the classification. ### Format -The input text is in the variable text_field. Categories are specified as a category list in the variable categories or left empty for automatic determination. Classification instructions may be included to improve the classification accuracy. +The input text is in the variable text_field. Categories are specified as a category list with two filed category_id and category_name in the variable categories. Classification instructions may be included to improve the classification accuracy. ### Constraint DO NOT include anything other than the JSON array in your response. ### Example Here is the chat example between human and assistant, inside XML tags. User:{{"input_text": ["I recently had a great experience with your company. The service was prompt and the staff was very friendly."], "categories": [{{"category_id":"f5660049-284f-41a7-b301-fd24176a711c","category_name":"Customer Service"}},{{"category_id":"8d007d06-f2c9-4be5-8ff6-cd4381c13c60","category_name":"Satisfaction"}},{{"category_id":"5fbbbb18-9843-466d-9b8e-b9bfbb9482c8","category_name":"Sales"}},{{"category_id":"23623c75-7184-4a2e-8226-466c2e4631e4","category_name":"Product"}}], "classification_instructions": ["classify the text based on the feedback provided by customer"]}} -Assistant:{{"category_id": "f5660049-284f-41a7-b301-fd24176a711c","category_name": "Customer Service"}} +Assistant:{{"keywords": ["recently", "great experience", "company", "service", "prompt", "staff", "friendly"],"category_id": "f5660049-284f-41a7-b301-fd24176a711c","category_name": "Customer Service"}} User:{{"input_text": ["bad service, slow to bring the food"], "categories": [{{"category_id":"80fb86a0-4454-4bf5-924c-f253fdd83c02","category_name":"Food Quality"}},{{"category_id":"f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name":"Experience"}},{{"category_id":"cc771f63-74e7-4c61-882e-3eda9d8ba5d7","category_name":"Price"}}], "classification_instructions": []}} -Assistant:{{"category_id": "f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name": "Customer Service"}} +Assistant:{{"keywords": ["bad service", "slow", "food", "tip", "terrible", "waitresses"],"category_id": "f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name": "Customer Service"}} ### Memory Here is the chat histories between human and assistant, inside XML tags. 
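Editor's note on the change above: the substantive fix in this patch is serializing the category list with `ensure_ascii=False`, so non-ASCII category names reach the LLM as readable text rather than `\uXXXX` escape sequences. A minimal sketch of the difference, using an illustrative Chinese category name that is not taken from the patch:

```python
import json

# Hypothetical category list; the Chinese name is illustrative only.
categories = [
    {"category_id": "f5660049-284f-41a7-b301-fd24176a711c", "category_name": "售后服务"},
]

# Default json.dumps escapes non-ASCII characters, so the prompt would contain
# something like: {"category_name": "\u552e\u540e\u670d\u52a1"}
print(json.dumps(categories))

# With ensure_ascii=False the original characters are preserved in the prompt:
# {"category_name": "售后服务"}
print(json.dumps(categories, ensure_ascii=False))
```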
From 8578ee08647bada442f6e9f717c86a9a1d8590f3 Mon Sep 17 00:00:00 2001 From: Yeuoly <45712896+Yeuoly@users.noreply.github.com> Date: Fri, 10 May 2024 18:08:32 +0800 Subject: [PATCH 039/267] feat: support LLM jinja2 template prompt (#3968) Co-authored-by: Joel --- .../helper/code_executor/jinja2_formatter.py | 17 +++ api/core/prompt/advanced_prompt_transform.py | 65 +++++---- .../entities/advanced_prompt_entities.py | 4 +- api/core/workflow/nodes/llm/entities.py | 21 ++- api/core/workflow/nodes/llm/llm_node.py | 138 ++++++++++++++++-- api/requirements-dev.txt | 1 + .../workflow/nodes/__mock/code_executor.py | 3 +- .../workflow/nodes/test_llm.py | 117 +++++++++++++++ 8 files changed, 325 insertions(+), 41 deletions(-) create mode 100644 api/core/helper/code_executor/jinja2_formatter.py diff --git a/api/core/helper/code_executor/jinja2_formatter.py b/api/core/helper/code_executor/jinja2_formatter.py new file mode 100644 index 0000000000..96f35e3ab2 --- /dev/null +++ b/api/core/helper/code_executor/jinja2_formatter.py @@ -0,0 +1,17 @@ +from core.helper.code_executor.code_executor import CodeExecutor + + +class Jinja2Formatter: + @classmethod + def format(cls, template: str, inputs: str) -> str: + """ + Format template + :param template: template + :param inputs: inputs + :return: + """ + result = CodeExecutor.execute_workflow_code_template( + language='jinja2', code=template, inputs=inputs + ) + + return result['result'] \ No newline at end of file diff --git a/api/core/prompt/advanced_prompt_transform.py b/api/core/prompt/advanced_prompt_transform.py index 29b516ac02..9952371a82 100644 --- a/api/core/prompt/advanced_prompt_transform.py +++ b/api/core/prompt/advanced_prompt_transform.py @@ -2,6 +2,7 @@ from typing import Optional, Union from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity from core.file.file_obj import FileVar +from core.helper.code_executor.jinja2_formatter import Jinja2Formatter from core.memory.token_buffer_memory import TokenBufferMemory from core.model_runtime.entities.message_entities import ( AssistantPromptMessage, @@ -80,29 +81,35 @@ class AdvancedPromptTransform(PromptTransform): prompt_messages = [] - prompt_template = PromptTemplateParser(template=raw_prompt, with_variable_tmpl=self.with_variable_tmpl) - prompt_inputs = {k: inputs[k] for k in prompt_template.variable_keys if k in inputs} + if prompt_template.edition_type == 'basic' or not prompt_template.edition_type: + prompt_template = PromptTemplateParser(template=raw_prompt, with_variable_tmpl=self.with_variable_tmpl) + prompt_inputs = {k: inputs[k] for k in prompt_template.variable_keys if k in inputs} - prompt_inputs = self._set_context_variable(context, prompt_template, prompt_inputs) + prompt_inputs = self._set_context_variable(context, prompt_template, prompt_inputs) - if memory and memory_config: - role_prefix = memory_config.role_prefix - prompt_inputs = self._set_histories_variable( - memory=memory, - memory_config=memory_config, - raw_prompt=raw_prompt, - role_prefix=role_prefix, - prompt_template=prompt_template, - prompt_inputs=prompt_inputs, - model_config=model_config + if memory and memory_config: + role_prefix = memory_config.role_prefix + prompt_inputs = self._set_histories_variable( + memory=memory, + memory_config=memory_config, + raw_prompt=raw_prompt, + role_prefix=role_prefix, + prompt_template=prompt_template, + prompt_inputs=prompt_inputs, + model_config=model_config + ) + + if query: + prompt_inputs = self._set_query_variable(query, prompt_template, 
prompt_inputs) + + prompt = prompt_template.format( + prompt_inputs ) + else: + prompt = raw_prompt + prompt_inputs = inputs - if query: - prompt_inputs = self._set_query_variable(query, prompt_template, prompt_inputs) - - prompt = prompt_template.format( - prompt_inputs - ) + prompt = Jinja2Formatter.format(prompt, prompt_inputs) if files: prompt_message_contents = [TextPromptMessageContent(data=prompt)] @@ -135,14 +142,22 @@ class AdvancedPromptTransform(PromptTransform): for prompt_item in raw_prompt_list: raw_prompt = prompt_item.text - prompt_template = PromptTemplateParser(template=raw_prompt, with_variable_tmpl=self.with_variable_tmpl) - prompt_inputs = {k: inputs[k] for k in prompt_template.variable_keys if k in inputs} + if prompt_item.edition_type == 'basic' or not prompt_item.edition_type: + prompt_template = PromptTemplateParser(template=raw_prompt, with_variable_tmpl=self.with_variable_tmpl) + prompt_inputs = {k: inputs[k] for k in prompt_template.variable_keys if k in inputs} - prompt_inputs = self._set_context_variable(context, prompt_template, prompt_inputs) + prompt_inputs = self._set_context_variable(context, prompt_template, prompt_inputs) - prompt = prompt_template.format( - prompt_inputs - ) + prompt = prompt_template.format( + prompt_inputs + ) + elif prompt_item.edition_type == 'jinja2': + prompt = raw_prompt + prompt_inputs = inputs + + prompt = Jinja2Formatter.format(prompt, prompt_inputs) + else: + raise ValueError(f'Invalid edition type: {prompt_item.edition_type}') if prompt_item.role == PromptMessageRole.USER: prompt_messages.append(UserPromptMessage(content=prompt)) diff --git a/api/core/prompt/entities/advanced_prompt_entities.py b/api/core/prompt/entities/advanced_prompt_entities.py index 2be00bdf0e..23a8602bea 100644 --- a/api/core/prompt/entities/advanced_prompt_entities.py +++ b/api/core/prompt/entities/advanced_prompt_entities.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import Literal, Optional from pydantic import BaseModel @@ -11,6 +11,7 @@ class ChatModelMessage(BaseModel): """ text: str role: PromptMessageRole + edition_type: Optional[Literal['basic', 'jinja2']] class CompletionModelPromptTemplate(BaseModel): @@ -18,6 +19,7 @@ class CompletionModelPromptTemplate(BaseModel): Completion Model Prompt Template. """ text: str + edition_type: Optional[Literal['basic', 'jinja2']] class MemoryConfig(BaseModel): diff --git a/api/core/workflow/nodes/llm/entities.py b/api/core/workflow/nodes/llm/entities.py index c390aaf8c9..1e48a10bc7 100644 --- a/api/core/workflow/nodes/llm/entities.py +++ b/api/core/workflow/nodes/llm/entities.py @@ -4,6 +4,7 @@ from pydantic import BaseModel from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig from core.workflow.entities.base_node_data_entities import BaseNodeData +from core.workflow.entities.variable_entities import VariableSelector class ModelConfig(BaseModel): @@ -37,13 +38,31 @@ class VisionConfig(BaseModel): enabled: bool configs: Optional[Configs] = None +class PromptConfig(BaseModel): + """ + Prompt Config. + """ + jinja2_variables: Optional[list[VariableSelector]] = None + +class LLMNodeChatModelMessage(ChatModelMessage): + """ + LLM Node Chat Model Message. + """ + jinja2_text: Optional[str] = None + +class LLMNodeCompletionModelPromptTemplate(CompletionModelPromptTemplate): + """ + LLM Node Chat Model Prompt Template. + """ + jinja2_text: Optional[str] = None class LLMNodeData(BaseNodeData): """ LLM Node Data. 
""" model: ModelConfig - prompt_template: Union[list[ChatModelMessage], CompletionModelPromptTemplate] + prompt_template: Union[list[LLMNodeChatModelMessage], LLMNodeCompletionModelPromptTemplate] + prompt_config: Optional[PromptConfig] = None memory: Optional[MemoryConfig] = None context: ContextConfig vision: VisionConfig diff --git a/api/core/workflow/nodes/llm/llm_node.py b/api/core/workflow/nodes/llm/llm_node.py index c8b7f279ab..fef09c1385 100644 --- a/api/core/workflow/nodes/llm/llm_node.py +++ b/api/core/workflow/nodes/llm/llm_node.py @@ -1,4 +1,6 @@ +import json from collections.abc import Generator +from copy import deepcopy from typing import Optional, cast from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity @@ -17,11 +19,15 @@ from core.model_runtime.utils.encoders import jsonable_encoder from core.prompt.advanced_prompt_transform import AdvancedPromptTransform from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig from core.prompt.utils.prompt_message_util import PromptMessageUtil -from core.workflow.entities.base_node_data_entities import BaseNodeData from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult, NodeType, SystemVariable from core.workflow.entities.variable_pool import VariablePool from core.workflow.nodes.base_node import BaseNode -from core.workflow.nodes.llm.entities import LLMNodeData, ModelConfig +from core.workflow.nodes.llm.entities import ( + LLMNodeChatModelMessage, + LLMNodeCompletionModelPromptTemplate, + LLMNodeData, + ModelConfig, +) from core.workflow.utils.variable_template_parser import VariableTemplateParser from extensions.ext_database import db from models.model import Conversation @@ -39,16 +45,24 @@ class LLMNode(BaseNode): :param variable_pool: variable pool :return: """ - node_data = self.node_data - node_data = cast(self._node_data_cls, node_data) + node_data = cast(LLMNodeData, deepcopy(self.node_data)) node_inputs = None process_data = None try: + # init messages template + node_data.prompt_template = self._transform_chat_messages(node_data.prompt_template) + # fetch variables and fetch values from variable pool inputs = self._fetch_inputs(node_data, variable_pool) + # fetch jinja2 inputs + jinja_inputs = self._fetch_jinja_inputs(node_data, variable_pool) + + # merge inputs + inputs.update(jinja_inputs) + node_inputs = {} # fetch files @@ -183,6 +197,86 @@ class LLMNode(BaseNode): usage = LLMUsage.empty_usage() return full_text, usage + + def _transform_chat_messages(self, + messages: list[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate + ) -> list[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate: + """ + Transform chat messages + + :param messages: chat messages + :return: + """ + + if isinstance(messages, LLMNodeCompletionModelPromptTemplate): + if messages.edition_type == 'jinja2': + messages.text = messages.jinja2_text + + return messages + + for message in messages: + if message.edition_type == 'jinja2': + message.text = message.jinja2_text + + return messages + + def _fetch_jinja_inputs(self, node_data: LLMNodeData, variable_pool: VariablePool) -> dict[str, str]: + """ + Fetch jinja inputs + :param node_data: node data + :param variable_pool: variable pool + :return: + """ + variables = {} + + if not node_data.prompt_config: + return variables + + for variable_selector in node_data.prompt_config.jinja2_variables or []: + variable = variable_selector.variable + value = 
variable_pool.get_variable_value( + variable_selector=variable_selector.value_selector + ) + + def parse_dict(d: dict) -> str: + """ + Parse dict into string + """ + # check if it's a context structure + if 'metadata' in d and '_source' in d['metadata'] and 'content' in d: + return d['content'] + + # else, parse the dict + try: + return json.dumps(d, ensure_ascii=False) + except Exception: + return str(d) + + if isinstance(value, str): + value = value + elif isinstance(value, list): + result = '' + for item in value: + if isinstance(item, dict): + result += parse_dict(item) + elif isinstance(item, str): + result += item + elif isinstance(item, int | float): + result += str(item) + else: + result += str(item) + result += '\n' + value = result.strip() + elif isinstance(value, dict): + value = parse_dict(value) + elif isinstance(value, int | float): + value = str(value) + else: + value = str(value) + + variables[variable] = value + + return variables def _fetch_inputs(self, node_data: LLMNodeData, variable_pool: VariablePool) -> dict[str, str]: """ @@ -531,25 +625,25 @@ class LLMNode(BaseNode): db.session.commit() @classmethod - def _extract_variable_selector_to_variable_mapping(cls, node_data: BaseNodeData) -> dict[str, list[str]]: + def _extract_variable_selector_to_variable_mapping(cls, node_data: LLMNodeData) -> dict[str, list[str]]: """ Extract variable selector to variable mapping :param node_data: node data :return: """ - node_data = node_data - node_data = cast(cls._node_data_cls, node_data) prompt_template = node_data.prompt_template variable_selectors = [] if isinstance(prompt_template, list): for prompt in prompt_template: - variable_template_parser = VariableTemplateParser(template=prompt.text) - variable_selectors.extend(variable_template_parser.extract_variable_selectors()) + if prompt.edition_type != 'jinja2': + variable_template_parser = VariableTemplateParser(template=prompt.text) + variable_selectors.extend(variable_template_parser.extract_variable_selectors()) else: - variable_template_parser = VariableTemplateParser(template=prompt_template.text) - variable_selectors = variable_template_parser.extract_variable_selectors() + if prompt_template.edition_type != 'jinja2': + variable_template_parser = VariableTemplateParser(template=prompt_template.text) + variable_selectors = variable_template_parser.extract_variable_selectors() variable_mapping = {} for variable_selector in variable_selectors: @@ -571,6 +665,22 @@ class LLMNode(BaseNode): if node_data.memory: variable_mapping['#sys.query#'] = ['sys', SystemVariable.QUERY.value] + if node_data.prompt_config: + enable_jinja = False + + if isinstance(prompt_template, list): + for prompt in prompt_template: + if prompt.edition_type == 'jinja2': + enable_jinja = True + break + else: + if prompt_template.edition_type == 'jinja2': + enable_jinja = True + + if enable_jinja: + for variable_selector in node_data.prompt_config.jinja2_variables or []: + variable_mapping[variable_selector.variable] = variable_selector.value_selector + return variable_mapping @classmethod @@ -588,7 +698,8 @@ class LLMNode(BaseNode): "prompts": [ { "role": "system", - "text": "You are a helpful AI assistant." 
+ "text": "You are a helpful AI assistant.", + "edition_type": "basic" } ] }, @@ -600,7 +711,8 @@ class LLMNode(BaseNode): "prompt": { "text": "Here is the chat histories between human and assistant, inside " " XML tags.\n\n\n{{" - "#histories#}}\n\n\n\nHuman: {{#sys.query#}}\n\nAssistant:" + "#histories#}}\n\n\n\nHuman: {{#sys.query#}}\n\nAssistant:", + "edition_type": "basic" }, "stop": ["Human:"] } diff --git a/api/requirements-dev.txt b/api/requirements-dev.txt index 0391ac5969..70b2ce2ef5 100644 --- a/api/requirements-dev.txt +++ b/api/requirements-dev.txt @@ -3,3 +3,4 @@ pytest~=8.1.1 pytest-benchmark~=4.0.0 pytest-env~=1.1.3 pytest-mock~=3.14.0 +jinja2~=3.1.2 \ No newline at end of file diff --git a/api/tests/integration_tests/workflow/nodes/__mock/code_executor.py b/api/tests/integration_tests/workflow/nodes/__mock/code_executor.py index 38517cf448..ef84c92625 100644 --- a/api/tests/integration_tests/workflow/nodes/__mock/code_executor.py +++ b/api/tests/integration_tests/workflow/nodes/__mock/code_executor.py @@ -3,6 +3,7 @@ from typing import Literal import pytest from _pytest.monkeypatch import MonkeyPatch +from jinja2 import Template from core.helper.code_executor.code_executor import CodeExecutor @@ -18,7 +19,7 @@ class MockedCodeExecutor: } elif language == 'jinja2': return { - "result": "3" + "result": Template(code).render(inputs) } @pytest.fixture diff --git a/api/tests/integration_tests/workflow/nodes/test_llm.py b/api/tests/integration_tests/workflow/nodes/test_llm.py index 8a8a58d59f..d04497a187 100644 --- a/api/tests/integration_tests/workflow/nodes/test_llm.py +++ b/api/tests/integration_tests/workflow/nodes/test_llm.py @@ -1,3 +1,4 @@ +import json import os from unittest.mock import MagicMock @@ -19,6 +20,7 @@ from models.workflow import WorkflowNodeExecutionStatus """FOR MOCK FIXTURES, DO NOT REMOVE""" from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock +from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock @pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True) @@ -116,3 +118,118 @@ def test_execute_llm(setup_openai_mock): assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED assert result.outputs['text'] is not None assert result.outputs['usage']['total_tokens'] > 0 + +@pytest.mark.parametrize('setup_code_executor_mock', [['none']], indirect=True) +@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True) +def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock): + """ + Test execute LLM node with jinja2 + """ + node = LLMNode( + tenant_id='1', + app_id='1', + workflow_id='1', + user_id='1', + user_from=UserFrom.ACCOUNT, + config={ + 'id': 'llm', + 'data': { + 'title': '123', + 'type': 'llm', + 'model': { + 'provider': 'openai', + 'name': 'gpt-3.5-turbo', + 'mode': 'chat', + 'completion_params': {} + }, + 'prompt_config': { + 'jinja2_variables': [{ + 'variable': 'sys_query', + 'value_selector': ['sys', 'query'] + }, { + 'variable': 'output', + 'value_selector': ['abc', 'output'] + }] + }, + 'prompt_template': [ + { + 'role': 'system', + 'text': 'you are a helpful assistant.\ntoday\'s weather is {{#abc.output#}}', + 'jinja2_text': 'you are a helpful assistant.\ntoday\'s weather is {{output}}.', + 'edition_type': 'jinja2' + }, + { + 'role': 'user', + 'text': '{{#sys.query#}}', + 'jinja2_text': '{{sys_query}}', + 'edition_type': 'basic' + } + ], + 'memory': None, + 'context': { + 'enabled': False + }, + 'vision': { + 'enabled': False + 
} + } + } + ) + + # construct variable pool + pool = VariablePool(system_variables={ + SystemVariable.QUERY: 'what\'s the weather today?', + SystemVariable.FILES: [], + SystemVariable.CONVERSATION_ID: 'abababa', + SystemVariable.USER_ID: 'aaa' + }, user_inputs={}) + pool.append_variable(node_id='abc', variable_key_list=['output'], value='sunny') + + credentials = { + 'openai_api_key': os.environ.get('OPENAI_API_KEY') + } + + provider_instance = ModelProviderFactory().get_provider_instance('openai') + model_type_instance = provider_instance.get_model_instance(ModelType.LLM) + provider_model_bundle = ProviderModelBundle( + configuration=ProviderConfiguration( + tenant_id='1', + provider=provider_instance.get_provider_schema(), + preferred_provider_type=ProviderType.CUSTOM, + using_provider_type=ProviderType.CUSTOM, + system_configuration=SystemConfiguration( + enabled=False + ), + custom_configuration=CustomConfiguration( + provider=CustomProviderConfiguration( + credentials=credentials + ) + ) + ), + provider_instance=provider_instance, + model_type_instance=model_type_instance + ) + + model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model='gpt-3.5-turbo') + + model_config = ModelConfigWithCredentialsEntity( + model='gpt-3.5-turbo', + provider='openai', + mode='chat', + credentials=credentials, + parameters={}, + model_schema=model_type_instance.get_model_schema('gpt-3.5-turbo'), + provider_model_bundle=provider_model_bundle + ) + + # Mock db.session.close() + db.session.close = MagicMock() + + node._fetch_model_config = MagicMock(return_value=tuple([model_instance, model_config])) + + # execute node + result = node.run(pool) + + assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED + assert 'sunny' in json.dumps(result.process_data) + assert 'what\'s the weather today?' in json.dumps(result.process_data) \ No newline at end of file From 6b99075dc89c93e576c026472cd5351c98d0dea5 Mon Sep 17 00:00:00 2001 From: GalvinYang Date: Fri, 10 May 2024 18:12:18 +0800 Subject: [PATCH 040/267] fix: system default model name length (#4245) (#4246) Co-authored-by: takatost --- ...f8c4f3_modify_default_model_name_length.py | 39 +++++++++++++++++++ api/models/provider.py | 2 +- 2 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 api/migrations/versions/47cc7df8c4f3_modify_default_model_name_length.py diff --git a/api/migrations/versions/47cc7df8c4f3_modify_default_model_name_length.py b/api/migrations/versions/47cc7df8c4f3_modify_default_model_name_length.py new file mode 100644 index 0000000000..b37928d3c0 --- /dev/null +++ b/api/migrations/versions/47cc7df8c4f3_modify_default_model_name_length.py @@ -0,0 +1,39 @@ +"""modify default model name length + +Revision ID: 47cc7df8c4f3 +Revises: 3c7cac9521c6 +Create Date: 2024-05-10 09:48:09.046298 + +""" +import sqlalchemy as sa +from alembic import op + +import models as models + +# revision identifiers, used by Alembic. +revision = '47cc7df8c4f3' +down_revision = '3c7cac9521c6' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('tenant_default_models', schema=None) as batch_op: + batch_op.alter_column('model_name', + existing_type=sa.VARCHAR(length=40), + type_=sa.String(length=255), + existing_nullable=False) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table('tenant_default_models', schema=None) as batch_op: + batch_op.alter_column('model_name', + existing_type=sa.String(length=255), + type_=sa.VARCHAR(length=40), + existing_nullable=False) + + # ### end Alembic commands ### diff --git a/api/models/provider.py b/api/models/provider.py index 413e8f9d67..eb6ec4beb4 100644 --- a/api/models/provider.py +++ b/api/models/provider.py @@ -113,7 +113,7 @@ class TenantDefaultModel(db.Model): id = db.Column(StringUUID, server_default=db.text('uuid_generate_v4()')) tenant_id = db.Column(StringUUID, nullable=False) provider_name = db.Column(db.String(40), nullable=False) - model_name = db.Column(db.String(40), nullable=False) + model_name = db.Column(db.String(255), nullable=False) model_type = db.Column(db.String(40), nullable=False) created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) From 01555463d2bf4866a2503fe9706e54706d073bc5 Mon Sep 17 00:00:00 2001 From: Joel Date: Fri, 10 May 2024 18:14:05 +0800 Subject: [PATCH 041/267] feat: llm support jinja fe (#4260) --- .../icons/assets/vender/workflow/jinja.svg | 13 ++ .../base/icons/src/vender/workflow/Jinja.json | 98 ++++++++++ .../base/icons/src/vender/workflow/Jinja.tsx | 16 ++ .../base/icons/src/vender/workflow/index.ts | 1 + web/app/components/base/switch/index.tsx | 5 +- .../components/base/tooltip-plus/index.tsx | 57 +++++- .../code-editor/editor-support-vars.tsx | 22 +-- .../components/editor/code-editor/index.tsx | 140 ++++++++++---- .../components/editor/code-editor/style.css | 4 + .../nodes/_base/components/prompt/editor.tsx | 183 +++++++++++------- .../llm/components/config-prompt-item.tsx | 17 +- .../nodes/llm/components/config-prompt.tsx | 40 +++- .../components/workflow/nodes/llm/default.ts | 23 ++- .../components/workflow/nodes/llm/panel.tsx | 27 +++ .../components/workflow/nodes/llm/types.ts | 4 +- .../workflow/nodes/llm/use-config.ts | 127 ++++++++---- .../nodes/template-transform/panel.tsx | 3 +- .../nodes/template-transform/use-config.ts | 7 + web/app/components/workflow/types.ts | 7 + web/i18n/en-US/workflow.ts | 2 + web/i18n/zh-Hans/workflow.ts | 2 + 21 files changed, 621 insertions(+), 177 deletions(-) create mode 100644 web/app/components/base/icons/assets/vender/workflow/jinja.svg create mode 100644 web/app/components/base/icons/src/vender/workflow/Jinja.json create mode 100644 web/app/components/base/icons/src/vender/workflow/Jinja.tsx diff --git a/web/app/components/base/icons/assets/vender/workflow/jinja.svg b/web/app/components/base/icons/assets/vender/workflow/jinja.svg new file mode 100644 index 0000000000..5b40f30ed5 --- /dev/null +++ b/web/app/components/base/icons/assets/vender/workflow/jinja.svg @@ -0,0 +1,13 @@ + + + + + + + + + + + + + diff --git a/web/app/components/base/icons/src/vender/workflow/Jinja.json b/web/app/components/base/icons/src/vender/workflow/Jinja.json new file mode 100644 index 0000000000..ba46cb9ca6 --- /dev/null +++ b/web/app/components/base/icons/src/vender/workflow/Jinja.json @@ -0,0 +1,98 @@ +{ + "icon": { + "type": "element", + "isRootNode": true, + "name": "svg", + "attributes": { + "width": "24", + "height": "12", + "viewBox": "0 0 24 12", + "fill": "none", + "xmlns": "http://www.w3.org/2000/svg" + }, + "children": [ + { + "type": "element", + "name": "g", + "attributes": { + "id": "Jinja Icon" + }, + "children": [ + { + "type": "element", + "name": "g", + 
"attributes": { + "id": "Vector" + }, + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": "M7.46013 5.99982C7.46013 4.87982 7.48013 3.92982 7.53013 3.16982V3.06982L6.13013 3.23982L6.15013 3.32982C6.29013 4.03982 6.36013 4.93982 6.36013 5.99982C6.36013 6.93982 6.33013 7.78982 6.28013 8.51982V8.60982H7.55013V8.51982C7.49013 7.72982 7.46013 6.87982 7.46013 5.99982Z", + "fill": "currentColor" + }, + "children": [] + }, + { + "type": "element", + "name": "path", + "attributes": { + "d": "M3.33016 1.31998C3.38016 2.31998 3.38016 5.13998 3.38016 7.00998V7.77998C3.38016 8.21998 3.35016 8.58998 3.28016 8.85998C3.22016 9.12998 3.11016 9.34998 2.96016 9.52998C2.82016 9.70998 2.62016 9.83998 2.37016 9.92998C2.12016 10.01 1.82016 10.06 1.49016 10.06C1.19016 10.06 0.900156 9.99998 0.620156 9.87998L0.520156 9.83998L0.410156 10.83L0.480156 10.85C0.800156 10.93 1.16016 10.97 1.56016 10.97C2.08016 10.97 2.53016 10.9 2.90016 10.77C3.28016 10.64 3.59016 10.43 3.83016 10.15C4.07016 9.87998 4.25016 9.52998 4.36016 9.13998C4.47016 8.74998 4.53016 8.23998 4.53016 7.64998C4.53016 6.78998 4.59016 3.54998 4.59016 3.17998C4.61016 2.47998 4.63016 1.86998 4.66016 1.31998V1.22998H3.33016V1.31998Z", + "fill": "currentColor" + }, + "children": [] + }, + { + "type": "element", + "name": "path", + "attributes": { + "d": "M7.08021 0.919922C6.82022 0.919922 6.60021 0.999922 6.45021 1.14992C6.30021 1.29992 6.22021 1.47992 6.22021 1.68992C6.22021 1.87992 6.28021 2.04992 6.41021 2.18992C6.54022 2.31992 6.73022 2.38992 6.96022 2.38992C7.23022 2.38992 7.44021 2.30992 7.59021 2.15992C7.74021 1.99992 7.81021 1.81992 7.81021 1.60992C7.81021 1.42992 7.74021 1.25992 7.61021 1.12992C7.48021 0.989922 7.30021 0.919922 7.08021 0.919922Z", + "fill": "currentColor" + }, + "children": [] + }, + { + "type": "element", + "name": "path", + "attributes": { + "d": "M15.6102 3.30981C15.7702 4.07981 15.8502 5.25981 15.8502 6.81981C15.8502 8.26981 15.7902 9.23981 15.6702 9.67981C15.5902 9.96981 15.3802 10.2598 15.0302 10.5198L14.9702 10.5698L15.3502 11.0998H15.4002C16.4302 10.8198 16.9602 10.0598 16.9602 8.83981C16.9602 8.64981 16.9502 8.30981 16.9202 7.80981C16.9002 7.31981 16.8902 6.90981 16.8902 6.59981C16.8902 5.44981 16.9202 4.28981 16.9902 3.15981V3.05981L15.5802 3.21981L15.6002 3.30981H15.6102Z", + "fill": "currentColor" + }, + "children": [] + }, + { + "type": "element", + "name": "path", + "attributes": { + "d": "M14.2901 5.77C14.2901 5.7 14.2901 5.56 14.3001 5.36C14.3001 5.15 14.3101 5.01 14.3101 4.94C14.3101 4.22 14.1101 3.71 13.7201 3.43C13.3401 3.15 12.8001 3 12.1101 3C11.4201 3 10.7901 3.24 10.2001 3.71L10.0901 3.06L8.8501 3.22L8.8701 3.31C9.0501 4.11 9.1401 4.95 9.1401 5.8C9.1401 6.36 9.1101 7.27 9.0401 8.52V8.61H10.3101V8.53C10.2901 7.07 10.2801 5.71 10.2801 4.49C10.7401 4.14 11.2501 3.96 11.7901 3.96C12.2401 3.96 12.5801 4.06 12.8201 4.26C13.0501 4.45 13.1701 4.82 13.1701 5.36C13.1701 6.5 13.1301 7.56 13.0401 8.53V8.62H14.3101V8.54C14.2901 7.35 14.2801 6.42 14.2801 5.79L14.2901 5.77Z", + "fill": "currentColor" + }, + "children": [] + }, + { + "type": "element", + "name": "path", + "attributes": { + "d": "M16.5302 0.919922C16.2702 0.919922 16.0502 0.999922 15.9002 1.14992C15.7502 1.29992 15.6702 1.47992 15.6702 1.68992C15.6702 1.87992 15.7302 2.04992 15.8602 2.18992C15.9902 2.31992 16.1802 2.38992 16.4102 2.38992C16.6702 2.38992 16.8902 2.30992 17.0302 2.15992C17.1802 1.99992 17.2502 1.81992 17.2502 1.60992C17.2502 1.42992 17.1802 1.25992 17.0502 1.12992C16.9202 0.989922 16.7402 0.919922 16.5202 
0.919922H16.5302Z", + "fill": "currentColor" + }, + "children": [] + }, + { + "type": "element", + "name": "path", + "attributes": { + "d": "M23.1802 8.51001C23.0702 8.00001 23.0202 7.40001 23.0202 6.73001C23.0202 6.57001 23.0202 6.26001 23.0402 5.83001C23.0602 5.38001 23.0702 5.06001 23.0702 4.88001C23.0702 4.20001 22.8602 3.71001 22.4502 3.43001C22.0402 3.15001 21.4702 3.01001 20.7302 3.01001C19.9402 3.01001 19.2302 3.09001 18.6102 3.25001H18.5602L18.4302 4.20001L18.5502 4.17001C19.1602 4.03001 19.7802 3.96001 20.4102 3.96001C20.9302 3.96001 21.3202 4.03001 21.5702 4.18001C21.8102 4.31001 21.9302 4.59001 21.9302 5.01001C21.9302 5.09001 21.9302 5.16001 21.9302 5.23001C20.5102 5.25001 19.5602 5.44001 19.0302 5.79001C18.4802 6.15001 18.2002 6.63001 18.2002 7.23001C18.2002 7.72001 18.3802 8.10001 18.7402 8.36001C19.0902 8.62001 19.5102 8.75001 19.9902 8.75001C20.8202 8.75001 21.5002 8.55001 22.0102 8.17001C22.0102 8.30001 22.0402 8.44001 22.0802 8.58001L22.1002 8.64001L23.2202 8.60001L23.2002 8.50001L23.1802 8.51001ZM20.2802 6.18001C20.6502 6.08001 21.2002 6.03001 21.9102 6.03001C21.9102 6.45001 21.9202 6.92001 21.9402 7.42001C21.5602 7.69001 21.0502 7.83001 20.4302 7.83001C19.7002 7.83001 19.3502 7.61001 19.3502 7.16001C19.3502 6.68001 19.6602 6.36001 20.2802 6.18001Z", + "fill": "currentColor" + }, + "children": [] + } + ] + } + ] + } + ] + }, + "name": "Jinja" +} \ No newline at end of file diff --git a/web/app/components/base/icons/src/vender/workflow/Jinja.tsx b/web/app/components/base/icons/src/vender/workflow/Jinja.tsx new file mode 100644 index 0000000000..ed819ea27b --- /dev/null +++ b/web/app/components/base/icons/src/vender/workflow/Jinja.tsx @@ -0,0 +1,16 @@ +// GENERATE BY script +// DON NOT EDIT IT MANUALLY + +import * as React from 'react' +import data from './Jinja.json' +import IconBase from '@/app/components/base/icons/IconBase' +import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase' + +const Icon = React.forwardRef, Omit>(( + props, + ref, +) => ) + +Icon.displayName = 'Jinja' + +export default Icon diff --git a/web/app/components/base/icons/src/vender/workflow/index.ts b/web/app/components/base/icons/src/vender/workflow/index.ts index a0ebc30014..bb79b2c045 100644 --- a/web/app/components/base/icons/src/vender/workflow/index.ts +++ b/web/app/components/base/icons/src/vender/workflow/index.ts @@ -4,6 +4,7 @@ export { default as End } from './End' export { default as Home } from './Home' export { default as Http } from './Http' export { default as IfElse } from './IfElse' +export { default as Jinja } from './Jinja' export { default as KnowledgeRetrieval } from './KnowledgeRetrieval' export { default as Llm } from './Llm' export { default as QuestionClassifier } from './QuestionClassifier' diff --git a/web/app/components/base/switch/index.tsx b/web/app/components/base/switch/index.tsx index fec88ecdde..6794e51efd 100644 --- a/web/app/components/base/switch/index.tsx +++ b/web/app/components/base/switch/index.tsx @@ -5,7 +5,7 @@ import { Switch as OriginalSwitch } from '@headlessui/react' type SwitchProps = { onChange: (value: boolean) => void - size?: 'md' | 'lg' | 'l' + size?: 'sm' | 'md' | 'lg' | 'l' defaultValue?: boolean disabled?: boolean } @@ -19,18 +19,21 @@ const Switch = ({ onChange, size = 'lg', defaultValue = false, disabled = false lg: 'h-6 w-11', l: 'h-5 w-9', md: 'h-4 w-7', + sm: 'h-3 w-5', } const circleStyle = { lg: 'h-5 w-5', l: 'h-4 w-4', md: 'h-3 w-3', + sm: 'h-2 w-2', } const translateLeft = { lg: 'translate-x-5', l: 
'translate-x-4', md: 'translate-x-3', + sm: 'translate-x-2', } return ( = ({ offset, }) => { const [open, setOpen] = useState(false) + const [isHoverPopup, { + setTrue: setHoverPopup, + setFalse: setNotHoverPopup, + }] = useBoolean(false) + + const isHoverPopupRef = useRef(isHoverPopup) + useEffect(() => { + isHoverPopupRef.current = isHoverPopup + }, [isHoverPopup]) + + const [isHoverTrigger, { + setTrue: setHoverTrigger, + setFalse: setNotHoverTrigger, + }] = useBoolean(false) + + const isHoverTriggerRef = useRef(isHoverTrigger) + useEffect(() => { + isHoverTriggerRef.current = isHoverTrigger + }, [isHoverTrigger]) + + const handleLeave = (isTrigger: boolean) => { + if (isTrigger) + setNotHoverTrigger() + + else + setNotHoverPopup() + + // give time to move to the popup + setTimeout(() => { + if (!isHoverPopupRef.current && !isHoverTriggerRef.current) + setOpen(false) + }, 500) + } return ( = ({ > triggerMethod === 'click' && setOpen(v => !v)} - onMouseEnter={() => triggerMethod === 'hover' && setOpen(true)} - onMouseLeave={() => triggerMethod === 'hover' && setOpen(false)} + onMouseEnter={() => { + if (triggerMethod === 'hover') { + setHoverTrigger() + setOpen(true) + } + }} + onMouseLeave={() => triggerMethod === 'hover' && handleLeave(true)} > {children} -
+
triggerMethod === 'hover' && setHoverPopup()} + onMouseLeave={() => triggerMethod === 'hover' && handleLeave(false)} + > {popupContent} {!hideArrow && arrow}
diff --git a/web/app/components/workflow/nodes/_base/components/editor/code-editor/editor-support-vars.tsx b/web/app/components/workflow/nodes/_base/components/editor/code-editor/editor-support-vars.tsx index d3f5547781..c084a838ba 100644 --- a/web/app/components/workflow/nodes/_base/components/editor/code-editor/editor-support-vars.tsx +++ b/web/app/components/workflow/nodes/_base/components/editor/code-editor/editor-support-vars.tsx @@ -3,33 +3,28 @@ import type { FC } from 'react' import React, { useEffect, useRef, useState } from 'react' import { useBoolean } from 'ahooks' import { useTranslation } from 'react-i18next' +import cn from 'classnames' import type { Props as EditorProps } from '.' import Editor from '.' import VarReferenceVars from '@/app/components/workflow/nodes/_base/components/variable/var-reference-vars' -import useAvailableVarList from '@/app/components/workflow/nodes/_base/hooks/use-available-var-list' -import type { Variable } from '@/app/components/workflow/types' +import type { NodeOutPutVar, Variable } from '@/app/components/workflow/types' const TO_WINDOW_OFFSET = 8 type Props = { - nodeId: string + availableVars: NodeOutPutVar[] varList: Variable[] - onAddVar: (payload: Variable) => void + onAddVar?: (payload: Variable) => void } & EditorProps const CodeEditor: FC = ({ - nodeId, + availableVars, varList, onAddVar, ...editorProps }) => { const { t } = useTranslation() - const { availableVars } = useAvailableVarList(nodeId, { - onlyLeafNodeVar: false, - filterVar: () => true, - }) - const isLeftBraceRef = useRef(false) const editorRef = useRef(null) @@ -76,7 +71,8 @@ const CodeEditor: FC = ({ if (popupPosition.y + height > window.innerHeight - TO_WINDOW_OFFSET) newPopupPosition.y = window.innerHeight - height - TO_WINDOW_OFFSET - setPopupPosition(newPopupPosition) + if (newPopupPosition.x !== popupPosition.x || newPopupPosition.y !== popupPosition.y) + setPopupPosition(newPopupPosition) } }, [isShowVarPicker, popupPosition]) @@ -124,7 +120,7 @@ const CodeEditor: FC = ({ value_selector: varValue, } - onAddVar(newVar) + onAddVar?.(newVar) } const editor: any = editorRef.current const monaco: any = monacoRef.current @@ -143,7 +139,7 @@ const CodeEditor: FC = ({ } return ( -
+
void - title: JSX.Element + title?: JSX.Element language: CodeLanguage headerRight?: JSX.Element readOnly?: boolean @@ -22,6 +25,8 @@ export type Props = { height?: number isInNode?: boolean onMount?: (editor: any, monaco: any) => void + noWrapper?: boolean + isExpand?: boolean } const languageMap = { @@ -30,11 +35,20 @@ const languageMap = { [CodeLanguage.json]: 'json', } +const DEFAULT_THEME = { + base: 'vs', + inherit: true, + rules: [], + colors: { + 'editor.background': '#F2F4F7', // #00000000 transparent. But it will has a blue border + }, +} + const CodeEditor: FC = ({ value = '', placeholder = '', onChange = () => { }, - title, + title = '', headerRight, language, readOnly, @@ -42,16 +56,37 @@ const CodeEditor: FC = ({ height, isInNode, onMount, + noWrapper, + isExpand, }) => { const [isFocus, setIsFocus] = React.useState(false) + const [isMounted, setIsMounted] = React.useState(false) + const minHeight = height || 200 + const [editorContentHeight, setEditorContentHeight] = useState(56) + + const valueRef = useRef(value) + useEffect(() => { + valueRef.current = value + }, [value]) + + const editorRef = useRef(null) + const resizeEditorToContent = () => { + if (editorRef.current) { + const contentHeight = editorRef.current.getContentHeight() // Math.max(, minHeight) + setEditorContentHeight(contentHeight) + } + } const handleEditorChange = (value: string | undefined) => { onChange(value || '') + setTimeout(() => { + resizeEditorToContent() + }, 10) } - const editorRef = useRef(null) const handleEditorDidMount = (editor: any, monaco: any) => { editorRef.current = editor + resizeEditorToContent() editor.onDidFocusEditorText(() => { setIsFocus(true) @@ -60,6 +95,8 @@ const CodeEditor: FC = ({ setIsFocus(false) }) + monaco.editor.defineTheme('default-theme', DEFAULT_THEME) + monaco.editor.defineTheme('blur-theme', { base: 'vs', inherit: true, @@ -78,7 +115,10 @@ const CodeEditor: FC = ({ }, }) + monaco.editor.setTheme('default-theme') // Fix: sometimes not load the default theme + onMount?.(editor, monaco) + setIsMounted(true) } const outPutValue = (() => { @@ -92,43 +132,63 @@ const CodeEditor: FC = ({ } })() - return ( -
- { + if (noWrapper) + return 'default-theme' + + return isFocus ? 'focus-theme' : 'blur-theme' + })() + + const main = ( + <> + {/* https://www.npmjs.com/package/@monaco-editor/react */} + - <> - {/* https://www.npmjs.com/package/@monaco-editor/react */} - { + // return
{num}
+ // } + }} + onMount={handleEditorDidMount} + /> + {!outPutValue &&
{placeholder}
} + + ) + + return ( +
+ {noWrapper + ?
+ {main} +
+ : ( + { - // return
{num}
- // } - }} - onMount={handleEditorDidMount} - /> - {!outPutValue &&
{placeholder}
} - - + headerRight={headerRight} + isFocus={isFocus && !readOnly} + minHeight={minHeight} + isInNode={isInNode} + > + {main} + + )}
) } diff --git a/web/app/components/workflow/nodes/_base/components/editor/code-editor/style.css b/web/app/components/workflow/nodes/_base/components/editor/code-editor/style.css index 229146ce18..3a6624267a 100644 --- a/web/app/components/workflow/nodes/_base/components/editor/code-editor/style.css +++ b/web/app/components/workflow/nodes/_base/components/editor/code-editor/style.css @@ -2,6 +2,10 @@ padding-left: 10px; } +.no-wrapper .margin-view-overlays { + padding-left: 0; +} + /* hide readonly tooltip */ .monaco-editor-overlaymessage { display: none !important; diff --git a/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx b/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx index 779bf76197..08a714e385 100644 --- a/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx +++ b/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx @@ -1,16 +1,19 @@ 'use client' import type { FC } from 'react' -import React, { useCallback, useEffect, useRef, useState } from 'react' +import React, { useCallback, useRef } from 'react' import cn from 'classnames' import copy from 'copy-to-clipboard' import { useTranslation } from 'react-i18next' import { useBoolean } from 'ahooks' -import { - BlockEnum, - type Node, - type NodeOutPutVar, +import { BlockEnum, EditionType } from '../../../../types' +import type { + Node, + NodeOutPutVar, + Variable, } from '../../../../types' + import Wrap from '../editor/wrap' +import { CodeLanguage } from '../../../code/types' import ToggleExpandBtn from '@/app/components/workflow/nodes/_base/components/toggle-expand-btn' import useToggleExpend from '@/app/components/workflow/nodes/_base/hooks/use-toggle-expend' import PromptEditor from '@/app/components/base/prompt-editor' @@ -21,6 +24,10 @@ import { useEventEmitterContextContext } from '@/context/event-emitter' import { PROMPT_EDITOR_INSERT_QUICKLY } from '@/app/components/base/prompt-editor/plugins/update-block' import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development' import TooltipPlus from '@/app/components/base/tooltip-plus' +import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/code-editor/editor-support-vars' +import Switch from '@/app/components/base/switch' +import { Jinja } from '@/app/components/base/icons/src/vender/workflow' + type Props = { className?: string headerClassName?: string @@ -42,6 +49,12 @@ type Props = { } nodesOutputVars?: NodeOutPutVar[] availableNodes?: Node[] + // for jinja + isSupportJinja?: boolean + editionType?: EditionType + onEditionTypeChange?: (editionType: EditionType) => void + varList?: Variable[] + handleAddVariable?: (payload: any) => void } const Editor: FC = ({ @@ -61,6 +74,11 @@ const Editor: FC = ({ hasSetBlockStatus, nodesOutputVars, availableNodes = [], + isSupportJinja, + editionType, + onEditionTypeChange, + varList = [], + handleAddVariable, }) => { const { t } = useTranslation() const { eventEmitter } = useEventEmitterContextContext() @@ -85,20 +103,6 @@ const Editor: FC = ({ setTrue: setFocus, setFalse: setBlur, }] = useBoolean(false) - const hideTooltipRunId = useRef(0) - - const [isShowInsertToolTip, setIsShowInsertTooltip] = useState(false) - useEffect(() => { - if (isFocus) { - clearTimeout(hideTooltipRunId.current) - setIsShowInsertTooltip(true) - } - else { - hideTooltipRunId.current = setTimeout(() => { - setIsShowInsertTooltip(false) - }, 100) as any - } - }, [isFocus]) const handleInsertVariable = () => { setFocus() @@ -116,6 +120,29 @@ 
const Editor: FC = ({
{/* Operations */}
+ {isSupportJinja && ( + +
{t('workflow.common.enableJinja')}
+ {t('workflow.common.learnMore')} +
+ } + hideArrow + > +
+ + { + onEditionTypeChange?.(checked ? EditionType.jinja2 : EditionType.basic) + }} + /> +
+ + + )} {!readOnly && ( = ({ {/* Min: 80 Max: 560. Header: 24 */}
-
- { - acc[node.id] = { - title: node.data.title, - type: node.data.type, - } - if (node.data.type === BlockEnum.Start) { - acc.sys = { - title: t('workflow.blocks.start'), - type: BlockEnum.Start, - } - } - return acc - }, {} as any), - }} - onChange={onChange} - onBlur={setBlur} - onFocus={setFocus} - editable={!readOnly} - /> - {/* to patch Editor not support dynamic change editable status */} - {readOnly &&
} -
+ {!(isSupportJinja && editionType === EditionType.jinja2) + ? ( +
+ { + acc[node.id] = { + title: node.data.title, + type: node.data.type, + } + if (node.data.type === BlockEnum.Start) { + acc.sys = { + title: t('workflow.blocks.start'), + type: BlockEnum.Start, + } + } + return acc + }, {} as any), + }} + onChange={onChange} + onBlur={setBlur} + onFocus={setFocus} + editable={!readOnly} + /> + {/* to patch Editor not support dynamic change editable status */} + {readOnly &&
} +
+ ) + : ( +
+ +
+ )}
-
diff --git a/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx b/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx index 58478591dd..6a6ef1e958 100644 --- a/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx +++ b/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx @@ -3,7 +3,8 @@ import type { FC } from 'react' import React, { useEffect, useState } from 'react' import { uniqueId } from 'lodash-es' import { useTranslation } from 'react-i18next' -import type { PromptItem } from '../../../types' +import type { PromptItem, Variable } from '../../../types' +import { EditionType } from '../../../types' import Editor from '@/app/components/workflow/nodes/_base/components/prompt/editor' import TypeSelector from '@/app/components/workflow/nodes/_base/components/selector' import TooltipPlus from '@/app/components/base/tooltip-plus' @@ -24,6 +25,7 @@ type Props = { payload: PromptItem handleChatModeMessageRoleChange: (role: PromptRole) => void onPromptChange: (p: string) => void + onEditionTypeChange: (editionType: EditionType) => void onRemove: () => void isShowContext: boolean hasSetBlockStatus: { @@ -33,6 +35,8 @@ type Props = { } availableVars: any availableNodes: any + varList: Variable[] + handleAddVariable: (payload: any) => void } const roleOptions = [ @@ -64,17 +68,21 @@ const ConfigPromptItem: FC = ({ isChatApp, payload, onPromptChange, + onEditionTypeChange, onRemove, isShowContext, hasSetBlockStatus, availableVars, availableNodes, + varList, + handleAddVariable, }) => { const { t } = useTranslation() const [instanceId, setInstanceId] = useState(uniqueId()) useEffect(() => { setInstanceId(`${id}-${uniqueId()}`) }, [id]) + return ( = ({
} - value={payload.text} + value={payload.edition_type === EditionType.jinja2 ? (payload.jinja2_text || '') : payload.text} onChange={onPromptChange} readOnly={readOnly} showRemove={canRemove} @@ -118,6 +126,11 @@ const ConfigPromptItem: FC = ({ hasSetBlockStatus={hasSetBlockStatus} nodesOutputVars={availableVars} availableNodes={availableNodes} + isSupportJinja + editionType={payload.edition_type} + onEditionTypeChange={onEditionTypeChange} + varList={varList} + handleAddVariable={handleAddVariable} /> ) } diff --git a/web/app/components/workflow/nodes/llm/components/config-prompt.tsx b/web/app/components/workflow/nodes/llm/components/config-prompt.tsx index 6c56aa656c..b79ecfa62a 100644 --- a/web/app/components/workflow/nodes/llm/components/config-prompt.tsx +++ b/web/app/components/workflow/nodes/llm/components/config-prompt.tsx @@ -6,8 +6,8 @@ import produce from 'immer' import { ReactSortable } from 'react-sortablejs' import { v4 as uuid4 } from 'uuid' import cn from 'classnames' -import type { PromptItem, ValueSelector, Var } from '../../../types' -import { PromptRole } from '../../../types' +import type { PromptItem, ValueSelector, Var, Variable } from '../../../types' +import { EditionType, PromptRole } from '../../../types' import useAvailableVarList from '../../_base/hooks/use-available-var-list' import ConfigPromptItem from './config-prompt-item' import Editor from '@/app/components/workflow/nodes/_base/components/prompt/editor' @@ -30,6 +30,8 @@ type Props = { history: boolean query: boolean } + varList?: Variable[] + handleAddVariable: (payload: any) => void } const ConfigPrompt: FC = ({ @@ -42,10 +44,12 @@ const ConfigPrompt: FC = ({ onChange, isShowContext, hasSetBlockStatus, + varList = [], + handleAddVariable, }) => { const { t } = useTranslation() const payloadWithIds = (isChatModel && Array.isArray(payload)) - ? payload.map((item, i) => { + ? payload.map((item) => { const id = uuid4() return { id: item.id || id, @@ -67,7 +71,16 @@ const ConfigPrompt: FC = ({ const handleChatModePromptChange = useCallback((index: number) => { return (prompt: string) => { const newPrompt = produce(payload as PromptItem[], (draft) => { - draft[index].text = prompt + draft[index][draft[index].edition_type === EditionType.jinja2 ? 'jinja2_text' : 'text'] = prompt + }) + onChange(newPrompt) + } + }, [onChange, payload]) + + const handleChatModeEditionTypeChange = useCallback((index: number) => { + return (editionType: EditionType) => { + const newPrompt = produce(payload as PromptItem[], (draft) => { + draft[index].edition_type = editionType }) onChange(newPrompt) } @@ -106,7 +119,14 @@ const ConfigPrompt: FC = ({ const handleCompletionPromptChange = useCallback((prompt: string) => { const newPrompt = produce(payload as PromptItem, (draft) => { - draft.text = prompt + draft[draft.edition_type === EditionType.jinja2 ? 
'jinja2_text' : 'text'] = prompt + }) + onChange(newPrompt) + }, [onChange, payload]) + + const handleCompletionEditionTypeChange = useCallback((editionType: EditionType) => { + const newPrompt = produce(payload as PromptItem, (draft) => { + draft.edition_type = editionType }) onChange(newPrompt) }, [onChange, payload]) @@ -161,11 +181,14 @@ const ConfigPrompt: FC = ({ isChatApp={isChatApp} payload={item} onPromptChange={handleChatModePromptChange(index)} + onEditionTypeChange={handleChatModeEditionTypeChange(index)} onRemove={handleRemove(index)} isShowContext={isShowContext} hasSetBlockStatus={hasSetBlockStatus} availableVars={availableVars} availableNodes={availableNodes} + varList={varList} + handleAddVariable={handleAddVariable} />
@@ -187,7 +210,7 @@ const ConfigPrompt: FC = ({ {t(`${i18nPrefix}.prompt`)}} - value={(payload as PromptItem).text} + value={(payload as PromptItem).edition_type === EditionType.basic ? (payload as PromptItem).text : ((payload as PromptItem).jinja2_text || '')} onChange={handleCompletionPromptChange} readOnly={readOnly} isChatModel={isChatModel} @@ -196,6 +219,11 @@ const ConfigPrompt: FC = ({ hasSetBlockStatus={hasSetBlockStatus} nodesOutputVars={availableVars} availableNodes={availableNodes} + isSupportJinja + editionType={(payload as PromptItem).edition_type} + varList={varList} + onEditionTypeChange={handleCompletionEditionTypeChange} + handleAddVariable={handleAddVariable} /> )} diff --git a/web/app/components/workflow/nodes/llm/default.ts b/web/app/components/workflow/nodes/llm/default.ts index 8ad6d86260..e68b0cb318 100644 --- a/web/app/components/workflow/nodes/llm/default.ts +++ b/web/app/components/workflow/nodes/llm/default.ts @@ -1,7 +1,6 @@ -import { BlockEnum } from '../../types' -import { type NodeDefault, PromptRole } from '../../types' +import { BlockEnum, EditionType } from '../../types' +import { type NodeDefault, type PromptItem, PromptRole } from '../../types' import type { LLMNodeType } from './types' -import type { PromptItem } from '@/models/debug' import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants' const i18nPrefix = 'workflow.errorMsg' @@ -16,7 +15,6 @@ const nodeDefault: NodeDefault = { temperature: 0.7, }, }, - variables: [], prompt_template: [{ role: PromptRole.system, text: '', @@ -57,6 +55,23 @@ const nodeDefault: NodeDefault = { if (isChatModel && !!payload.memory.query_prompt_template && !payload.memory.query_prompt_template.includes('{{#sys.query#}}')) errorMessages = t('workflow.nodes.llm.sysQueryInUser') } + + if (!errorMessages) { + const isChatModel = payload.model.mode === 'chat' + const isShowVars = (() => { + if (isChatModel) + return (payload.prompt_template as PromptItem[]).some(item => item.edition_type === EditionType.jinja2) + return (payload.prompt_template as PromptItem).edition_type === EditionType.jinja2 + })() + if (isShowVars && payload.prompt_config?.jinja2_variables) { + payload.prompt_config?.jinja2_variables.forEach((i) => { + if (!errorMessages && !i.variable) + errorMessages = t(`${i18nPrefix}.fieldRequired`, { field: t(`${i18nPrefix}.fields.variable`) }) + if (!errorMessages && !i.value_selector.length) + errorMessages = t(`${i18nPrefix}.fieldRequired`, { field: t(`${i18nPrefix}.fields.variableValue`) }) + }) + } + } return { isValid: !errorMessages, errorMessage: errorMessages, diff --git a/web/app/components/workflow/nodes/llm/panel.tsx b/web/app/components/workflow/nodes/llm/panel.tsx index cb1e3b767c..89a5a0eb2b 100644 --- a/web/app/components/workflow/nodes/llm/panel.tsx +++ b/web/app/components/workflow/nodes/llm/panel.tsx @@ -7,6 +7,8 @@ import useConfig from './use-config' import ResolutionPicker from './components/resolution-picker' import type { LLMNodeType } from './types' import ConfigPrompt from './components/config-prompt' +import VarList from '@/app/components/workflow/nodes/_base/components/variable/var-list' +import AddButton2 from '@/app/components/base/button/add-button' import Field from '@/app/components/workflow/nodes/_base/components/field' import Split from '@/app/components/workflow/nodes/_base/components/split' import ModelParameterModal from '@/app/components/header/account-setting/model-provider-page/model-parameter-modal' @@ -44,7 +46,12 @@ 
const Panel: FC> = ({ filterVar, availableVars, availableNodes, + isShowVars, handlePromptChange, + handleAddEmptyVariable, + handleAddVariable, + handleVarListChange, + handleVarNameChange, handleSyeQueryChange, handleMemoryChange, handleVisionResolutionEnabledChange, @@ -169,9 +176,29 @@ const Panel: FC> = ({ payload={inputs.prompt_template} onChange={handlePromptChange} hasSetBlockStatus={hasSetBlockStatus} + varList={inputs.prompt_config?.jinja2_variables || []} + handleAddVariable={handleAddVariable} /> )} + {isShowVars && ( + : undefined + } + > + + + )} + {/* Memory put place examples. */} {isChatMode && isChatModel && !!inputs.memory && (
diff --git a/web/app/components/workflow/nodes/llm/types.ts b/web/app/components/workflow/nodes/llm/types.ts index 97261ca95a..0ada4d3728 100644 --- a/web/app/components/workflow/nodes/llm/types.ts +++ b/web/app/components/workflow/nodes/llm/types.ts @@ -3,8 +3,10 @@ import type { CommonNodeType, Memory, ModelConfig, PromptItem, ValueSelector, Va export type LLMNodeType = CommonNodeType & { model: ModelConfig - variables: Variable[] prompt_template: PromptItem[] | PromptItem + prompt_config?: { + jinja2_variables?: Variable[] + } memory?: Memory context: { enabled: boolean diff --git a/web/app/components/workflow/nodes/llm/use-config.ts b/web/app/components/workflow/nodes/llm/use-config.ts index 8ccbb50cca..a93bf63e47 100644 --- a/web/app/components/workflow/nodes/llm/use-config.ts +++ b/web/app/components/workflow/nodes/llm/use-config.ts @@ -1,8 +1,7 @@ import { useCallback, useEffect, useRef, useState } from 'react' import produce from 'immer' -import useVarList from '../_base/hooks/use-var-list' -import { VarType } from '../../types' -import type { Memory, ValueSelector, Var } from '../../types' +import { EditionType, VarType } from '../../types' +import type { Memory, PromptItem, ValueSelector, Var, Variable } from '../../types' import { useStore } from '../../store' import { useIsChatMode, @@ -18,7 +17,6 @@ import { } from '@/app/components/header/account-setting/model-provider-page/declarations' import useNodeCrud from '@/app/components/workflow/nodes/_base/hooks/use-node-crud' import useOneStepRun from '@/app/components/workflow/nodes/_base/hooks/use-one-step-run' -import type { PromptItem } from '@/models/debug' import { RETRIEVAL_OUTPUT_STRUCT } from '@/app/components/workflow/constants' import { checkHasContextBlock, checkHasHistoryBlock, checkHasQueryBlock } from '@/app/components/base/prompt-editor/constants' @@ -29,20 +27,21 @@ const useConfig = (id: string, payload: LLMNodeType) => { const defaultConfig = useStore(s => s.nodesDefaultConfigs)[payload.type] const [defaultRolePrefix, setDefaultRolePrefix] = useState<{ user: string; assistant: string }>({ user: '', assistant: '' }) const { inputs, setInputs: doSetInputs } = useNodeCrud(id, payload) + const inputRef = useRef(inputs) + const setInputs = useCallback((newInputs: LLMNodeType) => { if (newInputs.memory && !newInputs.memory.role_prefix) { const newPayload = produce(newInputs, (draft) => { draft.memory!.role_prefix = defaultRolePrefix }) doSetInputs(newPayload) + inputRef.current = newPayload return } doSetInputs(newInputs) + inputRef.current = newInputs }, [doSetInputs, defaultRolePrefix]) - const inputRef = useRef(inputs) - useEffect(() => { - inputRef.current = inputs - }, [inputs]) + // model const model = inputs.model const modelMode = inputs.model?.mode @@ -178,11 +177,80 @@ const useConfig = (id: string, payload: LLMNodeType) => { } // eslint-disable-next-line react-hooks/exhaustive-deps }, [isShowVisionConfig, modelChanged]) + // variables - const { handleVarListChange, handleAddVariable } = useVarList({ - inputs, - setInputs, - }) + const isShowVars = (() => { + if (isChatModel) + return (inputs.prompt_template as PromptItem[]).some(item => item.edition_type === EditionType.jinja2) + + return (inputs.prompt_template as PromptItem).edition_type === EditionType.jinja2 + })() + const handleAddEmptyVariable = useCallback(() => { + const newInputs = produce(inputRef.current, (draft) => { + if (!draft.prompt_config) { + draft.prompt_config = { + jinja2_variables: [], + } + } + if 
(!draft.prompt_config.jinja2_variables) + draft.prompt_config.jinja2_variables = [] + + draft.prompt_config.jinja2_variables.push({ + variable: '', + value_selector: [], + }) + }) + setInputs(newInputs) + }, [setInputs]) + + const handleAddVariable = useCallback((payload: Variable) => { + const newInputs = produce(inputRef.current, (draft) => { + if (!draft.prompt_config) { + draft.prompt_config = { + jinja2_variables: [], + } + } + if (!draft.prompt_config.jinja2_variables) + draft.prompt_config.jinja2_variables = [] + + draft.prompt_config.jinja2_variables.push(payload) + }) + setInputs(newInputs) + }, [setInputs]) + + const handleVarListChange = useCallback((newList: Variable[]) => { + const newInputs = produce(inputRef.current, (draft) => { + if (!draft.prompt_config) { + draft.prompt_config = { + jinja2_variables: [], + } + } + if (!draft.prompt_config.jinja2_variables) + draft.prompt_config.jinja2_variables = [] + + draft.prompt_config.jinja2_variables = newList + }) + setInputs(newInputs) + }, [setInputs]) + + const handleVarNameChange = useCallback((oldName: string, newName: string) => { + const newInputs = produce(inputRef.current, (draft) => { + if (isChatModel) { + const promptTemplate = draft.prompt_template as PromptItem[] + promptTemplate.filter(item => item.edition_type === EditionType.jinja2).forEach((item) => { + item.jinja2_text = (item.jinja2_text || '').replaceAll(`{{ ${oldName} }}`, `{{ ${newName} }}`) + }) + } + else { + if ((draft.prompt_template as PromptItem).edition_type !== EditionType.jinja2) + return + + const promptTemplate = draft.prompt_template as PromptItem + promptTemplate.jinja2_text = (promptTemplate.jinja2_text || '').replaceAll(`{{ ${oldName} }}`, `{{ ${newName} }}`) + } + }) + setInputs(newInputs) + }, [isChatModel, setInputs]) // context const handleContextVarChange = useCallback((newVar: ValueSelector | string) => { @@ -194,11 +262,11 @@ const useConfig = (id: string, payload: LLMNodeType) => { }, [inputs, setInputs]) const handlePromptChange = useCallback((newPrompt: PromptItem[] | PromptItem) => { - const newInputs = produce(inputs, (draft) => { + const newInputs = produce(inputRef.current, (draft) => { draft.prompt_template = newPrompt }) setInputs(newInputs) - }, [inputs, setInputs]) + }, [setInputs]) const handleMemoryChange = useCallback((newMemory?: Memory) => { const newInputs = produce(inputs, (draft) => { @@ -286,6 +354,7 @@ const useConfig = (id: string, payload: LLMNodeType) => { runInputData, setRunInputData, runResult, + toVarInputs, } = useOneStepRun({ id, data: inputs, @@ -295,23 +364,6 @@ const useConfig = (id: string, payload: LLMNodeType) => { }, }) - // const handleRun = (submitData: Record) => { - // console.log(submitData) - // const res = produce(submitData, (draft) => { - // debugger - // if (draft.contexts) { - // draft['#context#'] = draft.contexts - // delete draft.contexts - // } - // if (draft.visionFiles) { - // draft['#files#'] = draft.visionFiles - // delete draft.visionFiles - // } - // }) - - // doHandleRun(res) - // } - const inputVarValues = (() => { const vars: Record = {} Object.keys(runInputData) @@ -348,7 +400,7 @@ const useConfig = (id: string, payload: LLMNodeType) => { }, [runInputData, setRunInputData]) const allVarStrArr = (() => { - const arr = isChatModel ? (inputs.prompt_template as PromptItem[]).map(item => item.text) : [(inputs.prompt_template as PromptItem).text] + const arr = isChatModel ? 
(inputs.prompt_template as PromptItem[]).filter(item => item.edition_type !== EditionType.jinja2).map(item => item.text) : [(inputs.prompt_template as PromptItem).text] if (isChatMode && isChatModel && !!inputs.memory) { arr.push('{{#sys.query#}}') arr.push(inputs.memory.query_prompt_template) @@ -357,7 +409,13 @@ const useConfig = (id: string, payload: LLMNodeType) => { return arr })() - const varInputs = getInputVars(allVarStrArr) + const varInputs = (() => { + const vars = getInputVars(allVarStrArr) + if (isShowVars) + return [...vars, ...toVarInputs(inputs.prompt_config?.jinja2_variables || [])] + + return vars + })() return { readOnly, @@ -370,8 +428,11 @@ const useConfig = (id: string, payload: LLMNodeType) => { isShowVisionConfig, handleModelChanged, handleCompletionParamsChange, + isShowVars, handleVarListChange, + handleVarNameChange, handleAddVariable, + handleAddEmptyVariable, handleContextVarChange, filterInputVar, filterVar, diff --git a/web/app/components/workflow/nodes/template-transform/panel.tsx b/web/app/components/workflow/nodes/template-transform/panel.tsx index 70fd4806e4..e3e54419ec 100644 --- a/web/app/components/workflow/nodes/template-transform/panel.tsx +++ b/web/app/components/workflow/nodes/template-transform/panel.tsx @@ -26,6 +26,7 @@ const Panel: FC> = ({ const { readOnly, inputs, + availableVars, handleVarListChange, handleVarNameChange, handleAddVariable, @@ -65,7 +66,7 @@ const Panel: FC> = ({ { const { nodesReadOnly: readOnly } = useNodesReadOnly() @@ -22,6 +23,11 @@ const useConfig = (id: string, payload: TemplateTransformNodeType) => { inputsRef.current = newPayload }, [doSetInputs]) + const { availableVars } = useAvailableVarList(id, { + onlyLeafNodeVar: false, + filterVar: () => true, + }) + const { handleAddVariable: handleAddEmptyVariable } = useVarList({ inputs, setInputs, @@ -108,6 +114,7 @@ const useConfig = (id: string, payload: TemplateTransformNodeType) => { return { readOnly, inputs, + availableVars, handleVarListChange, handleVarNameChange, handleAddVariable, diff --git a/web/app/components/workflow/types.ts b/web/app/components/workflow/types.ts index f0b5e08c6c..c21887a5bf 100644 --- a/web/app/components/workflow/types.ts +++ b/web/app/components/workflow/types.ts @@ -131,10 +131,17 @@ export enum PromptRole { assistant = 'assistant', } +export enum EditionType { + basic = 'basic', + jinja2 = 'jinja2', +} + export type PromptItem = { id?: string role?: PromptRole text: string + edition_type?: EditionType + jinja2_text?: string } export enum MemoryRole { diff --git a/web/i18n/en-US/workflow.ts b/web/i18n/en-US/workflow.ts index 5093b6631d..206bae5400 100644 --- a/web/i18n/en-US/workflow.ts +++ b/web/i18n/en-US/workflow.ts @@ -52,6 +52,8 @@ const translation = { jinjaEditorPlaceholder: 'Type \'/\' or \'{\' to insert variable', viewOnly: 'View Only', showRunHistory: 'Show Run History', + enableJinja: 'Enable Jinja template support', + learnMore: 'Learn More', copy: 'Copy', duplicate: 'Duplicate', addBlock: 'Add Block', diff --git a/web/i18n/zh-Hans/workflow.ts b/web/i18n/zh-Hans/workflow.ts index baae846376..781ff3b49d 100644 --- a/web/i18n/zh-Hans/workflow.ts +++ b/web/i18n/zh-Hans/workflow.ts @@ -52,6 +52,8 @@ const translation = { jinjaEditorPlaceholder: '输入 “/” 或 “{” 插入变量', viewOnly: '只读', showRunHistory: '显示运行历史', + enableJinja: '开启支持 Jinja 模板', + learnMore: '了解更多', copy: '拷贝', duplicate: '复制', addBlock: '添加节点', From 228de1f12acccbccadd5a990cd0bb33765fc0323 Mon Sep 17 00:00:00 2001 From: Bowen Liang Date: Fri, 10 May 2024 18:14:48 +0800 
Subject: [PATCH 042/267] fix: miss usage of os.path.join for URL assembly and add tests on yarl (#4224) --- .../model_providers/chatglm/llm/llm.py | 4 ++-- .../provider/builtin/dalle/tools/dalle2.py | 4 ++-- .../provider/builtin/dalle/tools/dalle3.py | 4 ++-- api/tests/unit_tests/libs/test_yarl.py | 23 +++++++++++++++++++ 4 files changed, 29 insertions(+), 6 deletions(-) create mode 100644 api/tests/unit_tests/libs/test_yarl.py diff --git a/api/core/model_runtime/model_providers/chatglm/llm/llm.py b/api/core/model_runtime/model_providers/chatglm/llm/llm.py index 12dc75aece..e83d08af71 100644 --- a/api/core/model_runtime/model_providers/chatglm/llm/llm.py +++ b/api/core/model_runtime/model_providers/chatglm/llm/llm.py @@ -1,6 +1,5 @@ import logging from collections.abc import Generator -from os.path import join from typing import Optional, cast from httpx import Timeout @@ -19,6 +18,7 @@ from openai import ( ) from openai.types.chat import ChatCompletion, ChatCompletionChunk from openai.types.chat.chat_completion_message import FunctionCall +from yarl import URL from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta from core.model_runtime.entities.message_entities import ( @@ -265,7 +265,7 @@ class ChatGLMLargeLanguageModel(LargeLanguageModel): client_kwargs = { "timeout": Timeout(315.0, read=300.0, write=10.0, connect=5.0), "api_key": "1", - "base_url": join(credentials['api_base'], 'v1') + "base_url": str(URL(credentials['api_base']) / 'v1') } return client_kwargs diff --git a/api/core/tools/provider/builtin/dalle/tools/dalle2.py b/api/core/tools/provider/builtin/dalle/tools/dalle2.py index e41cbd9f65..450e782281 100644 --- a/api/core/tools/provider/builtin/dalle/tools/dalle2.py +++ b/api/core/tools/provider/builtin/dalle/tools/dalle2.py @@ -1,8 +1,8 @@ from base64 import b64decode -from os.path import join from typing import Any, Union from openai import OpenAI +from yarl import URL from core.tools.entities.tool_entities import ToolInvokeMessage from core.tools.tool.builtin_tool import BuiltinTool @@ -23,7 +23,7 @@ class DallE2Tool(BuiltinTool): if not openai_base_url: openai_base_url = None else: - openai_base_url = join(openai_base_url, 'v1') + openai_base_url = str(URL(openai_base_url) / 'v1') client = OpenAI( api_key=self.runtime.credentials['openai_api_key'], diff --git a/api/core/tools/provider/builtin/dalle/tools/dalle3.py b/api/core/tools/provider/builtin/dalle/tools/dalle3.py index dc53025b02..87d18f68e0 100644 --- a/api/core/tools/provider/builtin/dalle/tools/dalle3.py +++ b/api/core/tools/provider/builtin/dalle/tools/dalle3.py @@ -1,8 +1,8 @@ from base64 import b64decode -from os.path import join from typing import Any, Union from openai import OpenAI +from yarl import URL from core.tools.entities.tool_entities import ToolInvokeMessage from core.tools.tool.builtin_tool import BuiltinTool @@ -23,7 +23,7 @@ class DallE3Tool(BuiltinTool): if not openai_base_url: openai_base_url = None else: - openai_base_url = join(openai_base_url, 'v1') + openai_base_url = str(URL(openai_base_url) / 'v1') client = OpenAI( api_key=self.runtime.credentials['openai_api_key'], diff --git a/api/tests/unit_tests/libs/test_yarl.py b/api/tests/unit_tests/libs/test_yarl.py new file mode 100644 index 0000000000..75a5344126 --- /dev/null +++ b/api/tests/unit_tests/libs/test_yarl.py @@ -0,0 +1,23 @@ +import pytest +from yarl import URL + + +def test_yarl_urls(): + expected_1 = 'https://dify.ai/api' + assert str(URL('https://dify.ai') / 'api') == expected_1 + 
assert str(URL('https://dify.ai/') / 'api') == expected_1 + + expected_2 = 'http://dify.ai:12345/api' + assert str(URL('http://dify.ai:12345') / 'api') == expected_2 + assert str(URL('http://dify.ai:12345/') / 'api') == expected_2 + + expected_3 = 'https://dify.ai/api/v1' + assert str(URL('https://dify.ai') / 'api' / 'v1') == expected_3 + assert str(URL('https://dify.ai') / 'api/v1') == expected_3 + assert str(URL('https://dify.ai/') / 'api/v1') == expected_3 + assert str(URL('https://dify.ai/api') / 'v1') == expected_3 + assert str(URL('https://dify.ai/api/') / 'v1') == expected_3 + + with pytest.raises(ValueError) as e1: + str(URL('https://dify.ai') / '/api') + assert str(e1.value) == "Appending path '/api' starting from slash is forbidden" From 36a9c5cc6b21f8357162b1c4c2f34c2c1b80c00b Mon Sep 17 00:00:00 2001 From: Whitewater Date: Fri, 10 May 2024 18:52:41 +0800 Subject: [PATCH 043/267] fix: remove unexpected zip and add FlipForward arrow icon (#4263) --- .../vender/line/arrows/flip-backward.zip | Bin 372 -> 0 bytes .../vender/line/arrows/flip-forward.svg | 5 +++ .../vender/line/arrows/flip-forward.zip | Bin 370 -> 0 bytes .../src/vender/line/arrows/FlipForward.json | 39 ++++++++++++++++++ .../src/vender/line/arrows/FlipForward.tsx | 16 +++++++ .../icons/src/vender/line/arrows/index.ts | 1 + 6 files changed, 61 insertions(+) delete mode 100644 web/app/components/base/icons/assets/vender/line/arrows/flip-backward.zip create mode 100644 web/app/components/base/icons/assets/vender/line/arrows/flip-forward.svg delete mode 100644 web/app/components/base/icons/assets/vender/line/arrows/flip-forward.zip create mode 100644 web/app/components/base/icons/src/vender/line/arrows/FlipForward.json create mode 100644 web/app/components/base/icons/src/vender/line/arrows/FlipForward.tsx diff --git a/web/app/components/base/icons/assets/vender/line/arrows/flip-backward.zip b/web/app/components/base/icons/assets/vender/line/arrows/flip-backward.zip deleted file mode 100644 index 5cb3d8483a4c5c406753aa6048b8eb7748d1c6d0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 372 zcmWIWW@h1HU|`^2ut=F3F*!0L`w@^=%*enX0i?t8i?Y*l^2-&R6Z6Uvi}gK|^YiqI z%hGcvoXtC|z|-))mg}@Ne}RXG)l|7_-s-vM4MKBgZAcYRUGwGR^A$-~mlPh5pI@)C zXQsZaN_p2_S0j$v*SqZQ{djcii_AJPQ4Iy=ZtLBN8(uW$3$BZAp0E4Ou+Kke*%X&c zN|GM1qXfyZi#=l_Y~I=q}scgwY__MK^+y>2rei`YeX^XG&Hr+%C< zUFBU?!9H6#x9;E{- + + + + diff --git a/web/app/components/base/icons/assets/vender/line/arrows/flip-forward.zip b/web/app/components/base/icons/assets/vender/line/arrows/flip-forward.zip deleted file mode 100644 index c5c1b0020479ed4999cd96e0bb9ed752d47042b7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 370 zcmWIWW@h1HU|`^2a88*UA$qK%_5qMr$jHDT0i?t8i?Y*l^2-&R6Z6Uvi}gK|^YiqI z%hGcvocB9yz;o<Qf z^V2)qpX{=bcp{a!VV(S=(D`$H{U0VxU}RpmaKmJ2_h!3_?t-5(f0GJ23agfwEOnWr zlK5_ALeIsVp4(TKv0s<0v15y0^-`c+xn zho`<}DRA48wSccU(26N9>;Au@+J|#a$fU-2eT`qP8udC%NvDuc{@R1N*Lbr-S-*dE zeZt;*?)KwEBa3$Z`+pC#U^p%mcF$_7%# O2!w7xS_~Mf3=9BdosMe& diff --git a/web/app/components/base/icons/src/vender/line/arrows/FlipForward.json b/web/app/components/base/icons/src/vender/line/arrows/FlipForward.json new file mode 100644 index 0000000000..2e0b517c38 --- /dev/null +++ b/web/app/components/base/icons/src/vender/line/arrows/FlipForward.json @@ -0,0 +1,39 @@ +{ + "icon": { + "type": "element", + "isRootNode": true, + "name": "svg", + "attributes": { + "width": "16", + "height": "16", + "viewBox": "0 0 16 16", + "fill": "none", + "xmlns": 
"http://www.w3.org/2000/svg" + }, + "children": [ + { + "type": "element", + "name": "g", + "attributes": { + "id": "Icon" + }, + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "id": "Icon_2", + "d": "M14 6.00016H5C3.34315 6.00016 2 7.34331 2 9.00016C2 10.657 3.34315 12.0002 5 12.0002H8M14 6.00016L11.3333 3.3335M14 6.00016L11.3333 8.66683", + "stroke": "currentColor", + "stroke-width": "1.5", + "stroke-linecap": "round", + "stroke-linejoin": "round" + }, + "children": [] + } + ] + } + ] + }, + "name": "FlipForward" +} \ No newline at end of file diff --git a/web/app/components/base/icons/src/vender/line/arrows/FlipForward.tsx b/web/app/components/base/icons/src/vender/line/arrows/FlipForward.tsx new file mode 100644 index 0000000000..1a091772a5 --- /dev/null +++ b/web/app/components/base/icons/src/vender/line/arrows/FlipForward.tsx @@ -0,0 +1,16 @@ +// GENERATE BY script +// DON NOT EDIT IT MANUALLY + +import * as React from 'react' +import data from './FlipForward.json' +import IconBase from '@/app/components/base/icons/IconBase' +import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase' + +const Icon = React.forwardRef, Omit>(( + props, + ref, +) => ) + +Icon.displayName = 'FlipForward' + +export default Icon diff --git a/web/app/components/base/icons/src/vender/line/arrows/index.ts b/web/app/components/base/icons/src/vender/line/arrows/index.ts index 5b482a75d0..2a1e4002b5 100644 --- a/web/app/components/base/icons/src/vender/line/arrows/index.ts +++ b/web/app/components/base/icons/src/vender/line/arrows/index.ts @@ -7,6 +7,7 @@ export { default as ChevronRight } from './ChevronRight' export { default as ChevronSelectorVertical } from './ChevronSelectorVertical' export { default as Collapse04 } from './Collapse04' export { default as FlipBackward } from './FlipBackward' +export { default as FlipForward } from './FlipForward' export { default as RefreshCcw01 } from './RefreshCcw01' export { default as RefreshCw05 } from './RefreshCw05' export { default as ReverseLeft } from './ReverseLeft' From 28495273b469b14665d0d3de356e3adf06ea972c Mon Sep 17 00:00:00 2001 From: KiyotakaMatsushita <6897406+KiyotakaMatsushita@users.noreply.github.com> Date: Fri, 10 May 2024 19:54:08 +0900 Subject: [PATCH 044/267] feat: Add storage type and Google Storage settings to worker (#4266) --- docker/docker-compose.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index b2a3353641..1baba04bd2 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -203,7 +203,7 @@ services: REDIS_USE_SSL: 'false' # The configurations of celery broker. CELERY_BROKER_URL: redis://:difyai123456@redis:6379/1 - # The type of storage to use for storing user files. Supported values are `local` and `s3` and `azure-blob`, Default: `local` + # The type of storage to use for storing user files. Supported values are `local` and `s3` and `azure-blob` and `google-storage`, Default: `local` STORAGE_TYPE: local STORAGE_LOCAL_PATH: storage # The S3 storage configurations, only available when STORAGE_TYPE is `s3`. @@ -217,6 +217,9 @@ services: AZURE_BLOB_ACCOUNT_KEY: 'difyai' AZURE_BLOB_CONTAINER_NAME: 'difyai-container' AZURE_BLOB_ACCOUNT_URL: 'https://.blob.core.windows.net' + # The Google storage configurations, only available when STORAGE_TYPE is `google-storage`. 
+      GOOGLE_STORAGE_BUCKET_NAME: 'your-bucket-name'
+      GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: 'your-google-service-account-json-base64-string'
      # The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`, `pgvector`.
      VECTOR_STORE: weaviate
      # The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.

From 370e1c1a17d052735947d261aae22c5d581c4191 Mon Sep 17 00:00:00 2001
From: Patryk Garstecki
Date: Sat, 11 May 2024 02:42:03 +0200
Subject: [PATCH 045/267] fix(frontend): :wrench: add privacy policy spaces (#4277)

---
 .../base/chat/chat-with-history/config-panel/index.tsx | 2 +-
 web/app/components/share/chat/welcome/index.tsx        | 2 +-
 web/app/components/share/chatbot/welcome/index.tsx     | 2 +-
 web/app/components/share/text-generation/index.tsx     | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/web/app/components/base/chat/chat-with-history/config-panel/index.tsx b/web/app/components/base/chat/chat-with-history/config-panel/index.tsx
index cf12b35d08..f49dcbab00 100644
--- a/web/app/components/base/chat/chat-with-history/config-panel/index.tsx
+++ b/web/app/components/base/chat/chat-with-history/config-panel/index.tsx
@@ -134,7 +134,7 @@ const ConfigPanel = () => {
              {site?.privacy_policy
                ?
{t('share.chat.privacyPolicyLeft')} {t('share.chat.privacyPolicyMiddle')} {t('share.chat.privacyPolicyRight')} diff --git a/web/app/components/share/chat/welcome/index.tsx b/web/app/components/share/chat/welcome/index.tsx index 6c431362dc..adc58d8a63 100644 --- a/web/app/components/share/chat/welcome/index.tsx +++ b/web/app/components/share/chat/welcome/index.tsx @@ -357,7 +357,7 @@ const Welcome: FC = ({ {siteInfo.privacy_policy ?
{t('share.chat.privacyPolicyLeft')} {t('share.chat.privacyPolicyMiddle')} {t('share.chat.privacyPolicyRight')} diff --git a/web/app/components/share/chatbot/welcome/index.tsx b/web/app/components/share/chatbot/welcome/index.tsx index 0cf1298873..240271d3b5 100644 --- a/web/app/components/share/chatbot/welcome/index.tsx +++ b/web/app/components/share/chatbot/welcome/index.tsx @@ -358,7 +358,7 @@ const Welcome: FC = ({ {siteInfo.privacy_policy ?
{t('share.chat.privacyPolicyLeft')} {t('share.chat.privacyPolicyMiddle')} {t('share.chat.privacyPolicyRight')} diff --git a/web/app/components/share/text-generation/index.tsx b/web/app/components/share/text-generation/index.tsx index 1a88fcddd4..21ff2b051a 100644 --- a/web/app/components/share/text-generation/index.tsx +++ b/web/app/components/share/text-generation/index.tsx @@ -618,7 +618,7 @@ const TextGeneration: FC = ({
·
{t('share.chat.privacyPolicyLeft')} {t('share.chat.privacyPolicyMiddle')} {t('share.chat.privacyPolicyRight')} From 00ce372b71ad9abe30f806ebf4b86aa2200a8984 Mon Sep 17 00:00:00 2001 From: TinsFox Date: Sat, 11 May 2024 08:43:37 +0800 Subject: [PATCH 046/267] fix: hook dependency (#4242) --- web/app/signin/normalForm.tsx | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/web/app/signin/normalForm.tsx b/web/app/signin/normalForm.tsx index bb213c25ff..aba658b3ce 100644 --- a/web/app/signin/normalForm.tsx +++ b/web/app/signin/normalForm.tsx @@ -5,13 +5,11 @@ import { useRouter } from 'next/navigation' import classNames from 'classnames' import useSWR from 'swr' import Link from 'next/link' -import { useContext } from 'use-context-selector' import Toast from '../components/base/toast' import style from './page.module.css' import { IS_CE_EDITION, apiPrefix } from '@/config' import Button from '@/app/components/base/button' import { login, oauth } from '@/service/common' -import I18n from '@/context/i18n' import { getPurifyHref } from '@/utils' const validEmailReg = /^[\w\.-]+@([\w-]+\.)+[\w-]{2,}$/ @@ -65,7 +63,6 @@ function reducer(state: IState, action: IAction) { const NormalForm = () => { const { t } = useTranslation() const router = useRouter() - const { locale } = useContext(I18n) const [state, dispatch] = useReducer(reducer, { formValid: false, @@ -96,7 +93,6 @@ const NormalForm = () => { remember_me: true, }, }) - if (res.result === 'success') { localStorage.setItem('console_token', res.data) router.replace('/apps') @@ -143,7 +139,7 @@ const NormalForm = () => { dispatch({ type: 'google_login_failed' }) if (google) window.location.href = google.redirect_url - }, [google, google]) + }, [google, google_error]) return ( <> From 749b236d3db71f5c8dc7b68cfdb1529d67bc4c83 Mon Sep 17 00:00:00 2001 From: Louie Long Date: Sat, 11 May 2024 08:50:46 +0800 Subject: [PATCH 047/267] fix: do nothing if switch to current app (#4249) Co-authored-by: langyong --- web/app/components/header/nav/nav-selector/index.tsx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/web/app/components/header/nav/nav-selector/index.tsx b/web/app/components/header/nav/nav-selector/index.tsx index edb6c3a388..6e36677f0e 100644 --- a/web/app/components/header/nav/nav-selector/index.tsx +++ b/web/app/components/header/nav/nav-selector/index.tsx @@ -73,6 +73,8 @@ const NavSelector = ({ curNav, navs, createText, isApp, onCreate, onLoadmore }: navs.map(nav => (
{ + if (curNav?.id === nav.id) + return setAppDetail() router.push(nav.link) }} title={nav.name}> From 1e451991dbd9318b8043c1bcb0daa3b3ab0e8ba5 Mon Sep 17 00:00:00 2001 From: YidaHu Date: Sat, 11 May 2024 10:07:54 +0800 Subject: [PATCH 048/267] fix: deutsch edit app (#4270) --- web/i18n/de-DE/app.ts | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/web/i18n/de-DE/app.ts b/web/i18n/de-DE/app.ts index 81d12cb04f..dc7396702c 100644 --- a/web/i18n/de-DE/app.ts +++ b/web/i18n/de-DE/app.ts @@ -42,13 +42,14 @@ const translation = { appCreated: 'App erstellt', appCreateFailed: 'Erstellen der App fehlgeschlagen', }, - editApp: { - startToEdit: 'App bearbeiten', - }, + editApp: 'App bearbeiten', + editAppTitle: 'App-Informationen bearbeiten', + editDone: 'App-Informationen wurden aktualisiert', + editFailed: 'Aktualisierung der App-Informationen fehlgeschlagen', emoji: { ok: 'OK', cancel: 'Abbrechen', }, } -export default translation +export default translation From 13f4ed6e0ecb4b57b851a277d99b2e61e9bbc86e Mon Sep 17 00:00:00 2001 From: zxhlyh Date: Sat, 11 May 2024 10:38:12 +0800 Subject: [PATCH 049/267] fix: workflow zoomin/out shortcuts (#4283) --- .../components/workflow/operator/zoom-in-out.tsx | 16 +++++++++++++--- web/app/components/workflow/utils.ts | 8 ++++++++ 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/web/app/components/workflow/operator/zoom-in-out.tsx b/web/app/components/workflow/operator/zoom-in-out.tsx index 6839c1aa04..7de83904fd 100644 --- a/web/app/components/workflow/operator/zoom-in-out.tsx +++ b/web/app/components/workflow/operator/zoom-in-out.tsx @@ -19,6 +19,7 @@ import { import { getKeyboardKeyCodeBySystem, getKeyboardKeyNameBySystem, + isEventTargetInputArea, } from '../utils' import ShortcutsName from '../shortcuts-name' import TipPopup from './tip-popup' @@ -128,10 +129,13 @@ const ZoomInOut: FC = () => { }) useKeyPress('shift.1', (e) => { - e.preventDefault() if (workflowReadOnly) return + if (isEventTargetInputArea(e.target as HTMLElement)) + return + + e.preventDefault() zoomTo(1) handleSyncWorkflowDraft() }, { @@ -140,10 +144,13 @@ const ZoomInOut: FC = () => { }) useKeyPress('shift.2', (e) => { - e.preventDefault() if (workflowReadOnly) return + if (isEventTargetInputArea(e.target as HTMLElement)) + return + + e.preventDefault() zoomTo(2) handleSyncWorkflowDraft() }, { @@ -152,10 +159,13 @@ const ZoomInOut: FC = () => { }) useKeyPress('shift.5', (e) => { - e.preventDefault() if (workflowReadOnly) return + if (isEventTargetInputArea(e.target as HTMLElement)) + return + + e.preventDefault() zoomTo(0.5) handleSyncWorkflowDraft() }, { diff --git a/web/app/components/workflow/utils.ts b/web/app/components/workflow/utils.ts index d4f6f77d71..e32ba61a36 100644 --- a/web/app/components/workflow/utils.ts +++ b/web/app/components/workflow/utils.ts @@ -406,3 +406,11 @@ export const getTopLeftNodePosition = (nodes: Node[]) => { y: minY, } } + +export const isEventTargetInputArea = (target: HTMLElement) => { + if (target.tagName === 'INPUT' || target.tagName === 'TEXTAREA') + return true + + if (target.contentEditable === 'true') + return true +} From 2c1c660c6e080552aceacbb4c3e5f37f5ca8fddf Mon Sep 17 00:00:00 2001 From: Patryk Garstecki Date: Sat, 11 May 2024 05:23:35 +0200 Subject: [PATCH 050/267] =?UTF-8?q?fix(Backend:http=5Fexecutor):=20:wrench?= =?UTF-8?q?:=20prevent=20splitting=20JSON=20data=20as=20v=E2=80=A6=20(#427?= =?UTF-8?q?6)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
api/core/workflow/nodes/http_request/http_executor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/workflow/nodes/http_request/http_executor.py b/api/core/workflow/nodes/http_request/http_executor.py index 4ca8a81d8c..0b07ad8e82 100644 --- a/api/core/workflow/nodes/http_request/http_executor.py +++ b/api/core/workflow/nodes/http_request/http_executor.py @@ -236,7 +236,7 @@ class HttpExecutor: for kv in kv_paris: if not kv.strip(): continue - kv = kv.split(':') + kv = kv.split(':', 1) if len(kv) == 2: body[kv[0].strip()] = kv[1] elif len(kv) == 1: From a588df43716c463ea2185f774102b436141e3bfe Mon Sep 17 00:00:00 2001 From: "Sebastian.W" Date: Sat, 11 May 2024 11:29:28 +0800 Subject: [PATCH 051/267] Add rerank model type for LocalAI provider (#3952) --- .../model_providers/localai/localai.yaml | 1 + .../localai/rerank/__init__.py | 0 .../model_providers/localai/rerank/rerank.py | 120 +++++++++++++ .../model_runtime/localai/test_rerank.py | 158 ++++++++++++++++++ 4 files changed, 279 insertions(+) create mode 100644 api/core/model_runtime/model_providers/localai/rerank/__init__.py create mode 100644 api/core/model_runtime/model_providers/localai/rerank/rerank.py create mode 100644 api/tests/integration_tests/model_runtime/localai/test_rerank.py diff --git a/api/core/model_runtime/model_providers/localai/localai.yaml b/api/core/model_runtime/model_providers/localai/localai.yaml index 151f02ee6f..864dd7a30c 100644 --- a/api/core/model_runtime/model_providers/localai/localai.yaml +++ b/api/core/model_runtime/model_providers/localai/localai.yaml @@ -15,6 +15,7 @@ help: supported_model_types: - llm - text-embedding + - rerank - speech2text configurate_methods: - customizable-model diff --git a/api/core/model_runtime/model_providers/localai/rerank/__init__.py b/api/core/model_runtime/model_providers/localai/rerank/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/core/model_runtime/model_providers/localai/rerank/rerank.py b/api/core/model_runtime/model_providers/localai/rerank/rerank.py new file mode 100644 index 0000000000..96087d06dc --- /dev/null +++ b/api/core/model_runtime/model_providers/localai/rerank/rerank.py @@ -0,0 +1,120 @@ +from json import dumps +from typing import Optional + +import httpx +from requests import post +from yarl import URL + +from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult +from core.model_runtime.errors.invoke import ( + InvokeAuthorizationError, + InvokeBadRequestError, + InvokeConnectionError, + InvokeError, + InvokeRateLimitError, + InvokeServerUnavailableError, +) +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.__base.rerank_model import RerankModel + + +class LocalaiRerankModel(RerankModel): + """ + LocalAI rerank model API is compatible with Jina rerank model API. So just copy the JinaRerankModel class code here. 
+ """ + + def _invoke(self, model: str, credentials: dict, + query: str, docs: list[str], score_threshold: Optional[float] = None, top_n: Optional[int] = None, + user: Optional[str] = None) -> RerankResult: + """ + Invoke rerank model + + :param model: model name + :param credentials: model credentials + :param query: search query + :param docs: docs for reranking + :param score_threshold: score threshold + :param top_n: top n documents to return + :param user: unique user id + :return: rerank result + """ + if len(docs) == 0: + return RerankResult(model=model, docs=[]) + + server_url = credentials['server_url'] + model_name = model + + if not server_url: + raise CredentialsValidateFailedError('server_url is required') + if not model_name: + raise CredentialsValidateFailedError('model_name is required') + + url = server_url + headers = { + 'Authorization': f"Bearer {credentials.get('api_key')}", + 'Content-Type': 'application/json' + } + + data = { + "model": model_name, + "query": query, + "documents": docs, + "top_n": top_n + } + + try: + response = post(str(URL(url) / 'rerank'), headers=headers, data=dumps(data), timeout=10) + response.raise_for_status() + results = response.json() + + rerank_documents = [] + for result in results['results']: + rerank_document = RerankDocument( + index=result['index'], + text=result['document']['text'], + score=result['relevance_score'], + ) + if score_threshold is None or result['relevance_score'] >= score_threshold: + rerank_documents.append(rerank_document) + + return RerankResult(model=model, docs=rerank_documents) + except httpx.HTTPStatusError as e: + raise InvokeServerUnavailableError(str(e)) + + def validate_credentials(self, model: str, credentials: dict) -> None: + """ + Validate model credentials + + :param model: model name + :param credentials: model credentials + :return: + """ + try: + + self._invoke( + model=model, + credentials=credentials, + query="What is the capital of the United States?", + docs=[ + "Carson City is the capital city of the American state of Nevada. At the 2010 United States " + "Census, Carson City had a population of 55,274.", + "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that " + "are a political division controlled by the United States. 
Its capital is Saipan.", + ], + score_threshold=0.8 + ) + except Exception as ex: + raise CredentialsValidateFailedError(str(ex)) + + @property + def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: + """ + Map model invoke error to unified error + """ + return { + InvokeConnectionError: [httpx.ConnectError], + InvokeServerUnavailableError: [httpx.RemoteProtocolError], + InvokeRateLimitError: [], + InvokeAuthorizationError: [httpx.HTTPStatusError], + InvokeBadRequestError: [httpx.RequestError] + } diff --git a/api/tests/integration_tests/model_runtime/localai/test_rerank.py b/api/tests/integration_tests/model_runtime/localai/test_rerank.py new file mode 100644 index 0000000000..a75439337e --- /dev/null +++ b/api/tests/integration_tests/model_runtime/localai/test_rerank.py @@ -0,0 +1,158 @@ +import os + +import pytest +from api.core.model_runtime.entities.rerank_entities import RerankResult + +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.localai.rerank.rerank import LocalaiRerankModel + + +def test_validate_credentials_for_chat_model(): + model = LocalaiRerankModel() + + with pytest.raises(CredentialsValidateFailedError): + model.validate_credentials( + model='bge-reranker-v2-m3', + credentials={ + 'server_url': 'hahahaha', + 'completion_type': 'completion', + } + ) + + model.validate_credentials( + model='bge-reranker-base', + credentials={ + 'server_url': os.environ.get('LOCALAI_SERVER_URL'), + 'completion_type': 'completion', + } + ) + +def test_invoke_rerank_model(): + model = LocalaiRerankModel() + + response = model.invoke( + model='bge-reranker-base', + credentials={ + 'server_url': os.environ.get('LOCALAI_SERVER_URL') + }, + query='Organic skincare products for sensitive skin', + docs=[ + "Eco-friendly kitchenware for modern homes", + "Biodegradable cleaning supplies for eco-conscious consumers", + "Organic cotton baby clothes for sensitive skin", + "Natural organic skincare range for sensitive skin", + "Tech gadgets for smart homes: 2024 edition", + "Sustainable gardening tools and compost solutions", + "Sensitive skin-friendly facial cleansers and toners", + "Organic food wraps and storage solutions", + "Yoga mats made from recycled materials" + ], + top_n=3, + score_threshold=0.75, + user="abc-123" + ) + + assert isinstance(response, RerankResult) + assert len(response.docs) == 3 +import os + +import pytest +from api.core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult + +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.localai.rerank.rerank import LocalaiRerankModel + + +def test_validate_credentials_for_chat_model(): + model = LocalaiRerankModel() + + with pytest.raises(CredentialsValidateFailedError): + model.validate_credentials( + model='bge-reranker-v2-m3', + credentials={ + 'server_url': 'hahahaha', + 'completion_type': 'completion', + } + ) + + model.validate_credentials( + model='bge-reranker-base', + credentials={ + 'server_url': os.environ.get('LOCALAI_SERVER_URL'), + 'completion_type': 'completion', + } + ) + +def test_invoke_rerank_model(): + model = LocalaiRerankModel() + + response = model.invoke( + model='bge-reranker-base', + credentials={ + 'server_url': os.environ.get('LOCALAI_SERVER_URL') + }, + query='Organic skincare products for sensitive skin', + docs=[ + "Eco-friendly kitchenware for modern homes", + "Biodegradable cleaning supplies for eco-conscious 
consumers", + "Organic cotton baby clothes for sensitive skin", + "Natural organic skincare range for sensitive skin", + "Tech gadgets for smart homes: 2024 edition", + "Sustainable gardening tools and compost solutions", + "Sensitive skin-friendly facial cleansers and toners", + "Organic food wraps and storage solutions", + "Yoga mats made from recycled materials" + ], + top_n=3, + score_threshold=0.75, + user="abc-123" + ) + + assert isinstance(response, RerankResult) + assert len(response.docs) == 3 + +def test__invoke(): + model = LocalaiRerankModel() + + # Test case 1: Empty docs + result = model._invoke( + model='bge-reranker-base', + credentials={ + 'server_url': 'https://example.com', + 'api_key': '1234567890' + }, + query='Organic skincare products for sensitive skin', + docs=[], + top_n=3, + score_threshold=0.75, + user="abc-123" + ) + assert isinstance(result, RerankResult) + assert len(result.docs) == 0 + + # Test case 2: Valid invocation + result = model._invoke( + model='bge-reranker-base', + credentials={ + 'server_url': 'https://example.com', + 'api_key': '1234567890' + }, + query='Organic skincare products for sensitive skin', + docs=[ + "Eco-friendly kitchenware for modern homes", + "Biodegradable cleaning supplies for eco-conscious consumers", + "Organic cotton baby clothes for sensitive skin", + "Natural organic skincare range for sensitive skin", + "Tech gadgets for smart homes: 2024 edition", + "Sustainable gardening tools and compost solutions", + "Sensitive skin-friendly facial cleansers and toners", + "Organic food wraps and storage solutions", + "Yoga mats made from recycled materials" + ], + top_n=3, + score_threshold=0.75, + user="abc-123" + ) + assert isinstance(result, RerankResult) + assert len(result.docs) == 3 + assert all(isinstance(doc, RerankDocument) for doc in result.docs) \ No newline at end of file From 4796f9d914faefcc04e39741bd1ec64be4c4b144 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=86=E8=90=8C=E9=97=B7=E6=B2=B9=E7=93=B6?= <253605712@qq.com> Date: Sat, 11 May 2024 13:02:56 +0800 Subject: [PATCH 052/267] feat:add gpt-4-turbo for azure (#4287) --- .../model_providers/azure_openai/_constant.py | 76 +++++++++++++++++++ .../azure_openai/azure_openai.yaml | 6 ++ 2 files changed, 82 insertions(+) diff --git a/api/core/model_runtime/model_providers/azure_openai/_constant.py b/api/core/model_runtime/model_providers/azure_openai/_constant.py index 99378f3aab..26ce858679 100644 --- a/api/core/model_runtime/model_providers/azure_openai/_constant.py +++ b/api/core/model_runtime/model_providers/azure_openai/_constant.py @@ -482,6 +482,82 @@ LLM_BASE_MODELS = [ ) ) ), + AzureBaseModel( + base_model_name='gpt-4-turbo', + entity=AIModelEntity( + model='fake-deployment-name', + label=I18nObject( + en_US='fake-deployment-name-label', + ), + model_type=ModelType.LLM, + features=[ + ModelFeature.AGENT_THOUGHT, + ModelFeature.VISION, + ModelFeature.MULTI_TOOL_CALL, + ModelFeature.STREAM_TOOL_CALL, + ], + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_properties={ + ModelPropertyKey.MODE: LLMMode.CHAT.value, + ModelPropertyKey.CONTEXT_SIZE: 128000, + }, + parameter_rules=[ + ParameterRule( + name='temperature', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], + ), + ParameterRule( + name='top_p', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], + ), + ParameterRule( + name='presence_penalty', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], + ), + ParameterRule( + name='frequency_penalty', + 
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], + ), + _get_max_tokens(default=512, min_val=1, max_val=4096), + ParameterRule( + name='seed', + label=I18nObject( + zh_Hans='种子', + en_US='Seed' + ), + type='int', + help=I18nObject( + zh_Hans='如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。', + en_US='If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.' + ), + required=False, + precision=2, + min=0, + max=1, + ), + ParameterRule( + name='response_format', + label=I18nObject( + zh_Hans='回复格式', + en_US='response_format' + ), + type='string', + help=I18nObject( + zh_Hans='指定模型必须输出的格式', + en_US='specifying the format that the model must output' + ), + required=False, + options=['text', 'json_object'] + ), + ], + pricing=PriceConfig( + input=0.001, + output=0.003, + unit=0.001, + currency='USD', + ) + ) + ), AzureBaseModel( base_model_name='gpt-4-turbo-2024-04-09', entity=AIModelEntity( diff --git a/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml b/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml index 828698acc7..9b2a1169c5 100644 --- a/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml +++ b/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml @@ -99,6 +99,12 @@ model_credential_schema: show_on: - variable: __model_type value: llm + - label: + en_US: gpt-4-turbo + value: gpt-4-turbo + show_on: + - variable: __model_type + value: llm - label: en_US: gpt-4-turbo-2024-04-09 value: gpt-4-turbo-2024-04-09 From d8926a2571558b3ee48e284d8f9e4adc27b79a69 Mon Sep 17 00:00:00 2001 From: takatost Date: Sat, 11 May 2024 13:40:11 +0800 Subject: [PATCH 053/267] feat: hide node detail outputs in webapp & installed app in explore (#3954) --- .../generate_response_converter.py | 4 ++ .../workflow/generate_response_converter.py | 24 ++++++++++- api/core/app/entities/task_entities.py | 43 +++++++++++++++++++ 3 files changed, 70 insertions(+), 1 deletion(-) diff --git a/api/core/app/apps/advanced_chat/generate_response_converter.py b/api/core/app/apps/advanced_chat/generate_response_converter.py index 80e8e22e88..08069332ba 100644 --- a/api/core/app/apps/advanced_chat/generate_response_converter.py +++ b/api/core/app/apps/advanced_chat/generate_response_converter.py @@ -8,6 +8,8 @@ from core.app.entities.task_entities import ( ChatbotAppStreamResponse, ErrorStreamResponse, MessageEndStreamResponse, + NodeFinishStreamResponse, + NodeStartStreamResponse, PingStreamResponse, ) @@ -111,6 +113,8 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter): if isinstance(sub_stream_response, ErrorStreamResponse): data = cls._error_to_stream_response(sub_stream_response.err) response_chunk.update(data) + elif isinstance(sub_stream_response, NodeStartStreamResponse | NodeFinishStreamResponse): + response_chunk.update(sub_stream_response.to_ignore_detail_dict()) else: response_chunk.update(sub_stream_response.to_dict()) diff --git a/api/core/app/apps/workflow/generate_response_converter.py b/api/core/app/apps/workflow/generate_response_converter.py index d907b82c99..88bde58ba0 100644 --- a/api/core/app/apps/workflow/generate_response_converter.py +++ b/api/core/app/apps/workflow/generate_response_converter.py @@ -5,6 +5,8 @@ 
from typing import cast from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter from core.app.entities.task_entities import ( ErrorStreamResponse, + NodeFinishStreamResponse, + NodeStartStreamResponse, PingStreamResponse, WorkflowAppBlockingResponse, WorkflowAppStreamResponse, @@ -68,4 +70,24 @@ class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter): :param stream_response: stream response :return: """ - return cls.convert_stream_full_response(stream_response) + for chunk in stream_response: + chunk = cast(WorkflowAppStreamResponse, chunk) + sub_stream_response = chunk.stream_response + + if isinstance(sub_stream_response, PingStreamResponse): + yield 'ping' + continue + + response_chunk = { + 'event': sub_stream_response.event.value, + 'workflow_run_id': chunk.workflow_run_id, + } + + if isinstance(sub_stream_response, ErrorStreamResponse): + data = cls._error_to_stream_response(sub_stream_response.err) + response_chunk.update(data) + elif isinstance(sub_stream_response, NodeStartStreamResponse | NodeFinishStreamResponse): + response_chunk.update(sub_stream_response.to_ignore_detail_dict()) + else: + response_chunk.update(sub_stream_response.to_dict()) + yield json.dumps(response_chunk) diff --git a/api/core/app/entities/task_entities.py b/api/core/app/entities/task_entities.py index 4994efe2e9..1a11ac9aa3 100644 --- a/api/core/app/entities/task_entities.py +++ b/api/core/app/entities/task_entities.py @@ -246,6 +246,24 @@ class NodeStartStreamResponse(StreamResponse): workflow_run_id: str data: Data + def to_ignore_detail_dict(self): + return { + "event": self.event.value, + "task_id": self.task_id, + "workflow_run_id": self.workflow_run_id, + "data": { + "id": self.data.id, + "node_id": self.data.node_id, + "node_type": self.data.node_type, + "title": self.data.title, + "index": self.data.index, + "predecessor_node_id": self.data.predecessor_node_id, + "inputs": None, + "created_at": self.data.created_at, + "extras": {} + } + } + class NodeFinishStreamResponse(StreamResponse): """ @@ -276,6 +294,31 @@ class NodeFinishStreamResponse(StreamResponse): workflow_run_id: str data: Data + def to_ignore_detail_dict(self): + return { + "event": self.event.value, + "task_id": self.task_id, + "workflow_run_id": self.workflow_run_id, + "data": { + "id": self.data.id, + "node_id": self.data.node_id, + "node_type": self.data.node_type, + "title": self.data.title, + "index": self.data.index, + "predecessor_node_id": self.data.predecessor_node_id, + "inputs": None, + "process_data": None, + "outputs": None, + "status": self.data.status, + "error": None, + "elapsed_time": self.data.elapsed_time, + "execution_metadata": None, + "created_at": self.data.created_at, + "finished_at": self.data.finished_at, + "files": [] + } + } + class TextChunkStreamResponse(StreamResponse): """ From 1663df8a0560a936055b7a2fe4416c9a71a49921 Mon Sep 17 00:00:00 2001 From: Joel Date: Sat, 11 May 2024 13:40:27 +0800 Subject: [PATCH 054/267] feat: hide run detail in webapps and installed apps (#4289) --- .../app/text-generate/item/index.tsx | 4 +- .../chat/chat-with-history/chat-wrapper.tsx | 1 + .../base/chat/chat/answer/index.tsx | 4 +- .../chat/chat/answer/workflow-process.tsx | 3 ++ web/app/components/base/chat/chat/index.tsx | 3 ++ .../share/text-generation/result/index.tsx | 1 + .../nodes/llm/components/config-prompt.tsx | 2 +- .../components/workflow/nodes/llm/default.ts | 9 ++++- web/app/components/workflow/run/node.tsx | 39 ++++++++++++------- 9 files changed, 49 
insertions(+), 17 deletions(-) diff --git a/web/app/components/app/text-generate/item/index.tsx b/web/app/components/app/text-generate/item/index.tsx index d65587fe75..7da907210e 100644 --- a/web/app/components/app/text-generate/item/index.tsx +++ b/web/app/components/app/text-generate/item/index.tsx @@ -58,6 +58,7 @@ export type IGenerationItemProps = { innerClassName?: string contentClassName?: string footerClassName?: string + hideProcessDetail?: boolean } export const SimpleBtn = ({ className, isDisabled, onClick, children }: { @@ -108,6 +109,7 @@ const GenerationItem: FC = ({ varList, innerClassName, contentClassName, + hideProcessDetail, }) => { const { t } = useTranslation() const params = useParams() @@ -291,7 +293,7 @@ const GenerationItem: FC = ({
{workflowProcessData && ( - + )} {workflowProcessData && !isError && ( diff --git a/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx b/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx index 9e20bddada..99c106293d 100644 --- a/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx +++ b/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx @@ -140,6 +140,7 @@ const ChatWrapper = () => { allToolIcons={appMeta?.tool_icons || {}} onFeedback={handleFeedback} suggestedQuestions={suggestedQuestions} + hideProcessDetail /> ) } diff --git a/web/app/components/base/chat/chat/answer/index.tsx b/web/app/components/base/chat/chat/answer/index.tsx index d338efc7e5..f2739f6374 100644 --- a/web/app/components/base/chat/chat/answer/index.tsx +++ b/web/app/components/base/chat/chat/answer/index.tsx @@ -31,6 +31,7 @@ type AnswerProps = { allToolIcons?: Record showPromptLog?: boolean chatAnswerContainerInner?: string + hideProcessDetail?: boolean } const Answer: FC = ({ item, @@ -42,6 +43,7 @@ const Answer: FC = ({ allToolIcons, showPromptLog, chatAnswerContainerInner, + hideProcessDetail, }) => { const { t } = useTranslation() const { @@ -129,7 +131,7 @@ const Answer: FC = ({ } { workflowProcess && ( - + ) } { diff --git a/web/app/components/base/chat/chat/answer/workflow-process.tsx b/web/app/components/base/chat/chat/answer/workflow-process.tsx index 43f412b50f..4eef0b938f 100644 --- a/web/app/components/base/chat/chat/answer/workflow-process.tsx +++ b/web/app/components/base/chat/chat/answer/workflow-process.tsx @@ -18,12 +18,14 @@ type WorkflowProcessProps = { grayBg?: boolean expand?: boolean hideInfo?: boolean + hideProcessDetail?: boolean } const WorkflowProcessItem = ({ data, grayBg, expand = false, hideInfo = false, + hideProcessDetail = false, }: WorkflowProcessProps) => { const { t } = useTranslation() const [collapse, setCollapse] = useState(!expand) @@ -94,6 +96,7 @@ const WorkflowProcessItem = ({
)) diff --git a/web/app/components/base/chat/chat/index.tsx b/web/app/components/base/chat/chat/index.tsx index 4c9718de7f..a66074e8ba 100644 --- a/web/app/components/base/chat/chat/index.tsx +++ b/web/app/components/base/chat/chat/index.tsx @@ -54,6 +54,7 @@ export type ChatProps = { chatNode?: ReactNode onFeedback?: (messageId: string, feedback: Feedback) => void chatAnswerContainerInner?: string + hideProcessDetail?: boolean } const Chat: FC = ({ config, @@ -78,6 +79,7 @@ const Chat: FC = ({ chatNode, onFeedback, chatAnswerContainerInner, + hideProcessDetail, }) => { const { t } = useTranslation() const { currentLogItem, setCurrentLogItem, showPromptLogModal, setShowPromptLogModal, showAgentLogModal, setShowAgentLogModal } = useAppStore(useShallow(state => ({ @@ -204,6 +206,7 @@ const Chat: FC = ({ allToolIcons={allToolIcons} showPromptLog={showPromptLog} chatAnswerContainerInner={chatAnswerContainerInner} + hideProcessDetail={hideProcessDetail} /> ) } diff --git a/web/app/components/share/text-generation/result/index.tsx b/web/app/components/share/text-generation/result/index.tsx index 8678e65ea5..cf6c31c6c3 100644 --- a/web/app/components/share/text-generation/result/index.tsx +++ b/web/app/components/share/text-generation/result/index.tsx @@ -332,6 +332,7 @@ const Result: FC = ({ taskId={isCallBatchAPI ? ((taskId as number) < 10 ? `0${taskId}` : `${taskId}`) : undefined} controlClearMoreLikeThis={controlClearMoreLikeThis} isShowTextToSpeech={isShowTextToSpeech} + hideProcessDetail /> ) diff --git a/web/app/components/workflow/nodes/llm/components/config-prompt.tsx b/web/app/components/workflow/nodes/llm/components/config-prompt.tsx index b79ecfa62a..6a76a58a43 100644 --- a/web/app/components/workflow/nodes/llm/components/config-prompt.tsx +++ b/web/app/components/workflow/nodes/llm/components/config-prompt.tsx @@ -146,7 +146,7 @@ const ConfigPrompt: FC = ({ { - if ((payload as PromptItem[])?.[0].role === PromptRole.system && list[0].p.role !== PromptRole.system) + if ((payload as PromptItem[])?.[0].role === PromptRole.system && list[0].p?.role !== PromptRole.system) return onChange(list.map(item => item.p)) diff --git a/web/app/components/workflow/nodes/llm/default.ts b/web/app/components/workflow/nodes/llm/default.ts index e68b0cb318..803add6f00 100644 --- a/web/app/components/workflow/nodes/llm/default.ts +++ b/web/app/components/workflow/nodes/llm/default.ts @@ -44,7 +44,14 @@ const nodeDefault: NodeDefault = { if (!errorMessages && !payload.memory) { const isChatModel = payload.model.mode === 'chat' - const isPromptyEmpty = isChatModel ? !(payload.prompt_template as PromptItem[]).some(t => t.text !== '') : (payload.prompt_template as PromptItem).text === '' + const isPromptyEmpty = isChatModel + ? !(payload.prompt_template as PromptItem[]).some((t) => { + if (t.edition_type === EditionType.jinja2) + return t.jinja2_text !== '' + + return t.text !== '' + }) + : ((payload.prompt_template as PromptItem).edition_type === EditionType.jinja2 ? 
(payload.prompt_template as PromptItem).jinja2_text === '' : (payload.prompt_template as PromptItem).text === '') if (isPromptyEmpty) errorMessages = t(`${i18nPrefix}.fieldRequired`, { field: t('workflow.nodes.llm.prompt') }) } diff --git a/web/app/components/workflow/run/node.tsx b/web/app/components/workflow/run/node.tsx index 237ac1c3de..fbab1b342f 100644 --- a/web/app/components/workflow/run/node.tsx +++ b/web/app/components/workflow/run/node.tsx @@ -1,7 +1,7 @@ 'use client' import { useTranslation } from 'react-i18next' import type { FC } from 'react' -import { useEffect, useState } from 'react' +import { useCallback, useEffect, useState } from 'react' import cn from 'classnames' import BlockIcon from '../block-icon' import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/code-editor' @@ -14,10 +14,20 @@ import type { NodeTracing } from '@/types/workflow' type Props = { nodeInfo: NodeTracing hideInfo?: boolean + hideProcessDetail?: boolean } -const NodePanel: FC = ({ nodeInfo, hideInfo = false }) => { - const [collapseState, setCollapseState] = useState(true) +const NodePanel: FC = ({ + nodeInfo, + hideInfo = false, + hideProcessDetail, +}) => { + const [collapseState, doSetCollapseState] = useState(true) + const setCollapseState = useCallback((state: boolean) => { + if (hideProcessDetail) + return + doSetCollapseState(state) + }, [hideProcessDetail]) const { t } = useTranslation() const getTime = (time: number) => { @@ -39,7 +49,7 @@ const NodePanel: FC = ({ nodeInfo, hideInfo = false }) => { useEffect(() => { setCollapseState(!nodeInfo.expand) - }, [nodeInfo.expand]) + }, [nodeInfo.expand, setCollapseState]) return (
@@ -52,12 +62,15 @@ const NodePanel: FC = ({ nodeInfo, hideInfo = false }) => { )} onClick={() => setCollapseState(!collapseState)} > - + {!hideProcessDetail && ( + + )} +
= ({ nodeInfo, hideInfo = false }) => { )} {nodeInfo.status === 'running' && (
- - Running + Running +
)}
- {!collapseState && ( + {!collapseState && !hideProcessDetail && (
{nodeInfo.status === 'stopped' && ( From 198d6c00d6433feda2cb69b31c480832674e94b5 Mon Sep 17 00:00:00 2001 From: rechardwang Date: Sat, 11 May 2024 13:41:12 +0800 Subject: [PATCH 055/267] Update docker-compose.yaml (#4288) --- docker/docker-compose.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 1baba04bd2..0c3a0c202f 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -177,6 +177,7 @@ services: image: langgenius/dify-api:0.6.7 restart: always environment: + CONSOLE_WEB_URL: '' # Startup mode, 'worker' starts the Celery worker for processing the queue. MODE: worker @@ -251,6 +252,11 @@ services: MAIL_TYPE: '' # default send from email address, if not specified MAIL_DEFAULT_SEND_FROM: 'YOUR EMAIL FROM (eg: no-reply )' + SMTP_SERVER: '' + SMTP_PORT: 587 + SMTP_USERNAME: '' + SMTP_PASSWORD: '' + SMTP_USE_TLS: 'true' # the api-key for resend (https://resend.com) RESEND_API_KEY: '' RESEND_API_URL: https://api.resend.com From 34d3998566fe2b3a02e003025886423a1206c20e Mon Sep 17 00:00:00 2001 From: Joel Date: Sat, 11 May 2024 14:42:04 +0800 Subject: [PATCH 056/267] fix: webapps not show number type input field (#4292) --- .../base/chat/chat-with-history/config-panel/form.tsx | 11 +++++++++++ .../components/base/chat/chat-with-history/hooks.tsx | 11 +++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/web/app/components/base/chat/chat-with-history/config-panel/form.tsx b/web/app/components/base/chat/chat-with-history/config-panel/form.tsx index d34b1cc30f..39b2426a5f 100644 --- a/web/app/components/base/chat/chat-with-history/config-panel/form.tsx +++ b/web/app/components/base/chat/chat-with-history/config-panel/form.tsx @@ -37,6 +37,17 @@ const Form = () => { /> ) } + if (form.type === 'number') { + return ( + handleFormChange(variable, e.target.value)} + placeholder={`${label}${!required ? 
`(${t('appDebug.variableTable.optional')})` : ''}`} + /> + ) + } return ( { setNewConversationInputs(newInputs) }, []) const inputsForms = useMemo(() => { - return (appParams?.user_input_form || []).filter((item: any) => item.paragraph || item.select || item['text-input']).map((item: any) => { + return (appParams?.user_input_form || []).filter((item: any) => item.paragraph || item.select || item['text-input'] || item.number).map((item: any) => { if (item.paragraph) { return { ...item.paragraph, type: 'paragraph', } } + if (item.number) { + return { + ...item.number, + type: 'number', + } + } if (item.select) { return { ...item.select, type: 'select', } } + return { ...item['text-input'], type: 'text-input', @@ -226,7 +233,7 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => { setShowNewConversationItemInList(true) } }, [setShowConfigPanelBeforeChat, setShowNewConversationItemInList, checkInputsRequired]) - const currentChatInstanceRef = useRef<{ handleStop: () => void }>({ handleStop: () => {} }) + const currentChatInstanceRef = useRef<{ handleStop: () => void }>({ handleStop: () => { } }) const handleChangeConversation = useCallback((conversationId: string) => { currentChatInstanceRef.current.handleStop() setNewConversationId('') From 20a9037d5ba5467ac0f42e520963cc7c03a36f25 Mon Sep 17 00:00:00 2001 From: Nite Knite Date: Sat, 11 May 2024 15:39:56 +0800 Subject: [PATCH 057/267] fix: align versions of react typing package (#4297) --- web/package.json | 12 ++++++--- web/yarn.lock | 69 ++++++++++++++++++++++++++++-------------------- 2 files changed, 48 insertions(+), 33 deletions(-) diff --git a/web/package.json b/web/package.json index 6183a093fa..6c183f7205 100644 --- a/web/package.json +++ b/web/package.json @@ -56,9 +56,9 @@ "qrcode.react": "^3.1.0", "qs": "^6.11.1", "rc-textarea": "^1.5.2", - "react": "^18.2.0", + "react": "~18.2.0", "react-18-input-autosize": "^3.0.0", - "react-dom": "^18.2.0", + "react-dom": "~18.2.0", "react-error-boundary": "^4.0.2", "react-headless-pagination": "^1.1.4", "react-hook-form": "^7.51.4", @@ -100,8 +100,8 @@ "@types/negotiator": "^0.6.1", "@types/node": "18.15.0", "@types/qs": "^6.9.7", - "@types/react": "^18.3.1", - "@types/react-dom": "18.0.11", + "@types/react": "~18.2.0", + "@types/react-dom": "~18.2.0", "@types/react-slider": "^1.3.1", "@types/react-syntax-highlighter": "^15.5.6", "@types/react-window": "^1.8.5", @@ -132,5 +132,9 @@ }, "engines": { "node": ">=18.17.0" + }, + "resolutions": { + "@types/react": "~18.2.0", + "@types/react-dom": "~18.2.0" } } diff --git a/web/yarn.lock b/web/yarn.lock index 60c2cc94b7..0fea151c25 100644 --- a/web/yarn.lock +++ b/web/yarn.lock @@ -1222,10 +1222,10 @@ resolved "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz" integrity sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw== -"@types/react-dom@18.0.11": - version "18.0.11" - resolved "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.0.11.tgz" - integrity sha512-O38bPbI2CWtgw/OoQoY+BRelw7uysmXbWvw3nLWO21H1HSh+GOlqPuXshJfjmpNlKiiSDG9cc1JZAaMmVdcTlw== +"@types/react-dom@~18.2.0": + version "18.2.25" + resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-18.2.25.tgz#2946a30081f53e7c8d585eb138277245caedc521" + integrity sha512-o/V48vf4MQh7juIKZU2QGDfli6p1+OOi5oXx36Hffpc9adsHeXjVp8rHuPkjd8VT8sOJ2Zp05HR7CdpGTIUFUA== dependencies: "@types/react" "*" @@ -1258,19 +1258,10 @@ dependencies: "@types/react" "*" -"@types/react@*", "@types/react@>=16": - version "18.0.28" - 
resolved "https://registry.npmjs.org/@types/react/-/react-18.0.28.tgz" - integrity sha512-RD0ivG1kEztNBdoAK7lekI9M+azSnitIn85h4iOiaLjaTrMjzslhaqCGaI4IyCJ1RljWiLCEu4jyrLLgqxBTew== - dependencies: - "@types/prop-types" "*" - "@types/scheduler" "*" - csstype "^3.0.2" - -"@types/react@^18.3.1": - version "18.3.1" - resolved "https://registry.npmjs.org/@types/react/-/react-18.3.1.tgz#fed43985caa834a2084d002e4771e15dfcbdbe8e" - integrity sha512-V0kuGBX3+prX+DQ/7r2qsv1NsdfnCLnTgnRJ1pYnxykBhGMz+qj+box5lq7XsO5mtZsBqpjwwTu/7wszPfMBcw== +"@types/react@*", "@types/react@>=16", "@types/react@~18.2.0": + version "18.2.79" + resolved "https://registry.yarnpkg.com/@types/react/-/react-18.2.79.tgz#c40efb4f255711f554d47b449f796d1c7756d865" + integrity sha512-RwGAGXPl9kSXwdNTafkOEuFrTBD5SA2B3iEB96xi8+xu5ddUa/cpvyVCSNn+asgLCTHkb5ZxN8gbuibYJi4s1w== dependencies: "@types/prop-types" "*" csstype "^3.0.2" @@ -1280,11 +1271,6 @@ resolved "https://registry.npmjs.org/@types/recordrtc/-/recordrtc-5.6.11.tgz" integrity sha512-X4XD5nltz0cjmyzsPNegQReOPF+C5ARTfSPAPhqnKV7SsfRta/M4FBJ5AtSInCaEveL71FLLSVQE9mg8Uuo++w== -"@types/scheduler@*": - version "0.16.3" - resolved "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz" - integrity sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ== - "@types/semver@^7.3.12": version "7.5.0" resolved "https://registry.npmjs.org/@types/semver/-/semver-7.5.0.tgz" @@ -5749,9 +5735,9 @@ react-18-input-autosize@^3.0.0: dependencies: prop-types "^15.5.8" -react-dom@^18.2.0: +react-dom@~18.2.0: version "18.2.0" - resolved "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz" + resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-18.2.0.tgz#22aaf38708db2674ed9ada224ca4aa708d821e3d" integrity sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g== dependencies: loose-envify "^1.1.0" @@ -5889,9 +5875,9 @@ react-window@^1.8.9: "@babel/runtime" "^7.0.0" memoize-one ">=3.1.1 <6" -react@^18.2.0: +react@~18.2.0: version "18.2.0" - resolved "https://registry.npmjs.org/react/-/react-18.2.0.tgz" + resolved "https://registry.yarnpkg.com/react/-/react-18.2.0.tgz#555bd98592883255fa00de14f1151a917b5d77d5" integrity sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ== dependencies: loose-envify "^1.1.0" @@ -6446,7 +6432,16 @@ string-argv@^0.3.1: resolved "https://registry.npmjs.org/string-argv/-/string-argv-0.3.2.tgz" integrity sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q== -"string-width-cjs@npm:string-width@^4.2.0", string-width@^4.1.0, string-width@^4.2.0: +"string-width-cjs@npm:string-width@^4.2.0": + version "4.2.3" + resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + +string-width@^4.1.0, string-width@^4.2.0: version "4.2.3" resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -6514,7 +6509,14 @@ stringify-entities@^4.0.0: character-entities-html4 "^2.0.0" character-entities-legacy "^3.0.0" -"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1: +"strip-ansi-cjs@npm:strip-ansi@^6.0.1": + version "6.0.1" + resolved 
"https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + +strip-ansi@^6.0.0, strip-ansi@^6.0.1: version "6.0.1" resolved "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz" integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== @@ -7127,7 +7129,7 @@ word-wrap@^1.2.3: resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.5.tgz#d2c45c6dd4fbce621a66f136cbe328afd0410b34" integrity sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA== -"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0", wrap-ansi@^7.0.0: +"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0": version "7.0.0" resolved "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz" integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== @@ -7145,6 +7147,15 @@ wrap-ansi@^6.2.0: string-width "^4.1.0" strip-ansi "^6.0.0" +wrap-ansi@^7.0.0: + version "7.0.0" + resolved "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + wrap-ansi@^8.1.0: version "8.1.0" resolved "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz" From c01c95d77f342622c60bbded399672ce3e58ab76 Mon Sep 17 00:00:00 2001 From: Joel Date: Sat, 11 May 2024 16:23:31 +0800 Subject: [PATCH 058/267] fix: chatflow run progress problem (#4298) --- web/app/components/base/chat/chat/hooks.ts | 5 ++++- .../components/workflow/panel/debug-and-preview/hooks.ts | 9 ++++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/web/app/components/base/chat/chat/hooks.ts b/web/app/components/base/chat/chat/hooks.ts index 61d34642d8..382c01e167 100644 --- a/web/app/components/base/chat/chat/hooks.ts +++ b/web/app/components/base/chat/chat/hooks.ts @@ -468,7 +468,10 @@ export const useChat = ( })) }, onNodeStarted: ({ data }) => { - responseItem.workflowProcess!.tracing!.push(data as any) + responseItem.workflowProcess!.tracing!.push({ + ...data, + status: WorkflowRunningStatus.Running, + } as any) handleUpdateChatList(produce(chatListRef.current, (draft) => { const currentIndex = draft.findIndex(item => item.id === responseItem.id) draft[currentIndex] = { diff --git a/web/app/components/workflow/panel/debug-and-preview/hooks.ts b/web/app/components/workflow/panel/debug-and-preview/hooks.ts index 72a39e2051..1c143a2b01 100644 --- a/web/app/components/workflow/panel/debug-and-preview/hooks.ts +++ b/web/app/components/workflow/panel/debug-and-preview/hooks.ts @@ -7,7 +7,7 @@ import { import { useTranslation } from 'react-i18next' import { produce, setAutoFreeze } from 'immer' import { useWorkflowRun } from '../../hooks' -import { WorkflowRunningStatus } from '../../types' +import { NodeRunningStatus, WorkflowRunningStatus } from '../../types' import type { ChatItem, Inputs, @@ -173,7 +173,7 @@ export const useChat = ( // answer const responseItem: ChatItem = { - id: `${Date.now()}`, + id: placeholderAnswerId, content: '', agent_thoughts: [], message_files: [], @@ -298,7 +298,10 @@ export const useChat = ( })) }, onNodeStarted: ({ data }) => { - responseItem.workflowProcess!.tracing!.push(data as any) + responseItem.workflowProcess!.tracing!.push({ + ...data, + status: 
NodeRunningStatus.Running, + } as any) handleUpdateChatList(produce(chatListRef.current, (draft) => { const currentIndex = draft.findIndex(item => item.id === responseItem.id) draft[currentIndex] = { From 4af00e4a451043d0d21ce56fd4a9fd012af70395 Mon Sep 17 00:00:00 2001 From: Joel Date: Sat, 11 May 2024 16:59:17 +0800 Subject: [PATCH 059/267] feat: support copy run text result in debug panel in workflow (#4300) --- .../app/text-generate/item/index.tsx | 34 +++++++++++-------- .../app/text-generate/item/result-tab.tsx | 9 ++--- .../workflow/panel/workflow-preview.tsx | 33 ++++++++++++++---- 3 files changed, 52 insertions(+), 24 deletions(-) diff --git a/web/app/components/app/text-generate/item/index.tsx b/web/app/components/app/text-generate/item/index.tsx index 7da907210e..c3086bc829 100644 --- a/web/app/components/app/text-generate/item/index.tsx +++ b/web/app/components/app/text-generate/item/index.tsx @@ -267,6 +267,8 @@ const GenerationItem: FC = ({ ) + const [currentTab, setCurrentTab] = useState('DETAIL') + return (
= ({ )} {workflowProcessData && !isError && ( - + )} {isError && (
{t('share.generation.batchFailed.outputPlaceholder')}
@@ -320,19 +322,23 @@ const GenerationItem: FC = ({ ) } - { - if (typeof content === 'string') - copy(content) - else - copy(JSON.stringify(content)) - Toast.notify({ type: 'success', message: t('common.actionMsg.copySuccessfully') }) - }}> - - {!isMobile &&
{t('common.operation.copy')}
} -
+ {currentTab === 'RESULT' && ( + { + const content = workflowProcessData?.resultText + if (typeof content === 'string') + copy(content) + else + copy(JSON.stringify(content)) + Toast.notify({ type: 'success', message: t('common.actionMsg.copySuccessfully') }) + }}> + + {!isMobile &&
{t('common.operation.copy')}
} +
+ )} + {isInWebApp && ( <> {!isWorkflow && ( diff --git a/web/app/components/app/text-generate/item/result-tab.tsx b/web/app/components/app/text-generate/item/result-tab.tsx index 6a24acc843..3f48dd1b94 100644 --- a/web/app/components/app/text-generate/item/result-tab.tsx +++ b/web/app/components/app/text-generate/item/result-tab.tsx @@ -1,8 +1,6 @@ import { memo, useEffect, - // useRef, - useState, } from 'react' import cn from 'classnames' import { useTranslation } from 'react-i18next' @@ -16,15 +14,18 @@ import type { WorkflowProcess } from '@/app/components/base/chat/types' const ResultTab = ({ data, content, + currentTab, + onCurrentTabChange, }: { data?: WorkflowProcess content: any + currentTab: string + onCurrentTabChange: (tab: string) => void }) => { const { t } = useTranslation() - const [currentTab, setCurrentTab] = useState('DETAIL') const switchTab = async (tab: string) => { - setCurrentTab(tab) + onCurrentTabChange(tab) } useEffect(() => { if (data?.resultText) diff --git a/web/app/components/workflow/panel/workflow-preview.tsx b/web/app/components/workflow/panel/workflow-preview.tsx index 95b1d48e24..0f9997f06d 100644 --- a/web/app/components/workflow/panel/workflow-preview.tsx +++ b/web/app/components/workflow/panel/workflow-preview.tsx @@ -6,6 +6,7 @@ import { } from 'react' import cn from 'classnames' import { useTranslation } from 'react-i18next' +import copy from 'copy-to-clipboard' import ResultText from '../run/result-text' import ResultPanel from '../run/result-panel' import TracingPanel from '../run/tracing-panel' @@ -16,9 +17,12 @@ import { useStore } from '../store' import { WorkflowRunningStatus, } from '../types' +import { SimpleBtn } from '../../app/text-generate/item' +import Toast from '../../base/toast' import InputsPanel from './inputs-panel' import Loading from '@/app/components/base/loading' import { XClose } from '@/app/components/base/icons/src/vender/line/general' +import { Clipboard } from '@/app/components/base/icons/src/vender/line/files' const WorkflowPreview = () => { const { t } = useTranslation() @@ -108,12 +112,28 @@ const WorkflowPreview = () => { switchTab('RESULT')} /> )} {currentTab === 'RESULT' && ( - switchTab('DETAIL')} - /> + <> + switchTab('DETAIL')} + /> + { + const content = workflowRunningData?.resultText + if (typeof content === 'string') + copy(content) + else + copy(JSON.stringify(content)) + Toast.notify({ type: 'success', message: t('common.actionMsg.copySuccessfully') }) + }}> + +
{t('common.operation.copy')}
+
+ )} {currentTab === 'DETAIL' && ( {
)} +
From aa5ca90f00d5a2367b4c2d63d6c37d8febb1e521 Mon Sep 17 00:00:00 2001 From: Joel Date: Sat, 11 May 2024 20:39:17 +0800 Subject: [PATCH 060/267] fix: text generation app not show copy button (#4304) --- web/app/components/app/text-generate/item/index.tsx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/web/app/components/app/text-generate/item/index.tsx b/web/app/components/app/text-generate/item/index.tsx index c3086bc829..e8b98e554d 100644 --- a/web/app/components/app/text-generate/item/index.tsx +++ b/web/app/components/app/text-generate/item/index.tsx @@ -322,16 +322,16 @@ const GenerationItem: FC = ({ ) } - {currentTab === 'RESULT' && ( + {(currentTab === 'RESULT' || !isWorkflow) && ( { - const content = workflowProcessData?.resultText - if (typeof content === 'string') - copy(content) + const copyContent = isWorkflow ? workflowProcessData?.resultText : content + if (typeof copyContent === 'string') + copy(copyContent) else - copy(JSON.stringify(content)) + copy(JSON.stringify(copyContent)) Toast.notify({ type: 'success', message: t('common.actionMsg.copySuccessfully') }) }}> From f7986805c6c686498f04dcfc8620718fb1f1a933 Mon Sep 17 00:00:00 2001 From: Chenhe Gu Date: Sat, 11 May 2024 20:48:15 +0800 Subject: [PATCH 061/267] Update README.md to remove outdated badge (#4302) --- README.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/README.md b/README.md index 7433ce8070..f331c442c3 100644 --- a/README.md +++ b/README.md @@ -37,11 +37,7 @@ README tlhIngan Hol

-# -

- langgenius%2Fdify | Trendshift -

Dify is an open-source LLM app development platform. Its intuitive interface combines AI workflow, RAG pipeline, agent capabilities, model management, observability features and more, letting you quickly go from prototype to production. Here's a list of the core features:

From a80fe20456c14c741b381a0d95f4341d636d0ee8 Mon Sep 17 00:00:00 2001 From: Joshua <138381132+joshua20231026@users.noreply.github.com> Date: Sat, 11 May 2024 21:05:31 +0800 Subject: [PATCH 062/267] add-some-new-models-hosted-on-nvidia (#4303) --- .../model_providers/nvidia/llm/_position.yaml | 4 ++ .../model_providers/nvidia/llm/arctic.yaml | 36 ++++++++++++++++++ .../model_providers/nvidia/llm/llm.py | 6 ++- .../nvidia/llm/mistral-large.yaml | 36 ++++++++++++++++++ .../llm/mixtral-8x22b-instruct-v0.1.yaml | 36 ++++++++++++++++++ .../nvidia/llm/recurrentgemma-2b.yaml | 37 +++++++++++++++++++ 6 files changed, 154 insertions(+), 1 deletion(-) create mode 100644 api/core/model_runtime/model_providers/nvidia/llm/arctic.yaml create mode 100644 api/core/model_runtime/model_providers/nvidia/llm/mistral-large.yaml create mode 100644 api/core/model_runtime/model_providers/nvidia/llm/mixtral-8x22b-instruct-v0.1.yaml create mode 100644 api/core/model_runtime/model_providers/nvidia/llm/recurrentgemma-2b.yaml diff --git a/api/core/model_runtime/model_providers/nvidia/llm/_position.yaml b/api/core/model_runtime/model_providers/nvidia/llm/_position.yaml index fc69862722..2401f2a890 100644 --- a/api/core/model_runtime/model_providers/nvidia/llm/_position.yaml +++ b/api/core/model_runtime/model_providers/nvidia/llm/_position.yaml @@ -1,7 +1,11 @@ - google/gemma-7b - google/codegemma-7b +- google/recurrentgemma-2b - meta/llama2-70b - meta/llama3-8b-instruct - meta/llama3-70b-instruct +- mistralai/mistral-large - mistralai/mixtral-8x7b-instruct-v0.1 +- mistralai/mixtral-8x22b-instruct-v0.1 - fuyu-8b +- snowflake/arctic diff --git a/api/core/model_runtime/model_providers/nvidia/llm/arctic.yaml b/api/core/model_runtime/model_providers/nvidia/llm/arctic.yaml new file mode 100644 index 0000000000..7f53ae58e6 --- /dev/null +++ b/api/core/model_runtime/model_providers/nvidia/llm/arctic.yaml @@ -0,0 +1,36 @@ +model: snowflake/arctic +label: + zh_Hans: snowflake/arctic + en_US: snowflake/arctic +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 4000 +parameter_rules: + - name: temperature + use_template: temperature + min: 0 + max: 1 + default: 0.5 + - name: top_p + use_template: top_p + min: 0 + max: 1 + default: 1 + - name: max_tokens + use_template: max_tokens + min: 1 + max: 1024 + default: 1024 + - name: frequency_penalty + use_template: frequency_penalty + min: -2 + max: 2 + default: 0 + - name: presence_penalty + use_template: presence_penalty + min: -2 + max: 2 + default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/llm.py b/api/core/model_runtime/model_providers/nvidia/llm/llm.py index 402ffb2cf2..047bbeda63 100644 --- a/api/core/model_runtime/model_providers/nvidia/llm/llm.py +++ b/api/core/model_runtime/model_providers/nvidia/llm/llm.py @@ -22,12 +22,16 @@ from core.model_runtime.utils import helper class NVIDIALargeLanguageModel(OAIAPICompatLargeLanguageModel): MODEL_SUFFIX_MAP = { 'fuyu-8b': 'vlm/adept/fuyu-8b', + 'mistralai/mistral-large': '', 'mistralai/mixtral-8x7b-instruct-v0.1': '', + 'mistralai/mixtral-8x22b-instruct-v0.1': '', 'google/gemma-7b': '', 'google/codegemma-7b': '', + 'snowflake/arctic':'', 'meta/llama2-70b': '', 'meta/llama3-8b-instruct': '', - 'meta/llama3-70b-instruct': '' + 'meta/llama3-70b-instruct': '', + 'google/recurrentgemma-2b': '' } diff --git a/api/core/model_runtime/model_providers/nvidia/llm/mistral-large.yaml b/api/core/model_runtime/model_providers/nvidia/llm/mistral-large.yaml new file mode 100644 index 
0000000000..3e14d22141 --- /dev/null +++ b/api/core/model_runtime/model_providers/nvidia/llm/mistral-large.yaml @@ -0,0 +1,36 @@ +model: mistralai/mistral-large +label: + zh_Hans: mistralai/mistral-large + en_US: mistralai/mistral-large +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 32000 +parameter_rules: + - name: temperature + use_template: temperature + min: 0 + max: 1 + default: 0.5 + - name: top_p + use_template: top_p + min: 0 + max: 1 + default: 1 + - name: max_tokens + use_template: max_tokens + min: 1 + max: 1024 + default: 1024 + - name: frequency_penalty + use_template: frequency_penalty + min: -2 + max: 2 + default: 0 + - name: presence_penalty + use_template: presence_penalty + min: -2 + max: 2 + default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/mixtral-8x22b-instruct-v0.1.yaml b/api/core/model_runtime/model_providers/nvidia/llm/mixtral-8x22b-instruct-v0.1.yaml new file mode 100644 index 0000000000..05500c0336 --- /dev/null +++ b/api/core/model_runtime/model_providers/nvidia/llm/mixtral-8x22b-instruct-v0.1.yaml @@ -0,0 +1,36 @@ +model: mistralai/mixtral-8x22b-instruct-v0.1 +label: + zh_Hans: mistralai/mixtral-8x22b-instruct-v0.1 + en_US: mistralai/mixtral-8x22b-instruct-v0.1 +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 64000 +parameter_rules: + - name: temperature + use_template: temperature + min: 0 + max: 1 + default: 0.5 + - name: top_p + use_template: top_p + min: 0 + max: 1 + default: 1 + - name: max_tokens + use_template: max_tokens + min: 1 + max: 1024 + default: 1024 + - name: frequency_penalty + use_template: frequency_penalty + min: -2 + max: 2 + default: 0 + - name: presence_penalty + use_template: presence_penalty + min: -2 + max: 2 + default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/recurrentgemma-2b.yaml b/api/core/model_runtime/model_providers/nvidia/llm/recurrentgemma-2b.yaml new file mode 100644 index 0000000000..73fcce3930 --- /dev/null +++ b/api/core/model_runtime/model_providers/nvidia/llm/recurrentgemma-2b.yaml @@ -0,0 +1,37 @@ +model: google/recurrentgemma-2b +label: + zh_Hans: google/recurrentgemma-2b + en_US: google/recurrentgemma-2b +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 2048 +parameter_rules: + - name: temperature + use_template: temperature + min: 0 + max: 1 + default: 0.2 + - name: top_p + use_template: top_p + min: 0 + max: 1 + default: 0.7 + - name: max_tokens + use_template: max_tokens + min: 1 + max: 1024 + default: 1024 + - name: random_seed + type: int + help: + en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. 
+ zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 + label: + en_US: Seed + zh_Hans: 种子 + default: 0 + min: 0 + max: 2147483647 From 8cc492721bc2828587842f59965574f563c1f535 Mon Sep 17 00:00:00 2001 From: Weaxs <459312872@qq.com> Date: Sat, 11 May 2024 21:07:22 +0800 Subject: [PATCH 063/267] fix: minimax streaming function_call message (#4271) --- .../minimax/llm/chat_completion_pro.py | 72 ++++++++----------- 1 file changed, 29 insertions(+), 43 deletions(-) diff --git a/api/core/model_runtime/model_providers/minimax/llm/chat_completion_pro.py b/api/core/model_runtime/model_providers/minimax/llm/chat_completion_pro.py index 81ea2e165e..71f6e2a1fe 100644 --- a/api/core/model_runtime/model_providers/minimax/llm/chat_completion_pro.py +++ b/api/core/model_runtime/model_providers/minimax/llm/chat_completion_pro.py @@ -20,16 +20,16 @@ class MinimaxChatCompletionPro: Minimax Chat Completion Pro API, supports function calling however, we do not have enough time and energy to implement it, but the parameters are reserved """ - def generate(self, model: str, api_key: str, group_id: str, + def generate(self, model: str, api_key: str, group_id: str, prompt_messages: list[MinimaxMessage], model_parameters: dict, tools: list[dict[str, Any]], stop: list[str] | None, stream: bool, user: str) \ - -> Union[MinimaxMessage, Generator[MinimaxMessage, None, None]]: + -> Union[MinimaxMessage, Generator[MinimaxMessage, None, None]]: """ generate chat completion """ if not api_key or not group_id: raise InvalidAPIKeyError('Invalid API key or group ID') - + url = f'https://api.minimax.chat/v1/text/chatcompletion_pro?GroupId={group_id}' extra_kwargs = {} @@ -42,7 +42,7 @@ class MinimaxChatCompletionPro: if 'top_p' in model_parameters and type(model_parameters['top_p']) == float: extra_kwargs['top_p'] = model_parameters['top_p'] - + if 'plugin_web_search' in model_parameters and model_parameters['plugin_web_search']: extra_kwargs['plugins'] = [ 'plugin_web_search' @@ -61,7 +61,7 @@ class MinimaxChatCompletionPro: # check if there is a system message if len(prompt_messages) == 0: raise BadRequestError('At least one message is required') - + if prompt_messages[0].role == MinimaxMessage.Role.SYSTEM.value: if prompt_messages[0].content: bot_setting['content'] = prompt_messages[0].content @@ -70,7 +70,7 @@ class MinimaxChatCompletionPro: # check if there is a user message if len(prompt_messages) == 0: raise BadRequestError('At least one user message is required') - + messages = [message.to_dict() for message in prompt_messages] headers = { @@ -89,21 +89,21 @@ class MinimaxChatCompletionPro: if tools: body['functions'] = tools - body['function_call'] = { 'type': 'auto' } + body['function_call'] = {'type': 'auto'} try: response = post( url=url, data=dumps(body), headers=headers, stream=stream, timeout=(10, 300)) except Exception as e: raise InternalServerError(e) - + if response.status_code != 200: raise InternalServerError(response.text) - + if stream: return self._handle_stream_chat_generate_response(response) return self._handle_chat_generate_response(response) - + def _handle_error(self, code: int, msg: str): if code == 1000 or code == 1001 or code == 1013 or code == 1027: raise InternalServerError(msg) @@ -127,7 +127,7 @@ class MinimaxChatCompletionPro: code = response['base_resp']['status_code'] msg = response['base_resp']['status_msg'] self._handle_error(code, msg) - + message = MinimaxMessage( content=response['reply'], role=MinimaxMessage.Role.ASSISTANT.value @@ -144,7 +144,6 @@ class MinimaxChatCompletionPro: """ 
handle stream chat generate response """ - function_call_storage = None for line in response.iter_lines(): if not line: continue @@ -158,54 +157,41 @@ class MinimaxChatCompletionPro: msg = data['base_resp']['status_msg'] self._handle_error(code, msg) + # final chunk if data['reply'] or 'usage' in data and data['usage']: total_tokens = data['usage']['total_tokens'] - message = MinimaxMessage( + minimax_message = MinimaxMessage( role=MinimaxMessage.Role.ASSISTANT.value, content='' ) - message.usage = { + minimax_message.usage = { 'prompt_tokens': 0, 'completion_tokens': total_tokens, 'total_tokens': total_tokens } - message.stop_reason = data['choices'][0]['finish_reason'] + minimax_message.stop_reason = data['choices'][0]['finish_reason'] - if function_call_storage: - function_call_message = MinimaxMessage(content='', role=MinimaxMessage.Role.ASSISTANT.value) - function_call_message.function_call = function_call_storage - yield function_call_message + choices = data.get('choices', []) + if len(choices) > 0: + for choice in choices: + message = choice['messages'][0] + # append function_call message + if 'function_call' in message: + function_call_message = MinimaxMessage(content='', role=MinimaxMessage.Role.ASSISTANT.value) + function_call_message.function_call = message['function_call'] + yield function_call_message - yield message + yield minimax_message return + # partial chunk choices = data.get('choices', []) if len(choices) == 0: continue for choice in choices: message = choice['messages'][0] - - if 'function_call' in message: - if not function_call_storage: - function_call_storage = message['function_call'] - if 'arguments' not in function_call_storage or not function_call_storage['arguments']: - function_call_storage['arguments'] = '' - continue - else: - function_call_storage['arguments'] += message['function_call']['arguments'] - continue - else: - if function_call_storage: - message['function_call'] = function_call_storage - function_call_storage = None - - minimax_message = MinimaxMessage(content='', role=MinimaxMessage.Role.ASSISTANT.value) - - if 'function_call' in message: - minimax_message.function_call = message['function_call'] - + # append text message if 'text' in message: - minimax_message.content = message['text'] - - yield minimax_message \ No newline at end of file + minimax_message = MinimaxMessage(content=message['text'], role=MinimaxMessage.Role.ASSISTANT.value) + yield minimax_message From e6db7ad1d5d83ef45e5a5acc16a45f822e49cfe9 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Mon, 13 May 2024 11:45:29 +0900 Subject: [PATCH 064/267] chore: update gmpy2_pkcs10aep_cipher.py (#4314) --- api/libs/gmpy2_pkcs10aep_cipher.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/libs/gmpy2_pkcs10aep_cipher.py b/api/libs/gmpy2_pkcs10aep_cipher.py index c22546f602..9856875c16 100644 --- a/api/libs/gmpy2_pkcs10aep_cipher.py +++ b/api/libs/gmpy2_pkcs10aep_cipher.py @@ -48,7 +48,7 @@ class PKCS1OAEP_Cipher: `Crypto.Hash.SHA1` is used. mgfunc : callable A mask generation function that accepts two parameters: a string to - use as seed, and the lenth of the mask to generate, in bytes. + use as seed, and the length of the mask to generate, in bytes. If not specified, the standard MGF1 consistent with ``hashAlgo`` is used (a safe choice). label : bytes/bytearray/memoryview A label to apply to this particular encryption. 
If not specified, @@ -218,7 +218,7 @@ def new(key, hashAlgo=None, mgfunc=None, label=b'', randfunc=None): :param mgfunc: A mask generation function that accepts two parameters: a string to - use as seed, and the lenth of the mask to generate, in bytes. + use as seed, and the length of the mask to generate, in bytes. If not specified, the standard MGF1 consistent with ``hashAlgo`` is used (a safe choice). :type mgfunc: callable From bbef964eb5caaca6df74b21a596900f31faff677 Mon Sep 17 00:00:00 2001 From: Yeuoly <45712896+Yeuoly@users.noreply.github.com> Date: Mon, 13 May 2024 14:39:14 +0800 Subject: [PATCH 065/267] improve: code upgrade (#4231) --- .github/workflows/api-tests.yml | 1 + .../helper/code_executor/code_executor.py | 86 +++++++++++++++-- api/core/helper/code_executor/entities.py | 6 ++ .../code_executor/javascript_transformer.py | 7 +- .../code_executor/jinja2_transformer.py | 20 +++- .../code_executor/python_transformer.py | 46 +++++---- .../code_executor/template_transformer.py | 6 +- api/core/workflow/nodes/code/code_node.py | 14 ++- api/core/workflow/nodes/code/entities.py | 4 +- .../workflow/nodes/__mock/code_executor.py | 6 +- docker/docker-compose.middleware.yaml | 39 ++++++-- docker/docker-compose.yaml | 43 +++++++-- .../dependencies/python-requirements.txt | 0 docker/volumes/ssrf_proxy/squid.conf | 50 ++++++++++ .../workflow/nodes/code/dependency-picker.tsx | 94 +++++++++++++++++++ .../workflow/nodes/code/dependency.tsx | 36 +++++++ .../components/workflow/nodes/code/panel.tsx | 31 ++++++ .../components/workflow/nodes/code/types.ts | 6 ++ .../workflow/nodes/code/use-config.ts | 69 +++++++++++++- web/i18n/en-US/workflow.ts | 3 + web/i18n/zh-Hans/workflow.ts | 3 + 21 files changed, 510 insertions(+), 60 deletions(-) create mode 100644 api/core/helper/code_executor/entities.py create mode 100644 docker/volumes/sandbox/dependencies/python-requirements.txt create mode 100644 docker/volumes/ssrf_proxy/squid.conf create mode 100644 web/app/components/workflow/nodes/code/dependency-picker.tsx create mode 100644 web/app/components/workflow/nodes/code/dependency.tsx diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml index 7d24b15bdf..bfb1054639 100644 --- a/.github/workflows/api-tests.yml +++ b/.github/workflows/api-tests.yml @@ -46,6 +46,7 @@ jobs: docker/docker-compose.middleware.yaml services: | sandbox + ssrf_proxy - name: Run Workflow run: dev/pytest/pytest_workflow.sh diff --git a/api/core/helper/code_executor/code_executor.py b/api/core/helper/code_executor/code_executor.py index ec685ae814..f078563658 100644 --- a/api/core/helper/code_executor/code_executor.py +++ b/api/core/helper/code_executor/code_executor.py @@ -1,14 +1,20 @@ +import logging +import time from enum import Enum +from threading import Lock from typing import Literal, Optional -from httpx import post +from httpx import get, post from pydantic import BaseModel from yarl import URL from config import get_env +from core.helper.code_executor.entities import CodeDependency from core.helper.code_executor.javascript_transformer import NodeJsTemplateTransformer from core.helper.code_executor.jinja2_transformer import Jinja2TemplateTransformer -from core.helper.code_executor.python_transformer import PythonTemplateTransformer +from core.helper.code_executor.python_transformer import PYTHON_STANDARD_PACKAGES, PythonTemplateTransformer + +logger = logging.getLogger(__name__) # Code Executor CODE_EXECUTION_ENDPOINT = get_env('CODE_EXECUTION_ENDPOINT') @@ -28,7 +34,6 @@ class 
CodeExecutionResponse(BaseModel): message: str data: Data - class CodeLanguage(str, Enum): PYTHON3 = 'python3' JINJA2 = 'jinja2' @@ -36,6 +41,9 @@ class CodeLanguage(str, Enum): class CodeExecutor: + dependencies_cache = {} + dependencies_cache_lock = Lock() + code_template_transformers = { CodeLanguage.PYTHON3: PythonTemplateTransformer, CodeLanguage.JINJA2: Jinja2TemplateTransformer, @@ -49,7 +57,11 @@ class CodeExecutor: } @classmethod - def execute_code(cls, language: Literal['python3', 'javascript', 'jinja2'], preload: str, code: str) -> str: + def execute_code(cls, + language: Literal['python3', 'javascript', 'jinja2'], + preload: str, + code: str, + dependencies: Optional[list[CodeDependency]] = None) -> str: """ Execute code :param language: code language @@ -65,9 +77,13 @@ class CodeExecutor: data = { 'language': cls.code_language_to_running_language.get(language), 'code': code, - 'preload': preload + 'preload': preload, + 'enable_network': True } + if dependencies: + data['dependencies'] = [dependency.dict() for dependency in dependencies] + try: response = post(str(url), json=data, headers=headers, timeout=CODE_EXECUTION_TIMEOUT) if response.status_code == 503: @@ -95,7 +111,7 @@ class CodeExecutor: return response.data.stdout @classmethod - def execute_workflow_code_template(cls, language: Literal['python3', 'javascript', 'jinja2'], code: str, inputs: dict) -> dict: + def execute_workflow_code_template(cls, language: Literal['python3', 'javascript', 'jinja2'], code: str, inputs: dict, dependencies: Optional[list[CodeDependency]] = None) -> dict: """ Execute code :param language: code language @@ -107,11 +123,63 @@ class CodeExecutor: if not template_transformer: raise CodeExecutionException(f'Unsupported language {language}') - runner, preload = template_transformer.transform_caller(code, inputs) + runner, preload, dependencies = template_transformer.transform_caller(code, inputs, dependencies) try: - response = cls.execute_code(language, preload, runner) + response = cls.execute_code(language, preload, runner, dependencies) except CodeExecutionException as e: raise e - return template_transformer.transform_response(response) \ No newline at end of file + return template_transformer.transform_response(response) + + @classmethod + def list_dependencies(cls, language: Literal['python3']) -> list[CodeDependency]: + with cls.dependencies_cache_lock: + if language in cls.dependencies_cache: + # check expiration + dependencies = cls.dependencies_cache[language] + if dependencies['expiration'] > time.time(): + return dependencies['data'] + # remove expired cache + del cls.dependencies_cache[language] + + dependencies = cls._get_dependencies(language) + with cls.dependencies_cache_lock: + cls.dependencies_cache[language] = { + 'data': dependencies, + 'expiration': time.time() + 60 + } + + return dependencies + + @classmethod + def _get_dependencies(cls, language: Literal['python3']) -> list[CodeDependency]: + """ + List dependencies + """ + url = URL(CODE_EXECUTION_ENDPOINT) / 'v1' / 'sandbox' / 'dependencies' + + headers = { + 'X-Api-Key': CODE_EXECUTION_API_KEY + } + + running_language = cls.code_language_to_running_language.get(language) + if isinstance(running_language, Enum): + running_language = running_language.value + + data = { + 'language': running_language, + } + + try: + response = get(str(url), params=data, headers=headers, timeout=CODE_EXECUTION_TIMEOUT) + if response.status_code != 200: + raise Exception(f'Failed to list dependencies, got status code 
{response.status_code}, please check if the sandbox service is running') + response = response.json() + dependencies = response.get('data', {}).get('dependencies', []) + return [ + CodeDependency(**dependency) for dependency in dependencies if dependency.get('name') not in PYTHON_STANDARD_PACKAGES + ] + except Exception as e: + logger.exception(f'Failed to list dependencies: {e}') + return [] \ No newline at end of file diff --git a/api/core/helper/code_executor/entities.py b/api/core/helper/code_executor/entities.py new file mode 100644 index 0000000000..55464d2ff7 --- /dev/null +++ b/api/core/helper/code_executor/entities.py @@ -0,0 +1,6 @@ +from pydantic import BaseModel + + +class CodeDependency(BaseModel): + name: str + version: str \ No newline at end of file diff --git a/api/core/helper/code_executor/javascript_transformer.py b/api/core/helper/code_executor/javascript_transformer.py index 29b8e06e86..8da16b568f 100644 --- a/api/core/helper/code_executor/javascript_transformer.py +++ b/api/core/helper/code_executor/javascript_transformer.py @@ -1,6 +1,8 @@ import json import re +from typing import Optional +from core.helper.code_executor.entities import CodeDependency from core.helper.code_executor.template_transformer import TemplateTransformer NODEJS_RUNNER = """// declare main function here @@ -22,7 +24,8 @@ NODEJS_PRELOAD = """""" class NodeJsTemplateTransformer(TemplateTransformer): @classmethod - def transform_caller(cls, code: str, inputs: dict) -> tuple[str, str]: + def transform_caller(cls, code: str, inputs: dict, + dependencies: Optional[list[CodeDependency]] = None) -> tuple[str, str, list[CodeDependency]]: """ Transform code to python runner :param code: code @@ -37,7 +40,7 @@ class NodeJsTemplateTransformer(TemplateTransformer): runner = NODEJS_RUNNER.replace('{{code}}', code) runner = runner.replace('{{inputs}}', inputs_str) - return runner, NODEJS_PRELOAD + return runner, NODEJS_PRELOAD, [] @classmethod def transform_response(cls, response: str) -> dict: diff --git a/api/core/helper/code_executor/jinja2_transformer.py b/api/core/helper/code_executor/jinja2_transformer.py index 27a3579493..3d557372f1 100644 --- a/api/core/helper/code_executor/jinja2_transformer.py +++ b/api/core/helper/code_executor/jinja2_transformer.py @@ -1,7 +1,10 @@ import json import re from base64 import b64encode +from typing import Optional +from core.helper.code_executor.entities import CodeDependency +from core.helper.code_executor.python_transformer import PYTHON_STANDARD_PACKAGES from core.helper.code_executor.template_transformer import TemplateTransformer PYTHON_RUNNER = """ @@ -58,7 +61,8 @@ if __name__ == '__main__': class Jinja2TemplateTransformer(TemplateTransformer): @classmethod - def transform_caller(cls, code: str, inputs: dict) -> tuple[str, str]: + def transform_caller(cls, code: str, inputs: dict, + dependencies: Optional[list[CodeDependency]] = None) -> tuple[str, str, list[CodeDependency]]: """ Transform code to python runner :param code: code @@ -72,7 +76,19 @@ class Jinja2TemplateTransformer(TemplateTransformer): runner = PYTHON_RUNNER.replace('{{code}}', code) runner = runner.replace('{{inputs}}', inputs_str) - return runner, JINJA2_PRELOAD + if not dependencies: + dependencies = [] + + # add native packages and jinja2 + for package in PYTHON_STANDARD_PACKAGES.union(['jinja2']): + dependencies.append(CodeDependency(name=package, version='')) + + # deduplicate + dependencies = list({ + dep.name: dep for dep in dependencies if dep.name + }.values()) + + return runner, 
JINJA2_PRELOAD, dependencies @classmethod def transform_response(cls, response: str) -> dict: diff --git a/api/core/helper/code_executor/python_transformer.py b/api/core/helper/code_executor/python_transformer.py index f44acbb9bf..fd28b06187 100644 --- a/api/core/helper/code_executor/python_transformer.py +++ b/api/core/helper/code_executor/python_transformer.py @@ -1,7 +1,9 @@ import json import re from base64 import b64encode +from typing import Optional +from core.helper.code_executor.entities import CodeDependency from core.helper.code_executor.template_transformer import TemplateTransformer PYTHON_RUNNER = """# declare main function here @@ -25,32 +27,17 @@ result = f'''<> print(result) """ -PYTHON_PRELOAD = """ -# prepare general imports -import json -import datetime -import math -import random -import re -import string -import sys -import time -import traceback -import uuid -import os -import base64 -import hashlib -import hmac -import binascii -import collections -import functools -import operator -import itertools -""" +PYTHON_PRELOAD = """""" + +PYTHON_STANDARD_PACKAGES = set([ + 'json', 'datetime', 'math', 'random', 're', 'string', 'sys', 'time', 'traceback', 'uuid', 'os', 'base64', + 'hashlib', 'hmac', 'binascii', 'collections', 'functools', 'operator', 'itertools', 'uuid', +]) class PythonTemplateTransformer(TemplateTransformer): @classmethod - def transform_caller(cls, code: str, inputs: dict) -> tuple[str, str]: + def transform_caller(cls, code: str, inputs: dict, + dependencies: Optional[list[CodeDependency]] = None) -> tuple[str, str, list[CodeDependency]]: """ Transform code to python runner :param code: code @@ -65,7 +52,18 @@ class PythonTemplateTransformer(TemplateTransformer): runner = PYTHON_RUNNER.replace('{{code}}', code) runner = runner.replace('{{inputs}}', inputs_str) - return runner, PYTHON_PRELOAD + # add standard packages + if dependencies is None: + dependencies = [] + + for package in PYTHON_STANDARD_PACKAGES: + if package not in dependencies: + dependencies.append(CodeDependency(name=package, version='')) + + # deduplicate + dependencies = list({dep.name: dep for dep in dependencies if dep.name}.values()) + + return runner, PYTHON_PRELOAD, dependencies @classmethod def transform_response(cls, response: str) -> dict: diff --git a/api/core/helper/code_executor/template_transformer.py b/api/core/helper/code_executor/template_transformer.py index c3564afd04..b83d3df30a 100644 --- a/api/core/helper/code_executor/template_transformer.py +++ b/api/core/helper/code_executor/template_transformer.py @@ -1,10 +1,14 @@ from abc import ABC, abstractmethod +from typing import Optional + +from core.helper.code_executor.entities import CodeDependency class TemplateTransformer(ABC): @classmethod @abstractmethod - def transform_caller(cls, code: str, inputs: dict) -> tuple[str, str]: + def transform_caller(cls, code: str, inputs: dict, + dependencies: Optional[list[CodeDependency]] = None) -> tuple[str, str, list[CodeDependency]]: """ Transform code to python runner :param code: code diff --git a/api/core/workflow/nodes/code/code_node.py b/api/core/workflow/nodes/code/code_node.py index 12e7ae940f..3e00e501ac 100644 --- a/api/core/workflow/nodes/code/code_node.py +++ b/api/core/workflow/nodes/code/code_node.py @@ -2,6 +2,7 @@ import os from typing import Optional, Union, cast from core.helper.code_executor.code_executor import CodeExecutionException, CodeExecutor, CodeLanguage +from core.model_runtime.utils.encoders import jsonable_encoder from 
core.workflow.entities.node_entities import NodeRunResult, NodeType from core.workflow.entities.variable_pool import VariablePool from core.workflow.nodes.base_node import BaseNode @@ -61,7 +62,8 @@ class CodeNode(BaseNode): "children": None } } - } + }, + "available_dependencies": [] } return { @@ -84,8 +86,11 @@ class CodeNode(BaseNode): "type": "string", "children": None } - } - } + }, + "dependencies": [ + ] + }, + "available_dependencies": jsonable_encoder(CodeExecutor.list_dependencies('python3')) } def _run(self, variable_pool: VariablePool) -> NodeRunResult: @@ -115,7 +120,8 @@ class CodeNode(BaseNode): result = CodeExecutor.execute_workflow_code_template( language=code_language, code=code, - inputs=variables + inputs=variables, + dependencies=node_data.dependencies ) # Transform result diff --git a/api/core/workflow/nodes/code/entities.py b/api/core/workflow/nodes/code/entities.py index 555bb3918e..4f957e5afb 100644 --- a/api/core/workflow/nodes/code/entities.py +++ b/api/core/workflow/nodes/code/entities.py @@ -2,6 +2,7 @@ from typing import Literal, Optional from pydantic import BaseModel +from core.helper.code_executor.entities import CodeDependency from core.workflow.entities.base_node_data_entities import BaseNodeData from core.workflow.entities.variable_entities import VariableSelector @@ -17,4 +18,5 @@ class CodeNodeData(BaseNodeData): variables: list[VariableSelector] code_language: Literal['python3', 'javascript'] code: str - outputs: dict[str, Output] \ No newline at end of file + outputs: dict[str, Output] + dependencies: Optional[list[CodeDependency]] = None \ No newline at end of file diff --git a/api/tests/integration_tests/workflow/nodes/__mock/code_executor.py b/api/tests/integration_tests/workflow/nodes/__mock/code_executor.py index ef84c92625..a7252d3036 100644 --- a/api/tests/integration_tests/workflow/nodes/__mock/code_executor.py +++ b/api/tests/integration_tests/workflow/nodes/__mock/code_executor.py @@ -1,17 +1,19 @@ import os -from typing import Literal +from typing import Literal, Optional import pytest from _pytest.monkeypatch import MonkeyPatch from jinja2 import Template from core.helper.code_executor.code_executor import CodeExecutor +from core.helper.code_executor.entities import CodeDependency MOCK = os.getenv('MOCK_SWITCH', 'false') == 'true' class MockedCodeExecutor: @classmethod - def invoke(cls, language: Literal['python3', 'javascript', 'jinja2'], code: str, inputs: dict) -> dict: + def invoke(cls, language: Literal['python3', 'javascript', 'jinja2'], + code: str, inputs: dict, dependencies: Optional[list[CodeDependency]] = None) -> dict: # invoke directly if language == 'python3': return { diff --git a/docker/docker-compose.middleware.yaml b/docker/docker-compose.middleware.yaml index 6bf45da9e0..d786e7d4c1 100644 --- a/docker/docker-compose.middleware.yaml +++ b/docker/docker-compose.middleware.yaml @@ -53,20 +53,38 @@ services: # The DifySandbox sandbox: - image: langgenius/dify-sandbox:0.1.0 + image: langgenius/dify-sandbox:0.2.0 restart: always - cap_add: - # Why is sys_admin permission needed? - # https://docs.dify.ai/getting-started/install-self-hosted/install-faq#id-16.-why-is-sys_admin-permission-needed - - SYS_ADMIN environment: # The DifySandbox configurations + # Make sure you are changing this key for your deployment with a strong key. + # You can generate a strong key using `openssl rand -base64 42`. 
API_KEY: dify-sandbox GIN_MODE: 'release' WORKER_TIMEOUT: 15 - ports: - - "8194:8194" + ENABLE_NETWORK: 'true' + HTTP_PROXY: 'http://ssrf_proxy:3128' + HTTPS_PROXY: 'http://ssrf_proxy:3128' + volumes: + - ./volumes/sandbox/dependencies:/dependencies + networks: + - ssrf_proxy_network + # ssrf_proxy server + # for more information, please refer to + # https://docs.dify.ai/getting-started/install-self-hosted/install-faq#id-16.-why-is-ssrf_proxy-needed + ssrf_proxy: + image: ubuntu/squid:latest + restart: always + ports: + - "3128:3128" + - "8194:8194" + volumes: + # pls clearly modify the squid.conf file to fit your network environment. + - ./volumes/ssrf_proxy/squid.conf:/etc/squid/squid.conf + networks: + - ssrf_proxy_network + - default # Qdrant vector store. # uncomment to use qdrant as vector store. # (if uncommented, you need to comment out the weaviate service above, @@ -81,3 +99,10 @@ services: # ports: # - "6333:6333" # - "6334:6334" + + +networks: + # create a network between sandbox, api and ssrf_proxy, and can not access outside. + ssrf_proxy_network: + driver: bridge + internal: true diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 0c3a0c202f..e232ef436a 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -161,6 +161,9 @@ services: CODE_MAX_STRING_ARRAY_LENGTH: 30 CODE_MAX_OBJECT_ARRAY_LENGTH: 30 CODE_MAX_NUMBER_ARRAY_LENGTH: 1000 + # SSRF Proxy server + SSRF_PROXY_HTTP_URL: 'http://ssrf_proxy:3128' + SSRF_PROXY_HTTPS_URL: 'http://ssrf_proxy:3128' depends_on: - db - redis @@ -170,6 +173,9 @@ services: # uncomment to expose dify-api port to host # ports: # - "5001:5001" + networks: + - ssrf_proxy_network + - default # worker service # The Celery worker for processing the queue. @@ -283,6 +289,9 @@ services: volumes: # Mount the storage directory to the container, for storing user files. - ./volumes/app/storage:/app/api/storage + networks: + - ssrf_proxy_network + - default # Frontend web application. web: @@ -367,18 +376,35 @@ services: # The DifySandbox sandbox: - image: langgenius/dify-sandbox:0.1.0 + image: langgenius/dify-sandbox:0.2.0 restart: always - cap_add: - # Why is sys_admin permission needed? - # https://docs.dify.ai/getting-started/install-self-hosted/install-faq#id-16.-why-is-sys_admin-permission-needed - - SYS_ADMIN environment: # The DifySandbox configurations + # Make sure you are changing this key for your deployment with a strong key. + # You can generate a strong key using `openssl rand -base64 42`. API_KEY: dify-sandbox - GIN_MODE: release + GIN_MODE: 'release' WORKER_TIMEOUT: 15 + ENABLE_NETWORK: 'true' + HTTP_PROXY: 'http://ssrf_proxy:3128' + HTTPS_PROXY: 'http://ssrf_proxy:3128' + volumes: + - ./volumes/sandbox/dependencies:/dependencies + networks: + - ssrf_proxy_network + # ssrf_proxy server + # for more information, please refer to + # https://docs.dify.ai/getting-started/install-self-hosted/install-faq#id-16.-why-is-ssrf_proxy-needed + ssrf_proxy: + image: ubuntu/squid:latest + restart: always + volumes: + # pls clearly modify the squid.conf file to fit your network environment. + - ./volumes/ssrf_proxy/squid.conf:/etc/squid/squid.conf + networks: + - ssrf_proxy_network + - default # Qdrant vector store. # uncomment to use qdrant as vector store. # (if uncommented, you need to comment out the weaviate service above, @@ -436,3 +462,8 @@ services: ports: - "80:80" #- "443:443" +networks: + # create a network between sandbox, api and ssrf_proxy, and can not access outside. 
+ ssrf_proxy_network: + driver: bridge + internal: true diff --git a/docker/volumes/sandbox/dependencies/python-requirements.txt b/docker/volumes/sandbox/dependencies/python-requirements.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docker/volumes/ssrf_proxy/squid.conf b/docker/volumes/ssrf_proxy/squid.conf new file mode 100644 index 0000000000..3028bf35c6 --- /dev/null +++ b/docker/volumes/ssrf_proxy/squid.conf @@ -0,0 +1,50 @@ +acl localnet src 0.0.0.1-0.255.255.255 # RFC 1122 "this" network (LAN) +acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN) +acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN) +acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines +acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN) +acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN) +acl localnet src fc00::/7 # RFC 4193 local private network range +acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines +acl SSL_ports port 443 +acl Safe_ports port 80 # http +acl Safe_ports port 21 # ftp +acl Safe_ports port 443 # https +acl Safe_ports port 70 # gopher +acl Safe_ports port 210 # wais +acl Safe_ports port 1025-65535 # unregistered ports +acl Safe_ports port 280 # http-mgmt +acl Safe_ports port 488 # gss-http +acl Safe_ports port 591 # filemaker +acl Safe_ports port 777 # multiling http +acl CONNECT method CONNECT +http_access deny !Safe_ports +http_access deny CONNECT !SSL_ports +http_access allow localhost manager +http_access deny manager +http_access allow localhost +http_access allow localnet +http_access deny all + +################################## Proxy Server ################################ +http_port 3128 +coredump_dir /var/spool/squid +refresh_pattern ^ftp: 1440 20% 10080 +refresh_pattern ^gopher: 1440 0% 1440 +refresh_pattern -i (/cgi-bin/|\?) 0 0% 0 +refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims +refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims +refresh_pattern \/InRelease$ 0 0% 0 refresh-ims +refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims +refresh_pattern . 
0 20% 4320 +logfile_rotate 0 + +# upstream proxy, set to your own upstream proxy IP to avoid SSRF attacks +# cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default + + +################################## Reverse Proxy To Sandbox ################################ +http_port 8194 accel vhost +cache_peer sandbox parent 8194 0 no-query originserver +acl all src all +http_access allow all \ No newline at end of file diff --git a/web/app/components/workflow/nodes/code/dependency-picker.tsx b/web/app/components/workflow/nodes/code/dependency-picker.tsx new file mode 100644 index 0000000000..8fe9fa01bc --- /dev/null +++ b/web/app/components/workflow/nodes/code/dependency-picker.tsx @@ -0,0 +1,94 @@ +import type { FC } from 'react' +import React, { useCallback, useState } from 'react' +import { t } from 'i18next' +import type { CodeDependency } from './types' +import { ChevronDown } from '@/app/components/base/icons/src/vender/line/arrows' +import { PortalToFollowElem, PortalToFollowElemContent, PortalToFollowElemTrigger } from '@/app/components/base/portal-to-follow-elem' +import { Check, SearchLg } from '@/app/components/base/icons/src/vender/line/general' +import { XCircle } from '@/app/components/base/icons/src/vender/solid/general' + +type Props = { + value: CodeDependency + available_dependencies: CodeDependency[] + onChange: (dependency: CodeDependency) => void +} + +const DependencyPicker: FC = ({ + available_dependencies, + value, + onChange, +}) => { + const [open, setOpen] = useState(false) + const [searchText, setSearchText] = useState('') + + const handleChange = useCallback((dependency: CodeDependency) => { + return () => { + setOpen(false) + onChange(dependency) + } + }, [onChange]) + + return ( + + setOpen(!open)} className='flex-grow cursor-pointer'> +
+
{value.name}
+ +
+
+ +
+
+ + setSearchText(e.target.value)} + autoFocus + /> + { + searchText && ( +
setSearchText('')} + > + +
+ ) + } +
+
+ {available_dependencies.filter((v) => { + if (!searchText) + return true + return v.name.toLowerCase().includes(searchText.toLowerCase()) + }).map(dependency => ( +
+
{dependency.name}
+ {dependency.name === value.name && } +
+ ))} +
+
+
+
+ ) +} + +export default React.memo(DependencyPicker) diff --git a/web/app/components/workflow/nodes/code/dependency.tsx b/web/app/components/workflow/nodes/code/dependency.tsx new file mode 100644 index 0000000000..5e868efe31 --- /dev/null +++ b/web/app/components/workflow/nodes/code/dependency.tsx @@ -0,0 +1,36 @@ +import type { FC } from 'react' +import React from 'react' +import RemoveButton from '../_base/components/remove-button' +import type { CodeDependency } from './types' +import DependencyPicker from './dependency-picker' + +type Props = { + available_dependencies: CodeDependency[] + dependencies: CodeDependency[] + handleRemove: (index: number) => void + handleChange: (index: number, dependency: CodeDependency) => void +} + +const Dependencies: FC = ({ + available_dependencies, dependencies, handleRemove, handleChange, +}) => { + return ( +
+ {dependencies.map((dependency, index) => ( +
+ handleChange(index, dependency)} + /> + handleRemove(index)} + /> +
+ ))} +
+ ) +} + +export default React.memo(Dependencies) diff --git a/web/app/components/workflow/nodes/code/panel.tsx b/web/app/components/workflow/nodes/code/panel.tsx index 838e7190d3..8ab9b3d0e5 100644 --- a/web/app/components/workflow/nodes/code/panel.tsx +++ b/web/app/components/workflow/nodes/code/panel.tsx @@ -5,6 +5,7 @@ import RemoveEffectVarConfirm from '../_base/components/remove-effect-var-confir import useConfig from './use-config' import type { CodeNodeType } from './types' import { CodeLanguage } from './types' +import Dependencies from './dependency' import VarList from '@/app/components/workflow/nodes/_base/components/variable/var-list' import OutputVarList from '@/app/components/workflow/nodes/_base/components/variable/output-var-list' import AddButton from '@/app/components/base/button/add-button' @@ -59,6 +60,11 @@ const Panel: FC> = ({ varInputs, inputVarValues, setInputVarValues, + allowDependencies, + availableDependencies, + handleAddDependency, + handleRemoveDependency, + handleChangeDependency, } = useConfig(id, data) return ( @@ -78,6 +84,31 @@ const Panel: FC> = ({ filterVar={filterVar} /> + { + allowDependencies + ? ( +
+ +
+ handleAddDependency({ name: '', version: '' })} /> + } + tooltip={t(`${i18nPrefix}.advancedDependenciesTip`)!} + > + handleRemoveDependency(index)} + handleChange={(index, dependency) => handleChangeDependency(index, dependency)} + /> + +
+
+ ) + : null + } { const appId = useAppStore.getState().appDetail?.id const [allLanguageDefault, setAllLanguageDefault] = useState | null>(null) + const [allLanguageDependencies, setAllLanguageDependencies] = useState | null>(null) useEffect(() => { if (appId) { (async () => { const { config: javaScriptConfig } = await fetchNodeDefault(appId, BlockEnum.Code, { code_language: CodeLanguage.javascript }) as any - const { config: pythonConfig } = await fetchNodeDefault(appId, BlockEnum.Code, { code_language: CodeLanguage.python3 }) as any + const { config: pythonConfig, available_dependencies: pythonDependencies } = await fetchNodeDefault(appId, BlockEnum.Code, { code_language: CodeLanguage.python3 }) as any setAllLanguageDefault({ [CodeLanguage.javascript]: javaScriptConfig as CodeNodeType, [CodeLanguage.python3]: pythonConfig as CodeNodeType, } as any) + setAllLanguageDependencies({ + [CodeLanguage.python3]: pythonDependencies as CodeDependency[], + } as any) })() } }, [appId]) @@ -41,6 +45,62 @@ const useConfig = (id: string, payload: CodeNodeType) => { setInputs, }) + const handleAddDependency = useCallback((dependency: CodeDependency) => { + const newInputs = produce(inputs, (draft) => { + if (!draft.dependencies) + draft.dependencies = [] + draft.dependencies.push(dependency) + }) + setInputs(newInputs) + }, [inputs, setInputs]) + + const handleRemoveDependency = useCallback((index: number) => { + const newInputs = produce(inputs, (draft) => { + if (!draft.dependencies) + draft.dependencies = [] + draft.dependencies.splice(index, 1) + }) + setInputs(newInputs) + }, [inputs, setInputs]) + + const handleChangeDependency = useCallback((index: number, dependency: CodeDependency) => { + const newInputs = produce(inputs, (draft) => { + if (!draft.dependencies) + draft.dependencies = [] + draft.dependencies[index] = dependency + }) + setInputs(newInputs) + }, [inputs, setInputs]) + + const [allowDependencies, setAllowDependencies] = useState(false) + useEffect(() => { + if (!inputs.code_language) + return + if (!allLanguageDependencies) + return + + const newAllowDependencies = !!allLanguageDependencies[inputs.code_language] + setAllowDependencies(newAllowDependencies) + }, [allLanguageDependencies, inputs.code_language]) + + const [availableDependencies, setAvailableDependencies] = useState([]) + useEffect(() => { + if (!inputs.code_language) + return + if (!allLanguageDependencies) + return + + const newAvailableDependencies = produce(allLanguageDependencies[inputs.code_language], (draft) => { + const currentLanguage = inputs.code_language + if (!currentLanguage || !draft || !inputs.dependencies) + return [] + return draft.filter((dependency) => { + return !inputs.dependencies?.find(d => d.name === dependency.name) + }) + }) + setAvailableDependencies(newAvailableDependencies || []) + }, [allLanguageDependencies, inputs.code_language, inputs.dependencies]) + const [outputKeyOrders, setOutputKeyOrders] = useState([]) const syncOutputKeyOrders = useCallback((outputs: OutputVar) => { setOutputKeyOrders(Object.keys(outputs)) @@ -163,6 +223,11 @@ const useConfig = (id: string, payload: CodeNodeType) => { inputVarValues, setInputVarValues, runResult, + availableDependencies, + allowDependencies, + handleAddDependency, + handleRemoveDependency, + handleChangeDependency, } } diff --git a/web/i18n/en-US/workflow.ts b/web/i18n/en-US/workflow.ts index 206bae5400..3ffad0198d 100644 --- a/web/i18n/en-US/workflow.ts +++ b/web/i18n/en-US/workflow.ts @@ -273,6 +273,9 @@ const translation = { code: { 
inputVars: 'Input Variables', outputVars: 'Output Variables', + advancedDependencies: 'Advanced Dependencies', + advancedDependenciesTip: 'Add some preloaded dependencies that take more time to consume or are not default built-in here', + searchDependencies: 'Search Dependencies', }, templateTransform: { inputVars: 'Input Variables', diff --git a/web/i18n/zh-Hans/workflow.ts b/web/i18n/zh-Hans/workflow.ts index 781ff3b49d..f9ae082f6f 100644 --- a/web/i18n/zh-Hans/workflow.ts +++ b/web/i18n/zh-Hans/workflow.ts @@ -273,6 +273,9 @@ const translation = { code: { inputVars: '输入变量', outputVars: '输出变量', + advancedDependencies: '高级依赖', + advancedDependenciesTip: '在这里添加一些预加载需要消耗较多时间或非默认内置的依赖包', + searchDependencies: '搜索依赖', }, templateTransform: { inputVars: '输入变量', From 63382f758e50ca4cf980888838be29d82c8f5fa1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Mon, 13 May 2024 15:20:16 +0800 Subject: [PATCH 066/267] fix typo (#4329) --- .devcontainer/README.md | 4 ++-- .github/ISSUE_TEMPLATE/bug_report.yml | 4 ++-- .github/ISSUE_TEMPLATE/document_issue.yml | 4 ++-- .github/ISSUE_TEMPLATE/feature_request.yml | 2 +- .github/ISSUE_TEMPLATE/translation_issue.yml | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.devcontainer/README.md b/.devcontainer/README.md index fa989584f5..df12a3c2d6 100644 --- a/.devcontainer/README.md +++ b/.devcontainer/README.md @@ -1,4 +1,4 @@ -# Devlopment with devcontainer +# Development with devcontainer This project includes a devcontainer configuration that allows you to open the project in a container with a fully configured development environment. Both frontend and backend environments are initialized when the container is started. ## GitHub Codespaces @@ -33,5 +33,5 @@ Performance Impact: While usually minimal, programs running inside a devcontaine if you see such error message when you open this project in codespaces: ![Alt text](troubleshooting.png) -a simple workaround is change `/signin` endpoint into another one, then login with github account and close the tab, then change it back to `/signin` endpoint. Then all things will be fine. +a simple workaround is change `/signin` endpoint into another one, then login with GitHub account and close the tab, then change it back to `/signin` endpoint. Then all things will be fine. The reason is `signin` endpoint is not allowed in codespaces, details can be found [here](https://github.com/orgs/community/discussions/5204) \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index fea45de1d3..b596bdb6b0 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -8,13 +8,13 @@ body: label: Self Checks description: "To make sure we get to you in time, please check the following :)" options: - - label: This is only for bug report, if you would like to ask a quesion, please head to [Discussions](https://github.com/langgenius/dify/discussions/categories/general). + - label: This is only for bug report, if you would like to ask a question, please head to [Discussions](https://github.com/langgenius/dify/discussions/categories/general). required: true - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones. required: true - label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)). 
required: true - - label: "Pleas do not modify this template :) and fill in all the required fields." + - label: "Please do not modify this template :) and fill in all the required fields." required: true - type: input diff --git a/.github/ISSUE_TEMPLATE/document_issue.yml b/.github/ISSUE_TEMPLATE/document_issue.yml index 44115b2097..c5aeb7fd73 100644 --- a/.github/ISSUE_TEMPLATE/document_issue.yml +++ b/.github/ISSUE_TEMPLATE/document_issue.yml @@ -1,7 +1,7 @@ name: "📚 Documentation Issue" description: Report issues in our documentation labels: - - ducumentation + - documentation body: - type: checkboxes attributes: @@ -12,7 +12,7 @@ body: required: true - label: I confirm that I am using English to submit report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)). required: true - - label: "Pleas do not modify this template :) and fill in all the required fields." + - label: "Please do not modify this template :) and fill in all the required fields." required: true - type: textarea attributes: diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 694bd3975d..8730f5c11f 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -12,7 +12,7 @@ body: required: true - label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)). required: true - - label: "Pleas do not modify this template :) and fill in all the required fields." + - label: "Please do not modify this template :) and fill in all the required fields." required: true - type: textarea attributes: diff --git a/.github/ISSUE_TEMPLATE/translation_issue.yml b/.github/ISSUE_TEMPLATE/translation_issue.yml index 589e071e14..898e2cdf58 100644 --- a/.github/ISSUE_TEMPLATE/translation_issue.yml +++ b/.github/ISSUE_TEMPLATE/translation_issue.yml @@ -12,7 +12,7 @@ body: required: true - label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)). required: true - - label: "Pleas do not modify this template :) and fill in all the required fields." + - label: "Please do not modify this template :) and fill in all the required fields." 
required: true - type: input attributes: From 5edb3d55e5797956aed488e49302888e2bfb9f1d Mon Sep 17 00:00:00 2001 From: paragonnov Date: Mon, 13 May 2024 16:20:44 +0900 Subject: [PATCH 067/267] feat: i18n: add korean language (ko-KR) (#4333) --- web/i18n/README.md | 2 +- web/i18n/ko-KR/app-annotation.ts | 87 +++++ web/i18n/ko-KR/app-api.ts | 85 +++++ web/i18n/ko-KR/app-debug.ts | 415 ++++++++++++++++++++ web/i18n/ko-KR/app-log.ts | 92 +++++ web/i18n/ko-KR/app-overview.ts | 143 +++++++ web/i18n/ko-KR/app.ts | 86 +++++ web/i18n/ko-KR/billing.ts | 113 ++++++ web/i18n/ko-KR/common.ts | 526 ++++++++++++++++++++++++++ web/i18n/ko-KR/custom.ts | 30 ++ web/i18n/ko-KR/dataset-creation.ts | 131 +++++++ web/i18n/ko-KR/dataset-documents.ts | 348 +++++++++++++++++ web/i18n/ko-KR/dataset-hit-testing.ts | 28 ++ web/i18n/ko-KR/dataset-settings.ts | 33 ++ web/i18n/ko-KR/dataset.ts | 48 +++ web/i18n/ko-KR/explore.ts | 43 +++ web/i18n/ko-KR/layout.ts | 3 + web/i18n/ko-KR/login.ts | 60 +++ web/i18n/ko-KR/register.ts | 3 + web/i18n/ko-KR/run-log.ts | 29 ++ web/i18n/ko-KR/share-app.ts | 70 ++++ web/i18n/ko-KR/tools.ts | 115 ++++++ web/i18n/ko-KR/workflow.ts | 358 ++++++++++++++++++ web/i18n/language.ts | 6 +- 24 files changed, 2850 insertions(+), 4 deletions(-) create mode 100644 web/i18n/ko-KR/app-annotation.ts create mode 100644 web/i18n/ko-KR/app-api.ts create mode 100644 web/i18n/ko-KR/app-debug.ts create mode 100644 web/i18n/ko-KR/app-log.ts create mode 100644 web/i18n/ko-KR/app-overview.ts create mode 100644 web/i18n/ko-KR/app.ts create mode 100644 web/i18n/ko-KR/billing.ts create mode 100644 web/i18n/ko-KR/common.ts create mode 100644 web/i18n/ko-KR/custom.ts create mode 100644 web/i18n/ko-KR/dataset-creation.ts create mode 100644 web/i18n/ko-KR/dataset-documents.ts create mode 100644 web/i18n/ko-KR/dataset-hit-testing.ts create mode 100644 web/i18n/ko-KR/dataset-settings.ts create mode 100644 web/i18n/ko-KR/dataset.ts create mode 100644 web/i18n/ko-KR/explore.ts create mode 100644 web/i18n/ko-KR/layout.ts create mode 100644 web/i18n/ko-KR/login.ts create mode 100644 web/i18n/ko-KR/register.ts create mode 100644 web/i18n/ko-KR/run-log.ts create mode 100644 web/i18n/ko-KR/share-app.ts create mode 100644 web/i18n/ko-KR/tools.ts create mode 100644 web/i18n/ko-KR/workflow.ts diff --git a/web/i18n/README.md b/web/i18n/README.md index f4a649c79b..6e79be6b9b 100644 --- a/web/i18n/README.md +++ b/web/i18n/README.md @@ -123,7 +123,7 @@ export const languages = [ value: 'ko-KR', name: '한국어(대한민국)', example: '안녕, Dify!', - supported: false, + supported: true, }, { value: 'ru-RU', diff --git a/web/i18n/ko-KR/app-annotation.ts b/web/i18n/ko-KR/app-annotation.ts new file mode 100644 index 0000000000..7a93d17821 --- /dev/null +++ b/web/i18n/ko-KR/app-annotation.ts @@ -0,0 +1,87 @@ +const translation = { + title: '어노테이션', + name: '어노테이션 답변', + editBy: '{{author}} 님이 편집한 답변', + noData: { + title: '어노테이션이 없습니다', + description: '여기에서는 앱 디버깅 중에 어노테이션을 편집하거나 일괄적으로 어노테이션을 가져와 고품질의 응답을 생성할 수 있습니다.', + }, + table: { + header: { + question: '질문', + answer: '답변', + createdAt: '생성 날짜', + hits: '조회수', + actions: '액션', + addAnnotation: '어노테이션 추가', + bulkImport: '일괄 가져오기', + bulkExport: '일괄 내보내기', + clearAll: '모든 어노테이션 지우기', + }, + }, + editModal: { + title: '어노테이션 답변 편집', + queryName: '사용자 쿼리', + answerName: '스토리텔러 봇', + yourAnswer: '당신의 답변', + answerPlaceholder: '여기에 답변을 입력하세요', + yourQuery: '당신의 쿼리', + queryPlaceholder: '여기에 쿼리를 입력하세요', + removeThisCache: '이 어노테이션 삭제', + createdAt: '생성 날짜', + }, + addModal: { + title: '어노테이션 답변 추가', 
+ queryName: '질문', + answerName: '답변', + answerPlaceholder: '여기에 답변을 입력하세요', + queryPlaceholder: '여기에 질문을 입력하세요', + createNext: '다른 어노테이션이 달린 응답 추가', + }, + batchModal: { + title: '일괄 가져오기', + csvUploadTitle: 'CSV 파일을 여기에 드래그 앤 드롭하거나,', + browse: '찾아보기', + tip: 'CSV 파일은 다음 구조를 따라야 합니다:', + question: '질문', + answer: '답변', + contentTitle: '덩어리 내용', + content: '내용', + template: '여기서 템플릿 다운로드', + cancel: '취소', + run: '일괄 실행', + runError: '일괄 실행 실패', + processing: '일괄 처리 중', + completed: '가져오기 완료', + error: '가져오기 오류', + ok: '확인', + }, + errorMessage: { + answerRequired: '답변은 필수입니다', + queryRequired: '질문은 필수입니다', + }, + viewModal: { + annotatedResponse: '어노테이션 답변', + hitHistory: '조회 기록', + hit: '조회', + hits: '조회수', + noHitHistory: '조회 기록이 없습니다', + }, + hitHistoryTable: { + query: '쿼리', + match: '일치', + response: '응답', + source: '소스', + score: '점수', + time: '시간', + }, + initSetup: { + title: '어노테이션 답변 초기 설정', + configTitle: '어노테이션 답변 설정', + confirmBtn: '저장하고 활성화하기', + configConfirmBtn: '저장', + }, + embeddingModelSwitchTip: '어노테이션 텍스트의 임베딩 모델입니다. 모델을 변경하면 다시 임베딩되며 추가 비용이 발생합니다.', +} + +export default translation diff --git a/web/i18n/ko-KR/app-api.ts b/web/i18n/ko-KR/app-api.ts new file mode 100644 index 0000000000..fc978cddf4 --- /dev/null +++ b/web/i18n/ko-KR/app-api.ts @@ -0,0 +1,85 @@ +const translation = { + apiServer: 'API 서버', + apiKey: 'API 키', + status: '상태', + disabled: '비활성화됨', + ok: '서비스 중', + copy: '복사', + copied: '복사 완료', + play: '실행', + pause: '일시 정지', + playing: '실행 중', + loading: '로드 중', + merMaind: { + rerender: '다시 렌더링', + }, + never: '없음', + apiKeyModal: { + apiSecretKey: 'API 비밀 키', + apiSecretKeyTips: 'API 키를 보호하여 API의 남용을 방지하십시오. 프런트엔드 코드에서 평문으로 사용하지 마세요. :)', + createNewSecretKey: '새로운 비밀 키 생성', + secretKey: '비밀 키', + created: '생성 날짜', + lastUsed: '최종 사용 날짜', + generateTips: '이 키를 안전하고 접근 가능한 위치에 보관하십시오.', + }, + actionMsg: { + deleteConfirmTitle: '이 비밀 키를 삭제하시겠습니까?', + deleteConfirmTips: '이 작업은 취소할 수 없습니다.', + ok: '확인', + }, + completionMode: { + title: '완성 모드 API', + info: '문서, 요약, 번역 등 고품질 텍스트 생성을 위해 사용자 입력을 사용하는 완성 메시지 API를 사용합니다. 텍스트 생성은 Dify Prompt Engineering에서 설정한 모델 매개변수와 프롬프트 템플릿에 의존합니다.', + createCompletionApi: '완성 메시지 생성', + createCompletionApiTip: '질의 응답 모드를 지원하기 위해 완성 메시지를 생성합니다.', + inputsTips: + '(선택 사항) Prompt Eng의 변수에 해당하는 키-값 쌍으로 사용자 입력 필드를 제공합니다. 키는 변수 이름이고 값은 매개변수 값입니다. 필드 유형이 Select인 경우 전송되는 값은 미리 설정된 선택 사항 중 하나여야 합니다.', + queryTips: '사용자 입력 텍스트 내용.', + blocking: '블로킹 유형으로 실행이 완료되고 결과가 반환될 때까지 대기합니다. (처리가 오래 걸리면 요청이 중단될 수 있습니다)', + streaming: '스트리밍 반환. SSE(Server-Sent Events)를 기반으로 하는 스트리밍 반환 구현.', + messageFeedbackApi: '메시지 피드백(좋아요)', + messageFeedbackApiTip: '엔드 사용자 대신 수신된 메시지를 "좋아요" 또는 "좋아요"로 평가합니다. 이 데이터는 로그 및 주석 페이지에 표시되며 향후 모델 세부 조정에 사용됩니다.', + messageIDTip: '메시지 ID', + ratingTip: '좋아요 또는 좋아요, null은 취소', + parametersApi: '애플리케이션 매개변수 정보 가져오기', + parametersApiTip: '변수 이름, 필드 이름, 유형, 기본값을 포함한 설정된 입력 매개변수를 가져옵니다. 일반적으로 이러한 필드는 양식에 표시하거나 클라이언트 로드 후에 기본값을 입력하는 데 사용됩니다.', + }, + chatMode: { + title: '채팅 모드 API', + info: '질의 응답 형식을 사용하는 다목적 대화형 응용 프로그램에는 채팅 메시지 API를 호출하여 대화를 시작합니다. 반환된 conversation_id를 전달하여 계속된 대화를 유지합니다. 응답 매개변수 및 템플릿은 Dify Prompt Eng의 설정에 의존합니다.', + createChatApi: '채팅 메시지 생성', + createChatApiTip: '새로운 대화 메시지를 생성하거나 기존 대화를 계속합니다.', + inputsTips: + '(선택 사항) Prompt Eng의 변수에 해당하는 키-값 쌍으로 사용자 입력 필드를 제공합니다. 키는 변수 이름이고 값은 매개변수 값입니다. 필드 유형이 Select인 경우 전송되는 값은 미리 설정된 선택 사항 중 하나여야 합니다.', + queryTips: '사용자 입력/질문 내용', + blocking: '블로킹 유형으로 실행이 완료되고 결과가 반환될 때까지 대기합니다. (처리가 오래 걸리면 요청이 중단될 수 있습니다)', + streaming: '스트리밍 반환. 
SSE(Server-Sent Events)를 기반으로 하는 스트리밍 반환 구현.', + conversationIdTip: '(선택 사항) 대화 ID: 처음 대화의 경우 비워두고, 계속된 경우 컨텍스트에서 conversation_id를 전달합니다.', + messageFeedbackApi: '메시지 피드백(좋아요)', + messageFeedbackApiTip: '엔드 사용자 대신 수신된 메시지를 "좋아요" 또는 "좋아요"로 평가합니다. 이 데이터는 로그 및 주석 페이지에 표시되며 향후 모델 세부 조정에 사용됩니다.', + messageIDTip: '메시지 ID', + ratingTip: '좋아요 또는 좋아요, null은 취소', + chatMsgHistoryApi: '채팅 메시지 기록 가져오기', + chatMsgHistoryApiTip: '첫 번째 페이지는 최신의 "limit" 바를 반환합니다. 역순입니다.', + chatMsgHistoryConversationIdTip: '대화 ID', + chatMsgHistoryFirstId: '현재 페이지의 첫 번째 채팅 레코드의 ID. 기본값은 없음입니다.', + chatMsgHistoryLimit: '한 번에 반환되는 채팅 수', + conversationsListApi: '대화 목록 가져오기', + conversationsListApiTip: '현재 사용자의 세션 목록을 가져옵니다. 기본적으로 최근 20개의 세션이 반환됩니다.', + conversationsListFirstIdTip: '현재 페이지의 마지막 레코드의 ID, 기본값은 없음입니다.', + conversationsListLimitTip: '한 번에 반환되는 채팅 수', + conversationRenamingApi: '대화 이름 변경', + conversationRenamingApiTip: '대화 이름을 변경합니다. 이름은 멀티 세션 클라이언트 인터페이스에 표시됩니다.', + conversationRenamingNameTip: '새 이름', + parametersApi: '애플리케이션 매개변수 정보 가져오기', + parametersApiTip: '변수 이름, 필드 이름, 유형, 기본값을 포함한 설정된 입력 매개변수를 가져옵니다. 일반적으로 이러한 필드는 양식에 표시하거나 클라이언트 로드 후에 기본값을 입력하는 데 사용됩니다.', + }, + develop: { + requestBody: '요청 본문', + pathParams: '경로 매개변수', + query: '쿼리', + }, +} + +export default translation diff --git a/web/i18n/ko-KR/app-debug.ts b/web/i18n/ko-KR/app-debug.ts new file mode 100644 index 0000000000..65d77a75fd --- /dev/null +++ b/web/i18n/ko-KR/app-debug.ts @@ -0,0 +1,415 @@ +const translation = { + pageTitle: { + line1: '프롬프트', + line2: '엔지니어링', + }, + orchestrate: '오케스트레이션', + promptMode: { + simple: '전문가 모드로 전환하여 전체 프롬프트를 편집합니다', + advanced: '전문가 모드', + switchBack: '기본 모드로 전환', + advancedWarning: { + title: '전문가 모드로 전환되었습니다. 프롬프트를 변경하면 기본 모드로 돌아갈 수 없습니다.', + description: '전문가 모드에서는 전체 프롬프트를 편집할 수 있습니다.', + learnMore: '자세히 알아보기', + ok: '확인', + }, + operation: { + addMessage: '메시지 추가', + }, + contextMissing: '컨텍스트 컴포넌트를 찾을 수 없습니다. 프롬프트의 효과가 충분하지 않을 수 있습니다.', + }, + operation: { + applyConfig: '배포', + resetConfig: '재설정', + debugConfig: '디버그', + addFeature: '기능 추가', + automatic: '자동', + stopResponding: '응답 중지', + agree: '좋아요', + disagree: '싫어요', + cancelAgree: '좋아요 취소', + cancelDisagree: '싫어요 취소', + userAction: '사용자', + }, + notSetAPIKey: { + title: 'LLM 제공자 키가 설정되지 않았습니다', + trailFinished: '트라이얼 종료', + description: 'LLM 제공자 키가 설정되지 않았습니다. 
디버깅하기 전에 설정해야 합니다.', + settingBtn: '설정으로 이동', + }, + trailUseGPT4Info: { + title: '현재 gpt-4는 지원되지 않습니다', + description: 'gpt-4를 사용하려면 API 키를 설정해야 합니다.', + }, + feature: { + groupChat: { + title: '채팅 기능 강화', + description: '사전 대화 설정을 추가하면 사용자 경험이 향상됩니다.', + }, + groupExperience: { + title: '경험 강화', + }, + conversationOpener: { + title: '대화 시작', + description: '채팅 앱에서 AI가 사용자에게 처음으로 적극적으로 말을 건다면 일반적으로 환영 메시지로 사용됩니다.', + }, + suggestedQuestionsAfterAnswer: { + title: '팔로우업', + description: '다음 질문 제안을 설정하면 사용자에게 더 나은 채팅이 제공됩니다.', + resDes: '사용자의 다음 질문에 대한 3가지 제안.', + tryToAsk: '질문해보세요', + }, + moreLikeThis: { + title: '유사한 항목', + description: '여러 텍스트를 한 번에 생성하고 편집하여 계속해서 생성합니다.', + generateNumTip: '생성 횟수', + tip: '이 기능을 사용하면 추가적인 토큰 오버헤드가 발생합니다', + }, + speechToText: { + title: '음성에서 텍스트로', + description: '활성화하면 음성 입력을 사용할 수 있습니다.', + resDes: '음성 입력이 활성화되어 있습니다', + }, + textToSpeech: { + title: '텍스트에서 음성으로', + description: '활성화하면 텍스트를 음성으로 변환할 수 있습니다.', + resDes: '텍스트에서 오디오로의 변환이 활성화되어 있습니다', + }, + citation: { + title: '인용 및 소유권', + description: '활성화하면 생성된 콘텐츠의 소스 문서 및 소유권 섹션이 표시됩니다.', + resDes: '인용 및 소유권이 활성화되어 있습니다', + }, + annotation: { + title: '주석 응답', + description: '유사한 사용자 질문과 우선 일치를 위해 캐시에 고품질 응답을 수동으로 추가할 수 있습니다.', + resDes: '주석 응답이 활성화되어 있습니다', + scoreThreshold: { + title: '점수 임계값', + description: '주석 응답의 유사성 임계값을 설정하는 데 사용됩니다.', + easyMatch: '간단한 일치', + accurateMatch: '정확한 일치', + }, + matchVariable: { + title: '매치 변수', + choosePlaceholder: '매치 변수 선택', + }, + cacheManagement: '주석', + cached: '주석이 있는', + remove: '삭제', + removeConfirm: '이 주석을 삭제하시겠습니까?', + add: '주석 추가', + edit: '주석 편집', + }, + dataSet: { + title: '컨텍스트', + noData: '지식을 컨텍스트로 가져올 수 있습니다', + words: '단어', + textBlocks: '텍스트 블록', + selectTitle: '참조할 지식 선택', + selected: '선택한 지식', + noDataSet: '지식이 없습니다', + toCreate: '생성하기', + notSupportSelectMulti: '현재 다중 선택은 지원되지 않습니다', + queryVariable: { + title: '쿼리 변수', + tip: '이 변수는 컨텍스트 조회에 사용되는 쿼리 입력으로 사용되며, 이 변수 입력에 관련된 컨텍스트 정보를 가져옵니다.', + choosePlaceholder: '쿼리 변수 선택', + noVar: '변수 없음', + noVarTip: '변수 섹션 하단에서 변수를 생성하십시오', + unableToQueryDataSet: '지식을 쿼리할 수 없음', + unableToQueryDataSetTip: '지식 쿼리에 실패했습니다. 정상적으로 쿼리할 수 없는 경우, 컨텍스트 섹션에서 컨텍스트 쿼리 변수를 다시 선택하십시오.', + ok: '확인', + contextVarNotEmpty: '컨텍스트 쿼리 변수를 비울 수 없습니다', + deleteContextVarTitle: '변수 "{{varName}}"를 삭제하시겠습니까?', + deleteContextVarTip: '이 변수는 컨텍스트 쿼리 변수로 설정되어 있어 삭제하면 지식의 정상적인 사용에 영향을 미칩니다. 삭제하려면 컨텍스트 섹션에서 다시 선택하십시오.', + }, + }, + tools: { + title: '도구', + tips: '도구는 사용자 입력이나 변수를 요청 매개변수로 사용하여 외부 데이터를 컨텍스트로 쿼리하는 표준적인 API 호출 방법을 제공합니다.', + toolsInUse: '{{count}}개의 도구가 사용 중', + modal: { + title: '도구', + toolType: { + title: '도구 유형', + placeholder: '도구 유형 선택', + }, + name: { + title: '이름', + placeholder: '이름 입력', + }, + variableName: { + title: '변수 이름', + placeholder: '변수 이름 입력', + }, + }, + }, + conversationHistory: { + title: '대화 기록', + description: '대화 역할에 접두사 이름을 설정합니다', + tip: '대화 기록이 활성화되어 있지 않습니다. 
위의 프롬프트에 를 추가하십시오.', + learnMore: '자세히 알아보기', + editModal: { + title: '대화 역할 이름 편집', + userPrefix: '사용자 접두사', + assistantPrefix: '어시스턴트 접두사', + }, + }, + toolbox: { + title: '도구 상자', + }, + moderation: { + title: '콘텐츠 모더레이션', + description: '모더레이션 API를 사용하거나 기밀 단어 목록을 유지함으로써 모델 출력을 안전하게 합니다.', + allEnabled: '입력/출력 콘텐츠가 모두 활성화되어 있습니다', + inputEnabled: '입력 콘텐츠가 활성화되어 있습니다', + outputEnabled: '출력 콘텐츠가 활성화되어 있습니다', + modal: { + title: '콘텐츠 모더레이션 설정', + provider: { + title: '제공자', + openai: 'OpenAI 모더레이션', + openaiTip: { + prefix: 'OpenAI 모더레이션에는', + suffix: '에 OpenAI API 키가 설정되어 있어야 합니다.', + }, + keywords: '키워드', + }, + keywords: { + tip: '한 줄에 하나씩, 줄 바꿈으로 입력하세요. 한 줄 당 최대 100자.', + placeholder: '한 줄씩 입력하세요', + line: '줄', + }, + content: { + input: '입력 콘텐츠 모더레이션', + output: '출력 콘텐츠 모더레이션', + preset: '프리셋 응답', + placeholder: '프리셋 응답 내용을 입력하세요', + condition: '최소한 하나의 입력 및 출력 콘텐츠를 모더레이션합니다', + fromApi: '프리셋 응답은 API에서 반환됩니다', + errorMessage: '프리셋 응답은 비워둘 수 없습니다', + supportMarkdown: '마크다운이 지원됩니다', + }, + openaiNotConfig: { + before: 'OpenAI 모더레이션에는', + after: '에 OpenAI API 키가 설정되어 있어야 합니다.', + }, + }, + }, + }, + automatic: { + title: '자동 어플리케이션 오케스트레이션', + description: '시나리오를 설명하세요. Dify가 어플리케이션을 자동으로 오케스트레이션 합니다.', + intendedAudience: '누가 대상이 되는지 설명하세요.', + intendedAudiencePlaceHolder: '예: 학생', + solveProblem: '어떤 문제를 AI가 해결할 것으로 예상하나요?', + solveProblemPlaceHolder: '예: 학업 성적 평가', + generate: '생성', + audiencesRequired: '대상이 필요합니다', + problemRequired: '문제가 필요합니다', + resTitle: '다음 어플리케이션을 자동으로 오케스트레이션 했습니다.', + apply: '이 오케스트레이션을 적용하기', + noData: '왼쪽에 사용 예시를 기술하고, 오케스트레이션 미리보기가 여기에 나타납니다.', + loading: '어플리케이션 오케스트레이션을 실행 중입니다...', + overwriteTitle: '기존 구성을 덮어쓰시겠습니까?', + overwriteMessage: '이 오케스트레이션을 적용하면 기존 구성이 덮어쓰여집니다.', + }, + resetConfig: { + title: '리셋을 확인하시겠습니까?', + message: '변경 사항이 취소되고, 마지막으로 공개된 구성이 복원됩니다.', + }, + errorMessage: { + nameOfKeyRequired: '키 이름: {{key}} 이 필요합니다', + valueOfVarRequired: '{{key}}의 값은 비워둘 수 없습니다', + queryRequired: '요청 텍스트가 필요합니다.', + waitForResponse: '이전 메시지에 대한 응답이 완료될 때까지 기다려 주세요.', + waitForBatchResponse: '배치 작업에 대한 응답이 완료될 때까지 기다려 주세요.', + notSelectModel: '모델을 선택해 주세요', + waitForImgUpload: '이미지 업로드가 완료될 때까지 기다려 주세요', + }, + chatSubTitle: '단계', + completionSubTitle: '접두사 프롬프트', + promptTip: '프롬프트는 AI의 응답을 지시하고 제한하여 유도합니다. {{input}}과 같은 변수를 삽입하세요. 이 프롬프트는 사용자에게 표시되지 않습니다.', + formattingChangedTitle: '포맷이 변경되었습니다', + formattingChangedText: '포맷을 변경하면 디버그 영역이 재설정됩니다. 계속하시겠습니까?', + variableTitle: '변수', + variableTip: '사용자는 양식에 변수를 입력하고, 프롬프트 내의 변수가 자동으로 대체됩니다.', + notSetVar: '변수를 사용하면 사용자는 양식에 입력할 때 프롬프트의 단어나 시작 단어를 소개할 수 있습니다. "{{input}}"을 프롬프트 단어에 입력해 보세요.', + autoAddVar: '프리프롬프트에서 참조되는 미정의 변수가 있습니다. 사용자 입력 양식에 추가하시겠습니까?', + variableTable: { + key: '변수 키', + name: '사용자 입력 필드명', + optional: '옵션', + type: '입력 타입', + action: '액션', + typeString: '문자열', + typeSelect: '선택', + }, + varKeyError: { + canNoBeEmpty: '변수 키를 비울 수 없습니다', + tooLong: '변수 키: {{key}}가 너무 깁니다. 30자를 넘을 수 없습니다', + notValid: '변수 키: {{key}}가 유효하지 않습니다. 문자, 숫자, 밑줄만 포함할 수 있습니다', + notStartWithNumber: '변수 키: {{key}}는 숫자로 시작할 수 없습니다', + keyAlreadyExists: '변수 키: {{key}}는 이미 존재합니다', + }, + otherError: { + promptNoBeEmpty: '프롬프트를 비울 수 없습니다', + historyNoBeEmpty: '프롬프트에 대화 기록을 설정해야 합니다', + queryNoBeEmpty: '프롬프트에 쿼리를 설정해야 합니다', + }, + variableConig: { + 'addModalTitle': '입력 필드 추가', + 'editModalTitle': '입력 필드 편집', + 'description': '{{varName}} 변수 설정', + 'fieldType': '필드 타입', + 'string': '짧은 텍스트', + 'text-input': '짧은 텍스트', + 'paragraph': '문단', + 'select': '선택', + 'number': '숫자', + 'notSet': '설정되지 않음. 
프롬프트의 프리픽스에 {{input}}을 입력해 보세요.', + 'stringTitle': '폼 텍스트 상자 옵션', + 'maxLength': '최대 길이', + 'options': '옵션', + 'addOption': '옵션 추가', + 'apiBasedVar': 'API 기반 변수', + 'varName': '변수명', + 'labelName': '레이블명', + 'inputPlaceholder': '입력하세요', + 'required': '필수', + 'errorMsg': { + varNameRequired: '변수명은 필수입니다', + labelNameRequired: '레이블명은 필수입니다', + varNameCanBeRepeat: '변수명은 중복될 수 없습니다', + atLeastOneOption: '적어도 하나의 옵션이 필요합니다', + optionRepeat: '옵션이 중복되어 있습니다', + }, + }, + vision: { + name: '비전', + description: '비전을 활성화하면 모델이 이미지를 받아와 관련 질문에 답변할 수 있습니다.', + settings: '설정', + visionSettings: { + title: '비전 설정', + resolution: '해상도', + resolutionTooltip: `저해상도는 모델에게 512 x 512 해상도의 저해상도 이미지를 제공하여 65 토큰의 예산으로 이미지를 표현합니다. 이로 인해 API는 더 빠른 응답을 제공하며 높은 세부 정보가 필요한 경우 토큰 소모를 늘립니다. + \n + 고해상도는 먼저 모델에게 저해상도 이미지를 보여주고, 그 후 입력 이미지 크기에 따라 512px의 정사각형 세부 사진을 만듭니다. 각 세부 사진에 대해 129 토큰의 예산을 사용합니다.`, + high: '고', + low: '저', + uploadMethod: '업로드 방식', + both: '모두', + localUpload: '로컬 업로드', + url: 'URL', + uploadLimit: '업로드 제한', + }, + }, + voice: { + name: '음성', + defaultDisplay: '기본 음성', + description: '텍스트 읽기 음성 설정', + settings: '설정', + voiceSettings: { + title: '음성 설정', + language: '언어', + resolutionTooltip: '텍스트 읽기 음성 언어를 지원합니다.', + voice: '음성', + }, + }, + openingStatement: { + title: '대화 시작', + add: '추가', + writeOpener: '오프너 작성', + placeholder: '여기에 오프너 메시지를 작성하세요. 변수를 사용할 수 있습니다. {{variable}}를 입력해보세요.', + openingQuestion: '시작 질문', + noDataPlaceHolder: '사용자와의 대화를 시작하면 대화 애플리케이션에서 그들과 더 밀접한 관계를 구축하는 데 도움이 됩니다.', + varTip: '변수를 사용할 수 있습니다. {{variable}}를 입력해보세요.', + tooShort: '대화 시작에는 최소 20 단어의 초기 프롬프트가 필요합니다.', + notIncludeKey: '초기 프롬프트에 변수 {{key}}가 포함되어 있지 않습니다. 초기 프롬프트에 추가하세요.', + }, + modelConfig: { + model: '모델', + setTone: '응답 톤 설정', + title: '모델 및 매개변수', + modeType: { + chat: '채팅', + completion: '완성', + }, + }, + inputs: { + title: '디버그 및 미리보기', + noPrompt: '프리프롬프트 입력란에 몇 가지 프롬프트를 작성해보세요.', + userInputField: '사용자 입력 필드', + noVar: '변수 값을 입력하세요. 새로운 세션이 시작될 때마다 프롬프트 단어가 자동으로 대체됩니다.', + chatVarTip: '변수 값을 입력하세요. 새로운 세션이 시작될 때마다 프롬프트 단어가 자동으로 대체됩니다.', + completionVarTip: '변수 값을 입력하세요. 질문이 전송될 때마다 프롬프트 단어가 자동으로 대체됩니다.', + previewTitle: '프롬프트 미리보기', + queryTitle: '쿼리 내용', + queryPlaceholder: '요청 텍스트를 입력하세요.', + run: '실행', + }, + result: '출력 텍스트', + datasetConfig: { + settingTitle: '리트리벌 설정', + knowledgeTip: '지식을 추가하려면 "+" 버튼을 클릭하세요.', + retrieveOneWay: { + title: 'N-to-1 리트리벌', + description: '사용자 의도와 지식 설명을 기반으로, 에이전트가 자율적으로 최적의 지식을 선택합니다. 개별적이고 제한된 지식을 가진 애플리케이션에 적합합니다.', + }, + retrieveMultiWay: { + title: '멀티패스 리트리벌', + description: '사용자 의도에 따라 모든 지식을 쿼리하고, 관련 텍스트를 여러 소스에서 가져와 다시 순위를 매긴 후 사용자 쿼리에 가장 적합한 결과를 선택합니다. 재순위 모델 API의 구성이 필요합니다.', + }, + rerankModelRequired: '재순위 모델이 필요합니다', + params: '매개변수', + top_k: '상위 K', + top_kTip: '사용자 질문에 가장 유사한 청크를 필터링하는 데 사용됩니다. 
시스템은 선택한 모델의 max_tokens에 따라 동적으로 상위 K 값을 조정합니다.', + score_threshold: '점수 임계값', + score_thresholdTip: '청크 필터링의 유사성 임계값을 설정하는 데 사용됩니다.', + retrieveChangeTip: '인덱스 모드 및 리트리벌 모드를 변경하면 이 지식과 관련된 애플리케이션에 영향을 줄 수 있습니다.', + }, + debugAsSingleModel: '단일 모델로 디버그', + debugAsMultipleModel: '다중 모델로 디버그', + duplicateModel: '복제', + publishAs: '로 게시', + assistantType: { + name: '어시스턴트 유형', + chatAssistant: { + name: '기본 어시스턴트', + description: '대규모 언어 모델을 사용하여 채팅 기반의 어시스턴트를 구축합니다', + }, + agentAssistant: { + name: '에이전트 어시스턴트', + description: '작업을 자율적으로 완료하기 위한 도구를 선택할 수 있는 인텔리전트 에이전트를 구축합니다', + }, + }, + agent: { + agentMode: '에이전트 모드', + agentModeDes: '에이전트의 추론 모드 유형을 설정합니다', + agentModeType: { + ReACT: 'ReAct', + functionCall: '함수 호출', + }, + setting: { + name: '에이전트 설정', + description: '에이전트 어시스턴트 설정에서는 에이전트 모드나 빌트인 프롬프트 등 고급 기능을 설정할 수 있습니다. 에이전트 유형에서만 사용할 수 있습니다.', + maximumIterations: { + name: '최대 반복 횟수', + description: '에이전트 어시스턴트가 실행할 수 있는 반복 횟수를 제한합니다', + }, + }, + buildInPrompt: '빌트인 프롬프트', + firstPrompt: '첫 번째 프롬프트', + nextIteration: '다음 반복', + promptPlaceholder: '여기에 프롬프트를 입력하세요', + tools: { + name: '도구', + description: '도구를 사용하여 인터넷 검색이나 과학적 계산 등 LLM의 기능을 확장할 수 있습니다', + enabled: '활성화됨', + }, + }, +} + +export default translation diff --git a/web/i18n/ko-KR/app-log.ts b/web/i18n/ko-KR/app-log.ts new file mode 100644 index 0000000000..61bf5a7f8e --- /dev/null +++ b/web/i18n/ko-KR/app-log.ts @@ -0,0 +1,92 @@ +const translation = { + title: '로그', + description: '로그는 애플리케이션 실행 상태를 기록합니다. 사용자 입력 및 AI 응답이 포함됩니다.', + dateTimeFormat: 'YYYY/MM/DD HH:mm', + table: { + header: { + time: '시간', + endUser: '엔드 유저', + input: '입력', + output: '출력', + summary: '요약', + messageCount: '메시지 수', + userRate: '사용자 비율', + adminRate: '관리자 비율', + startTime: '시작 시간', + status: '상태', + runtime: '실행 시간', + tokens: '토큰', + user: '엔드 유저', + version: '버전', + }, + pagination: { + previous: '이전', + next: '다음', + }, + empty: { + noChat: '아직 대화가 없습니다', + noOutput: '출력이 없습니다', + element: { + title: '여기 누구 있어요?', + content: + '여기에서 엔드 유저와 AI 애플리케이션 간 상호 작용을 관찰하고 주석을 달아 AI 정확도를 계속 향상시킵니다. 
웹 앱을 공유하거나 테스트하고 다시 이 페이지로 돌아오세요.', + }, + }, + }, + detail: { + time: '시간', + conversationId: '대화 ID', + promptTemplate: '프롬프트 템플릿', + promptTemplateBeforeChat: '채팅 전 프롬프트 템플릿 - 시스템 메시지로', + annotationTip: '{{user}}에 의해 향상됨', + timeConsuming: '시간 소요', + second: '초', + tokenCost: '토큰 비용', + loading: '로드 중', + operation: { + like: '좋아요', + dislike: '좋아요 취소', + addAnnotation: '향상 추가', + editAnnotation: '향상 편집', + annotationPlaceholder: 'AI가 응답할 것으로 예상하는 답변을 입력하여 향후 모델 세부 조정 및 텍스트 생성 품질 지속적 향상을 위해 개선할 수 있습니다.', + }, + variables: '변수', + uploadImages: '업로드된 이미지', + }, + filter: { + period: { + today: '오늘', + last7days: '지난 7일', + last4weeks: '지난 4주', + last3months: '지난 3개월', + last12months: '지난 12개월', + monthToDate: '월 초부터 오늘까지', + quarterToDate: '분기 초부터 오늘까지', + yearToDate: '연 초부터 오늘까지', + allTime: '모든 기간', + }, + annotation: { + all: '모두', + annotated: '향상 주석 ({{count}} 개 항목)', + not_annotated: '주석 없음', + }, + }, + workflowTitle: '워크플로우 로그', + workflowSubtitle: '이 로그는 Automate의 작업을 기록했습니다.', + runDetail: { + title: '대화 로그', + workflowTitle: '로그 세부 정보', + }, + promptLog: '프롬프트 로그', + agentLog: '에이전트 로그', + viewLog: '로그 보기', + agentLogDetail: { + agentMode: '에이전트 모드', + toolUsed: '사용된 도구', + iterations: '반복', + iteration: '반복', + finalProcessing: '최종 처리', + }, +} + +export default translation diff --git a/web/i18n/ko-KR/app-overview.ts b/web/i18n/ko-KR/app-overview.ts new file mode 100644 index 0000000000..b253d63072 --- /dev/null +++ b/web/i18n/ko-KR/app-overview.ts @@ -0,0 +1,143 @@ +const translation = { + welcome: { + firstStepTip: '시작하려면,', + enterKeyTip: '아래에 OpenAI API 키를 입력하세요', + getKeyTip: 'OpenAI 대시보드에서 API 키를 가져오세요', + placeholder: '나의 OpenAI API 키 (예: sk-xxxx)', + }, + apiKeyInfo: { + cloud: { + trial: { + title: '{{providerName}} 트라이얼 쿼터를 사용 중입니다.', + description: '트라이얼 쿼터는 테스트용으로 제공됩니다. 트라이얼 쿼터 소진 전에 고유한 모델 제공자를 설정하거나 추가 쿼터를 구매하세요.', + }, + exhausted: { + title: '트라이얼 쿼터가 소진되었습니다. API 키를 설정하세요.', + description: '트라이얼 쿼터가 소진되었습니다. 
고유한 모델 제공자를 설정하거나 추가 쿼터를 구매하세요.', + }, + }, + selfHost: { + title: { + row1: '시작하려면,', + row2: '먼저 모델 제공자를 설정하세요.', + }, + }, + callTimes: '요청 횟수', + usedToken: '사용된 토큰', + setAPIBtn: '모델 제공자 설정으로 이동', + tryCloud: '또는 Dify의 클라우드 버전을 무료로 체험해보세요', + }, + overview: { + title: '개요', + appInfo: { + explanation: '사용하기 쉬운 AI 웹앱', + accessibleAddress: '공개 URL', + preview: '미리보기', + regenerate: '재생성', + preUseReminder: '계속하기 전에 웹앱을 활성화하세요.', + settings: { + entry: '설정', + title: '웹앱 설정', + webName: '웹앱 이름', + webDesc: '웹앱 설명', + webDescTip: '이 텍스트는 클라이언트 측에서 표시되며, 애플리케이션의 사용 방법에 대한 기본적인 안내를 제공합니다.', + webDescPlaceholder: '웹앱 설명을 입력하세요', + language: '언어', + more: { + entry: '추가 설정 보기', + copyright: '저작권', + copyRightPlaceholder: '저작권자 또는 조직 이름을 입력하세요', + privacyPolicy: '개인정보 처리방침', + privacyPolicyPlaceholder: '개인정보 처리방침 링크를 입력하세요', + privacyPolicyTip: '방문자가 애플리케이션이 수집하는 데이터를 이해하고, Dify의 개인정보 처리방침을 참조할 수 있도록 합니다.', + }, + }, + embedded: { + entry: '임베드', + title: '웹사이트에 임베드하기', + explanation: '챗봇 앱을 웹사이트에 임베드하는 방법을 선택하세요.', + iframe: '웹사이트의 원하는 위치에 챗봇 앱을 추가하려면 이 iframe을 HTML 코드에 추가하세요.', + scripts: '웹사이트의 우측 하단에 챗봇 앱을 추가하려면 이 코드를 HTML에 추가하세요.', + chromePlugin: 'Dify Chatbot Chrome 확장 프로그램 설치', + copied: '복사되었습니다', + copy: '복사', + }, + qrcode: { + title: '공유용 QR 코드', + scan: '앱 공유를 스캔하세요', + download: 'QR 코드 다운로드', + }, + customize: { + way: '방법', + entry: '사용자화', + title: 'AI 웹앱 사용자화', + explanation: '시나리오와 스타일 요구에 따라 웹앱의 프론트엔드를 사용자화할 수 있습니다.', + way1: { + name: '클라이언트 코드를 포크하여 수정하고 Vercel에 배포하기 (권장)', + step1: '클라이언트 코드를 포크하여 수정합니다', + step1Tip: '여기를 클릭하여 소스 코드를 GitHub 계정에 포크하고 코드를 수정하세요', + step1Operation: 'Dify-WebClient', + step2: 'Vercel에 배포합니다', + step2Tip: '여기를 클릭하여 리포지토리를 Vercel에 임포트하고 배포하세요', + step2Operation: '리포지토리 임포트', + step3: '환경 변수를 설정합니다', + step3Tip: 'Vercel에 다음 환경 변수를 추가하세요', + }, + way2: { + name: '클라이언트 측 코드를 작성하여 API를 호출하고 서버에 배포합니다', + operation: '문서', + }, + }, + }, + apiInfo: { + title: '백엔드 서비스 API', + explanation: '개발자의 애플리케이션에 쉽게 통합할 수 있습니다', + accessibleAddress: '서비스 API 엔드포인트', + doc: 'API 레퍼런스', + }, + status: { + running: '서비스 중', + disable: '비활성', + }, + }, + analysis: { + title: '분석', + ms: 'ms', + tokenPS: '토큰/초', + totalMessages: { + title: '총 메시지 수', + explanation: '일일 AI 상호작용 수; 엔지니어링/디버깅 목적의 프롬프트는 제외됩니다.', + }, + activeUsers: { + title: '활성 사용자 수', + explanation: 'AI와의 Q&A에 참여하는 고유 사용자 수; 엔지니어링/디버깅 목적의 프롬프트는 제외됩니다.', + }, + tokenUsage: { + title: '토큰 사용량', + explanation: '애플리케이션의 언어 모델의 일일 토큰 사용량을 반영하여 비용 관리에 도움이 됩니다.', + consumed: '소비된 토큰', + }, + avgSessionInteractions: { + title: '평균 세션 상호작용 수', + explanation: '사용자와 AI의 연속적인 커뮤니케이션 수; 대화형 애플리케이션을 위한 것입니다.', + }, + avgUserInteractions: { + title: '평균 사용자 상호작용 수', + explanation: '사용자의 일일 사용 빈도를 반영합니다. 이 지표는 사용자의 임계를 반영합니다.', + }, + userSatisfactionRate: { + title: '사용자 만족도율', + explanation: '1,000개의 메시지 당 "좋아요" 수입니다. 이는 사용자가 매우 만족한 응답의 비율을 나타냅니다.', + }, + avgResponseTime: { + title: '평균 응답 시간', + explanation: 'AI가 처리/응답하는 시간(밀리초); 텍스트 기반 애플리케이션을 위한 것입니다.', + }, + tps: { + title: '토큰 출력 속도', + explanation: 'LLM의 성능을 측정합니다. 
요청 시작부터 출력 완료까지의 LLM의 토큰 출력 속도를 계산합니다.', + }, + }, +} + +export default translation diff --git a/web/i18n/ko-KR/app.ts b/web/i18n/ko-KR/app.ts new file mode 100644 index 0000000000..d9fb7b3788 --- /dev/null +++ b/web/i18n/ko-KR/app.ts @@ -0,0 +1,86 @@ +const translation = { + createApp: '앱 만들기', + types: { + all: '모두', + chatbot: '챗봇', + agent: '에이전트', + workflow: '워크플로우', + completion: '완성', + }, + duplicate: '복제', + duplicateTitle: '앱 복제하기', + export: 'DSL 내보내기', + exportFailed: 'DSL 내보내기 실패', + importDSL: 'DSL 파일 가져오기', + createFromConfigFile: 'DSL 파일에서 생성하기', + deleteAppConfirmTitle: '이 앱을 삭제하시겠습니까?', + deleteAppConfirmContent: '앱을 삭제하면 복구할 수 없습니다. 사용자는 더 이상 앱에 액세스할 수 없으며 모든 프롬프트 설정 및 로그가 영구적으로 삭제됩니다.', + appDeleted: '앱이 삭제되었습니다', + appDeleteFailed: '앱 삭제 실패', + join: '커뮤니티에 참여하기', + communityIntro: '여러 채널에서 팀원, 기여자, 개발자들과 토론하세요.', + roadmap: '로드맵 보기', + newApp: { + startFromBlank: '빈 상태로 시작', + startFromTemplate: '템플릿에서 시작', + captionAppType: '어떤 종류의 앱을 만들어 보시겠어요?', + chatbotDescription: '대화형 어플리케이션을 만듭니다. 질문과 답변 형식을 사용하여 다단계 대화를 지원합니다.', + completionDescription: '프롬프트를 기반으로 품질 높은 텍스트를 생성하는 어플리케이션을 만듭니다. 기사, 요약, 번역 등을 생성할 수 있습니다.', + completionWarning: '이 종류의 앱은 더 이상 지원되지 않습니다.', + agentDescription: '작업을 자동으로 완료하는 지능형 에이전트를 만듭니다.', + workflowDescription: '고도로 사용자 지정 가능한 워크플로우에 기반한 고품질 텍스트 생성 어플리케이션을 만듭니다. 경험 있는 사용자를 위한 것입니다.', + workflowWarning: '현재 베타 버전입니다.', + chatbotType: '챗봇 오케스트레이션 방식', + basic: '기본', + basicTip: '초보자용. 나중에 Chatflow로 전환할 수 있습니다.', + basicFor: '초보자용', + basicDescription: '기본 오케스트레이션은 내장된 프롬프트를 수정할 수 없고 간단한 설정을 사용하여 챗봇 앱을 오케스트레이션합니다. 초보자용입니다.', + advanced: 'Chatflow', + advancedFor: '고급 사용자용', + advancedDescription: '워크플로우 오케스트레이션은 워크플로우 형식으로 챗봇을 오케스트레이션하며 내장된 프롬프트를 편집할 수 있는 고급 사용자 정의 기능을 제공합니다. 경험이 많은 사용자용입니다.', + captionName: '앱 아이콘과 이름', + appNamePlaceholder: '앱 이름을 입력하세요', + captionDescription: '설명', + appDescriptionPlaceholder: '앱 설명을 입력하세요', + useTemplate: '이 템플릿 사용', + previewDemo: '데모 미리보기', + chatApp: '어시스턴트', + chatAppIntro: '대화형 어플리케이션을 만들고 싶어요. 이 어플리케이션은 질문과 답변 형식을 사용하여 다단계 대화를 지원합니다.', + agentAssistant: '새로운 에이전트 어시스턴트', + completeApp: '텍스트 생성기', + completeAppIntro: '프롬프트를 기반으로 품질 높은 텍스트를 생성하는 어플리케이션을 만들고 싶어요. 기사, 요약, 번역 등을 생성합니다.', + showTemplates: '템플릿 선택', + hideTemplates: '모드 선택으로 돌아가기', + Create: '만들기', + Cancel: '취소', + nameNotEmpty: '이름을 입력하세요', + appTemplateNotSelected: '템플릿을 선택하세요', + appTypeRequired: '앱 종류를 선택하세요', + appCreated: '앱이 생성되었습니다', + appCreateFailed: '앱 생성 실패', + }, + editApp: '정보 편집하기', + editAppTitle: '앱 정보 편집하기', + editDone: '앱 정보가 업데이트되었습니다', + editFailed: '앱 정보 업데이트 실패', + emoji: { + ok: '확인', + cancel: '취소', + }, + switch: '워크플로우 오케스트레이션으로 전환하기', + switchTipStart: '새로운 앱의 복사본이 생성되어 새로운 복사본이 워크플로우 오케스트레이션으로 전환됩니다. 
새로운 복사본은 ', + switchTip: '전환을 허용하지 않습니다', + switchTipEnd: ' 기본적인 오케스트레이션으로 되돌릴 수 없습니다.', + switchLabel: '생성될 앱의 복사본', + removeOriginal: '원본 앱 제거하기', + switchStart: '전환 시작하기', + typeSelector: { + all: '모든 종류', + chatbot: '챗봇', + agent: '에이전트', + workflow: '워크플로우', + completion: '완성', + }, +} + +export default translation diff --git a/web/i18n/ko-KR/billing.ts b/web/i18n/ko-KR/billing.ts new file mode 100644 index 0000000000..ca6a361e06 --- /dev/null +++ b/web/i18n/ko-KR/billing.ts @@ -0,0 +1,113 @@ +const translation = { + currentPlan: '현재 요금제', + upgradeBtn: { + plain: '요금제 업그레이드', + encourage: '지금 업그레이드', + encourageShort: '업그레이드', + }, + viewBilling: '청구 및 구독 관리', + buyPermissionDeniedTip: '구독하려면 엔터프라이즈 관리자에게 문의하세요', + plansCommon: { + title: '당신에게 맞는 요금제를 선택하세요', + yearlyTip: '연간 구독 시 2개월 무료!', + mostPopular: '가장 인기 있는', + planRange: { + monthly: '월간', + yearly: '연간', + }, + month: '월', + year: '년', + save: '절약 ', + free: '무료', + currentPlan: '현재 요금제', + contractSales: '영업에 문의하기', + contractOwner: '팀 관리자에게 문의하기', + startForFree: '무료로 시작하기', + getStartedWith: '시작하기 ', + contactSales: '영업에 문의하기', + talkToSales: '영업과 상담하기', + modelProviders: '모델 제공자', + teamMembers: '팀 멤버', + buildApps: '앱 만들기', + vectorSpace: '벡터 공간', + vectorSpaceBillingTooltip: '1MB당 약 120만 글자의 벡터화된 데이터를 저장할 수 있습니다 (OpenAI Embeddings을 기반으로 추정되며 모델에 따라 다릅니다).', + vectorSpaceTooltip: '벡터 공간은 LLM이 데이터를 이해하는 데 필요한 장기 기억 시스템입니다.', + documentProcessingPriority: '문서 처리 우선순위', + documentProcessingPriorityTip: '더 높은 문서 처리 우선순위를 원하시면 요금제를 업그레이드하세요.', + documentProcessingPriorityUpgrade: '더 높은 정확성과 빠른 속도로 데이터를 처리합니다.', + priority: { + 'standard': '표준', + 'priority': '우선', + 'top-priority': '최우선', + }, + logsHistory: '로그 기록', + customTools: '사용자 정의 도구', + unavailable: '사용 불가', + days: '일', + unlimited: '무제한', + support: '지원', + supportItems: { + communityForums: '커뮤니티 포럼', + emailSupport: '이메일 지원', + priorityEmail: '우선 이메일 및 채팅 지원', + logoChange: '로고 변경', + SSOAuthentication: 'SSO 인증', + personalizedSupport: '개별 지원', + dedicatedAPISupport: '전용 API 지원', + customIntegration: '사용자 정의 통합 및 지원', + ragAPIRequest: 'RAG API 요청', + agentMode: '에이전트 모드', + workflow: '워크플로우', + }, + comingSoon: '곧 출시 예정', + member: '멤버', + memberAfter: '멤버', + messageRequest: { + title: '메시지 크레딧', + tooltip: 'GPT 제외 다양한 요금제에서의 메시지 호출 쿼터 (gpt4 제외). 
제한을 초과하는 메시지는 OpenAI API 키를 사용합니다.', + }, + annotatedResponse: { + title: '주석 응답 쿼터', + tooltip: '수동으로 편집 및 응답 주석 달기로 앱의 사용자 정의 가능한 고품질 질의응답 기능을 제공합니다 (채팅 앱에만 해당).', + }, + ragAPIRequestTooltip: 'Dify의 지식베이스 처리 기능을 호출하는 API 호출 수를 나타냅니다.', + receiptInfo: '팀 소유자 및 팀 관리자만 구독 및 청구 정보를 볼 수 있습니다', + }, + plans: { + sandbox: { + name: '샌드박스', + description: 'GPT 무료 체험 200회', + includesTitle: '포함된 항목:', + }, + professional: { + name: '프로페셔널', + description: '개인 및 소규모 팀을 위해 더 많은 파워를 저렴한 가격에 제공합니다.', + includesTitle: '무료 플랜에 추가로 포함된 항목:', + }, + team: { + name: '팀', + description: '제한 없이 협업하고 최고의 성능을 누리세요.', + includesTitle: '프로페셔널 플랜에 추가로 포함된 항목:', + }, + enterprise: { + name: '엔터프라이즈', + description: '대규모 미션 크리티컬 시스템을 위한 완전한 기능과 지원을 제공합니다.', + includesTitle: '팀 플랜에 추가로 포함된 항목:', + }, + }, + vectorSpace: { + fullTip: '벡터 공간이 가득 찼습니다.', + fullSolution: '더 많은 공간을 얻으려면 요금제를 업그레이드하세요.', + }, + apps: { + fullTipLine1: '더 많은 앱을 생성하려면,', + fullTipLine2: '요금제를 업그레이드하세요.', + }, + annotatedResponse: { + fullTipLine1: '더 많은 대화를 주석 처리하려면,', + fullTipLine2: '요금제를 업그레이드하세요.', + quotaTitle: '주석 응답 쿼터', + }, +} + +export default translation diff --git a/web/i18n/ko-KR/common.ts b/web/i18n/ko-KR/common.ts new file mode 100644 index 0000000000..bb177ff173 --- /dev/null +++ b/web/i18n/ko-KR/common.ts @@ -0,0 +1,526 @@ +const translation = { + api: { + success: '성공', + actionSuccess: '동작이 성공적으로 수행되었습니다', + saved: '저장됨', + create: '생성됨', + remove: '삭제됨', + }, + operation: { + create: '생성', + confirm: '확인', + cancel: '취소', + clear: '지우기', + save: '저장', + edit: '편집', + add: '추가', + added: '추가됨', + refresh: '새로 고침', + reset: '초기화', + search: '검색', + change: '변경', + remove: '삭제', + send: '전송', + copy: '복사', + lineBreak: '줄 바꿈', + sure: '확실히', + download: '다운로드', + delete: '삭제', + settings: '설정', + setup: '설정', + getForFree: '무료로 받기', + reload: '다시 불러오기', + ok: '확인', + log: '로그', + learnMore: '자세히 알아보기', + params: '매개변수', + duplicate: '중복', + rename: '이름 바꾸기', + }, + placeholder: { + input: '입력해주세요', + select: '선택해주세요', + }, + voice: { + language: { + zhHans: '중국어', + zhHant: '번체 중국어', + enUS: '영어', + deDE: '독일어', + frFR: '프랑스어', + esES: '스페인어', + itIT: '이탈리아어', + thTH: '태국어', + idID: '인도네시아어', + jaJP: '일본어', + koKR: '한국어', + ptBR: '포르투갈어', + ruRU: '러시아어', + ukUA: '우크라이나어', + viVN: '베트남어', + plPL: '폴란드어', + }, + }, + unit: { + char: '문자', + }, + actionMsg: { + noModification: '현재 변경사항이 없습니다.', + modifiedSuccessfully: '변경이 성공적으로 이루어졌습니다', + modifiedUnsuccessfully: '변경에 실패했습니다', + copySuccessfully: '복사가 성공적으로 이루어졌습니다', + paySucceeded: '결제가 성공했습니다', + payCancelled: '결제가 취소되었습니다', + generatedSuccessfully: '생성이 성공적으로 이루어졌습니다', + generatedUnsuccessfully: '생성에 실패했습니다', + }, + model: { + params: { + temperature: '온도', + temperatureTip: '랜덤성을 제어합니다. 온도를 낮추면 더 랜덤한 결과물을 얻을 수 있습니다. 온도가 0에 가까워질수록 모델은 결정적이고 반복적으로 작동합니다.', + top_p: '상위P', + top_pTip: '뉴클리어스 샘플링에 의한 다양성 제어: 0.5는 모든 확률 가중 옵션의 절반을 고려함을 의미합니다.', + presence_penalty: '존재 페널티', + presence_penaltyTip: '이전 텍스트에서 토큰이 나타나는지 여부에 따라 새로운 토큰에 얼마나 많은 페널티를 부여할지 제어합니다. 모델이 새로운 주제에 대해 말할 가능성이 높아집니다.', + frequency_penalty: '빈도 페널티', + frequency_penaltyTip: '이전 텍스트 내 토큰의 기존 빈도에 따라 새로운 토큰에 얼마나 많은 페널티를 부여할지 제어합니다. 모델이 같은 문구를 글자 그대로 반복할 가능성이 줄어듭니다.', + max_tokens: '최대 토큰', + max_tokensTip: + '응답의 최대 길이를 토큰 단위로 제한하는 데 사용됩니다. 큰 값은 프롬프트, 채팅 로그 및 남은 공간에 대한 제한을 가질 수 있습니다. 2/3 이하로 설정하는 것이 좋습니다. gpt-4-1106-preview, gpt-4-vision-preview의 최대 토큰 (입력 128k 출력 4k)보다 작게 설정하는 것이 좋습니다.', + maxTokenSettingTip: '최대 토큰 설정이 높아서 프롬프트, 쿼리 및 데이터 공간에 제한이 생길 수 있습니다. 
현재 모델의 최대 토큰의 80% 이하로 설정해주세요.', + setToCurrentModelMaxTokenTip: '최대 토큰이 현재 모델의 최대 토큰의 80%로 업데이트되었습니다 {{maxToken}}.', + stop_sequences: '중단 시퀀스', + stop_sequencesTip: 'API가 진행 중인 토큰 생성을 중단하는 최대 4개의 시퀀스입니다. 반환된 텍스트에는 중단 시퀀스가 포함되지 않습니다.', + stop_sequencesPlaceholder: '시퀀스를 입력하고 탭 키를 누르세요', + }, + tone: { + Creative: '창의적인', + Balanced: '균형잡힌', + Precise: '정확한', + Custom: '사용자 정의', + }, + addMoreModel: '설정에서 다른 모델을 추가하세요', + }, + menus: { + status: '베타 버전', + explore: '탐색', + apps: '스튜디오', + plugins: '플러그인', + pluginsTips: '타사 플러그인을 통합하거나 ChatGPT 호환 AI 플러그인을 작성합니다.', + datasets: '지식', + datasetsTips: '곧 출시될 예정: 고유한 텍스트 데이터를 가져오거나 웹훅을 통해 실시간으로 데이터를 기록하여 LLM 컨텍스트를 강화합니다.', + newApp: '새로운 앱', + newDataset: '지식 만들기', + tools: '도구', + }, + userProfile: { + settings: '설정', + workspace: '작업 공간', + createWorkspace: '작업 공간 만들기', + helpCenter: '도움말 센터', + roadmapAndFeedback: '로드맵 및 피드백', + community: '커뮤니티', + about: 'Dify 소개', + logout: '로그아웃', + }, + settings: { + accountGroup: '계정', + workplaceGroup: '작업 공간', + account: '내 계정', + members: '멤버', + billing: '청구', + integrations: '통합', + language: '언어', + provider: '모델 제공자', + dataSource: '데이터 소스', + plugin: '플러그인', + apiBasedExtension: 'API 확장', + }, + account: { + avatar: '아바타', + name: '이름', + email: '이메일', + password: '비밀번호', + passwordTip: '일시적인 로그인 코드를 사용하지 않으려면 영구적인 비밀번호를 설정할 수 있습니다.', + setPassword: '비밀번호 설정', + resetPassword: '비밀번호 재설정', + currentPassword: '현재 비밀번호', + newPassword: '새 비밀번호', + confirmPassword: '비밀번호 확인', + notEqual: '비밀번호가 일치하지 않습니다.', + langGeniusAccount: 'Dify 계정', + langGeniusAccountTip: 'Dify 계정과 관련된 사용자 데이터.', + editName: '이름 편집', + showAppLength: '{{length}}개의 앱 표시', + }, + members: { + team: '팀', + invite: '초대', + name: '이름', + lastActive: '최근 활동', + role: '역할', + pending: '대기 중...', + owner: '소유자', + admin: '관리자', + adminTip: '앱 빌드 및 팀 설정 관리 가능', + normal: '일반', + normalTip: '앱 사용만 가능하고 앱 빌드는 불가능', + inviteTeamMember: '팀 멤버 초대', + inviteTeamMemberTip: '로그인 후에 바로 팀 데이터에 액세스할 수 있습니다.', + email: '이메일', + emailInvalid: '유효하지 않은 이메일 형식', + emailPlaceholder: '이메일 입력', + sendInvite: '초대 보내기', + invitedAsRole: '{{role}} 사용자로 초대되었습니다', + invitationSent: '초대가 전송되었습니다', + invitationSentTip: '초대가 전송되었으며, 그들은 Dify에 로그인하여 당신의 팀 데이터에 액세스할 수 있습니다.', + invitationLink: '초대 링크', + failedinvitationEmails: '다음 사용자들은 성공적으로 초대되지 않았습니다', + ok: '확인', + removeFromTeam: '팀에서 제거', + removeFromTeamTip: '팀 액세스가 제거됩니다', + setAdmin: '관리자 설정', + setMember: '일반 멤버 설정', + disinvite: '초대 취소', + deleteMember: '멤버 삭제', + you: '(나)', + }, + integrations: { + connected: '연결됨', + google: 'Google', + googleAccount: 'Google 계정으로 로그인', + github: 'GitHub', + githubAccount: 'GitHub 계정으로 로그인', + connect: '연결', + }, + language: { + displayLanguage: '표시 언어', + timezone: '시간대', + }, + provider: { + apiKey: 'API 키', + enterYourKey: '여기에 API 키를 입력하세요', + invalidKey: '유효하지 않은 OpenAI API 키', + validatedError: '검증 실패:', + validating: '키를 확인하는 중...', + saveFailed: 'API 키 저장 실패', + apiKeyExceedBill: '이 API KEY에는 사용 가능한 할당량이 없습니다. 자세한 내용은', + addKey: '키 추가', + comingSoon: '곧 출시됨', + editKey: '편집', + invalidApiKey: '유효하지 않은 API 키', + azure: { + apiBase: 'API 베이스', + apiBasePlaceholder: 'Azure OpenAI 엔드포인트의 API 베이스 URL.', + apiKey: 'API 키', + apiKeyPlaceholder: '여기에 API 키를 입력하세요', + helpTip: 'Azure OpenAI 서비스 배우기', + }, + openaiHosted: { + openaiHosted: '호스팅된 OpenAI', + onTrial: '트라이얼 중', + exhausted: '할당량이 다 사용되었습니다', + desc: 'Dify가 제공하는 OpenAI 호스팅 서비스를 사용하면 GPT-3.5와 같은 모델을 사용할 수 있습니다. 
트라이얼 할당량이 다 사용되기 전에 다른 모델 제공자를 설정해야 합니다.', + callTimes: '호출 횟수', + usedUp: '트라이얼 할당량이 다 사용되었습니다. 다른 모델 제공자를 추가하세요.', + useYourModel: '현재 사용자 정의 모델 제공자를 사용 중입니다.', + close: '닫기', + }, + anthropicHosted: { + anthropicHosted: 'Anthropic Claude 호스팅', + onTrial: '트라이얼 중', + exhausted: '할당량이 다 사용되었습니다', + desc: '고급 대화 및 창의적인 콘텐츠 생성부터 상세한 지시까지 다양한 작업에 강력한 모델입니다.', + callTimes: '호출 횟수', + usedUp: '트라이얼 할당량이 다 사용되었습니다. 다른 모델 제공자를 추가하세요.', + useYourModel: '현재 사용자 정의 모델 제공자를 사용 중입니다.', + close: '닫기', + }, + anthropic: { + using: '임베드 기능을 사용 중입니다', + enableTip: 'Anthropic 모델을 활성화하려면 먼저 OpenAI 또는 Azure OpenAI 서비스에 바인딩해야 합니다.', + notEnabled: '비활성화됨', + keyFrom: 'Anthropic에서 API 키를 받으세요', + }, + encrypted: { + front: 'API KEY는', + back: '기술을 사용하여 암호화 및 저장됩니다.', + }, + }, + modelProvider: { + notConfigured: '시스템 모델이 아직 완전히 설정되지 않아 일부 기능을 사용할 수 없습니다.', + systemModelSettings: '시스템 모델 설정', + systemModelSettingsLink: '시스템 모델 설정이 필요한 이유는 무엇입니까?', + selectModel: '모델 선택', + setupModelFirst: '먼저 모델을 설정하세요', + systemReasoningModel: { + key: '시스템 추론 모델', + tip: '앱 구축에 사용되는 기본 추론 모델을 설정합니다. 또한 대화 이름 생성 및 다음 질문 제안과 같은 기능도 기본 추론 모델을 사용합니다.', + }, + embeddingModel: { + key: '임베딩 모델', + tip: '지식 문서 임베딩 처리의 기본 모델을 설정합니다. 지식 가져오기 및 임포트에 모두 이 임베딩 모델을 벡터화 처리에 사용합니다. 변경하면 가져온 지식과 질문 간의 벡터 차원이 일치하지 않아 가져오기에 실패합니다. 실패를 피하려면 이 모델을 변경하지 마세요.', + required: '임베딩 모델이 필요합니다', + }, + speechToTextModel: { + key: '음성-to-텍스트 모델', + tip: '대화에서의 음성-to-텍스트 입력에 사용되는 기본 모델을 설정합니다.', + }, + ttsModel: { + key: '텍스트-to-음성 모델', + tip: '대화에서의 텍스트-to-음성 입력에 사용되는 기본 모델을 설정합니다.', + }, + rerankModel: { + key: '재랭크 모델', + tip: '재랭크 모델은 사용자 쿼리와의 의미적 일치를 기반으로 후보 문서 목록을 재배열하여 의미적 순위를 향상시킵니다.', + }, + quota: '할당량', + searchModel: '검색 모델', + noModelFound: '{{model}}에 대한 모델을 찾을 수 없습니다', + models: '모델', + showMoreModelProvider: '더 많은 모델 제공자 표시', + selector: { + tip: '이 모델은 삭제되었습니다. 다른 모델을 추가하거나 다른 모델을 선택하세요.', + emptyTip: '사용 가능한 모델이 없습니다', + emptySetting: '설정으로 이동하여 구성하세요', + rerankTip: '재랭크 모델을 설정하세요', + }, + card: { + quota: '할당량', + onTrial: '트라이얼 중', + paid: '유료', + quotaExhausted: '할당량이 다 사용되었습니다', + callTimes: '호출 횟수', + tokens: '토큰', + buyQuota: 'Buy Quota', + priorityUse: '우선 사용', + removeKey: 'API 키 제거', + tip: '지불된 할당량에 우선순위가 부여됩니다. 평가판 할당량은 유료 할당량이 소진된 후 사용됩니다.', + }, + item: { + deleteDesc: '{{modelName}}은(는) 시스템 추론 모델로 사용 중입니다. 제거 후 일부 기능을 사용할 수 없습니다. 
확인하시겠습니까?', + freeQuota: '무료 할당량', + }, + addApiKey: 'API 키 추가', + invalidApiKey: '잘못된 API 키', + encrypted: { + front: 'API 키는 다음 기술을 사용하여 암호화되어 저장됩니다', + back: ' 기술.', + }, + freeQuota: { + howToEarn: '얻는 방법', + }, + addMoreModelProvider: '모델 제공자 추가', + addModel: '모델 추가', + modelsNum: '{{num}}개의 모델', + showModels: '모델 표시', + showModelsNum: '{{num}}개의 모델 표시', + collapse: '축소', + config: '설정', + modelAndParameters: '모델 및 매개변수', + model: '모델', + featureSupported: '{{feature}} 지원됨', + callTimes: '호출 횟수', + credits: '메시지 크레딧', + buyQuota: '할당량 구매', + getFreeTokens: '무료 토큰 받기', + priorityUsing: '우선 사용', + deprecated: '사용 중단됨', + confirmDelete: '삭제를 확인하시겠습니까?', + quotaTip: '남은 무료 토큰 사용 가능', + loadPresets: '프리셋 로드', + parameters: '매개변수', + }, + dataSource: { + add: '데이터 소스 추가하기', + connect: '연결하기', + notion: { + title: 'Notion', + description: '노션을 지식 데이터 소스로 사용하기.', + connectedWorkspace: '작업 공간에 연결됨', + addWorkspace: '작업 공간에 추가하기', + connected: '연결됨', + disconnected: '연결 안됨', + changeAuthorizedPages: '허가된 페이지 변경하기', + pagesAuthorized: '페이지가 허가됨', + sync: '동기화', + remove: '제거하기', + selector: { + pageSelected: '페이지 선택됨', + searchPages: '페이지 검색...', + noSearchResult: '검색 결과 없음', + addPages: '페이지 추가하기', + preview: '미리보기', + }, + }, + }, + plugin: { + serpapi: { + apiKey: 'API 키', + apiKeyPlaceholder: 'API 키를 입력하세요', + keyFrom: 'SerpAPI 계정 페이지에서 SerpAPI 키를 가져오세요', + }, + }, + apiBasedExtension: { + title: 'API 기반 확장은 Dify 애플리케이션 전체에서 간편한 사용을 위한 설정을 단순화하고 집중적인 API 관리를 제공합니다.', + link: '사용자 정의 API 기반 확장을 개발하는 방법 배우기', + linkUrl: 'https://docs.dify.ai/features/extension/api_based_extension', + add: 'API 기반 확장 추가', + selector: { + title: 'API 기반 확장', + placeholder: 'API 기반 확장을 선택하세요', + manage: 'API 기반 확장 관리', + }, + modal: { + title: 'API 기반 확장 추가', + editTitle: 'API 기반 확장 편집', + name: { + title: '이름', + placeholder: '이름을 입력하세요', + }, + apiEndpoint: { + title: 'API 엔드포인트', + placeholder: 'API 엔드포인트를 입력하세요', + }, + apiKey: { + title: 'API 키', + placeholder: 'API 키를 입력하세요', + lengthError: 'API 키는 5자 미만이어야 합니다', + }, + }, + type: '유형', + }, + about: { + changeLog: '변경 로그', + updateNow: '지금 업데이트', + nowAvailable: 'Dify {{version}} 사용 가능합니다.', + latestAvailable: 'Dify {{version}} 최신 버전입니다.', + }, + appMenus: { + overview: '개요', + promptEng: '오케스트레이트', + apiAccess: 'API 액세스', + logAndAnn: '로그 및 어노테이션', + logs: '로그', + }, + environment: { + testing: '테스트', + development: '개발', + }, + appModes: { + completionApp: '텍스트 생성', + chatApp: '채팅 앱', + }, + datasetMenus: { + documents: '문서', + hitTesting: '검색 테스트', + settings: '설정', + emptyTip: '연결된 지식이 없습니다. 애플리케이션 또는 플러그인으로 이동하여 연결을 완료하세요.', + viewDoc: '문서 보기', + relatedApp: '관련 앱', + }, + voiceInput: { + speaking: '지금 말하고 있습니다...', + converting: '텍스트로 변환 중...', + notAllow: '마이크가 허용되지 않았습니다', + }, + modelName: { + 'gpt-3.5-turbo': 'GPT-3.5-Turbo', + 'gpt-3.5-turbo-16k': 'GPT-3.5-Turbo-16K', + 'gpt-4': 'GPT-4', + 'gpt-4-32k': 'GPT-4-32K', + 'text-davinci-003': 'Text-Davinci-003', + 'text-embedding-ada-002': 'Text-Embedding-Ada-002', + 'whisper-1': 'Whisper-1', + 'claude-instant-1': 'Claude-Instant', + 'claude-2': 'Claude-2', + }, + chat: { + renameConversation: '대화 이름 바꾸기', + conversationName: '대화 이름', + conversationNamePlaceholder: '대화 이름을 입력하세요', + conversationNameCanNotEmpty: '대화 이름은 필수입니다', + citation: { + title: '인용', + linkToDataset: '지식 링크', + characters: '문자수:', + hitCount: '검색 횟수:', + vectorHash: '벡터 해시:', + hitScore: '검색 점수:', + }, + }, + promptEditor: { + placeholder: '여기에 프롬프트 단어를 입력하세요. 
변수를 삽입하려면 "{{"를 입력하고, 프롬프트 컨텐츠 블록을 삽입하려면 "/"를 입력하세요.', + context: { + item: { + title: '컨텍스트', + desc: '컨텍스트 템플릿을 삽입합니다.', + }, + modal: { + title: '{{num}} 번째 컨텍스트', + add: '컨텍스트 추가', + footer: '아래의 컨텍스트 섹션에서 컨텍스트를 관리할 수 있습니다.', + }, + }, + history: { + item: { + title: '대화 기록', + desc: '과거 메시지 템플릿을 삽입합니다.', + }, + modal: { + title: '예시', + user: '안녕하세요', + assistant: '안녕하세요! 오늘은 어떻게 도와드릴까요?', + edit: '대화 역할 이름 편집', + }, + }, + variable: { + item: { + title: '변수 및 외부 도구', + desc: '변수 및 외부 도구를 삽입합니다.', + }, + outputToolDisabledItem: { + title: '변수', + desc: '변수를 삽입합니다.', + }, + modal: { + add: '새로운 변수', + addTool: '새로운 도구', + }, + }, + query: { + item: { + title: '쿼리', + desc: '사용자 쿼리 템플릿을 삽입합니다.', + }, + }, + existed: '프롬프트에 이미 존재합니다', + }, + imageUploader: { + uploadFromComputer: '컴퓨터에서 업로드', + uploadFromComputerReadError: '이미지 읽기 실패. 다시 시도하세요.', + uploadFromComputerUploadError: '이미지 업로드 실패. 다시 업로드하세요.', + uploadFromComputerLimit: '업로드 이미지 크기는 {{size}} MB를 초과할 수 없습니다', + pasteImageLink: '이미지 링크 붙여넣기', + pasteImageLinkInputPlaceholder: '여기에 이미지 링크를 붙여넣으세요', + pasteImageLinkInvalid: '유효하지 않은 이미지 링크', + imageUpload: '이미지 업로드', + }, + tag: { + placeholder: '모든 태그', + addNew: '새 태그 추가', + noTag: '태그 없음', + noTagYet: '아직 태그가 없습니다', + addTag: '태그 추가', + editTag: '태그 편집', + manageTags: '태그 관리', + selectorPlaceholder: '검색 또는 생성할 문자를 입력하세요', + create: '생성', + delete: '태그 삭제', + deleteTip: '태그가 사용 중입니다. 삭제하시겠습니까?', + created: '태그가 성공적으로 생성되었습니다', + failed: '태그 생성에 실패했습니다', + }, +} + +export default translation diff --git a/web/i18n/ko-KR/custom.ts b/web/i18n/ko-KR/custom.ts new file mode 100644 index 0000000000..6205a67275 --- /dev/null +++ b/web/i18n/ko-KR/custom.ts @@ -0,0 +1,30 @@ +const translation = { + custom: '사용자 정의', + upgradeTip: { + prefix: '플랜을 업그레이드하여', + suffix: '브랜드를 사용자 정의하세요.', + }, + webapp: { + title: 'WebApp 브랜드 사용자 정의', + removeBrand: 'Powered by Dify 삭제', + changeLogo: 'Powered by 브랜드 이미지 변경', + changeLogoTip: '최소 크기 40x40px의 SVG 또는 PNG 형식', + }, + app: { + title: '앱 헤더 브랜드 사용자 정의', + changeLogoTip: '최소 크기 80x80px의 SVG 또는 PNG 형식', + }, + upload: '업로드', + uploading: '업로드 중', + uploadedFail: '이미지 업로드 실패. 다시 업로드해 주세요.', + change: '변경', + apply: '적용', + restore: '기본값으로 복원', + customize: { + contactUs: '문의하기', + prefix: '앱 내 브랜드 로고를 사용자 정의하려면,', + suffix: '엔터프라이즈 버전으로 업그레이드하세요.', + }, +} + +export default translation diff --git a/web/i18n/ko-KR/dataset-creation.ts b/web/i18n/ko-KR/dataset-creation.ts new file mode 100644 index 0000000000..3039f69d6d --- /dev/null +++ b/web/i18n/ko-KR/dataset-creation.ts @@ -0,0 +1,131 @@ +const translation = { + steps: { + header: { + creation: '지식 생성', + update: '데이터 추가', + }, + one: '데이터 소스 선택', + two: '텍스트 전처리 및 클리닝', + three: '실행 및 완료', + }, + error: { + unavailable: '이 지식은 사용할 수 없습니다', + }, + stepOne: { + filePreview: '파일 미리보기', + pagePreview: '페이지 미리보기', + dataSourceType: { + file: '텍스트 파일에서 가져오기', + notion: 'Notion 동기화', + web: '웹 사이트 동기화', + }, + uploader: { + title: '텍스트 파일 업로드', + button: '파일을 끌어다 놓거나', + browse: '찾아보기', + tip: '{{supportTypes}}을(를) 지원합니다. 파일당 최대 크기는 {{size}}MB입니다.', + validation: { + typeError: '지원되지 않는 파일 유형입니다', + size: '파일 크기가 너무 큽니다. 
최대 크기는 {{size}}MB입니다', + count: '여러 파일은 지원되지 않습니다', + filesNumber: '일괄 업로드 제한({{filesNumber}}개)에 도달했습니다.', + }, + cancel: '취소', + change: '변경', + failed: '업로드에 실패했습니다', + }, + notionSyncTitle: 'Notion에 연결되지 않았습니다', + notionSyncTip: 'Notion과 동기화하려면 먼저 Notion에 연결해야 합니다.', + connect: '연결하기', + button: '다음', + emptyDatasetCreation: '비어있는 지식 생성', + modal: { + title: '비어있는 지식 생성', + tip: '비어있는 지식에는 문서가 포함되지 않으며 언제든지 문서를 업로드할 수 있습니다.', + input: '지식 이름', + placeholder: '입력하세요', + nameNotEmpty: '이름은 비워둘 수 없습니다', + nameLengthInvaild: '이름은 1~40자여야 합니다', + cancelButton: '취소', + confirmButton: '생성', + failed: '생성에 실패했습니다', + }, + }, + stepTwo: { + segmentation: '청크 설정', + auto: '자동', + autoDescription: '청크 및 전처리 규칙을 자동으로 설정합니다. 처음 사용자는 이 옵션을 선택하는 것을 권장합니다.', + custom: '사용자 설정', + customDescription: '청크 규칙, 청크 길이, 전처리 규칙 등을 사용자 정의합니다.', + separator: '세그먼트 식별자', + separatorPlaceholder: '예: 줄바꿈(\\\\n) 또는 특수 구분자(예: "***")', + maxLength: '최대 청크 길이', + overlap: '청크 중첩', + overlapTip: '청크 중첩을 설정하여 그 사이의 의미적 연관성을 유지하고 검색 효과를 향상시킬 수 있습니다. 최대 청크 크기의 10%~25%로 설정하는 것이 좋습니다.', + overlapCheck: '청크 중첩은 최대 청크 길이를 초과할 수 없습니다', + rules: '텍스트 전처리 규칙', + removeExtraSpaces: '연속된 공백, 줄바꿈, 탭을 대체합니다', + removeUrlEmails: '모든 URL과 이메일 주소를 제거합니다', + removeStopwords: '일반적인 불용어(예: "a", "an", "the" 등)를 제거합니다', + preview: '미리보기', + reset: '초기화', + indexMode: '인덱스 모드', + qualified: '고품질', + recommend: '추천', + qualifiedTip: '사용자 쿼리에 대해 더 높은 정확성을 제공하기 위해 기본 시스템 임베딩 인터페이스를 호출하여 처리합니다.', + warning: '모델 제공자의 API 키를 설정하세요.', + click: '설정으로 이동', + economical: '경제적', + economicalTip: '오프라인 벡터 엔진, 키워드 인덱스 등을 사용하여 토큰 소비 없이 정확도를 낮춥니다.', + QATitle: '질문과 답변 형식으로 세그먼트화', + QATip: '이 옵션을 활성화하면 추가 토큰이 소비됩니다', + QALanguage: '사용 언어', + emstimateCost: '예상 비용', + emstimateSegment: '예상 청크 수', + segmentCount: '청크', + calculating: '계산 중...', + fileSource: '문서 전처리', + notionSource: '페이지 전처리', + other: '기타', + fileUnit: '파일', + notionUnit: '페이지', + previousStep: '이전 단계', + nextStep: '저장하고 처리', + save: '저장하고 처리', + cancel: '취소', + sideTipTitle: '청크와 전처리가 필요한 이유', + sideTipP1: '텍스트 데이터를 처리할 때 청크와 클리닝은 두 가지 중요한 전처리 단계입니다.', + sideTipP2: '세그멘테이션은 긴 텍스트를 단락으로 분할하여 모델이 이해하기 쉽게 합니다. 이로 인해 모델 결과의 품질과 관련성이 향상됩니다.', + sideTipP3: '클리닝은 불필요한 문자 및 형식을 제거하여 지식을 더 깔끔하고 분석 가능한 것으로 만듭니다.', + sideTipP4: '적절한 청크와 클리닝은 모델의 성능을 향상시키고 정확하고 가치 있는 결과를 제공합니다.', + previewTitle: '미리보기', + previewTitleButton: '미리보기', + previewButton: '질문-답변 형식으로 전환', + previewSwitchTipStart: '현재 청크 미리보기는 텍스트 형식입니다. 질문과 답변 형식 미리보기로 전환하면', + previewSwitchTipEnd: ' 추가 토큰이 소비됩니다', + characters: '문자', + indexSettedTip: '인덱스 방식을 변경하려면,', + retrivalSettedTip: '인덱스 방식을 변경하려면,', + datasetSettingLink: '지식 설정', + }, + stepThree: { + creationTitle: '🎉 지식이 생성되었습니다', + creationContent: '지식 이름이 자동으로 설정되었지만 언제든지 변경할 수 있습니다', + label: '지식 이름', + additionTitle: '🎉 문서가 업로드되었습니다', + additionP1: '문서가 지식에 업로드되었습니다', + additionP2: '지식의 문서 목록에서 찾을 수 있습니다.', + stop: '처리 중지', + resume: '처리 재개', + navTo: '문서로 이동', + sideTipTitle: '다음 단계는 무엇인가요', + sideTipContent: + '문서 인덱싱이 완료되면 지식을 응용 프로그램 컨텍스트로 통합할 수 있습니다. 프롬프트 오케스트레이션 페이지에서 컨텍스트 설정을 찾을 수 있습니다. 
또한 독립된 ChatGPT 인덱스 플러그인으로 출시할 수도 있습니다.', + modelTitle: '임베딩을 중지해도 괜찮습니까?', + modelContent: '나중에 처리를 다시 시작해야 할 경우, 중단한 위치에서 계속합니다.', + modelButtonConfirm: '확인', + modelButtonCancel: '취소', + }, +} + +export default translation diff --git a/web/i18n/ko-KR/dataset-documents.ts b/web/i18n/ko-KR/dataset-documents.ts new file mode 100644 index 0000000000..8e7db58a6d --- /dev/null +++ b/web/i18n/ko-KR/dataset-documents.ts @@ -0,0 +1,348 @@ +const translation = { + list: { + title: '문서', + desc: '지식의 모든 파일이 여기에 표시되며, 전체 지식이 Dify의 인용문이나 챗 플러그인을 통해 링크되거나 색인화될 수 있습니다.', + addFile: '파일 추가', + addPages: '페이지 추가', + table: { + header: { + fileName: '파일명', + words: '단어 수', + hitCount: '검색 횟수', + uploadTime: '업로드 시간', + status: '상태', + action: '동작', + }, + }, + action: { + uploadFile: '새 파일 업로드', + settings: '세그먼트 설정', + addButton: '청크 추가', + add: '청크 추가', + batchAdd: '일괄 추가', + archive: '아카이브', + unarchive: '아카이브 해제', + delete: '삭제', + enableWarning: '아카이브된 파일은 활성화할 수 없습니다.', + sync: '동기화', + }, + index: { + enable: '활성화', + disable: '비활성화', + all: '모두', + enableTip: '파일을 색인화할 수 있습니다.', + disableTip: '파일을 색인화할 수 없습니다.', + }, + status: { + queuing: '대기 중', + indexing: '색인화 중', + paused: '일시 중지됨', + error: '오류', + available: '사용 가능', + enabled: '활성화됨', + disabled: '비활성화됨', + archived: '아카이브됨', + }, + empty: { + title: '아직 문서가 없습니다', + upload: { + tip: '파일을 업로드하거나 웹 사이트에서 동기화하거나 Notion이나 GitHub 같은 웹 앱에서 동기화할 수 있습니다.', + }, + sync: { + tip: 'Dify는 정기적으로 Notion에서 파일을 다운로드하고 처리합니다.', + }, + }, + delete: { + title: '정말 삭제하시겠습니까?', + content: '나중에 처리를 계속해야 하는 경우 중단한 곳에서 계속합니다.', + }, + batchModal: { + title: '일괄 추가', + csvUploadTitle: 'CSV 파일을 여기로 드래그 앤 드롭하거나', + browse: '찾아보기', + tip: 'CSV 파일은 다음 구조를 따라야 합니다:', + question: '질문', + answer: '답변', + contentTitle: '청크 내용', + content: '내용', + template: '여기서 템플릿 다운로드', + cancel: '취소', + run: '일괄 실행', + runError: '일괄 실행에 실패했습니다', + processing: '일괄 처리 중', + completed: '가져오기 완료', + error: '가져오기 오류', + ok: '확인', + }, + }, + metadata: { + title: '메타데이터', + desc: '문서 메타데이터에 레이블을 붙여 AI가 신속하게 접근할 수 있고 사용자에게 출처가 공개됩니다.', + dateTimeFormat: 'YYYY년 M월 D일 hh:mm A', + docTypeSelectTitle: '문서 유형을 선택하세요', + docTypeChangeTitle: '문서 유형 변경', + docTypeSelectWarning: '문서 유형을 변경하면 현재 입력된 메타데이터가 유지되지 않습니다.', + firstMetaAction: '시작하기', + placeholder: { + add: '추가', + select: '선택', + }, + source: { + upload_file: '파일 업로드', + notion: 'Notion에서 동기화', + github: 'GitHub에서 동기화', + }, + type: { + book: '도서', + webPage: '웹 페이지', + paper: '논문', + socialMediaPost: '소셜 미디어 게시물', + personalDocument: '개인 문서', + businessDocument: '비즈니스 문서', + IMChat: 'IM 채팅', + wikipediaEntry: '위키피디아 항목', + notion: 'Notion에서 동기화', + github: 'GitHub에서 동기화', + technicalParameters: '기술적 매개변수', + }, + field: { + processRule: { + processDoc: '문서 처리', + segmentRule: '청크 규칙', + segmentLength: '청크 길이', + processClean: '텍스트 전처리', + }, + book: { + title: '제목', + language: '언어', + author: '저자', + publisher: '출판사', + publicationDate: '출판일', + ISBN: 'ISBN', + category: '카테고리', + }, + webPage: { + title: '제목', + url: 'URL', + language: '언어', + authorPublisher: '저자/출판사', + publishDate: '공개일', + topicsKeywords: '주제/키워드', + description: '설명', + }, + paper: { + title: '제목', + language: '언어', + author: '저자', + publishDate: '공개일', + journalConferenceName: '저널/학회명', + volumeIssuePage: '권호페이지', + DOI: 'DOI', + topicsKeywords: '주제/키워드', + abstract: '요약', + }, + socialMediaPost: { + platform: '플랫폼', + authorUsername: '저자/사용자명', + publishDate: '공개일', + postURL: '게시물 URL', + topicsTags: '주제/태그', + }, + personalDocument: { + title: '제목', + author: 
'저자', + creationDate: '생성일', + lastModifiedDate: '최종 수정일', + documentType: '문서 유형', + tagsCategory: '태그/카테고리', + }, + businessDocument: { + title: '제목', + author: '저자', + creationDate: '생성일', + lastModifiedDate: '최종 수정일', + documentType: '문서 유형', + departmentTeam: '부서/팀', + }, + IMChat: { + chatPlatform: '채팅 플랫폼', + chatPartiesGroupName: '채팅 참여자/그룹명', + participants: '참여자', + startDate: '시작일', + endDate: '종료일', + topicsKeywords: '주제/키워드', + fileType: '파일 유형', + }, + wikipediaEntry: { + title: '제목', + language: '언어', + webpageURL: '웹 페이지 URL', + editorContributor: '편집자/기고자', + lastEditDate: '최종 편집일', + summaryIntroduction: '요약/소개', + }, + notion: { + title: '제목', + language: '언어', + author: '저자', + createdTime: '생성 일시', + lastModifiedTime: '최종 수정 일시', + url: 'URL', + tag: '태그', + description: '설명', + }, + github: { + repoName: '저장소 이름', + repoDesc: '저장소 설명', + repoOwner: '저장소 소유자', + fileName: '파일 이름', + filePath: '파일 경로', + programmingLang: '프로그래밍 언어', + url: 'URL', + license: '라이선스', + lastCommitTime: '최종 커밋 시간', + lastCommitAuthor: '최종 커밋 작성자', + }, + originInfo: { + originalFilename: '원본 파일 이름', + originalFileSize: '원본 파일 크기', + uploadDate: '업로드 일시', + lastUpdateDate: '최종 업데이트 일시', + source: '소스', + }, + technicalParameters: { + segmentSpecification: '청크 사양', + segmentLength: '청크 길이', + avgParagraphLength: '평균 문단 길이', + paragraphs: '문단', + hitCount: '검색 횟수', + embeddingTime: '임베딩 시간', + embeddedSpend: '임베딩 시간', + }, + }, + languageMap: { + zh: '중국어', + en: '영어', + es: '스페인어', + fr: '프랑스어', + de: '독일어', + ja: '일본어', + ko: '한국어', + ru: '러시아어', + ar: '아랍어', + pt: '포르투갈어', + it: '이탈리아어', + nl: '네덜란드어', + pl: '폴란드어', + sv: '스웨덴어', + tr: '터키어', + he: '히브리어', + hi: '힌디어', + da: '덴마크어', + fi: '핀란드어', + no: '노르웨이어', + hu: '헝가리어', + el: '그리스어', + cs: '체코어', + th: '태국어', + id: '인도네시아어', + }, + categoryMap: { + book: { + fiction: '소설', + biography: '전기', + history: '역사', + science: '과학', + technology: '기술', + education: '교육', + philosophy: '철학', + religion: '종교', + socialSciences: '사회과학', + art: '예술', + travel: '여행', + health: '건강', + selfHelp: '자기 도움', + businessEconomics: '비즈니스/경제', + cooking: '요리', + childrenYoungAdults: '어린이/청소년', + comicsGraphicNovels: '만화/그래픽 소설', + poetry: '시', + drama: '연극', + other: '기타', + }, + personalDoc: { + notes: '메모', + blogDraft: '블로그 초안', + diary: '다이어리', + researchReport: '연구 보고서', + bookExcerpt: '책 발췌', + schedule: '일정', + list: '목록', + projectOverview: '프로젝트 개요', + photoCollection: '사진 컬렉션', + creativeWriting: '창작 글', + codeSnippet: '코드 스니펫', + designDraft: '디자인 초안', + personalResume: '이력서', + other: '기타', + }, + businessDoc: { + meetingMinutes: '회의록', + researchReport: '연구 보고서', + proposal: '제안서', + employeeHandbook: '직원 안내서', + trainingMaterials: '교육 자료', + requirementsDocument: '요구 사항 문서', + designDocument: '디자인 문서', + productSpecification: '제품 사양서', + financialReport: '재무 보고서', + marketAnalysis: '시장 분석', + projectPlan: '프로젝트 계획서', + teamStructure: '팀 구조', + policiesProcedures: '정책 및 절차', + contractsAgreements: '계약 및 협약', + emailCorrespondence: '이메일 통신', + other: '기타', + }, + }, + }, + embedding: { + processing: '임베딩 처리 중...', + paused: '임베딩이 일시 중지되었습니다', + completed: '임베딩이 완료되었습니다', + error: '임베딩 오류', + docName: '문서 전처리', + mode: '세그먼트 규칙', + segmentLength: '청크의 길이', + textCleaning: '텍스트 전처리', + segments: '세그먼트', + highQuality: '고품질 모드', + economy: '경제 모드', + estimate: '소비량 예상', + stop: '처리 중지', + resume: '처리 재개', + automatic: '자동', + custom: '사용자 정의', + previewTip: '임베딩이 완료된 후에 세그먼트 미리보기를 사용할 수 있습니다', + }, + segment: { + paragraphs: '단락', + keywords: 
'키워드', + addKeyWord: '키워드 추가', + keywordError: '키워드 최대 길이는 20자입니다', + characters: '문자', + hitCount: '검색 횟수', + vectorHash: '벡터 해시: ', + questionPlaceholder: '질문을 입력하세요', + questionEmpty: '질문을 비워둘 수 없습니다', + answerPlaceholder: '답변을 입력하세요', + answerEmpty: '답변을 비워둘 수 없습니다', + contentPlaceholder: '내용을 입력하세요', + contentEmpty: '내용을 비워둘 수 없습니다', + newTextSegment: '새로운 텍스트 세그먼트', + newQaSegment: '새로운 Q&A 세그먼트', + delete: '이 청크를 삭제하시겠습니까?', + }, +} + +export default translation diff --git a/web/i18n/ko-KR/dataset-hit-testing.ts b/web/i18n/ko-KR/dataset-hit-testing.ts new file mode 100644 index 0000000000..df0dbe5208 --- /dev/null +++ b/web/i18n/ko-KR/dataset-hit-testing.ts @@ -0,0 +1,28 @@ +const translation = { + title: '검색 테스트', + desc: '주어진 쿼리 텍스트에 기반하여 지식의 검색 효과를 테스트합니다.', + dateTimeFormat: 'YYYY/MM/DD HH:mm', + recents: '최근 결과', + table: { + header: { + source: '소스', + text: '텍스트', + time: '시간', + }, + }, + input: { + title: '소스 텍스트', + placeholder: '텍스트를 입력하세요. 간결한 설명문이 좋습니다.', + countWarning: '최대 200자까지 입력할 수 있습니다.', + indexWarning: '고품질 지식만.', + testing: '테스트 중', + }, + hit: { + title: '검색 결과 단락', + emptyTip: '검색 테스트 결과가 여기에 표시됩니다.', + }, + noRecentTip: '최근 쿼리 결과가 없습니다.', + viewChart: '벡터 차트 보기', +} + +export default translation diff --git a/web/i18n/ko-KR/dataset-settings.ts b/web/i18n/ko-KR/dataset-settings.ts new file mode 100644 index 0000000000..b3193c3e4f --- /dev/null +++ b/web/i18n/ko-KR/dataset-settings.ts @@ -0,0 +1,33 @@ +const translation = { + title: '지식 설정', + desc: '여기에서 지식의 속성과 작동 방법을 변경할 수 있습니다.', + form: { + name: '지식 이름', + namePlaceholder: '지식 이름을 입력하세요', + nameError: '이름은 비워둘 수 없습니다', + desc: '지식 설명', + descInfo: '지식 내용을 개괄하는 명확한 텍스트 설명을 작성하세요. 이 설명은 여러 지식 중에서 선택하는 기준으로 사용됩니다.', + descPlaceholder: '이 지식에 포함된 내용을 설명하세요. 자세한 설명은 AI가 지식 내용에 빠르게 접근할 수 있도록 합니다. 비어 있으면 Dify가 기본 검색 전략을 사용합니다.', + descWrite: '좋은 지식 설명 작성 방법 배우기', + permissions: '권한', + permissionsOnlyMe: '나만', + permissionsAllMember: '모든 팀 멤버', + indexMethod: '인덱스 방법', + indexMethodHighQuality: '고품질', + indexMethodHighQualityTip: '사용자 쿼리 시 더 높은 정확도를 제공하기 위해 OpenAI의 임베딩 인터페이스를 호출하여 처리합니다.', + indexMethodEconomy: '경제적', + indexMethodEconomyTip: '오프라인 벡터 엔진, 키워드 인덱스 등을 사용하여 토큰을 소비하지 않고도 정확도를 감소시킵니다.', + embeddingModel: '임베딩 모델', + embeddingModelTip: '임베딩 모델 변경은', + embeddingModelTipLink: '설정', + retrievalSetting: { + title: '검색 설정', + learnMore: '자세히 알아보기', + description: ' 검색 방법에 대한 자세한 정보', + longDescription: ' 검색 방법에 대한 자세한 내용은 언제든지 지식 설정에서 변경할 수 있습니다.', + }, + save: '저장', + }, +} + +export default translation diff --git a/web/i18n/ko-KR/dataset.ts b/web/i18n/ko-KR/dataset.ts new file mode 100644 index 0000000000..d80d9925ca --- /dev/null +++ b/web/i18n/ko-KR/dataset.ts @@ -0,0 +1,48 @@ +const translation = { + knowledge: '지식', + documentCount: ' 문서', + wordCount: ' k 단어', + appCount: ' 연결된 앱', + createDataset: '지식 생성', + createDatasetIntro: '자체 텍스트 데이터를 가져오거나 LLM 컨텍스트를 강화하기 위해 웹훅을 통해 실시간 데이터를 기록할 수 있습니다.', + deleteDatasetConfirmTitle: '이 지식을 삭제하시겠습니까?', + deleteDatasetConfirmContent: '지식을 삭제하면 다시 되돌릴 수 없습니다. 사용자는 더 이상 귀하의 지식에 액세스할 수 없으며 모든 프롬프트 설정과 로그가 영구적으로 삭제됩니다.', + datasetDeleted: '지식이 삭제되었습니다', + datasetDeleteFailed: '지식 삭제에 실패했습니다', + didYouKnow: '알고 계셨나요?', + intro1: '지식을 Dify 애플리케이션에 ', + intro2: '컨텍스트로', + intro3: ' 통합할 수 있습니다.', + intro4: '혹은, ', + intro5: '이처럼', + intro6: ' 독립적인 ChatGPT 인덱스 플러그인으로 공개할 수 있습니다', + unavailable: '사용 불가', + unavailableTip: '임베딩 모델을 사용할 수 없습니다. 
기본 임베딩 모델을 설정해야 합니다.', + datasets: '지식', + datasetsApi: 'API', + retrieval: { + semantic_search: { + title: '벡터 검색', + description: '쿼리의 임베딩을 생성하고, 해당 벡터 표현에 가장 유사한 텍스트 청크를 검색합니다.', + }, + full_text_search: { + title: '전체 텍스트 검색', + description: '문서 내 모든 용어를 인덱싱하여 사용자가 원하는 용어를 검색하고 관련 텍스트 청크를 가져올 수 있게 합니다.', + }, + hybrid_search: { + title: '하이브리드 검색', + description: '전체 텍스트 검색과 벡터 검색을 동시에 실행하고 사용자 쿼리에 가장 적합한 매치를 선택하기 위해 다시 랭크를 매깁니다. 재랭크 모델 API 설정이 필요합니다.', + recommend: '추천', + }, + invertedIndex: { + title: '역 인덱스', + description: '효율적인 검색에 사용되는 구조입니다. 각 용어는 문서나 웹 페이지에 포함된 것을 가리키며, 용어마다 체계적으로 정리되어 있습니다.', + }, + change: '변경', + changeRetrievalMethod: '검색 방법 변경', + }, + docsFailedNotice: '문서 인덱스에 실패했습니다', + retry: '재시도', +} + +export default translation diff --git a/web/i18n/ko-KR/explore.ts b/web/i18n/ko-KR/explore.ts new file mode 100644 index 0000000000..6a6522fd1a --- /dev/null +++ b/web/i18n/ko-KR/explore.ts @@ -0,0 +1,43 @@ +const translation = { + title: '탐색', + sidebar: { + discovery: '탐색', + chat: '채팅', + workspace: '작업 공간', + action: { + pin: '고정', + unpin: '고정 해제', + rename: '이름 변경', + delete: '삭제', + }, + delete: { + title: '앱 삭제', + content: '이 앱을 삭제해도 괜찮습니까?', + }, + }, + apps: { + title: 'Dify로 앱 탐색', + description: '이 템플릿 앱을 즉시 사용하거나 템플릿을 기반으로 고유한 앱을 사용자 정의하세요.', + allCategories: '모든 카테고리', + }, + appCard: { + addToWorkspace: '작업 공간에 추가', + customize: '사용자 정의', + }, + appCustomize: { + title: '{{name}}으로 앱 만들기', + subTitle: '앱 아이콘 및 이름', + nameRequired: '앱 이름은 필수입니다', + }, + category: { + Assistant: '어시스턴트', + Writing: '작성', + Translate: '번역', + Programming: '프로그래밍', + Agent: '에이전트', + Workflow: '워크플로우', + HR: '인사', + }, +} + +export default translation diff --git a/web/i18n/ko-KR/layout.ts b/web/i18n/ko-KR/layout.ts new file mode 100644 index 0000000000..e2410dd34b --- /dev/null +++ b/web/i18n/ko-KR/layout.ts @@ -0,0 +1,3 @@ +const translation = {} + +export default translation diff --git a/web/i18n/ko-KR/login.ts b/web/i18n/ko-KR/login.ts new file mode 100644 index 0000000000..ee0867d1db --- /dev/null +++ b/web/i18n/ko-KR/login.ts @@ -0,0 +1,60 @@ +const translation = { + pageTitle: '시작하기 🎉', + welcome: 'Dify에 오신 것을 환영합니다. 
계속하려면 로그인하세요.', + email: '이메일 주소', + emailPlaceholder: '이메일 주소를 입력하세요', + password: '비밀번호', + passwordPlaceholder: '비밀번호를 입력하세요', + name: '사용자 이름', + namePlaceholder: '사용자 이름을 입력하세요', + forget: '비밀번호를 잊으셨나요?', + signBtn: '로그인', + installBtn: '설치', + setAdminAccount: '관리자 계정 설정', + setAdminAccountDesc: '앱 생성 및 LLM 제공자 관리 등 최고 권한을 가진 관리자 계정 설정', + createAndSignIn: '계정 생성 및 로그인', + oneMoreStep: '마지막 단계', + createSample: '이 정보를 기반으로 샘플 앱을 생성합니다.', + invitationCode: '초대 코드', + invitationCodePlaceholder: '초대 코드를 입력하세요', + interfaceLanguage: '인터페이스 언어', + timezone: '시간대', + go: 'Dify로 이동', + sendUsMail: '간단한 소개를 메일로 보내주시면 초대 요청을 처리해드립니다.', + acceptPP: '개인정보 처리 방침에 동의합니다.', + reset: '비밀번호를 재설정하려면 다음 명령을 실행하세요:', + withGitHub: 'GitHub로 계속', + withGoogle: 'Google로 계속', + rightTitle: 'LLM의 최대 잠재력을 발휘하세요', + rightDesc: '매력적이고 조작 가능하며 개선 가능한 AI 애플리케이션을 쉽게 구축하세요.', + tos: '이용약관', + pp: '개인정보 처리 방침', + tosDesc: '가입함으로써 다음 내용에 동의하게 됩니다.', + goToInit: '계정이 초기화되지 않았다면 초기화 페이지로 이동하세요.', + donthave: '계정이 없으신가요?', + invalidInvitationCode: '유효하지 않은 초대 코드입니다.', + accountAlreadyInited: '계정은 이미 초기화되었습니다.', + error: { + emailEmpty: '이메일 주소를 입력하세요.', + emailInValid: '유효한 이메일 주소를 입력하세요.', + nameEmpty: '사용자 이름을 입력하세요.', + passwordEmpty: '비밀번호를 입력하세요.', + passwordInvalid: '비밀번호는 문자와 숫자를 포함하고 8자 이상이어야 합니다.', + }, + license: { + tip: 'Dify Community Edition을 시작하기 전에 GitHub의', + link: '오픈 소스 라이선스', + }, + join: '가입하기', + joinTipStart: '당신을 초대합니다.', + joinTipEnd: '팀에 가입하세요.', + invalid: '링크의 유효 기간이 만료되었습니다.', + explore: 'Dify를 탐색하세요', + activatedTipStart: '이제', + activatedTipEnd: '팀에 가입되었습니다.', + activated: '지금 로그인하세요', + adminInitPassword: '관리자 초기화 비밀번호', + validate: '확인', +} + +export default translation diff --git a/web/i18n/ko-KR/register.ts b/web/i18n/ko-KR/register.ts new file mode 100644 index 0000000000..e2410dd34b --- /dev/null +++ b/web/i18n/ko-KR/register.ts @@ -0,0 +1,3 @@ +const translation = {} + +export default translation diff --git a/web/i18n/ko-KR/run-log.ts b/web/i18n/ko-KR/run-log.ts new file mode 100644 index 0000000000..2be73f26b8 --- /dev/null +++ b/web/i18n/ko-KR/run-log.ts @@ -0,0 +1,29 @@ +const translation = { + input: '입력', + result: '결과', + detail: '상세정보', + tracing: '트레이싱', + resultPanel: { + status: '상태', + time: '소요 시간', + tokens: '토큰 총합', + }, + meta: { + title: '메타데이터', + status: '상태', + version: '버전', + executor: '실행자', + startTime: '시작 시간', + time: '소요 시간', + tokens: '토큰 총합', + steps: '실행 단계', + }, + resultEmpty: { + title: '이 실행에서는 JSON 형식만 출력됩니다', + tipLeft: '를 방문해주세요', + link: '상세 정보 패널', + tipRight: '를 확인하세요.', + }, +} + +export default translation diff --git a/web/i18n/ko-KR/share-app.ts b/web/i18n/ko-KR/share-app.ts new file mode 100644 index 0000000000..c677323b10 --- /dev/null +++ b/web/i18n/ko-KR/share-app.ts @@ -0,0 +1,70 @@ +const translation = { + common: { + welcome: '이용해주셔서 감사합니다', + appUnavailable: '앱을 사용할 수 없습니다', + appUnkonwError: '앱을 사용할 수 없습니다', + }, + chat: { + newChat: '새 채팅', + pinnedTitle: '고정됨', + unpinnedTitle: '채팅', + newChatDefaultName: '새 대화', + resetChat: '대화 재설정', + powerBy: 'Powered by', + prompt: '프롬프트', + privatePromptConfigTitle: '채팅 설정', + publicPromptConfigTitle: '초기 프롬프트', + configStatusDes: '시작하기 전에 채팅 설정을 변경할 수 있습니다', + configDisabled: '이전 세션의 설정이 현재 세션에서 사용되었습니다.', + startChat: '채팅 시작', + privacyPolicyLeft: '앱 개발자가 제공하는', + privacyPolicyMiddle: '개인 정보 보호 정책', + privacyPolicyRight: '을 읽어보세요.', + deleteConversation: { + title: '대화 삭제', + content: '이 대화를 삭제하시겠습니까?', + }, + tryToSolve: '해결하려고 합니다', + temporarySystemIssue: '죄송합니다. 
일시적인 시스템 문제가 발생했습니다.', + }, + generation: { + tabs: { + create: '일회용 실행', + batch: '일괄 실행', + saved: '저장된 결과', + }, + savedNoData: { + title: '아직 저장된 결과가 없습니다!', + description: '컨텐츠 생성을 시작하고 저장된 결과를 여기서 찾아보세요.', + startCreateContent: '컨텐츠 생성 시작', + }, + title: 'AI 완성', + queryTitle: '컨텐츠 쿼리', + completionResult: '완성 결과', + queryPlaceholder: '쿼리 컨텐츠를 작성해주세요...', + run: '실행', + copy: '복사', + resultTitle: 'AI 완성', + noData: 'AI가 필요한 내용을 제공할 것입니다.', + csvUploadTitle: 'CSV 파일을 여기로 끌어다 놓거나', + browse: '찾아보기', + csvStructureTitle: 'CSV 파일은 다음 구조를 따라야 합니다:', + downloadTemplate: '여기에서 템플릿 다운로드', + field: '필드', + batchFailed: { + info: '{{num}} 회의 실행이 실패했습니다', + retry: '재시도', + outputPlaceholder: '출력 컨텐츠 없음', + }, + errorMsg: { + empty: '업로드된 파일에 컨텐츠를 입력해주세요.', + fileStructNotMatch: '업로드된 CSV 파일이 구조와 일치하지 않습니다.', + emptyLine: '줄 {{rowIndex}}이(가) 비어 있습니다.', + invalidLine: '줄 {{rowIndex}}: {{varName}}의 값은 비워둘 수 없습니다.', + moreThanMaxLengthLine: '줄 {{rowIndex}}: {{varName}}의 값은 {{maxLength}}자를 초과할 수 없습니다.', + atLeastOne: '업로드된 파일에는 적어도 한 줄의 입력이 필요합니다.', + }, + }, +} + +export default translation diff --git a/web/i18n/ko-KR/tools.ts b/web/i18n/ko-KR/tools.ts new file mode 100644 index 0000000000..a74d029d40 --- /dev/null +++ b/web/i18n/ko-KR/tools.ts @@ -0,0 +1,115 @@ +const translation = { + title: '도구', + createCustomTool: '커스텀 도구 만들기', + type: { + all: '모두', + builtIn: '내장', + custom: '커스텀', + }, + contribute: { + line1: '저는 Dify에', + line2: '도구를 기여하는데 관심이 있습니다.', + viewGuide: '가이드 보기', + }, + author: '저자', + auth: { + unauthorized: '인증되지 않음', + authorized: '인증됨', + setup: '사용을 위한 인증 설정', + setupModalTitle: '인증 설정', + setupModalTitleDescription: '자격 증명을 구성한 후에 워크스페이스의 모든 멤버가 이 도구를 사용하여 애플리케이션을 조작할 수 있습니다.', + }, + includeToolNum: '{{num}}개의 도구가 포함되어 있습니다', + addTool: '도구 추가', + createTool: { + title: '커스텀 도구 만들기', + editAction: '설정', + editTitle: '커스텀 도구 편집', + name: '이름', + toolNamePlaceHolder: '도구 이름을 입력하세요', + schema: '스키마', + schemaPlaceHolder: '여기에 OpenAPI 스키마를 입력하세요', + viewSchemaSpec: 'OpenAPI-Swagger 명세 보기', + importFromUrl: 'URL에서 가져오기', + importFromUrlPlaceHolder: 'https://...', + urlError: '유효한 URL을 입력하세요', + examples: '예시', + exampleOptions: { + json: '날씨 (JSON)', + yaml: '펫 스토어 (YAML)', + blankTemplate: '빈 템플릿', + }, + availableTools: { + title: '사용 가능한 도구', + name: '이름', + description: '설명', + method: '메소드', + path: '경로', + action: '동작', + test: '테스트', + }, + authMethod: { + title: '인증 방법', + type: '인증 유형', + keyTooltip: 'HTTP 헤더 키입니다. 생각이 없으면 "Authorization"으로 남겨둘 수 있습니다. 또는 사용자 정의 값을 설정할 수 있습니다.', + types: { + none: '없음', + api_key: 'API 키', + apiKeyPlaceholder: 'API 키의 HTTP 헤더 이름', + apiValuePlaceholder: 'API 키를 입력하세요', + }, + key: '키', + value: '값', + }, + authHeaderPrefix: { + title: '인증 유형', + types: { + basic: '베이직', + bearer: '베어러', + custom: '사용자 정의', + }, + }, + privacyPolicy: '개인정보 처리방침', + privacyPolicyPlaceholder: '개인정보 처리방침을 입력하세요', + }, + test: { + title: '테스트', + parametersValue: '파라미터 및 값', + parameters: '파라미터', + value: '값', + testResult: '테스트 결과', + testResultPlaceholder: '테스트 결과가 여기에 표시됩니다', + }, + thought: { + using: '사용 중', + used: '사용됨', + requestTitle: '요청', + responseTitle: '응답', + }, + setBuiltInTools: { + info: '정보', + setting: '설정', + toolDescription: '도구 설명', + parameters: '파라미터', + string: '문자열', + number: '숫자', + required: '필수', + infoAndSetting: '정보 및 설정', + }, + noCustomTool: { + title: '커스텀 도구가 없습니다!', + content: 'AI 앱을 구축하기 위한 커스텀 도구를 여기서 추가 및 관리합니다.', + createTool: '도구 만들기', + }, + noSearchRes: { + title: '죄송합니다. 
결과가 없습니다!', + content: '검색 결과가 없습니다.', + reset: '검색 초기화', + }, + builtInPromptTitle: '프롬프트', + toolRemoved: '도구가 제거되었습니다', + notAuthorized: '권한이 없습니다', + howToGet: '획득 방법', +} + +export default translation diff --git a/web/i18n/ko-KR/workflow.ts b/web/i18n/ko-KR/workflow.ts new file mode 100644 index 0000000000..fbf5ad954e --- /dev/null +++ b/web/i18n/ko-KR/workflow.ts @@ -0,0 +1,358 @@ +const translation = { + common: { + editing: '편집 중', + autoSaved: '자동 저장됨', + unpublished: '미게시', + published: '게시됨', + publish: '게시', + update: '업데이트', + run: '실행', + running: '실행 중', + inRunMode: '실행 모드', + inPreview: '미리보기 중', + inPreviewMode: '미리보기 모드', + preview: '미리보기', + viewRunHistory: '실행 기록 보기', + runHistory: '실행 기록', + goBackToEdit: '편집으로 돌아가기', + conversationLog: '대화 로그', + features: '기능', + debugAndPreview: '디버그 및 미리보기', + restart: '재시작', + currentDraft: '현재 초안', + currentDraftUnpublished: '현재 초안 (게시되지 않음)', + latestPublished: '최신 게시됨', + publishedAt: '게시 시간', + restore: '복원', + runApp: '앱 실행', + batchRunApp: '일괄 실행 앱', + accessAPIReference: 'API 참조에 액세스', + embedIntoSite: '사이트에 포함', + addTitle: '제목 추가...', + addDescription: '설명 추가...', + noVar: '변수 없음', + searchVar: '변수 검색', + variableNamePlaceholder: '변수명', + setVarValuePlaceholder: '변수 값 설정', + needConnecttip: '이 단계에는 연결된 항목이 없습니다', + maxTreeDepth: '각 브랜치 당 최대 {{depth}} 노드 제한', + needEndNode: '종료 블록을 추가해야 합니다', + needAnswerNode: '응답 블록을 추가해야 합니다', + workflowProcess: '워크플로우 처리', + notRunning: '아직 실행되지 않음', + previewPlaceholder: '챗봇 디버깅을 시작하려면 아래 상자에 내용을 입력하세요', + effectVarConfirm: { + title: '변수 삭제', + content: '다른 노드에서 변수를 사용하고 있습니다. 그래도 삭제하시겠습니까?', + }, + insertVarTip: '빠른 삽입을 위해 \'/\' 키를 누르세요', + processData: '데이터 처리', + input: '입력', + output: '출력', + jinjaEditorPlaceholder: '\'/\' 또는 \'{\'를 입력하여 변수를 삽입하세요.', + viewOnly: '보기 모드', + showRunHistory: '실행 기록 보기', + copy: '복사', + duplicate: '복제', + addBlock: '블록 추가', + pasteHere: '여기에 붙여넣기', + pointerMode: '선택 모드', + handMode: '시점 모드', + }, + errorMsg: { + fieldRequired: '{{field}}은(는) 필수입니다', + authRequired: '인증이 필요합니다', + invalidJson: '{{field}}이(가) 유효하지 않습니다', + fields: { + variable: '변수명', + variableValue: '변수 값', + code: '코드', + model: '모델', + rerankModel: '재순위 모델', + }, + invalidVariable: '유효하지 않은 변수', + }, + singleRun: { + testRun: '테스트 실행', + startRun: '실행 시작', + running: '실행 중', + }, + tabs: { + 'searchBlock': '블록 검색', + 'blocks': '블록', + 'builtInTool': '내장 도구', + 'customTool': '커스텀 도구', + 'question-understand': '질문 이해', + 'logic': '논리', + 'transform': '변환', + 'utilities': '유틸리티', + 'noResult': '일치하는 결과 없음', + }, + blocks: { + 'start': '시작', + 'end': '끝', + 'answer': '답변', + 'llm': 'LLM', + 'knowledge-retrieval': '지식 검색', + 'question-classifier': '질문 분류기', + 'if-else': 'IF/ELSE', + 'code': '코드', + 'template-transform': '템플릿', + 'http-request': 'HTTP 요청', + 'variable-assigner': '변수 할당기', + }, + blocksAbout: { + 'start': '워크플로우 시작을 위한 매개변수를 정의합니다', + 'end': '워크플로우 종료 및 결과 유형을 정의합니다', + 'answer': '채팅 대화의 응답 내용을 정의합니다', + 'llm': '대규모 언어 모델을 호출하여 질문에 답변하거나 자연어를 처리합니다', + 'knowledge-retrieval': '사용자의 질문과 관련된 텍스트 콘텐츠를 지식에서 쿼리할 수 있도록 합니다', + 'question-classifier': '사용자의 질문 분류 조건을 정의하고, LLM은 분류 기술에 따라 대화가 어떻게 진행될지 정의할 수 있습니다', + 'if-else': 'IF/ELSE 조건에 따라 워크플로우를 두 가지 분기로 나눌 수 있습니다', + 'code': '사용자 정의 로직을 구현하기 위해 Python 또는 NodeJS 코드를 실행합니다', + 'template-transform': 'Jinja 템플릿 구문을 사용하여 데이터를 문자열로 변환합니다', + 'http-request': 'HTTP 프로토콜을 통해 서버 요청을 보낼 수 있습니다', + 'variable-assigner': '다른 분기에서 동일한 변수에 변수를 할당하여 후속 노드의 통일된 구성을 달성할 수 있습니다', + }, + operator: { + zoomIn: '확대', + zoomOut: '축소', + zoomTo50: 
'50%로 확대', + zoomTo100: '100%로 확대', + zoomToFit: '적합하게 확대', + }, + panel: { + userInputField: '사용자 입력 필드', + changeBlock: '블록 변경', + helpLink: '도움말 링크', + about: '정보', + createdBy: '작성자', + nextStep: '다음 단계', + addNextStep: '이 워크플로우에 다음 블록 추가', + selectNextStep: '다음 블록 선택', + runThisStep: '이 단계 실행', + checklist: '체크리스트', + checklistTip: '게시하기 전에 모든 문제가 해결되었는지 확인하세요', + checklistResolved: '모든 문제가 해결되었습니다', + organizeBlocks: '블록 정리', + change: '변경', + }, + nodes: { + common: { + outputVars: '출력 변수', + insertVarTip: '변수 삽입', + memory: { + memory: '메모리', + memoryTip: '채팅 메모리 설정', + windowSize: '윈도우 크기', + conversationRoleName: '대화 역할 이름', + user: '사용자 접두사', + assistant: '어시스턴트 접두사', + }, + memories: { + title: '메모리', + tip: '채팅 메모리', + builtIn: '내장', + }, + }, + start: { + required: '필수', + inputField: '입력 필드', + builtInVar: '내장 변수', + outputVars: { + query: '사용자 입력', + memories: { + des: '대화 기록', + type: '메시지 유형', + content: '메시지 내용', + }, + files: '파일 목록', + }, + noVarTip: '워크플로우에서 사용할 수 있는 입력을 설정합니다', + }, + end: { + outputs: '출력', + output: { + type: '출력 유형', + variable: '출력 변수', + }, + type: { + 'none': '없음', + 'plain-text': '평문', + 'structured': '구조화', + }, + }, + answer: { + answer: '답변', + outputVars: '출력 변수', + }, + llm: { + model: '모델', + variables: '변수', + context: '컨텍스트', + contextTooltip: '컨텍스트로 지식을 가져올 수 있습니다', + notSetContextInPromptTip: '컨텍스트 기능을 활성화하려면 PROMPT에 컨텍스트 변수를 입력하세요.', + prompt: '프롬프트', + roleDescription: { + system: '대화의 고수준 명령을 제공합니다', + user: '모델에 대한 지시, 쿼리 또는 텍스트 기반 입력을 제공합니다', + assistant: '사용자 메시지를 기반으로 모델의 응답을 생성합니다', + }, + addMessage: '메시지 추가', + vision: '비전', + files: '파일', + resolution: { + name: '해상도', + high: '높음', + low: '낮음', + }, + outputVars: { + output: '컨텐츠 생성', + usage: '모델 사용 정보', + }, + singleRun: { + variable: '변수', + }, + sysQueryInUser: '사용자 메시지에 sys.query가 요구됩니다.', + }, + knowledgeRetrieval: { + queryVariable: '쿼리 변수', + knowledge: '지식', + outputVars: { + output: '검색된 세그먼트화된 데이터', + content: '세그먼트화된 콘텐츠', + title: '세그먼트화된 제목', + icon: '세그먼트화된 아이콘', + url: '세그먼트화된 URL', + metadata: '기타 메타데이터', + }, + }, + http: { + inputVars: '입력 변수', + api: 'API', + apiPlaceholder: 'URL 입력, \'/\'을 입력하여 변수 삽입', + notStartWithHttp: 'API는 http:// 또는 https://로 시작해야 합니다', + key: '키', + value: '값', + bulkEdit: '일괄 수정', + keyValueEdit: '키-값 수정', + headers: '헤더', + params: '파라미터', + body: '바디', + outputVars: { + body: '응답 콘텐츠', + statusCode: '응답 상태 코드', + headers: '응답 헤더 목록 JSON', + files: '파일 목록', + }, + authorization: { + 'authorization': '인증', + 'authorizationType': '인증 유형', + 'no-auth': '없음', + 'api-key': 'API 키', + 'auth-type': '인증 유형', + 'basic': '기본', + 'bearer': 'Bearer', + 'custom': '사용자 정의', + 'api-key-title': 'API 키', + 'header': '헤더', + }, + insertVarPlaceholder: '변수 삽입을 위해 \'/\'를 입력하세요', + timeout: { + title: '타임아웃', + connectLabel: '연결 타임아웃', + connectPlaceholder: '연결 타임아웃을 초 단위로 입력하세요', + readLabel: '읽기 타임아웃', + readPlaceholder: '읽기 타임아웃을 초 단위로 입력하세요', + writeLabel: '쓰기 타임아웃', + writePlaceholder: '쓰기 타임아웃을 초 단위로 입력하세요', + }, + }, + code: { + inputVars: '입력 변수', + outputVars: '출력 변수', + }, + templateTransform: { + inputVars: '입력 변수', + code: '코드', + codeSupportTip: 'Jinja2만 지원됩니다', + outputVars: { + output: '변환된 콘텐츠', + }, + }, + ifElse: { + if: '만약', + else: '그렇지 않으면', + elseDescription: 'IF 조건이 충족되지 않을 경우 실행할 로직을 정의합니다.', + and: '그리고', + or: '또는', + operator: '연산자', + notSetVariable: '먼저 변수를 설정하세요', + comparisonOperator: { + 'contains': '포함', + 'not contains': '미포함', + 'start with': '시작하는', + 'end with': '끝나는', + 'is': '일치', + 'is not': '불일치', + 
'empty': '빈 값', + 'not empty': '빈 값이 아님', + 'null': 'null', + 'not null': 'null이 아님', + }, + enterValue: '값을 입력하세요', + addCondition: '조건 추가', + conditionNotSetup: '조건이 설정되지 않았습니다', + }, + variableAssigner: { + title: '변수 할당', + outputType: '출력 유형', + outputVarType: '출력 변수 유형', + varNotSet: '변수가 설정되지 않았습니다', + noVarTip: '할당할 변수를 추가하세요', + type: { + string: '문자열', + number: '숫자', + object: '객체', + array: '배열', + }, + outputVars: { + output: '할당된 변수의 값', + }, + }, + tool: { + toAuthorize: '승인하려면', + inputVars: '입력 변수', + outputVars: { + text: '툴이 생성한 콘텐츠', + files: { + title: '툴이 생성한 파일', + type: '지원 유형: 현재 이미지만 지원됩니다', + transfer_method: '전송 방법: remote_url 또는 local_file 값', + url: '이미지 URL', + upload_file_id: '업로드 파일 ID', + }, + }, + }, + questionClassifiers: { + model: '모델', + inputVars: '입력 변수', + outputVars: { + className: '클래스 이름', + }, + class: '클래스', + classNamePlaceholder: '클래스 이름을 입력하세요', + advancedSetting: '고급 설정', + topicName: '주제명', + topicPlaceholder: '주제명을 입력하세요', + addClass: '클래스 추가', + instruction: '지시', + instructionPlaceholder: '지시를 입력하세요', + }, + }, + tracing: { + stopBy: '{{user}}에 의해 중지됨', + }, +} + +export default translation diff --git a/web/i18n/language.ts b/web/i18n/language.ts index 4a8b03f00a..0e440ba830 100644 --- a/web/i18n/language.ts +++ b/web/i18n/language.ts @@ -74,8 +74,8 @@ export const languages = [ { value: 'ko-KR', name: '한국어 (대한민국)', - example: '안녕, Dify!', - supported: false, + example: '안녕하세요, Dify!', + supported: true, }, { value: 'ru-RU', @@ -152,7 +152,7 @@ export const NOTICE_I18N = { fr_FR: 'Our system will be unavailable from 19:00 to 24:00 UTC on August 28 for an upgrade. For questions, kindly contact our support team (support@dify.ai). We value your patience.', de_DE: 'Our system will be unavailable from 19:00 to 24:00 UTC on August 28 for an upgrade. For questions, kindly contact our support team (support@dify.ai). We value your patience.', ja_JP: 'Our system will be unavailable from 19:00 to 24:00 UTC on August 28 for an upgrade. For questions, kindly contact our support team (support@dify.ai). We value your patience.', - ko_KR: 'Our system will be unavailable from 19:00 to 24:00 UTC on August 28 for an upgrade. For questions, kindly contact our support team (support@dify.ai). We value your patience.', + ko_KR: '시스템이 업그레이드를 위해 UTC 시간대로 8월 28일 19:00 ~ 24:00에 사용 불가될 예정입니다. 질문이 있으시면 지원 팀에 연락주세요 (support@dify.ai). 최선을 다해 답변해드리겠습니다.', pl_PL: 'Nasz system będzie niedostępny od 19:00 do 24:00 UTC 28 sierpnia w celu aktualizacji. W przypadku pytań prosimy o kontakt z naszym zespołem wsparcia (support@dify.ai). Doceniamy Twoją cierpliwość.', uk_UA: 'Наша система буде недоступна з 19:00 до 24:00 UTC 28 серпня для оновлення. Якщо у вас виникнуть запитання, будь ласка, зв’яжіться з нашою службою підтримки (support@dify.ai). Дякуємо за терпіння.', vi_VN: 'Hệ thống của chúng tôi sẽ ngừng hoạt động từ 19:00 đến 24:00 UTC vào ngày 28 tháng 8 để nâng cấp. Nếu có thắc mắc, vui lòng liên hệ với nhóm hỗ trợ của chúng tôi (support@dify.ai). 
Chúng tôi đánh giá cao sự kiên nhẫn của bạn.', From ece0f08a2b1afdb3c49200c84630959b8cbd2982 Mon Sep 17 00:00:00 2001 From: orangeclk Date: Mon, 13 May 2024 17:40:53 +0800 Subject: [PATCH 068/267] add yi models (#4335) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: 陈力坤 --- .../model_providers/yi/llm/_position.yaml | 6 +++ .../yi/llm/yi-large-turbo.yaml | 43 ++++++++++++++++++ .../model_providers/yi/llm/yi-large.yaml | 43 ++++++++++++++++++ .../yi/llm/yi-medium-200k.yaml | 43 ++++++++++++++++++ .../model_providers/yi/llm/yi-medium.yaml | 43 ++++++++++++++++++ .../model_providers/yi/llm/yi-spark.yaml | 43 ++++++++++++++++++ .../model_providers/yi/llm/yi-vision.yaml | 44 +++++++++++++++++++ 7 files changed, 265 insertions(+) create mode 100644 api/core/model_runtime/model_providers/yi/llm/yi-large-turbo.yaml create mode 100644 api/core/model_runtime/model_providers/yi/llm/yi-large.yaml create mode 100644 api/core/model_runtime/model_providers/yi/llm/yi-medium-200k.yaml create mode 100644 api/core/model_runtime/model_providers/yi/llm/yi-medium.yaml create mode 100644 api/core/model_runtime/model_providers/yi/llm/yi-spark.yaml create mode 100644 api/core/model_runtime/model_providers/yi/llm/yi-vision.yaml diff --git a/api/core/model_runtime/model_providers/yi/llm/_position.yaml b/api/core/model_runtime/model_providers/yi/llm/_position.yaml index 12838d670f..e876893b41 100644 --- a/api/core/model_runtime/model_providers/yi/llm/_position.yaml +++ b/api/core/model_runtime/model_providers/yi/llm/_position.yaml @@ -1,3 +1,9 @@ - yi-34b-chat-0205 - yi-34b-chat-200k - yi-vl-plus +- yi-large +- yi-medium +- yi-vision +- yi-medium-200k +- yi-spark +- yi-large-turbo diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-large-turbo.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-large-turbo.yaml new file mode 100644 index 0000000000..1d00eca2ca --- /dev/null +++ b/api/core/model_runtime/model_providers/yi/llm/yi-large-turbo.yaml @@ -0,0 +1,43 @@ +model: yi-large-turbo +label: + zh_Hans: yi-large-turbo + en_US: yi-large-turbo +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 16384 +parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.3 + min: 0.0 + max: 2.0 + help: + zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 + en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. + - name: max_tokens + use_template: max_tokens + type: int + default: 1024 + min: 1 + max: 16384 + help: + zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 + en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. + - name: top_p + use_template: top_p + type: float + default: 0.9 + min: 0.01 + max: 1.00 + help: + zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 + en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. 
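+# Illustrative note on the pricing block below (an inference from the field
+# names, not something these files state explicitly): cost appears to scale as
+# tokens × price × unit. For instance, a price of '12' with unit '0.000001'
+# and currency RMB would put 1,000,000 input tokens at roughly
+# 1,000,000 × 12 × 0.000001 = 12 RMB.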
+pricing: + input: '12' + output: '12' + unit: '0.000001' + currency: RMB diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-large.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-large.yaml new file mode 100644 index 0000000000..347f511280 --- /dev/null +++ b/api/core/model_runtime/model_providers/yi/llm/yi-large.yaml @@ -0,0 +1,43 @@ +model: yi-large +label: + zh_Hans: yi-large + en_US: yi-large +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 16384 +parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.3 + min: 0.0 + max: 2.0 + help: + zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 + en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. + - name: max_tokens + use_template: max_tokens + type: int + default: 1024 + min: 1 + max: 16384 + help: + zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 + en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. + - name: top_p + use_template: top_p + type: float + default: 0.9 + min: 0.01 + max: 1.00 + help: + zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 + en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. +pricing: + input: '20' + output: '20' + unit: '0.000001' + currency: RMB diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-medium-200k.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-medium-200k.yaml new file mode 100644 index 0000000000..e8ddbcba97 --- /dev/null +++ b/api/core/model_runtime/model_providers/yi/llm/yi-medium-200k.yaml @@ -0,0 +1,43 @@ +model: yi-medium-200k +label: + zh_Hans: yi-medium-200k + en_US: yi-medium-200k +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 204800 +parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.3 + min: 0.0 + max: 2.0 + help: + zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 + en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. + - name: max_tokens + use_template: max_tokens + type: int + default: 1024 + min: 1 + max: 204800 + help: + zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 + en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. + - name: top_p + use_template: top_p + type: float + default: 0.9 + min: 0.01 + max: 1.00 + help: + zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 + en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. 
+pricing: + input: '12' + output: '12' + unit: '0.000001' + currency: RMB diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-medium.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-medium.yaml new file mode 100644 index 0000000000..4f0244d1f5 --- /dev/null +++ b/api/core/model_runtime/model_providers/yi/llm/yi-medium.yaml @@ -0,0 +1,43 @@ +model: yi-medium +label: + zh_Hans: yi-medium + en_US: yi-medium +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 16384 +parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.3 + min: 0.0 + max: 2.0 + help: + zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 + en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. + - name: max_tokens + use_template: max_tokens + type: int + default: 1024 + min: 1 + max: 16384 + help: + zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 + en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. + - name: top_p + use_template: top_p + type: float + default: 0.9 + min: 0.01 + max: 1.00 + help: + zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 + en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. +pricing: + input: '2.5' + output: '2.5' + unit: '0.000001' + currency: RMB diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-spark.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-spark.yaml new file mode 100644 index 0000000000..e28e9fd8c0 --- /dev/null +++ b/api/core/model_runtime/model_providers/yi/llm/yi-spark.yaml @@ -0,0 +1,43 @@ +model: yi-spark +label: + zh_Hans: yi-spark + en_US: yi-spark +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 16384 +parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.3 + min: 0.0 + max: 2.0 + help: + zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 + en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. + - name: max_tokens + use_template: max_tokens + type: int + default: 1024 + min: 1 + max: 16384 + help: + zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 + en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. + - name: top_p + use_template: top_p + type: float + default: 0.9 + min: 0.01 + max: 1.00 + help: + zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 + en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. 
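+# Illustrative usage note (an inference from the parameter help text above,
+# not an upstream guarantee): the guidance is to tune either temperature or
+# top_p, not both at once. A hypothetical conservative call could keep
+# temperature at its 0.3 default and leave top_p at 0.9, while a more
+# exploratory call could raise temperature toward 1.0 with top_p unchanged.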
+pricing: + input: '1' + output: '1' + unit: '0.000001' + currency: RMB diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-vision.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-vision.yaml new file mode 100644 index 0000000000..bce34f5836 --- /dev/null +++ b/api/core/model_runtime/model_providers/yi/llm/yi-vision.yaml @@ -0,0 +1,44 @@ +model: yi-vision +label: + zh_Hans: yi-vision + en_US: yi-vision +model_type: llm +features: + - agent-thought + - vision +model_properties: + mode: chat + context_size: 4096 +parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.3 + min: 0.0 + max: 2.0 + help: + zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 + en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. + - name: max_tokens + use_template: max_tokens + type: int + default: 1024 + min: 1 + max: 4096 + help: + zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 + en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. + - name: top_p + use_template: top_p + type: float + default: 0.9 + min: 0.01 + max: 1.00 + help: + zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 + en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. +pricing: + input: '6' + output: '6' + unit: '0.000001' + currency: RMB From 0f14fdd4c906a9e29b9a1c87d9b235cda02eec24 Mon Sep 17 00:00:00 2001 From: sino Date: Mon, 13 May 2024 20:36:23 +0800 Subject: [PATCH 069/267] fix: handleUpdateWorkflowCanvas is not a function (#4343) --- web/app/components/workflow/panel/record.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/app/components/workflow/panel/record.tsx b/web/app/components/workflow/panel/record.tsx index 9841b07ab1..079dd2cc86 100644 --- a/web/app/components/workflow/panel/record.tsx +++ b/web/app/components/workflow/panel/record.tsx @@ -2,11 +2,11 @@ import { memo, useCallback } from 'react' import type { WorkflowDataUpdator } from '../types' import Run from '../run' import { useStore } from '../store' -import { useWorkflowInteractions } from '../hooks' +import { useWorkflowUpdate } from '../hooks' const Record = () => { const historyWorkflowData = useStore(s => s.historyWorkflowData) - const { handleUpdateWorkflowCanvas } = useWorkflowInteractions() + const { handleUpdateWorkflowCanvas } = useWorkflowUpdate() const handleResultCallback = useCallback((res: any) => { const graph: WorkflowDataUpdator = res.graph From e8311357ff4724b62977f753268304fad2f58172 Mon Sep 17 00:00:00 2001 From: Yeuoly <45712896+Yeuoly@users.noreply.github.com> Date: Tue, 14 May 2024 02:52:41 +0800 Subject: [PATCH 070/267] feat: gpt-4o (#4346) --- .../model_providers/openai/llm/_position.yaml | 2 + .../openai/llm/gpt-4o-2024-05-13.yaml | 44 +++++++++++++++++++ .../model_providers/openai/llm/gpt-4o.yaml | 44 +++++++++++++++++++ api/requirements.txt | 4 +- 4 files changed, 92 insertions(+), 2 deletions(-) create mode 100644 api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml create mode 100644 api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml diff --git a/api/core/model_runtime/model_providers/openai/llm/_position.yaml 
b/api/core/model_runtime/model_providers/openai/llm/_position.yaml index 3808d670c3..566055e3f7 100644 --- a/api/core/model_runtime/model_providers/openai/llm/_position.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/_position.yaml @@ -1,4 +1,6 @@ - gpt-4 +- gpt-4o +- gpt-4o-2024-05-13 - gpt-4-turbo - gpt-4-turbo-2024-04-09 - gpt-4-turbo-preview diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml new file mode 100644 index 0000000000..f0d835cba2 --- /dev/null +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml @@ -0,0 +1,44 @@ +model: gpt-4o-2024-05-13 +label: + zh_Hans: gpt-4o-2024-05-13 + en_US: gpt-4o-2024-05-13 +model_type: llm +features: + - multi-tool-call + - agent-thought + - stream-tool-call + - vision +model_properties: + mode: chat + context_size: 128000 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: presence_penalty + use_template: presence_penalty + - name: frequency_penalty + use_template: frequency_penalty + - name: max_tokens + use_template: max_tokens + default: 512 + min: 1 + max: 4096 + - name: response_format + label: + zh_Hans: 回复格式 + en_US: response_format + type: string + help: + zh_Hans: 指定模型必须输出的格式 + en_US: specifying the format that the model must output + required: false + options: + - text + - json_object +pricing: + input: '5.00' + output: '15.00' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml new file mode 100644 index 0000000000..4f141f772f --- /dev/null +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml @@ -0,0 +1,44 @@ +model: gpt-4o +label: + zh_Hans: gpt-4o + en_US: gpt-4o +model_type: llm +features: + - multi-tool-call + - agent-thought + - stream-tool-call + - vision +model_properties: + mode: chat + context_size: 128000 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: presence_penalty + use_template: presence_penalty + - name: frequency_penalty + use_template: frequency_penalty + - name: max_tokens + use_template: max_tokens + default: 512 + min: 1 + max: 4096 + - name: response_format + label: + zh_Hans: 回复格式 + en_US: response_format + type: string + help: + zh_Hans: 指定模型必须输出的格式 + en_US: specifying the format that the model must output + required: false + options: + - text + - json_object +pricing: + input: '5.00' + output: '15.00' + unit: '0.000001' + currency: USD diff --git a/api/requirements.txt b/api/requirements.txt index 6d08202527..39cbfaad99 100644 --- a/api/requirements.txt +++ b/api/requirements.txt @@ -9,8 +9,8 @@ flask-restful~=0.3.10 flask-cors~=4.0.0 gunicorn~=22.0.0 gevent~=23.9.1 -openai~=1.26.0 -tiktoken~=0.6.0 +openai~=1.29.0 +tiktoken~=0.7.0 psycopg2-binary~=2.9.6 pycryptodome==3.19.1 python-dotenv==1.0.0 From eee95190cc4cd2a86453fb5f39efdb5f26a06804 Mon Sep 17 00:00:00 2001 From: takatost Date: Tue, 14 May 2024 03:18:26 +0800 Subject: [PATCH 071/267] version to 0.6.8 (#4347) --- api/config.py | 2 +- docker/docker-compose.yaml | 6 +++--- web/package.json | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/api/config.py b/api/config.py index 4dcd44237a..8ec5febb1d 100644 --- a/api/config.py +++ b/api/config.py @@ -107,7 +107,7 @@ class Config: # ------------------------ # General Configurations. 
# ------------------------ - self.CURRENT_VERSION = "0.6.7" + self.CURRENT_VERSION = "0.6.8" self.COMMIT_SHA = get_env('COMMIT_SHA') self.EDITION = get_env('EDITION') self.DEPLOY_ENV = get_env('DEPLOY_ENV') diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index e232ef436a..cad382a860 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -2,7 +2,7 @@ version: '3' services: # API service api: - image: langgenius/dify-api:0.6.7 + image: langgenius/dify-api:0.6.8 restart: always environment: # Startup mode, 'api' starts the API server. @@ -180,7 +180,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.6.7 + image: langgenius/dify-api:0.6.8 restart: always environment: CONSOLE_WEB_URL: '' @@ -295,7 +295,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.6.7 + image: langgenius/dify-web:0.6.8 restart: always environment: # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is diff --git a/web/package.json b/web/package.json index 6c183f7205..fc995b2ec5 100644 --- a/web/package.json +++ b/web/package.json @@ -1,6 +1,6 @@ { "name": "dify-web", - "version": "0.6.7", + "version": "0.6.8", "private": true, "scripts": { "dev": "next dev", From 08e4103fa1ec389ae6d84f1d02cccd010e47c657 Mon Sep 17 00:00:00 2001 From: leejoo0 <81673835+leejoo0@users.noreply.github.com> Date: Tue, 14 May 2024 16:36:03 +0900 Subject: [PATCH 072/267] Create README_KR.md (#4364) --- README.md | 1 + README_CN.md | 1 + README_ES.md | 1 + README_FR.md | 1 + README_JA.md | 1 + README_KL.md | 1 + README_KR.md | 243 +++++++++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 249 insertions(+) create mode 100644 README_KR.md diff --git a/README.md b/README.md index f331c442c3..c43b52d7ad 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,7 @@ README en Español README en Français README tlhIngan Hol + README in Korean

diff --git a/README_CN.md b/README_CN.md index dc4e564c50..c6e81b532a 100644 --- a/README_CN.md +++ b/README_CN.md @@ -35,6 +35,7 @@ 上个月的提交次数 上个月的提交次数 上个月的提交次数 + 上个月的提交次数
diff --git a/README_ES.md b/README_ES.md index 4758a107e9..efc1bdfd41 100644 --- a/README_ES.md +++ b/README_ES.md @@ -35,6 +35,7 @@ Actividad de Commits el último mes Actividad de Commits el último mes Actividad de Commits el último mes + Actividad de Commits el último mes

# diff --git a/README_FR.md b/README_FR.md index 2b9b6b038e..4f12f3788e 100644 --- a/README_FR.md +++ b/README_FR.md @@ -35,6 +35,7 @@ Commits le mois dernier Commits le mois dernier Commits le mois dernier + Commits le mois dernier

# diff --git a/README_JA.md b/README_JA.md index 47ecbe884c..11de404c7d 100644 --- a/README_JA.md +++ b/README_JA.md @@ -35,6 +35,7 @@ 先月のコミット 先月のコミット 先月のコミット + 先月のコミット

# diff --git a/README_KL.md b/README_KL.md index a52f859bab..b1eb5073f6 100644 --- a/README_KL.md +++ b/README_KL.md @@ -35,6 +35,7 @@ Commits last month Commits last month Commits last month + Commits last month

# diff --git a/README_KR.md b/README_KR.md new file mode 100644 index 0000000000..9c809fa017 --- /dev/null +++ b/README_KR.md @@ -0,0 +1,243 @@ +![cover-v5-optimized](https://github.com/langgenius/dify/assets/13230914/f9e19af5-61ba-4119-b926-d10c4c06ebab) + +

+ Dify 클라우드 · + 셀프-호스팅 · + 문서 · + 기업 문의 +

+ +

+ + Static Badge + + Static Badge + + chat on Discord + + follow on Twitter + + Docker Pulls + + Commits last month + + Issues closed + + Discussion posts +

+ +

+ README in English + 简体中文版自述文件 + 日本語のREADME + README en Español + README en Français + README tlhIngan Hol + 한국어 README + +

+ + + Dify는 오픈 소스 LLM 앱 개발 플랫폼입니다. 직관적인 인터페이스를 통해 AI 워크플로우, RAG 파이프라인, 에이전트 기능, 모델 관리, 관찰 기능 등을 결합하여 프로토타입에서 프로덕션까지 빠르게 전환할 수 있습니다. 주요 기능 목록은 다음과 같습니다:

+ +**1. 워크플로우**: + 다음 기능들을 비롯한 다양한 기능을 활용하여 시각적 캔버스에서 강력한 AI 워크플로우를 구축하고 테스트하세요. + + + https://github.com/langgenius/dify/assets/13230914/356df23e-1604-483d-80a6-9517ece318aa + + + +**2. 포괄적인 모델 지원:**: + +수십 개의 추론 제공업체와 자체 호스팅 솔루션에서 제공하는 수백 개의 독점 및 오픈 소스 LLM과 원활하게 통합되며, GPT, Mistral, Llama3 및 모든 OpenAI API 호환 모델을 포함합니다. 지원되는 모델 제공업체의 전체 목록은 [여기](https://docs.dify.ai/getting-started/readme/model-providers)에서 확인할 수 있습니다. +![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3) + + +**3. 통합 개발환경**: + 프롬프트를 작성하고, 모델 성능을 비교하며, 텍스트-음성 변환과 같은 추가 기능을 채팅 기반 앱에 추가할 수 있는 직관적인 인터페이스를 제공합니다. + +**4. RAG 파이프라인**: + 문서 수집부터 검색까지 모든 것을 다루며, PDF, PPT 및 기타 일반적인 문서 형식에서 텍스트 추출을 위한 기본 지원이 포함되어 있는 광범위한 RAG 기능을 제공합니다. + +**5. 에이전트 기능**: + LLM 함수 호출 또는 ReAct를 기반으로 에이전트를 정의하고 에이전트에 대해 사전 구축된 도구나 사용자 정의 도구를 추가할 수 있습니다. Dify는 Google Search, DELL·E, Stable Diffusion, WolframAlpha 등 AI 에이전트를 위한 50개 이상의 내장 도구를 제공합니다. + +**6. LLMOps**: + 시간 경과에 따른 애플리케이션 로그와 성능을 모니터링하고 분석합니다. 생산 데이터와 주석을 기반으로 프롬프트, 데이터세트, 모델을 지속적으로 개선할 수 있습니다. + +**7. Backend-as-a-Service**: + Dify의 모든 제품에는 해당 API가 함께 제공되므로 Dify를 자신의 비즈니스 로직에 쉽게 통합할 수 있습니다. + +## 기능 비교 +
ミーティング無料の30分間のミーティングをスケジュールしてください。無料の30分間のミーティングをスケジュール
技術サポートAgent
Enterprise Feature (SSO/Access control)Enterprise Features (SSO/Access control) Agent
Agente
Agent
エージェント
Agent
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
기능Dify.AILangChainFlowiseOpenAI Assistants API
프로그래밍 접근 방식API + 앱 중심Python 코드앱 중심API 중심
지원되는 LLMs다양한 종류다양한 종류다양한 종류OpenAI 전용
RAG 엔진
에이전트
워크플로우
가시성
기업용 기능 (SSO/접근 제어)
로컬 배포
+ +## Dify 사용하기 + +- **클라우드
** + 우리는 누구나 설정이 필요 없이 사용해 볼 수 있도록 [Dify 클라우드](https://dify.ai) 서비스를 호스팅합니다. 이는 자체 배포 버전의 모든 기능을 제공하며, 샌드박스 플랜에서 무료로 200회의 GPT-4 호출을 포함합니다. + +- **셀프-호스팅 Dify 커뮤니티 에디션
** + 환경에서 Dify를 빠르게 실행하려면 이 [스타터 가이드를](#quick-start) 참조하세요. + 추가 참조 및 더 심층적인 지침은 [문서](https://docs.dify.ai)를 사용하세요. + +- **기업 / 조직을 위한 Dify
** + 우리는 추가적인 기업 중심 기능을 제공합니다. 당사와 [미팅일정](https://cal.com/guchenhe/30min)을 잡거나 [이메일 보내기](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)를 통해 기업 요구 사항을 논의하십시오.
+ > AWS를 사용하는 스타트업 및 중소기업의 경우 [AWS Marketplace에서 Dify Premium](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6)을 확인하고 한 번의 클릭으로 자체 AWS VPC에 배포하십시오. 맞춤형 로고와 브랜딩이 포함된 앱을 생성할 수 있는 옵션이 포함된 저렴한 AMI 제품입니다. + + + +## 앞서가기 + +GitHub에서 Dify에 별표를 찍어 새로운 릴리스를 즉시 알림 받으세요. + +![star-us](https://github.com/langgenius/dify/assets/13230914/b823edc1-6388-4e25-ad45-2f6b187adbb4) + + + +## 빠른 시작 +>Dify를 설치하기 전에 컴퓨터가 다음과 같은 최소 시스템 요구 사항을 충족하는지 확인하세요 : +>- CPU >= 2 Core +>- RAM >= 4GB + +
+ +Dify 서버를 시작하는 가장 쉬운 방법은 [docker-compose.yml](docker/docker-compose.yaml) 파일을 실행하는 것입니다. 설치 명령을 실행하기 전에 [Docker](https://docs.docker.com/get-docker/) 및 [Docker Compose](https://docs.docker.com/compose/install/)가 머신에 설치되어 있는지 확인하세요. + +```bash +cd docker +docker compose up -d +``` + +실행 후 브라우저의 [http://localhost/install](http://localhost/install) 에서 Dify 대시보드에 액세스하고 초기화 프로세스를 시작할 수 있습니다. + +> Dify에 기여하거나 추가 개발을 하고 싶다면 소스 코드에서 [배포에 대한 가이드](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code)를 참조하세요. + +## 다음 단계 + +구성 커스터마이징이 필요한 경우, [docker-compose.yml](docker/docker-compose.yaml) 파일의 코멘트를 참조하여 환경 구성을 수동으로 설정하십시오. 변경 후 `docker-compose up -d` 를 다시 실행하십시오. 환경 변수의 전체 목록은 [여기](https://docs.dify.ai/getting-started/install-self-hosted/environments)에서 확인할 수 있습니다. + + +고가용성 설정을 구성하려면 Dify를 Kubernetes에 배포할 수 있는 커뮤니티 제공 [Helm Charts](https://helm.sh/)가 있습니다. + +- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify) +- [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm) + + +## 기여 + +코드에 기여하고 싶은 분들은 [기여 가이드](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)를 참조하세요. +동시에 Dify를 소셜 미디어와 행사 및 컨퍼런스에 공유하여 지원하는 것을 고려해 주시기 바랍니다. + + +> 우리는 Dify를 중국어나 영어 이외의 언어로 번역하는 데 도움을 줄 수 있는 기여자를 찾고 있습니다. 도움을 주고 싶으시다면 [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md)에서 더 많은 정보를 확인하시고 [Discord 커뮤니티 서버](https://discord.gg/8Tpq4AcN9c)의 `global-users` 채널에 댓글을 남겨주세요. + +**기여자** + + + + + +## 커뮤니티 & 연락처 + +* [Github 토론](https://github.com/langgenius/dify/discussions). 피드백 공유 및 질문하기에 적합합니다. +* [GitHub 이슈](https://github.com/langgenius/dify/issues). Dify.AI 사용 중 발견한 버그와 기능 제안에 적합합니다. [기여 가이드](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)를 참조하세요. +* [이메일](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). Dify.AI 사용에 대한 질문하기에 적합합니다. +* [디스코드](https://discord.gg/FngNHpbcY7). 애플리케이션 공유 및 커뮤니티와 소통하기에 적합합니다. +* [트위터](https://twitter.com/dify_ai). 애플리케이션 공유 및 커뮤니티와 소통하기에 적합합니다. + +또는 팀원과 직접 미팅을 예약하세요: + + + + + + + + + + + + + + +
연락처목적
Git-Hub-README-Button-3x비즈니스 문의 및 제품 피드백
Git-Hub-README-Button-2x기여, 이슈 및 기능 요청
+ +## Star 히스토리 + +[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date) + + +## 보안 공개 + +개인정보 보호를 위해 보안 문제를 GitHub에 게시하지 마십시오. 대신 security@dify.ai로 질문을 보내주시면 더 자세한 답변을 드리겠습니다. + +## 라이선스 + +이 저장소는 기본적으로 몇 가지 추가 제한 사항이 있는 Apache 2.0인 [Dify 오픈 소스 라이선스](LICENSE)에 따라 사용할 수 있습니다. From 6f1633fa7534a08c522c73618c23484ce641fc07 Mon Sep 17 00:00:00 2001 From: zxhlyh Date: Tue, 14 May 2024 15:51:08 +0800 Subject: [PATCH 073/267] fix: delete end node (#4372) --- .../workflow/hooks/use-nodes-interactions.ts | 90 +++++++++++++------ .../panel-operator/panel-operator-popup.tsx | 2 +- web/app/components/workflow/utils.ts | 4 +- 3 files changed, 64 insertions(+), 32 deletions(-) diff --git a/web/app/components/workflow/hooks/use-nodes-interactions.ts b/web/app/components/workflow/hooks/use-nodes-interactions.ts index 422e54f18f..026e802268 100644 --- a/web/app/components/workflow/hooks/use-nodes-interactions.ts +++ b/web/app/components/workflow/hooks/use-nodes-interactions.ts @@ -488,12 +488,22 @@ export const useNodesInteractions = () => { _connectedNodeIsSelected: true, }, } + const nodesConnectedSourceOrTargetHandleIdsMap = getNodesConnectedSourceOrTargetHandleIdsMap( + [ + { type: 'add', edge: newEdge }, + ], + nodes, + ) const newNodes = produce(nodes, (draft: Node[]) => { draft.forEach((node) => { node.data.selected = false - if (node.id === prevNode.id) - node.data._connectedSourceHandleIds?.push(prevNodeSourceHandle!) + if (nodesConnectedSourceOrTargetHandleIdsMap[node.id]) { + node.data = { + ...node.data, + ...nodesConnectedSourceOrTargetHandleIdsMap[node.id], + } + } }) draft.push(newNode) }) @@ -512,26 +522,42 @@ export const useNodesInteractions = () => { if (!prevNodeId && nextNodeId) { const nextNodeIndex = nodes.findIndex(node => node.id === nextNodeId) const nextNode = nodes[nextNodeIndex]! - newNode.data._connectedSourceHandleIds = [sourceHandle] + if ((nodeType !== BlockEnum.IfElse) && (nodeType !== BlockEnum.QuestionClassifier)) + newNode.data._connectedSourceHandleIds = [sourceHandle] newNode.data._connectedTargetHandleIds = [] newNode.position = { x: nextNode.position.x, y: nextNode.position.y, } - const newEdge = { - id: `${newNode.id}-${nextNodeId}`, - type: 'custom', - source: newNode.id, - sourceHandle, - target: nextNodeId, - targetHandle: nextNodeTargetHandle, - data: { - sourceType: newNode.data.type, - targetType: nextNode.data.type, - _connectedNodeIsSelected: true, - }, + let newEdge + + if ((nodeType !== BlockEnum.IfElse) && (nodeType !== BlockEnum.QuestionClassifier)) { + newEdge = { + id: `${newNode.id}-${nextNodeId}`, + type: 'custom', + source: newNode.id, + sourceHandle, + target: nextNodeId, + targetHandle: nextNodeTargetHandle, + data: { + sourceType: newNode.data.type, + targetType: nextNode.data.type, + _connectedNodeIsSelected: true, + }, + } } + + let nodesConnectedSourceOrTargetHandleIdsMap: Record + if (newEdge) { + nodesConnectedSourceOrTargetHandleIdsMap = getNodesConnectedSourceOrTargetHandleIdsMap( + [ + { type: 'add', edge: newEdge }, + ], + nodes, + ) + } + const afterNodesInSameBranch = getAfterNodesInSameBranch(nextNodeId!) 
const afterNodesInSameBranchIds = afterNodesInSameBranch.map(node => node.id) const newNodes = produce(nodes, (draft) => { @@ -541,22 +567,28 @@ export const useNodesInteractions = () => { if (afterNodesInSameBranchIds.includes(node.id)) node.position.x += NODE_WIDTH_X_OFFSET - if (node.id === nextNodeId) - node.data._connectedTargetHandleIds?.push(nextNodeTargetHandle!) + if (nodesConnectedSourceOrTargetHandleIdsMap?.[node.id]) { + node.data = { + ...node.data, + ...nodesConnectedSourceOrTargetHandleIdsMap[node.id], + } + } }) draft.push(newNode) }) setNodes(newNodes) - const newEdges = produce(edges, (draft) => { - draft.forEach((item) => { - item.data = { - ...item.data, - _connectedNodeIsSelected: false, - } + if (newEdge) { + const newEdges = produce(edges, (draft) => { + draft.forEach((item) => { + item.data = { + ...item.data, + _connectedNodeIsSelected: false, + } + }) + draft.push(newEdge) }) - draft.push(newEdge) - }) - setEdges(newEdges) + setEdges(newEdges) + } } if (prevNodeId && nextNodeId) { const prevNode = nodes.find(node => node.id === prevNodeId)! @@ -771,14 +803,14 @@ export const useNodesInteractions = () => { } = store.getState() const nodes = getNodes() - const bundledNodes = nodes.filter(node => node.data._isBundled && node.data.type !== BlockEnum.Start && node.data.type !== BlockEnum.End) + const bundledNodes = nodes.filter(node => node.data._isBundled && node.data.type !== BlockEnum.Start) if (bundledNodes.length) { setClipboardElements(bundledNodes) return } - const selectedNode = nodes.find(node => node.data.selected && node.data.type !== BlockEnum.Start && node.data.type !== BlockEnum.End) + const selectedNode = nodes.find(node => node.data.selected && node.data.type !== BlockEnum.Start) if (selectedNode) setClipboardElements([selectedNode]) @@ -850,7 +882,7 @@ export const useNodesInteractions = () => { } = store.getState() const nodes = getNodes() - const selectedNode = nodes.find(node => node.data.selected && node.data.type !== BlockEnum.Start && node.data.type !== BlockEnum.End) + const selectedNode = nodes.find(node => node.data.selected && node.data.type !== BlockEnum.Start) if (selectedNode) { const nodeType = selectedNode.data.type diff --git a/web/app/components/workflow/nodes/_base/components/panel-operator/panel-operator-popup.tsx b/web/app/components/workflow/nodes/_base/components/panel-operator/panel-operator-popup.tsx index 78e7657569..0a0fad3a4d 100644 --- a/web/app/components/workflow/nodes/_base/components/panel-operator/panel-operator-popup.tsx +++ b/web/app/components/workflow/nodes/_base/components/panel-operator/panel-operator-popup.tsx @@ -108,7 +108,7 @@ const PanelOperatorPopup = ({ ) } { - data.type !== BlockEnum.Start && data.type !== BlockEnum.End && !nodesReadOnly && ( + data.type !== BlockEnum.Start && !nodesReadOnly && ( <>
{ if (!edge.targetHandle) edge.targetHandle = 'target' - if (!edge.data?.sourceType && edge.source) { + if (!edge.data?.sourceType && edge.source && nodesMap[edge.source]) { edge.data = { ...edge.data, sourceType: nodesMap[edge.source].data.type!, } as any } - if (!edge.data?.targetType && edge.target) { + if (!edge.data?.targetType && edge.target && nodesMap[edge.target]) { edge.data = { ...edge.data, targetType: nodesMap[edge.target].data.type!, From 16d47923c33633aec9ce812ad85c4981c5bc6970 Mon Sep 17 00:00:00 2001 From: Yeuoly <45712896+Yeuoly@users.noreply.github.com> Date: Tue, 14 May 2024 16:01:23 +0800 Subject: [PATCH 074/267] fix: requests timeout (#4370) --- api/core/helper/ssrf_proxy.py | 14 ++++++++++++++ api/core/workflow/nodes/http_request/entities.py | 6 +++--- .../nodes/http_request/http_request_node.py | 6 ++++++ 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/api/core/helper/ssrf_proxy.py b/api/core/helper/ssrf_proxy.py index c44d4717e6..276c8a34e7 100644 --- a/api/core/helper/ssrf_proxy.py +++ b/api/core/helper/ssrf_proxy.py @@ -42,6 +42,20 @@ def delete(url, *args, **kwargs): if kwargs['follow_redirects']: kwargs['allow_redirects'] = kwargs['follow_redirects'] kwargs.pop('follow_redirects') + if 'timeout' in kwargs: + timeout = kwargs['timeout'] + if timeout is None: + kwargs.pop('timeout') + elif isinstance(timeout, tuple): + # check length of tuple + if len(timeout) == 2: + kwargs['timeout'] = timeout + elif len(timeout) == 1: + kwargs['timeout'] = timeout[0] + elif len(timeout) > 2: + kwargs['timeout'] = (timeout[0], timeout[1]) + else: + kwargs['timeout'] = (timeout, timeout) return _delete(url=url, *args, proxies=requests_proxies, **kwargs) def head(url, *args, **kwargs): diff --git a/api/core/workflow/nodes/http_request/entities.py b/api/core/workflow/nodes/http_request/entities.py index 31d5a679b0..4a81a4176d 100644 --- a/api/core/workflow/nodes/http_request/entities.py +++ b/api/core/workflow/nodes/http_request/entities.py @@ -40,9 +40,9 @@ class HttpRequestNodeData(BaseNodeData): data: Union[None, str] class Timeout(BaseModel): - connect: int = MAX_CONNECT_TIMEOUT - read: int = MAX_READ_TIMEOUT - write: int = MAX_WRITE_TIMEOUT + connect: Optional[int] = MAX_CONNECT_TIMEOUT + read: Optional[int] = MAX_READ_TIMEOUT + write: Optional[int] = MAX_WRITE_TIMEOUT method: Literal['get', 'post', 'put', 'patch', 'delete', 'head'] url: str diff --git a/api/core/workflow/nodes/http_request/http_request_node.py b/api/core/workflow/nodes/http_request/http_request_node.py index bfd686175a..d983a30695 100644 --- a/api/core/workflow/nodes/http_request/http_request_node.py +++ b/api/core/workflow/nodes/http_request/http_request_node.py @@ -95,8 +95,14 @@ class HttpRequestNode(BaseNode): if timeout is None: return HTTP_REQUEST_DEFAULT_TIMEOUT + if timeout.connect is None: + timeout.connect = HTTP_REQUEST_DEFAULT_TIMEOUT.connect timeout.connect = min(timeout.connect, MAX_CONNECT_TIMEOUT) + if timeout.read is None: + timeout.read = HTTP_REQUEST_DEFAULT_TIMEOUT.read timeout.read = min(timeout.read, MAX_READ_TIMEOUT) + if timeout.write is None: + timeout.write = HTTP_REQUEST_DEFAULT_TIMEOUT.write timeout.write = min(timeout.write, MAX_WRITE_TIMEOUT) return timeout From 3271e3e8031da577d56e5cdaf647e28d826a934f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Tue, 14 May 2024 16:11:12 +0800 Subject: [PATCH 075/267] improve the code readability of http_executor node (#4360) --- .../nodes/http_request/http_executor.py | 194 
++++++------------ 1 file changed, 67 insertions(+), 127 deletions(-) diff --git a/api/core/workflow/nodes/http_request/http_executor.py b/api/core/workflow/nodes/http_request/http_executor.py index 0b07ad8e82..97cb59d02d 100644 --- a/api/core/workflow/nodes/http_request/http_executor.py +++ b/api/core/workflow/nodes/http_request/http_executor.py @@ -14,28 +14,18 @@ from core.workflow.entities.variable_pool import ValueType, VariablePool from core.workflow.nodes.http_request.entities import HttpRequestNodeData from core.workflow.utils.variable_template_parser import VariableTemplateParser -MAX_BINARY_SIZE = int(os.environ.get('HTTP_REQUEST_NODE_MAX_BINARY_SIZE', str(1024 * 1024 * 10))) # 10MB +MAX_BINARY_SIZE = int(os.environ.get('HTTP_REQUEST_NODE_MAX_BINARY_SIZE', 1024 * 1024 * 10)) # 10MB READABLE_MAX_BINARY_SIZE = f'{MAX_BINARY_SIZE / 1024 / 1024:.2f}MB' -MAX_TEXT_SIZE = int(os.environ.get('HTTP_REQUEST_NODE_MAX_TEXT_SIZE', str(1024 * 1024))) # 10MB # 1MB +MAX_TEXT_SIZE = int(os.environ.get('HTTP_REQUEST_NODE_MAX_TEXT_SIZE', 1024 * 1024)) # 1MB READABLE_MAX_TEXT_SIZE = f'{MAX_TEXT_SIZE / 1024 / 1024:.2f}MB' + class HttpExecutorResponse: headers: dict[str, str] response: Union[httpx.Response, requests.Response] def __init__(self, response: Union[httpx.Response, requests.Response] = None): - """ - init - """ - headers = {} - if isinstance(response, httpx.Response): - for k, v in response.headers.items(): - headers[k] = v - elif isinstance(response, requests.Response): - for k, v in response.headers.items(): - headers[k] = v - - self.headers = headers + self.headers = response.headers self.response = response @property @@ -45,21 +35,11 @@ class HttpExecutorResponse: """ content_type = self.get_content_type() file_content_types = ['image', 'audio', 'video'] - for v in file_content_types: - if v in content_type: - return True - - return False + + return any(v in content_type for v in file_content_types) def get_content_type(self) -> str: - """ - get content type - """ - for key, val in self.headers.items(): - if key.lower() == 'content-type': - return val - - return '' + return self.headers.get('content-type') def extract_file(self) -> tuple[str, bytes]: """ @@ -67,29 +47,25 @@ class HttpExecutorResponse: """ if self.is_file: return self.get_content_type(), self.body - + return '', b'' - + @property def content(self) -> str: """ get content """ - if isinstance(self.response, httpx.Response): - return self.response.text - elif isinstance(self.response, requests.Response): + if isinstance(self.response, httpx.Response | requests.Response): return self.response.text else: raise ValueError(f'Invalid response type {type(self.response)}') - + @property def body(self) -> bytes: """ get body """ - if isinstance(self.response, httpx.Response): - return self.response.content - elif isinstance(self.response, requests.Response): + if isinstance(self.response, httpx.Response | requests.Response): return self.response.content else: raise ValueError(f'Invalid response type {type(self.response)}') @@ -99,20 +75,18 @@ class HttpExecutorResponse: """ get status code """ - if isinstance(self.response, httpx.Response): - return self.response.status_code - elif isinstance(self.response, requests.Response): + if isinstance(self.response, httpx.Response | requests.Response): return self.response.status_code else: raise ValueError(f'Invalid response type {type(self.response)}') - + @property def size(self) -> int: """ get size """ return len(self.body) - + @property def readable_size(self) -> str: """ @@ -138,10 
+112,8 @@ class HttpExecutor: variable_selectors: list[VariableSelector] timeout: HttpRequestNodeData.Timeout - def __init__(self, node_data: HttpRequestNodeData, timeout: HttpRequestNodeData.Timeout, variable_pool: Optional[VariablePool] = None): - """ - init - """ + def __init__(self, node_data: HttpRequestNodeData, timeout: HttpRequestNodeData.Timeout, + variable_pool: Optional[VariablePool] = None): self.server_url = node_data.url self.method = node_data.method self.authorization = node_data.authorization @@ -155,7 +127,8 @@ class HttpExecutor: self.variable_selectors = [] self._init_template(node_data, variable_pool) - def _is_json_body(self, body: HttpRequestNodeData.Body): + @staticmethod + def _is_json_body(body: HttpRequestNodeData.Body): """ check if body is json """ @@ -165,55 +138,46 @@ class HttpExecutor: return True except: return False - + return False + @staticmethod + def _to_dict(convert_item: str, convert_text: str, maxsplit: int = -1): + """ + Convert the string like `aa:bb\n cc:dd` to dict `{aa:bb, cc:dd}` + :param convert_item: A label for what item to be converted, params, headers or body. + :param convert_text: The string containing key-value pairs separated by '\n'. + :param maxsplit: The maximum number of splits allowed for the ':' character in each key-value pair. Default is -1 (no limit). + :return: A dictionary containing the key-value pairs from the input string. + """ + kv_paris = convert_text.split('\n') + result = {} + for kv in kv_paris: + if not kv.strip(): + continue + + kv = kv.split(':', maxsplit=maxsplit) + if len(kv) == 2: + k, v = kv + elif len(kv) == 1: + k, v = kv[0], '' + else: + raise ValueError(f'Invalid {convert_item} {kv}') + result[k.strip()] = v + return result + def _init_template(self, node_data: HttpRequestNodeData, variable_pool: Optional[VariablePool] = None): - """ - init template - """ - variable_selectors = [] # extract all template in url self.server_url, server_url_variable_selectors = self._format_template(node_data.url, variable_pool) # extract all template in params params, params_variable_selectors = self._format_template(node_data.params, variable_pool) - - # fill in params - kv_paris = params.split('\n') - for kv in kv_paris: - if not kv.strip(): - continue - - kv = kv.split(':') - if len(kv) == 2: - k, v = kv - elif len(kv) == 1: - k, v = kv[0], '' - else: - raise ValueError(f'Invalid params {kv}') - - self.params[k.strip()] = v + self.params = self._to_dict("params", params) # extract all template in headers headers, headers_variable_selectors = self._format_template(node_data.headers, variable_pool) - - # fill in headers - kv_paris = headers.split('\n') - for kv in kv_paris: - if not kv.strip(): - continue - - kv = kv.split(':') - if len(kv) == 2: - k, v = kv - elif len(kv) == 1: - k, v = kv[0], '' - else: - raise ValueError(f'Invalid headers {kv}') - - self.headers[k.strip()] = v.strip() + self.headers = self._to_dict("headers", headers) # extract all template in body body_data_variable_selectors = [] @@ -231,18 +195,7 @@ class HttpExecutor: self.headers['Content-Type'] = 'application/x-www-form-urlencoded' if node_data.body.type in ['form-data', 'x-www-form-urlencoded']: - body = {} - kv_paris = body_data.split('\n') - for kv in kv_paris: - if not kv.strip(): - continue - kv = kv.split(':', 1) - if len(kv) == 2: - body[kv[0].strip()] = kv[1] - elif len(kv) == 1: - body[kv[0].strip()] = '' - else: - raise ValueError(f'Invalid body {kv}') + body = self._to_dict("body", body_data, 1) if node_data.body.type == 'form-data': 
self.files = { @@ -261,14 +214,14 @@ class HttpExecutor: self.variable_selectors = (server_url_variable_selectors + params_variable_selectors + headers_variable_selectors + body_data_variable_selectors) - + def _assembling_headers(self) -> dict[str, Any]: authorization = deepcopy(self.authorization) headers = deepcopy(self.headers) or {} if self.authorization.type == 'api-key': if self.authorization.config.api_key is None: raise ValueError('api_key is required') - + if not self.authorization.config.header: authorization.config.header = 'Authorization' @@ -278,9 +231,9 @@ class HttpExecutor: headers[authorization.config.header] = f'Basic {authorization.config.api_key}' elif self.authorization.config.type == 'custom': headers[authorization.config.header] = authorization.config.api_key - + return headers - + def _validate_and_parse_response(self, response: Union[httpx.Response, requests.Response]) -> HttpExecutorResponse: """ validate the response @@ -289,21 +242,22 @@ class HttpExecutor: executor_response = HttpExecutorResponse(response) else: raise ValueError(f'Invalid response type {type(response)}') - + if executor_response.is_file: if executor_response.size > MAX_BINARY_SIZE: - raise ValueError(f'File size is too large, max size is {READABLE_MAX_BINARY_SIZE}, but current size is {executor_response.readable_size}.') + raise ValueError( + f'File size is too large, max size is {READABLE_MAX_BINARY_SIZE}, but current size is {executor_response.readable_size}.') else: if executor_response.size > MAX_TEXT_SIZE: - raise ValueError(f'Text size is too large, max size is {READABLE_MAX_TEXT_SIZE}, but current size is {executor_response.readable_size}.') - + raise ValueError( + f'Text size is too large, max size is {READABLE_MAX_TEXT_SIZE}, but current size is {executor_response.readable_size}.') + return executor_response - + def _do_http_request(self, headers: dict[str, Any]) -> httpx.Response: """ do http request depending on api bundle """ - # do http request kwargs = { 'url': self.server_url, 'headers': headers, @@ -312,25 +266,14 @@ class HttpExecutor: 'follow_redirects': True } - if self.method == 'get': - response = ssrf_proxy.get(**kwargs) - elif self.method == 'post': - response = ssrf_proxy.post(data=self.body, files=self.files, **kwargs) - elif self.method == 'put': - response = ssrf_proxy.put(data=self.body, files=self.files, **kwargs) - elif self.method == 'delete': - response = ssrf_proxy.delete(data=self.body, files=self.files, **kwargs) - elif self.method == 'patch': - response = ssrf_proxy.patch(data=self.body, files=self.files, **kwargs) - elif self.method == 'head': - response = ssrf_proxy.head(**kwargs) - elif self.method == 'options': - response = ssrf_proxy.options(**kwargs) + if self.method in ('get', 'head', 'options'): + response = getattr(ssrf_proxy, self.method)(**kwargs) + elif self.method in ('post', 'put', 'delete', 'patch'): + response = getattr(ssrf_proxy, self.method)(data=self.body, files=self.files, **kwargs) else: raise ValueError(f'Invalid http method {self.method}') - return response - + def invoke(self) -> HttpExecutorResponse: """ invoke http request @@ -343,14 +286,11 @@ class HttpExecutor: # validate response return self._validate_and_parse_response(response) - + def to_raw_request(self, mask_authorization_header: Optional[bool] = True) -> str: """ convert to raw request """ - if mask_authorization_header == None: - mask_authorization_header = True - server_url = self.server_url if self.params: server_url += f'?{urlencode(self.params)}' @@ -365,11 +305,11 
@@ class HttpExecutor: authorization_header = 'Authorization' if self.authorization.config and self.authorization.config.header: authorization_header = self.authorization.config.header - + if k.lower() == authorization_header.lower(): raw_request += f'{k}: {"*" * len(v)}\n' continue - + raw_request += f'{k}: {v}\n' raw_request += '\n' From 66c8070da86db919469fd8e4b3f0c7623b6d4e28 Mon Sep 17 00:00:00 2001 From: Joel Date: Tue, 14 May 2024 16:29:41 +0800 Subject: [PATCH 076/267] fix: Jinja switch not aligned in vertical direction (#4374) --- .../workflow/nodes/_base/components/prompt/editor.tsx | 2 +- .../components/workflow/nodes/llm/components/config-prompt.tsx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx b/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx index 08a714e385..6c354c0a2c 100644 --- a/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx +++ b/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx @@ -130,7 +130,7 @@ const Editor: FC = ({ } hideArrow > -
+
= ({
    return true
  })()
  return (
-
+
{canDrag && } Date: Tue, 14 May 2024 12:48:24 +0300 Subject: [PATCH 077/267] feat: Use Romanian & other langs in QA (#4205) Co-authored-by: crazywoola <427733928@qq.com> --- api/constants/languages.py | 1 + .../create/step-two/language-select/index.tsx | 12 +- web/hooks/use-metadata.ts | 1 + web/i18n/language.ts | 24 + web/i18n/ro-RO/app-annotation.ts | 87 +++ web/i18n/ro-RO/app-api.ts | 83 +++ web/i18n/ro-RO/app-debug.ts | 424 ++++++++++++++ web/i18n/ro-RO/app-log.ts | 91 +++ web/i18n/ro-RO/app-overview.ts | 143 +++++ web/i18n/ro-RO/app.ts | 90 +++ web/i18n/ro-RO/billing.ts | 115 ++++ web/i18n/ro-RO/common.ts | 529 ++++++++++++++++++ web/i18n/ro-RO/custom.ts | 30 + web/i18n/ro-RO/dataset-creation.ts | 130 +++++ web/i18n/ro-RO/dataset-documents.ts | 349 ++++++++++++ web/i18n/ro-RO/dataset-hit-testing.ts | 28 + web/i18n/ro-RO/dataset-settings.ts | 33 ++ web/i18n/ro-RO/dataset.ts | 49 ++ web/i18n/ro-RO/explore.ts | 41 ++ web/i18n/ro-RO/layout.ts | 4 + web/i18n/ro-RO/login.ts | 61 ++ web/i18n/ro-RO/register.ts | 4 + web/i18n/ro-RO/run-log.ts | 29 + web/i18n/ro-RO/share-app.ts | 74 +++ web/i18n/ro-RO/tools.ts | 115 ++++ web/i18n/ro-RO/workflow.ts | 352 ++++++++++++ 26 files changed, 2896 insertions(+), 3 deletions(-) create mode 100644 web/i18n/ro-RO/app-annotation.ts create mode 100644 web/i18n/ro-RO/app-api.ts create mode 100644 web/i18n/ro-RO/app-debug.ts create mode 100644 web/i18n/ro-RO/app-log.ts create mode 100644 web/i18n/ro-RO/app-overview.ts create mode 100644 web/i18n/ro-RO/app.ts create mode 100644 web/i18n/ro-RO/billing.ts create mode 100644 web/i18n/ro-RO/common.ts create mode 100644 web/i18n/ro-RO/custom.ts create mode 100644 web/i18n/ro-RO/dataset-creation.ts create mode 100644 web/i18n/ro-RO/dataset-documents.ts create mode 100644 web/i18n/ro-RO/dataset-hit-testing.ts create mode 100644 web/i18n/ro-RO/dataset-settings.ts create mode 100644 web/i18n/ro-RO/dataset.ts create mode 100644 web/i18n/ro-RO/explore.ts create mode 100644 web/i18n/ro-RO/layout.ts create mode 100644 web/i18n/ro-RO/login.ts create mode 100644 web/i18n/ro-RO/register.ts create mode 100644 web/i18n/ro-RO/run-log.ts create mode 100644 web/i18n/ro-RO/share-app.ts create mode 100644 web/i18n/ro-RO/tools.ts create mode 100644 web/i18n/ro-RO/workflow.ts diff --git a/api/constants/languages.py b/api/constants/languages.py index 7eb00816ab..b4626cf51f 100644 --- a/api/constants/languages.py +++ b/api/constants/languages.py @@ -16,6 +16,7 @@ language_timezone_mapping = { 'it-IT': 'Europe/Rome', 'uk-UA': 'Europe/Kyiv', 'vi-VN': 'Asia/Ho_Chi_Minh', + 'ro-RO': 'Europe/Bucharest', 'pl-PL': 'Europe/Warsaw', } diff --git a/web/app/components/datasets/create/step-two/language-select/index.tsx b/web/app/components/datasets/create/step-two/language-select/index.tsx index 859a4c5823..adc146fbc2 100644 --- a/web/app/components/datasets/create/step-two/language-select/index.tsx +++ b/web/app/components/datasets/create/step-two/language-select/index.tsx @@ -4,6 +4,7 @@ import React from 'react' import cn from 'classnames' import { ChevronDown } from '@/app/components/base/icons/src/vender/line/arrows' import Popover from '@/app/components/base/popover' +import { languages } from '@/i18n/language' export type ILanguageSelectProps = { currentLanguage: string @@ -20,13 +21,18 @@ const LanguageSelect: FC = ({ trigger='click' htmlContent={
- onSelect('English')}>English
- onSelect('Chinese')}>简体中文
+ {languages.filter(language => language.supported).map(({ prompt_name, name }) => ( +
onSelect(prompt_name)}>{prompt_name} +
+ ))}
} btnElement={
- {currentLanguage === 'English' ? 'English' : '简体中文'}
+ {currentLanguage}
} diff --git a/web/hooks/use-metadata.ts b/web/hooks/use-metadata.ts index 4301548bcd..6a4965f2bf 100644 --- a/web/hooks/use-metadata.ts +++ b/web/hooks/use-metadata.ts @@ -314,6 +314,7 @@ export const useLanguages = () => { cs: t(`${langPrefix}cs`), th: t(`${langPrefix}th`), id: t(`${langPrefix}id`), + ro: t(`${langPrefix}ro`), } } diff --git a/web/i18n/language.ts b/web/i18n/language.ts index 0e440ba830..a0fc64ecc3 100644 --- a/web/i18n/language.ts +++ b/web/i18n/language.ts @@ -19,6 +19,7 @@ export type I18nText = { 'vi-VN': string 'de_DE': string 'zh_Hant': string + 'ro-RO': string 'pl-PL': string } @@ -26,96 +27,119 @@ export const languages = [ { value: 'en-US', name: 'English (United States)', + prompt_name: 'English', example: 'Hello, Dify!', supported: true, }, { value: 'zh-Hans', name: '简体中文', + prompt_name: 'Chinese Simplified', example: '你好,Dify!', supported: true, }, { value: 'zh-Hant', name: '繁體中文', + prompt_name: 'Chinese Traditional', example: '你好,Dify!', supported: true, }, { value: 'pt-BR', name: 'Português (Brasil)', + prompt_name: 'Portuguese', example: 'Olá, Dify!', supported: true, }, { value: 'es-ES', name: 'Español (España)', + prompt_name: 'Spanish', example: 'Saluton, Dify!', supported: false, }, { value: 'fr-FR', name: 'Français (France)', + prompt_name: 'French', example: 'Bonjour, Dify!', supported: true, }, { value: 'de-DE', name: 'Deutsch (Deutschland)', + prompt_name: 'German', example: 'Hallo, Dify!', supported: true, }, { value: 'ja-JP', name: '日本語 (日本)', + prompt_name: 'Japanese', example: 'こんにちは、Dify!', supported: true, }, { value: 'ko-KR', name: '한국어 (대한민국)', + prompt_name: 'Korean', example: '안녕하세요, Dify!', supported: true, }, { value: 'ru-RU', name: 'Русский (Россия)', + prompt_name: 'Russian', example: ' Привет, Dify!', supported: false, }, { value: 'it-IT', name: 'Italiano (Italia)', + prompt_name: 'Italian', example: 'Ciao, Dify!', supported: false, }, { value: 'th-TH', name: 'ไทย (ประเทศไทย)', + prompt_name: 'Thai', example: 'สวัสดี Dify!', supported: false, }, { value: 'id-ID', name: 'Bahasa Indonesia', + prompt_name: 'Indonesian', example: 'Saluto, Dify!', supported: false, }, { value: 'uk-UA', name: 'Українська (Україна)', + prompt_name: 'Ukrainian', example: 'Привет, Dify!', supported: true, }, { value: 'vi-VN', name: 'Tiếng Việt (Việt Nam)', + prompt_name: 'Vietnamese', example: 'Xin chào, Dify!', supported: true, }, + { + value: 'ro-RO', + name: 'Română (România)', + prompt_name: 'Romanian', + example: 'Salut, Dify!', + supported: true, + }, { value: 'pl-PL', name: 'Polski (Polish)', + prompt_name: 'Polish', example: 'Cześć, Dify!', supported: true, }, diff --git a/web/i18n/ro-RO/app-annotation.ts b/web/i18n/ro-RO/app-annotation.ts new file mode 100644 index 0000000000..42fd17c12b --- /dev/null +++ b/web/i18n/ro-RO/app-annotation.ts @@ -0,0 +1,87 @@ +const translation = { + title: 'Anotări', + name: 'Răspuns la Anotație', + editBy: 'Răspuns editat de {{author}}', + noData: { + title: 'Fără anotări', + description: 'Puteți edita anotările în timpul depanării aplicației sau importați anotări în masă aici pentru un răspuns de înaltă calitate.', + }, + table: { + header: { + question: 'întrebare', + answer: 'răspuns', + createdAt: 'creat la', + hits: 'accesări', + actions: 'acțiuni', + addAnnotation: 'Adaugă Anotație', + bulkImport: 'Import în Masă', + bulkExport: 'Export în Masă', + clearAll: 'Șterge Toate Anotațiile', + }, + }, + editModal: { + title: 'Editează Răspunsul la Anotație', + queryName: 'Interogare Utilizator', + answerName: 
'Povestitorul Bot', + yourAnswer: 'Răspunsul Tău', + answerPlaceholder: 'Scrie răspunsul tău aici', + yourQuery: 'Interogarea Ta', + queryPlaceholder: 'Scrie interogarea ta aici', + removeThisCache: 'Elimină Această Anotație', + createdAt: 'Creat La', + }, + addModal: { + title: 'Adaugă Răspuns la Anotație', + queryName: 'Întrebare', + answerName: 'Răspuns', + answerPlaceholder: 'Scrie răspunsul aici', + queryPlaceholder: 'Scrie întrebarea aici', + createNext: 'Adaugă un alt răspuns anotat', + }, + batchModal: { + title: 'Import în Masă', + csvUploadTitle: 'Trage și plasează fișierul tău CSV aici, sau ', + browse: 'răsfoiește', + tip: 'Fișierul CSV trebuie să respecte următoarea structură:', + question: 'întrebare', + answer: 'răspuns', + contentTitle: 'conținutul secțiunii', + content: 'conținut', + template: 'Descarcă șablonul aici', + cancel: 'Anulează', + run: 'Rulează Lotul', + runError: 'Eroare la rularea lotului', + processing: 'În procesare', + completed: 'Import finalizat', + error: 'Eroare de Import', + ok: 'OK', + }, + errorMessage: { + answerRequired: 'Răspunsul este necesar', + queryRequired: 'Întrebarea este necesară', + }, + viewModal: { + annotatedResponse: 'Răspuns Anotat', + hitHistory: 'Istoric Accesări', + hit: 'Acces', + hits: 'Accesări', + noHitHistory: 'Fără istoric de accesări', + }, + hitHistoryTable: { + query: 'Interogare', + match: 'Potrivire', + response: 'Răspuns', + source: 'Sursă', + score: 'Scor', + time: 'Timp', + }, + initSetup: { + title: 'Configurare Inițială Răspuns la Anotație', + configTitle: 'Configurare Răspuns la Anotație', + confirmBtn: 'Salvează & Activează', + configConfirmBtn: 'Salvează', + }, + embeddingModelSwitchTip: 'Model de vectorizare a textului anotației, schimbarea modelelor va fi reîncorporată, rezultând costuri suplimentare.', +} + +export default translation diff --git a/web/i18n/ro-RO/app-api.ts b/web/i18n/ro-RO/app-api.ts new file mode 100644 index 0000000000..0b86ec6976 --- /dev/null +++ b/web/i18n/ro-RO/app-api.ts @@ -0,0 +1,83 @@ +const translation = { + apiServer: 'Server API', + apiKey: 'Cheia API', + status: 'Stare', + disabled: 'Dezactivat', + ok: 'În Serviciu', + copy: 'Copiază', + copied: 'Copiat', + play: 'Redă', + pause: 'Pauză', + playing: 'În redare', + loading: 'Se încarcă', + merMaind: { + rerender: 'Reprocesare', + }, + never: 'Niciodată', + apiKeyModal: { + apiSecretKey: 'Cheia secretă API', + apiSecretKeyTips: 'Pentru a preveni abuzul API, protejați-vă Cheia API. Evitați utilizarea ei ca text simplu în codul front-end. :)', + createNewSecretKey: 'Creează o nouă cheie secretă', + secretKey: 'Cheie Secretă', + created: 'CREATĂ', + lastUsed: 'ULTIMA UTILIZARE', + generateTips: 'Păstrați această cheie într-un loc sigur și accesibil.', + }, + actionMsg: { + deleteConfirmTitle: 'Ștergeți această cheie secretă?', + deleteConfirmTips: 'Această acțiune nu poate fi anulată.', + ok: 'OK', + }, + completionMode: { + title: 'API completare aplicație', + info: 'Pentru generarea de text de înaltă calitate, cum ar fi articole, rezumate și traduceri, utilizați API-ul de mesaje de completare cu intrare de la utilizator. 
Generarea de text se bazează pe parametrii modelului și șabloanele de prompturi stabilite în Ingineria Prompturilor Dify.', + createCompletionApi: 'Creează mesaj de completare', + createCompletionApiTip: 'Creează un mesaj de completare pentru a sprijini modul de întrebare și răspuns.', + inputsTips: '(Opțional) Furnizați câmpuri de intrare pentru utilizator ca perechi cheie-valoare, corespunzătoare variabilelor din Ingineria Prompt. Cheia este numele variabilei, Valoarea este valoarea parametrului. Dacă tipul de câmp este Select, Valoarea trimisă trebuie să fie una dintre opțiunile prestabilite.', + queryTips: 'Conținutul textului de intrare al utilizatorului.', + blocking: 'Tip blocant, așteptând finalizarea execuției și returnarea rezultatelor. (Cererea poate fi întreruptă dacă procesul este lung)', + streaming: 'returnare în flux. Implementarea returnării în flux bazată pe SSE (Evenimente trimise de server).', + messageFeedbackApi: 'Feedback mesaj (apreciere)', + messageFeedbackApiTip: 'Evaluează mesajele primite în numele utilizatorilor finali cu aprecieri sau dezaprecieri. Aceste date sunt vizibile în pagina Jurnale & Anotații și sunt utilizate pentru ajustarea fină a modelului viitor.', + messageIDTip: 'ID mesaj', + ratingTip: 'apreciere sau dezapreciere, nul este anulare', + parametersApi: 'Obțineți informații despre parametrii aplicației', + parametersApiTip: 'Recuperați parametrii de intrare configurați, inclusiv numele variabilelor, denumirile câmpurilor, tipurile și valorile implicite. De obicei, sunt folosiți pentru a afișa aceste câmpuri într-un formular sau pentru a completa valorile implicite după încărcarea clientului.', + }, + chatMode: { + title: 'API chat aplicație', + info: 'Pentru aplicații conversaționale versatile folosind un format Q&A, apelați API-ul de mesaje de chat pentru a iniția un dialog. Mențineți conversațiile continue transmitând conversation_id returnat. Parametrii de răspuns și șabloanele depind de setările Ingineriei Prompt Dify.', + createChatApi: 'Creează mesaj de chat', + createChatApiTip: 'Creează un nou mesaj de conversație sau continuă un dialog existent.', + inputsTips: '(Opțional) Furnizați câmpuri de intrare pentru utilizator ca perechi cheie-valoare, corespunzătoare variabilelor din Ingineria Prompt. Cheia este numele variabilei, Valoarea este valoarea parametrului. Dacă tipul de câmp este Select, Valoarea trimisă trebuie să fie una dintre opțiunile prestabilite.', + queryTips: 'Conținutul întrebării/utilizatorului introdus', + blocking: 'Tip blocant, așteptând finalizarea execuției și returnarea rezultatelor. (Cererea poate fi întreruptă dacă procesul este lung)', + streaming: 'returnare în flux. Implementarea returnării în flux bazată pe SSE (Evenimente trimise de server).', + conversationIdTip: '(Opțional) ID conversație: lăsați gol pentru prima conversație; transmiteți conversation_id din context pentru a continua dialogul.', + messageFeedbackApi: 'Feedback terminal utilizator mesaj, apreciere', + messageFeedbackApiTip: 'Evaluează mesajele primite în numele utilizatorilor finali cu aprecieri sau dezaprecieri. 
Aceste date sunt vizibile în pagina Jurnale & Anotații și sunt utilizate pentru ajustarea fină a modelului viitor.', + messageIDTip: 'ID mesaj', + ratingTip: 'apreciere sau dezapreciere, nul este anulare', + chatMsgHistoryApi: 'Obțineți istoricul mesajelor de chat', + chatMsgHistoryApiTip: 'Prima pagină returnează ultimele `limită` bare, care sunt în ordine inversă.', + chatMsgHistoryConversationIdTip: 'ID conversație', + chatMsgHistoryFirstId: 'ID-ul primului înregistrare de chat de pe pagina curentă. Implicit este niciunul.', + chatMsgHistoryLimit: 'Câte chat-uri sunt returnate într-o singură cerere', + conversationsListApi: 'Obțineți lista de conversații', + conversationsListApiTip: 'Obține lista de sesiuni a utilizatorului curent. În mod implicit, ultimele 20 de sesiuni sunt returnate.', + conversationsListFirstIdTip: 'ID-ul ultimei înregistrări de pe pagina curentă, implicit niciunul.', + conversationsListLimitTip: 'Câte chat-uri sunt returnate într-o singură cerere', + conversationRenamingApi: 'Redenumirea conversației', + conversationRenamingApiTip: 'Redenumiți conversațiile; numele este afișat în interfețele client cu sesiuni multiple.', + conversationRenamingNameTip: 'Nume nou', + parametersApi: 'Obțineți informații despre parametrii aplicației', + parametersApiTip: 'Recuperați parametrii de intrare configurați, inclusiv numele variabilelor, denumirile câmpurilor, tipurile și valorile implicite. De obicei, sunt folosiți pentru a afișa aceste câmpuri într-un formular sau pentru a completa valorile implicite după încărcarea clientului.', + }, + develop: { + requestBody: 'Corpul cererii', + pathParams: 'Parametrii căii', + query: 'Interogare', + }, +} + +export default translation diff --git a/web/i18n/ro-RO/app-debug.ts b/web/i18n/ro-RO/app-debug.ts new file mode 100644 index 0000000000..adb747b514 --- /dev/null +++ b/web/i18n/ro-RO/app-debug.ts @@ -0,0 +1,424 @@ +const translation = { + pageTitle: { + line1: 'PROMPT', + line2: 'Inginerie', + }, + orchestrate: 'Orchestrează', + promptMode: { + simple: 'Comută la Modul Expert pentru a edita întregul PROMPT', + advanced: 'Mod Expert', + switchBack: 'Comută înapoi', + advancedWarning: { + title: 'Ați commutat la Modul Expert și, odată ce modificați PROMPT-ul, NU puteți reveni la modul de bază.', + description: 'În Modul Expert, puteți edita întregul PROMPT.', + learnMore: 'Aflați mai multe', + ok: 'OK', + }, + operation: { + addMessage: 'Adaugă mesaj', + }, + contextMissing: 'Componentă de context lipsește, eficacitatea promptului poate să nu fie bună.', + }, + operation: { + applyConfig: 'Publică', + resetConfig: 'Resetează', + debugConfig: 'Depanează', + addFeature: 'Adaugă funcție', + automatic: 'Automat', + stopResponding: 'Oprește răspunsul', + agree: 'Îmi place', + disagree: 'Nu îmi place', + cancelAgree: 'Anulează Îmi place', + cancelDisagree: 'Anulează Nu îmi place', + userAction: 'Utilizator ', + }, + notSetAPIKey: { + title: 'Cheia furnizorului LLM nu a fost setată', + trailFinished: 'Perioada de încercare a expirat', + description: 'Cheia furnizorului LLM nu a fost setată și trebuie să fie setată înainte de depanare.', + settingBtn: 'Du-te la setări', + }, + trailUseGPT4Info: { + title: 'Nu se acceptă acum gpt-4', + description: 'Pentru a utiliza gpt-4, vă rugăm să setați cheia API.', + }, + feature: { + groupChat: { + title: 'Îmbunătățire chat', + description: 'Adăugați setări pre-conversație pentru aplicații, pentru a îmbunătăți experiența utilizatorilor.', + }, + groupExperience: { + title: 'Îmbunătățire experiență', + }, 
+ conversationOpener: { + title: 'Reîncărcări conversație', + description: 'Într-o aplicație de chat, prima propoziție pe care IA o vorbește în mod activ utilizatorului este de obicei utilizată ca salut.', + }, + suggestedQuestionsAfterAnswer: { + title: 'Urmărire', + description: 'Setarea sugestiilor pentru întrebările următoare poate oferi utilizatorilor o conversație mai bună.', + resDes: '3 sugestii pentru următoarea întrebare a utilizatorului.', + tryToAsk: 'Încercați să întrebați', + }, + moreLikeThis: { + title: 'Mai multe ca aceasta', + description: 'Generați mai multe texte o dată, apoi editați și continuați să generați', + generateNumTip: 'Numărul de generări fiecăruia', + tip: 'Utilizarea acestei funcții va genera un consum suplimentar de jetoane', + }, + speechToText: { + title: 'Voce la text', + description: 'După activare, puteți utiliza intrarea vocală.', + resDes: 'Intrarea vocală este activată', + }, + textToSpeech: { + title: 'Text la voce', + description: 'După activare, textul poate fi convertit în vorbire.', + resDes: 'Textul la audio este activat', + }, + citation: { + title: 'Citări și atribuiri', + description: 'După activare, se vor afișa documentul sursă și secțiunea atribuită a conținutului generat.', + resDes: 'Citări și atribuiri sunt activate', + }, + annotation: { + title: 'Răspuns anotat', + description: 'Puteți adăuga manual răspunsuri de înaltă calitate în cache pentru a le prioritiza la potrivirea cu întrebările similare ale utilizatorilor.', + resDes: 'Răspuns anotat este activat', + scoreThreshold: { + title: 'Prag de scor', + description: 'Folosit pentru a seta pragul de similitudine pentru răspunsul anotat.', + easyMatch: 'Potrivire simplă', + accurateMatch: 'Potrivire precisă', + }, + matchVariable: { + title: 'Variabilă de potrivire', + choosePlaceholder: 'Alegeți variabila de potrivire', + }, + cacheManagement: 'Adnotări', + cached: 'Adnotat', + remove: 'Elimină', + removeConfirm: 'Ștergeți această adnotare?', + add: 'Adaugă adnotare', + edit: 'Editează adnotare', + }, + dataSet: { + title: 'Context', + noData: 'Puteți importa Cunoștințe ca context', + words: 'Cuvinte', + textBlocks: 'Blocuri de text', + selectTitle: 'Selectați Cunoștințe de referință', + selected: 'Cunoștințe selectate', + noDataSet: 'Nu s-au găsit Cunoștințe', + toCreate: 'Du-te la creare', + notSupportSelectMulti: 'În prezent se acceptă doar o singură Cunoștință', + queryVariable: { + title: 'Variabilă de interogare', + tip: 'Această variabilă va fi utilizată ca intrare de interogare pentru recuperarea contextului, obținând informații de context legate de intrarea acestei variabile.', + choosePlaceholder: 'Alegeți variabila de interogare', + noVar: 'Nicio variabilă', + noVarTip: 'vă rugăm să creați o variabilă în secțiunea Variabile', + unableToQueryDataSet: 'Imposibil de interogat Cunoștințele', + unableToQueryDataSetTip: 'Nu s-a reușit interogarea cu succes a Cunoștințelor, vă rugăm să alegeți o variabilă de interogare a contextului în secțiunea context.', + ok: 'OK', + contextVarNotEmpty: 'variabila de interogare a contextului nu poate fi goală', + deleteContextVarTitle: 'Ștergeți variabila "{{varName}}"?', + deleteContextVarTip: 'Această variabilă a fost setată ca variabilă de interogare a contextului și eliminarea ei va afecta utilizarea normală a Cunoștințelor. 
Dacă totuși trebuie să o ștergeți, vă rugăm să o reselectați în secțiunea context.', + }, + }, + tools: { + title: 'Instrumente', + tips: 'Instrumentele oferă o metodă de apel API standard, luând intrarea utilizatorului sau variabilele ca parametri de solicitare pentru interogarea datelor externe ca context.', + toolsInUse: '{{count}} instrumente în uz', + modal: { + title: 'Instrument', + toolType: { + title: 'Tip instrument', + placeholder: 'Vă rugăm să selectați tipul de instrument', + }, + name: { + title: 'Nume', + placeholder: 'Vă rugăm să introduceți numele', + }, + variableName: { + title: 'Nume variabilă', + placeholder: 'Vă rugăm să introduceți numele variabilei', + }, + }, + }, + conversationHistory: { + title: 'Istoric conversație', + description: 'Setați numele prefixe pentru rolurile de conversație', + tip: 'Istoricul conversației nu este activat, adăugați în promptul de mai sus.', + learnMore: 'Aflați mai multe', + editModal: { + title: 'Editați numele rolurilor de conversație', + userPrefix: 'Prefix utilizator', + assistantPrefix: 'Prefix asistent', + }, + }, + toolbox: { + title: 'TRUSĂ DE UNELTE', + }, + moderation: { + title: 'Moderarea conținutului', + description: 'Asigurați securitatea ieșirii modelului folosind API-ul de moderare sau menținând o listă de cuvinte sensibile.', + allEnabled: 'Conținut INTRARE/IEȘIRE activat', + inputEnabled: 'Conținut INTRARE activat', + outputEnabled: 'Conținut IEȘIRE activat', + modal: { + title: 'Setări de moderare a conținutului', + provider: { + title: 'Furnizor', + openai: 'Moderare OpenAI', + openaiTip: { + prefix: 'Moderarea OpenAI necesită o cheie API OpenAI configurată în', + suffix: '.', + }, + keywords: 'Cuvinte cheie', + }, + keywords: { + tip: 'Câte unul pe rând, separate prin linii noi. Maxim 100 de caractere pe linie.', + placeholder: 'Câte unul pe rând, separate prin linii noi', + line: 'Linie', + }, + content: { + input: 'Moderează conținut INTRARE', + output: 'Moderează conținut IEȘIRE', + preset: 'Răspunsuri prestabilite', + placeholder: 'Conținut răspunsuri prestabilite aici', + condition: 'Moderează conținut INTRARE și IEȘIRE activat cel puțin unul', + fromApi: 'Răspunsurile prestabilite sunt returnate de API', + errorMessage: 'Răspunsurile prestabilite nu pot fi goale', + supportMarkdown: 'Markdown suportat', + }, + openaiNotConfig: { + before: 'Moderarea OpenAI necesită o cheie API OpenAI configurată în', + after: '', + }, + }, + }, + }, + automatic: { + title: 'Orchestrarea automată a aplicațiilor', + description: 'Descrieți scenariul dvs., Dify vă va orchestra o aplicație pentru dvs.', + intendedAudience: 'Care este publicul țintă?', + intendedAudiencePlaceHolder: 'de ex. Student', + solveProblem: 'Ce probleme speră ei că IA le poate rezolva?', + solveProblemPlaceHolder: 'de ex. 
Extrage informații și rezumă informații din rapoarte și articole lungi', + generate: 'Generează', + audiencesRequired: 'Publicul țintă este necesar', + problemRequired: 'Problema este necesară', + resTitle: 'Am orchestrat următoarea aplicație pentru dvs.', + apply: 'Aplicați această orchestrare', + noData: 'Descrieți cazul de utilizare din stânga, previzualizarea orchestrării se va afișa aici.', + loading: 'Orchestrarea aplicației pentru dvs...', + overwriteTitle: 'Suprascrieți configurația existentă?', + overwriteMessage: 'Aplicarea acestei orchestrări va suprascrie configurația existentă.', + }, + resetConfig: { + title: 'Confirmați resetarea?', + message: + 'Resetarea renunță la modificări, restabilind ultima configurație publicată.', + }, + errorMessage: { + nameOfKeyRequired: 'numele cheii: {{key}} este obligatoriu', + valueOfVarRequired: 'valoarea {{key}} nu poate fi goală', + queryRequired: 'Textul solicitării este obligatoriu.', + waitForResponse: + 'Vă rugăm să așteptați finalizarea răspunsului la mesajul anterior.', + waitForBatchResponse: + 'Vă rugăm să așteptați finalizarea sarcinii în lot.', + notSelectModel: 'Vă rugăm să alegeți un model', + waitForImgUpload: 'Vă rugăm să așteptați încărcarea imaginii', + }, + chatSubTitle: 'Instrucțiuni', + completionSubTitle: 'Prefix prompt', + promptTip: + 'Prompturile ghidează răspunsurile AI cu instrucțiuni și constrângeri. Inserați variabile ca {{input}}. Acest prompt nu va fi vizibil utilizatorilor.', + formattingChangedTitle: 'Formatarea s-a schimbat', + formattingChangedText: + 'Modificarea formatării va reseta zona de depanare, ești sigur?', + variableTitle: 'Variabile', + variableTip: + 'Utilizatorii completează variabile într-un formular, înlocuind automat variabilele din prompt.', + notSetVar: 'Variabilele permit utilizatorilor să introducă cuvinte de prompt sau remarci de deschidere atunci când completează formulare. Puteți încerca să introduceți "{{input}}" în cuvintele de prompt.', + autoAddVar: 'Variabilele nedefinite la care se face referire în pre-prompt, doriți să le adăugați în formularul de intrare al utilizatorului?', + variableTable: { + key: 'Cheie variabilă', + name: 'Nume câmp de intrare utilizator', + optional: 'Opțional', + type: 'Tip intrare', + action: 'Acțiuni', + typeString: 'Șir', + typeSelect: 'Selectează', + }, + varKeyError: { + canNoBeEmpty: 'Cheia variabilei nu poate fi goală', + tooLong: 'Cheia variabilei: {{key}} este prea lungă. Nu poate fi mai lungă de 30 de caractere', + notValid: 'Cheia variabilei: {{key}} este nevalidă. 
Poate conține doar litere, cifre și sublinieri', + notStartWithNumber: 'Cheia variabilei: {{key}} nu poate începe cu un număr', + keyAlreadyExists: 'Cheia variabilei: :{{key}} deja există', + }, + otherError: { + promptNoBeEmpty: 'Promptul nu poate fi gol', + historyNoBeEmpty: 'Istoricul conversației trebuie setat în prompt', + queryNoBeEmpty: 'Interogația trebuie setată în prompt', + }, + variableConig: { + 'addModalTitle': 'Adăugați câmp de intrare', + 'editModalTitle': 'Editați câmpul de intrare', + 'description': 'Setare pentru variabila {{varName}}', + 'fieldType': 'Tip de câmp', + 'string': 'Text scurt', + 'text-input': 'Text scurt', + 'paragraph': 'Paragraf', + 'select': 'Selectați', + 'number': 'Număr', + 'notSet': 'Nesetat, încercați să tastați {{input}} în promptul de prefix', + 'stringTitle': 'Opțiuni casetă text formular', + 'maxLength': 'Lungime maximă', + 'options': 'Opțiuni', + 'addOption': 'Adăugați opțiune', + 'apiBasedVar': 'Variabilă bazată pe API', + 'varName': 'Nume variabilă', + 'labelName': 'Nume etichetă', + 'inputPlaceholder': 'Vă rugăm să introduceți', + 'required': 'Obligatoriu', + 'errorMsg': { + varNameRequired: 'Numele variabilei este obligatoriu', + labelNameRequired: 'Numele etichetei este obligatoriu', + varNameCanBeRepeat: 'Numele variabilei nu poate fi repetat', + atLeastOneOption: 'Este necesară cel puțin o opțiune', + optionRepeat: 'Există opțiuni repetate', + }, + }, + vision: { + name: 'Viziune', + description: 'Activarea Viziunii va permite modelului să primească imagini și să răspundă la întrebări despre ele.', + settings: 'Setări', + visionSettings: { + title: 'Setări Viziune', + resolution: 'Rezoluție', + resolutionTooltip: `rezoluția joasă va permite modelului să primească o versiune de 512 x 512 pixeli a imaginii și să o reprezinte cu un buget de 65 de tokenuri. Acest lucru permite API-ului să returneze răspunsuri mai rapide și să consume mai puține tokenuri de intrare pentru cazurile de utilizare care nu necesită detalii ridicate. + \n + rezoluția ridicată va permite în primul rând modelului să vadă imaginea la rezoluție scăzută și apoi va crea decupaje detaliate ale imaginilor de intrare ca pătrate de 512 pixeli, în funcție de dimensiunea imaginii de intrare. Fiecare decupaj detaliat utilizează un buget de token dublu, pentru un total de 129 de tokenuri.`, + high: 'Ridicat', + low: 'Scăzut', + uploadMethod: 'Metodă de încărcare', + both: 'Ambele', + localUpload: 'Încărcare locală', + url: 'URL', + uploadLimit: 'Limită de încărcare', + }, + }, + voice: { + name: 'Voce', + defaultDisplay: 'Voce implicită', + description: 'Setări de voce text-to-speech', + settings: 'Setări', + voiceSettings: { + title: 'Setări Voce', + language: 'Limbă', + resolutionTooltip: 'Suport pentru limba de voce text-to-speech.', + voice: 'Voce', + }, + }, + openingStatement: { + title: 'Deschizător de conversație', + add: 'Adăugare', + writeOpener: 'Scrieți deschizătorul', + placeholder: 'Scrieți aici mesajul de deschidere, puteți utiliza variabile, încercați să tastați {{variable}}.', + openingQuestion: 'Întrebări de deschidere', + noDataPlaceHolder: + 'Începerea conversației cu utilizatorul poate ajuta AI să stabilească o conexiune mai strânsă cu ei în aplicațiile conversaționale.', + varTip: 'Puteți utiliza variabile, încercați să tastați {{variable}}', + tooShort: 'Este necesară o promptare inițială de cel puțin 20 de cuvinte pentru a genera o remarcă de deschidere a conversației.', + notIncludeKey: 'Promptarea inițială nu include variabila: {{key}}. 
Vă rugăm să o adăugați la promptarea inițială.', + }, + modelConfig: { + model: 'Model', + setTone: 'Setați tonul răspunsurilor', + title: 'Model și Parametri', + modeType: { + chat: 'Chat', + completion: 'Completare', + }, + }, + inputs: { + title: 'Depanare și previzualizare', + noPrompt: 'Încercați să scrieți o promptare în câmpul de intrare pre-promptare', + userInputField: 'Câmp de intrare utilizator', + noVar: 'Completați valoarea variabilei, care va fi înlocuită automat în promptarea cuvintelor de fiecare dată când este pornită o nouă sesiune.', + chatVarTip: + 'Completați valoarea variabilei, care va fi înlocuită automat în promptarea cuvintelor de fiecare dată când este pornită o nouă sesiune', + completionVarTip: + 'Completați valoarea variabilei, care va fi înlocuită automat în promptarea cuvintelor de fiecare dată când este trimisă o întrebare.', + previewTitle: 'Previzualizare promptare', + queryTitle: 'Conținut interogare', + queryPlaceholder: 'Vă rugăm să introduceți textul solicitării.', + run: 'RULARE', + }, + result: 'Text de ieșire', + datasetConfig: { + settingTitle: 'Setări de recuperare', + knowledgeTip: 'Faceți clic pe butonul "+" pentru a adăuga cunoștințe', + retrieveOneWay: { + title: 'Recuperare N-la-1', + description: 'Pe baza intenției utilizatorului și a descrierilor Cunoștințelor, Agentul selectează în mod autonom cea mai bună Cunoștință pentru interogare. Cel mai bun pentru aplicații cu Cunoștințe distincte și limitate.', + }, + retrieveMultiWay: { + title: 'Recuperare multi-cale', + description: 'Pe baza intenției utilizatorului, interogați toate Cunoștințele, recuperați textul relevant din mai multe surse și selectați cele mai bune rezultate care se potrivesc interogării utilizatorului după reclasificare. Este necesară configurarea API-ului Rerank Model.', + }, + rerankModelRequired: 'Este necesar modelul Rerank', + params: 'Parametri', + top_k: 'Top K', + top_kTip: 'Utilizat pentru a filtra bucățile cele mai similare cu întrebările utilizatorilor. 
Sistemul va ajusta, de asemenea, în mod dinamic valoarea Top K, în funcție de max_tokens al modelului selectat.', + score_threshold: 'Prag scor', + score_thresholdTip: 'Utilizat pentru a seta pragul de similitudine pentru filtrarea bucăților.', + retrieveChangeTip: 'Modificarea modului de indexare și a modului de recuperare poate afecta aplicațiile asociate cu aceste Cunoștințe.', + }, + debugAsSingleModel: 'Depanare ca Model Unic', + debugAsMultipleModel: 'Depanare ca Modele Multiple', + duplicateModel: 'Duplicare', + publishAs: 'Publicare ca', + assistantType: { + name: 'Tip Asistent', + chatAssistant: { + name: 'Asistent de bază', + description: 'Construiți un asistent bazat pe chat utilizând un Model de Limbaj Mare', + }, + agentAssistant: { + name: 'Asistent Agent', + description: 'Construiți un Agent inteligent care poate alege în mod autonom instrumente pentru a îndeplini sarcinile', + }, + }, + agent: { + agentMode: 'Mod Agent', + agentModeDes: 'Setați tipul de mod de inferență pentru agent', + agentModeType: { + ReACT: 'ReAct', + functionCall: 'Apel de Funcție', + }, + setting: { + name: 'Setări Agent', + description: 'Setările Asistentului Agent permit setarea modului agent și a funcțiilor avansate, cum ar fi prompturile încorporate, disponibile numai în tipul Agent.', + maximumIterations: { + name: 'Iterații maxime', + description: 'Limitați numărul de iterații pe care le poate executa un asistent agent', + }, + }, + buildInPrompt: 'Prompt încorporat', + firstPrompt: 'Primul Prompt', + nextIteration: 'Iterația următoare', + promptPlaceholder: 'Scrieți promptul aici', + tools: { + name: 'Instrumente', + description: 'Utilizarea instrumentelor poate extinde capacitățile LLM, cum ar fi căutarea pe internet sau efectuarea de calcule științifice', + enabled: 'Activat', + }, + }, +} + +export default translation diff --git a/web/i18n/ro-RO/app-log.ts b/web/i18n/ro-RO/app-log.ts new file mode 100644 index 0000000000..78cf9653e7 --- /dev/null +++ b/web/i18n/ro-RO/app-log.ts @@ -0,0 +1,91 @@ +const translation = { + title: 'Jurnale', + description: 'Jurnalele înregistrează starea de funcționare a aplicației, inclusiv intrările utilizatorilor și răspunsurile AI.', + dateTimeFormat: 'DD/MM/YYYY hh:mm A', + table: { + header: { + time: 'Oră', + endUser: 'Utilizator final', + input: 'Intrare', + output: 'Ieșire', + summary: 'Titlu', + messageCount: 'Număr de mesaje', + userRate: 'Evaluare utilizator', + adminRate: 'Evaluare op.', + startTime: 'ORA DE ÎNCEPERE', + status: 'STARE', + runtime: 'TIMP DE RULARE', + tokens: 'JETOANE', + user: 'UTILIZATOR FINAL', + version: 'VERSIUNE', + }, + pagination: { + previous: 'Anterior', + next: 'Următor', + }, + empty: { + noChat: 'Încă nu există nicio conversație', + noOutput: 'Fără ieșire', + element: { + title: 'Există cineva acolo?', + content: 'Observați și annotați interacțiunile dintre utilizatorii finali și aplicațiile AI pentru a îmbunătăți în mod continuu acuratețea AI. 
Puteți încerca să partajați sau să testați aplicația web, apoi reveniți la această pagină.', + }, + }, + }, + detail: { + time: 'Oră', + conversationId: 'ID conversație', + promptTemplate: 'Șablon prompt', + promptTemplateBeforeChat: 'Șablon prompt înainte de chat · Ca mesaj de sistem', + annotationTip: 'Îmbunătățiri marcate de {{user}}', + timeConsuming: '', + second: 's', + tokenCost: 'Jetoane cheltuite', + loading: 'se încarcă', + operation: { + like: 'apreciere', + dislike: 'dezaprobare', + addAnnotation: 'Adăugați o îmbunătățire', + editAnnotation: 'Editați o îmbunătățire', + annotationPlaceholder: 'Introduceți răspunsul așteptat pe care doriți ca AI să îl furnizeze, care poate fi utilizat pentru fine-tuning-ul modelului și îmbunătățirea continuă a calității generării de text în viitor.', + }, + variables: 'Variabile', + uploadImages: 'Imagini încărcate', + }, + filter: { + period: { + today: 'Astăzi', + last7days: 'Ultimele 7 zile', + last4weeks: 'Ultimele 4 săptămâni', + last3months: 'Ultimele 3 luni', + last12months: 'Ultimele 12 luni', + monthToDate: 'Luna curentă', + quarterToDate: 'Trimestrul curent', + yearToDate: 'Anul curent', + allTime: 'Tot timpul', + }, + annotation: { + all: 'Toate', + annotated: 'Îmbunătățiri annotate ({{count}} elemente)', + not_annotated: 'Fără annotări', + }, + }, + workflowTitle: 'Jurnale de flux de lucru', + workflowSubtitle: 'Jurnalul a înregistrat operațiunea Automate.', + runDetail: { + title: 'Jurnal de conversație', + workflowTitle: 'Detalii jurnal', + }, + promptLog: 'Jurnal prompt', + agentLog: 'Jurnal agent', + viewLog: 'Vizualizare jurnal', + agentLogDetail: { + agentMode: 'Mod agent', + toolUsed: 'Instrument utilizat', + iterations: 'Iterații', + iteration: 'Iterație', + finalProcessing: 'Procesare finală', + }, +} + +export default translation diff --git a/web/i18n/ro-RO/app-overview.ts b/web/i18n/ro-RO/app-overview.ts new file mode 100644 index 0000000000..b8a5d072b9 --- /dev/null +++ b/web/i18n/ro-RO/app-overview.ts @@ -0,0 +1,143 @@ +const translation = { + welcome: { + firstStepTip: 'Pentru a începe,', + enterKeyTip: 'introduceți cheia API OpenAI mai jos', + getKeyTip: 'Obțineți cheia API de la panoul de control OpenAI', + placeholder: 'Cheia API OpenAI (de ex. sk-xxxx)', + }, + apiKeyInfo: { + cloud: { + trial: { + title: 'Utilizați cota de probă a furnizorului {{providerName}}.', + description: 'Cota de probă este furnizată pentru utilizarea de testare. Înainte ca apelurile cotei de probă să se epuizeze, vă rugăm să configurați propriul furnizor de modele sau să achiziționați o cotă suplimentară.', + }, + exhausted: { + title: 'Cota de probă a fost epuizată, vă rugăm să configurați cheia API.', + description: 'Cota de probă a fost epuizată. 
Vă rugăm să configurați propriul furnizor de modele sau să achiziționați o cotă suplimentară.', + }, + }, + selfHost: { + title: { + row1: 'Pentru a începe,', + row2: 'configurați mai întâi furnizorul de modele.', + }, + }, + callTimes: 'Apeluri efectuate', + usedToken: 'Token utilizat', + setAPIBtn: 'Mergeți la configurarea furnizorului de modele', + tryCloud: 'Sau încercați versiunea cloud a Dify cu cotă gratuită', + }, + overview: { + title: 'Prezentare generală', + appInfo: { + explanation: 'Aplicație web AI gata de utilizare', + accessibleAddress: 'URL public', + preview: 'Previzualizare', + regenerate: 'Regenerare', + preUseReminder: 'Activați aplicația web înainte de a continua.', + settings: { + entry: 'Setări', + title: 'Setări aplicație web', + webName: 'Nume aplicație web', + webDesc: 'Descriere aplicație web', + webDescTip: 'Acest text va fi afișat pe partea clientului, oferind îndrumare de bază privind modul de utilizare a aplicației', + webDescPlaceholder: 'Introduceți descrierea aplicației web', + language: 'Limbă', + more: { + entry: 'Afișați mai multe setări', + copyright: 'Drepturi de autor', + copyRightPlaceholder: 'Introduceți numele autorului sau al organizației', + privacyPolicy: 'Politica de confidențialitate', + privacyPolicyPlaceholder: 'Introduceți link-ul politicii de confidențialitate', + privacyPolicyTip: 'Ajută vizitatorii să înțeleagă datele pe care le colectează aplicația, consultați Politica de confidențialitate a Dify.', + }, + }, + embedded: { + entry: 'Încorporat', + title: 'Încorporați pe site-ul web', + explanation: 'Alegeți modul de încorporare a aplicației de chat pe site-ul web', + iframe: 'Pentru a adăuga aplicația de chat oriunde pe site-ul web, adăugați acest iframe la codul HTML.', + scripts: 'Pentru a adăuga o aplicație de chat în colțul din dreapta jos al site-ului web, adăugați acest cod la codul HTML.', + chromePlugin: 'Instalați extensia Chrome Dify Chatbot', + copied: 'Copiat', + copy: 'Copiați', + }, + qrcode: { + title: 'Cod QR pentru partajare', + scan: 'Scanați pentru a partaja aplicația', + download: 'Descărcați codul QR', + }, + customize: { + way: 'mod', + entry: 'Personalizare', + title: 'Personalizați aplicația web AI', + explanation: 'Puteți personaliza interfața frontală a aplicației web pentru a se potrivi cu scenariul și stilul dorit.', + way1: { + name: 'Bifurcați codul clientului, modificați-l și implementați-l pe Vercel (recomandat)', + step1: 'Bifurcați codul clientului și modificați-l', + step1Tip: 'Faceți clic aici pentru a bifurca codul sursă în contul dvs. 
GitHub și a modifica codul', + step1Operation: 'Dify-WebClient', + step2: 'Implementați pe Vercel', + step2Tip: 'Faceți clic aici pentru a importa depozitul în Vercel și a implementa', + step2Operation: 'Importați depozitul', + step3: 'Configurați variabilele de mediu', + step3Tip: 'Adăugați următoarele variabile de mediu în Vercel', + }, + way2: { + name: 'Scrieți cod pe partea clientului pentru a apela API-ul și implementați-l pe un server', + operation: 'Documentație', + }, + }, + }, + apiInfo: { + title: 'API serviciu backend', + explanation: 'Ușor de integrat în aplicația dvs.', + accessibleAddress: 'Punct final API serviciu', + doc: 'Referință API', + }, + status: { + running: 'În service', + disable: 'Dezactivat', + }, + }, + analysis: { + title: 'Analiză', + ms: 'ms', + tokenPS: 'Token/s', + totalMessages: { + title: 'Mesaje totale', + explanation: 'Număr de interacțiuni AI zilnice; exclud proiectarea și depanarea promptelor.', + }, + activeUsers: { + title: 'Utilizatori activi', + explanation: 'Utilizatori unici care se angajează în întrebări și răspunsuri cu AI; exclud proiectarea și depanarea promptelor.', + }, + tokenUsage: { + title: 'Utilizare token', + explanation: 'Reflectă utilizarea zilnică a tokenurilor de către modelul lingvistic pentru aplicație, utilă pentru controlul costurilor.', + consumed: 'Consumat', + }, + avgSessionInteractions: { + title: 'Interacțiuni medii pe sesiune', + explanation: 'Număr de comunicări continue utilizator-AI; pentru aplicații bazate pe conversație.', + }, + avgUserInteractions: { + title: 'Interacțiuni medii pe utilizator', + explanation: 'Reflectă frecvența de utilizare zilnică a utilizatorilor. Această metrica reflectă cât de fideli sunt utilizatorii.', + }, + userSatisfactionRate: { + title: 'Rata de satisfacție a utilizatorilor', + explanation: 'Numărul de aprecieri la 1.000 de mesaje. Acest lucru indică proporția de răspunsuri cu care utilizatorii sunt foarte mulțumiți.', + }, + avgResponseTime: { + title: 'Timp mediu de răspuns', + explanation: 'Timp (ms) pentru procesarea/răspunsul AI; pentru aplicații bazate pe text.', + }, + tps: { + title: 'Viteza de ieșire a tokenurilor', + explanation: 'Măsoară performanța modelului de limbaj mare. Numără viteza de ieșire a tokenurilor din modelul de limbaj mare de la începutul cererii până la finalizarea ieșirii.', + }, + }, +} + +export default translation diff --git a/web/i18n/ro-RO/app.ts b/web/i18n/ro-RO/app.ts new file mode 100644 index 0000000000..333678863f --- /dev/null +++ b/web/i18n/ro-RO/app.ts @@ -0,0 +1,90 @@ +const translation = { + createApp: 'CREEAZĂ APLICAȚIE', + types: { + all: 'Toate', + chatbot: 'Chatbot', + agent: 'Agent', + workflow: 'Flux de lucru', + completion: 'Finalizare', + }, + duplicate: 'Duplicat', + duplicateTitle: 'Duplică Aplicația', + export: 'Exportă DSL', + exportFailed: 'Exportul DSL a eșuat.', + importDSL: 'Importă fișier DSL', + createFromConfigFile: 'Creează din fișier DSL', + deleteAppConfirmTitle: 'Ștergi această aplicație?', + deleteAppConfirmContent: + 'Ștergerea aplicației este ireversibilă. 
Utilizatorii nu vor mai putea accesa aplicația ta, iar toate configurațiile promptului și jurnalele vor fi șterse permanent.', + appDeleted: 'Aplicația a fost ștearsă', + appDeleteFailed: 'Ștergerea aplicației a eșuat', + join: 'Alătură-te comunității', + communityIntro: + 'Discută cu membrii echipei, colaboratorii și dezvoltatorii pe diferite canale.', + roadmap: 'Vezi planul nostru de dezvoltare', + newApp: { + startFromBlank: 'Creează din Nou', + startFromTemplate: 'Creează din Șablon', + captionAppType: 'Ce tip de aplicație vrei să creezi?', + chatbotDescription: 'Construiește o aplicație bazată pe chat. Această aplicație folosește un format întrebare-răspuns, permițând mai multe runde de conversație continuă.', + completionDescription: 'Construiește o aplicație care generează text de înaltă calitate pe baza indicațiilor, cum ar fi generarea de articole, rezumate, traduceri și mai multe.', + completionWarning: 'Acest tip de aplicație nu va mai fi acceptat.', + agentDescription: 'Construiește un Agent inteligent care poate alege în mod autonom instrumentele pentru a îndeplini sarcinile', + workflowDescription: 'Construiește o aplicație care generează text de înaltă calitate pe baza unui flux de lucru orchestrat cu un grad ridicat de personalizare. Este potrivit pentru utilizatorii experimentați.', + workflowWarning: 'În prezent în beta', + chatbotType: 'Metodă de orchestrare a chatbot-ului', + basic: 'De bază', + basicTip: 'Pentru începători, se poate comuta la Chatflow mai târziu', + basicFor: 'PENTRU ÎNCEPĂTORI', + basicDescription: 'Orchestrarea de bază permite orchestrarea unei aplicații Chatbot folosind setări simple, fără posibilitatea de a modifica prompturile încorporate. Este potrivit pentru începători.', + advanced: 'Chatflow', + advancedFor: 'Pentru utilizatori avansați', + advancedDescription: 'Orchestrarea fluxului de lucru orchestrează chatboți sub forma fluxurilor de lucru, oferind un grad ridicat de personalizare, inclusiv posibilitatea de a edita prompturile încorporate. Este potrivit pentru utilizatorii experimentați.', + captionName: 'Pictogramă și nume aplicație', + appNamePlaceholder: 'Dă-i aplicației tale un nume', + captionDescription: 'Descriere', + appDescriptionPlaceholder: 'Introduceți descrierea aplicației', + useTemplate: 'Folosește acest șablon', + previewDemo: 'Previzualizează demo', + chatApp: 'Asistent', + chatAppIntro: + 'Vreau să construiesc o aplicație bazată pe chat. 
Această aplicație folosește un format întrebare-răspuns, permițând mai multe runde de conversație continuă.', + agentAssistant: 'Asistent Agent Nou', + completeApp: 'Generator de text', + completeAppIntro: + 'Vreau să creez o aplicație care generează text de înaltă calitate pe baza indicațiilor, cum ar fi generarea de articole, rezumate, traduceri și mai multe.', + showTemplates: 'Vreau să aleg dintr-un șablon', + hideTemplates: 'Înapoi la selecția modului', + Create: 'Creează', + Cancel: 'Anulează', + nameNotEmpty: 'Numele nu poate fi gol', + appTemplateNotSelected: 'Vă rugăm să selectați un șablon', + appTypeRequired: 'Vă rugăm să selectați un tip de aplicație', + appCreated: 'Aplicația a fost creată', + appCreateFailed: 'Crearea aplicației a eșuat', + }, + editApp: 'Editează Info', + editAppTitle: 'Editează Info Aplicație', + editDone: 'Informațiile despre aplicație au fost actualizate', + editFailed: 'Actualizarea informațiilor despre aplicație a eșuat', + emoji: { + ok: 'OK', + cancel: 'Anulează', + }, + switch: 'Comută la Orchestrare Flux de Lucru', + switchTipStart: 'O nouă copie a aplicației va fi creată pentru tine, iar noua copie va comuta la Orchestrare Flux de Lucru. Noua copie ', + switchTip: 'nu va permite', + switchTipEnd: ' comutarea înapoi la Orchestrare de Bază.', + switchLabel: 'Copia aplicației care urmează să fie creată', + removeOriginal: 'Șterge aplicația originală', + switchStart: 'Începe comutarea', + typeSelector: { + all: 'TOATE Tipurile', + chatbot: 'Chatbot', + agent: 'Agent', + workflow: 'Flux de lucru', + completion: 'Finalizare', + }, +} + +export default translation diff --git a/web/i18n/ro-RO/billing.ts b/web/i18n/ro-RO/billing.ts new file mode 100644 index 0000000000..57b9986889 --- /dev/null +++ b/web/i18n/ro-RO/billing.ts @@ -0,0 +1,115 @@ +const translation = { + currentPlan: 'Planul curent', + upgradeBtn: { + plain: 'Actualizează planul', + encourage: 'Actualizează acum', + encourageShort: 'Actualizează', + }, + viewBilling: 'Gestionează facturarea și abonamentele', + buyPermissionDeniedTip: 'Vă rugăm să contactați administratorul dvs. 
de întreprindere pentru a vă abona', + plansCommon: { + title: 'Alegeți un plan potrivit pentru dvs.', + yearlyTip: 'Obțineți 2 luni gratuite prin abonarea anuală!', + mostPopular: 'Cel mai popular', + planRange: { + monthly: 'Lunar', + yearly: 'Anual', + }, + month: 'lună', + year: 'an', + save: 'Economisește ', + free: 'Gratuit', + currentPlan: 'Planul curent', + contractSales: 'Contactați vânzările', + contractOwner: 'Contactați managerul echipei', + startForFree: 'Începe gratuit', + getStartedWith: 'Începe cu ', + contactSales: 'Contactați vânzările', + talkToSales: 'Vorbiți cu vânzările', + modelProviders: 'Furnizori de modele', + teamMembers: 'Membri ai echipei', + buildApps: 'Construiește aplicații', + vectorSpace: 'Spațiu vectorial', + vectorSpaceBillingTooltip: 'Fiecare 1MB poate stoca aproximativ 1,2 milioane de caractere de date vectorizate (estimat folosind OpenAI Embeddings, variază în funcție de modele).', + vectorSpaceTooltip: 'Spațiul vectorial este sistemul de memorie pe termen lung necesar pentru ca LLM-urile să înțeleagă datele dvs.', + documentsUploadQuota: 'Cotă de încărcare a documentelor', + documentProcessingPriority: 'Prioritatea procesării documentelor', + documentProcessingPriorityTip: 'Pentru o prioritate mai mare a procesării documentelor, vă rugăm să actualizați planul.', + documentProcessingPriorityUpgrade: 'Procesați mai multe date cu o acuratețe mai mare și la viteze mai rapide.', + priority: { + 'standard': 'Standard', + 'priority': 'Prioritate', + 'top-priority': 'Prioritate maximă', + }, + logsHistory: 'Istoricul jurnalelor', + customTools: 'Instrumente personalizate', + unavailable: 'Indisponibil', + days: 'zile', + unlimited: 'Nelimitat', + support: 'Asistență', + supportItems: { + communityForums: 'Forumuri comunitare', + emailSupport: 'Asistență prin e-mail', + priorityEmail: 'Asistență prioritară prin e-mail și chat', + logoChange: 'Schimbarea logo-ului', + SSOAuthentication: 'Autentificare SSO', + personalizedSupport: 'Asistență personalizată', + dedicatedAPISupport: 'Asistență API dedicată', + customIntegration: 'Integrare și asistență personalizate', + ragAPIRequest: 'Solicitări API RAG', + bulkUpload: 'Încărcare în bloc a documentelor', + agentMode: 'Mod agent', + workflow: 'Flux de lucru', + }, + comingSoon: 'Vine în curând', + member: 'Membru', + memberAfter: 'Membru', + messageRequest: { + title: 'Credite de mesaje', + tooltip: 'Cote de invocare a mesajelor pentru diferite planuri utilizând modele OpenAI (cu excepția gpt4). Mesajele peste limită vor utiliza cheia API OpenAI.', + }, + annotatedResponse: { + title: 'Limite de cotă de anotare', + tooltip: 'Editarea și anotarea manuală a răspunsurilor oferă capacități de întrebări și răspunsuri personalizabile și de înaltă calitate pentru aplicații. 
(Aplicabil numai în aplicațiile de chat)', + }, + ragAPIRequestTooltip: 'Se referă la numărul de apeluri API care invocă doar capacitățile de procesare a bazei de cunoștințe a Dify.', + receiptInfo: 'Doar proprietarul echipei și administratorul echipei pot să se aboneze și să vizualizeze informațiile de facturare', + }, + plans: { + sandbox: { + name: 'Sandbox', + description: '200 de încercări gratuite GPT', + includesTitle: 'Include:', + }, + professional: { + name: 'Professional', + description: 'Pentru persoane fizice și echipe mici pentru a debloca mai multă putere la un preț accesibil.', + includesTitle: 'Tot ce este în planul gratuit, plus:', + }, + team: { + name: 'Echipă', + description: 'Colaborați fără limite și bucurați-vă de performanțe de top.', + includesTitle: 'Tot ce este în planul Professional, plus:', + }, + enterprise: { + name: 'Întreprindere', + description: 'Obțineți capacități și asistență complete pentru sisteme critice la scară largă.', + includesTitle: 'Tot ce este în planul Echipă, plus:', + }, + }, + vectorSpace: { + fullTip: 'Spațiul vectorial este plin.', + fullSolution: 'Actualizați-vă planul pentru a obține mai mult spațiu.', + }, + apps: { + fullTipLine1: 'Actualizați-vă planul pentru a', + fullTipLine2: 'construi mai multe aplicații.', + }, + annotatedResponse: { + fullTipLine1: 'Actualizați-vă planul pentru a', + fullTipLine2: 'anota mai multe conversații.', + quotaTitle: 'Cotă de răspuns anotat', + }, +} + +export default translation diff --git a/web/i18n/ro-RO/common.ts b/web/i18n/ro-RO/common.ts new file mode 100644 index 0000000000..40d8185449 --- /dev/null +++ b/web/i18n/ro-RO/common.ts @@ -0,0 +1,529 @@ +const translation = { + api: { + success: 'Succes', + actionSuccess: 'Acțiune reușită', + saved: 'Salvat', + create: 'Creat', + remove: 'Eliminat', + }, + operation: { + create: 'Creează', + confirm: 'Confirmă', + cancel: 'Anulează', + clear: 'Șterge', + save: 'Salvează', + edit: 'Editează', + add: 'Adaugă', + added: 'Adăugat', + refresh: 'Reîncarcă', + reset: 'Resetează', + search: 'Caută', + change: 'Schimbă', + remove: 'Elimină', + send: 'Trimite', + copy: 'Copiază', + lineBreak: 'Linie nouă', + sure: 'Sunt sigur', + download: 'Descarcă', + delete: 'Șterge', + settings: 'Setări', + setup: 'Configurare', + getForFree: 'Obține gratuit', + reload: 'Reîncarcă', + ok: 'OK', + log: 'Jurnal', + learnMore: 'Află mai multe', + params: 'Parametri', + duplicate: 'Duplică', + rename: 'Redenumește', + }, + placeholder: { + input: 'Vă rugăm să introduceți', + select: 'Vă rugăm să selectați', + }, + voice: { + language: { + zhHans: 'Chineză', + zhHant: 'Chineză tradițională', + enUS: 'Engleză', + deDE: 'Germană', + frFR: 'Franceză', + esES: 'Spaniolă', + itIT: 'Italiană', + thTH: 'Thailandeză', + idID: 'Indoneziană', + jaJP: 'Japoneză', + koKR: 'Coreeană', + ptBR: 'Portugheză', + ruRU: 'Rusă', + ukUA: 'Ucraineană', + viVN: 'Vietnameză', + }, + }, + unit: { + char: 'caractere', + }, + actionMsg: { + noModification: 'Nicio modificare în acest moment.', + modifiedSuccessfully: 'Modificat cu succes', + modifiedUnsuccessfully: 'Modificare eșuată', + copySuccessfully: 'Copiat cu succes', + paySucceeded: 'Plata a reușit', + payCancelled: 'Plata a fost anulată', + generatedSuccessfully: 'Generat cu succes', + generatedUnsuccessfully: 'Generare eșuată', + }, + model: { + params: { + temperature: 'Temperatură', + temperatureTip: + 'Controlează aleatorietatea: Reducerea duce la mai puține completări aleatorii. 
Pe măsură ce temperatura se apropie de zero, modelul va deveni deterministic și repetitiv.', + top_p: 'Top P', + top_pTip: + 'Controlează diversitatea prin eșantionarea nucleului: 0,5 înseamnă că jumătate din toate opțiunile ponderate după probabilitate sunt luate în considerare.', + presence_penalty: 'Penalizare prezență', + presence_penaltyTip: + 'Cât de mult să se penalizeze noile jetoane în funcție de dacă apar sau nu în textul de până acum.\nCrește probabilitatea modelului de a vorbi despre subiecte noi.', + frequency_penalty: 'Penalizare frecvență', + frequency_penaltyTip: + 'Cât de mult să se penalizeze noile jetoane în funcție de frecvența lor existentă în textul de până acum.\nScade probabilitatea modelului de a repeta aceeași linie cuvânt cu cuvânt.', + max_tokens: 'Jetoane maxime', + max_tokensTip: + 'Folosit pentru a limita lungimea maximă a răspunsului, în jetoane.\nValori mai mari pot limita spațiul rămas pentru cuvintele promptului, jurnalele de chat și cunoștințe.\nSe recomandă să fie setat la mai puțin de două treimi\ngpt-4-1106-preview, gpt-4-vision-preview jetoane maxime (intrare 128k ieșire 4k)', + maxTokenSettingTip: 'Setarea jetoanelor maxime este ridicată, limitând potențial spațiul pentru prompturi, interogări și date. Luați în considerare setarea acesteia la sub 2/3.', + setToCurrentModelMaxTokenTip: 'Jetoanele maxime sunt actualizate la 80% din jetoanele maxime ale modelului curent {{maxToken}}.', + stop_sequences: 'Secvențe de oprire', + stop_sequencesTip: 'Până la patru secvențe în care API-ul va înceta să genereze mai multe jetoane. Textul returnat nu va conține secvența de oprire.', + stop_sequencesPlaceholder: 'Introduceți secvența și apăsați Tab', + }, + tone: { + Creative: 'Creativ', + Balanced: 'Echilibrat', + Precise: 'Precis', + Custom: 'Personalizat', + }, + addMoreModel: 'Mergeți la setări pentru a adăuga mai multe modele', + }, + menus: { + status: 'beta', + explore: 'Explorează', + apps: 'Studio', + plugins: 'Plugin-uri', + pluginsTips: 'Integrați plugin-uri terțe părți sau creați AI-Plugin-uri compatibile cu ChatGPT.', + datasets: 'Cunoștințe', + datasetsTips: 'CURÂND DISPONIBIL: Importați-vă propriile date text sau scrieți date în timp real prin Webhook pentru îmbunătățirea contextului LLM.', + newApp: 'Aplicație nouă', + newDataset: 'Creează Cunoștințe', + tools: 'Instrumente', + }, + userProfile: { + settings: 'Setări', + workspace: 'Spațiu de lucru', + createWorkspace: 'Creează Spațiu de lucru', + helpCenter: 'Ajutor', + roadmapAndFeedback: 'Feedback', + community: 'Comunitate', + about: 'Despre', + logout: 'Deconectare', + }, + settings: { + accountGroup: 'CONT', + workplaceGroup: 'SPAȚIU DE LUCRU', + account: 'Contul meu', + members: 'Membri', + billing: 'Facturare', + integrations: 'Integrări', + language: 'Limbă', + provider: 'Furnizor de modele', + dataSource: 'Sursă de date', + plugin: 'Plugin-uri', + apiBasedExtension: 'Extensie API', + }, + account: { + avatar: 'Avatar', + name: 'Nume', + email: 'Email', + password: 'Parolă', + passwordTip: 'Puteți seta o parolă permanentă dacă nu doriți să utilizați coduri de conectare temporare', + setPassword: 'Setează o parolă', + resetPassword: 'Resetează parola', + currentPassword: 'Parola curentă', + newPassword: 'Parolă nouă', + confirmPassword: 'Confirmă parola', + notEqual: 'Cele două parole sunt diferite.', + langGeniusAccount: 'Cont Dify', + langGeniusAccountTip: 'Contul Dify și datele de utilizator asociate.', + editName: 'Editează Nume', + showAppLength: 'Afișează {{length}} aplicații', + 
}, + members: { + team: 'Echipă', + invite: 'Adaugă', + name: 'NUME', + lastActive: 'ULTIMA ACTIVITATE', + role: 'ROLURI', + pending: 'În așteptare...', + owner: 'Proprietar', + admin: 'Administrator', + adminTip: 'Poate construi aplicații și gestiona setările echipei', + normal: 'Normal', + normalTip: 'Poate doar utiliza aplicații, nu poate construi aplicații', + inviteTeamMember: 'Adaugă membru în echipă', + inviteTeamMemberTip: 'Pot accesa direct datele echipei dvs. după autentificare.', + email: 'Email', + emailInvalid: 'Format de email invalid', + emailPlaceholder: 'Vă rugăm să introduceți emailuri', + sendInvite: 'Trimite invitație', + invitedAsRole: 'Invitat ca utilizator {{role}}', + invitationSent: 'Invitație trimisă', + invitationSentTip: 'Invitația a fost trimisă și pot să se autentifice în Dify pentru a accesa datele echipei dvs.', + invitationLink: 'Link de invitație', + failedinvitationEmails: 'Următorii utilizatori nu au fost invitați cu succes', + ok: 'OK', + removeFromTeam: 'Elimină din echipă', + removeFromTeamTip: 'Va elimina accesul la echipă', + setAdmin: 'Setează ca administrator', + setMember: 'Setează ca membru obișnuit', + disinvite: 'Anulează invitația', + deleteMember: 'Șterge membru', + you: '(Dvs.)', + }, + integrations: { + connected: 'Conectat', + google: 'Google', + googleAccount: 'Autentificare cu cont Google', + github: 'GitHub', + githubAccount: 'Autentificare cu cont GitHub', + connect: 'Conectează', + }, + language: { + displayLanguage: 'Limbă de afișare', + timezone: 'Fus orar', + }, + provider: { + apiKey: 'Cheie API', + enterYourKey: 'Introduceți cheia API aici', + invalidKey: 'Cheie API OpenAI nevalidă', + validatedError: 'Validare eșuată: ', + validating: 'Se validează cheia...', + saveFailed: 'Salvarea cheii API a eșuat', + apiKeyExceedBill: 'Această CHEIE API nu are cotă disponibilă, vă rugăm să citiți', + addKey: 'Adaugă cheie', + comingSoon: 'Curând disponibil', + editKey: 'Editează', + invalidApiKey: 'Cheie API nevalidă', + azure: { + apiBase: 'Bază API', + apiBasePlaceholder: 'URL-ul de bază al API-ului pentru punctul final Azure OpenAI.', + apiKey: 'Cheie API', + apiKeyPlaceholder: 'Introduceți cheia API aici', + helpTip: 'Aflați despre serviciul Azure OpenAI', + }, + openaiHosted: { + openaiHosted: 'OpenAI găzduit', + onTrial: 'ÎN PROBĂ', + exhausted: 'COTĂ EPUIZATĂ', + desc: 'Serviciul de găzduire OpenAI furnizat de Dify vă permite să utilizați modele precum GPT-3.5. Înainte ca cota de probă să fie epuizată, trebuie să configurați alți furnizori de modele.', + callTimes: 'Apeluri', + usedUp: 'Cota de probă a fost epuizată. Adăugați propriul furnizor de modele.', + useYourModel: 'În prezent se utilizează propriul furnizor de modele.', + close: 'Închide', + }, + anthropicHosted: { + anthropicHosted: 'Anthropic Claude', + onTrial: 'ÎN PROBĂ', + exhausted: 'COTĂ EPUIZATĂ', + desc: 'Model puternic, care excelează într-o gamă largă de sarcini, de la dialog sofisticat și generare de conținut creativ, până la instrucțiuni detaliate.', + callTimes: 'Apeluri', + usedUp: 'Cota de probă a fost epuizată. Adăugați propriul furnizor de modele.', + useYourModel: 'În prezent se utilizează propriul furnizor de modele.', + close: 'Închide', + }, + anthropic: { + using: 'Capacitatea de încorporare utilizează', + enableTip: 'Pentru a activa modelul Anthropic, trebuie să vă legați mai întâi la OpenAI sau la serviciul Azure OpenAI.', + notEnabled: 'Nu este activat', + keyFrom: 'Obțineți cheia API de la Anthropic', + }, + encrypted: { + front: 'Cheia dvs. 
API va fi criptată și stocată folosind',
+      back: ' tehnologie.',
+    },
+  },
+  modelProvider: {
+    notConfigured: 'Modelul de sistem nu a fost încă configurat complet, iar unele funcții pot fi indisponibile.',
+    systemModelSettings: 'Setări model de sistem',
+    systemModelSettingsLink: 'De ce este necesar să se configureze un model de sistem?',
+    selectModel: 'Selectați modelul dvs.',
+    setupModelFirst: 'Vă rugăm să configurați mai întâi modelul',
+    systemReasoningModel: {
+      key: 'Model de raționament de sistem',
+      tip: 'Setați modelul de inferență implicit care va fi utilizat pentru crearea aplicațiilor, precum și caracteristici precum generarea de nume pentru dialog și sugestia următoarei întrebări vor utiliza, de asemenea, modelul de inferență implicit.',
+    },
+    embeddingModel: {
+      key: 'Model de încorporare',
+      tip: 'Setați modelul implicit pentru procesarea încorporării documentelor a Cunoștințelor, atât pentru recuperare, cât și pentru importul Cunoștințelor, folosind acest model de încorporare pentru procesarea vectorizării. Comutarea va cauza inconsecvența dimensiunii vectorului între Cunoștințele importate și întrebarea, ceea ce va duce la eșecul recuperării. Pentru a evita eșecul recuperării, vă rugăm să nu comutați acest model la întâmplare.',
+      required: 'Modelul de încorporare este obligatoriu',
+    },
+    speechToTextModel: {
+      key: 'Model de conversie vorbire-la-text',
+      tip: 'Setați modelul implicit pentru intrarea de conversie vorbire-la-text în conversație.',
+    },
+    ttsModel: {
+      key: 'Model de conversie text-la-vorbire',
+      tip: 'Setați modelul implicit pentru intrarea de conversie text-la-vorbire în conversație.',
+    },
+    rerankModel: {
+      key: 'Model de reordonare',
+      tip: 'Modelul de reordonare va reordona lista de documente candidate pe baza potrivirii semantice cu interogarea utilizatorului, îmbunătățind rezultatele clasificării semantice',
+    },
+    quota: 'Cotă',
+    searchModel: 'Model de căutare',
+    noModelFound: 'Nu a fost găsit niciun model pentru {{model}}',
+    models: 'Modele',
+    showMoreModelProvider: 'Arată mai mulți furnizori de modele',
+    selector: {
+      tip: 'Acest model a fost eliminat. Vă rugăm să adăugați un model sau să selectați un alt model.',
+      emptyTip: 'Nu există modele disponibile',
+      emptySetting: 'Vă rugăm să mergeți la setări pentru a configura',
+      rerankTip: 'Vă rugăm să configurați modelul de reordonare',
+    },
+    card: {
+      quota: 'COTĂ',
+      onTrial: 'În probă',
+      paid: 'Plătit',
+      quotaExhausted: 'Cotă epuizată',
+      callTimes: 'Apeluri',
+      tokens: 'Jetoane',
+      buyQuota: 'Cumpără cotă',
+      priorityUse: 'Utilizare prioritară',
+      removeKey: 'Elimină cheia API',
+      tip: 'Prioritate va fi acordată cotei plătite. Cota de probă va fi utilizată după epuizarea cotei plătite.',
+    },
+    item: {
+      deleteDesc: '{{modelName}} sunt utilizate ca modele de raționare a sistemului. Unele funcții nu vor fi disponibile după eliminare. Vă rugăm să confirmați.',
+      freeQuota: 'COTĂ GRATUITĂ',
+    },
+    addApiKey: 'Adăugați cheia dvs. API',
+    invalidApiKey: 'Cheie API nevalidă',
+    encrypted: {
+      front: 'Cheia dvs. 
API va fi criptată și stocată folosind', + back: ' tehnologie.', + }, + freeQuota: { + howToEarn: 'Cum să câștigați', + }, + addMoreModelProvider: 'ADĂUGAȚI MAI MULȚI FURNIZORI DE MODELE', + addModel: 'Adăugați model', + modelsNum: '{{num}} Modele', + showModels: 'Arată modele', + showModelsNum: 'Arată {{num}} modele', + collapse: 'Restrânge', + config: 'Configurare', + modelAndParameters: 'Model și parametri', + model: 'Model', + featureSupported: '{{feature}} acceptat', + callTimes: 'Apeluri', + credits: 'Credite mesaje', + buyQuota: 'Cumpără cotă', + getFreeTokens: 'Obțineți jetoane gratuite', + priorityUsing: 'Prioritizează utilizarea', + deprecated: 'Învechit', + confirmDelete: 'confirmați ștergerea?', + quotaTip: 'Jetoane gratuite disponibile rămase', + loadPresets: 'Încarcă presetări', + parameters: 'PARAMETRI', + }, + dataSource: { + add: 'Adăugați o sursă de date', + connect: 'Conectați', + notion: { + title: 'Notion', + description: 'Utilizarea Notion ca sursă de date pentru Cunoștințe.', + connectedWorkspace: 'Spațiu de lucru conectat', + addWorkspace: 'Adăugați spațiu de lucru', + connected: 'Conectat', + disconnected: 'Deconectat', + changeAuthorizedPages: 'Schimbați paginile autorizate', + pagesAuthorized: 'Pagini autorizate', + sync: 'Sincronizare', + remove: 'Elimină', + selector: { + pageSelected: 'Pagini selectate', + searchPages: 'Căutați pagini...', + noSearchResult: 'Niciun rezultat la căutare', + addPages: 'Adăugați pagini', + preview: 'PREVIZUALIZARE', + }, + }, + }, + plugin: { + serpapi: { + apiKey: 'Cheie API', + apiKeyPlaceholder: 'Introduceți cheia dvs. API', + keyFrom: 'Obțineți cheia dvs. SerpAPI din pagina contului SerpAPI', + }, + }, + apiBasedExtension: { + title: 'Extensiile bazate pe API oferă o gestionare centralizată a API-urilor, simplificând configurația pentru o utilizare ușoară în aplicațiile Dify.', + link: 'Aflați cum să dezvoltați propria extensie bazată pe API.', + linkUrl: 'https://docs.dify.ai/features/extension/api_based_extension', + add: 'Adăugați extensie API', + selector: { + title: 'Extensie API', + placeholder: 'Vă rugăm să selectați extensia API', + manage: 'Gestionați extensia API', + }, + modal: { + title: 'Adăugați extensie API', + editTitle: 'Editați extensia API', + name: { + title: 'Nume', + placeholder: 'Vă rugăm să introduceți numele', + }, + apiEndpoint: { + title: 'Endpoint API', + placeholder: 'Vă rugăm să introduceți endpoint-ul API', + }, + apiKey: { + title: 'Cheie API', + placeholder: 'Vă rugăm să introduceți cheia API', + lengthError: 'Lungimea cheii API nu poate fi mai mică de 5 caractere', + }, + }, + type: 'Tip', + }, + about: { + changeLog: 'Jurnal modificări', + updateNow: 'Actualizați acum', + nowAvailable: 'Dify {{version}} este acum disponibil.', + latestAvailable: 'Dify {{version}} este ultima versiune disponibilă.', + }, + appMenus: { + overview: 'Prezentare generală', + promptEng: 'Orchestrare', + apiAccess: 'Acces API', + logAndAnn: 'Jurnale și Ann.', + logs: 'Jurnale', + }, + environment: { + testing: 'TESTARE', + development: 'DEZVOLTARE', + }, + appModes: { + completionApp: 'Generator de text', + chatApp: 'Aplicație de chat', + }, + datasetMenus: { + documents: 'Documente', + hitTesting: 'Testare recuperare', + settings: 'Setări', + emptyTip: 'Cunoștințele nu au fost asociate, vă rugăm să mergeți la aplicație sau la plug-in pentru a finaliza asocierea.', + viewDoc: 'Vizualizați documentația', + relatedApp: 'aplicații asociate', + }, + voiceInput: { + speaking: 'Vorbiți acum...', + converting: 'Se 
convertește la text...', + notAllow: 'microfonul nu este autorizat', + }, + modelName: { + 'gpt-3.5-turbo': 'GPT-3.5-Turbo', + 'gpt-3.5-turbo-16k': 'GPT-3.5-Turbo-16K', + 'gpt-4': 'GPT-4', + 'gpt-4-32k': 'GPT-4-32K', + 'text-davinci-003': 'Text-Davinci-003', + 'text-embedding-ada-002': 'Text-Embedding-Ada-002', + 'whisper-1': 'Whisper-1', + 'claude-instant-1': 'Claude-Instant', + 'claude-2': 'Claude-2', + }, + chat: { + renameConversation: 'Redenumește conversația', + conversationName: 'Nume conversație', + conversationNamePlaceholder: 'Vă rugăm să introduceți numele conversației', + conversationNameCanNotEmpty: 'Numele conversației este obligatoriu', + citation: { + title: 'CITĂRI', + linkToDataset: 'Legătură la Cunoștințe', + characters: 'Caractere:', + hitCount: 'Număr de recuperări:', + vectorHash: 'Hash vector:', + hitScore: 'Scor de recuperare:', + }, + }, + promptEditor: { + placeholder: 'Scrieți aici prompt-ul, introduceți \'{}\' pentru a insera o variabilă, introduceți \'/\' pentru a insera un bloc de conținut prompt', + context: { + item: { + title: 'Context', + desc: 'Inserați șablon de context', + }, + modal: { + title: '{{num}} Cunoștințe în context', + add: 'Adăugați context ', + footer: 'Puteți gestiona contextele în secțiunea Context de mai jos.', + }, + }, + history: { + item: { + title: 'Istoric conversație', + desc: 'Inserați șablon de mesaj istoric', + }, + modal: { + title: 'EXEMPLU', + user: 'Salut', + assistant: 'Salut! Cum vă pot ajuta astăzi?', + edit: 'Editați numele rolurilor de conversație', + }, + }, + variable: { + item: { + title: 'Variabile și instrumente externe', + desc: 'Inserați variabile și instrumente externe', + }, + outputToolDisabledItem: { + title: 'Variabile', + desc: 'Inserați variabile', + }, + modal: { + add: 'Nouă variabilă', + addTool: 'Nou instrument', + }, + }, + query: { + item: { + title: 'Interogare', + desc: 'Inserați șablon de interogare utilizator', + }, + }, + existed: 'Există deja în prompt', + }, + imageUploader: { + uploadFromComputer: 'Încărcați de pe computer', + uploadFromComputerReadError: 'Citirea imaginii a eșuat, vă rugăm încercați din nou.', + uploadFromComputerUploadError: 'Încărcarea imaginii a eșuat, vă rugăm încărcați din nou.', + uploadFromComputerLimit: 'Imaginile încărcate nu pot depăși {{size}} MB', + pasteImageLink: 'Inserați link-ul imaginii', + pasteImageLinkInputPlaceholder: 'Inserați link-ul imaginii aici', + pasteImageLinkInvalid: 'Link-ul imaginii este nevalid', + imageUpload: 'Încărcare imagine', + }, + tag: { + placeholder: 'Toate etichetele', + addNew: 'Adăugați o etichetă nouă', + noTag: 'Nicio etichetă', + noTagYet: 'Încă nu există etichete', + addTag: 'Adăugați etichete', + editTag: 'Editați etichete', + manageTags: 'Gestionați etichete', + selectorPlaceholder: 'Tastați pentru a căuta sau crea', + create: 'Creați', + delete: 'Ștergeți eticheta', + deleteTip: 'Eticheta este utilizată, ștergeți-o?', + created: 'Etichetă creată cu succes', + failed: 'Crearea etichetei a eșuat', + }, +} + +export default translation diff --git a/web/i18n/ro-RO/custom.ts b/web/i18n/ro-RO/custom.ts new file mode 100644 index 0000000000..0e10d59ec0 --- /dev/null +++ b/web/i18n/ro-RO/custom.ts @@ -0,0 +1,30 @@ +const translation = { + custom: 'Personalizare', + upgradeTip: { + prefix: 'Actualizați-vă planul pentru a', + suffix: 'să vă personalizați marca.', + }, + webapp: { + title: 'Personalizați marca WebApp', + removeBrand: 'Eliminați "Powered by Dify"', + changeLogo: 'Schimbați imaginea mărcii "Powered by"', + 
changeLogoTip: 'Format SVG sau PNG cu o dimensiune minimă de 40x40px', + }, + app: { + title: 'Personalizați marca antetului aplicației', + changeLogoTip: 'Format SVG sau PNG cu o dimensiune minimă de 80x80px', + }, + upload: 'Încărcare', + uploading: 'Se încarcă', + uploadedFail: 'Încărcarea imaginii a eșuat, vă rugăm să o reîncărcați.', + change: 'Schimbă', + apply: 'Aplică', + restore: 'Restabilește valorile implicite', + customize: { + contactUs: ' contactați-ne ', + prefix: 'Pentru a personaliza sigla mărcii în cadrul aplicației, vă rugăm', + suffix: 'să actualizați la ediția Enterprise.', + }, +} + +export default translation diff --git a/web/i18n/ro-RO/dataset-creation.ts b/web/i18n/ro-RO/dataset-creation.ts new file mode 100644 index 0000000000..89e614e00c --- /dev/null +++ b/web/i18n/ro-RO/dataset-creation.ts @@ -0,0 +1,130 @@ +const translation = { + steps: { + header: { + creation: 'Creați Cunoștințe', + update: 'Adăugați date', + }, + one: 'Alegeți sursa de date', + two: 'Prelucrarea și curățarea textului', + three: 'Executați și finalizați', + }, + error: { + unavailable: 'Această Cunoștință nu este disponibilă', + }, + stepOne: { + filePreview: 'Previzualizare fișier', + pagePreview: 'Previzualizare pagină', + dataSourceType: { + file: 'Importați din fișier text', + notion: 'Sincronizați din Notion', + web: 'Sincronizați din site web', + }, + uploader: { + title: 'Încărcați fișier text', + button: 'Trageți și fixați fișierul, sau', + browse: 'Răsfoire', + tip: 'Acceptă {{supportTypes}}. Maxim {{size}}MB fiecare.', + validation: { + typeError: 'Tipul de fișier nu este acceptat', + size: 'Fișierul este prea mare. Maximul este de {{size}}MB', + count: 'Nu se acceptă mai multe fișiere', + filesNumber: 'Ați atins limita de încărcare în lot de {{filesNumber}} fișiere.', + }, + cancel: 'Anulează', + change: 'Schimbă', + failed: 'Încărcarea a eșuat', + }, + notionSyncTitle: 'Notion nu este conectat', + notionSyncTip: 'Pentru a sincroniza cu Notion, trebuie mai întâi să se stabilească o conexiune la Notion.', + connect: 'Mergi la conectare', + button: 'următorul', + emptyDatasetCreation: 'Vreau să creez o Cunoștință goală', + modal: { + title: 'Creați o Cunoștință goală', + tip: 'O Cunoștință goală nu va conține niciun document, iar dvs. puteți încărca documente în orice moment.', + input: 'Numele Cunoștinței', + placeholder: 'Vă rugăm să introduceți', + nameNotEmpty: 'Numele nu poate fi gol', + nameLengthInvaild: 'Numele trebuie să fie între 1 și 40 de caractere', + cancelButton: 'Anulează', + confirmButton: 'Creează', + failed: 'Crearea a eșuat', + }, + }, + stepTwo: { + segmentation: 'Setări de segmentare', + auto: 'Automat', + autoDescription: 'Setează automat regulile de segmentare și prelucrare. Utilizatorilor necunoscuți li se recomandă să selecteze această opțiune.', + custom: 'Personalizat', + customDescription: 'Personalizați regulile de segmentare, lungimea segmentelor și regulile de prelucrare, etc.', + separator: 'Identificator de segment', + separatorPlaceholder: 'De exemplu, linie nouă (\\\\n) sau separator special (cum ar fi "***")', + maxLength: 'Lungimea maximă a segmentului', + overlap: 'Suprapunerea segmentelor', + overlapTip: 'Setarea suprapunerii segmentelor poate menține relevanța semantică între ele, îmbunătățind efectul de recuperare. 
Se recomandă să setați 10%-25% din dimensiunea maximă a segmentului.', + overlapCheck: 'suprapunerea segmentului nu ar trebui să fie mai mare decât lungimea maximă a segmentului', + rules: 'Reguli de prelucrare a textului', + removeExtraSpaces: 'Înlocuiește spațiile consecutive, liniile noi și taburile', + removeUrlEmails: 'Șterge toate adresele URL și e-mailurile', + removeStopwords: 'Eliminați cuvintele de umplere, cum ar fi "a", "an", "the"', + preview: 'Confirmă și previzualizează', + reset: 'Resetează', + indexMode: 'Mod de indexare', + qualified: 'Calitate ridicată', + recommend: 'Recomandă', + qualifiedTip: 'Apelează interfața de încorporare a sistemului implicit pentru a procesa și a oferi o precizie mai mare atunci când utilizatorii interoghează.', + warning: 'Vă rugăm să setați mai întâi cheia API a furnizorului de modele.', + click: 'Mergi la setări', + economical: 'Economic', + economicalTip: 'Utilizați motoare de vectori offline, indexuri de cuvinte cheie etc. pentru a reduce precizia fără a cheltui jetoane', + QATitle: 'Segmentarea în format Întrebare și Răspuns', + QATip: 'Activarea acestei opțiuni va consuma mai multe jetoane', + QALanguage: 'Segmentează folosind', + emstimateCost: 'Estimare', + emstimateSegment: 'Segmente estimate', + segmentCount: 'segmente', + calculating: 'Se calculează...', + fileSource: 'Prelucrează documente', + notionSource: 'Prelucrează pagini', + other: 'și alte ', + fileUnit: ' fișiere', + notionUnit: ' pagini', + previousStep: 'Pasul anterior', + nextStep: 'Salvează și Procesează', + save: 'Salvează și Procesează', + cancel: 'Anulează', + sideTipTitle: 'De ce segmentare și prelucrare?', + sideTipP1: 'Atunci când se prelucrează date text, segmentarea și curățarea sunt două etape importante de pre-procesare.', + sideTipP2: 'Segmentarea împarte textul lung în paragrafe, astfel încât modelele să poată înțelege mai bine. Acest lucru îmbunătățește calitatea și relevanța rezultatelor modelului.', + sideTipP3: 'Curățarea elimină caracterele și formatele inutile, făcând Cunoștințele mai curate și mai ușor de analizat.', + sideTipP4: 'O segmentare și curățare adecvată îmbunătățesc performanța modelului, oferind rezultate mai precise și valoroase.', + previewTitle: 'Previzualizare', + previewTitleButton: 'Previzualizare', + previewButton: 'Comutare la format întrebare și răspuns', + previewSwitchTipStart: 'Previzualizarea curentă a segmentului este în format text, comutarea la o previzualizare în format întrebare și răspuns va', + previewSwitchTipEnd: ' consuma jetoane suplimentare', + characters: 'caractere', + indexSettedTip: 'Pentru a modifica metoda de indexare, vă rugăm să mergeți la ', + retrivalSettedTip: 'Pentru a modifica metoda de indexare, vă rugăm să mergeți la ', + datasetSettingLink: 'setările Cunoștinței.', + }, + stepThree: { + creationTitle: '🎉 Cunoștință creată', + creationContent: 'Am denumit automat Cunoștința, o puteți modifica în orice moment', + label: 'Numele Cunoștinței', + additionTitle: '🎉 Document încărcat', + additionP1: 'Documentul a fost încărcat în Cunoștință', + additionP2: ', îl puteți găsi în lista de documente a Cunoștinței.', + stop: 'Oprește procesarea', + resume: 'Reia procesarea', + navTo: 'Mergi la document', + sideTipTitle: 'Ce urmează', + sideTipContent: 'După ce documentul a terminat indexarea, Cunoștința poate fi integrată în aplicație ca context, puteți găsi setările contextuale în pagina de orchestrare a prompturilor. 
De asemenea, o puteți crea ca un plugin de indexare ChatGPT independent pentru a o publica.', + modelTitle: 'Sunteți sigur că doriți să opriți încorporarea?', + modelContent: 'Dacă trebuie să reluați procesarea mai târziu, veți continua de unde ați rămas.', + modelButtonConfirm: 'Confirmă', + modelButtonCancel: 'Anulează', + }, +} + +export default translation diff --git a/web/i18n/ro-RO/dataset-documents.ts b/web/i18n/ro-RO/dataset-documents.ts new file mode 100644 index 0000000000..a7c0bf5d51 --- /dev/null +++ b/web/i18n/ro-RO/dataset-documents.ts @@ -0,0 +1,349 @@ +const translation = { + list: { + title: 'Documente', + desc: 'Toate fișierele din Cunoștințe sunt afișate aici, iar întreaga Cunoaștere poate fi legată de citări Dify sau indexată prin intermediul pluginului Chat.', + addFile: 'adaugă fișier', + addPages: 'Adaugă pagini', + table: { + header: { + fileName: 'NUMELE FIȘIERULUI', + words: 'CUVINTE', + hitCount: 'NUMĂR DE RECUPERĂRI', + uploadTime: 'TIMP DE ÎNCĂRCARE', + status: 'STARE', + action: 'ACȚIUNE', + }, + }, + action: { + uploadFile: 'Încarcă un fișier nou', + settings: 'Setări de segment', + addButton: 'Adaugă segment', + add: 'Adaugă un segment', + batchAdd: 'Adăugare în lot', + archive: 'Arhivează', + unarchive: 'Dezarhivează', + delete: 'Șterge', + enableWarning: 'Fișierul arhivat nu poate fi activat', + sync: 'Sincronizează', + }, + index: { + enable: 'Activează', + disable: 'Dezactivează', + all: 'Toate', + enableTip: 'Fișierul poate fi indexat', + disableTip: 'Fișierul nu poate fi indexat', + }, + status: { + queuing: 'În coadă', + indexing: 'Indexare', + paused: 'Întrerupt', + error: 'Eroare', + available: 'Disponibil', + enabled: 'Activat', + disabled: 'Dezactivat', + archived: 'Arhivat', + }, + empty: { + title: 'Nu există încă documentație', + upload: { + tip: 'Puteți încărca fișiere, sincroniza de pe site-ul web sau din aplicații web precum Notion, GitHub etc.', + }, + sync: { + tip: 'Dify va descărca periodic fișiere din Notion și va finaliza procesarea.', + }, + }, + delete: { + title: 'Sigur doriți să ștergeți?', + content: 'Dacă trebuie să reluați procesarea mai târziu, veți continua de unde ați rămas', + }, + batchModal: { + title: 'Adăugare în lot a segmentelor', + csvUploadTitle: 'Trage și plasează fișierul tău CSV aici sau ', + browse: 'răsfoiește', + tip: 'Fișierul CSV trebuie să respecte următoarea structură:', + question: 'întrebare', + answer: 'răspuns', + contentTitle: 'conținut segment', + content: 'conținut', + template: 'Descărcați șablonul aici', + cancel: 'Anulează', + run: 'Rulează Lot', + runError: 'Eșec la rularea lotului', + processing: 'În procesare lot', + completed: 'Import finalizat', + error: 'Eroare la import', + ok: 'OK', + }, + }, + metadata: { + title: 'Metadate', + desc: 'Etichetarea metadatelor pentru documente permite accesarea rapidă a acestora de către IA și expune sursa referințelor pentru utilizatori.', + dateTimeFormat: 'D MMMM YYYY hh:mm A', + docTypeSelectTitle: 'Vă rugăm să selectați un tip de document', + docTypeChangeTitle: 'Schimbați tipul de document', + docTypeSelectWarning: + 'Dacă tipul de document este schimbat, metadatele completate acum nu vor mai fi păstrate', + firstMetaAction: 'Să începem', + placeholder: { + add: 'Adaugă ', + select: 'Selectează ', + }, + source: { + upload_file: 'Încarcă fișier', + notion: 'Sincronizează din Notion', + github: 'Sincronizează din Github', + }, + type: { + book: 'Carte', + webPage: 'Pagină web', + paper: 'Lucrare', + socialMediaPost: 'Postare pe rețele sociale', + 
personalDocument: 'Document personal', + businessDocument: 'Document de afaceri', + IMChat: 'Conversație IM', + wikipediaEntry: 'Intrare Wikipedia', + notion: 'Sincronizează din Notion', + github: 'Sincronizează din Github', + technicalParameters: 'Parametri tehnici', + }, + field: { + processRule: { + processDoc: 'Procesează documentul', + segmentRule: 'Regulă de segment', + segmentLength: 'Lungimea segmentelor', + processClean: 'Curățare text procesare', + }, + book: { + title: 'Titlu', + language: 'Limbă', + author: 'Autor', + publisher: 'Editor', + publicationDate: 'Data publicării', + ISBN: 'ISBN', + category: 'Categorie', + }, + webPage: { + title: 'Titlu', + url: 'URL', + language: 'Limbă', + authorPublisher: 'Autor/Editor', + publishDate: 'Data publicării', + topicsKeywords: 'Subiecte/Cuvinte cheie', + description: 'Descriere', + }, + paper: { + title: 'Titlu', + language: 'Limbă', + author: 'Autor', + publishDate: 'Data publicării', + journalConferenceName: 'Nume jurnal/conferință', + volumeIssuePage: 'Volum/Număr/Pagină', + DOI: 'DOI', + topicsKeywords: 'Subiecte/Cuvinte cheie', + abstract: 'Rezumat', + }, + socialMediaPost: { + platform: 'Platformă', + authorUsername: 'Autor/Nume de utilizator', + publishDate: 'Data publicării', + postURL: 'URL postare', + topicsTags: 'Subiecte/Etichete', + }, + personalDocument: { + title: 'Titlu', + author: 'Autor', + creationDate: 'Data creării', + lastModifiedDate: 'Ultima dată modificat', + documentType: 'Tip document', + tagsCategory: 'Etichete/Categorie', + }, + businessDocument: { + title: 'Titlu', + author: 'Autor', + creationDate: 'Data creării', + lastModifiedDate: 'Ultima dată modificat', + documentType: 'Tip document', + departmentTeam: 'Departament/Echipă', + }, + IMChat: { + chatPlatform: 'Platformă de chat', + chatPartiesGroupName: 'Persoane din chat/Nume grup', + participants: 'Participanți', + startDate: 'Data începerii', + endDate: 'Data încheierii', + topicsKeywords: 'Subiecte/Cuvinte cheie', + fileType: 'Tip fișier', + }, + wikipediaEntry: { + title: 'Titlu', + language: 'Limbă', + webpageURL: 'URL pagină web', + editorContributor: 'Editor/Contributor', + lastEditDate: 'Ultima dată modificat', + summaryIntroduction: 'Rezumat/Introducere', + }, + notion: { + title: 'Titlu', + language: 'Limbă', + author: 'Autor', + createdTime: 'Dată creare', + lastModifiedTime: 'Ultima dată modificat', + url: 'URL', + tag: 'Etichetă', + description: 'Descriere', + }, + github: { + repoName: 'Nume depozit', + repoDesc: 'Descriere depozit', + repoOwner: 'Proprietar depozit', + fileName: 'Nume fișier', + filePath: 'Cale fișier', + programmingLang: 'Limbaj de programare', + url: 'URL', + license: 'Licență', + lastCommitTime: 'Ultima dată comitere', + lastCommitAuthor: 'Ultimul autor comitere', + }, + originInfo: { + originalFilename: 'Nume fișier original', + originalFileSize: 'Dimensiune fișier original', + uploadDate: 'Dată încărcare', + lastUpdateDate: 'Ultima dată actualizare', + source: 'Sursă', + }, + technicalParameters: { + segmentSpecification: 'Specificație segmente', + segmentLength: 'Lungime segmente', + avgParagraphLength: 'Lungime medie paragraf', + paragraphs: 'Paragrafe', + hitCount: 'Număr de recuperări', + embeddingTime: 'Timp încorporare', + embeddedSpend: 'Cheltuieli încorporare', + }, + }, + languageMap: { + zh: 'Chineză', + en: 'Engleză', + es: 'Spaniolă', + fr: 'Franceză', + de: 'Germană', + ja: 'Japoneză', + ko: 'Coreeană', + ru: 'Rusă', + ar: 'Arabă', + pt: 'Portugheză', + it: 'Italiană', + nl: 'Olandeză', + pl: 
'Poloneză', + sv: 'Suedeză', + tr: 'Turcă', + he: 'Ebraică', + hi: 'Hindi', + da: 'Daneză', + fi: 'Finlandeză', + no: 'Norvegiană', + hu: 'Maghiară', + el: 'Greacă', + cs: 'Cehă', + th: 'Tailandeză', + id: 'Indoneziană', + }, + categoryMap: { + book: { + fiction: 'Ficțiune', + biography: 'Biografie', + history: 'Istorie', + science: 'Știință', + technology: 'Tehnologie', + education: 'Educație', + philosophy: 'Filozofie', + religion: 'Religie', + socialSciences: 'ȘtiințeSociale', + art: 'Artă', + travel: 'Călătorii', + health: 'Sănătate', + selfHelp: 'AutoAjutorare', + businessEconomics: 'AfaceriEconomie', + cooking: 'Bucătărie', + childrenYoungAdults: 'CopiiTineri', + comicsGraphicNovels: 'ComicsRomaneCgrafice', + poetry: 'Poezie', + drama: 'Dramă', + other: 'Altele', + }, + personalDoc: { + notes: 'Note', + blogDraft: 'Ciornă blog', + diary: 'Jurnal', + researchReport: 'Raport de cercetare', + bookExcerpt: 'Extras carte', + schedule: 'Program', + list: 'Listă', + projectOverview: 'Prezentare generală proiect', + photoCollection: 'Colecție foto', + creativeWriting: 'Scriere creativă', + codeSnippet: 'Fragment de cod', + designDraft: 'Schiță de design', + personalResume: 'CV personal', + other: 'Altele', + }, + businessDoc: { + meetingMinutes: 'Proces-verbal ședință', + researchReport: 'Raport de cercetare', + proposal: 'Propunere', + employeeHandbook: 'Manual angajat', + trainingMaterials: 'Materiale de formare', + requirementsDocument: 'Document cerințe', + designDocument: 'Document de design', + productSpecification: 'Specificație produs', + financialReport: 'Raport financiar', + marketAnalysis: 'Analiză piață', + projectPlan: 'Plan de proiect', + teamStructure: 'Structură echipă', + policiesProcedures: 'Politici și proceduri', + contractsAgreements: 'Contracte și acorduri', + emailCorrespondence: 'Corespondență email', + other: 'Altele', + }, + }, + }, + embedding: { + processing: 'Procesare încorporare...', + paused: 'Încorporare întreruptă', + completed: 'Încorporare finalizată', + error: 'Eroare la încorporare', + docName: 'Prelucrare document', + mode: 'Regula de segmentare', + segmentLength: 'Lungime segmente', + textCleaning: 'Pre-definiție și curățare text', + segments: 'Paragrafe', + highQuality: 'Mod calitate ridicată', + economy: 'Mod economic', + estimate: 'Consum estimat', + stop: 'Oprește procesarea', + resume: 'Reia procesarea', + automatic: 'Automat', + custom: 'Personalizat', + previewTip: 'Previzualizarea paragrafului va fi disponibilă după finalizarea încorporării', + }, + segment: { + paragraphs: 'Paragrafe', + keywords: 'Cuvinte cheie', + addKeyWord: 'Adăugați un cuvânt cheie', + keywordError: 'Lungimea maximă a cuvântului cheie este de 20 de caractere', + characters: 'caractere', + hitCount: 'Număr de rezultate', + vectorHash: 'Vector hash: ', + questionPlaceholder: 'adăugați întrebarea aici', + questionEmpty: 'Întrebarea nu poate fi goală', + answerPlaceholder: 'adăugați răspunsul aici', + answerEmpty: 'Răspunsul nu poate fi gol', + contentPlaceholder: 'adăugați conținutul aici', + contentEmpty: 'Conținutul nu poate fi gol', + newTextSegment: 'Nou segment de text', + newQaSegment: 'Nou segment de întrebări și răspunsuri', + delete: 'Ștergeți acest fragment?', + }, +} + +export default translation diff --git a/web/i18n/ro-RO/dataset-hit-testing.ts b/web/i18n/ro-RO/dataset-hit-testing.ts new file mode 100644 index 0000000000..5dc5d70221 --- /dev/null +++ b/web/i18n/ro-RO/dataset-hit-testing.ts @@ -0,0 +1,28 @@ +const translation = { + title: 'Testarea Recuperării', 
+ desc: 'Testați efectul de atingere al Cunoștințelor pe baza textului interogat dat.', + dateTimeFormat: 'DD/MM/YYYY hh:mm A', + recents: 'Recente', + table: { + header: { + source: 'Sursă', + text: 'Text', + time: 'Timp', + }, + }, + input: { + title: 'Text sursă', + placeholder: 'Vă rugăm să introduceți un text, se recomandă o propoziție declarativă scurtă.', + countWarning: 'Până la 200 de caractere.', + indexWarning: 'Doar Cunoștințe de înaltă calitate.', + testing: 'Testare', + }, + hit: { + title: 'PARAGRAFE DE RECUPERARE', + emptyTip: 'Rezultatele testării de recuperare vor apărea aici', + }, + noRecentTip: 'Nu există rezultate de interogare recente aici', + viewChart: 'Vizualizați GRAFICUL VECTORIAL', +} + +export default translation diff --git a/web/i18n/ro-RO/dataset-settings.ts b/web/i18n/ro-RO/dataset-settings.ts new file mode 100644 index 0000000000..bc940cb481 --- /dev/null +++ b/web/i18n/ro-RO/dataset-settings.ts @@ -0,0 +1,33 @@ +const translation = { + title: 'Setări de cunoștințe', + desc: 'Aici puteți modifica proprietățile și metodele de lucru ale cunoștințelor.', + form: { + name: 'Numele cunoștințelor', + namePlaceholder: 'Vă rugăm să introduceți numele cunoștințelor', + nameError: 'Numele nu poate fi gol', + desc: 'Descrierea cunoștințelor', + descInfo: 'Vă rugăm să scrieți o descriere textuală clară pentru a contura conținutul cunoștințelor. Această descriere va fi utilizată ca bază pentru potrivire atunci când se selectează din mai multe cunoștințe pentru inferență.', + descPlaceholder: 'Descrieți ce se află în aceste cunoștințe. O descriere detaliată permite AI să acceseze conținutul cunoștințelor într-un timp oportun. Dacă este gol, Dify va folosi strategia implicită.', + descWrite: 'Aflați cum să scrieți o descriere bună a cunoștințelor.', + permissions: 'Permisiuni', + permissionsOnlyMe: 'Doar eu', + permissionsAllMember: 'Toți membrii echipei', + indexMethod: 'Metodă de indexare', + indexMethodHighQuality: 'Calitate ridicată', + indexMethodHighQualityTip: 'Apelați interfața de încorporare a OpenAI pentru procesare pentru a oferi o acuratețe mai ridicată atunci când utilizatorii interogă.', + indexMethodEconomy: 'Economică', + indexMethodEconomyTip: 'Utilizați motoare de vectori offline, indexuri de cuvinte cheie etc. pentru a reduce acuratețea fără a cheltui jetoane', + embeddingModel: 'Model de încorporare', + embeddingModelTip: 'Schimbați modelul încorporat, vă rugăm să accesați ', + embeddingModelTipLink: 'Setări', + retrievalSetting: { + title: 'Setări de recuperare', + learnMore: 'Aflați mai multe', + description: ' despre metoda de recuperare.', + longDescription: ' despre metoda de recuperare, o puteți schimba în orice moment în setările cunoștințelor.', + }, + save: 'Salvare', + }, +} + +export default translation diff --git a/web/i18n/ro-RO/dataset.ts b/web/i18n/ro-RO/dataset.ts new file mode 100644 index 0000000000..eb6f8e84b4 --- /dev/null +++ b/web/i18n/ro-RO/dataset.ts @@ -0,0 +1,49 @@ +const translation = { + knowledge: 'Cunoștințe', + documentCount: ' documente', + wordCount: ' mii de cuvinte', + appCount: ' aplicații conectate', + createDataset: 'Creează Cunoștințe', + createDatasetIntro: 'Importați-vă propriile date text sau scrieți date în timp real prin Webhook pentru îmbunătățirea contextului LLM.', + deleteDatasetConfirmTitle: 'Ștergeți această Cunoștință?', + deleteDatasetConfirmContent: + 'Ștergerea Cunoștințelor este ireversibilă. 
Utilizatorii nu vor mai putea accesa Cunoștințele, iar toate configurațiile și jurnalele prompt vor fi șterse permanent.', + datasetDeleted: 'Cunoștințe șterse', + datasetDeleteFailed: 'Eșec la ștergerea Cunoștințelor', + didYouKnow: 'Știați că?', + intro1: 'Cunoștințele pot fi integrate în aplicația Dify ', + intro2: 'ca un context', + intro3: ',', + intro4: 'sau ele ', + intro5: 'pot fi create', + intro6: ' ca un plug-in index ChatGPT standalone pentru publicare', + unavailable: 'Indisponibil', + unavailableTip: 'Modelul de încorporare nu este disponibil, modelul de încorporare implicit trebuie configurat', + datasets: 'CUNOȘTINȚE', + datasetsApi: 'ACCES API', + retrieval: { + semantic_search: { + title: 'Căutare Vector', + description: 'Generați încorporările interogărilor și căutați bucata de text cea mai similară cu reprezentarea sa vectorială.', + }, + full_text_search: { + title: 'Căutare Full-Text', + description: 'Indexați toți termenii din document, permițând utilizatorilor să caute orice termen și să recupereze bucățile de text relevante care conțin acei termeni.', + }, + hybrid_search: { + title: 'Căutare Hibridă', + description: 'Executați căutări full-text și căutări vectoriale în același timp, reclasificați pentru a selecta cea mai bună potrivire pentru interogarea utilizatorului. Configurarea API-ului modelului Rerank este necesară.', + recommend: 'Recomandat', + }, + invertedIndex: { + title: 'Index Inversat', + description: 'Indexul inversat este o structură utilizată pentru recuperare eficientă. Organizat după termeni, fiecare termen indică documentele sau paginile web care îl conțin.', + }, + change: 'Schimbă', + changeRetrievalMethod: 'Schimbă metoda de recuperare', + }, + docsFailedNotice: 'documentele nu au putut fi indexate', + retry: 'Reîncercați', +} + +export default translation diff --git a/web/i18n/ro-RO/explore.ts b/web/i18n/ro-RO/explore.ts new file mode 100644 index 0000000000..6f4ba294ab --- /dev/null +++ b/web/i18n/ro-RO/explore.ts @@ -0,0 +1,41 @@ +const translation = { + title: 'Explorați', + sidebar: { + discovery: 'Descoperire', + chat: 'Chat', + workspace: 'Spațiu de lucru', + action: { + pin: 'Fixați', + unpin: 'Deblocați', + rename: 'Redenumire', + delete: 'Ștergeți', + }, + delete: { + title: 'Ștergeți aplicația', + content: 'Sunteți sigur că doriți să ștergeți această aplicație?', + }, + }, + apps: { + title: 'Explorați aplicațiile Dify', + description: 'Utilizați aceste aplicații model imediat sau personalizați-vă propria aplicație pe baza modelelor.', + allCategories: 'Recomandate', + }, + appCard: { + addToWorkspace: 'Adăugați la spațiul de lucru', + customize: 'Personalizați', + }, + appCustomize: { + title: 'Creați o aplicație din {{name}}', + subTitle: 'Pictogramă și nume aplicație', + nameRequired: 'Numele aplicației este obligatoriu', + }, + category: { + Assistant: 'Asistent', + Writing: 'Scriere', + Translate: 'Traducere', + Programming: 'Programare', + HR: 'Resurse Umane', + }, +} + +export default translation diff --git a/web/i18n/ro-RO/layout.ts b/web/i18n/ro-RO/layout.ts new file mode 100644 index 0000000000..928649474b --- /dev/null +++ b/web/i18n/ro-RO/layout.ts @@ -0,0 +1,4 @@ +const translation = { +} + +export default translation diff --git a/web/i18n/ro-RO/login.ts b/web/i18n/ro-RO/login.ts new file mode 100644 index 0000000000..3f22f8d169 --- /dev/null +++ b/web/i18n/ro-RO/login.ts @@ -0,0 +1,61 @@ +const translation = { + pageTitle: 'Bun venit! 
Hai să începem!👋', + welcome: 'Bine ai venit la Dify, te rugăm să te autentifici pentru a continua.', + email: 'Adresă de email', + emailPlaceholder: 'Adresa ta de email', + password: 'Parolă', + passwordPlaceholder: 'Parola ta', + name: 'Nume de utilizator', + namePlaceholder: 'Numele tău de utilizator', + forget: 'Ai uitat parola?', + signBtn: 'Autentificare', + sso: 'Continuă cu SSO', + installBtn: 'Configurare', + setAdminAccount: 'Configurare cont de administrator', + setAdminAccountDesc: 'Privilegii maxime pentru contul de administrator, care poate fi utilizat pentru crearea de aplicații și gestionarea furnizorilor LLM, etc.', + createAndSignIn: 'Creează și autentifică-te', + oneMoreStep: 'Un pas în plus', + createSample: 'Pe baza acestor informații, vom crea o aplicație de exemplu pentru tine', + invitationCode: 'Cod de invitație', + invitationCodePlaceholder: 'Codul tău de invitație', + interfaceLanguage: 'Limba interfeței', + timezone: 'Fus orar', + go: 'Mergi la Dify', + sendUsMail: 'Trimite-ne un email cu introducerea ta și noi ne vom ocupa de cererea de invitație.', + acceptPP: 'Am citit și accept politica de confidențialitate', + reset: 'Rulați următoarea comandă pentru a vă reseta parola', + withGitHub: 'Continuă cu GitHub', + withGoogle: 'Continuă cu Google', + rightTitle: 'Deblochează întregul potențial al LLM', + rightDesc: 'Construiește cu ușurință aplicații AI captivante din punct de vedere vizual, utilizabile și îmbunătățibile.', + tos: 'Termeni și condiții', + pp: 'Politica de confidențialitate', + tosDesc: 'Prin înregistrarea, ești de acord cu', + goToInit: 'Dacă nu ai inițializat încă contul, te rugăm să mergi la pagina de inițializare', + donthave: 'Nu ai?', + invalidInvitationCode: 'Cod de invitație invalid', + accountAlreadyInited: 'Contul este deja inițializat', + error: { + emailEmpty: 'Adresa de email este obligatorie', + emailInValid: 'Te rugăm să introduci o adresă de email validă', + nameEmpty: 'Numele este obligatoriu', + passwordEmpty: 'Parola este obligatorie', + passwordInvalid: 'Parola trebuie să conțină litere și cifre, iar lungimea trebuie să fie mai mare de 8 caractere', + }, + license: { + tip: 'Înainte de a începe Dify Community Edition, citește', + link: 'Licența open-source de pe GitHub', + }, + join: 'Alătură-te', + joinTipStart: 'Te invităm să te alături echipei', + joinTipEnd: 'pe Dify', + invalid: 'Link-ul a expirat', + explore: 'Explorează Dify', + activatedTipStart: 'Te-ai alăturat echipei', + activatedTipEnd: '', + activated: 'Autentifică-te acum', + adminInitPassword: 'Parola de inițializare a administratorului', + validate: 'Validează', +} + +export default translation diff --git a/web/i18n/ro-RO/register.ts b/web/i18n/ro-RO/register.ts new file mode 100644 index 0000000000..928649474b --- /dev/null +++ b/web/i18n/ro-RO/register.ts @@ -0,0 +1,4 @@ +const translation = { +} + +export default translation diff --git a/web/i18n/ro-RO/run-log.ts b/web/i18n/ro-RO/run-log.ts new file mode 100644 index 0000000000..6a1b33e0dd --- /dev/null +++ b/web/i18n/ro-RO/run-log.ts @@ -0,0 +1,29 @@ +const translation = { + input: 'INTRARE', + result: 'REZULTAT', + detail: 'DETALIU', + tracing: 'URMĂRIRE', + resultPanel: { + status: 'STARE', + time: 'TIMP SCURS', + tokens: 'TOTAL TOKENI', + }, + meta: { + title: 'METADATE', + status: 'Stare', + version: 'Versiune', + executor: 'Executor', + startTime: 'Timp de început', + time: 'Timp scurs', + tokens: 'Total tokeni', + steps: 'Pași de rulare', + }, + resultEmpty: { + title: 'Această rulare generează doar 
format JSON,', + tipLeft: 'vă rugăm să mergeți la ', + link: 'panoul de detalii', + tipRight: ' pentru a o vizualiza.', + }, +} + +export default translation diff --git a/web/i18n/ro-RO/share-app.ts b/web/i18n/ro-RO/share-app.ts new file mode 100644 index 0000000000..d6c1032f1b --- /dev/null +++ b/web/i18n/ro-RO/share-app.ts @@ -0,0 +1,74 @@ +const translation = { + common: { + welcome: 'Bun venit la utilizare', + appUnavailable: 'Aplicația nu este disponibilă', + appUnkonwError: 'Aplicația nu este disponibilă', + }, + chat: { + newChat: 'Chat nou', + pinnedTitle: 'Fixat', + unpinnedTitle: 'Conversații', + newChatDefaultName: 'Conversație nouă', + resetChat: 'Resetează conversația', + powerBy: 'Furnizat de', + prompt: 'Sugestie', + privatePromptConfigTitle: 'Setări conversație', + publicPromptConfigTitle: 'Sugestie inițială', + configStatusDes: 'Înainte de a începe, puteți modifica setările conversației', + configDisabled: + 'Setările sesiunii anterioare au fost utilizate pentru această sesiune.', + startChat: 'Începe chat', + privacyPolicyLeft: + 'Vă rugăm să citiți ', + privacyPolicyMiddle: + 'politica de confidențialitate', + privacyPolicyRight: + ' furnizată de dezvoltatorul aplicației.', + deleteConversation: { + title: 'Șterge conversația', + content: 'Sigur doriți să ștergeți această conversație?', + }, + tryToSolve: 'Încercați să rezolvați', + temporarySystemIssue: 'Ne pare rău, problemă temporară a sistemului.', + }, + generation: { + tabs: { + create: 'Rulează o singură dată', + batch: 'Rulează în lot', + saved: 'Salvat', + }, + savedNoData: { + title: 'Nu ați salvat încă un rezultat!', + description: 'Începeți generarea de conținut și găsiți aici rezultatele salvate.', + startCreateContent: 'Începeți crearea de conținut', + }, + title: 'Completare AI', + queryTitle: 'Conținutul interogării', + completionResult: 'Rezultatul completării', + queryPlaceholder: 'Scrieți conținutul interogării...', + run: 'Execută', + copy: 'Copiază', + resultTitle: 'Completare AI', + noData: 'AI vă va oferi ceea ce doriți aici.', + csvUploadTitle: 'Trageți și plasați fișierul CSV aici sau ', + browse: 'răsfoiți', + csvStructureTitle: 'Fișierul CSV trebuie să respecte următoarea structură:', + downloadTemplate: 'Descărcați șablonul aici', + field: 'Câmp', + batchFailed: { + info: '{{num}} execuții eșuate', + retry: 'Reîncercați', + outputPlaceholder: 'Niciun conținut de ieșire', + }, + errorMsg: { + empty: 'Vă rugăm să introduceți conținut în fișierul încărcat.', + fileStructNotMatch: 'Fișierul CSV încărcat nu se potrivește cu structura.', + emptyLine: 'Rândul {{rowIndex}} este gol', + invalidLine: 'Rândul {{rowIndex}}: valoarea {{varName}} nu poate fi goală', + moreThanMaxLengthLine: 'Rândul {{rowIndex}}: valoarea {{varName}} nu poate avea mai mult de {{maxLength}} caractere', + atLeastOne: 'Vă rugăm să introduceți cel puțin un rând în fișierul încărcat.', + }, + }, +} + +export default translation diff --git a/web/i18n/ro-RO/tools.ts b/web/i18n/ro-RO/tools.ts new file mode 100644 index 0000000000..a35633fa95 --- /dev/null +++ b/web/i18n/ro-RO/tools.ts @@ -0,0 +1,115 @@ +const translation = { + title: 'Instrumente', + createCustomTool: 'Creează Instrument Personalizat', + type: { + all: 'Toate', + builtIn: 'Incorporat', + custom: 'Personalizat', + }, + contribute: { + line1: 'Sunt interesat să ', + line2: 'contribui la Dify cu instrumente.', + viewGuide: 'Vezi ghidul', + }, + author: 'De', + auth: { + unauthorized: 'Pentru a Autoriza', + authorized: 'Autorizat', + setup: 'Configurează autorizarea 
pentru a utiliza', + setupModalTitle: 'Configurează Autorizarea', + setupModalTitleDescription: 'După configurarea credențialelor, toți membrii din spațiul de lucru pot utiliza acest instrument la orchestrarea aplicațiilor.', + }, + includeToolNum: '{{num}} instrumente incluse', + addTool: 'Adaugă Instrument', + createTool: { + title: 'Creează Instrument Personalizat', + editAction: 'Configurează', + editTitle: 'Editează Instrument Personalizat', + name: 'Nume', + toolNamePlaceHolder: 'Introduceți numele instrumentului', + schema: 'Schema', + schemaPlaceHolder: 'Introduceți aici schema OpenAPI', + viewSchemaSpec: 'Vezi specificația OpenAPI-Swagger', + importFromUrl: 'Importă de la URL', + importFromUrlPlaceHolder: 'https://...', + urlError: 'Vă rugăm să introduceți un URL valid', + examples: 'Exemple', + exampleOptions: { + json: 'Vreme(JSON)', + yaml: 'Pet Store(YAML)', + blankTemplate: 'Șablon Gol', + }, + availableTools: { + title: 'Instrumente Disponibile', + name: 'Nume', + description: 'Descriere', + method: 'Metodă', + path: 'Cale', + action: 'Acțiuni', + test: 'Testează', + }, + authMethod: { + title: 'Metoda de Autorizare', + type: 'Tipul de Autorizare', + keyTooltip: 'Cheie antet HTTP, puteți lăsa "Autorizare" dacă nu știți ce este sau setați-o la o valoare personalizată', + types: { + none: 'Niciuna', + api_key: 'Cheie API', + apiKeyPlaceholder: 'Nume antet HTTP pentru cheia API', + apiValuePlaceholder: 'Introduceți cheia API', + }, + key: 'Cheie', + value: 'Valoare', + }, + authHeaderPrefix: { + title: 'Tipul de Autentificare', + types: { + basic: 'Basic', + bearer: 'Bearer', + custom: 'Personalizat', + }, + }, + privacyPolicy: 'Politica de Confidențialitate', + privacyPolicyPlaceholder: 'Vă rugăm să introduceți politica de confidențialitate', + }, + test: { + title: 'Testează', + parametersValue: 'Parametri & Valoare', + parameters: 'Parametri', + value: 'Valoare', + testResult: 'Rezultate Test', + testResultPlaceholder: 'Rezultatul testului va fi afișat aici', + }, + thought: { + using: 'Utilizând', + used: 'Utilizat', + requestTitle: 'Cerere către', + responseTitle: 'Răspuns de la', + }, + setBuiltInTools: { + info: 'Informații', + setting: 'Setări', + toolDescription: 'Descriere instrument', + parameters: 'parametri', + string: 'șir', + number: 'număr', + required: 'Obligatoriu', + infoAndSetting: 'Informații și Setări', + }, + noCustomTool: { + title: 'Niciun instrument personalizat!', + content: 'Adăugați și gestionați aici instrumentele personalizate pentru construirea aplicațiilor AI.', + createTool: 'Creează Instrument', + }, + noSearchRes: { + title: 'Ne pare rău, nu s-au găsit rezultate!', + content: 'Nu am putut găsi niciun instrument care să se potrivească căutării dvs.', + reset: 'Resetează Căutarea', + }, + builtInPromptTitle: 'Prompt', + toolRemoved: 'Instrument eliminat', + notAuthorized: 'Instrument neautorizat', + howToGet: 'Cum să obții', +} + +export default translation diff --git a/web/i18n/ro-RO/workflow.ts b/web/i18n/ro-RO/workflow.ts new file mode 100644 index 0000000000..f7069bff9a --- /dev/null +++ b/web/i18n/ro-RO/workflow.ts @@ -0,0 +1,352 @@ +const translation = { + common: { + editing: 'Editare', + autoSaved: 'Salvat automat', + unpublished: 'Nepublicat', + published: 'Publicat', + publish: 'Publică', + update: 'Actualizează', + run: 'Rulează', + running: 'În desfășurare', + inRunMode: 'În modul de rulare', + inPreview: 'În previzualizare', + inPreviewMode: 'În modul de previzualizare', + preview: 'Previzualizare', + viewRunHistory: 'Vezi 
istoricul de rulare', + runHistory: 'Istoric de rulare', + goBackToEdit: 'Înapoi la editor', + conversationLog: 'Jurnal de conversație', + features: 'Caracteristici', + debugAndPreview: 'Depanare și previzualizare', + restart: 'Repornește', + currentDraft: 'Schița curentă', + currentDraftUnpublished: 'Schița curentă nepublicată', + latestPublished: 'Cel mai recent publicat', + publishedAt: 'Publicat la', + restore: 'Restaurează', + runApp: 'Rulează aplicația', + batchRunApp: 'Rulează aplicația în lot', + accessAPIReference: 'Accesează referința API', + embedIntoSite: 'Încorporează în site', + addTitle: 'Adaugă titlu...', + addDescription: 'Adaugă descriere...', + noVar: 'Nicio variabilă', + searchVar: 'Caută variabilă', + variableNamePlaceholder: 'Numele variabilei', + setVarValuePlaceholder: 'Setează variabila', + needConnecttip: 'Această etapă nu este conectată la nimic', + maxTreeDepth: 'Limită maximă de {{depth}} noduri pe ramură', + needEndNode: 'Blocul de Sfârșit trebuie adăugat', + needAnswerNode: 'Blocul de Răspuns trebuie adăugat', + workflowProcess: 'Proces de flux de lucru', + notRunning: 'Încă nu rulează', + previewPlaceholder: 'Introduceți conținutul în caseta de mai jos pentru a începe depanarea Chatbot-ului', + effectVarConfirm: { + title: 'Elimină variabila', + content: 'Variabila este utilizată în alte noduri. Doriți totuși să o eliminați?', + }, + insertVarTip: 'Apăsați tasta \'/\' pentru a insera rapid', + processData: 'Procesează datele', + input: 'Intrare', + output: 'Ieșire', + jinjaEditorPlaceholder: 'Tastați \'/\' sau \'{\' pentru a insera variabila', + viewOnly: 'Doar vizualizare', + showRunHistory: 'Afișează istoricul de rulare', + }, + errorMsg: { + fieldRequired: '{{field}} este obligatoriu', + authRequired: 'Este necesară autorizarea', + invalidJson: '{{field}} nu este un JSON valid', + fields: { + variable: 'Numele variabilei', + variableValue: 'Valoarea variabilei', + code: 'Cod', + model: 'Model', + rerankModel: 'Model de reevaluare', + }, + invalidVariable: 'Variabilă invalidă', + }, + singleRun: { + testRun: 'Rulare de test', + startRun: 'Începe rularea', + running: 'În desfășurare', + }, + tabs: { + 'searchBlock': 'Căutare bloc', + 'blocks': 'Blocuri', + 'builtInTool': 'Instrument încorporat', + 'customTool': 'Instrument personalizat', + 'question-understand': 'Înțelegerea întrebării', + 'logic': 'Logică', + 'transform': 'Transformare', + 'utilities': 'Utilități', + 'noResult': 'Nicio potrivire găsită', + }, + blocks: { + 'start': 'Început', + 'end': 'Sfârșit', + 'answer': 'Răspuns', + 'llm': 'LLM', + 'knowledge-retrieval': 'Recuperare cunoștințe', + 'question-classifier': 'Clasificator de întrebări', + 'if-else': 'DACĂ/ALTĂFEL', + 'code': 'Cod', + 'template-transform': 'Șablon', + 'http-request': 'Cerere HTTP', + 'variable-assigner': 'Atribuire variabilă', + }, + blocksAbout: { + 'start': 'Definiți parametrii inițiali pentru lansarea unui flux de lucru', + 'end': 'Definiți sfârșitul și tipul de rezultat al unui flux de lucru', + 'answer': 'Definiți conținutul de răspuns al unei conversații prin chat', + 'llm': 'Invocarea modelelor de limbaj extinse pentru a răspunde la întrebări sau a procesa limbajul natural', + 'knowledge-retrieval': 'Vă permite să interogați conținutul text legat de întrebările utilizatorului din Cunoștințe', + 'question-classifier': 'Definiți condițiile de clasificare ale întrebărilor utilizatorului, LLM poate defini modul în care conversația progresează pe baza descrierii clasificării', + 'if-else': 'Vă permite să împărțiți fluxul 
de lucru în două ramuri pe baza condițiilor dacă/altăfel', + 'code': 'Executați un fragment de cod Python sau NodeJS pentru a implementa o logică personalizată', + 'template-transform': 'Conversia datelor în șiruri folosind sintaxa șablonului Jinja', + 'http-request': 'Permite trimiterea de cereri de la server prin protocolul HTTP', + 'variable-assigner': 'Atribuie variabile în diferite ramuri aceleiași variabile pentru a obține o configurare unificată a post-nodurilor', + }, + operator: { + zoomIn: 'Mărește', + zoomOut: 'Micșorează', + zoomTo50: 'Zoom la 50%', + zoomTo100: 'Zoom la 100%', + zoomToFit: 'Zoom la potrivire', + }, + panel: { + userInputField: 'Câmp de intrare utilizator', + changeBlock: 'Schimbă blocul', + helpLink: 'Link de ajutor', + about: 'Despre', + createdBy: 'Creat de ', + nextStep: 'Pasul următor', + addNextStep: 'Adăugați următorul bloc în acest flux de lucru', + selectNextStep: 'Selectați blocul următor', + runThisStep: 'Rulează acest pas', + checklist: 'Listă de verificare', + checklistTip: 'Asigurați-vă că toate problemele sunt rezolvate înainte de publicare', + checklistResolved: 'Toate problemele sunt rezolvate', + organizeBlocks: 'Organizați blocurile', + change: 'Schimbă', + }, + nodes: { + common: { + outputVars: 'Variabile de ieșire', + insertVarTip: 'Inserați variabila', + memory: { + memory: 'Memorie', + memoryTip: 'Configurări de memorie pentru chat', + windowSize: 'Dimensiunea ferestrei', + conversationRoleName: 'Numele rolului de conversație', + user: 'Prefix utilizator', + assistant: 'Prefix asistent', + }, + memories: { + title: 'Amintiri', + tip: 'Memorie de chat', + builtIn: 'Încorporat', + }, + }, + start: { + required: 'obligatoriu', + inputField: 'Câmp de intrare', + builtInVar: 'Variabile încorporate', + outputVars: { + query: 'Intrare utilizator', + memories: { + des: 'Istoricul conversației', + type: 'tipul mesajului', + content: 'conținutul mesajului', + }, + files: 'Listă de fișiere', + }, + noVarTip: 'Setați intrările care pot fi utilizate în fluxul de lucru', + }, + end: { + outputs: 'Ieșiri', + output: { + type: 'tipul ieșirii', + variable: 'variabila de ieșire', + }, + type: { + 'none': 'Niciunul', + 'plain-text': 'Text simplu', + 'structured': 'Structurat', + }, + }, + answer: { + answer: 'Răspuns', + outputVars: 'Variabile de ieșire', + }, + llm: { + model: 'model', + variables: 'variabile', + context: 'context', + contextTooltip: 'Puteți importa Cunoștințe ca context', + notSetContextInPromptTip: 'Pentru a activa funcția de context, vă rugăm să completați variabila de context în PROMPT.', + prompt: 'prompt', + roleDescription: { + system: 'Dați instrucțiuni de nivel înalt pentru conversație', + user: 'Furnizați instrucțiuni, interogări sau orice intrare bazată pe text pentru model', + assistant: 'Răspunsurile modelului bazate pe mesajele utilizatorului', + }, + addMessage: 'Adăugați mesaj', + vision: 'viziune', + files: 'Fișiere', + resolution: { + name: 'Rezoluție', + high: 'Înaltă', + low: 'Scăzută', + }, + outputVars: { + output: 'Generați conținut', + usage: 'Informații despre utilizarea modelului', + }, + singleRun: { + variable: 'Variabilă', + }, + sysQueryInUser: 'sys.query în mesajul utilizatorului este obligatoriu', + }, + knowledgeRetrieval: { + queryVariable: 'Variabilă de interogare', + knowledge: 'Cunoștințe', + outputVars: { + output: 'Date segmentate de recuperare', + content: 'Conținut segmentat', + title: 'Titlu segmentat', + icon: 'Pictogramă segmentată', + url: 'URL segmentat', + metadata: 'Alte metadate', + }, + }, 
+ http: { + inputVars: 'Variabile de intrare', + api: 'API', + apiPlaceholder: 'Introduceți URL-ul, tastați \'/\' pentru a insera variabila', + notStartWithHttp: 'API-ul trebuie să înceapă cu http:// sau https://', + key: 'Cheie', + value: 'Valoare', + bulkEdit: 'Editare în bloc', + keyValueEdit: 'Editare cheie-valoare', + headers: 'Antete', + params: 'Parametri', + body: 'Corp', + outputVars: { + body: 'Conținutul răspunsului', + statusCode: 'Codul de stare al răspunsului', + headers: 'Listă anteturi răspuns JSON', + files: 'Listă de fișiere', + }, + authorization: { + 'authorization': 'Autorizare', + 'authorizationType': 'Tipul autorizării', + 'no-auth': 'Niciunul', + 'api-key': 'Cheie API', + 'auth-type': 'Tipul autentificării', + 'basic': 'De bază', + 'bearer': 'Bearer', + 'custom': 'Personalizat', + 'api-key-title': 'Cheie API', + 'header': 'Antet', + }, + insertVarPlaceholder: 'tastați \'/\' pentru a insera variabila', + timeout: { + title: 'Timeout', + connectLabel: 'Timeout de conectare', + connectPlaceholder: 'Introduceți timeout-ul de conectare în secunde', + readLabel: 'Timeout de citire', + readPlaceholder: 'Introduceți timeout-ul de citire în secunde', + writeLabel: 'Timeout de scriere', + writePlaceholder: 'Introduceți timeout-ul de scriere în secunde', + }, + }, + code: { + inputVars: 'Variabile de intrare', + outputVars: 'Variabile de ieșire', + }, + templateTransform: { + inputVars: 'Variabile de intrare', + code: 'Cod', + codeSupportTip: 'Acceptă doar Jinja2', + outputVars: { + output: 'Conținutul transformat', + }, + }, + ifElse: { + if: 'Dacă', + else: 'Altfel', + elseDescription: 'Utilizat pentru a defini logica care trebuie executată atunci când condiția dacă nu este îndeplinită.', + and: 'și', + or: 'sau', + operator: 'Operator', + notSetVariable: 'Vă rugăm să setați mai întâi variabila', + comparisonOperator: { + 'contains': 'conține', + 'not contains': 'nu conține', + 'start with': 'începe cu', + 'end with': 'se termină cu', + 'is': 'este', + 'is not': 'nu este', + 'empty': 'este gol', + 'not empty': 'nu este gol', + 'null': 'este nul', + 'not null': 'nu este nul', + }, + enterValue: 'Introduceți valoarea', + addCondition: 'Adăugați condiție', + conditionNotSetup: 'Condiția NU este configurată', + }, + variableAssigner: { + title: 'Atribuire variabile', + outputType: 'Tipul ieșirii', + outputVarType: 'Tipul variabilei de ieșire', + varNotSet: 'Variabila nu este setată', + noVarTip: 'Adăugați variabilele care trebuie atribuite', + type: { + string: 'Șir de caractere', + number: 'Număr', + object: 'Obiect', + array: 'Tablou', + }, + outputVars: { + output: 'Valoarea variabilei atribuite', + }, + }, + tool: { + toAuthorize: 'Pentru a autoriza', + inputVars: 'Variabile de intrare', + outputVars: { + text: 'Conținut generat de instrument', + files: { + title: 'Fișiere generate de instrument', + type: 'Tipul suportat. Acum suportă doar imaginea', + transfer_method: 'Metoda de transfer. 
Valoarea este remote_url sau local_file', + url: 'URL-ul imaginii', + upload_file_id: 'ID-ul fișierului încărcat', + }, + }, + }, + questionClassifiers: { + model: 'model', + inputVars: 'Variabile de intrare', + outputVars: { + className: 'Numele clasei', + }, + class: 'Clasă', + classNamePlaceholder: 'Scrieți numele clasei', + advancedSetting: 'Configurare avansată', + topicName: 'Numele subiectului', + topicPlaceholder: 'Scrieți numele subiectului', + addClass: 'Adăugați clasă', + instruction: 'Instrucțiune', + instructionPlaceholder: 'Scrieți instrucțiunea', + }, + }, + tracing: { + stopBy: 'Oprit de {{user}}', + }, +} + +export default translation From 98140ae5d98aa8b8d11f9a85cb8804555317d950 Mon Sep 17 00:00:00 2001 From: kerlion <40377268+kerlion@users.noreply.github.com> Date: Tue, 14 May 2024 19:54:31 +0800 Subject: [PATCH 078/267] fix the issue of MILVUS_DATABASE has no effect. (#4353) --- api/core/rag/datasource/vdb/milvus/milvus_vector.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/rag/datasource/vdb/milvus/milvus_vector.py b/api/core/rag/datasource/vdb/milvus/milvus_vector.py index c90fe3b188..0586e279d3 100644 --- a/api/core/rag/datasource/vdb/milvus/milvus_vector.py +++ b/api/core/rag/datasource/vdb/milvus/milvus_vector.py @@ -259,5 +259,5 @@ class MilvusVector(BaseVector): uri = "https://" + str(config.host) + ":" + str(config.port) else: uri = "http://" + str(config.host) + ":" + str(config.port) - client = MilvusClient(uri=uri, user=config.user, password=config.password) + client = MilvusClient(uri=uri, user=config.user, password=config.password,db_name=config.database) return client From 2eb468f8851802fd76a9d86899ce42ad90a0ed7a Mon Sep 17 00:00:00 2001 From: Charles Zhou Date: Tue, 14 May 2024 10:44:53 -0500 Subject: [PATCH 079/267] fix: add timeout to SMTPClient to prevent worker blocking (#4352) --- api/libs/smtp.py | 40 ++++++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/api/libs/smtp.py b/api/libs/smtp.py index 30a795bd70..f5735cbb2e 100644 --- a/api/libs/smtp.py +++ b/api/libs/smtp.py @@ -1,3 +1,4 @@ +import logging import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText @@ -13,15 +14,30 @@ class SMTPClient: self._use_tls = use_tls def send(self, mail: dict): - smtp = smtplib.SMTP(self.server, self.port) - if self._use_tls: - smtp.starttls() - if self.username and self.password: - smtp.login(self.username, self.password) - msg = MIMEMultipart() - msg['Subject'] = mail['subject'] - msg['From'] = self._from - msg['To'] = mail['to'] - msg.attach(MIMEText(mail['html'], 'html')) - smtp.sendmail(self.username, mail['to'], msg.as_string()) - smtp.quit() + smtp = None + try: + smtp = smtplib.SMTP(self.server, self.port, timeout=10) + if self._use_tls: + smtp.starttls() + if self.username and self.password: + smtp.login(self.username, self.password) + + msg = MIMEMultipart() + msg['Subject'] = mail['subject'] + msg['From'] = self._from + msg['To'] = mail['to'] + msg.attach(MIMEText(mail['html'], 'html')) + + smtp.sendmail(self._from, mail['to'], msg.as_string()) + except smtplib.SMTPException as e: + logging.error(f"SMTP error occurred: {str(e)}") + raise + except TimeoutError as e: + logging.error(f"Timeout occurred while sending email: {str(e)}") + raise + except Exception as e: + logging.error(f"Unexpected error occurred while sending email: {str(e)}") + raise + finally: + if smtp: + smtp.quit() From 5d6d0e63c538a1a78fbc4d8203425c783420769d Mon Sep 
17 00:00:00 2001 From: Shuto Otaki <105141999+shutootaki@users.noreply.github.com> Date: Wed, 15 May 2024 08:48:19 +0900 Subject: [PATCH 080/267] docs: Add CONTRIBUTING_JA.md (#4383) --- CONTRIBUTING_JA.md | 160 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 160 insertions(+) create mode 100644 CONTRIBUTING_JA.md diff --git a/CONTRIBUTING_JA.md b/CONTRIBUTING_JA.md new file mode 100644 index 0000000000..c9329d6102 --- /dev/null +++ b/CONTRIBUTING_JA.md @@ -0,0 +1,160 @@ +Dify にコントリビュートしたいとお考えなのですね。それは素晴らしいことです。 +私たちは、LLM アプリケーションの構築と管理のための最も直感的なワークフローを設計するという壮大な野望を持っています。人数も資金も限られている新興企業として、コミュニティからの支援は本当に重要です。 + +私たちは現状を鑑み、機敏かつ迅速に開発をする必要がありますが、同時にあなたのようなコントリビューターの方々に、可能な限りスムーズな貢献体験をしていただきたいと思っています。そのためにこのコントリビュートガイドを作成しました。 +コードベースやコントリビュータの方々と私たちがどのように仕事をしているのかに慣れていただき、楽しいパートにすぐに飛び込めるようにすることが目的です。 + +このガイドは Dify そのものと同様に、継続的に改善されています。実際のプロジェクトに遅れをとることがあるかもしれませんが、ご理解をお願いします。 + +ライセンスに関しては、私たちの短い[ライセンスおよびコントリビューター規約](./LICENSE)をお読みください。また、コミュニティは[行動規範](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md)を遵守しています。 + +## 飛び込む前に + +[既存の Issue](https://github.com/langgenius/dify/issues?q=is:issue+is:closed) を探すか、[新しい Issue](https://github.com/langgenius/dify/issues/new/choose) を作成してください。私たちは Issue を 2 つのタイプに分類しています。 + +### 機能リクエスト + +* 新しい機能要望を出す場合は、提案する機能が何を実現するものなのかを説明し、可能な限り多くの文脈を含めてください。[@perzeusss](https://github.com/perzeuss)は、あなたの要望を書き出すのに役立つ [Feature Request Copilot](https://udify.app/chat/MK2kVSnw1gakVwMX) を作ってくれました。気軽に試してみてください。 + +* 既存の課題から 1 つ選びたい場合は、その下にコメントを書いてください。 + + 関連する方向で作業しているチームメンバーが参加します。すべてが良好であれば、コーディングを開始する許可が与えられます。私たちが変更を提案した場合にあなたの作業が無駄になることがないよう、それまでこの機能の作業を控えていただくようお願いいたします。 + + 提案された機能がどの分野に属するかによって、あなたは異なるチーム・メンバーと話をするかもしれません。以下は、各チームメンバーが現在取り組んでいる分野の概要です。 + +| Member | Scope | +| --------------------------------------------------------------------------------------- | ------------------------------------ | +| [@yeuoly](https://github.com/Yeuoly) | エージェントアーキテクチャ | +| [@jyong](https://github.com/JohnJyong) | RAG パイプライン設計 | +| [@GarfieldDai](https://github.com/GarfieldDai) | workflow orchestrations の構築 | +| [@iamjoel](https://github.com/iamjoel) & [@zxhlyh](https://github.com/zxhlyh) | フロントエンドを使いやすくする | +| [@guchenhe](https://github.com/guchenhe) & [@crazywoola](https://github.com/crazywoola) | 開発者体験、何でも相談できる窓口 | +| [@takatost](https://github.com/takatost) | 全体的な製品の方向性とアーキテクチャ | + +優先順位の付け方: + +| Feature Type | Priority | +| --------------------------------------------------------------------------------------------------------------------- | --------------- | +| チームメンバーによってラベル付けされた優先度の高い機能 | High Priority | +| [community feedback board](https://github.com/langgenius/dify/discussions/categories/feedbacks)の人気の機能リクエスト | Medium Priority | +| 非コア機能とマイナーな機能強化 | Low Priority | +| 価値はあるが即効性はない | Future-Feature | + +### その他 (バグレポート、パフォーマンスの最適化、誤字の修正など) + +* すぐにコーディングを始めてください + +優先順位の付け方: + +| Issue Type | Priority | +| -------------------------------------------------------------------------------------- | --------------- | +| コア機能のバグ(ログインできない、アプリケーションが動作しない、セキュリティの抜け穴) | Critical | +| 致命的でないバグ、パフォーマンス向上 | Medium Priority | +| 細かな修正(誤字脱字、機能はするが分かりにくい UI) | Low Priority | + +## インストール + +Dify を開発用にセットアップする手順は以下の通りです。 + +### 1. このリポジトリをフォークする + +### 2. リポジトリをクローンする + +フォークしたリポジトリをターミナルからクローンします。 + +``` +git clone git@github.com:/dify.git +``` + +### 3. 
依存関係の確認 + +Dify を構築するには次の依存関係が必要です。それらがシステムにインストールされていることを確認してください。 + +- [Docker](https://www.docker.com/) +- [Docker Compose](https://docs.docker.com/compose/install/) +- [Node.js v18.x (LTS)](http://nodejs.org) +- [npm](https://www.npmjs.com/) version 8.x.x or [Yarn](https://yarnpkg.com/) +- [Python](https://www.python.org/) version 3.10.x + +### 4. インストール + +Dify はバックエンドとフロントエンドから構成されています。 +まず`cd api/`でバックエンドのディレクトリに移動し、[Backend README](api/README.md)に従ってインストールします。 +次に別のターミナルで、`cd web/`でフロントエンドのディレクトリに移動し、[Frontend README](web/README.md)に従ってインストールしてください。 + +よくある問題とトラブルシューティングの手順については、[installation FAQ](https://docs.dify.ai/getting-started/faq/install-faq) を確認してください。 + +### 5. ブラウザで dify にアクセスする + +設定を確認するために、ブラウザで[http://localhost:3000](http://localhost:3000)(デフォルト、または自分で設定した URL とポート)にアクセスしてください。Dify が起動して実行中であることが確認できるはずです。 + +## 開発中 + +モデルプロバイダーを追加する場合は、[このガイド](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/README.md)が役立ちます。 + +Agent や Workflow にツールプロバイダーを追加する場合は、[このガイド](./api/core/tools/README.md)が役立ちます。 + +Dify のバックエンドとフロントエンドの概要を簡単に説明します。 + +### バックエンド + +Dify のバックエンドは[Flask](https://flask.palletsprojects.com/en/3.0.x/)を使って Python で書かれています。ORM には[SQLAlchemy](https://www.sqlalchemy.org/)を、タスクキューには[Celery](https://docs.celeryq.dev/en/stable/getting-started/introduction.html)を使っています。認証ロジックは Flask-login 経由で行われます。 + +``` +[api/] +├── constants // コードベース全体で使用される定数設定 +├── controllers // APIルート定義とリクエスト処理ロジック +├── core // アプリケーションの中核的な管理、モデル統合、およびツール +├── docker // Dockerおよびコンテナ関連の設定 +├── events // イベントのハンドリングと処理 +├── extensions // 第三者のフレームワーク/プラットフォームとの拡張 +├── fields // シリアライゼーション/マーシャリング用のフィールド定義 +├── libs // 再利用可能なライブラリとヘルパー +├── migrations // データベースマイグレーションスクリプト +├── models // データベースモデルとスキーマ定義 +├── services // ビジネスロジックの定義 +├── storage // 秘密鍵の保存 +├── tasks // 非同期タスクとバックグラウンドジョブの処理 +└── tests // テスト関連のファイル +``` + +### フロントエンド + +このウェブサイトは、Typescript の[Next.js](https://nextjs.org/)ボイラープレートでブートストラップされており、スタイリングには[Tailwind CSS](https://tailwindcss.com/)を使用しています。国際化には[React-i18next](https://react.i18next.com/)を使用しています。 + +``` +[web/] +├── app // レイアウト、ページ、コンポーネント +│ ├── (commonLayout) // アプリ全体で共通のレイアウト +│ ├── (shareLayout) // トークン特有のセッションで共有されるレイアウト +│ ├── activate // アクティベートページ +│ ├── components // ページやレイアウトで共有されるコンポーネント +│ ├── install // インストールページ +│ ├── signin // サインインページ +│ └── styles // グローバルに共有されるスタイル +├── assets // 静的アセット +├── bin // ビルドステップで実行されるスクリプト +├── config // 調整可能な設定とオプション +├── context // アプリの異なる部分で使用される共有コンテキスト +├── dictionaries // 言語別の翻訳ファイル +├── docker // コンテナ設定 +├── hooks // 再利用可能なフック +├── i18n // 国際化設定 +├── models // データモデルとAPIレスポンスの形状を記述 +├── public // ファビコンなどのメタアセット +├── service // APIアクションの形状を指定 +├── test +├── types // 関数のパラメータと戻り値の記述 +└── utils // 共有ユーティリティ関数 +``` + +## PR を投稿する + +いよいよ、私たちのリポジトリにプルリクエスト (PR) を提出する時が来ました。主要な機能については、まず `deploy/dev` ブランチにマージしてテストしてから `main` ブランチにマージします。 +マージ競合などの問題が発生した場合、またはプル リクエストを開く方法がわからない場合は、[GitHub's pull request tutorial](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests) をチェックしてみてください。 + +これで完了です!あなたの PR がマージされると、[README](https://github.com/langgenius/dify/blob/main/README.md) にコントリビューターとして紹介されます。 + +## ヘルプを得る + +コントリビュート中に行き詰まったり、疑問が生じたりした場合は、GitHub の関連する issue から質問していただくか、[Discord](https://discord.gg/8Tpq4AcN9c)でチャットしてください。 From e2a78888b92c8ec34bd1bba289a9c00c2b4c5d80 Mon Sep 17 00:00:00 2001 From: Kota Matsumoto Date: Wed, 15 May 2024 09:05:41 +0900 Subject: [PATCH 081/267] Fix: setup google-storage client (#4296) Co-authored-by: kotamat Co-authored-by: crazywoola 
<427733928@qq.com> --- api/extensions/storage/google_storage.py | 14 ++++++++++---- docker/docker-compose.yaml | 2 ++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/api/extensions/storage/google_storage.py b/api/extensions/storage/google_storage.py index f6c69eb0ae..97004fddab 100644 --- a/api/extensions/storage/google_storage.py +++ b/api/extensions/storage/google_storage.py @@ -1,4 +1,5 @@ import base64 +import io from collections.abc import Generator from contextlib import closing @@ -15,14 +16,19 @@ class GoogleStorage(BaseStorage): super().__init__(app) app_config = self.app.config self.bucket_name = app_config.get('GOOGLE_STORAGE_BUCKET_NAME') - service_account_json = base64.b64decode(app_config.get('GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64')).decode( - 'utf-8') - self.client = GoogleCloudStorage.Client().from_service_account_json(service_account_json) + service_account_json_str = app_config.get('GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64') + # if service_account_json_str is empty, use Application Default Credentials + if service_account_json_str: + service_account_json = base64.b64decode(service_account_json_str).decode('utf-8') + self.client = GoogleCloudStorage.Client.from_service_account_info(service_account_json) + else: + self.client = GoogleCloudStorage.Client() def save(self, filename, data): bucket = self.client.get_bucket(self.bucket_name) blob = bucket.blob(filename) - blob.upload_from_file(data) + with io.BytesIO(data) as stream: + blob.upload_from_file(stream) def load_once(self, filename: str) -> bytes: bucket = self.client.get_bucket(self.bucket_name) diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index cad382a860..edd4106b2d 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -88,6 +88,7 @@ services: AZURE_BLOB_ACCOUNT_URL: 'https://.blob.core.windows.net' # The Google storage configurations, only available when STORAGE_TYPE is `google-storage`. GOOGLE_STORAGE_BUCKET_NAME: 'yout-bucket-name' + # if you want to use Application Default Credentials, you can leave GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64 empty. GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: 'your-google-service-account-json-base64-string' # The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`. VECTOR_STORE: weaviate @@ -226,6 +227,7 @@ services: AZURE_BLOB_ACCOUNT_URL: 'https://.blob.core.windows.net' # The Google storage configurations, only available when STORAGE_TYPE is `google-storage`. GOOGLE_STORAGE_BUCKET_NAME: 'yout-bucket-name' + # if you want to use Application Default Credentials, you can leave GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64 empty. GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: 'your-google-service-account-json-base64-string' # The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`, `pgvector`. 
VECTOR_STORE: weaviate From 332baca538feb9e50b99e0bca7bcb2b816bdb66c Mon Sep 17 00:00:00 2001 From: Yash Parmar <82636823+Yash-1511@users.noreply.github.com> Date: Wed, 15 May 2024 05:35:54 +0530 Subject: [PATCH 082/267] FIX: fix the temperature value of ollama model (#4027) --- api/core/model_runtime/model_providers/ollama/llm/llm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/model_runtime/model_providers/ollama/llm/llm.py b/api/core/model_runtime/model_providers/ollama/llm/llm.py index fcb94084a5..894195dcc1 100644 --- a/api/core/model_runtime/model_providers/ollama/llm/llm.py +++ b/api/core/model_runtime/model_providers/ollama/llm/llm.py @@ -451,7 +451,7 @@ class OllamaLargeLanguageModel(LargeLanguageModel): "more creatively. (Default: 0.8)"), default=0.1, min=0, - max=2 + max=1 ), ParameterRule( name=DefaultParameterName.TOP_P.value, From 1d0f88264f5a531b2dd5b462712cee160874afab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Wed, 15 May 2024 11:05:46 +0800 Subject: [PATCH 083/267] Fix HTTP REQUEST NODE is always waiting but endpoint have responsed (#4395) --- api/core/workflow/nodes/http_request/http_executor.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/api/core/workflow/nodes/http_request/http_executor.py b/api/core/workflow/nodes/http_request/http_executor.py index 97cb59d02d..2aa79f51ac 100644 --- a/api/core/workflow/nodes/http_request/http_executor.py +++ b/api/core/workflow/nodes/http_request/http_executor.py @@ -25,7 +25,10 @@ class HttpExecutorResponse: response: Union[httpx.Response, requests.Response] def __init__(self, response: Union[httpx.Response, requests.Response] = None): - self.headers = response.headers + self.headers = {} + if isinstance(response, httpx.Response | requests.Response): + for k, v in response.headers.items(): + self.headers[k] = v self.response = response @property From 182dadd4335eca1404e11bf178fb5dd4071d8895 Mon Sep 17 00:00:00 2001 From: Yeuoly <45712896+Yeuoly@users.noreply.github.com> Date: Wed, 15 May 2024 12:25:04 +0800 Subject: [PATCH 084/267] chore: remove model as tool (#4409) --- .../console/workspace/tool_providers.py | 27 -- api/core/tools/entities/user_entities.py | 1 - api/core/tools/model_tools/anthropic.yaml | 20 -- api/core/tools/model_tools/google.yaml | 13 - api/core/tools/model_tools/openai.yaml | 13 - api/core/tools/model_tools/zhipuai.yaml | 13 - api/core/tools/provider/_position.yaml | 4 - api/core/tools/provider/builtin/_positions.py | 5 +- .../tools/provider/model_tool_provider.py | 244 ------------------ api/core/tools/tool/model_tool.py | 159 ------------ api/core/tools/tool_manager.py | 67 ----- api/core/tools/utils/configuration.py | 68 +---- api/services/tools_transform_service.py | 28 -- 13 files changed, 2 insertions(+), 660 deletions(-) delete mode 100644 api/core/tools/model_tools/anthropic.yaml delete mode 100644 api/core/tools/model_tools/google.yaml delete mode 100644 api/core/tools/model_tools/openai.yaml delete mode 100644 api/core/tools/model_tools/zhipuai.yaml delete mode 100644 api/core/tools/provider/model_tool_provider.py delete mode 100644 api/core/tools/tool/model_tool.py diff --git a/api/controllers/console/workspace/tool_providers.py b/api/controllers/console/workspace/tool_providers.py index b02008339e..3057de4559 100644 --- a/api/controllers/console/workspace/tool_providers.py +++ b/api/controllers/console/workspace/tool_providers.py @@ -98,31 +98,6 @@ class ToolBuiltinProviderIconApi(Resource): 
icon_cache_max_age = int(current_app.config.get('TOOL_ICON_CACHE_MAX_AGE')) return send_file(io.BytesIO(icon_bytes), mimetype=mimetype, max_age=icon_cache_max_age) -class ToolModelProviderIconApi(Resource): - @setup_required - def get(self, provider): - icon_bytes, mimetype = ToolManageService.get_model_tool_provider_icon(provider) - return send_file(io.BytesIO(icon_bytes), mimetype=mimetype) - -class ToolModelProviderListToolsApi(Resource): - @setup_required - @login_required - @account_initialization_required - def get(self): - user_id = current_user.id - tenant_id = current_user.current_tenant_id - - parser = reqparse.RequestParser() - parser.add_argument('provider', type=str, required=True, nullable=False, location='args') - - args = parser.parse_args() - - return jsonable_encoder(ToolManageService.list_model_tool_provider_tools( - user_id, - tenant_id, - args['provider'], - )) - class ToolApiProviderAddApi(Resource): @setup_required @login_required @@ -350,8 +325,6 @@ api.add_resource(ToolBuiltinProviderUpdateApi, '/workspaces/current/tool-provide api.add_resource(ToolBuiltinProviderGetCredentialsApi, '/workspaces/current/tool-provider/builtin//credentials') api.add_resource(ToolBuiltinProviderCredentialsSchemaApi, '/workspaces/current/tool-provider/builtin//credentials_schema') api.add_resource(ToolBuiltinProviderIconApi, '/workspaces/current/tool-provider/builtin//icon') -api.add_resource(ToolModelProviderIconApi, '/workspaces/current/tool-provider/model//icon') -api.add_resource(ToolModelProviderListToolsApi, '/workspaces/current/tool-provider/model/tools') api.add_resource(ToolApiProviderAddApi, '/workspaces/current/tool-provider/api/add') api.add_resource(ToolApiProviderGetRemoteSchemaApi, '/workspaces/current/tool-provider/api/remote') api.add_resource(ToolApiProviderListToolsApi, '/workspaces/current/tool-provider/api/tools') diff --git a/api/core/tools/entities/user_entities.py b/api/core/tools/entities/user_entities.py index 171bf831e2..48fe5b0ed5 100644 --- a/api/core/tools/entities/user_entities.py +++ b/api/core/tools/entities/user_entities.py @@ -20,7 +20,6 @@ class UserToolProvider(BaseModel): BUILTIN = "builtin" APP = "app" API = "api" - MODEL = "model" id: str author: str diff --git a/api/core/tools/model_tools/anthropic.yaml b/api/core/tools/model_tools/anthropic.yaml deleted file mode 100644 index 4ccb973df5..0000000000 --- a/api/core/tools/model_tools/anthropic.yaml +++ /dev/null @@ -1,20 +0,0 @@ -provider: anthropic -label: - en_US: Anthropic Model Tools - zh_Hans: Anthropic 模型能力 - pt_BR: Anthropic Model Tools -models: - - type: llm - model: claude-3-sonnet-20240229 - label: - zh_Hans: Claude3 Sonnet 视觉 - en_US: Claude3 Sonnet Vision - properties: - image_parameter_name: image_id - - type: llm - model: claude-3-opus-20240229 - label: - zh_Hans: Claude3 Opus 视觉 - en_US: Claude3 Opus Vision - properties: - image_parameter_name: image_id diff --git a/api/core/tools/model_tools/google.yaml b/api/core/tools/model_tools/google.yaml deleted file mode 100644 index d81e1b0735..0000000000 --- a/api/core/tools/model_tools/google.yaml +++ /dev/null @@ -1,13 +0,0 @@ -provider: google -label: - en_US: Google Model Tools - zh_Hans: Google 模型能力 - pt_BR: Google Model Tools -models: - - type: llm - model: gemini-pro-vision - label: - zh_Hans: Gemini Pro 视觉 - en_US: Gemini Pro Vision - properties: - image_parameter_name: image_id diff --git a/api/core/tools/model_tools/openai.yaml b/api/core/tools/model_tools/openai.yaml deleted file mode 100644 index 45cbb295a9..0000000000 --- 
a/api/core/tools/model_tools/openai.yaml +++ /dev/null @@ -1,13 +0,0 @@ -provider: openai -label: - en_US: OpenAI Model Tools - zh_Hans: OpenAI 模型能力 - pt_BR: OpenAI Model Tools -models: - - type: llm - model: gpt-4-vision-preview - label: - zh_Hans: GPT-4 视觉 - en_US: GPT-4 Vision - properties: - image_parameter_name: image_id diff --git a/api/core/tools/model_tools/zhipuai.yaml b/api/core/tools/model_tools/zhipuai.yaml deleted file mode 100644 index 19a932eb89..0000000000 --- a/api/core/tools/model_tools/zhipuai.yaml +++ /dev/null @@ -1,13 +0,0 @@ -provider: zhipuai -label: - en_US: ZhipuAI Model Tools - zh_Hans: ZhipuAI 模型能力 - pt_BR: ZhipuAI Model Tools -models: - - type: llm - model: glm-4v - label: - zh_Hans: GLM-4 视觉 - en_US: GLM-4 Vision - properties: - image_parameter_name: image_id diff --git a/api/core/tools/provider/_position.yaml b/api/core/tools/provider/_position.yaml index 5c9454c11c..a39a00fffd 100644 --- a/api/core/tools/provider/_position.yaml +++ b/api/core/tools/provider/_position.yaml @@ -6,16 +6,12 @@ - azuredalle - stability - wikipedia -- model.openai -- model.google -- model.anthropic - yahoo - arxiv - pubmed - stablediffusion - webscraper - jina -- model.zhipuai - aippt - youtube - code diff --git a/api/core/tools/provider/builtin/_positions.py b/api/core/tools/provider/builtin/_positions.py index 2bf70bd356..d0826ddcf0 100644 --- a/api/core/tools/provider/builtin/_positions.py +++ b/api/core/tools/provider/builtin/_positions.py @@ -13,10 +13,7 @@ class BuiltinToolProviderSort: cls._position = get_position_map(os.path.join(os.path.dirname(__file__), '..')) def name_func(provider: UserToolProvider) -> str: - if provider.type == UserToolProvider.ProviderType.MODEL: - return f'model.{provider.name}' - else: - return provider.name + return provider.name sorted_providers = sort_by_position_map(cls._position, providers, name_func) diff --git a/api/core/tools/provider/model_tool_provider.py b/api/core/tools/provider/model_tool_provider.py deleted file mode 100644 index ef47e9aae9..0000000000 --- a/api/core/tools/provider/model_tool_provider.py +++ /dev/null @@ -1,244 +0,0 @@ -from copy import deepcopy -from typing import Any - -from core.entities.model_entities import ModelStatus -from core.errors.error import ProviderTokenNotInitError -from core.model_manager import ModelInstance -from core.model_runtime.entities.model_entities import ModelFeature, ModelType -from core.provider_manager import ProviderConfiguration, ProviderManager, ProviderModelBundle -from core.tools.entities.common_entities import I18nObject -from core.tools.entities.tool_entities import ( - ModelToolPropertyKey, - ToolDescription, - ToolIdentity, - ToolParameter, - ToolProviderCredentials, - ToolProviderIdentity, - ToolProviderType, -) -from core.tools.errors import ToolNotFoundError -from core.tools.provider.tool_provider import ToolProviderController -from core.tools.tool.model_tool import ModelTool -from core.tools.tool.tool import Tool -from core.tools.utils.configuration import ModelToolConfigurationManager - - -class ModelToolProviderController(ToolProviderController): - configuration: ProviderConfiguration = None - is_active: bool = False - - def __init__(self, configuration: ProviderConfiguration = None, **kwargs): - """ - init the provider - - :param data: the data of the provider - """ - super().__init__(**kwargs) - self.configuration = configuration - - @staticmethod - def from_db(configuration: ProviderConfiguration = None) -> 'ModelToolProviderController': - """ - init the provider from db 
- - :param configuration: the configuration of the provider - """ - # check if all models are active - if configuration is None: - return None - is_active = True - models = configuration.get_provider_models() - for model in models: - if model.status != ModelStatus.ACTIVE: - is_active = False - break - - # get the provider configuration - model_tool_configuration = ModelToolConfigurationManager.get_configuration(configuration.provider.provider) - if model_tool_configuration is None: - raise RuntimeError(f'no configuration found for provider {configuration.provider.provider}') - - # override the configuration - if model_tool_configuration.label: - label = deepcopy(model_tool_configuration.label) - if label.en_US: - label.en_US = model_tool_configuration.label.en_US - if label.zh_Hans: - label.zh_Hans = model_tool_configuration.label.zh_Hans - else: - label = I18nObject( - en_US=configuration.provider.label.en_US, - zh_Hans=configuration.provider.label.zh_Hans - ) - - return ModelToolProviderController( - is_active=is_active, - identity=ToolProviderIdentity( - author='Dify', - name=configuration.provider.provider, - description=I18nObject( - zh_Hans=f'{label.zh_Hans} 模型能力提供商', - en_US=f'{label.en_US} model capability provider' - ), - label=I18nObject( - zh_Hans=label.zh_Hans, - en_US=label.en_US - ), - icon=configuration.provider.icon_small.en_US, - ), - configuration=configuration, - credentials_schema={}, - ) - - @staticmethod - def is_configuration_valid(configuration: ProviderConfiguration) -> bool: - """ - check if the configuration has a model can be used as a tool - """ - models = configuration.get_provider_models() - for model in models: - if model.model_type == ModelType.LLM and ModelFeature.VISION in (model.features or []): - return True - return False - - def _get_model_tools(self, tenant_id: str = None) -> list[ModelTool]: - """ - returns a list of tools that the provider can provide - - :return: list of tools - """ - tenant_id = tenant_id or 'ffffffff-ffff-ffff-ffff-ffffffffffff' - provider_manager = ProviderManager() - if self.configuration is None: - configurations = provider_manager.get_configurations(tenant_id=tenant_id).values() - self.configuration = next(filter(lambda x: x.provider == self.identity.name, configurations), None) - # get all tools - tools: list[ModelTool] = [] - # get all models - if not self.configuration: - return tools - configuration = self.configuration - - provider_configuration = ModelToolConfigurationManager.get_configuration(configuration.provider.provider) - if provider_configuration is None: - raise RuntimeError(f'no configuration found for provider {configuration.provider.provider}') - - for model in configuration.get_provider_models(): - model_configuration = ModelToolConfigurationManager.get_model_configuration(self.configuration.provider.provider, model.model) - if model_configuration is None: - continue - - if model.model_type == ModelType.LLM and ModelFeature.VISION in (model.features or []): - provider_instance = configuration.get_provider_instance() - model_type_instance = provider_instance.get_model_instance(model.model_type) - provider_model_bundle = ProviderModelBundle( - configuration=configuration, - provider_instance=provider_instance, - model_type_instance=model_type_instance - ) - - try: - model_instance = ModelInstance(provider_model_bundle, model.model) - except ProviderTokenNotInitError: - model_instance = None - - tools.append(ModelTool( - identity=ToolIdentity( - author='Dify', - name=model.model, - 
label=model_configuration.label, - ), - parameters=[ - ToolParameter( - name=ModelToolPropertyKey.IMAGE_PARAMETER_NAME.value, - label=I18nObject(zh_Hans='图片ID', en_US='Image ID'), - human_description=I18nObject(zh_Hans='图片ID', en_US='Image ID'), - type=ToolParameter.ToolParameterType.STRING, - form=ToolParameter.ToolParameterForm.LLM, - required=True, - default=Tool.VARIABLE_KEY.IMAGE.value - ) - ], - description=ToolDescription( - human=I18nObject(zh_Hans='图生文工具', en_US='Convert image to text'), - llm='Vision tool used to extract text and other visual information from images, can be used for OCR, image captioning, etc.', - ), - is_team_authorization=model.status == ModelStatus.ACTIVE, - tool_type=ModelTool.ModelToolType.VISION, - model_instance=model_instance, - model=model.model, - )) - - self.tools = tools - return tools - - def get_credentials_schema(self) -> dict[str, ToolProviderCredentials]: - """ - returns the credentials schema of the provider - - :return: the credentials schema - """ - return {} - - def get_tools(self, user_id: str, tenant_id: str) -> list[ModelTool]: - """ - returns a list of tools that the provider can provide - - :return: list of tools - """ - return self._get_model_tools(tenant_id=tenant_id) - - def get_tool(self, tool_name: str) -> ModelTool: - """ - get tool by name - - :param tool_name: the name of the tool - :return: the tool - """ - if self.tools is None: - self.get_tools(user_id='', tenant_id=self.configuration.tenant_id) - - for tool in self.tools: - if tool.identity.name == tool_name: - return tool - - raise ValueError(f'tool {tool_name} not found') - - def get_parameters(self, tool_name: str) -> list[ToolParameter]: - """ - returns the parameters of the tool - - :param tool_name: the name of the tool, defined in `get_tools` - :return: list of parameters - """ - tool = next(filter(lambda x: x.identity.name == tool_name, self.get_tools()), None) - if tool is None: - raise ToolNotFoundError(f'tool {tool_name} not found') - return tool.parameters - - @property - def app_type(self) -> ToolProviderType: - """ - returns the type of the provider - - :return: type of the provider - """ - return ToolProviderType.MODEL - - def validate_credentials(self, credentials: dict[str, Any]) -> None: - """ - validate the credentials of the provider - - :param tool_name: the name of the tool, defined in `get_tools` - :param credentials: the credentials of the tool - """ - pass - - def _validate_credentials(self, credentials: dict[str, Any]) -> None: - """ - validate the credentials of the provider - - :param tool_name: the name of the tool, defined in `get_tools` - :param credentials: the credentials of the tool - """ - pass \ No newline at end of file diff --git a/api/core/tools/tool/model_tool.py b/api/core/tools/tool/model_tool.py deleted file mode 100644 index b87e85f89c..0000000000 --- a/api/core/tools/tool/model_tool.py +++ /dev/null @@ -1,159 +0,0 @@ -from base64 import b64encode -from enum import Enum -from typing import Any, cast - -from core.model_manager import ModelInstance -from core.model_runtime.entities.llm_entities import LLMResult -from core.model_runtime.entities.message_entities import ( - PromptMessageContent, - PromptMessageContentType, - SystemPromptMessage, - UserPromptMessage, -) -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.tools.entities.tool_entities import ModelToolPropertyKey, ToolInvokeMessage, ToolProviderType -from core.tools.tool.tool import Tool - -VISION_PROMPT = """## Image 
Recognition Task -### Task Description -I require a powerful vision language model for an image recognition task. The model should be capable of extracting various details from the images, including but not limited to text content, layout distribution, color distribution, main subjects, and emotional expressions. -### Specific Requirements -1. **Text Content Extraction:** Ensure that the model accurately recognizes and extracts text content from the images, regardless of text size, font, or color. -2. **Layout Distribution Analysis:** The model should analyze the layout structure of the images, capturing the relationships between various elements and providing detailed information about the image layout. -3. **Color Distribution Analysis:** Extract information about color distribution in the images, including primary colors, color combinations, and other relevant details. -4. **Main Subject Recognition:** The model should accurately identify the main subjects in the images and provide detailed descriptions of these subjects. -5. **Emotional Expression Analysis:** Analyze and describe the emotions or expressions conveyed in the images based on facial expressions, postures, and other relevant features. -### Additional Considerations -- Ensure that the extracted information is as comprehensive and accurate as possible. -- For each task, provide confidence scores or relevance scores for the model outputs to assess the reliability of the results. -- If necessary, pose specific questions for different tasks to guide the model in better understanding the images and providing relevant information.""" - -class ModelTool(Tool): - class ModelToolType(Enum): - """ - the type of the model tool - """ - VISION = 'vision' - - model_configuration: dict[str, Any] = None - tool_type: ModelToolType - - def __init__(self, model_instance: ModelInstance = None, model: str = None, - tool_type: ModelToolType = ModelToolType.VISION, - properties: dict[ModelToolPropertyKey, Any] = None, - **kwargs): - """ - init the tool - """ - kwargs['model_configuration'] = { - 'model_instance': model_instance, - 'model': model, - 'properties': properties - } - kwargs['tool_type'] = tool_type - super().__init__(**kwargs) - - """ - Model tool - """ - def fork_tool_runtime(self, meta: dict[str, Any]) -> 'Tool': - """ - fork a new tool with meta data - - :param meta: the meta data of a tool call processing, tenant_id is required - :return: the new tool - """ - return self.__class__( - identity=self.identity.copy() if self.identity else None, - parameters=self.parameters.copy() if self.parameters else None, - description=self.description.copy() if self.description else None, - model_instance=self.model_configuration['model_instance'], - model=self.model_configuration['model'], - tool_type=self.tool_type, - runtime=Tool.Runtime(**meta) - ) - - def validate_credentials(self, credentials: dict[str, Any], parameters: dict[str, Any], format_only: bool = False) -> None: - """ - validate the credentials for Model tool - """ - pass - - def tool_provider_type(self) -> ToolProviderType: - return ToolProviderType.BUILT_IN - - def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage | list[ToolInvokeMessage]: - """ - """ - model_instance = self.model_configuration['model_instance'] - if not model_instance: - return self.create_text_message('the tool is not configured correctly') - - if self.tool_type == ModelTool.ModelToolType.VISION: - return self._invoke_llm_vision(user_id, tool_parameters) - else: - return 
self.create_text_message('the tool is not configured correctly') - - def _invoke_llm_vision(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage | list[ToolInvokeMessage]: - # get image - image_parameter_name = self.model_configuration['properties'].get(ModelToolPropertyKey.IMAGE_PARAMETER_NAME, 'image_id') - image_id = tool_parameters.pop(image_parameter_name, '') - if not image_id: - image = self.get_default_image_variable() - if not image: - return self.create_text_message('Please upload an image or input image_id') - else: - image = self.get_variable(image_id) - if not image: - image = self.get_default_image_variable() - if not image: - return self.create_text_message('Please upload an image or input image_id') - - if not image: - return self.create_text_message('Please upload an image or input image_id') - - # get image - image = self.get_variable_file(image.name) - if not image: - return self.create_text_message('Failed to get image') - - # organize prompt messages - prompt_messages = [ - SystemPromptMessage( - content=VISION_PROMPT - ), - UserPromptMessage( - content=[ - PromptMessageContent( - type=PromptMessageContentType.TEXT, - data='Recognize the image and extract the information from the image.' - ), - PromptMessageContent( - type=PromptMessageContentType.IMAGE, - data=f'data:image/png;base64,{b64encode(image).decode("utf-8")}' - ) - ] - ) - ] - - llm_instance = cast(LargeLanguageModel, self.model_configuration['model_instance']) - result: LLMResult = llm_instance.invoke( - model=self.model_configuration['model'], - credentials=self.runtime.credentials, - prompt_messages=prompt_messages, - model_parameters=tool_parameters, - tools=[], - stop=[], - stream=False, - user=user_id, - ) - - if not result: - return self.create_text_message('Failed to extract information from the image') - - # get result - content = result.message.content - if not content: - return self.create_text_message('Failed to extract information from the image') - - return self.create_text_message(content) \ No newline at end of file diff --git a/api/core/tools/tool_manager.py b/api/core/tools/tool_manager.py index a29bdfcd11..d46f1f22a5 100644 --- a/api/core/tools/tool_manager.py +++ b/api/core/tools/tool_manager.py @@ -10,7 +10,6 @@ from flask import current_app from core.agent.entities import AgentToolEntity from core.model_runtime.utils.encoders import jsonable_encoder -from core.provider_manager import ProviderManager from core.tools import * from core.tools.entities.common_entities import I18nObject from core.tools.entities.tool_entities import ( @@ -22,7 +21,6 @@ from core.tools.errors import ToolProviderNotFoundError from core.tools.provider.api_tool_provider import ApiBasedToolProviderController from core.tools.provider.builtin._positions import BuiltinToolProviderSort from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController -from core.tools.provider.model_tool_provider import ModelToolProviderController from core.tools.tool.api_tool import ApiTool from core.tools.tool.builtin_tool import BuiltinTool from core.tools.tool.tool import Tool @@ -159,19 +157,6 @@ class ToolManager: 'tenant_id': tenant_id, 'credentials': decrypted_credentials, }) - elif provider_type == 'model': - if tenant_id is None: - raise ValueError('tenant id is required for model provider') - # get model provider - model_provider = cls.get_model_provider(tenant_id, provider_name) - - # get tool - model_tool = model_provider.get_tool(tool_name) - - return 
model_tool.fork_tool_runtime(meta={ - 'tenant_id': tenant_id, - 'credentials': model_tool.model_configuration['model_instance'].credentials - }) elif provider_type == 'app': raise NotImplementedError('app provider not implemented') else: @@ -367,49 +352,6 @@ class ToolManager: cls._builtin_providers = {} cls._builtin_providers_loaded = False - # @classmethod - # def list_model_providers(cls, tenant_id: str = None) -> list[ModelToolProviderController]: - # """ - # list all the model providers - - # :return: the list of the model providers - # """ - # tenant_id = tenant_id or 'ffffffff-ffff-ffff-ffff-ffffffffffff' - # # get configurations - # model_configurations = ModelToolConfigurationManager.get_all_configuration() - # # get all providers - # provider_manager = ProviderManager() - # configurations = provider_manager.get_configurations(tenant_id).values() - # # get model providers - # model_providers: list[ModelToolProviderController] = [] - # for configuration in configurations: - # # all the model tool should be configurated - # if configuration.provider.provider not in model_configurations: - # continue - # if not ModelToolProviderController.is_configuration_valid(configuration): - # continue - # model_providers.append(ModelToolProviderController.from_db(configuration)) - - # return model_providers - - @classmethod - def get_model_provider(cls, tenant_id: str, provider_name: str) -> ModelToolProviderController: - """ - get the model provider - - :param provider_name: the name of the provider - - :return: the provider - """ - # get configurations - provider_manager = ProviderManager() - configurations = provider_manager.get_configurations(tenant_id) - configuration = configurations.get(provider_name) - if configuration is None: - raise ToolProviderNotFoundError(f'model provider {provider_name} not found') - - return ModelToolProviderController.from_db(configuration) - @classmethod def get_tool_label(cls, tool_name: str) -> Union[I18nObject, None]: """ @@ -455,15 +397,6 @@ class ToolManager: result_providers[provider.identity.name] = user_provider - # # get model tool providers - # model_providers = cls.list_model_providers(tenant_id=tenant_id) - # # append model providers - # for provider in model_providers: - # user_provider = ToolTransformService.model_provider_to_user_provider( - # db_provider=provider, - # ) - # result_providers[f'model_provider.{provider.identity.name}'] = user_provider - # get db api providers db_api_providers: list[ApiToolProvider] = db.session.query(ApiToolProvider). 
\ filter(ApiToolProvider.tenant_id == tenant_id).all() diff --git a/api/core/tools/utils/configuration.py b/api/core/tools/utils/configuration.py index 917f8411c4..b68efad124 100644 --- a/api/core/tools/utils/configuration.py +++ b/api/core/tools/utils/configuration.py @@ -1,16 +1,12 @@ -import os from copy import deepcopy -from typing import Any, Union +from typing import Any from pydantic import BaseModel -from yaml import FullLoader, load from core.helper import encrypter from core.helper.tool_parameter_cache import ToolParameterCache, ToolParameterCacheType from core.helper.tool_provider_cache import ToolProviderCredentialsCache, ToolProviderCredentialsCacheType from core.tools.entities.tool_entities import ( - ModelToolConfiguration, - ModelToolProviderConfiguration, ToolParameter, ToolProviderCredentials, ) @@ -231,65 +227,3 @@ class ToolParameterConfigurationManager(BaseModel): identity_id=self.identity_id ) cache.delete() - -class ModelToolConfigurationManager: - """ - Model as tool configuration - """ - _configurations: dict[str, ModelToolProviderConfiguration] = {} - _model_configurations: dict[str, ModelToolConfiguration] = {} - _inited = False - - @classmethod - def _init_configuration(cls): - """ - init configuration - """ - - absolute_path = os.path.abspath(os.path.dirname(__file__)) - model_tools_path = os.path.join(absolute_path, '..', 'model_tools') - - # get all .yaml file - files = [f for f in os.listdir(model_tools_path) if f.endswith('.yaml')] - - for file in files: - provider = file.split('.')[0] - with open(os.path.join(model_tools_path, file), encoding='utf-8') as f: - configurations = ModelToolProviderConfiguration(**load(f, Loader=FullLoader)) - models = configurations.models or [] - for model in models: - model_key = f'{provider}.{model.model}' - cls._model_configurations[model_key] = model - - cls._configurations[provider] = configurations - cls._inited = True - - @classmethod - def get_configuration(cls, provider: str) -> Union[ModelToolProviderConfiguration, None]: - """ - get configuration by provider - """ - if not cls._inited: - cls._init_configuration() - return cls._configurations.get(provider, None) - - @classmethod - def get_all_configuration(cls) -> dict[str, ModelToolProviderConfiguration]: - """ - get all configurations - """ - if not cls._inited: - cls._init_configuration() - return cls._configurations - - @classmethod - def get_model_configuration(cls, provider: str, model: str) -> Union[ModelToolConfiguration, None]: - """ - get model configuration - """ - key = f'{provider}.{model}' - - if not cls._inited: - cls._init_configuration() - - return cls._model_configurations.get(key, None) \ No newline at end of file diff --git a/api/services/tools_transform_service.py b/api/services/tools_transform_service.py index 3ef9f52e62..ed7fd589b8 100644 --- a/api/services/tools_transform_service.py +++ b/api/services/tools_transform_service.py @@ -10,7 +10,6 @@ from core.tools.entities.tool_entities import ApiProviderAuthType, ToolParameter from core.tools.entities.user_entities import UserTool, UserToolProvider from core.tools.provider.api_tool_provider import ApiBasedToolProviderController from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController -from core.tools.provider.model_tool_provider import ModelToolProviderController from core.tools.tool.tool import Tool from core.tools.utils.configuration import ToolConfigurationManager from models.tools import ApiToolProvider, BuiltinToolProvider @@ -28,8 +27,6 @@ class 
ToolTransformService: if provider_type == UserToolProvider.ProviderType.BUILTIN.value: return url_prefix + 'builtin/' + provider_name + '/icon' - elif provider_type == UserToolProvider.ProviderType.MODEL.value: - return url_prefix + 'model/' + provider_name + '/icon' elif provider_type == UserToolProvider.ProviderType.API.value: try: return json.loads(icon) @@ -185,31 +182,6 @@ class ToolTransformService: return result - @staticmethod - def model_provider_to_user_provider( - db_provider: ModelToolProviderController, - ) -> UserToolProvider: - """ - convert provider controller to user provider - """ - return UserToolProvider( - id=db_provider.identity.name, - author=db_provider.identity.author, - name=db_provider.identity.name, - description=I18nObject( - en_US=db_provider.identity.description.en_US, - zh_Hans=db_provider.identity.description.zh_Hans, - ), - icon=db_provider.identity.icon, - label=I18nObject( - en_US=db_provider.identity.label.en_US, - zh_Hans=db_provider.identity.label.zh_Hans, - ), - type=UserToolProvider.ProviderType.MODEL, - masked_credentials={}, - is_team_authorization=db_provider.is_active, - ) - @staticmethod def tool_to_user_tool( tool: Union[ApiBasedToolBundle, Tool], credentials: dict = None, tenant_id: str = None From c0fe414e0af5399a9c90afabce28a28cd5221b68 Mon Sep 17 00:00:00 2001 From: zxhlyh Date: Wed, 15 May 2024 15:12:36 +0800 Subject: [PATCH 085/267] fix: workflow delete edge when node is selected (#4414) --- web/app/components/workflow/hooks/use-nodes-interactions.ts | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/web/app/components/workflow/hooks/use-nodes-interactions.ts b/web/app/components/workflow/hooks/use-nodes-interactions.ts index 026e802268..e0e4f69c78 100644 --- a/web/app/components/workflow/hooks/use-nodes-interactions.ts +++ b/web/app/components/workflow/hooks/use-nodes-interactions.ts @@ -922,8 +922,13 @@ export const useNodesInteractions = () => { const { getNodes, + edges, } = store.getState() + const edgeSelected = edges.some(edge => edge.selected) + if (edgeSelected) + return + const nodes = getNodes() const bundledNodes = nodes.filter(node => node.data._isBundled && node.data.type !== BlockEnum.Start) From 97b65f9b4b9255fa69af1795249a7a9842d29146 Mon Sep 17 00:00:00 2001 From: "Charlie.Wei" Date: Wed, 15 May 2024 15:23:16 +0800 Subject: [PATCH 086/267] Optimize webscraper (#4392) Co-authored-by: luowei Co-authored-by: crazywoola <427733928@qq.com> Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- api/core/rag/extractor/extract_processor.py | 13 +++++++++++ .../builtin/webscraper/tools/webscraper.py | 17 ++++++++------ .../builtin/webscraper/tools/webscraper.yaml | 20 +++++++++++++++++ api/core/tools/utils/web_reader_tool.py | 22 +++++++++++++++---- 4 files changed, 61 insertions(+), 11 deletions(-) diff --git a/api/core/rag/extractor/extract_processor.py b/api/core/rag/extractor/extract_processor.py index 1136e11f76..a7adea8a05 100644 --- a/api/core/rag/extractor/extract_processor.py +++ b/api/core/rag/extractor/extract_processor.py @@ -1,6 +1,8 @@ +import re import tempfile from pathlib import Path from typing import Union +from urllib.parse import unquote import requests from flask import current_app @@ -55,6 +57,17 @@ class ExtractProcessor: with tempfile.TemporaryDirectory() as temp_dir: suffix = Path(url).suffix + if not suffix and suffix != '.': + # get content-type + if response.headers.get('Content-Type'): + suffix = '.' 
+ response.headers.get('Content-Type').split('/')[-1] + else: + content_disposition = response.headers.get('Content-Disposition') + filename_match = re.search(r'filename="([^"]+)"', content_disposition) + if filename_match: + filename = unquote(filename_match.group(1)) + suffix = '.' + re.search(r'\.(\w+)$', filename).group(1) + file_path = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}" with open(file_path, 'wb') as file: file.write(response.content) diff --git a/api/core/tools/provider/builtin/webscraper/tools/webscraper.py b/api/core/tools/provider/builtin/webscraper/tools/webscraper.py index 5e8c405b47..3d098e6768 100644 --- a/api/core/tools/provider/builtin/webscraper/tools/webscraper.py +++ b/api/core/tools/provider/builtin/webscraper/tools/webscraper.py @@ -7,9 +7,9 @@ from core.tools.tool.builtin_tool import BuiltinTool class WebscraperTool(BuiltinTool): def _invoke(self, - user_id: str, - tool_parameters: dict[str, Any], - ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: + user_id: str, + tool_parameters: dict[str, Any], + ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: """ invoke tools """ @@ -18,12 +18,15 @@ class WebscraperTool(BuiltinTool): user_agent = tool_parameters.get('user_agent', '') if not url: return self.create_text_message('Please input url') - + # get webpage result = self.get_url(url, user_agent=user_agent) - # summarize and return - return self.create_text_message(self.summary(user_id=user_id, content=result)) + if tool_parameters.get('generate_summary'): + # summarize and return + return self.create_text_message(self.summary(user_id=user_id, content=result)) + else: + # return full webpage + return self.create_text_message(result) except Exception as e: raise ToolInvokeError(str(e)) - \ No newline at end of file diff --git a/api/core/tools/provider/builtin/webscraper/tools/webscraper.yaml b/api/core/tools/provider/builtin/webscraper/tools/webscraper.yaml index 5782dbb0c7..180cfec6fc 100644 --- a/api/core/tools/provider/builtin/webscraper/tools/webscraper.yaml +++ b/api/core/tools/provider/builtin/webscraper/tools/webscraper.yaml @@ -38,3 +38,23 @@ parameters: pt_BR: used for identifying the browser. form: form default: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.1000.0 Safari/537.36 + - name: generate_summary + type: boolean + required: false + label: + en_US: Whether to generate summary + zh_Hans: 是否生成摘要 + human_description: + en_US: If true, the crawler will only return the page summary content. 
+ zh_Hans: 如果启用,爬虫将仅返回页面摘要内容。 + form: form + options: + - value: true + label: + en_US: Yes + zh_Hans: 是 + - value: false + label: + en_US: No + zh_Hans: 否 + default: false diff --git a/api/core/tools/utils/web_reader_tool.py b/api/core/tools/utils/web_reader_tool.py index 4c6fbb2780..96e4824940 100644 --- a/api/core/tools/utils/web_reader_tool.py +++ b/api/core/tools/utils/web_reader_tool.py @@ -1,5 +1,6 @@ import hashlib import json +import mimetypes import os import re import site @@ -7,6 +8,7 @@ import subprocess import tempfile import unicodedata from contextlib import contextmanager +from urllib.parse import unquote import requests from bs4 import BeautifulSoup, CData, Comment, NavigableString @@ -39,22 +41,34 @@ def get_url(url: str, user_agent: str = None) -> str: } if user_agent: headers["User-Agent"] = user_agent - - supported_content_types = extract_processor.SUPPORT_URL_CONTENT_TYPES + ["text/html"] - response = requests.get(url, headers=headers, allow_redirects=True, timeout=(5, 10)) + main_content_type = None + supported_content_types = extract_processor.SUPPORT_URL_CONTENT_TYPES + ["text/html"] + response = requests.head(url, headers=headers, allow_redirects=True, timeout=(5, 10)) if response.status_code != 200: return "URL returned status code {}.".format(response.status_code) # check content-type - main_content_type = response.headers.get('Content-Type').split(';')[0].strip() + content_type = response.headers.get('Content-Type') + if content_type: + main_content_type = response.headers.get('Content-Type').split(';')[0].strip() + else: + content_disposition = response.headers.get('Content-Disposition') + filename_match = re.search(r'filename="([^"]+)"', content_disposition) + if filename_match: + filename = unquote(filename_match.group(1)) + extension = re.search(r'\.(\w+)$', filename) + if extension: + main_content_type = mimetypes.guess_type(filename)[0] + if main_content_type not in supported_content_types: return "Unsupported content-type [{}] of URL.".format(main_content_type) if main_content_type in extract_processor.SUPPORT_URL_CONTENT_TYPES: return ExtractProcessor.load_from_url(url, return_text=True) + response = requests.get(url, headers=headers, allow_redirects=True, timeout=(120, 300)) a = extract_using_readabilipy(response.text) if not a['plain_text'] or not a['plain_text'].strip(): From a76ae2d756d17a4da1d9451e4992a481233584cc Mon Sep 17 00:00:00 2001 From: VoidIsVoid <343750470@qq.com> Date: Wed, 15 May 2024 15:24:40 +0800 Subject: [PATCH 087/267] chore: remove useless code in knowledge_retrieval_node (#4412) --- .../nodes/knowledge_retrieval/knowledge_retrieval_node.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py index be3cec9152..3190856db2 100644 --- a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py +++ b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py @@ -146,7 +146,6 @@ class KnowledgeRetrievalNode(BaseNode): if 'score' in item.metadata and item.metadata['score']: document_score_list[item.metadata['doc_id']] = item.metadata['score'] - document_context_list = [] index_node_ids = [document.metadata['doc_id'] for document in all_documents] segments = DocumentSegment.query.filter( DocumentSegment.dataset_id.in_(dataset_ids), @@ -160,11 +159,6 @@ class KnowledgeRetrievalNode(BaseNode): sorted_segments = sorted(segments, key=lambda segment: 
index_node_id_to_position.get(segment.index_node_id, float('inf'))) - for segment in sorted_segments: - if segment.answer: - document_context_list.append(f'question:{segment.content} answer:{segment.answer}') - else: - document_context_list.append(segment.content) for segment in sorted_segments: dataset = Dataset.query.filter_by( From da81233d61699e5586918715bd99700c7ae99cc1 Mon Sep 17 00:00:00 2001 From: huchengyi Date: Wed, 15 May 2024 15:27:15 +0800 Subject: [PATCH 088/267] Custom sqlalchemy database uri scheme is supported (#4367) --- api/config.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/api/config.py b/api/config.py index 8ec5febb1d..eba27c7c70 100644 --- a/api/config.py +++ b/api/config.py @@ -28,6 +28,7 @@ DEFAULTS = { 'STORAGE_LOCAL_PATH': 'storage', 'CHECK_UPDATE_URL': 'https://updates.dify.ai', 'DEPLOY_ENV': 'PRODUCTION', + 'SQLALCHEMY_DATABASE_URI_SCHEME': 'postgresql', 'SQLALCHEMY_POOL_SIZE': 30, 'SQLALCHEMY_MAX_OVERFLOW': 10, 'SQLALCHEMY_POOL_RECYCLE': 3600, @@ -165,10 +166,11 @@ class Config: key: get_env(key) for key in ['DB_USERNAME', 'DB_PASSWORD', 'DB_HOST', 'DB_PORT', 'DB_DATABASE', 'DB_CHARSET'] } + self.SQLALCHEMY_DATABASE_URI_SCHEME = get_env('SQLALCHEMY_DATABASE_URI_SCHEME') db_extras = f"?client_encoding={db_credentials['DB_CHARSET']}" if db_credentials['DB_CHARSET'] else "" - self.SQLALCHEMY_DATABASE_URI = f"postgresql://{db_credentials['DB_USERNAME']}:{db_credentials['DB_PASSWORD']}@{db_credentials['DB_HOST']}:{db_credentials['DB_PORT']}/{db_credentials['DB_DATABASE']}{db_extras}" + self.SQLALCHEMY_DATABASE_URI = f"{self.SQLALCHEMY_DATABASE_URI_SCHEME}://{db_credentials['DB_USERNAME']}:{db_credentials['DB_PASSWORD']}@{db_credentials['DB_HOST']}:{db_credentials['DB_PORT']}/{db_credentials['DB_DATABASE']}{db_extras}" self.SQLALCHEMY_ENGINE_OPTIONS = { 'pool_size': int(get_env('SQLALCHEMY_POOL_SIZE')), 'max_overflow': int(get_env('SQLALCHEMY_MAX_OVERFLOW')), From dd949311162c6f4c369c8e13bb13522e685fe9bc Mon Sep 17 00:00:00 2001 From: Garfield Dai Date: Wed, 15 May 2024 16:14:49 +0800 Subject: [PATCH 089/267] Remove useless code (#4416) --- api/controllers/console/__init__.py | 3 - .../console/enterprise/__init__.py | 0 .../console/enterprise/enterprise_sso.py | 59 ------- api/controllers/console/feature.py | 7 +- api/controllers/web/__init__.py | 2 +- api/controllers/web/app.py | 6 +- api/controllers/web/error.py | 6 + api/controllers/web/feature.py | 12 ++ api/controllers/web/passport.py | 12 +- api/controllers/web/wraps.py | 78 +++++++--- .../enterprise/enterprise_feature_service.py | 28 ---- .../enterprise/enterprise_sso_service.py | 60 ------- api/services/feature_service.py | 25 +++ web/app/(shareLayout)/chat/[token]/page.tsx | 1 - .../(shareLayout)/chatbot/[token]/page.tsx | 81 +++++++++- web/app/(shareLayout)/webapp-signin/page.tsx | 147 ++++++++++++++++++ web/app/components/share/utils.ts | 36 +++++ web/app/signin/page.tsx | 21 +-- ...{enterpriseSSOForm.tsx => userSSOForm.tsx} | 14 +- web/service/base.ts | 24 +++ web/service/common.ts | 5 + web/service/enterprise.ts | 14 -- web/service/share.ts | 24 +++ web/service/sso.ts | 9 ++ web/types/enterprise.ts | 9 -- web/types/feature.ts | 13 ++ 26 files changed, 466 insertions(+), 230 deletions(-) delete mode 100644 api/controllers/console/enterprise/__init__.py delete mode 100644 api/controllers/console/enterprise/enterprise_sso.py create mode 100644 api/controllers/web/feature.py delete mode 100644 api/services/enterprise/enterprise_feature_service.py delete mode 100644 
api/services/enterprise/enterprise_sso_service.py create mode 100644 web/app/(shareLayout)/webapp-signin/page.tsx rename web/app/signin/{enterpriseSSOForm.tsx => userSSOForm.tsx} (85%) delete mode 100644 web/service/enterprise.ts create mode 100644 web/service/sso.ts delete mode 100644 web/types/enterprise.ts create mode 100644 web/types/feature.ts diff --git a/api/controllers/console/__init__.py b/api/controllers/console/__init__.py index 498557cd51..72ec05f654 100644 --- a/api/controllers/console/__init__.py +++ b/api/controllers/console/__init__.py @@ -37,9 +37,6 @@ from .billing import billing # Import datasets controllers from .datasets import data_source, datasets, datasets_document, datasets_segments, file, hit_testing -# Import enterprise controllers -from .enterprise import enterprise_sso - # Import explore controllers from .explore import ( audio, diff --git a/api/controllers/console/enterprise/__init__.py b/api/controllers/console/enterprise/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/controllers/console/enterprise/enterprise_sso.py b/api/controllers/console/enterprise/enterprise_sso.py deleted file mode 100644 index f6a2897d5a..0000000000 --- a/api/controllers/console/enterprise/enterprise_sso.py +++ /dev/null @@ -1,59 +0,0 @@ -from flask import current_app, redirect -from flask_restful import Resource, reqparse - -from controllers.console import api -from controllers.console.setup import setup_required -from services.enterprise.enterprise_sso_service import EnterpriseSSOService - - -class EnterpriseSSOSamlLogin(Resource): - - @setup_required - def get(self): - return EnterpriseSSOService.get_sso_saml_login() - - -class EnterpriseSSOSamlAcs(Resource): - - @setup_required - def post(self): - parser = reqparse.RequestParser() - parser.add_argument('SAMLResponse', type=str, required=True, location='form') - args = parser.parse_args() - saml_response = args['SAMLResponse'] - - try: - token = EnterpriseSSOService.post_sso_saml_acs(saml_response) - return redirect(f'{current_app.config.get("CONSOLE_WEB_URL")}/signin?console_token={token}') - except Exception as e: - return redirect(f'{current_app.config.get("CONSOLE_WEB_URL")}/signin?message={str(e)}') - - -class EnterpriseSSOOidcLogin(Resource): - - @setup_required - def get(self): - return EnterpriseSSOService.get_sso_oidc_login() - - -class EnterpriseSSOOidcCallback(Resource): - - @setup_required - def get(self): - parser = reqparse.RequestParser() - parser.add_argument('state', type=str, required=True, location='args') - parser.add_argument('code', type=str, required=True, location='args') - parser.add_argument('oidc-state', type=str, required=True, location='cookies') - args = parser.parse_args() - - try: - token = EnterpriseSSOService.get_sso_oidc_callback(args) - return redirect(f'{current_app.config.get("CONSOLE_WEB_URL")}/signin?console_token={token}') - except Exception as e: - return redirect(f'{current_app.config.get("CONSOLE_WEB_URL")}/signin?message={str(e)}') - - -api.add_resource(EnterpriseSSOSamlLogin, '/enterprise/sso/saml/login') -api.add_resource(EnterpriseSSOSamlAcs, '/enterprise/sso/saml/acs') -api.add_resource(EnterpriseSSOOidcLogin, '/enterprise/sso/oidc/login') -api.add_resource(EnterpriseSSOOidcCallback, '/enterprise/sso/oidc/callback') diff --git a/api/controllers/console/feature.py b/api/controllers/console/feature.py index 325652a447..7334f85a57 100644 --- a/api/controllers/console/feature.py +++ b/api/controllers/console/feature.py @@ -1,7 +1,6 @@ from flask_login 
import current_user
 from flask_restful import Resource
-from services.enterprise.enterprise_feature_service import EnterpriseFeatureService
 from services.feature_service import FeatureService
 from . import api
@@ -15,10 +14,10 @@ class FeatureApi(Resource):
         return FeatureService.get_features(current_user.current_tenant_id).dict()
-class EnterpriseFeatureApi(Resource):
+class SystemFeatureApi(Resource):
     def get(self):
-        return EnterpriseFeatureService.get_enterprise_features().dict()
+        return FeatureService.get_system_features().dict()
 api.add_resource(FeatureApi, '/features')
-api.add_resource(EnterpriseFeatureApi, '/enterprise-features')
+api.add_resource(SystemFeatureApi, '/system-features')
diff --git a/api/controllers/web/__init__.py b/api/controllers/web/__init__.py
index b6d46d4081..aa19bdc034 100644
--- a/api/controllers/web/__init__.py
+++ b/api/controllers/web/__init__.py
@@ -6,4 +6,4 @@ bp = Blueprint('web', __name__, url_prefix='/api')
 api = ExternalApi(bp)
-from . import app, audio, completion, conversation, file, message, passport, saved_message, site, workflow
+from . import app, audio, completion, conversation, feature, file, message, passport, saved_message, site, workflow
diff --git a/api/controllers/web/app.py b/api/controllers/web/app.py
index 2586f2e6ec..91d9015c33 100644
--- a/api/controllers/web/app.py
+++ b/api/controllers/web/app.py
@@ -1,14 +1,10 @@
-import json
-
 from flask import current_app
 from flask_restful import fields, marshal_with
 from controllers.web import api
 from controllers.web.error import AppUnavailableError
 from controllers.web.wraps import WebApiResource
-from extensions.ext_database import db
-from models.model import App, AppMode, AppModelConfig
-from models.tools import ApiToolProvider
+from models.model import App, AppMode
 from services.app_service import AppService
diff --git a/api/controllers/web/error.py b/api/controllers/web/error.py
index 390e3fe7d1..bc87f51051 100644
--- a/api/controllers/web/error.py
+++ b/api/controllers/web/error.py
@@ -115,3 +115,9 @@ class UnsupportedFileTypeError(BaseHTTPException):
     error_code = 'unsupported_file_type'
     description = "File type not allowed."
     code = 415
+
+
+class WebSSOAuthRequiredError(BaseHTTPException):
+    error_code = 'web_sso_auth_required'
+    description = "Web SSO authentication required."
+ code = 401 diff --git a/api/controllers/web/feature.py b/api/controllers/web/feature.py new file mode 100644 index 0000000000..65842d78c6 --- /dev/null +++ b/api/controllers/web/feature.py @@ -0,0 +1,12 @@ +from flask_restful import Resource + +from controllers.web import api +from services.feature_service import FeatureService + + +class SystemFeatureApi(Resource): + def get(self): + return FeatureService.get_system_features().dict() + + +api.add_resource(SystemFeatureApi, '/system-features') diff --git a/api/controllers/web/passport.py b/api/controllers/web/passport.py index 92b28d8125..ccc8683a79 100644 --- a/api/controllers/web/passport.py +++ b/api/controllers/web/passport.py @@ -5,14 +5,21 @@ from flask_restful import Resource from werkzeug.exceptions import NotFound, Unauthorized from controllers.web import api +from controllers.web.error import WebSSOAuthRequiredError from extensions.ext_database import db from libs.passport import PassportService from models.model import App, EndUser, Site +from services.feature_service import FeatureService class PassportResource(Resource): """Base resource for passport.""" def get(self): + + system_features = FeatureService.get_system_features() + if system_features.sso_enforced_for_web: + raise WebSSOAuthRequiredError() + app_code = request.headers.get('X-App-Code') if app_code is None: raise Unauthorized('X-App-Code header is missing.') @@ -28,7 +35,7 @@ class PassportResource(Resource): app_model = db.session.query(App).filter(App.id == site.app_id).first() if not app_model or app_model.status != 'normal' or not app_model.enable_site: raise NotFound() - + end_user = EndUser( tenant_id=app_model.tenant_id, app_id=app_model.id, @@ -36,6 +43,7 @@ class PassportResource(Resource): is_anonymous=True, session_id=generate_session_id(), ) + db.session.add(end_user) db.session.commit() @@ -53,8 +61,10 @@ class PassportResource(Resource): 'access_token': tk, } + api.add_resource(PassportResource, '/passport') + def generate_session_id(): """ Generate a unique session ID. diff --git a/api/controllers/web/wraps.py b/api/controllers/web/wraps.py index bdaa476f34..f5ab49d7e1 100644 --- a/api/controllers/web/wraps.py +++ b/api/controllers/web/wraps.py @@ -2,11 +2,13 @@ from functools import wraps from flask import request from flask_restful import Resource -from werkzeug.exceptions import NotFound, Unauthorized +from werkzeug.exceptions import BadRequest, NotFound, Unauthorized +from controllers.web.error import WebSSOAuthRequiredError from extensions.ext_database import db from libs.passport import PassportService from models.model import App, EndUser, Site +from services.feature_service import FeatureService def validate_jwt_token(view=None): @@ -21,34 +23,60 @@ def validate_jwt_token(view=None): return decorator(view) return decorator + def decode_jwt_token(): - auth_header = request.headers.get('Authorization') - if auth_header is None: - raise Unauthorized('Authorization header is missing.') + system_features = FeatureService.get_system_features() - if ' ' not in auth_header: - raise Unauthorized('Invalid Authorization header format. Expected \'Bearer \' format.') - - auth_scheme, tk = auth_header.split(None, 1) - auth_scheme = auth_scheme.lower() + try: + auth_header = request.headers.get('Authorization') + if auth_header is None: + raise Unauthorized('Authorization header is missing.') - if auth_scheme != 'bearer': - raise Unauthorized('Invalid Authorization header format. 
Expected \'Bearer \' format.') - decoded = PassportService().verify(tk) - app_code = decoded.get('app_code') - app_model = db.session.query(App).filter(App.id == decoded['app_id']).first() - site = db.session.query(Site).filter(Site.code == app_code).first() - if not app_model: - raise NotFound() - if not app_code or not site: - raise Unauthorized('Site URL is no longer valid.') - if app_model.enable_site is False: - raise Unauthorized('Site is disabled.') - end_user = db.session.query(EndUser).filter(EndUser.id == decoded['end_user_id']).first() - if not end_user: - raise NotFound() + if ' ' not in auth_header: + raise Unauthorized('Invalid Authorization header format. Expected \'Bearer \' format.') + + auth_scheme, tk = auth_header.split(None, 1) + auth_scheme = auth_scheme.lower() + + if auth_scheme != 'bearer': + raise Unauthorized('Invalid Authorization header format. Expected \'Bearer \' format.') + decoded = PassportService().verify(tk) + app_code = decoded.get('app_code') + app_model = db.session.query(App).filter(App.id == decoded['app_id']).first() + site = db.session.query(Site).filter(Site.code == app_code).first() + if not app_model: + raise NotFound() + if not app_code or not site: + raise BadRequest('Site URL is no longer valid.') + if app_model.enable_site is False: + raise BadRequest('Site is disabled.') + end_user = db.session.query(EndUser).filter(EndUser.id == decoded['end_user_id']).first() + if not end_user: + raise NotFound() + + _validate_web_sso_token(decoded, system_features) + + return app_model, end_user + except Unauthorized as e: + if system_features.sso_enforced_for_web: + raise WebSSOAuthRequiredError() + + raise Unauthorized(e.description) + + +def _validate_web_sso_token(decoded, system_features): + # Check if SSO is enforced for web, and if the token source is not SSO, raise an error and redirect to SSO login + if system_features.sso_enforced_for_web: + source = decoded.get('token_source') + if not source or source != 'sso': + raise WebSSOAuthRequiredError() + + # Check if SSO is not enforced for web, and if the token source is SSO, raise an error and redirect to normal passport login + if not system_features.sso_enforced_for_web: + source = decoded.get('token_source') + if source and source == 'sso': + raise Unauthorized('sso token expired.') - return app_model, end_user class WebApiResource(Resource): method_decorators = [validate_jwt_token] diff --git a/api/services/enterprise/enterprise_feature_service.py b/api/services/enterprise/enterprise_feature_service.py deleted file mode 100644 index fe33349aa8..0000000000 --- a/api/services/enterprise/enterprise_feature_service.py +++ /dev/null @@ -1,28 +0,0 @@ -from flask import current_app -from pydantic import BaseModel - -from services.enterprise.enterprise_service import EnterpriseService - - -class EnterpriseFeatureModel(BaseModel): - sso_enforced_for_signin: bool = False - sso_enforced_for_signin_protocol: str = '' - - -class EnterpriseFeatureService: - - @classmethod - def get_enterprise_features(cls) -> EnterpriseFeatureModel: - features = EnterpriseFeatureModel() - - if current_app.config['ENTERPRISE_ENABLED']: - cls._fulfill_params_from_enterprise(features) - - return features - - @classmethod - def _fulfill_params_from_enterprise(cls, features): - enterprise_info = EnterpriseService.get_info() - - features.sso_enforced_for_signin = enterprise_info['sso_enforced_for_signin'] - features.sso_enforced_for_signin_protocol = enterprise_info['sso_enforced_for_signin_protocol'] diff --git 
a/api/services/enterprise/enterprise_sso_service.py b/api/services/enterprise/enterprise_sso_service.py deleted file mode 100644 index d8e19f23bf..0000000000 --- a/api/services/enterprise/enterprise_sso_service.py +++ /dev/null @@ -1,60 +0,0 @@ -import logging - -from models.account import Account, AccountStatus -from services.account_service import AccountService, TenantService -from services.enterprise.base import EnterpriseRequest - -logger = logging.getLogger(__name__) - - -class EnterpriseSSOService: - - @classmethod - def get_sso_saml_login(cls) -> str: - return EnterpriseRequest.send_request('GET', '/sso/saml/login') - - @classmethod - def post_sso_saml_acs(cls, saml_response: str) -> str: - response = EnterpriseRequest.send_request('POST', '/sso/saml/acs', json={'SAMLResponse': saml_response}) - if 'email' not in response or response['email'] is None: - logger.exception(response) - raise Exception('Saml response is invalid') - - return cls.login_with_email(response.get('email')) - - @classmethod - def get_sso_oidc_login(cls): - return EnterpriseRequest.send_request('GET', '/sso/oidc/login') - - @classmethod - def get_sso_oidc_callback(cls, args: dict): - state_from_query = args['state'] - code_from_query = args['code'] - state_from_cookies = args['oidc-state'] - - if state_from_cookies != state_from_query: - raise Exception('invalid state or code') - - response = EnterpriseRequest.send_request('GET', '/sso/oidc/callback', params={'code': code_from_query}) - if 'email' not in response or response['email'] is None: - logger.exception(response) - raise Exception('OIDC response is invalid') - - return cls.login_with_email(response.get('email')) - - @classmethod - def login_with_email(cls, email: str) -> str: - account = Account.query.filter_by(email=email).first() - if account is None: - raise Exception('account not found, please contact system admin to invite you to join in a workspace') - - if account.status == AccountStatus.BANNED: - raise Exception('account is banned, please contact system admin') - - tenants = TenantService.get_join_tenants(account) - if len(tenants) == 0: - raise Exception("workspace not found, please contact system admin to invite you to join in a workspace") - - token = AccountService.get_account_jwt_token(account) - - return token diff --git a/api/services/feature_service.py b/api/services/feature_service.py index 3cf51d11a0..29842d68b7 100644 --- a/api/services/feature_service.py +++ b/api/services/feature_service.py @@ -2,6 +2,7 @@ from flask import current_app from pydantic import BaseModel from services.billing_service import BillingService +from services.enterprise.enterprise_service import EnterpriseService class SubscriptionModel(BaseModel): @@ -30,6 +31,13 @@ class FeatureModel(BaseModel): can_replace_logo: bool = False +class SystemFeatureModel(BaseModel): + sso_enforced_for_signin: bool = False + sso_enforced_for_signin_protocol: str = '' + sso_enforced_for_web: bool = False + sso_enforced_for_web_protocol: str = '' + + class FeatureService: @classmethod @@ -43,6 +51,15 @@ class FeatureService: return features + @classmethod + def get_system_features(cls) -> SystemFeatureModel: + system_features = SystemFeatureModel() + + if current_app.config['ENTERPRISE_ENABLED']: + cls._fulfill_params_from_enterprise(system_features) + + return system_features + @classmethod def _fulfill_params_from_env(cls, features: FeatureModel): features.can_replace_logo = current_app.config['CAN_REPLACE_LOGO'] @@ -73,3 +90,11 @@ class FeatureService: features.docs_processing 
= billing_info['docs_processing']
         features.can_replace_logo = billing_info['can_replace_logo']
+    @classmethod
+    def _fulfill_params_from_enterprise(cls, features):
+        enterprise_info = EnterpriseService.get_info()
+
+        features.sso_enforced_for_signin = enterprise_info['sso_enforced_for_signin']
+        features.sso_enforced_for_signin_protocol = enterprise_info['sso_enforced_for_signin_protocol']
+        features.sso_enforced_for_web = enterprise_info['sso_enforced_for_web']
+        features.sso_enforced_for_web_protocol = enterprise_info['sso_enforced_for_web_protocol']
diff --git a/web/app/(shareLayout)/chat/[token]/page.tsx b/web/app/(shareLayout)/chat/[token]/page.tsx
index 6c3fe2b4a4..56b2e0da7d 100644
--- a/web/app/(shareLayout)/chat/[token]/page.tsx
+++ b/web/app/(shareLayout)/chat/[token]/page.tsx
@@ -1,5 +1,4 @@
 'use client'
-
 import type { FC } from 'react'
 import React from 'react'
diff --git a/web/app/(shareLayout)/chatbot/[token]/page.tsx b/web/app/(shareLayout)/chatbot/[token]/page.tsx
index 8aa182893a..0dc7b07169 100644
--- a/web/app/(shareLayout)/chatbot/[token]/page.tsx
+++ b/web/app/(shareLayout)/chatbot/[token]/page.tsx
@@ -1,12 +1,87 @@
+'use client'
 import type { FC } from 'react'
-import React from 'react'
-
+import React, { useEffect } from 'react'
+import cn from 'classnames'
 import type { IMainProps } from '@/app/components/share/chat'
 import Main from '@/app/components/share/chatbot'
+import Loading from '@/app/components/base/loading'
+import { fetchSystemFeatures } from '@/service/share'
+import LogoSite from '@/app/components/base/logo/logo-site'
 const Chatbot: FC = () => {
+  const [isSSOEnforced, setIsSSOEnforced] = React.useState(true)
+  const [loading, setLoading] = React.useState(true)
+
+  useEffect(() => {
+    fetchSystemFeatures().then((res) => {
+      setIsSSOEnforced(res.sso_enforced_for_web)
+      setLoading(false)
+    })
+  }, [])
+
   return (
-
+ <> + { + loading + ? ( +
+
+ +
+
+ ) + : ( + <> + {isSSOEnforced + ? ( +
+
+
+ +
+ +
+
+
+

+ Warning: Chatbot is not available +

+

+ Because SSO is enforced. Please contact your administrator. +

+
+
+
+
+
+ ) + :
+ } + + )} + ) } diff --git a/web/app/(shareLayout)/webapp-signin/page.tsx b/web/app/(shareLayout)/webapp-signin/page.tsx new file mode 100644 index 0000000000..d0d05cdd0d --- /dev/null +++ b/web/app/(shareLayout)/webapp-signin/page.tsx @@ -0,0 +1,147 @@ +'use client' +import cn from 'classnames' +import { useRouter, useSearchParams } from 'next/navigation' +import type { FC } from 'react' +import React, { useEffect, useState } from 'react' +import { useTranslation } from 'react-i18next' +import Toast from '@/app/components/base/toast' +import Button from '@/app/components/base/button' +import { fetchSystemFeatures, fetchWebOIDCSSOUrl, fetchWebSAMLSSOUrl } from '@/service/share' +import LogoSite from '@/app/components/base/logo/logo-site' +import { setAccessToken } from '@/app/components/share/utils' + +const WebSSOForm: FC = () => { + const searchParams = useSearchParams() + + const redirectUrl = searchParams.get('redirect_url') + const tokenFromUrl = searchParams.get('web_sso_token') + const message = searchParams.get('message') + + const router = useRouter() + const { t } = useTranslation() + + const [isLoading, setIsLoading] = useState(false) + const [protocal, setProtocal] = useState('') + + useEffect(() => { + const fetchFeaturesAndSetToken = async () => { + await fetchSystemFeatures().then((res) => { + setProtocal(res.sso_enforced_for_web_protocol) + }) + + // Callback from SSO, process token and redirect + if (tokenFromUrl && redirectUrl) { + const appCode = redirectUrl.split('/').pop() + if (!appCode) { + Toast.notify({ + type: 'error', + message: 'redirect url is invalid. App code is not found.', + }) + return + } + + await setAccessToken(appCode, tokenFromUrl) + router.push(redirectUrl) + } + } + + fetchFeaturesAndSetToken() + + if (message) { + Toast.notify({ + type: 'error', + message, + }) + } + }, []) + + const handleSSOLogin = () => { + setIsLoading(true) + + if (!redirectUrl) { + Toast.notify({ + type: 'error', + message: 'redirect url is not found.', + }) + setIsLoading(false) + return + } + + const appCode = redirectUrl.split('/').pop() + if (!appCode) { + Toast.notify({ + type: 'error', + message: 'redirect url is invalid. App code is not found.', + }) + return + } + + if (protocal === 'saml') { + fetchWebSAMLSSOUrl(appCode, redirectUrl).then((res) => { + router.push(res.url) + }).finally(() => { + setIsLoading(false) + }) + } + else if (protocal === 'oidc') { + fetchWebOIDCSSOUrl(appCode, redirectUrl).then((res) => { + router.push(res.url) + }).finally(() => { + setIsLoading(false) + }) + } + else { + Toast.notify({ + type: 'error', + message: 'sso protocal is not supported.', + }) + setIsLoading(false) + } + } + + return ( +
+
+
+ +
+ +
+
+
+

{t('login.pageTitle')}

+
+
+ +
+
+
+
+
+ ) +} + +export default React.memo(WebSSOForm) diff --git a/web/app/components/share/utils.ts b/web/app/components/share/utils.ts index 6362fd8330..5a41523404 100644 --- a/web/app/components/share/utils.ts +++ b/web/app/components/share/utils.ts @@ -1,4 +1,6 @@ +import { CONVERSATION_ID_INFO } from '../base/chat/constants' import { fetchAccessToken } from '@/service/share' + export const checkOrSetAccessToken = async () => { const sharedToken = globalThis.location.pathname.split('/').slice(-1)[0] const accessToken = localStorage.getItem('token') || JSON.stringify({ [sharedToken]: '' }) @@ -15,3 +17,37 @@ export const checkOrSetAccessToken = async () => { localStorage.setItem('token', JSON.stringify(accessTokenJson)) } } + +export const setAccessToken = async (sharedToken: string, token: string) => { + const accessToken = localStorage.getItem('token') || JSON.stringify({ [sharedToken]: '' }) + let accessTokenJson = { [sharedToken]: '' } + try { + accessTokenJson = JSON.parse(accessToken) + } + catch (e) { + + } + + localStorage.removeItem(CONVERSATION_ID_INFO) + + accessTokenJson[sharedToken] = token + localStorage.setItem('token', JSON.stringify(accessTokenJson)) +} + +export const removeAccessToken = () => { + const sharedToken = globalThis.location.pathname.split('/').slice(-1)[0] + + const accessToken = localStorage.getItem('token') || JSON.stringify({ [sharedToken]: '' }) + let accessTokenJson = { [sharedToken]: '' } + try { + accessTokenJson = JSON.parse(accessToken) + } + catch (e) { + + } + + localStorage.removeItem(CONVERSATION_ID_INFO) + + delete accessTokenJson[sharedToken] + localStorage.setItem('token', JSON.stringify(accessTokenJson)) +} diff --git a/web/app/signin/page.tsx b/web/app/signin/page.tsx index 8abb656c2e..b0ee172a95 100644 --- a/web/app/signin/page.tsx +++ b/web/app/signin/page.tsx @@ -6,19 +6,20 @@ import Loading from '../components/base/loading' import Forms from './forms' import Header from './_header' import style from './page.module.css' -import EnterpriseSSOForm from './enterpriseSSOForm' +import UserSSOForm from './userSSOForm' import { IS_CE_EDITION } from '@/config' -import { getEnterpriseFeatures } from '@/service/enterprise' -import type { EnterpriseFeatures } from '@/types/enterprise' -import { defaultEnterpriseFeatures } from '@/types/enterprise' + +import type { SystemFeatures } from '@/types/feature' +import { defaultSystemFeatures } from '@/types/feature' +import { getSystemFeatures } from '@/service/common' const SignIn = () => { const [loading, setLoading] = useState(true) - const [enterpriseFeatures, setEnterpriseFeatures] = useState(defaultEnterpriseFeatures) + const [systemFeatures, setSystemFeatures] = useState(defaultSystemFeatures) useEffect(() => { - getEnterpriseFeatures().then((res) => { - setEnterpriseFeatures(res) + getSystemFeatures().then((res) => { + setSystemFeatures(res) }).finally(() => { setLoading(false) }) @@ -70,7 +71,7 @@ gtag('config', 'AW-11217955271"');
)} - {!loading && !enterpriseFeatures.sso_enforced_for_signin && ( + {!loading && !systemFeatures.sso_enforced_for_signin && ( <>
@@ -79,8 +80,8 @@ gtag('config', 'AW-11217955271"'); )} - {!loading && enterpriseFeatures.sso_enforced_for_signin && ( - + {!loading && systemFeatures.sso_enforced_for_signin && ( + )}
diff --git a/web/app/signin/enterpriseSSOForm.tsx b/web/app/signin/userSSOForm.tsx similarity index 85% rename from web/app/signin/enterpriseSSOForm.tsx rename to web/app/signin/userSSOForm.tsx index 747f2aa478..fe95be8c66 100644 --- a/web/app/signin/enterpriseSSOForm.tsx +++ b/web/app/signin/userSSOForm.tsx @@ -5,14 +5,14 @@ import type { FC } from 'react' import { useEffect, useState } from 'react' import { useTranslation } from 'react-i18next' import Toast from '@/app/components/base/toast' -import { getOIDCSSOUrl, getSAMLSSOUrl } from '@/service/enterprise' +import { getUserOIDCSSOUrl, getUserSAMLSSOUrl } from '@/service/sso' import Button from '@/app/components/base/button' -type EnterpriseSSOFormProps = { +type UserSSOFormProps = { protocol: string } -const EnterpriseSSOForm: FC = ({ +const UserSSOForm: FC = ({ protocol, }) => { const searchParams = useSearchParams() @@ -41,15 +41,15 @@ const EnterpriseSSOForm: FC = ({ const handleSSOLogin = () => { setIsLoading(true) if (protocol === 'saml') { - getSAMLSSOUrl().then((res) => { + getUserSAMLSSOUrl().then((res) => { router.push(res.url) }).finally(() => { setIsLoading(false) }) } else { - getOIDCSSOUrl().then((res) => { - document.cookie = `oidc-state=${res.state}` + getUserOIDCSSOUrl().then((res) => { + document.cookie = `user-oidc-state=${res.state}` router.push(res.url) }).finally(() => { setIsLoading(false) @@ -84,4 +84,4 @@ const EnterpriseSSOForm: FC = ({ ) } -export default EnterpriseSSOForm +export default UserSSOForm diff --git a/web/service/base.ts b/web/service/base.ts index 48baeaeb05..c500e31d7e 100644 --- a/web/service/base.ts +++ b/web/service/base.ts @@ -10,6 +10,7 @@ import type { WorkflowFinishedResponse, WorkflowStartedResponse, } from '@/types/workflow' +import { removeAccessToken } from '@/app/components/share/utils' const TIME_OUT = 100000 const ContentType = { @@ -97,6 +98,10 @@ function unicodeToChar(text: string) { }) } +function requiredWebSSOLogin() { + globalThis.location.href = `/webapp-signin?redirect_url=${globalThis.location.pathname}` +} + export function format(text: string) { let res = text.trim() if (res.startsWith('\n')) @@ -308,6 +313,15 @@ const baseFetch = ( return bodyJson.then((data: ResponseError) => { if (!silent) Toast.notify({ type: 'error', message: data.message }) + + if (data.code === 'web_sso_auth_required') + requiredWebSSOLogin() + + if (data.code === 'unauthorized') { + removeAccessToken() + globalThis.location.reload() + } + return Promise.reject(data) }) } @@ -467,6 +481,16 @@ export const ssePost = ( if (!/^(2|3)\d{2}$/.test(String(res.status))) { res.json().then((data: any) => { Toast.notify({ type: 'error', message: data.message || 'Server Error' }) + + if (isPublicAPI) { + if (data.code === 'web_sso_auth_required') + requiredWebSSOLogin() + + if (data.code === 'unauthorized') { + removeAccessToken() + globalThis.location.reload() + } + } }) onError?.('Server Error') return diff --git a/web/service/common.ts b/web/service/common.ts index 3a7d97af14..98fe50488c 100644 --- a/web/service/common.ts +++ b/web/service/common.ts @@ -34,6 +34,7 @@ import type { ModelProvider, } from '@/app/components/header/account-setting/model-provider-page/declarations' import type { RETRIEVE_METHOD } from '@/types/app' +import type { SystemFeatures } from '@/types/feature' export const login: Fetcher }> = ({ url, body }) => { return post(url, { body }) as Promise @@ -271,3 +272,7 @@ type RetrievalMethodsRes = { export const fetchSupportRetrievalMethods: Fetcher = (url) => { return get(url) } + 
+export const getSystemFeatures = () => { + return get('/system-features') +} diff --git a/web/service/enterprise.ts b/web/service/enterprise.ts deleted file mode 100644 index b7d9c8213d..0000000000 --- a/web/service/enterprise.ts +++ /dev/null @@ -1,14 +0,0 @@ -import { get } from './base' -import type { EnterpriseFeatures } from '@/types/enterprise' - -export const getEnterpriseFeatures = () => { - return get('/enterprise-features') -} - -export const getSAMLSSOUrl = () => { - return get<{ url: string }>('/enterprise/sso/saml/login') -} - -export const getOIDCSSOUrl = () => { - return get<{ url: string; state: string }>('/enterprise/sso/oidc/login') -} diff --git a/web/service/share.ts b/web/service/share.ts index 48a99705a3..4b8ce6d3b3 100644 --- a/web/service/share.ts +++ b/web/service/share.ts @@ -11,6 +11,7 @@ import type { ConversationItem, } from '@/models/share' import type { ChatConfig } from '@/app/components/base/chat/types' +import type { SystemFeatures } from '@/types/feature' function getAction(action: 'get' | 'post' | 'del' | 'patch', isInstalledApp: boolean) { switch (action) { @@ -135,6 +136,29 @@ export const fetchAppParams = async (isInstalledApp: boolean, installedAppId = ' return (getAction('get', isInstalledApp))(getUrl('parameters', isInstalledApp, installedAppId)) as Promise } +export const fetchSystemFeatures = async () => { + return (getAction('get', false))(getUrl('system-features', false, '')) as Promise +} + +export const fetchWebSAMLSSOUrl = async (appCode: string, redirectUrl: string) => { + return (getAction('get', false))(getUrl('/enterprise/sso/saml/login', false, ''), { + params: { + app_code: appCode, + redirect_url: redirectUrl, + }, + }) as Promise<{ url: string }> +} + +export const fetchWebOIDCSSOUrl = async (appCode: string, redirectUrl: string) => { + return (getAction('get', false))(getUrl('/enterprise/sso/oidc/login', false, ''), { + params: { + app_code: appCode, + redirect_url: redirectUrl, + }, + + }) as Promise<{ url: string }> +} + export const fetchAppMeta = async (isInstalledApp: boolean, installedAppId = '') => { return (getAction('get', isInstalledApp))(getUrl('meta', isInstalledApp, installedAppId)) as Promise } diff --git a/web/service/sso.ts b/web/service/sso.ts new file mode 100644 index 0000000000..77b81fe4a6 --- /dev/null +++ b/web/service/sso.ts @@ -0,0 +1,9 @@ +import { get } from './base' + +export const getUserSAMLSSOUrl = () => { + return get<{ url: string }>('/enterprise/sso/saml/login') +} + +export const getUserOIDCSSOUrl = () => { + return get<{ url: string; state: string }>('/enterprise/sso/oidc/login') +} diff --git a/web/types/enterprise.ts b/web/types/enterprise.ts deleted file mode 100644 index 479c593c04..0000000000 --- a/web/types/enterprise.ts +++ /dev/null @@ -1,9 +0,0 @@ -export type EnterpriseFeatures = { - sso_enforced_for_signin: boolean - sso_enforced_for_signin_protocol: string -} - -export const defaultEnterpriseFeatures: EnterpriseFeatures = { - sso_enforced_for_signin: false, - sso_enforced_for_signin_protocol: '', -} diff --git a/web/types/feature.ts b/web/types/feature.ts new file mode 100644 index 0000000000..89af9d21ab --- /dev/null +++ b/web/types/feature.ts @@ -0,0 +1,13 @@ +export type SystemFeatures = { + sso_enforced_for_signin: boolean + sso_enforced_for_signin_protocol: string + sso_enforced_for_web: boolean + sso_enforced_for_web_protocol: string +} + +export const defaultSystemFeatures: SystemFeatures = { + sso_enforced_for_signin: false, + sso_enforced_for_signin_protocol: '', + 
sso_enforced_for_web: false, + sso_enforced_for_web_protocol: '', +} From 6e9066ebf4edd278c8a4c080f0dbebc9630d5a41 Mon Sep 17 00:00:00 2001 From: sino Date: Thu, 16 May 2024 11:41:24 +0800 Subject: [PATCH 090/267] feat: support doubao llm and embeding models (#4431) --- .../volcengine_maas/llm/models.py | 62 ++++++++++++++++++- .../volcengine_maas/text_embedding/models.py | 9 +++ .../text_embedding/text_embedding.py | 42 ++++++++++++- .../volcengine_maas/volcengine_maas.yaml | 47 ++++++++++++-- .../volcengine_maas/test_embedding.py | 4 ++ 5 files changed, 156 insertions(+), 8 deletions(-) create mode 100644 api/core/model_runtime/model_providers/volcengine_maas/text_embedding/models.py diff --git a/api/core/model_runtime/model_providers/volcengine_maas/llm/models.py b/api/core/model_runtime/model_providers/volcengine_maas/llm/models.py index d022f0069b..2e8ff314fc 100644 --- a/api/core/model_runtime/model_providers/volcengine_maas/llm/models.py +++ b/api/core/model_runtime/model_providers/volcengine_maas/llm/models.py @@ -1,4 +1,64 @@ ModelConfigs = { + 'Doubao-pro-4k': { + 'req_params': { + 'max_prompt_tokens': 4096, + 'max_new_tokens': 4096, + }, + 'model_properties': { + 'context_size': 4096, + 'mode': 'chat', + } + }, + 'Doubao-lite-4k': { + 'req_params': { + 'max_prompt_tokens': 4096, + 'max_new_tokens': 4096, + }, + 'model_properties': { + 'context_size': 4096, + 'mode': 'chat', + } + }, + 'Doubao-pro-32k': { + 'req_params': { + 'max_prompt_tokens': 32768, + 'max_new_tokens': 32768, + }, + 'model_properties': { + 'context_size': 32768, + 'mode': 'chat', + } + }, + 'Doubao-lite-32k': { + 'req_params': { + 'max_prompt_tokens': 32768, + 'max_new_tokens': 32768, + }, + 'model_properties': { + 'context_size': 32768, + 'mode': 'chat', + } + }, + 'Doubao-pro-128k': { + 'req_params': { + 'max_prompt_tokens': 131072, + 'max_new_tokens': 131072, + }, + 'model_properties': { + 'context_size': 131072, + 'mode': 'chat', + } + }, + 'Doubao-lite-128k': { + 'req_params': { + 'max_prompt_tokens': 131072, + 'max_new_tokens': 131072, + }, + 'model_properties': { + 'context_size': 131072, + 'mode': 'chat', + } + }, 'Skylark2-pro-4k': { 'req_params': { 'max_prompt_tokens': 4096, @@ -8,5 +68,5 @@ ModelConfigs = { 'context_size': 4096, 'mode': 'chat', } - } + }, } diff --git a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/models.py b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/models.py new file mode 100644 index 0000000000..569f89e975 --- /dev/null +++ b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/models.py @@ -0,0 +1,9 @@ +ModelConfigs = { + 'Doubao-embedding': { + 'req_params': {}, + 'model_properties': { + 'context_size': 4096, + 'max_chunks': 1, + } + }, +} diff --git a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py index d63399aec2..10b01c0d0d 100644 --- a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py @@ -1,7 +1,16 @@ import time +from decimal import Decimal from typing import Optional -from core.model_runtime.entities.model_entities import PriceType +from core.model_runtime.entities.common_entities import I18nObject +from core.model_runtime.entities.model_entities import ( + AIModelEntity, + FetchFrom, + ModelPropertyKey, + ModelType, + PriceConfig, + PriceType, 
+) from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult from core.model_runtime.errors.invoke import ( InvokeAuthorizationError, @@ -21,6 +30,7 @@ from core.model_runtime.model_providers.volcengine_maas.errors import ( RateLimitErrors, ServerUnavailableErrors, ) +from core.model_runtime.model_providers.volcengine_maas.text_embedding.models import ModelConfigs from core.model_runtime.model_providers.volcengine_maas.volc_sdk import MaasException @@ -45,7 +55,7 @@ class VolcengineMaaSTextEmbeddingModel(TextEmbeddingModel): resp = MaaSClient.wrap_exception(lambda: client.embeddings(texts)) usage = self._calc_response_usage( - model=model, credentials=credentials, tokens=resp['total_tokens']) + model=model, credentials=credentials, tokens=resp['usage']['total_tokens']) result = TextEmbeddingResult( model=model, @@ -101,6 +111,34 @@ class VolcengineMaaSTextEmbeddingModel(TextEmbeddingModel): InvokeBadRequestError: BadRequestErrors.values(), } + def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: + """ + generate custom model entities from credentials + """ + model_properties = ModelConfigs.get( + credentials['base_model_name'], {}).get('model_properties', {}).copy() + if credentials.get('context_size'): + model_properties[ModelPropertyKey.CONTEXT_SIZE] = int( + credentials.get('context_size', 4096)) + if credentials.get('max_chunks'): + model_properties[ModelPropertyKey.MAX_CHUNKS] = int( + credentials.get('max_chunks', 4096)) + entity = AIModelEntity( + model=model, + label=I18nObject(en_US=model), + model_type=ModelType.TEXT_EMBEDDING, + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_properties=model_properties, + parameter_rules=[], + pricing=PriceConfig( + input=Decimal(credentials.get('input_price', 0)), + unit=Decimal(credentials.get('unit', 0)), + currency=credentials.get('currency', "USD") + ) + ) + + return entity + def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: """ Calculate response usage diff --git a/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.yaml b/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.yaml index 4f299ecae0..d7bcbd43f8 100644 --- a/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.yaml +++ b/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.yaml @@ -76,21 +76,60 @@ model_credential_schema: en_US: Enter your Endpoint ID zh_Hans: 输入您的 Endpoint ID - variable: base_model_name - show_on: - - variable: __model_type - value: llm label: en_US: Base Model zh_Hans: 基础模型 type: select required: true options: + - label: + en_US: Doubao-pro-4k + value: Doubao-pro-4k + show_on: + - variable: __model_type + value: llm + - label: + en_US: Doubao-lite-4k + value: Doubao-lite-4k + show_on: + - variable: __model_type + value: llm + - label: + en_US: Doubao-pro-32k + value: Doubao-pro-32k + show_on: + - variable: __model_type + value: llm + - label: + en_US: Doubao-lite-32k + value: Doubao-lite-32k + show_on: + - variable: __model_type + value: llm + - label: + en_US: Doubao-pro-128k + value: Doubao-pro-128k + show_on: + - variable: __model_type + value: llm + - label: + en_US: Doubao-lite-128k + value: Doubao-lite-128k + show_on: + - variable: __model_type + value: llm - label: en_US: Skylark2-pro-4k value: Skylark2-pro-4k show_on: - variable: __model_type value: llm + - label: + en_US: Doubao-embedding + value: Doubao-embedding + show_on: + - variable: __model_type + 
value: text-embedding - label: en_US: Custom zh_Hans: 自定义 @@ -122,8 +161,6 @@ model_credential_schema: - variable: context_size required: true show_on: - - variable: __model_type - value: llm - variable: base_model_name value: Custom label: diff --git a/api/tests/integration_tests/model_runtime/volcengine_maas/test_embedding.py b/api/tests/integration_tests/model_runtime/volcengine_maas/test_embedding.py index 61e9f704af..3b399d604e 100644 --- a/api/tests/integration_tests/model_runtime/volcengine_maas/test_embedding.py +++ b/api/tests/integration_tests/model_runtime/volcengine_maas/test_embedding.py @@ -21,6 +21,7 @@ def test_validate_credentials(): 'volc_access_key_id': 'INVALID', 'volc_secret_access_key': 'INVALID', 'endpoint_id': 'INVALID', + 'base_model_name': 'Doubao-embedding', } ) @@ -32,6 +33,7 @@ def test_validate_credentials(): 'volc_access_key_id': os.environ.get('VOLC_API_KEY'), 'volc_secret_access_key': os.environ.get('VOLC_SECRET_KEY'), 'endpoint_id': os.environ.get('VOLC_EMBEDDING_ENDPOINT_ID'), + 'base_model_name': 'Doubao-embedding', }, ) @@ -47,6 +49,7 @@ def test_invoke_model(): 'volc_access_key_id': os.environ.get('VOLC_API_KEY'), 'volc_secret_access_key': os.environ.get('VOLC_SECRET_KEY'), 'endpoint_id': os.environ.get('VOLC_EMBEDDING_ENDPOINT_ID'), + 'base_model_name': 'Doubao-embedding', }, texts=[ "hello", @@ -71,6 +74,7 @@ def test_get_num_tokens(): 'volc_access_key_id': os.environ.get('VOLC_API_KEY'), 'volc_secret_access_key': os.environ.get('VOLC_SECRET_KEY'), 'endpoint_id': os.environ.get('VOLC_EMBEDDING_ENDPOINT_ID'), + 'base_model_name': 'Doubao-embedding', }, texts=[ "hello", From 3df47b7b597e640243d2de4a5f746d965650192b Mon Sep 17 00:00:00 2001 From: Han Fangyuan Date: Thu, 16 May 2024 13:04:57 +0800 Subject: [PATCH 091/267] fix: wrong category name in examples of question classifier completion prompt (#4421) --- .../workflow/nodes/question_classifier/template_prompts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/core/workflow/nodes/question_classifier/template_prompts.py b/api/core/workflow/nodes/question_classifier/template_prompts.py index 1af171762f..23bd05a809 100644 --- a/api/core/workflow/nodes/question_classifier/template_prompts.py +++ b/api/core/workflow/nodes/question_classifier/template_prompts.py @@ -65,7 +65,7 @@ Here is the chat example between human and assistant, inside User:{{"input_text": ["I recently had a great experience with your company. 
The service was prompt and the staff was very friendly."], "categories": [{{"category_id":"f5660049-284f-41a7-b301-fd24176a711c","category_name":"Customer Service"}},{{"category_id":"8d007d06-f2c9-4be5-8ff6-cd4381c13c60","category_name":"Satisfaction"}},{{"category_id":"5fbbbb18-9843-466d-9b8e-b9bfbb9482c8","category_name":"Sales"}},{{"category_id":"23623c75-7184-4a2e-8226-466c2e4631e4","category_name":"Product"}}], "classification_instructions": ["classify the text based on the feedback provided by customer"]}} Assistant:{{"keywords": ["recently", "great experience", "company", "service", "prompt", "staff", "friendly"],"category_id": "f5660049-284f-41a7-b301-fd24176a711c","category_name": "Customer Service"}} User:{{"input_text": ["bad service, slow to bring the food"], "categories": [{{"category_id":"80fb86a0-4454-4bf5-924c-f253fdd83c02","category_name":"Food Quality"}},{{"category_id":"f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name":"Experience"}},{{"category_id":"cc771f63-74e7-4c61-882e-3eda9d8ba5d7","category_name":"Price"}}], "classification_instructions": []}} -Assistant:{{"keywords": ["bad service", "slow", "food", "tip", "terrible", "waitresses"],"category_id": "f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name": "Customer Service"}} +Assistant:{{"keywords": ["bad service", "slow", "food", "tip", "terrible", "waitresses"],"category_id": "f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name": "Experience"}} ### Memory Here is the chat histories between human and assistant, inside XML tags. @@ -75,4 +75,4 @@ Here is the chat histories between human and assistant, inside Date: Thu, 16 May 2024 05:25:09 +0000 Subject: [PATCH 092/267] fix: self node type shouldn't show in the picker (#4445) --- .../workflow/nodes/_base/components/next-step/item.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/app/components/workflow/nodes/_base/components/next-step/item.tsx b/web/app/components/workflow/nodes/_base/components/next-step/item.tsx index 0b7dc526a7..865d8b6cc9 100644 --- a/web/app/components/workflow/nodes/_base/components/next-step/item.tsx +++ b/web/app/components/workflow/nodes/_base/components/next-step/item.tsx @@ -44,7 +44,7 @@ const Item = ({ return (
From 98085209927e6d7341ed79801944dce8407ed531 Mon Sep 17 00:00:00 2001 From: Vikey Chen Date: Fri, 17 May 2024 22:26:19 +0800 Subject: [PATCH 102/267] fix: copy button is always displayed on the chat logs page (#4488) --- web/app/components/app/chat/answer/index.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/app/components/app/chat/answer/index.tsx b/web/app/components/app/chat/answer/index.tsx index 49dd817255..f96c91d57b 100644 --- a/web/app/components/app/chat/answer/index.tsx +++ b/web/app/components/app/chat/answer/index.tsx @@ -362,7 +362,7 @@ const Answer: FC = ({ {!item.isOpeningStatement && ( )} {((isShowPromptLog && !isResponding) || (!item.isOpeningStatement && isShowTextToSpeech)) && ( From ba06447cd5b1c5092d2cd66ec1453aa7e57b451c Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Sat, 18 May 2024 11:27:12 +0900 Subject: [PATCH 103/267] chore: update docker-compose.yaml (#4492) --- docker/docker-compose.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index edd4106b2d..21c391bb91 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -239,7 +239,7 @@ services: QDRANT_URL: http://qdrant:6333 # The Qdrant API key. QDRANT_API_KEY: difyai123456 - # The Qdrant clinet timeout setting. + # The Qdrant client timeout setting. QDRANT_CLIENT_TIMEOUT: 20 # The Qdrant client enable gRPC mode. QDRANT_GRPC_ENABLED: 'false' From e90eccdf9294e3bdb855afafbe64a0ddf198c39a Mon Sep 17 00:00:00 2001 From: wanghl Date: Sat, 18 May 2024 10:35:01 +0800 Subject: [PATCH 104/267] =?UTF-8?q?Fix:=20HTTP=20request=20node=20PARAMS?= =?UTF-8?q?=20parameters,=20if=20':'=20appears=20in=20the=20value=E2=80=A6?= =?UTF-8?q?=20(#4403)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Haolin Wang-汪皓临 --- .../workflow/nodes/http/hooks/use-key-value-list.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/app/components/workflow/nodes/http/hooks/use-key-value-list.ts b/web/app/components/workflow/nodes/http/hooks/use-key-value-list.ts index 55ee490474..8e01055c37 100644 --- a/web/app/components/workflow/nodes/http/hooks/use-key-value-list.ts +++ b/web/app/components/workflow/nodes/http/hooks/use-key-value-list.ts @@ -6,11 +6,11 @@ import type { KeyValue } from '../types' const UNIQUE_ID_PREFIX = 'key-value-' const strToKeyValueList = (value: string) => { return value.split('\n').map((item) => { - const [key, value] = item.split(':') + const [key, ...others] = item.split(':') return { id: uniqueId(UNIQUE_ID_PREFIX), key: key.trim(), - value: value?.trim(), + value: others.join(':').trim(), } }) } From b1f003646ba5fe36e6f972fed4379de4b15fb8a7 Mon Sep 17 00:00:00 2001 From: Zixuan Cheng <61724187+Theysua@users.noreply.github.com> Date: Sat, 18 May 2024 10:35:28 +0800 Subject: [PATCH 105/267] Update docker-compose.yaml- New DEBUG variable (#4476) Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> Co-authored-by: Bowen Liang --- docker/docker-compose.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 21c391bb91..8e7c94469d 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -9,6 +9,8 @@ services: MODE: api # The log level for the application. 
Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` LOG_LEVEL: INFO + # enable DEBUG mode to output more logs + # DEBUG : true # A secret key that is used for securely signing the session cookie and encrypting sensitive information on the database. You can generate a strong key using `openssl rand -base64 42`. SECRET_KEY: sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U # The base URL of console application web frontend, refers to the Console base URL of WEB service if console domain is From aa13d140194f843fd0b3eda9ea8f27112b1b55cb Mon Sep 17 00:00:00 2001 From: Patryk Garstecki Date: Sat, 18 May 2024 04:52:48 +0200 Subject: [PATCH 106/267] Feat/chat custom disclaimer (#4306) --- api/constants/recommended_apps.json | 87 ++++++--- api/controllers/console/admin.py | 6 + api/controllers/console/app/site.py | 2 + .../console/explore/recommended_app.py | 1 + .../console/workspace/tool_providers.py | 4 + api/controllers/web/site.py | 1 + api/core/tools/tool_manager.py | 3 +- api/fields/app_fields.py | 2 + .../5fda94355fce_custom_disclaimer.py | 45 +++++ api/models/model.py | 2 + api/models/tools.py | 2 + api/services/recommended_app_service.py | 1 + api/services/tools_manage_service.py | 8 +- web/app/components/app/chat/index.tsx | 83 ++++---- .../app/overview/settings/index.tsx | 15 +- .../components/base/chat/chat/chat-input.tsx | 177 +++++++++--------- web/app/components/share/chat/index.tsx | 1 + web/app/components/share/chatbot/index.tsx | 1 + .../edit-custom-collection-modal/index.tsx | 13 ++ web/app/components/tools/types.ts | 1 + web/i18n/de-DE/app-overview.ts | 3 + web/i18n/de-DE/tools.ts | 2 + web/i18n/en-US/app-overview.ts | 3 + web/i18n/en-US/tools.ts | 2 + web/i18n/fr-FR/app-overview.ts | 3 + web/i18n/fr-FR/tools.ts | 2 + web/i18n/ja-JP/app-overview.ts | 3 + web/i18n/ja-JP/tools.ts | 2 + web/i18n/pl-PL/app-overview.ts | 3 + web/i18n/pl-PL/tools.ts | 2 + web/i18n/pt-BR/app-overview.ts | 3 + web/i18n/pt-BR/tools.ts | 2 + web/i18n/uk-UA/app-overview.ts | 3 + web/i18n/uk-UA/tools.ts | 2 + web/i18n/vi-VN/app-overview.ts | 3 + web/i18n/vi-VN/tools.ts | 2 + web/i18n/zh-Hans/app-overview.ts | 3 + web/i18n/zh-Hans/tools.ts | 2 + web/i18n/zh-Hant/app-overview.ts | 3 + web/i18n/zh-Hant/tools.ts | 2 + web/models/explore.ts | 1 + web/models/share.ts | 1 + web/types/app.ts | 2 + 43 files changed, 350 insertions(+), 159 deletions(-) create mode 100644 api/migrations/versions/5fda94355fce_custom_disclaimer.py diff --git a/api/constants/recommended_apps.json b/api/constants/recommended_apps.json index 8a1ee808e4..68c913f80a 100644 --- a/api/constants/recommended_apps.json +++ b/api/constants/recommended_apps.json @@ -24,7 +24,8 @@ "description": "Welcome to your personalized Investment Analysis Copilot service, where we delve into the depths of stock analysis to provide you with comprehensive insights. \n", "is_listed": true, "position": 0, - "privacy_policy": null + "privacy_policy": null, + "custom_disclaimer": null }, { "app": { @@ -40,7 +41,8 @@ "description": "Code interpreter, clarifying the syntax and semantics of the code.", "is_listed": true, "position": 13, - "privacy_policy": "https://dify.ai" + "privacy_policy": "https://dify.ai", + "custom_disclaimer": null }, { "app": { @@ -56,7 +58,8 @@ "description": "Hello, I am your creative partner in bringing ideas to vivid life! I can assist you in creating stunning designs by leveraging abilities of DALL\u00b7E 3. 
", "is_listed": true, "position": 4, - "privacy_policy": null + "privacy_policy": null, + "custom_disclaimer": null }, { "app": { @@ -72,7 +75,8 @@ "description": "Fully SEO Optimized Article including FAQs", "is_listed": true, "position": 1, - "privacy_policy": null + "privacy_policy": null, + "custom_disclaimer": null }, { "app": { @@ -88,7 +92,8 @@ "description": "Generate Flat Style Image", "is_listed": true, "position": 10, - "privacy_policy": null + "privacy_policy": null, + "custom_disclaimer": null }, { "app": { @@ -104,7 +109,8 @@ "description": "A multilingual translator that provides translation capabilities in multiple languages. Input the text you need to translate and select the target language.", "is_listed": true, "position": 10, - "privacy_policy": "https://dify.ai" + "privacy_policy": "https://dify.ai", + "custom_disclaimer": null }, { "app": { @@ -120,7 +126,8 @@ "description": "I am a YouTube Channel Data Analysis Copilot, I am here to provide expert data analysis tailored to your needs. ", "is_listed": true, "position": 2, - "privacy_policy": null + "privacy_policy": null, + "custom_disclaimer": null }, { "app": { @@ -136,7 +143,8 @@ "description": "Meeting minutes generator", "is_listed": true, "position": 0, - "privacy_policy": "https://dify.ai" + "privacy_policy": "https://dify.ai", + "custom_disclaimer": null }, { "app": { @@ -152,7 +160,8 @@ "description": "Tell me the main elements, I will generate a cyberpunk style image for you. ", "is_listed": true, "position": 10, - "privacy_policy": null + "privacy_policy": null, + "custom_disclaimer": null }, { "app": { @@ -168,7 +177,8 @@ "description": "Write SQL from natural language by pasting in your schema with the request.Please describe your query requirements in natural language and select the target database type.", "is_listed": true, "position": 13, - "privacy_policy": "https://dify.ai" + "privacy_policy": "https://dify.ai", + "custom_disclaimer": null }, { "app": { @@ -184,7 +194,8 @@ "description": "Welcome to your personalized travel service with Consultant! \ud83c\udf0d\u2708\ufe0f Ready to embark on a journey filled with adventure and relaxation? Let's dive into creating your unforgettable travel experience. ", "is_listed": true, "position": 3, - "privacy_policy": null + "privacy_policy": null, + "custom_disclaimer": null }, { "app": { @@ -200,7 +211,8 @@ "description": "I can answer your questions related to strategic marketing.", "is_listed": true, "position": 10, - "privacy_policy": "https://dify.ai" + "privacy_policy": "https://dify.ai", + "custom_disclaimer": null }, { "app": { @@ -216,7 +228,8 @@ "description": "A simulated front-end interviewer that tests the skill level of front-end development through questioning.", "is_listed": true, "position": 19, - "privacy_policy": "https://dify.ai" + "privacy_policy": "https://dify.ai", + "custom_disclaimer": null }, { "app": { @@ -232,7 +245,8 @@ "description": "I'm here to hear about your feature request about Dify and help you flesh it out further. 
What's on your mind?",
             "is_listed": true,
             "position": 6,
-            "privacy_policy": null
+            "privacy_policy": null,
+            "custom_disclaimer": null
         }
     ]
 },
@@ -261,7 +275,8 @@
             "description": "\u4e00\u4e2a\u6a21\u62df\u7684\u524d\u7aef\u9762\u8bd5\u5b98\uff0c\u901a\u8fc7\u63d0\u95ee\u7684\u65b9\u5f0f\u5bf9\u524d\u7aef\u5f00\u53d1\u7684\u6280\u80fd\u6c34\u5e73\u8fdb\u884c\u68c0\u9a8c\u3002",
             "is_listed": true,
             "position": 20,
-            "privacy_policy": null
+            "privacy_policy": null,
+            "custom_disclaimer": null
         },
         {
             "app": {
@@ -277,7 +292,8 @@
             "description": "\u8f93\u5165\u76f8\u5173\u5143\u7d20\uff0c\u4e3a\u4f60\u751f\u6210\u6241\u5e73\u63d2\u753b\u98ce\u683c\u7684\u5c01\u9762\u56fe\u7247",
             "is_listed": true,
             "position": 10,
-            "privacy_policy": null
+            "privacy_policy": null,
+            "custom_disclaimer": null
         },
         {
             "app": {
@@ -293,7 +309,8 @@
             "description": "\u4e00\u4e2a\u591a\u8bed\u8a00\u7ffb\u8bd1\u5668\uff0c\u63d0\u4f9b\u591a\u79cd\u8bed\u8a00\u7ffb\u8bd1\u80fd\u529b\uff0c\u8f93\u5165\u4f60\u9700\u8981\u7ffb\u8bd1\u7684\u6587\u672c\uff0c\u9009\u62e9\u76ee\u6807\u8bed\u8a00\u5373\u53ef\u3002\u63d0\u793a\u8bcd\u6765\u81ea\u5b9d\u7389\u3002",
             "is_listed": true,
             "position": 10,
-            "privacy_policy": null
+            "privacy_policy": null,
+            "custom_disclaimer": null
         },
         {
             "app": {
@@ -309,7 +326,8 @@
             "description": "\u6211\u5c06\u5e2e\u52a9\u4f60\u628a\u81ea\u7136\u8bed\u8a00\u8f6c\u5316\u6210\u6307\u5b9a\u7684\u6570\u636e\u5e93\u67e5\u8be2 SQL \u8bed\u53e5\uff0c\u8bf7\u5728\u4e0b\u65b9\u8f93\u5165\u4f60\u9700\u8981\u67e5\u8be2\u7684\u6761\u4ef6\uff0c\u5e76\u9009\u62e9\u76ee\u6807\u6570\u636e\u5e93\u7c7b\u578b\u3002",
             "is_listed": true,
             "position": 12,
-            "privacy_policy": null
+            "privacy_policy": null,
+            "custom_disclaimer": null
         },
         {
             "app": {
@@ -325,7 +343,8 @@
             "description": "\u9610\u660e\u4ee3\u7801\u7684\u8bed\u6cd5\u548c\u8bed\u4e49\u3002",
             "is_listed": true,
             "position": 2,
-            "privacy_policy": "https://dify.ai"
+            "privacy_policy": "https://dify.ai",
+            "custom_disclaimer": null
         },
         {
             "app": {
@@ -341,7 +360,8 @@
             "description": "\u8f93\u5165\u76f8\u5173\u5143\u7d20\uff0c\u4e3a\u4f60\u751f\u6210\u8d5b\u535a\u670b\u514b\u98ce\u683c\u7684\u63d2\u753b",
             "is_listed": true,
             "position": 10,
-            "privacy_policy": null
+            "privacy_policy": null,
+            "custom_disclaimer": null
         },
         {
             "app": {
@@ -357,7 +377,8 @@
             "description": "\u6211\u662f\u4e00\u540dSEO\u4e13\u5bb6\uff0c\u53ef\u4ee5\u6839\u636e\u60a8\u63d0\u4f9b\u7684\u6807\u9898\u3001\u5173\u952e\u8bcd\u3001\u76f8\u5173\u4fe1\u606f\u6765\u6279\u91cf\u751f\u6210SEO\u6587\u7ae0\u3002",
             "is_listed": true,
             "position": 10,
-            "privacy_policy": null
+            "privacy_policy": null,
+            "custom_disclaimer": null
         },
         {
             "app": {
@@ -373,7 +394,8 @@
             "description": "\u5e2e\u4f60\u91cd\u65b0\u7ec4\u7ec7\u548c\u8f93\u51fa\u6df7\u4e71\u590d\u6742\u7684\u4f1a\u8bae\u7eaa\u8981\u3002",
             "is_listed": true,
             "position": 6,
-            "privacy_policy": "https://dify.ai"
+            "privacy_policy": "https://dify.ai",
+            "custom_disclaimer": null
         },
         {
             "app": {
@@ -389,7 +411,8 @@
             "description": "\u6b22\u8fce\u4f7f\u7528\u60a8\u7684\u4e2a\u6027\u5316\u7f8e\u80a1\u6295\u8d44\u5206\u6790\u52a9\u624b\uff0c\u5728\u8fd9\u91cc\u6211\u4eec\u6df1\u5165\u7684\u8fdb\u884c\u80a1\u7968\u5206\u6790\uff0c\u4e3a\u60a8\u63d0\u4f9b\u5168\u9762\u7684\u6d1e\u5bdf\u3002",
             "is_listed": true,
             "position": 0,
-            "privacy_policy": null
+            "privacy_policy": null,
+            "custom_disclaimer": null
         },
         {
             "app": {
@@ -405,7 +428,8 @@
             "description": "\u60a8\u597d\uff0c\u6211\u662f\u60a8\u7684\u521b\u610f\u4f19\u4f34\uff0c\u5c06\u5e2e\u52a9\u60a8\u5c06\u60f3\u6cd5\u751f\u52a8\u5730\u5b9e\u73b0\uff01\u6211\u53ef\u4ee5\u534f\u52a9\u60a8\u5229\u7528DALL\u00b7E 3\u7684\u80fd\u529b\u521b\u9020\u51fa\u4ee4\u4eba\u60ca\u53f9\u7684\u8bbe\u8ba1\u3002",
             "is_listed": true,
             "position": 4,
-            "privacy_policy": null
+            "privacy_policy": null,
+            "custom_disclaimer": null
         },
         {
             "app": {
@@ -421,7 +445,8 @@
             "description": "\u7ffb\u8bd1\u4e13\u5bb6\uff1a\u63d0\u4f9b\u4e2d\u82f1\u6587\u4e92\u8bd1",
             "is_listed": true,
             "position": 4,
-            "privacy_policy": "https://dify.ai"
+            "privacy_policy": "https://dify.ai",
+            "custom_disclaimer": null
         },
         {
             "app": {
@@ -437,7 +462,8 @@
             "description": "\u60a8\u7684\u79c1\u4eba\u5b66\u4e60\u5bfc\u5e08\uff0c\u5e2e\u60a8\u5236\u5b9a\u5b66\u4e60\u8ba1\u5212\u5e76\u8f85\u5bfc",
             "is_listed": true,
             "position": 26,
-            "privacy_policy": "https://dify.ai"
+            "privacy_policy": "https://dify.ai",
+            "custom_disclaimer": null
         },
         {
             "app": {
@@ -453,7 +479,8 @@
             "description": "\u5e2e\u4f60\u64b0\u5199\u8bba\u6587\u6587\u732e\u7efc\u8ff0",
             "is_listed": true,
             "position": 7,
-            "privacy_policy": "https://dify.ai"
+            "privacy_policy": "https://dify.ai",
+            "custom_disclaimer": null
         },
         {
             "app": {
@@ -469,7 +496,8 @@
             "description": "\u4f60\u597d\uff0c\u544a\u8bc9\u6211\u60a8\u60f3\u5206\u6790\u7684 YouTube \u9891\u9053\uff0c\u6211\u5c06\u4e3a\u60a8\u6574\u7406\u4e00\u4efd\u5b8c\u6574\u7684\u6570\u636e\u5206\u6790\u62a5\u544a\u3002",
             "is_listed": true,
             "position": 0,
-            "privacy_policy": null
+            "privacy_policy": null,
+            "custom_disclaimer": null
         },
         {
             "app": {
@@ -485,7 +513,8 @@
             "description": "\u6b22\u8fce\u4f7f\u7528\u60a8\u7684\u4e2a\u6027\u5316\u65c5\u884c\u670d\u52a1\u987e\u95ee\uff01\ud83c\udf0d\u2708\ufe0f \u51c6\u5907\u597d\u8e0f\u4e0a\u4e00\u6bb5\u5145\u6ee1\u5192\u9669\u4e0e\u653e\u677e\u7684\u65c5\u7a0b\u4e86\u5417\uff1f\u8ba9\u6211\u4eec\u4e00\u8d77\u6df1\u5165\u6253\u9020\u60a8\u96be\u5fd8\u7684\u65c5\u884c\u4f53\u9a8c\u5427\u3002",
             "is_listed": true,
             "position": 0,
-            "privacy_policy": null
+            "privacy_policy": null,
+            "custom_disclaimer": null
         }
     ]
 },
diff --git a/api/controllers/console/admin.py b/api/controllers/console/admin.py
index aaa737f83a..028be5de54 100644
--- a/api/controllers/console/admin.py
+++ b/api/controllers/console/admin.py
@@ -48,6 +48,7 @@ class InsertExploreAppListApi(Resource):
         parser.add_argument('desc', type=str, location='json')
         parser.add_argument('copyright', type=str, location='json')
         parser.add_argument('privacy_policy', type=str, location='json')
+        parser.add_argument('custom_disclaimer', type=str, location='json')
         parser.add_argument('language', type=supported_language, required=True, nullable=False, location='json')
         parser.add_argument('category', type=str, required=True, nullable=False, location='json')
         parser.add_argument('position', type=int, required=True, nullable=False, location='json')
@@ -62,6 +63,7 @@ class InsertExploreAppListApi(Resource):
             desc = args['desc'] if args['desc'] else ''
             copy_right = args['copyright'] if args['copyright'] else ''
             privacy_policy = args['privacy_policy'] if args['privacy_policy'] else ''
+            custom_disclaimer = args['custom_disclaimer'] if args['custom_disclaimer'] else ''
         else:
             desc = site.description if site.description else \
                 args['desc'] if args['desc'] else ''
             copy_right = site.copyright if site.copyright else \
                 args['copyright'] if args['copyright'] else ''
             privacy_policy = site.privacy_policy if site.privacy_policy else \
                 args['privacy_policy'] if args['privacy_policy'] else ''
+            custom_disclaimer = site.custom_disclaimer if site.custom_disclaimer else \
+                args['custom_disclaimer'] if args['custom_disclaimer'] else ''

         recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == args['app_id']).first()

@@ -78,6 +82,7 @@ class InsertExploreAppListApi(Resource):
                 description=desc,
                 copyright=copy_right,
                 privacy_policy=privacy_policy,
+                custom_disclaimer=custom_disclaimer,
                 language=args['language'],
                 category=args['category'],
                 position=args['position']
@@ -93,6 +98,7 @@ class InsertExploreAppListApi(Resource):
             recommended_app.description = desc
             recommended_app.copyright = copy_right
             recommended_app.privacy_policy = privacy_policy
+            recommended_app.custom_disclaimer = custom_disclaimer
             recommended_app.language = args['language']
             recommended_app.category = args['category']
             recommended_app.position = args['position']
diff --git a/api/controllers/console/app/site.py b/api/controllers/console/app/site.py
index 256824981e..592009fd88 100644
--- a/api/controllers/console/app/site.py
+++ b/api/controllers/console/app/site.py
@@ -23,6 +23,7 @@ def parse_app_site_args():
     parser.add_argument('customize_domain', type=str, required=False, location='json')
     parser.add_argument('copyright', type=str, required=False, location='json')
     parser.add_argument('privacy_policy', type=str, required=False, location='json')
+    parser.add_argument('custom_disclaimer', type=str, required=False, location='json')
     parser.add_argument('customize_token_strategy', type=str, choices=['must', 'allow', 'not_allow'],
                         required=False,
                         location='json')
@@ -56,6 +57,7 @@ class AppSite(Resource):
             'customize_domain',
             'copyright',
             'privacy_policy',
+            'custom_disclaimer',
             'customize_token_strategy',
             'prompt_public'
         ]:
diff --git a/api/controllers/console/explore/recommended_app.py b/api/controllers/console/explore/recommended_app.py
index 2787b7cdba..6e10e2ec92 100644
--- a/api/controllers/console/explore/recommended_app.py
+++ b/api/controllers/console/explore/recommended_app.py
@@ -21,6 +21,7 @@ recommended_app_fields = {
     'description': fields.String(attribute='description'),
     'copyright': fields.String,
     'privacy_policy': fields.String,
+    'custom_disclaimer': fields.String,
     'category': fields.String,
     'position': fields.Integer,
     'is_listed': fields.Boolean
diff --git a/api/controllers/console/workspace/tool_providers.py b/api/controllers/console/workspace/tool_providers.py
index 3057de4559..63f4613e7d 100644
--- a/api/controllers/console/workspace/tool_providers.py
+++ b/api/controllers/console/workspace/tool_providers.py
@@ -116,6 +116,7 @@ class ToolApiProviderAddApi(Resource):
         parser.add_argument('provider', type=str, required=True, nullable=False, location='json')
         parser.add_argument('icon', type=dict, required=True, nullable=False, location='json')
         parser.add_argument('privacy_policy', type=str, required=False, nullable=True, location='json')
+        parser.add_argument('custom_disclaimer', type=str, required=False, nullable=True, location='json')

         args = parser.parse_args()

@@ -128,6 +129,7 @@ class ToolApiProviderAddApi(Resource):
             args['schema_type'],
             args['schema'],
             args.get('privacy_policy', ''),
+            args.get('custom_disclaimer', ''),
         )

 class ToolApiProviderGetRemoteSchemaApi(Resource):
@@ -186,6 +188,7 @@ class ToolApiProviderUpdateApi(Resource):
         parser.add_argument('original_provider', type=str, required=True, nullable=False, location='json')
         parser.add_argument('icon', type=dict, required=True, nullable=False, location='json')
         parser.add_argument('privacy_policy', type=str, required=True, nullable=True, location='json')
+        parser.add_argument('custom_disclaimer', type=str, required=True, nullable=True, location='json')

         args = parser.parse_args()

@@ -199,6 +202,7 @@ class ToolApiProviderUpdateApi(Resource):
             args['schema_type'],
             args['schema'],
             args['privacy_policy'],
+            args['custom_disclaimer'],
         )

 class ToolApiProviderDeleteApi(Resource):
diff --git a/api/controllers/web/site.py b/api/controllers/web/site.py
index 49b0a8bfc0..a084b56b08 100644
--- a/api/controllers/web/site.py
+++ b/api/controllers/web/site.py
@@ -31,6 +31,7 @@ class AppSiteApi(WebApiResource):
         'description': fields.String,
         'copyright': fields.String,
         'privacy_policy': fields.String,
+        'custom_disclaimer': fields.String,
         'default_language': fields.String,
         'prompt_public': fields.Boolean
     }
diff --git a/api/core/tools/tool_manager.py b/api/core/tools/tool_manager.py
index d46f1f22a5..dd28055932 100644
--- a/api/core/tools/tool_manager.py
+++ b/api/core/tools/tool_manager.py
@@ -487,7 +487,8 @@ class ToolManager:
                 'icon': icon,
                 'description': provider.description,
                 'credentials': masked_credentials,
-                'privacy_policy': provider.privacy_policy
+                'privacy_policy': provider.privacy_policy,
+                'custom_disclaimer': provider.custom_disclaimer
             })

     @classmethod
diff --git a/api/fields/app_fields.py b/api/fields/app_fields.py
index c7cfdd7939..212c3e7f17 100644
--- a/api/fields/app_fields.py
+++ b/api/fields/app_fields.py
@@ -113,6 +113,7 @@ site_fields = {
     'customize_domain': fields.String,
     'copyright': fields.String,
     'privacy_policy': fields.String,
+    'custom_disclaimer': fields.String,
     'customize_token_strategy': fields.String,
     'prompt_public': fields.Boolean,
     'app_base_url': fields.String,
@@ -146,6 +147,7 @@ app_site_fields = {
     'customize_domain': fields.String,
     'copyright': fields.String,
     'privacy_policy': fields.String,
+    'custom_disclaimer': fields.String,
     'customize_token_strategy': fields.String,
     'prompt_public': fields.Boolean
 }
diff --git a/api/migrations/versions/5fda94355fce_custom_disclaimer.py b/api/migrations/versions/5fda94355fce_custom_disclaimer.py
new file mode 100644
index 0000000000..a5e2e6ee63
--- /dev/null
+++ b/api/migrations/versions/5fda94355fce_custom_disclaimer.py
@@ -0,0 +1,45 @@
+"""Custom Disclaimer
+
+Revision ID: 5fda94355fce
+Revises: 47cc7df8c4f3
+Create Date: 2024-05-10 20:04:45.806549
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+import models as models
+
+# revision identifiers, used by Alembic.
+revision = '5fda94355fce'
+down_revision = '47cc7df8c4f3'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table('recommended_apps', schema=None) as batch_op:
+        batch_op.add_column(sa.Column('custom_disclaimer', sa.String(length=255), nullable=False))
+
+    with op.batch_alter_table('sites', schema=None) as batch_op:
+        batch_op.add_column(sa.Column('custom_disclaimer', sa.String(length=255), nullable=True))
+
+    with op.batch_alter_table('tool_api_providers', schema=None) as batch_op:
+        batch_op.add_column(sa.Column('custom_disclaimer', sa.String(length=255), nullable=True))
+
+    # ### end Alembic commands ###
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table('tool_api_providers', schema=None) as batch_op:
+        batch_op.drop_column('custom_disclaimer')
+
+    with op.batch_alter_table('sites', schema=None) as batch_op:
+        batch_op.drop_column('custom_disclaimer')
+
+    with op.batch_alter_table('recommended_apps', schema=None) as batch_op:
+        batch_op.drop_column('custom_disclaimer')
+
+    # ### end Alembic commands ###
diff --git a/api/models/model.py b/api/models/model.py
index 59b88eb3b1..befc8539b9 100644
--- a/api/models/model.py
+++ b/api/models/model.py
@@ -435,6 +435,7 @@ class RecommendedApp(db.Model):
     description = db.Column(db.JSON, nullable=False)
     copyright = db.Column(db.String(255), nullable=False)
     privacy_policy = db.Column(db.String(255), nullable=False)
+    custom_disclaimer = db.Column(db.String(255), nullable=False)
     category = db.Column(db.String(255), nullable=False)
     position = db.Column(db.Integer, nullable=False, default=0)
     is_listed = db.Column(db.Boolean, nullable=False, default=True)
@@ -1042,6 +1043,7 @@ class Site(db.Model):
     default_language = db.Column(db.String(255), nullable=False)
     copyright = db.Column(db.String(255))
     privacy_policy = db.Column(db.String(255))
+    custom_disclaimer = db.Column(db.String(255))
     customize_domain = db.Column(db.String(255))
     customize_token_strategy = db.Column(db.String(255), nullable=False)
     prompt_public = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
diff --git a/api/models/tools.py b/api/models/tools.py
index 8a133679e0..64fc334549 100644
--- a/api/models/tools.py
+++ b/api/models/tools.py
@@ -107,6 +107,8 @@ class ApiToolProvider(db.Model):
     credentials_str = db.Column(db.Text, nullable=False)
     # privacy policy
     privacy_policy = db.Column(db.String(255), nullable=True)
+    # custom_disclaimer
+    custom_disclaimer = db.Column(db.String(255), nullable=True)

     created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
     updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
diff --git a/api/services/recommended_app_service.py b/api/services/recommended_app_service.py
index 3d36fb80af..def39569ea 100644
--- a/api/services/recommended_app_service.py
+++ b/api/services/recommended_app_service.py
@@ -86,6 +86,7 @@ class RecommendedAppService:
                 'description': site.description,
                 'copyright': site.copyright,
                 'privacy_policy': site.privacy_policy,
+                'custom_disclaimer': site.custom_disclaimer,
                 'category': recommended_app.category,
                 'position': recommended_app.position,
                 'is_listed': recommended_app.is_listed
diff --git a/api/services/tools_manage_service.py b/api/services/tools_manage_service.py
index ec4e89bd14..7100d79ee0 100644
--- a/api/services/tools_manage_service.py
+++ b/api/services/tools_manage_service.py
@@ -177,7 +177,7 @@ class ToolManageService:
     @staticmethod
     def create_api_tool_provider(
         user_id: str, tenant_id: str, provider_name: str, icon: dict, credentials: dict,
-        schema_type: str, schema: str, privacy_policy: str
+        schema_type: str, schema: str, privacy_policy: str, custom_disclaimer: str
     ):
         """
         create api tool provider
@@ -213,7 +213,8 @@ class ToolManageService:
             schema_type_str=schema_type,
             tools_str=json.dumps(jsonable_encoder(tool_bundles)),
             credentials_str={},
-            privacy_policy=privacy_policy
+            privacy_policy=privacy_policy,
+            custom_disclaimer=custom_disclaimer
         )

         if 'auth_type' not in credentials:
@@ -364,7 +365,7 @@ class ToolManageService:
     @staticmethod
     def update_api_tool_provider(
         user_id: str, tenant_id: str, provider_name: str, original_provider: str, icon: dict, credentials: dict,
-        schema_type: str, schema: str, privacy_policy: str
+        schema_type: str, schema: str, privacy_policy: str, custom_disclaimer: str
     ):
         """
         update api tool provider
@@ -394,6 +395,7 @@ class ToolManageService:
         provider.schema_type_str = ApiProviderSchemaType.OPENAPI.value
         provider.tools_str = json.dumps(jsonable_encoder(tool_bundles))
         provider.privacy_policy = privacy_policy
+        provider.custom_disclaimer = custom_disclaimer

         if 'auth_type' not in credentials:
             raise ValueError('auth_type is required')
diff --git a/web/app/components/app/chat/index.tsx b/web/app/components/app/chat/index.tsx
index 6187ec684f..d861ddb2de 100644
--- a/web/app/components/app/chat/index.tsx
+++ b/web/app/components/app/chat/index.tsx
@@ -67,6 +67,7 @@ export type IChatProps = {
   visionConfig?: VisionSettings
   supportAnnotation?: boolean
   allToolIcons?: Record
+  customDisclaimer?: string
 }

 const Chat: FC = ({
@@ -102,6 +103,7 @@ const Chat: FC = ({
   supportAnnotation,
   onChatListChange,
   allToolIcons,
+  customDisclaimer,
 }) => {
   const { t } = useTranslation()
   const { notify } = useContext(ToastContext)
@@ -358,44 +360,46 @@
             )}
-
-          {visionConfig?.enabled && (
-            <>
-
-              = visionConfig.number_limits}
-              />
-
-
-
-
-
-          )}
-