Mirror of https://github.com/langgenius/dify.git
Merge commit d6b9648332

@@ -4,6 +4,7 @@ on:
  pull_request:
    branches:
      - main
      - plugins/beta
    paths:
      - api/**
      - docker/**
@@ -47,15 +48,9 @@ jobs:
      - name: Run Unit tests
        run: poetry run -C api bash dev/pytest/pytest_unit_tests.sh

      - name: Run ModelRuntime
        run: poetry run -C api bash dev/pytest/pytest_model_runtime.sh

      - name: Run dify config tests
        run: poetry run -C api python dev/pytest/pytest_config_tests.py

      - name: Run Tool
        run: poetry run -C api bash dev/pytest/pytest_tools.sh

      - name: Run mypy
        run: |
          pushd api

@@ -5,7 +5,7 @@ on:
    branches:
      - "main"
      - "deploy/dev"
      - "dev/plugin-deploy"
      - "plugins/beta"
  release:
    types: [published]

@@ -4,6 +4,7 @@ on:
  pull_request:
    branches:
      - main
      - plugins/beta
    paths:
      - api/migrations/**
      - .github/workflows/db-migration-test.yml

@@ -4,6 +4,7 @@ on:
  pull_request:
    branches:
      - main
      - plugins/beta

concurrency:
  group: style-${{ github.head_ref || github.run_id }}
@@ -66,6 +67,12 @@ jobs:
        with:
          files: web/**

      - name: Install pnpm
        uses: pnpm/action-setup@v4
        with:
          version: 10
          run_install: false

      - name: Setup NodeJS
        uses: actions/setup-node@v4
        if: steps.changed-files.outputs.any_changed == 'true'
@@ -80,7 +87,8 @@ jobs:

      - name: Web style check
        if: steps.changed-files.outputs.any_changed == 'true'
        run: pnpm run lint
        run: echo "${{ steps.changed-files.outputs.all_changed_files }}" | sed 's|web/||g' | xargs pnpm eslint # wait for next lint support eslint v9

  docker-compose-template:
    name: Docker Compose Template
@@ -134,7 +142,7 @@ jobs:
        if: steps.changed-files.outputs.any_changed == 'true'
        env:
          BASH_SEVERITY: warning
          DEFAULT_BRANCH: main
          DEFAULT_BRANCH: plugins/beta
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          IGNORE_GENERATED_FILES: true
          IGNORE_GITIGNORED_FILES: true

@@ -422,8 +422,9 @@ POSITION_PROVIDER_INCLUDES=
POSITION_PROVIDER_EXCLUDES=

# Plugin configuration
PLUGIN_API_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1
PLUGIN_API_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi
PLUGIN_API_URL=http://127.0.0.1:5002
PLUGIN_DAEMON_URL=http://127.0.0.1:5002
PLUGIN_REMOTE_INSTALL_PORT=5003
PLUGIN_REMOTE_INSTALL_HOST=localhost
PLUGIN_MAX_PACKAGE_SIZE=15728640

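A quick sanity check on the size limit: PLUGIN_MAX_PACKAGE_SIZE above is exactly 15 MiB expressed in bytes, which a one-line Python check confirms:

# 15 MiB in bytes equals the PLUGIN_MAX_PACKAGE_SIZE value above.
assert 15 * 1024 * 1024 == 15728640
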
@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
    CURRENT_VERSION: str = Field(
        description="Dify version",
        default="0.15.0",
        default="1.0.0-beta.1",
    )

    COMMIT_SHA: str = Field(

@@ -107,11 +107,46 @@ class LargeLanguageModel(AIModel):
        content_list = []
        usage = LLMUsage.empty_usage()
        system_fingerprint = None
        tools_calls: list[AssistantPromptMessage.ToolCall] = []

        def increase_tool_call(new_tool_calls: list[AssistantPromptMessage.ToolCall]):
            def get_tool_call(tool_name: str):
                if not tool_name:
                    return tools_calls[-1]

                tool_call = next(
                    (tool_call for tool_call in tools_calls if tool_call.function.name == tool_name), None
                )
                if tool_call is None:
                    tool_call = AssistantPromptMessage.ToolCall(
                        id="",
                        type="",
                        function=AssistantPromptMessage.ToolCall.ToolCallFunction(name=tool_name, arguments=""),
                    )
                    tools_calls.append(tool_call)

                return tool_call

            for new_tool_call in new_tool_calls:
                # get tool call
                tool_call = get_tool_call(new_tool_call.function.name)
                # update tool call
                if new_tool_call.id:
                    tool_call.id = new_tool_call.id
                if new_tool_call.type:
                    tool_call.type = new_tool_call.type
                if new_tool_call.function.name:
                    tool_call.function.name = new_tool_call.function.name
                if new_tool_call.function.arguments:
                    tool_call.function.arguments += new_tool_call.function.arguments

        for chunk in result:
            if isinstance(chunk.delta.message.content, str):
                content += chunk.delta.message.content
            elif isinstance(chunk.delta.message.content, list):
                content_list.extend(chunk.delta.message.content)
            if chunk.delta.message.tool_calls:
                increase_tool_call(chunk.delta.message.tool_calls)

            usage = chunk.delta.usage or LLMUsage.empty_usage()
            system_fingerprint = chunk.system_fingerprint
@@ -120,7 +155,10 @@ class LargeLanguageModel(AIModel):
        result = LLMResult(
            model=model,
            prompt_messages=prompt_messages,
            message=AssistantPromptMessage(content=content or content_list),
            message=AssistantPromptMessage(
                content=content or content_list,
                tool_calls=tools_calls,
            ),
            usage=usage,
            system_fingerprint=system_fingerprint,
        )

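For illustration, the accumulation pattern above reduces to a self-contained sketch; the ToolCall and Fn dataclasses below are stand-ins for this sketch only, not the real AssistantPromptMessage classes. Streamed deltas carry the function name on the first chunk and argument fragments afterwards, and the merger concatenates the fragments into one complete call:

from dataclasses import dataclass, field


@dataclass
class Fn:
    name: str = ""
    arguments: str = ""


@dataclass
class ToolCall:
    id: str = ""
    type: str = ""
    function: Fn = field(default_factory=Fn)


calls: list[ToolCall] = []


def merge(delta: ToolCall) -> None:
    # A delta without a name targets the most recent call, mirroring get_tool_call.
    if delta.function.name:
        target = next((c for c in calls if c.function.name == delta.function.name), None)
    else:
        target = calls[-1] if calls else None
    if target is None:
        target = ToolCall(function=Fn(name=delta.function.name))
        calls.append(target)
    target.id = delta.id or target.id
    target.function.arguments += delta.function.arguments  # fragments concatenate


merge(ToolCall(id="c1", function=Fn(name="get_weather", arguments='{"city": "')))
merge(ToolCall(function=Fn(arguments='SF"}')))
assert calls[0].function.arguments == '{"city": "SF"}'
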
@@ -50,7 +50,7 @@ class WordExtractor(BaseExtractor):
            self.web_path = self.file_path
            # TODO: use a better way to handle the file
            self.temp_file = tempfile.NamedTemporaryFile()  # noqa: SIM115
            self.temp_file = tempfile.NamedTemporaryFile()  # noqa SIM115
            self.temp_file.write(r.content)
            self.file_path = self.temp_file.name
        elif not os.path.isfile(self.file_path):

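For illustration, the download-to-temp-file pattern above can be sketched standalone; the requests call and the helper name are this sketch's own assumptions, not the extractor's exact code:

import tempfile

import requests  # assumed dependency for this sketch


def download_to_temp(url: str):
    # NamedTemporaryFile deletes the file once the wrapper is closed or
    # garbage-collected, which is why WordExtractor keeps self.temp_file around.
    r = requests.get(url)
    tmp = tempfile.NamedTemporaryFile()  # noqa: SIM115
    tmp.write(r.content)
    tmp.flush()
    return tmp, tmp.name  # caller keeps tmp alive and reads from tmp.name
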
@@ -48,6 +48,6 @@ class TimezoneConversionTool(BuiltinTool):
            datetime_with_tz = input_timezone.localize(local_time)
            # timezone convert
            converted_datetime = datetime_with_tz.astimezone(output_timezone)
            return converted_datetime.strftime(format=time_format)
            return converted_datetime.strftime(format=time_format)  # type: ignore
        except Exception as e:
            raise ToolInvokeError(str(e))

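For reference, the localize/astimezone pair used above behaves like this minimal pytz sketch (the dates and zones are illustrative):

from datetime import datetime

import pytz

input_timezone = pytz.timezone("America/New_York")
output_timezone = pytz.timezone("Asia/Shanghai")

# Attach a zone to a naive datetime, then convert: New York is UTC-5 in
# January and Shanghai UTC+8, so the wall clock moves forward 13 hours.
naive = datetime(2024, 1, 1, 9, 0, 0)
converted = input_timezone.localize(naive).astimezone(output_timezone)
assert converted.strftime("%Y-%m-%d %H:%M:%S") == "2024-01-01 22:00:00"
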
@@ -5,4 +5,7 @@ from core.tools.builtin_tool.provider import BuiltinToolProviderController

class WebscraperProvider(BuiltinToolProviderController):
    def _validate_credentials(self, user_id: str, credentials: dict[str, Any]) -> None:
        """
        Validate credentials
        """
        pass

@@ -7,6 +7,12 @@ env =
    CODE_EXECUTION_API_KEY = dify-sandbox
    CODE_EXECUTION_ENDPOINT = http://127.0.0.1:8194
    CODE_MAX_STRING_LENGTH = 80000
    PLUGIN_API_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi
    PLUGIN_DAEMON_URL=http://127.0.0.1:5002
    PLUGIN_MAX_PACKAGE_SIZE=15728640
    INNER_API_KEY_FOR_PLUGIN=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1
    MARKETPLACE_ENABLED=true
    MARKETPLACE_API_URL=https://marketplace.dify.ai
    FIRECRAWL_API_KEY = fc-
    FIREWORKS_API_KEY = fw_aaaaaaaaaaaaaaaaaaaa
    GOOGLE_API_KEY = abcdefghijklmnopqrstuvwxyz

@@ -0,0 +1,44 @@
import os
from collections.abc import Callable

import pytest

# import monkeypatch
from _pytest.monkeypatch import MonkeyPatch

from core.plugin.manager.model import PluginModelManager
from tests.integration_tests.model_runtime.__mock.plugin_model import MockModelClass


def mock_plugin_daemon(
    monkeypatch: MonkeyPatch,
) -> Callable[[], None]:
    """
    mock openai module

    :param monkeypatch: pytest monkeypatch fixture
    :return: unpatch function
    """

    def unpatch() -> None:
        monkeypatch.undo()

    monkeypatch.setattr(PluginModelManager, "invoke_llm", MockModelClass.invoke_llm)
    monkeypatch.setattr(PluginModelManager, "fetch_model_providers", MockModelClass.fetch_model_providers)
    monkeypatch.setattr(PluginModelManager, "get_model_schema", MockModelClass.get_model_schema)

    return unpatch


MOCK = os.getenv("MOCK_SWITCH", "false").lower() == "true"


@pytest.fixture
def setup_model_mock(monkeypatch):
    if MOCK:
        unpatch = mock_plugin_daemon(monkeypatch)

    yield

    if MOCK:
        unpatch()

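A test opts into the mock simply by requesting the fixture; a minimal hypothetical usage (the test name and body are illustrative):

def test_invoke_llm_is_mocked(setup_model_mock):
    # With MOCK_SWITCH=true, PluginModelManager.invoke_llm now returns the
    # MockModelClass stream instead of calling a real plugin daemon; the
    # monkeypatch is undone automatically after the test via unpatch().
    ...
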
@@ -0,0 +1,249 @@
import datetime
import uuid
from collections.abc import Generator, Sequence
from decimal import Decimal
from json import dumps

# import monkeypatch
from typing import Optional

from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.message_entities import AssistantPromptMessage, PromptMessage, PromptMessageTool
from core.model_runtime.entities.model_entities import (
    AIModelEntity,
    FetchFrom,
    ModelFeature,
    ModelPropertyKey,
    ModelType,
)
from core.model_runtime.entities.provider_entities import ConfigurateMethod, ProviderEntity
from core.plugin.entities.plugin_daemon import PluginModelProviderEntity
from core.plugin.manager.model import PluginModelManager


class MockModelClass(PluginModelManager):
    def fetch_model_providers(self, tenant_id: str) -> Sequence[PluginModelProviderEntity]:
        """
        Fetch model providers for the given tenant.
        """
        return [
            PluginModelProviderEntity(
                id=uuid.uuid4().hex,
                created_at=datetime.datetime.now(),
                updated_at=datetime.datetime.now(),
                provider="openai",
                tenant_id=tenant_id,
                plugin_unique_identifier="langgenius/openai/openai",
                plugin_id="langgenius/openai",
                declaration=ProviderEntity(
                    provider="openai",
                    label=I18nObject(
                        en_US="OpenAI",
                        zh_Hans="OpenAI",
                    ),
                    description=I18nObject(
                        en_US="OpenAI",
                        zh_Hans="OpenAI",
                    ),
                    icon_small=I18nObject(
                        en_US="https://example.com/icon_small.png",
                        zh_Hans="https://example.com/icon_small.png",
                    ),
                    icon_large=I18nObject(
                        en_US="https://example.com/icon_large.png",
                        zh_Hans="https://example.com/icon_large.png",
                    ),
                    supported_model_types=[ModelType.LLM],
                    configurate_methods=[ConfigurateMethod.PREDEFINED_MODEL],
                    models=[
                        AIModelEntity(
                            model="gpt-3.5-turbo",
                            label=I18nObject(
                                en_US="gpt-3.5-turbo",
                                zh_Hans="gpt-3.5-turbo",
                            ),
                            model_type=ModelType.LLM,
                            fetch_from=FetchFrom.PREDEFINED_MODEL,
                            model_properties={},
                            features=[ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL],
                        ),
                        AIModelEntity(
                            model="gpt-3.5-turbo-instruct",
                            label=I18nObject(
                                en_US="gpt-3.5-turbo-instruct",
                                zh_Hans="gpt-3.5-turbo-instruct",
                            ),
                            model_type=ModelType.LLM,
                            fetch_from=FetchFrom.PREDEFINED_MODEL,
                            model_properties={
                                ModelPropertyKey.MODE: LLMMode.COMPLETION,
                            },
                            features=[],
                        ),
                    ],
                ),
            )
        ]

    def get_model_schema(
        self,
        tenant_id: str,
        user_id: str,
        plugin_id: str,
        provider: str,
        model_type: str,
        model: str,
        credentials: dict,
    ) -> AIModelEntity | None:
        """
        Get model schema
        """
        return AIModelEntity(
            model=model,
            label=I18nObject(
                en_US="OpenAI",
                zh_Hans="OpenAI",
            ),
            model_type=ModelType(model_type),
            fetch_from=FetchFrom.PREDEFINED_MODEL,
            model_properties={},
            features=[ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL] if model == "gpt-3.5-turbo" else [],
        )

    @staticmethod
    def generate_function_call(
        tools: Optional[list[PromptMessageTool]],
    ) -> Optional[AssistantPromptMessage.ToolCall]:
        if not tools or len(tools) == 0:
            return None
        function: PromptMessageTool = tools[0]
        function_name = function.name
        function_parameters = function.parameters
        function_parameters_type = function_parameters["type"]
        if function_parameters_type != "object":
            return None
        function_parameters_properties = function_parameters["properties"]
        function_parameters_required = function_parameters["required"]
        parameters = {}
        for parameter_name, parameter in function_parameters_properties.items():
            if parameter_name not in function_parameters_required:
                continue
            parameter_type = parameter["type"]
            if parameter_type == "string":
                if "enum" in parameter:
                    if len(parameter["enum"]) == 0:
                        continue
                    parameters[parameter_name] = parameter["enum"][0]
                else:
                    parameters[parameter_name] = "kawaii"
            elif parameter_type == "integer":
                parameters[parameter_name] = 114514
            elif parameter_type == "number":
                parameters[parameter_name] = 1919810.0
            elif parameter_type == "boolean":
                parameters[parameter_name] = True

        return AssistantPromptMessage.ToolCall(
            id=str(uuid.uuid4()),
            type="function",
            function=AssistantPromptMessage.ToolCall.ToolCallFunction(
                name=function_name,
                arguments=dumps(parameters),
            ),
        )

    @staticmethod
    def mocked_chat_create_sync(
        model: str,
        prompt_messages: list[PromptMessage],
        tools: Optional[list[PromptMessageTool]] = None,
    ) -> LLMResult:
        tool_call = MockModelClass.generate_function_call(tools=tools)

        return LLMResult(
            id=str(uuid.uuid4()),
            model=model,
            prompt_messages=prompt_messages,
            message=AssistantPromptMessage(content="elaina", tool_calls=[tool_call] if tool_call else []),
            usage=LLMUsage(
                prompt_tokens=2,
                completion_tokens=1,
                total_tokens=3,
                prompt_unit_price=Decimal(0.0001),
                completion_unit_price=Decimal(0.0002),
                prompt_price_unit=Decimal(1),
                prompt_price=Decimal(0.0001),
                completion_price_unit=Decimal(1),
                completion_price=Decimal(0.0002),
                total_price=Decimal(0.0003),
                currency="USD",
                latency=0.001,
            ),
        )

    @staticmethod
    def mocked_chat_create_stream(
        model: str,
        prompt_messages: list[PromptMessage],
        tools: Optional[list[PromptMessageTool]] = None,
    ) -> Generator[LLMResultChunk, None, None]:
        tool_call = MockModelClass.generate_function_call(tools=tools)

        full_text = "Hello, world!\n\n```python\nprint('Hello, world!')\n```"
        for i in range(0, len(full_text) + 1):
            if i == len(full_text):
                yield LLMResultChunk(
                    model=model,
                    prompt_messages=prompt_messages,
                    delta=LLMResultChunkDelta(
                        index=0,
                        message=AssistantPromptMessage(
                            content="",
                            tool_calls=[tool_call] if tool_call else [],
                        ),
                    ),
                )
            else:
                yield LLMResultChunk(
                    model=model,
                    prompt_messages=prompt_messages,
                    delta=LLMResultChunkDelta(
                        index=0,
                        message=AssistantPromptMessage(
                            content=full_text[i],
                            tool_calls=[tool_call] if tool_call else [],
                        ),
                        usage=LLMUsage(
                            prompt_tokens=2,
                            completion_tokens=17,
                            total_tokens=19,
                            prompt_unit_price=Decimal(0.0001),
                            completion_unit_price=Decimal(0.0002),
                            prompt_price_unit=Decimal(1),
                            prompt_price=Decimal(0.0001),
                            completion_price_unit=Decimal(1),
                            completion_price=Decimal(0.0002),
                            total_price=Decimal(0.0003),
                            currency="USD",
                            latency=0.001,
                        ),
                    ),
                )

    def invoke_llm(
        self: PluginModelManager,
        *,
        tenant_id: str,
        user_id: str,
        plugin_id: str,
        provider: str,
        model: str,
        credentials: dict,
        prompt_messages: list[PromptMessage],
        model_parameters: Optional[dict] = None,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[list[str]] = None,
        stream: bool = True,
    ):
        return MockModelClass.mocked_chat_create_stream(model=model, prompt_messages=prompt_messages, tools=tools)

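Since invoke_llm above always returns the streaming generator, a consumer reassembles the reply one character per chunk. A minimal sketch driving the mock directly through its static helper (argument values are illustrative):

chunks = MockModelClass.mocked_chat_create_stream(
    model="gpt-3.5-turbo",
    prompt_messages=[],
)
# Every chunk carries a single character; the final chunk is empty and only
# repeats the generated tool calls, if any.
text = "".join(chunk.delta.message.content for chunk in chunks)
assert text.startswith("Hello, world!")
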
@@ -1,55 +0,0 @@
import os
from pathlib import Path

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.gpustack.speech2text.speech2text import GPUStackSpeech2TextModel


def test_validate_credentials():
    model = GPUStackSpeech2TextModel()

    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(
            model="faster-whisper-medium",
            credentials={
                "endpoint_url": "invalid_url",
                "api_key": "invalid_api_key",
            },
        )

    model.validate_credentials(
        model="faster-whisper-medium",
        credentials={
            "endpoint_url": os.environ.get("GPUSTACK_SERVER_URL"),
            "api_key": os.environ.get("GPUSTACK_API_KEY"),
        },
    )


def test_invoke_model():
    model = GPUStackSpeech2TextModel()

    # Get the directory of the current file
    current_dir = os.path.dirname(os.path.abspath(__file__))

    # Get assets directory
    assets_dir = os.path.join(os.path.dirname(current_dir), "assets")

    # Construct the path to the audio file
    audio_file_path = os.path.join(assets_dir, "audio.mp3")

    file = Path(audio_file_path).read_bytes()

    result = model.invoke(
        model="faster-whisper-medium",
        credentials={
            "endpoint_url": os.environ.get("GPUSTACK_SERVER_URL"),
            "api_key": os.environ.get("GPUSTACK_API_KEY"),
        },
        file=file,
    )

    assert isinstance(result, str)
    assert result == "1, 2, 3, 4, 5, 6, 7, 8, 9, 10"

@@ -1,24 +0,0 @@
import os

from core.model_runtime.model_providers.gpustack.tts.tts import GPUStackText2SpeechModel


def test_invoke_model():
    model = GPUStackText2SpeechModel()

    result = model.invoke(
        model="cosyvoice-300m-sft",
        tenant_id="test",
        credentials={
            "endpoint_url": os.environ.get("GPUSTACK_SERVER_URL"),
            "api_key": os.environ.get("GPUSTACK_API_KEY"),
        },
        content_text="Hello world",
        voice="Chinese Female",
    )

    content = b""
    for chunk in result:
        content += chunk

    assert content != b""

@@ -0,0 +1,50 @@
from unittest.mock import MagicMock

from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from models.provider import ProviderType


def get_mocked_fetch_model_config(
    provider: str,
    model: str,
    mode: str,
    credentials: dict,
):
    model_provider_factory = ModelProviderFactory(tenant_id="test_tenant")
    model_type_instance = model_provider_factory.get_model_type_instance(provider, ModelType.LLM)
    provider_model_bundle = ProviderModelBundle(
        configuration=ProviderConfiguration(
            tenant_id="1",
            provider=model_provider_factory.get_provider_schema(provider),
            preferred_provider_type=ProviderType.CUSTOM,
            using_provider_type=ProviderType.CUSTOM,
            system_configuration=SystemConfiguration(enabled=False),
            custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)),
            model_settings=[],
        ),
        model_type_instance=model_type_instance,
    )
    model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model=model)
    model_schema = model_provider_factory.get_model_schema(
        provider=provider,
        model_type=model_type_instance.model_type,
        model=model,
        credentials=credentials,
    )
    assert model_schema is not None
    model_config = ModelConfigWithCredentialsEntity(
        model=model,
        provider=provider,
        mode=mode,
        credentials=credentials,
        parameters={},
        model_schema=model_schema,
        provider_model_bundle=provider_model_bundle,
    )

    return MagicMock(return_value=(model_instance, model_config))

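Because the helper returns a MagicMock whose return value is the (model_instance, model_config) pair, a test can swap it in for a node's private _fetch_model_config in one assignment; sketched usage mirroring the test changes later in this commit (the credentials value is illustrative):

node._fetch_model_config = get_mocked_fetch_model_config(
    provider="langgenius/openai/openai",
    model="gpt-3.5-turbo",
    mode="chat",
    credentials={"openai_api_key": "sk-test"},  # illustrative key
)
# Calling the mock yields the prepared pair, bypassing the database lookup.
model_instance, model_config = node._fetch_model_config()
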
@@ -7,12 +7,7 @@ from unittest.mock import MagicMock

import pytest

from core.app.entities.app_invoke_entities import InvokeFrom, ModelConfigWithCredentialsEntity
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers import ModelProviderFactory
from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph import Graph
@@ -22,11 +17,11 @@ from core.workflow.nodes.event import RunCompletedEvent
from core.workflow.nodes.llm.node import LLMNode
from extensions.ext_database import db
from models.enums import UserFrom
from models.provider import ProviderType
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
from tests.integration_tests.workflow.nodes.__mock.model import get_mocked_fetch_model_config

"""FOR MOCK FIXTURES, DO NOT REMOVE"""
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
from tests.integration_tests.model_runtime.__mock.plugin_daemon import setup_model_mock
from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock


@@ -81,15 +76,19 @@ def init_llm_node(config: dict) -> LLMNode:
    return node


@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_execute_llm(setup_openai_mock):
def test_execute_llm(setup_model_mock):
    node = init_llm_node(
        config={
            "id": "llm",
            "data": {
                "title": "123",
                "type": "llm",
                "model": {"provider": "openai", "name": "gpt-3.5-turbo", "mode": "chat", "completion_params": {}},
                "model": {
                    "provider": "langgenius/openai/openai",
                    "name": "gpt-3.5-turbo",
                    "mode": "chat",
                    "completion_params": {},
                },
                "prompt_template": [
                    {"role": "system", "text": "you are a helpful assistant.\ntoday's weather is {{#abc.output#}}."},
                    {"role": "user", "text": "{{#sys.query#}}"},
@@ -103,37 +102,15 @@ def test_execute_llm(setup_openai_mock):

    credentials = {"openai_api_key": os.environ.get("OPENAI_API_KEY")}

    provider_instance = ModelProviderFactory().get_provider_instance("openai")
    model_type_instance = provider_instance.get_model_instance(ModelType.LLM)
    provider_model_bundle = ProviderModelBundle(
        configuration=ProviderConfiguration(
            tenant_id="1",
            provider=provider_instance.get_provider_schema(),
            preferred_provider_type=ProviderType.CUSTOM,
            using_provider_type=ProviderType.CUSTOM,
            system_configuration=SystemConfiguration(enabled=False),
            custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)),
            model_settings=[],
        ),
        model_type_instance=model_type_instance,
    )
    model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model="gpt-3.5-turbo")
    model_schema = model_type_instance.get_model_schema("gpt-3.5-turbo")
    assert model_schema is not None
    model_config = ModelConfigWithCredentialsEntity(
        model="gpt-3.5-turbo",
        provider="openai",
        mode="chat",
        credentials=credentials,
        parameters={},
        model_schema=model_schema,
        provider_model_bundle=provider_model_bundle,
    )

    # Mock db.session.close()
    db.session.close = MagicMock()

    node._fetch_model_config = MagicMock(return_value=(model_instance, model_config))
    node._fetch_model_config = get_mocked_fetch_model_config(
        provider="langgenius/openai/openai",
        model="gpt-3.5-turbo",
        mode="chat",
        credentials=credentials,
    )

    # execute node
    result = node._run()
@@ -149,8 +126,7 @@ def test_execute_llm(setup_openai_mock):


@pytest.mark.parametrize("setup_code_executor_mock", [["none"]], indirect=True)
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_model_mock):
    """
    Test execute LLM node with jinja2
    """
@@ -190,38 +166,15 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):

    credentials = {"openai_api_key": os.environ.get("OPENAI_API_KEY")}

    provider_instance = ModelProviderFactory().get_provider_instance("openai")
    model_type_instance = provider_instance.get_model_instance(ModelType.LLM)
    provider_model_bundle = ProviderModelBundle(
        configuration=ProviderConfiguration(
            tenant_id="1",
            provider=provider_instance.get_provider_schema(),
            preferred_provider_type=ProviderType.CUSTOM,
            using_provider_type=ProviderType.CUSTOM,
            system_configuration=SystemConfiguration(enabled=False),
            custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)),
            model_settings=[],
        ),
        model_type_instance=model_type_instance,
    )

    model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model="gpt-3.5-turbo")
    model_schema = model_type_instance.get_model_schema("gpt-3.5-turbo")
    assert model_schema is not None
    model_config = ModelConfigWithCredentialsEntity(
        model="gpt-3.5-turbo",
        provider="openai",
        mode="chat",
        credentials=credentials,
        parameters={},
        model_schema=model_schema,
        provider_model_bundle=provider_model_bundle,
    )

    # Mock db.session.close()
    db.session.close = MagicMock()

    node._fetch_model_config = MagicMock(return_value=(model_instance, model_config))
    node._fetch_model_config = get_mocked_fetch_model_config(
        provider="langgenius/openai/openai",
        model="gpt-3.5-turbo",
        mode="chat",
        credentials=credentials,
    )

    # execute node
    result = node._run()

@@ -4,14 +4,7 @@ import uuid
from typing import Optional
from unittest.mock import MagicMock

import pytest

from core.app.entities.app_invoke_entities import InvokeFrom, ModelConfigWithCredentialsEntity
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph import Graph
@@ -20,53 +13,11 @@ from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntime
from core.workflow.nodes.parameter_extractor.parameter_extractor_node import ParameterExtractorNode
from extensions.ext_database import db
from models.enums import UserFrom
from models.provider import ProviderType
from tests.integration_tests.workflow.nodes.__mock.model import get_mocked_fetch_model_config

"""FOR MOCK FIXTURES, DO NOT REMOVE"""
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
from tests.integration_tests.model_runtime.__mock.anthropic import setup_anthropic_mock
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock


def get_mocked_fetch_model_config(
    provider: str,
    model: str,
    mode: str,
    credentials: dict,
):
    model_provider_factory = ModelProviderFactory(tenant_id="test_tenant")
    model_type_instance = model_provider_factory.get_model_type_instance(provider, ModelType.LLM)
    provider_model_bundle = ProviderModelBundle(
        configuration=ProviderConfiguration(
            tenant_id="1",
            provider=model_provider_factory.get_provider_schema(provider),
            preferred_provider_type=ProviderType.CUSTOM,
            using_provider_type=ProviderType.CUSTOM,
            system_configuration=SystemConfiguration(enabled=False),
            custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)),
            model_settings=[],
        ),
        model_type_instance=model_type_instance,
    )
    model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model=model)
    model_schema = model_provider_factory.get_model_schema(
        provider=provider,
        model_type=model_type_instance.model_type,
        model=model,
        credentials=credentials,
    )
    assert model_schema is not None
    model_config = ModelConfigWithCredentialsEntity(
        model=model,
        provider=provider,
        mode=mode,
        credentials=credentials,
        parameters={},
        model_schema=model_schema,
        provider_model_bundle=provider_model_bundle,
    )

    return MagicMock(return_value=(model_instance, model_config))
from tests.integration_tests.model_runtime.__mock.plugin_daemon import setup_model_mock


def get_mocked_fetch_memory(memory_text: str):
@@ -133,8 +84,7 @@ def init_parameter_extractor_node(config: dict):
    )


@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_function_calling_parameter_extractor(setup_openai_mock):
def test_function_calling_parameter_extractor(setup_model_mock):
    """
    Test function calling for parameter extractor.
    """
@@ -144,7 +94,12 @@ def test_function_calling_parameter_extractor(setup_openai_mock):
            "data": {
                "title": "123",
                "type": "parameter-extractor",
                "model": {"provider": "openai", "name": "gpt-3.5-turbo", "mode": "chat", "completion_params": {}},
                "model": {
                    "provider": "langgenius/openai/openai",
                    "name": "gpt-3.5-turbo",
                    "mode": "chat",
                    "completion_params": {},
                },
                "query": ["sys", "query"],
                "parameters": [{"name": "location", "type": "string", "description": "location", "required": True}],
                "instruction": "",
@@ -155,25 +110,13 @@ def test_function_calling_parameter_extractor(setup_openai_mock):
    )

    node._fetch_model_config = get_mocked_fetch_model_config(
        provider="openai",
        provider="langgenius/openai/openai",
        model="gpt-3.5-turbo",
        mode="chat",
        credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
    )
    db.session.close = MagicMock()

    # construct variable pool
    pool = VariablePool(
        system_variables={
            SystemVariableKey.QUERY: "what's the weather in SF",
            SystemVariableKey.FILES: [],
            SystemVariableKey.CONVERSATION_ID: "abababa",
            SystemVariableKey.USER_ID: "aaa",
        },
        user_inputs={},
        environment_variables=[],
    )

    result = node._run()

    assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
@@ -182,8 +125,7 @@ def test_function_calling_parameter_extractor(setup_openai_mock):
    assert result.outputs.get("__reason") == None


@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_instructions(setup_openai_mock):
def test_instructions(setup_model_mock):
    """
    Test chat parameter extractor.
    """
@@ -193,7 +135,12 @@ def test_instructions(setup_openai_mock):
            "data": {
                "title": "123",
                "type": "parameter-extractor",
                "model": {"provider": "openai", "name": "gpt-3.5-turbo", "mode": "chat", "completion_params": {}},
                "model": {
                    "provider": "langgenius/openai/openai",
                    "name": "gpt-3.5-turbo",
                    "mode": "chat",
                    "completion_params": {},
                },
                "query": ["sys", "query"],
                "parameters": [{"name": "location", "type": "string", "description": "location", "required": True}],
                "reasoning_mode": "function_call",
@@ -204,7 +151,7 @@ def test_instructions(setup_openai_mock):
    )

    node._fetch_model_config = get_mocked_fetch_model_config(
        provider="openai",
        provider="langgenius/openai/openai",
        model="gpt-3.5-turbo",
        mode="chat",
        credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
@@ -228,8 +175,7 @@ def test_instructions(setup_openai_mock):
    assert "what's the weather in SF" in prompt.get("text")


@pytest.mark.parametrize("setup_anthropic_mock", [["none"]], indirect=True)
def test_chat_parameter_extractor(setup_anthropic_mock):
def test_chat_parameter_extractor(setup_model_mock):
    """
    Test chat parameter extractor.
    """
@@ -239,7 +185,12 @@ def test_chat_parameter_extractor(setup_anthropic_mock):
            "data": {
                "title": "123",
                "type": "parameter-extractor",
                "model": {"provider": "anthropic", "name": "claude-2", "mode": "chat", "completion_params": {}},
                "model": {
                    "provider": "langgenius/openai/openai",
                    "name": "gpt-3.5-turbo",
                    "mode": "chat",
                    "completion_params": {},
                },
                "query": ["sys", "query"],
                "parameters": [{"name": "location", "type": "string", "description": "location", "required": True}],
                "reasoning_mode": "prompt",
@@ -250,10 +201,10 @@ def test_chat_parameter_extractor(setup_anthropic_mock):
    )

    node._fetch_model_config = get_mocked_fetch_model_config(
        provider="anthropic",
        model="claude-2",
        provider="langgenius/openai/openai",
        model="gpt-3.5-turbo",
        mode="chat",
        credentials={"anthropic_api_key": os.environ.get("ANTHROPIC_API_KEY")},
        credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
    )
    db.session.close = MagicMock()

@@ -275,8 +226,7 @@ def test_chat_parameter_extractor(setup_anthropic_mock):
    assert '<structure>\n{"type": "object"' in prompt.get("text")


@pytest.mark.parametrize("setup_openai_mock", [["completion"]], indirect=True)
def test_completion_parameter_extractor(setup_openai_mock):
def test_completion_parameter_extractor(setup_model_mock):
    """
    Test completion parameter extractor.
    """
@@ -287,7 +237,7 @@ def test_completion_parameter_extractor(setup_openai_mock):
                "title": "123",
                "type": "parameter-extractor",
                "model": {
                    "provider": "openai",
                    "provider": "langgenius/openai/openai",
                    "name": "gpt-3.5-turbo-instruct",
                    "mode": "completion",
                    "completion_params": {},
@@ -302,7 +252,7 @@ def test_completion_parameter_extractor(setup_openai_mock):
    )

    node._fetch_model_config = get_mocked_fetch_model_config(
        provider="openai",
        provider="langgenius/openai/openai",
        model="gpt-3.5-turbo-instruct",
        mode="completion",
        credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
@@ -335,7 +285,7 @@ def test_extract_json_response():
                "title": "123",
                "type": "parameter-extractor",
                "model": {
                    "provider": "openai",
                    "provider": "langgenius/openai/openai",
                    "name": "gpt-3.5-turbo-instruct",
                    "mode": "completion",
                    "completion_params": {},
@@ -361,8 +311,7 @@ def test_extract_json_response():
    assert result["location"] == "kawaii"


@pytest.mark.parametrize("setup_anthropic_mock", [["none"]], indirect=True)
def test_chat_parameter_extractor_with_memory(setup_anthropic_mock):
def test_chat_parameter_extractor_with_memory(setup_model_mock):
    """
    Test chat parameter extractor with memory.
    """
@@ -372,7 +321,12 @@ def test_chat_parameter_extractor_with_memory(setup_anthropic_mock):
            "data": {
                "title": "123",
                "type": "parameter-extractor",
                "model": {"provider": "anthropic", "name": "claude-2", "mode": "chat", "completion_params": {}},
                "model": {
                    "provider": "langgenius/openai/openai",
                    "name": "gpt-3.5-turbo",
                    "mode": "chat",
                    "completion_params": {},
                },
                "query": ["sys", "query"],
                "parameters": [{"name": "location", "type": "string", "description": "location", "required": True}],
                "reasoning_mode": "prompt",
@@ -383,10 +337,10 @@ def test_chat_parameter_extractor_with_memory(setup_anthropic_mock):
    )

    node._fetch_model_config = get_mocked_fetch_model_config(
        provider="anthropic",
        model="claude-2",
        provider="langgenius/openai/openai",
        model="gpt-3.5-turbo",
        mode="chat",
        credentials={"anthropic_api_key": os.environ.get("ANTHROPIC_API_KEY")},
        credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
    )
    node._fetch_memory = get_mocked_fetch_memory("customized memory")
    db.session.close = MagicMock()

@@ -1,13 +1,15 @@
import time
import uuid
from unittest.mock import MagicMock

from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.node_entities import NodeRunResult
from core.tools.utils.configuration import ToolParameterConfigurationManager
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph import Graph
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
from core.workflow.nodes.event.event import RunCompletedEvent
from core.workflow.nodes.tool.tool_node import ToolNode
from models.enums import UserFrom
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
@@ -63,31 +65,28 @@ def test_tool_variable_invoke():
            "data": {
                "title": "a",
                "desc": "a",
                "provider_id": "maths",
                "provider_id": "time",
                "provider_type": "builtin",
                "provider_name": "maths",
                "tool_name": "eval_expression",
                "tool_label": "eval_expression",
                "provider_name": "time",
                "tool_name": "current_time",
                "tool_label": "current_time",
                "tool_configurations": {},
                "tool_parameters": {
                    "expression": {
                        "type": "variable",
                        "value": ["1", "123", "args1"],
                    }
                },
                "tool_parameters": {},
            },
        }
    )

    ToolParameterConfigurationManager.decrypt_tool_parameters = MagicMock(return_value={"format": "%Y-%m-%d %H:%M:%S"})

    node.graph_runtime_state.variable_pool.add(["1", "123", "args1"], "1+1")

    # execute node
    result = node._run()
    assert isinstance(result, NodeRunResult)
    assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
    assert result.outputs is not None
    assert "2" in result.outputs["text"]
    assert result.outputs["files"] == []
    for item in result:
        if isinstance(item, RunCompletedEvent):
            assert item.run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED
            assert item.run_result.outputs is not None
            assert item.run_result.outputs.get("text") is not None


def test_tool_mixed_invoke():
@@ -97,28 +96,25 @@ def test_tool_mixed_invoke():
            "data": {
                "title": "a",
                "desc": "a",
                "provider_id": "maths",
                "provider_id": "time",
                "provider_type": "builtin",
                "provider_name": "maths",
                "tool_name": "eval_expression",
                "tool_label": "eval_expression",
                "tool_configurations": {},
                "tool_parameters": {
                    "expression": {
                        "type": "mixed",
                        "value": "{{#1.args1#}}",
                    }
                "provider_name": "time",
                "tool_name": "current_time",
                "tool_label": "current_time",
                "tool_configurations": {
                    "format": "%Y-%m-%d %H:%M:%S",
                },
                "tool_parameters": {},
            },
        }
    )

    node.graph_runtime_state.variable_pool.add(["1", "args1"], "1+1")
    ToolParameterConfigurationManager.decrypt_tool_parameters = MagicMock(return_value={"format": "%Y-%m-%d %H:%M:%S"})

    # execute node
    result = node._run()
    assert isinstance(result, NodeRunResult)
    assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
    assert result.outputs is not None
    assert "2" in result.outputs["text"]
    assert result.outputs["files"] == []
    for item in result:
        if isinstance(item, RunCompletedEvent):
            assert item.run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED
            assert item.run_result.outputs is not None
            assert item.run_result.outputs.get("text") is not None

@@ -2,6 +2,6 @@ from core.helper.marketplace import download_plugin_pkg


def test_download_plugin_pkg():
    pkg = download_plugin_pkg("yeuoly/google:0.0.1@4ff79ee644987e5b744d9c5b7a735d459fe66f26b28724326a7834d7e459e708")
    pkg = download_plugin_pkg("langgenius/bing:0.0.1@e58735424d2104f208c2bd683c5142e0332045b425927067acf432b26f3d970b")
    assert pkg is not None
    assert len(pkg) > 0

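Both package identifiers in this test share the org/plugin:version@checksum shape; a small parsing sketch (the helper and field names are this sketch's own, not a Dify API):

def parse_plugin_identifier(identifier: str) -> dict:
    # "langgenius/bing:0.0.1@e587..." -> org-qualified name, version, checksum
    path, _, checksum = identifier.partition("@")
    name, _, version = path.partition(":")
    return {"name": name, "version": version, "checksum": checksum}


parsed = parse_plugin_identifier(
    "langgenius/bing:0.0.1@e58735424d2104f208c2bd683c5142e0332045b425927067acf432b26f3d970b"
)
assert parsed["name"] == "langgenius/bing"
assert parsed["version"] == "0.0.1"
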
@@ -1,52 +1,52 @@
from unittest.mock import MagicMock
# from unittest.mock import MagicMock

from core.app.app_config.entities import ModelConfigEntity
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.model_runtime.entities.message_entities import UserPromptMessage
from core.model_runtime.entities.model_entities import AIModelEntity, ModelPropertyKey, ParameterRule
from core.model_runtime.entities.provider_entities import ProviderEntity
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.prompt.prompt_transform import PromptTransform
# from core.app.app_config.entities import ModelConfigEntity
# from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
# from core.model_runtime.entities.message_entities import UserPromptMessage
# from core.model_runtime.entities.model_entities import AIModelEntity, ModelPropertyKey, ParameterRule
# from core.model_runtime.entities.provider_entities import ProviderEntity
# from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
# from core.prompt.prompt_transform import PromptTransform


def test__calculate_rest_token():
    model_schema_mock = MagicMock(spec=AIModelEntity)
    parameter_rule_mock = MagicMock(spec=ParameterRule)
    parameter_rule_mock.name = "max_tokens"
    model_schema_mock.parameter_rules = [parameter_rule_mock]
    model_schema_mock.model_properties = {ModelPropertyKey.CONTEXT_SIZE: 62}
# def test__calculate_rest_token():
#     model_schema_mock = MagicMock(spec=AIModelEntity)
#     parameter_rule_mock = MagicMock(spec=ParameterRule)
#     parameter_rule_mock.name = "max_tokens"
#     model_schema_mock.parameter_rules = [parameter_rule_mock]
#     model_schema_mock.model_properties = {ModelPropertyKey.CONTEXT_SIZE: 62}

    large_language_model_mock = MagicMock(spec=LargeLanguageModel)
    large_language_model_mock.get_num_tokens.return_value = 6
#     large_language_model_mock = MagicMock(spec=LargeLanguageModel)
#     large_language_model_mock.get_num_tokens.return_value = 6

    provider_mock = MagicMock(spec=ProviderEntity)
    provider_mock.provider = "openai"
#     provider_mock = MagicMock(spec=ProviderEntity)
#     provider_mock.provider = "openai"

    provider_configuration_mock = MagicMock(spec=ProviderConfiguration)
    provider_configuration_mock.provider = provider_mock
    provider_configuration_mock.model_settings = None
#     provider_configuration_mock = MagicMock(spec=ProviderConfiguration)
#     provider_configuration_mock.provider = provider_mock
#     provider_configuration_mock.model_settings = None

    provider_model_bundle_mock = MagicMock(spec=ProviderModelBundle)
    provider_model_bundle_mock.model_type_instance = large_language_model_mock
    provider_model_bundle_mock.configuration = provider_configuration_mock
#     provider_model_bundle_mock = MagicMock(spec=ProviderModelBundle)
#     provider_model_bundle_mock.model_type_instance = large_language_model_mock
#     provider_model_bundle_mock.configuration = provider_configuration_mock

    model_config_mock = MagicMock(spec=ModelConfigEntity)
    model_config_mock.model = "gpt-4"
    model_config_mock.credentials = {}
    model_config_mock.parameters = {"max_tokens": 50}
    model_config_mock.model_schema = model_schema_mock
    model_config_mock.provider_model_bundle = provider_model_bundle_mock
#     model_config_mock = MagicMock(spec=ModelConfigEntity)
#     model_config_mock.model = "gpt-4"
#     model_config_mock.credentials = {}
#     model_config_mock.parameters = {"max_tokens": 50}
#     model_config_mock.model_schema = model_schema_mock
#     model_config_mock.provider_model_bundle = provider_model_bundle_mock

    prompt_transform = PromptTransform()
#     prompt_transform = PromptTransform()

    prompt_messages = [UserPromptMessage(content="Hello, how are you?")]
    rest_tokens = prompt_transform._calculate_rest_token(prompt_messages, model_config_mock)
#     prompt_messages = [UserPromptMessage(content="Hello, how are you?")]
#     rest_tokens = prompt_transform._calculate_rest_token(prompt_messages, model_config_mock)

    # Validate based on the mock configuration and expected logic
    expected_rest_tokens = (
        model_schema_mock.model_properties[ModelPropertyKey.CONTEXT_SIZE]
        - model_config_mock.parameters["max_tokens"]
        - large_language_model_mock.get_num_tokens.return_value
    )
    assert rest_tokens == expected_rest_tokens
    assert rest_tokens == 6
#     # Validate based on the mock configuration and expected logic
#     expected_rest_tokens = (
#         model_schema_mock.model_properties[ModelPropertyKey.CONTEXT_SIZE]
#         - model_config_mock.parameters["max_tokens"]
#         - large_language_model_mock.get_num_tokens.return_value
#     )
#     assert rest_tokens == expected_rest_tokens
#     assert rest_tokens == 6

@@ -1,186 +1,190 @@
from core.entities.provider_entities import ModelSettings
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from core.provider_manager import ProviderManager
from models.provider import LoadBalancingModelConfig, ProviderModelSetting
# from core.entities.provider_entities import ModelSettings
# from core.model_runtime.entities.model_entities import ModelType
# from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
# from core.provider_manager import ProviderManager
# from models.provider import LoadBalancingModelConfig, ProviderModelSetting


def test__to_model_settings(mocker):
    # Get all provider entities
    model_provider_factory = ModelProviderFactory("test_tenant")
    provider_entities = model_provider_factory.get_providers()
# def test__to_model_settings(mocker):
#     # Get all provider entities
#     model_provider_factory = ModelProviderFactory("test_tenant")
#     provider_entities = model_provider_factory.get_providers()

    provider_entity = None
    for provider in provider_entities:
        if provider.provider == "openai":
            provider_entity = provider
#     provider_entity = None
#     for provider in provider_entities:
#         if provider.provider == "openai":
#             provider_entity = provider

    # Mocking the inputs
    provider_model_settings = [
        ProviderModelSetting(
            id="id",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            enabled=True,
            load_balancing_enabled=True,
        )
    ]
    load_balancing_model_configs = [
        LoadBalancingModelConfig(
            id="id1",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            name="__inherit__",
            encrypted_config=None,
            enabled=True,
        ),
        LoadBalancingModelConfig(
            id="id2",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            name="first",
            encrypted_config='{"openai_api_key": "fake_key"}',
            enabled=True,
        ),
    ]
#     # Mocking the inputs
#     provider_model_settings = [
#         ProviderModelSetting(
#             id="id",
#             tenant_id="tenant_id",
#             provider_name="openai",
#             model_name="gpt-4",
#             model_type="text-generation",
#             enabled=True,
#             load_balancing_enabled=True,
#         )
#     ]
#     load_balancing_model_configs = [
#         LoadBalancingModelConfig(
#             id="id1",
#             tenant_id="tenant_id",
#             provider_name="openai",
#             model_name="gpt-4",
#             model_type="text-generation",
#             name="__inherit__",
#             encrypted_config=None,
#             enabled=True,
#         ),
#         LoadBalancingModelConfig(
#             id="id2",
#             tenant_id="tenant_id",
#             provider_name="openai",
#             model_name="gpt-4",
#             model_type="text-generation",
#             name="first",
#             encrypted_config='{"openai_api_key": "fake_key"}',
#             enabled=True,
#         ),
#     ]

    mocker.patch(
        "core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
    )
#     mocker.patch(
#         "core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
#     )

    provider_manager = ProviderManager()
#     provider_manager = ProviderManager()

    # Running the method
    result = provider_manager._to_model_settings(provider_entity, provider_model_settings, load_balancing_model_configs)
#     # Running the method
#     result = provider_manager._to_model_settings(provider_entity,
#         provider_model_settings, load_balancing_model_configs)

    # Asserting that the result is as expected
    assert len(result) == 1
    assert isinstance(result[0], ModelSettings)
    assert result[0].model == "gpt-4"
    assert result[0].model_type == ModelType.LLM
    assert result[0].enabled is True
    assert len(result[0].load_balancing_configs) == 2
    assert result[0].load_balancing_configs[0].name == "__inherit__"
    assert result[0].load_balancing_configs[1].name == "first"
#     # Asserting that the result is as expected
#     assert len(result) == 1
#     assert isinstance(result[0], ModelSettings)
#     assert result[0].model == "gpt-4"
#     assert result[0].model_type == ModelType.LLM
#     assert result[0].enabled is True
#     assert len(result[0].load_balancing_configs) == 2
#     assert result[0].load_balancing_configs[0].name == "__inherit__"
#     assert result[0].load_balancing_configs[1].name == "first"


def test__to_model_settings_only_one_lb(mocker):
    # Get all provider entities
    model_provider_factory = ModelProviderFactory("test_tenant")
    provider_entities = model_provider_factory.get_providers()
# def test__to_model_settings_only_one_lb(mocker):
#     # Get all provider entities
#     model_provider_factory = ModelProviderFactory("test_tenant")
#     provider_entities = model_provider_factory.get_providers()

    provider_entity = None
    for provider in provider_entities:
        if provider.provider == "openai":
            provider_entity = provider
#     provider_entity = None
#     for provider in provider_entities:
#         if provider.provider == "openai":
#             provider_entity = provider

    # Mocking the inputs
    provider_model_settings = [
        ProviderModelSetting(
            id="id",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            enabled=True,
            load_balancing_enabled=True,
        )
    ]
    load_balancing_model_configs = [
        LoadBalancingModelConfig(
            id="id1",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            name="__inherit__",
            encrypted_config=None,
            enabled=True,
        )
    ]
#     # Mocking the inputs
#     provider_model_settings = [
#         ProviderModelSetting(
#             id="id",
#             tenant_id="tenant_id",
#             provider_name="openai",
#             model_name="gpt-4",
#             model_type="text-generation",
#             enabled=True,
#             load_balancing_enabled=True,
#         )
#     ]
#     load_balancing_model_configs = [
#         LoadBalancingModelConfig(
#             id="id1",
#             tenant_id="tenant_id",
#             provider_name="openai",
#             model_name="gpt-4",
#             model_type="text-generation",
#             name="__inherit__",
#             encrypted_config=None,
#             enabled=True,
#         )
#     ]

    mocker.patch(
        "core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
    )
#     mocker.patch(
#         "core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
#     )

    provider_manager = ProviderManager()
#     provider_manager = ProviderManager()

    # Running the method
    result = provider_manager._to_model_settings(provider_entity, provider_model_settings, load_balancing_model_configs)
#     # Running the method
#     result = provider_manager._to_model_settings(
#         provider_entity, provider_model_settings, load_balancing_model_configs)

    # Asserting that the result is as expected
    assert len(result) == 1
    assert isinstance(result[0], ModelSettings)
    assert result[0].model == "gpt-4"
    assert result[0].model_type == ModelType.LLM
    assert result[0].enabled is True
    assert len(result[0].load_balancing_configs) == 0
#     # Asserting that the result is as expected
#     assert len(result) == 1
#     assert isinstance(result[0], ModelSettings)
#     assert result[0].model == "gpt-4"
#     assert result[0].model_type == ModelType.LLM
#     assert result[0].enabled is True
#     assert len(result[0].load_balancing_configs) == 0


def test__to_model_settings_lb_disabled(mocker):
    # Get all provider entities
    model_provider_factory = ModelProviderFactory("test_tenant")
    provider_entities = model_provider_factory.get_providers()
# def test__to_model_settings_lb_disabled(mocker):
#     # Get all provider entities
#     model_provider_factory = ModelProviderFactory("test_tenant")
#     provider_entities = model_provider_factory.get_providers()

    provider_entity = None
    for provider in provider_entities:
        if provider.provider == "openai":
            provider_entity = provider
#     provider_entity = None
#     for provider in provider_entities:
#         if provider.provider == "openai":
#             provider_entity = provider

    # Mocking the inputs
    provider_model_settings = [
        ProviderModelSetting(
            id="id",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            enabled=True,
            load_balancing_enabled=False,
        )
    ]
    load_balancing_model_configs = [
        LoadBalancingModelConfig(
            id="id1",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            name="__inherit__",
            encrypted_config=None,
            enabled=True,
        ),
        LoadBalancingModelConfig(
            id="id2",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            name="first",
            encrypted_config='{"openai_api_key": "fake_key"}',
            enabled=True,
        ),
    ]
#     # Mocking the inputs
#     provider_model_settings = [
#         ProviderModelSetting(
#             id="id",
#             tenant_id="tenant_id",
#             provider_name="openai",
#             model_name="gpt-4",
#             model_type="text-generation",
#             enabled=True,
#             load_balancing_enabled=False,
#         )
#     ]
#     load_balancing_model_configs = [
#         LoadBalancingModelConfig(
#             id="id1",
#             tenant_id="tenant_id",
#             provider_name="openai",
#             model_name="gpt-4",
#             model_type="text-generation",
#             name="__inherit__",
#             encrypted_config=None,
#             enabled=True,
#         ),
#         LoadBalancingModelConfig(
#             id="id2",
#             tenant_id="tenant_id",
#             provider_name="openai",
#             model_name="gpt-4",
#             model_type="text-generation",
#             name="first",
#             encrypted_config='{"openai_api_key": "fake_key"}',
#             enabled=True,
#         ),
#     ]

    mocker.patch(
        "core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
|
||||
)
|
||||
# mocker.patch(
|
||||
# "core.helper.model_provider_cache.ProviderCredentialsCache.get",
|
||||
# return_value={"openai_api_key": "fake_key"}
|
||||
# )
|
||||
|
||||
provider_manager = ProviderManager()
|
||||
# provider_manager = ProviderManager()
|
||||
|
||||
# Running the method
|
||||
result = provider_manager._to_model_settings(provider_entity, provider_model_settings, load_balancing_model_configs)
|
||||
# # Running the method
|
||||
# result = provider_manager._to_model_settings(provider_entity,
|
||||
# provider_model_settings, load_balancing_model_configs)
|
||||
|
||||
# Asserting that the result is as expected
|
||||
assert len(result) == 1
|
||||
assert isinstance(result[0], ModelSettings)
|
||||
assert result[0].model == "gpt-4"
|
||||
assert result[0].model_type == ModelType.LLM
|
||||
assert result[0].enabled is True
|
||||
assert len(result[0].load_balancing_configs) == 0
|
||||
# # Asserting that the result is as expected
|
||||
# assert len(result) == 1
|
||||
# assert isinstance(result[0], ModelSettings)
|
||||
# assert result[0].model == "gpt-4"
|
||||
# assert result[0].model_type == ModelType.LLM
|
||||
# assert result[0].enabled is True
|
||||
# assert len(result[0].load_balancing_configs) == 0
|
||||
|
|
|
|||
|
|
@@ -3,24 +3,20 @@ from typing import Optional

import pytest

from configs import dify_config
from core.app.entities.app_invoke_entities import InvokeFrom, ModelConfigWithCredentialsEntity
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import CustomConfiguration, SystemConfiguration
from core.file import File, FileTransferMethod, FileType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    ImagePromptMessageContent,
    PromptMessage,
    PromptMessageRole,
    SystemPromptMessage,
    TextPromptMessageContent,
    UserPromptMessage,
)
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelFeature, ModelType
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from core.prompt.entities.advanced_prompt_entities import MemoryConfig
from core.variables import ArrayAnySegment, ArrayFileSegment, NoneSegment
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.graph_engine import Graph, GraphInitParams, GraphRuntimeState

@@ -38,7 +34,6 @@ from core.workflow.nodes.llm.node import LLMNode
from models.enums import UserFrom
from models.provider import ProviderType
from models.workflow import WorkflowType
from tests.unit_tests.core.workflow.nodes.llm.test_scenarios import LLMNodeTestScenario


class MockTokenBufferMemory:
@@ -112,22 +107,21 @@ def llm_node():
@pytest.fixture
def model_config():
    # Create actual provider and model type instances
    model_provider_factory = ModelProviderFactory()
    provider_instance = model_provider_factory.get_provider_instance("openai")
    model_type_instance = provider_instance.get_model_instance(ModelType.LLM)
    model_provider_factory = ModelProviderFactory(tenant_id="test")
    provider_instance = model_provider_factory.get_plugin_model_provider("openai")
    model_type_instance = model_provider_factory.get_model_type_instance("openai", ModelType.LLM)

    # Create a ProviderModelBundle
    provider_model_bundle = ProviderModelBundle(
        configuration=ProviderConfiguration(
            tenant_id="1",
            provider=provider_instance.get_provider_schema(),
            provider=provider_instance,
            preferred_provider_type=ProviderType.CUSTOM,
            using_provider_type=ProviderType.CUSTOM,
            system_configuration=SystemConfiguration(enabled=False),
            custom_configuration=CustomConfiguration(provider=None),
            model_settings=[],
        ),
        provider_instance=provider_instance,
        model_type_instance=model_type_instance,
    )
@@ -211,236 +205,240 @@ def test_fetch_files_with_non_existent_variable(llm_node):
    assert result == []


def test_fetch_prompt_messages__vison_disabled(faker, llm_node, model_config):
    prompt_template = []
    llm_node.node_data.prompt_template = prompt_template
# def test_fetch_prompt_messages__vison_disabled(faker, llm_node, model_config):
# TODO: Add test
# pass
#     prompt_template = []
#     llm_node.node_data.prompt_template = prompt_template

    fake_vision_detail = faker.random_element(
        [ImagePromptMessageContent.DETAIL.HIGH, ImagePromptMessageContent.DETAIL.LOW]
    )
    fake_remote_url = faker.url()
    files = [
        File(
            id="1",
            tenant_id="test",
            type=FileType.IMAGE,
            filename="test1.jpg",
            transfer_method=FileTransferMethod.REMOTE_URL,
            remote_url=fake_remote_url,
            storage_key="",
        )
    ]
#     fake_vision_detail = faker.random_element(
#         [ImagePromptMessageContent.DETAIL.HIGH, ImagePromptMessageContent.DETAIL.LOW]
#     )
#     fake_remote_url = faker.url()
#     files = [
#         File(
#             id="1",
#             tenant_id="test",
#             type=FileType.IMAGE,
#             filename="test1.jpg",
#             transfer_method=FileTransferMethod.REMOTE_URL,
#             remote_url=fake_remote_url,
#             storage_key="",
#         )
#     ]

    fake_query = faker.sentence()
#     fake_query = faker.sentence()

    prompt_messages, _ = llm_node._fetch_prompt_messages(
        sys_query=fake_query,
        sys_files=files,
        context=None,
        memory=None,
        model_config=model_config,
        prompt_template=prompt_template,
        memory_config=None,
        vision_enabled=False,
        vision_detail=fake_vision_detail,
        variable_pool=llm_node.graph_runtime_state.variable_pool,
        jinja2_variables=[],
    )
#     prompt_messages, _ = llm_node._fetch_prompt_messages(
#         sys_query=fake_query,
#         sys_files=files,
#         context=None,
#         memory=None,
#         model_config=model_config,
#         prompt_template=prompt_template,
#         memory_config=None,
#         vision_enabled=False,
#         vision_detail=fake_vision_detail,
#         variable_pool=llm_node.graph_runtime_state.variable_pool,
#         jinja2_variables=[],
#     )

    assert prompt_messages == [UserPromptMessage(content=fake_query)]
#     assert prompt_messages == [UserPromptMessage(content=fake_query)]


def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
    # Setup dify config
    dify_config.MULTIMODAL_SEND_FORMAT = "url"
# def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
# TODO: Add test
# pass
#     # Setup dify config
#     dify_config.MULTIMODAL_SEND_FORMAT = "url"

    # Generate fake values for prompt template
    fake_assistant_prompt = faker.sentence()
    fake_query = faker.sentence()
    fake_context = faker.sentence()
    fake_window_size = faker.random_int(min=1, max=3)
    fake_vision_detail = faker.random_element(
        [ImagePromptMessageContent.DETAIL.HIGH, ImagePromptMessageContent.DETAIL.LOW]
    )
    fake_remote_url = faker.url()
#     # Generate fake values for prompt template
#     fake_assistant_prompt = faker.sentence()
#     fake_query = faker.sentence()
#     fake_context = faker.sentence()
#     fake_window_size = faker.random_int(min=1, max=3)
#     fake_vision_detail = faker.random_element(
#         [ImagePromptMessageContent.DETAIL.HIGH, ImagePromptMessageContent.DETAIL.LOW]
#     )
#     fake_remote_url = faker.url()

    # Setup mock memory with history messages
    mock_history = [
        UserPromptMessage(content=faker.sentence()),
        AssistantPromptMessage(content=faker.sentence()),
        UserPromptMessage(content=faker.sentence()),
        AssistantPromptMessage(content=faker.sentence()),
        UserPromptMessage(content=faker.sentence()),
        AssistantPromptMessage(content=faker.sentence()),
    ]
#     # Setup mock memory with history messages
#     mock_history = [
#         UserPromptMessage(content=faker.sentence()),
#         AssistantPromptMessage(content=faker.sentence()),
#         UserPromptMessage(content=faker.sentence()),
#         AssistantPromptMessage(content=faker.sentence()),
#         UserPromptMessage(content=faker.sentence()),
#         AssistantPromptMessage(content=faker.sentence()),
#     ]

    # Setup memory configuration
    memory_config = MemoryConfig(
        role_prefix=MemoryConfig.RolePrefix(user="Human", assistant="Assistant"),
        window=MemoryConfig.WindowConfig(enabled=True, size=fake_window_size),
        query_prompt_template=None,
    )
#     # Setup memory configuration
#     memory_config = MemoryConfig(
#         role_prefix=MemoryConfig.RolePrefix(user="Human", assistant="Assistant"),
#         window=MemoryConfig.WindowConfig(enabled=True, size=fake_window_size),
#         query_prompt_template=None,
#     )

    memory = MockTokenBufferMemory(history_messages=mock_history)
#     memory = MockTokenBufferMemory(history_messages=mock_history)

    # Test scenarios covering different file input combinations
    test_scenarios = [
        LLMNodeTestScenario(
            description="No files",
            sys_query=fake_query,
            sys_files=[],
            features=[],
            vision_enabled=False,
            vision_detail=None,
            window_size=fake_window_size,
            prompt_template=[
                LLMNodeChatModelMessage(
                    text=fake_context,
                    role=PromptMessageRole.SYSTEM,
                    edition_type="basic",
                ),
                LLMNodeChatModelMessage(
                    text="{#context#}",
                    role=PromptMessageRole.USER,
                    edition_type="basic",
                ),
                LLMNodeChatModelMessage(
                    text=fake_assistant_prompt,
                    role=PromptMessageRole.ASSISTANT,
                    edition_type="basic",
                ),
            ],
            expected_messages=[
                SystemPromptMessage(content=fake_context),
                UserPromptMessage(content=fake_context),
                AssistantPromptMessage(content=fake_assistant_prompt),
            ]
            + mock_history[fake_window_size * -2 :]
            + [
                UserPromptMessage(content=fake_query),
            ],
        ),
        LLMNodeTestScenario(
            description="User files",
            sys_query=fake_query,
            sys_files=[
                File(
                    tenant_id="test",
                    type=FileType.IMAGE,
                    filename="test1.jpg",
                    transfer_method=FileTransferMethod.REMOTE_URL,
                    remote_url=fake_remote_url,
                    extension=".jpg",
                    mime_type="image/jpg",
                    storage_key="",
                )
            ],
            vision_enabled=True,
            vision_detail=fake_vision_detail,
            features=[ModelFeature.VISION],
            window_size=fake_window_size,
            prompt_template=[
                LLMNodeChatModelMessage(
                    text=fake_context,
                    role=PromptMessageRole.SYSTEM,
                    edition_type="basic",
                ),
                LLMNodeChatModelMessage(
                    text="{#context#}",
                    role=PromptMessageRole.USER,
                    edition_type="basic",
                ),
                LLMNodeChatModelMessage(
                    text=fake_assistant_prompt,
                    role=PromptMessageRole.ASSISTANT,
                    edition_type="basic",
                ),
            ],
            expected_messages=[
                SystemPromptMessage(content=fake_context),
                UserPromptMessage(content=fake_context),
                AssistantPromptMessage(content=fake_assistant_prompt),
            ]
            + mock_history[fake_window_size * -2 :]
            + [
                UserPromptMessage(
                    content=[
                        TextPromptMessageContent(data=fake_query),
                        ImagePromptMessageContent(
                            url=fake_remote_url, mime_type="image/jpg", format="jpg", detail=fake_vision_detail
                        ),
                    ]
                ),
            ],
        ),
        LLMNodeTestScenario(
            description="Prompt template with variable selector of File",
            sys_query=fake_query,
            sys_files=[],
            vision_enabled=False,
            vision_detail=fake_vision_detail,
            features=[ModelFeature.VISION],
            window_size=fake_window_size,
            prompt_template=[
                LLMNodeChatModelMessage(
                    text="{{#input.image#}}",
                    role=PromptMessageRole.USER,
                    edition_type="basic",
                ),
            ],
            expected_messages=[
                UserPromptMessage(
                    content=[
                        ImagePromptMessageContent(
                            url=fake_remote_url, mime_type="image/jpg", format="jpg", detail=fake_vision_detail
                        ),
                    ]
                ),
            ]
            + mock_history[fake_window_size * -2 :]
            + [UserPromptMessage(content=fake_query)],
            file_variables={
                "input.image": File(
                    tenant_id="test",
                    type=FileType.IMAGE,
                    filename="test1.jpg",
                    transfer_method=FileTransferMethod.REMOTE_URL,
                    remote_url=fake_remote_url,
                    extension=".jpg",
                    mime_type="image/jpg",
                    storage_key="",
                )
            },
        ),
    ]
#     # Test scenarios covering different file input combinations
#     test_scenarios = [
#         LLMNodeTestScenario(
#             description="No files",
#             sys_query=fake_query,
#             sys_files=[],
#             features=[],
#             vision_enabled=False,
#             vision_detail=None,
#             window_size=fake_window_size,
#             prompt_template=[
#                 LLMNodeChatModelMessage(
#                     text=fake_context,
#                     role=PromptMessageRole.SYSTEM,
#                     edition_type="basic",
#                 ),
#                 LLMNodeChatModelMessage(
#                     text="{#context#}",
#                     role=PromptMessageRole.USER,
#                     edition_type="basic",
#                 ),
#                 LLMNodeChatModelMessage(
#                     text=fake_assistant_prompt,
#                     role=PromptMessageRole.ASSISTANT,
#                     edition_type="basic",
#                 ),
#             ],
#             expected_messages=[
#                 SystemPromptMessage(content=fake_context),
#                 UserPromptMessage(content=fake_context),
#                 AssistantPromptMessage(content=fake_assistant_prompt),
#             ]
#             + mock_history[fake_window_size * -2 :]
#             + [
#                 UserPromptMessage(content=fake_query),
#             ],
#         ),
#         LLMNodeTestScenario(
#             description="User files",
#             sys_query=fake_query,
#             sys_files=[
#                 File(
#                     tenant_id="test",
#                     type=FileType.IMAGE,
#                     filename="test1.jpg",
#                     transfer_method=FileTransferMethod.REMOTE_URL,
#                     remote_url=fake_remote_url,
#                     extension=".jpg",
#                     mime_type="image/jpg",
#                     storage_key="",
#                 )
#             ],
#             vision_enabled=True,
#             vision_detail=fake_vision_detail,
#             features=[ModelFeature.VISION],
#             window_size=fake_window_size,
#             prompt_template=[
#                 LLMNodeChatModelMessage(
#                     text=fake_context,
#                     role=PromptMessageRole.SYSTEM,
#                     edition_type="basic",
#                 ),
#                 LLMNodeChatModelMessage(
#                     text="{#context#}",
#                     role=PromptMessageRole.USER,
#                     edition_type="basic",
#                 ),
#                 LLMNodeChatModelMessage(
#                     text=fake_assistant_prompt,
#                     role=PromptMessageRole.ASSISTANT,
#                     edition_type="basic",
#                 ),
#             ],
#             expected_messages=[
#                 SystemPromptMessage(content=fake_context),
#                 UserPromptMessage(content=fake_context),
#                 AssistantPromptMessage(content=fake_assistant_prompt),
#             ]
#             + mock_history[fake_window_size * -2 :]
#             + [
#                 UserPromptMessage(
#                     content=[
#                         TextPromptMessageContent(data=fake_query),
#                         ImagePromptMessageContent(
#                             url=fake_remote_url, mime_type="image/jpg", format="jpg", detail=fake_vision_detail
#                         ),
#                     ]
#                 ),
#             ],
#         ),
#         LLMNodeTestScenario(
#             description="Prompt template with variable selector of File",
#             sys_query=fake_query,
#             sys_files=[],
#             vision_enabled=False,
#             vision_detail=fake_vision_detail,
#             features=[ModelFeature.VISION],
#             window_size=fake_window_size,
#             prompt_template=[
#                 LLMNodeChatModelMessage(
#                     text="{{#input.image#}}",
#                     role=PromptMessageRole.USER,
#                     edition_type="basic",
#                 ),
#             ],
#             expected_messages=[
#                 UserPromptMessage(
#                     content=[
#                         ImagePromptMessageContent(
#                             url=fake_remote_url, mime_type="image/jpg", format="jpg", detail=fake_vision_detail
#                         ),
#                     ]
#                 ),
#             ]
#             + mock_history[fake_window_size * -2 :]
#             + [UserPromptMessage(content=fake_query)],
#             file_variables={
#                 "input.image": File(
#                     tenant_id="test",
#                     type=FileType.IMAGE,
#                     filename="test1.jpg",
#                     transfer_method=FileTransferMethod.REMOTE_URL,
#                     remote_url=fake_remote_url,
#                     extension=".jpg",
#                     mime_type="image/jpg",
#                     storage_key="",
#                 )
#             },
#         ),
#     ]

    for scenario in test_scenarios:
        model_config.model_schema.features = scenario.features
#     for scenario in test_scenarios:
#         model_config.model_schema.features = scenario.features

        for k, v in scenario.file_variables.items():
            selector = k.split(".")
            llm_node.graph_runtime_state.variable_pool.add(selector, v)
#         for k, v in scenario.file_variables.items():
#             selector = k.split(".")
#             llm_node.graph_runtime_state.variable_pool.add(selector, v)

        # Call the method under test
        prompt_messages, _ = llm_node._fetch_prompt_messages(
            sys_query=scenario.sys_query,
            sys_files=scenario.sys_files,
            context=fake_context,
            memory=memory,
            model_config=model_config,
            prompt_template=scenario.prompt_template,
            memory_config=memory_config,
            vision_enabled=scenario.vision_enabled,
            vision_detail=scenario.vision_detail,
            variable_pool=llm_node.graph_runtime_state.variable_pool,
            jinja2_variables=[],
        )
#         # Call the method under test
#         prompt_messages, _ = llm_node._fetch_prompt_messages(
#             sys_query=scenario.sys_query,
#             sys_files=scenario.sys_files,
#             context=fake_context,
#             memory=memory,
#             model_config=model_config,
#             prompt_template=scenario.prompt_template,
#             memory_config=memory_config,
#             vision_enabled=scenario.vision_enabled,
#             vision_detail=scenario.vision_detail,
#             variable_pool=llm_node.graph_runtime_state.variable_pool,
#             jinja2_variables=[],
#         )

        # Verify the result
        assert len(prompt_messages) == len(scenario.expected_messages), f"Scenario failed: {scenario.description}"
        assert (
            prompt_messages == scenario.expected_messages
        ), f"Message content mismatch in scenario: {scenario.description}"
#         # Verify the result
#         assert len(prompt_messages) == len(scenario.expected_messages), f"Scenario failed: {scenario.description}"
#         assert (
#             prompt_messages == scenario.expected_messages
#         ), f"Message content mismatch in scenario: {scenario.description}"


def test_handle_list_messages_basic(llm_node):
@@ -126,7 +126,7 @@ class ContinueOnErrorTestHelper:
            },
        }
        if default_value:
            node["data"]["default_value"] = default_value
            node.node_data.default_value = default_value
        return node

    @staticmethod

@@ -331,55 +331,55 @@ def test_http_node_fail_branch_continue_on_error():
    assert sum(1 for e in events if isinstance(e, NodeRunStreamChunkEvent)) == 1


def test_tool_node_default_value_continue_on_error():
    """Test tool node with default value error strategy"""
    graph_config = {
        "edges": DEFAULT_VALUE_EDGE,
        "nodes": [
            {"data": {"title": "start", "type": "start", "variables": []}, "id": "start"},
            {"data": {"title": "answer", "type": "answer", "answer": "{{#node.result#}}"}, "id": "answer"},
            ContinueOnErrorTestHelper.get_tool_node(
                "default-value", [{"key": "result", "type": "string", "value": "default tool result"}]
            ),
        ],
    }
# def test_tool_node_default_value_continue_on_error():
#     """Test tool node with default value error strategy"""
#     graph_config = {
#         "edges": DEFAULT_VALUE_EDGE,
#         "nodes": [
#             {"data": {"title": "start", "type": "start", "variables": []}, "id": "start"},
#             {"data": {"title": "answer", "type": "answer", "answer": "{{#node.result#}}"}, "id": "answer"},
#             ContinueOnErrorTestHelper.get_tool_node(
#                 "default-value", [{"key": "result", "type": "string", "value": "default tool result"}]
#             ),
#         ],
#     }

    graph_engine = ContinueOnErrorTestHelper.create_test_graph_engine(graph_config)
    events = list(graph_engine.run())
#     graph_engine = ContinueOnErrorTestHelper.create_test_graph_engine(graph_config)
#     events = list(graph_engine.run())

    assert any(isinstance(e, NodeRunExceptionEvent) for e in events)
    assert any(
        isinstance(e, GraphRunPartialSucceededEvent) and e.outputs == {"answer": "default tool result"} for e in events
    )
    assert sum(1 for e in events if isinstance(e, NodeRunStreamChunkEvent)) == 1
#     assert any(isinstance(e, NodeRunExceptionEvent) for e in events)
#     assert any(
#         isinstance(e, GraphRunPartialSucceededEvent) and e.outputs == {"answer": "default tool result"} for e in events  # noqa: E501
#     )
#     assert sum(1 for e in events if isinstance(e, NodeRunStreamChunkEvent)) == 1


def test_tool_node_fail_branch_continue_on_error():
    """Test HTTP node with fail-branch error strategy"""
    graph_config = {
        "edges": FAIL_BRANCH_EDGES,
        "nodes": [
            {"data": {"title": "Start", "type": "start", "variables": []}, "id": "start"},
            {
                "data": {"title": "success", "type": "answer", "answer": "tool execute successful"},
                "id": "success",
            },
            {
                "data": {"title": "error", "type": "answer", "answer": "tool execute failed"},
                "id": "error",
            },
            ContinueOnErrorTestHelper.get_tool_node(),
        ],
    }
# def test_tool_node_fail_branch_continue_on_error():
#     """Test HTTP node with fail-branch error strategy"""
#     graph_config = {
#         "edges": FAIL_BRANCH_EDGES,
#         "nodes": [
#             {"data": {"title": "Start", "type": "start", "variables": []}, "id": "start"},
#             {
#                 "data": {"title": "success", "type": "answer", "answer": "tool execute successful"},
#                 "id": "success",
#             },
#             {
#                 "data": {"title": "error", "type": "answer", "answer": "tool execute failed"},
#                 "id": "error",
#             },
#             ContinueOnErrorTestHelper.get_tool_node(),
#         ],
#     }

    graph_engine = ContinueOnErrorTestHelper.create_test_graph_engine(graph_config)
    events = list(graph_engine.run())
#     graph_engine = ContinueOnErrorTestHelper.create_test_graph_engine(graph_config)
#     events = list(graph_engine.run())

    assert any(isinstance(e, NodeRunExceptionEvent) for e in events)
    assert any(
        isinstance(e, GraphRunPartialSucceededEvent) and e.outputs == {"answer": "tool execute failed"} for e in events
    )
    assert sum(1 for e in events if isinstance(e, NodeRunStreamChunkEvent)) == 1
#     assert any(isinstance(e, NodeRunExceptionEvent) for e in events)
#     assert any(
#         isinstance(e, GraphRunPartialSucceededEvent) and e.outputs == {"answer": "tool execute failed"} for e in events  # noqa: E501
#     )
#     assert sum(1 for e in events if isinstance(e, NodeRunStreamChunkEvent)) == 1


def test_llm_node_default_value_continue_on_error():
@@ -10,6 +10,8 @@ BASE_API_AND_DOCKER_CONFIG_SET_DIFF = {
    "HTTP_REQUEST_MAX_CONNECT_TIMEOUT",
    "HTTP_REQUEST_MAX_READ_TIMEOUT",
    "HTTP_REQUEST_MAX_WRITE_TIMEOUT",
    "INNER_API_KEY",
    "INNER_API_KEY_FOR_PLUGIN",
    "KEYWORD_DATA_SOURCE_TYPE",
    "LOGIN_LOCKOUT_DURATION",
    "LOG_FORMAT",

@@ -18,6 +20,10 @@ BASE_API_AND_DOCKER_CONFIG_SET_DIFF = {
    "OCI_ENDPOINT",
    "OCI_REGION",
    "OCI_SECRET_KEY",
    "PLUGIN_API_KEY",
    "PLUGIN_API_URL",
    "PLUGIN_REMOTE_INSTALL_HOST",
    "PLUGIN_REMOTE_INSTALL_PORT",
    "REDIS_DB",
    "RESEND_API_URL",
    "RESPECT_XFORWARD_HEADERS_ENABLED",

@@ -40,6 +46,8 @@ BASE_API_AND_DOCKER_COMPOSE_CONFIG_SET_DIFF = {
    "HTTP_REQUEST_MAX_CONNECT_TIMEOUT",
    "HTTP_REQUEST_MAX_READ_TIMEOUT",
    "HTTP_REQUEST_MAX_WRITE_TIMEOUT",
    "INNER_API_KEY",
    "INNER_API_KEY_FOR_PLUGIN",
    "KEYWORD_DATA_SOURCE_TYPE",
    "LOGIN_LOCKOUT_DURATION",
    "LOG_FORMAT",

@@ -58,6 +66,10 @@ BASE_API_AND_DOCKER_COMPOSE_CONFIG_SET_DIFF = {
    "PGVECTO_RS_PASSWORD",
    "PGVECTO_RS_PORT",
    "PGVECTO_RS_USER",
    "PLUGIN_API_KEY",
    "PLUGIN_API_URL",
    "PLUGIN_REMOTE_INSTALL_HOST",
    "PLUGIN_REMOTE_INSTALL_PORT",
    "RESPECT_XFORWARD_HEADERS_ENABLED",
    "SCARF_NO_ANALYTICS",
    "SSRF_DEFAULT_CONNECT_TIME_OUT",
@@ -2,7 +2,7 @@ version: '3'
services:
  # API service
  api:
    image: langgenius/dify-api:0.15.0
    image: langgenius/dify-api:1.0.0-beta.1
    restart: always
    environment:
      # Startup mode, 'api' starts the API server.

@@ -227,7 +227,7 @@ services:
  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:0.15.0
    image: langgenius/dify-api:1.0.0-beta.1
    restart: always
    environment:
      CONSOLE_WEB_URL: ''

@@ -397,7 +397,7 @@ services:

  # Frontend web application.
  web:
    image: langgenius/dify-web:0.15.0
    image: langgenius/dify-web:1.0.0-beta.1
    restart: always
    environment:
      # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
@@ -937,7 +937,7 @@ TOP_K_MAX_VALUE=10
# Plugin Daemon Configuration
# ------------------------------

DB_PLUGIN_DATABASE=dify-plugin
DB_PLUGIN_DATABASE=dify_plugin
EXPOSE_PLUGIN_DAEMON_PORT=5002
PLUGIN_DAEMON_PORT=5002
PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi

@@ -958,3 +958,4 @@ ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id}
MARKETPLACE_ENABLED=true
MARKETPLACE_API_URL=https://marketplace-plugin.dify.dev

FORCE_VERIFYING_SIGNATURE=true
@@ -2,24 +2,22 @@ x-shared-env: &shared-api-worker-env
services:
  # API service
  api:
    image: langgenius/dify-api:dev-plugin-deploy
    image: langgenius/dify-api:1.0.0-beta.1
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'api' starts the API server.
      MODE: api
      CONSOLE_API_URL: ${CONSOLE_API_URL:-http://localhost:5001}
      CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-http://localhost:3000}
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
      PLUGIN_API_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
      PLUGIN_API_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002}
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-false}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace-plugin.dify.dev}
      PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
      MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
      ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}}
    depends_on:
      - db

@@ -34,7 +32,7 @@ services:
  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:dev-plugin-deploy
    image: langgenius/dify-api:1.0.0-beta.1
    restart: always
    environment:
      # Use the shared environment variables.

@@ -49,7 +47,7 @@ services:
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-false}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace-plugin.dify.dev}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
    depends_on:
      - db
      - redis

@@ -62,7 +60,7 @@ services:

  # Frontend web application.
  web:
    image: langgenius/dify-web:dev-plugin-deploy
    image: langgenius/dify-web:1.0.0-beta.1
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}

@@ -71,8 +69,8 @@ services:
      NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
      TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
      CSP_WHITELIST: ${CSP_WHITELIST:-}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace-plugin.dify.dev}
      MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace-plugin.dify.dev}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
      MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai}
      TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}

  # The postgres database.

@@ -138,7 +136,7 @@ services:

  # plugin daemon
  plugin_daemon:
    image: langgenius/dify-plugin-daemon:47c8bed17c22f67bd035d0979e696cb00ca45b16-local
    image: langgenius/dify-plugin-daemon:0.0.1-local
    restart: always
    environment:
      # Use the shared environment variables.

@@ -149,10 +147,11 @@ services:
      MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
      DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
      DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
      PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
      DIFY_INNER_API_KEY: ${INNER_API_KEY_FOR_PLUGIN:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_REMOTE_INSTALL_HOST:-0.0.0.0}
      PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_REMOTE_INSTALL_PORT:-5003}
      PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
      FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
    ports:
      - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
    volumes:
@@ -66,7 +66,7 @@ services:

  # plugin daemon
  plugin_daemon:
    image: langgenius/dify-plugin-daemon:47c8bed17c22f67bd035d0979e696cb00ca45b16-local
    image: langgenius/dify-plugin-daemon:0.0.1-local
    restart: always
    environment:
      # Use the shared environment variables.

@@ -87,6 +87,7 @@ services:
      PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
      PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
      FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
    ports:
      - "${EXPOSE_PLUGIN_DAEMON_PORT:-5002}:${PLUGIN_DAEMON_PORT:-5002}"
      - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
@@ -5,9 +5,9 @@
# ==================================================================

x-shared-env: &shared-api-worker-env
  CONSOLE_API_URL: ${CONSOLE_API_URL:-}
  CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-}
  SERVICE_API_URL: ${SERVICE_API_URL:-}
  CONSOLE_API_URL: ${CONSOLE_API_URL:-http://localhost}
  CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-http://localhost}
  SERVICE_API_URL: ${SERVICE_API_URL:-http://localhost}
  APP_API_URL: ${APP_API_URL:-}
  APP_WEB_URL: ${APP_WEB_URL:-}
  FILES_URL: ${FILES_URL:-}

@@ -388,7 +388,8 @@ x-shared-env: &shared-api-worker-env
  CSP_WHITELIST: ${CSP_WHITELIST:-}
  CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false}
  MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100}
  DB_PLUGIN_DATABASE: ${DB_PLUGIN_DATABASE:-dify-plugin}
  TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10}
  DB_PLUGIN_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
  EXPOSE_PLUGIN_DAEMON_PORT: ${EXPOSE_PLUGIN_DAEMON_PORT:-5002}
  PLUGIN_DAEMON_PORT: ${PLUGIN_DAEMON_PORT:-5002}
  PLUGIN_DAEMON_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}

@@ -404,12 +405,11 @@ x-shared-env: &shared-api-worker-env
  ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}}
  MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true}
  MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace-plugin.dify.dev}
  TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10}

services:
  # API service
  api:
    image: langgenius/dify-api:dev-plugin-deploy
    image: langgenius/dify-api:1.0.0-beta.1
    restart: always
    environment:
      # Use the shared environment variables.

@@ -419,14 +419,10 @@ services:
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
      PLUGIN_API_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
      PLUGIN_API_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002}
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-false}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace-plugin.dify.dev}
      PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
      MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
      ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}}
    depends_on:
      - db

@@ -441,7 +437,7 @@ services:
  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:dev-plugin-deploy
    image: langgenius/dify-api:1.0.0-beta.1
    restart: always
    environment:
      # Use the shared environment variables.

@@ -456,7 +452,7 @@ services:
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-false}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace-plugin.dify.dev}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
    depends_on:
      - db
      - redis

@@ -469,7 +465,7 @@ services:

  # Frontend web application.
  web:
    image: langgenius/dify-web:dev-plugin-deploy
    image: langgenius/dify-web:1.0.0-beta.1
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}

@@ -480,8 +476,8 @@ services:
      NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
      TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
      CSP_WHITELIST: ${CSP_WHITELIST:-}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace-plugin.dify.dev}
      MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace-plugin.dify.dev}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
      MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai}
      TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}

  # The postgres database.

@@ -547,7 +543,7 @@ services:

  # plugin daemon
  plugin_daemon:
    image: langgenius/dify-plugin-daemon:47c8bed17c22f67bd035d0979e696cb00ca45b16-local
    image: langgenius/dify-plugin-daemon:0.0.1-local
    restart: always
    environment:
      # Use the shared environment variables.

@@ -557,12 +553,12 @@ services:
      SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
      MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
      DEBUGGING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
      DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
      DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
      PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
      DIFY_INNER_API_KEY: ${INNER_API_KEY_FOR_PLUGIN:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_REMOTE_INSTALL_HOST:-0.0.0.0}
      PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_REMOTE_INSTALL_PORT:-5003}
      PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
      FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
    ports:
      - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
    volumes:
@@ -92,7 +92,7 @@ EXPOSE_WEAVIATE_PORT=8080
# Plugin Daemon Configuration
# ------------------------------

DB_PLUGIN_DATABASE=dify-plugin
DB_PLUGIN_DATABASE=dify_plugin
EXPOSE_PLUGIN_DAEMON_PORT=5002
PLUGIN_DAEMON_PORT=5002
PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi

@@ -113,3 +113,5 @@ PLUGIN_DIFY_INNER_API_URL=http://api:5001

MARKETPLACE_ENABLED=true
MARKETPLACE_API_URL=https://marketplace-plugin.dify.dev

FORCE_VERIFYING_SIGNATURE=true
@@ -24,6 +24,11 @@ server {
    include proxy.conf;
  }

  location /explore {
    proxy_pass http://web:3000;
    include proxy.conf;
  }

  location /e {
    proxy_pass http://plugin_daemon:5002;
    include proxy.conf;
@ -2,7 +2,7 @@ import React from 'react'
|
|||
import Main from '@/app/components/app/log-annotation'
|
||||
import { PageType } from '@/app/components/base/features/new-feature-panel/annotation-reply/type'
|
||||
|
||||
export type IProps = {
|
||||
export interface IProps {
|
||||
params: { appId: string }
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ import { logout } from '@/service/common'
|
|||
import { useAppContext } from '@/context/app-context'
|
||||
import { LogOut01 } from '@/app/components/base/icons/src/vender/line/general'
|
||||
|
||||
export type IAppSelector = {
|
||||
export interface IAppSelector {
|
||||
isMobile: boolean
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -6,11 +6,11 @@ import useSWR from 'swr'
|
|||
import Input from '@/app/components/base/input'
|
||||
import { fetchAnnotationsCount } from '@/service/log'
|
||||
|
||||
export type QueryParam = {
|
||||
export interface QueryParam {
|
||||
keyword?: string
|
||||
}
|
||||
|
||||
type IFilterProps = {
|
||||
interface IFilterProps {
|
||||
appId: string
|
||||
queryParams: QueryParam
|
||||
setQueryParams: (v: QueryParam) => void
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ import ActionButton from '@/app/components/base/action-button'
|
|||
import useTimestamp from '@/hooks/use-timestamp'
|
||||
import cn from '@/utils/classnames'
|
||||
|
||||
type Props = {
|
||||
interface Props {
|
||||
list: AnnotationItem[]
|
||||
onRemove: (id: string) => void
|
||||
onView: (item: AnnotationItem) => void
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ import { MessageClockCircle } from '@/app/components/base/icons/src/vender/solid
|
|||
import I18n from '@/context/i18n'
|
||||
import { LanguagesSupported } from '@/i18n/language'
|
||||
|
||||
type Props = {
|
||||
interface Props {
|
||||
showWarning: boolean
|
||||
onShowEditModal: () => void
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ import type { FC } from 'react'
|
|||
import React from 'react'
|
||||
import cn from '@/utils/classnames'
|
||||
|
||||
type Props = {
|
||||
interface Props {
|
||||
className?: string
|
||||
title: string
|
||||
children: JSX.Element
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ import { DEFAULT_VALUE_MAX_LEN } from '@/config'
|
|||
|
||||
const TEXT_MAX_LENGTH = 256
|
||||
|
||||
export type IConfigModalProps = {
|
||||
export interface IConfigModalProps {
|
||||
isCreate?: boolean
|
||||
payload?: InputVar
|
||||
isShow: boolean
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ import type { FC } from 'react'
|
|||
import React, { useEffect } from 'react'
|
||||
import Input from '@/app/components/base/input'
|
||||
|
||||
export type IConfigStringProps = {
|
||||
export interface IConfigStringProps {
|
||||
value: number | undefined
|
||||
maxLength: number
|
||||
modelId: string
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ import { InputVarType } from '@/app/components/workflow/types'
|
|||
|
||||
export const ADD_EXTERNAL_DATA_TOOL = 'ADD_EXTERNAL_DATA_TOOL'
|
||||
|
||||
type ExternalDataToolParams = {
|
||||
interface ExternalDataToolParams {
|
||||
key: string
|
||||
type: string
|
||||
index: number
|
||||
|
|
@ -43,7 +43,7 @@ type ExternalDataToolParams = {
|
|||
icon_background?: string
|
||||
}
|
||||
|
||||
export type IConfigVarProps = {
|
||||
export interface IConfigVarProps {
|
||||
promptVariables: PromptVariable[]
|
||||
readonly?: boolean
|
||||
onPromptVariablesChange?: (promptVariables: PromptVariable[]) => void
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ import { useTranslation } from 'react-i18next'
|
|||
import cn from '@/utils/classnames'
|
||||
import type { InputVarType } from '@/app/components/workflow/types'
|
||||
import InputVarTypeIcon from '@/app/components/workflow/nodes/_base/components/input-var-type-icon'
|
||||
export type ISelectTypeItemProps = {
|
||||
export interface ISelectTypeItemProps {
|
||||
type: InputVarType
|
||||
selected: boolean
|
||||
onClick: () => void
|
||||
|
|
|
|||
|
|
@ -38,7 +38,7 @@ import ModelName from '@/app/components/header/account-setting/model-provider-pa
|
|||
import { ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
|
||||
import { useModelListAndDefaultModelAndCurrentProviderAndModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
|
||||
|
||||
export type IGetAutomaticResProps = {
|
||||
export interface IGetAutomaticResProps {
|
||||
mode: AppType
|
||||
model: Model
|
||||
isShow: boolean
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ import { ModelTypeEnum } from '@/app/components/header/account-setting/model-pro
|
|||
import { fetchMembers } from '@/service/common'
|
||||
import type { Member } from '@/models/common'
|
||||
|
||||
type SettingsModalProps = {
|
||||
interface SettingsModalProps {
|
||||
currentDataset: DataSet
|
||||
onCancel: () => void
|
||||
onSave: (newDataset: DataSet) => void
|
||||
|
|
|
|||
|
|
@ -31,7 +31,7 @@ import { useFeatures } from '@/app/components/base/features/hooks'
|
|||
import type { InputForm } from '@/app/components/base/chat/chat/type'
|
||||
import { getLastAnswer } from '@/app/components/base/chat/utils'
|
||||
|
||||
type ChatItemProps = {
|
||||
interface ChatItemProps {
|
||||
modelAndParameter: ModelAndParameter
|
||||
}
|
||||
const ChatItem: FC<ChatItemProps> = ({
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ import { useEventEmitterContextContext } from '@/context/event-emitter'
|
|||
import { useProviderContext } from '@/context/provider-context'
|
||||
import { useFeatures } from '@/app/components/base/features/hooks'
|
||||
|
||||
type TextGenerationItemProps = {
|
||||
interface TextGenerationItemProps {
|
||||
modelAndParameter: ModelAndParameter
|
||||
}
|
||||
const TextGenerationItem: FC<TextGenerationItemProps> = ({
|
||||
|
|
|
|||
|
|
@ -27,10 +27,10 @@ import { useFeatures } from '@/app/components/base/features/hooks'
|
|||
import { getLastAnswer } from '@/app/components/base/chat/utils'
|
||||
import type { InputForm } from '@/app/components/base/chat/chat/type'
|
||||
|
||||
type DebugWithSingleModelProps = {
|
||||
interface DebugWithSingleModelProps {
|
||||
checkCanSend?: () => boolean
|
||||
}
|
||||
export type DebugWithSingleModelRefType = {
|
||||
export interface DebugWithSingleModelRefType {
|
||||
handleRestart: () => void
|
||||
}
|
||||
const DebugWithSingleModel = forwardRef<DebugWithSingleModelRefType, DebugWithSingleModelProps>(({
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ import PromptLogModal from '@/app/components/base/prompt-log-modal'
|
|||
import { useStore as useAppStore } from '@/app/components/app/store'
|
||||
import { useFeatures, useFeaturesStore } from '@/app/components/base/features/hooks'
|
||||
|
||||
type IDebug = {
|
||||
interface IDebug {
|
||||
isAPIKeySet: boolean
|
||||
onSetting: () => void
|
||||
inputs: Inputs
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ import { DEFAULT_VALUE_MAX_LEN } from '@/config'
|
|||
import { useStore as useAppStore } from '@/app/components/app/store'
|
||||
import cn from '@/utils/classnames'
|
||||
|
||||
export type IPromptValuePanelProps = {
|
||||
export interface IPromptValuePanelProps {
|
||||
appType: AppType
|
||||
onSend?: () => void
|
||||
inputs: Inputs
|
||||
|
|
|
|||
|
|
@ -21,13 +21,13 @@ import { useToastContext } from '@/app/components/base/toast'
|
|||
import AppIcon from '@/app/components/base/app-icon'
|
||||
|
||||
const systemTypes = ['api']
|
||||
type ExternalDataToolModalProps = {
|
||||
interface ExternalDataToolModalProps {
|
||||
data: ExternalDataTool
|
||||
onCancel: () => void
|
||||
onSave: (externalDataTool: ExternalDataTool) => void
|
||||
onValidateBeforeSave?: (externalDataTool: ExternalDataTool) => boolean
|
||||
}
|
||||
type Provider = {
|
||||
interface Provider {
|
||||
key: string
|
||||
name: string
|
||||
form_schema?: CodeBasedExtensionItem['form_schema']
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ import { useProviderContext } from '@/context/provider-context'
|
|||
import AppsFull from '@/app/components/billing/apps-full-in-dialog'
|
||||
import type { AppIconType } from '@/types/app'
|
||||
|
||||
export type DuplicateAppModalProps = {
|
||||
export interface DuplicateAppModalProps {
|
||||
appName: string
|
||||
icon_type: AppIconType | null
|
||||
icon: string
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@ import { create } from 'zustand'
|
|||
import type { App, AppSSO } from '@/types/app'
|
||||
import type { IChatItem } from '@/app/components/base/chat/chat/type'
|
||||
|
||||
type State = {
|
||||
interface State {
|
||||
appDetail?: App & Partial<AppSSO>
|
||||
appSidebarExpand: string
|
||||
currentLogItem?: IChatItem
|
||||
|
|
@ -13,7 +13,7 @@ type State = {
|
|||
showAppConfigureFeaturesModal: boolean
|
||||
}
|
||||
|
||||
type Action = {
|
||||
interface Action {
|
||||
setAppDetail: (appDetail?: App & Partial<AppSSO>) => void
|
||||
setAppSiderbarExpand: (state: string) => void
|
||||
setCurrentLogItem: (item?: IChatItem) => void
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ import { AlertTriangle } from '@/app/components/base/icons/src/vender/solid/aler
|
|||
import AppIcon from '@/app/components/base/app-icon'
|
||||
import { useStore as useAppStore } from '@/app/components/app/store'
|
||||
|
||||
type SwitchAppModalProps = {
|
||||
interface SwitchAppModalProps {
|
||||
show: boolean
|
||||
appDetail: App
|
||||
onSuccess?: () => void
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ import { useChatContext } from '@/app/components/base/chat/chat/context'
|
|||
|
||||
const MAX_DEPTH = 3
|
||||
|
||||
export type IGenerationItemProps = {
|
||||
export interface IGenerationItemProps {
|
||||
isWorkflow?: boolean
|
||||
workflowProcessData?: WorkflowProcess
|
||||
className?: string
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ import { useTranslation } from 'react-i18next'
|
|||
import { RiCloseLine } from '@remixicon/react'
|
||||
import Run from '@/app/components/workflow/run'
|
||||
|
||||
type ILogDetail = {
|
||||
interface ILogDetail {
|
||||
runID: string
|
||||
onClose: () => void
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ import type { QueryParam } from './index'
|
|||
import Chip from '@/app/components/base/chip'
|
||||
import Input from '@/app/components/base/input'
|
||||
|
||||
type IFilterProps = {
|
||||
interface IFilterProps {
|
||||
queryParams: QueryParam
|
||||
setQueryParams: (v: QueryParam) => void
|
||||
}
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ export class AudioPlayerManager {
|
|||
private audioPlayers: AudioPlayer | null = null
|
||||
private msgId: string | undefined
|
||||
|
||||
// eslint-disable-next-line
|
||||
private constructor() {
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ import Tooltip from '@/app/components/base/tooltip'
|
|||
import Loading from '@/app/components/base/loading'
|
||||
import { AudioPlayerManager } from '@/app/components/base/audio-btn/audio.player.manager'
|
||||
|
||||
type AudioBtnProps = {
|
||||
interface AudioBtnProps {
|
||||
id?: string
|
||||
voice?: string
|
||||
value?: string
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ import React from 'react'
|
|||
import { RiAddLine } from '@remixicon/react'
|
||||
import cn from '@/utils/classnames'
|
||||
|
||||
type Props = {
|
||||
interface Props {
|
||||
className?: string
|
||||
onClick: () => void
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ import { useTranslation } from 'react-i18next'
|
|||
import { memo } from 'react'
|
||||
import Textarea from '@/app/components/base/textarea'
|
||||
|
||||
type InputProps = {
|
||||
interface InputProps {
|
||||
form: any
|
||||
value: string
|
||||
onChange: (variable: string, value: string) => void
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ import type {
|
|||
ConversationItem,
|
||||
} from '@/models/share'
|
||||
|
||||
export type ChatWithHistoryContextValue = {
|
||||
export interface ChatWithHistoryContextValue {
|
||||
appInfoError?: any
|
||||
appInfoLoading?: boolean
|
||||
appMeta?: AppMeta
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints'
|
|||
import { checkOrSetAccessToken } from '@/app/components/share/utils'
|
||||
import AppUnavailable from '@/app/components/base/app-unavailable'
|
||||
|
||||
type ChatWithHistoryProps = {
|
||||
interface ChatWithHistoryProps {
|
||||
className?: string
|
||||
}
|
||||
const ChatWithHistory: FC<ChatWithHistoryProps> = ({
|
||||
|
|
@ -99,7 +99,7 @@ const ChatWithHistory: FC<ChatWithHistoryProps> = ({
|
|||
)
|
||||
}
|
||||
|
||||
export type ChatWithHistoryWrapProps = {
|
||||
export interface ChatWithHistoryWrapProps {
|
||||
installedAppInfo?: InstalledApp
|
||||
className?: string
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ import Thought from '@/app/components/base/chat/chat/thought'
|
|||
import { FileList } from '@/app/components/base/file-uploader'
|
||||
import { getProcessedFilesFromResponse } from '@/app/components/base/file-uploader/utils'
|
||||
|
||||
type AgentContentProps = {
|
||||
interface AgentContentProps {
|
||||
item: ChatItem
|
||||
responding?: boolean
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -4,7 +4,7 @@ import type { ChatItem } from '../../types'
import { Markdown } from '@/app/components/base/markdown'
import cn from '@/utils/classnames'

type BasicContentProps = {
interface BasicContentProps {
  item: ChatItem
}
const BasicContent: FC<BasicContentProps> = ({

@@ -23,7 +23,7 @@ import { ChevronRight } from '@/app/components/base/icons/src/vender/line/arrows
import cn from '@/utils/classnames'
import { FileList } from '@/app/components/base/file-uploader'

type AnswerProps = {
interface AnswerProps {
  item: ChatItem
  question: string
  index: number

@@ -21,7 +21,7 @@ import {
import Tooltip from '@/app/components/base/tooltip'
import Log from '@/app/components/base/chat/chat/log'

type OperationProps = {
interface OperationProps {
  item: ChatItem
  question: string
  index: number

@@ -33,7 +33,7 @@ import {
} from '@/app/components/base/file-uploader/utils'

type GetAbortController = (abortController: AbortController) => void
type SendCallback = {
interface SendCallback {
  onGetConversationMessages?: (conversationId: string, getAbortController: GetAbortController) => Promise<any>
  onGetSuggestedQuestions?: (responseItemId: string, getAbortController: GetAbortController) => Promise<any>
  onConversationComplete?: (conversationId: string) => void

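The GetAbortController parameter lets the callback hand its fetch's AbortController back to the chat hook, so the UI can cancel an in-flight request (for example from a stop button). A hedged sketch of that handshake; the endpoint URL and the stop wiring are illustrative, not from the diff:

// Hypothetical illustration of the GetAbortController handshake.
let currentController: AbortController | null = null

const onGetConversationMessages = async (
  conversationId: string,
  getAbortController: (abortController: AbortController) => void,
) => {
  const controller = new AbortController()
  getAbortController(controller) // expose it so the caller can abort later
  currentController = controller
  const res = await fetch(`/conversations/${conversationId}/messages`, {
    signal: controller.signal, // fetch rejects with AbortError on cancel
  })
  return res.json()
}

// e.g. wired to a "stop responding" button:
const stop = () => currentController?.abort()
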
@@ -35,7 +35,7 @@ import PromptLogModal from '@/app/components/base/prompt-log-modal'
import { useStore as useAppStore } from '@/app/components/app/store'
import type { AppData } from '@/models/share'

export type ChatProps = {
export interface ChatProps {
  appData?: AppData
  chatList: ChatItem[]
  config?: ChatConfig

@@ -12,7 +12,7 @@ import { User } from '@/app/components/base/icons/src/public/avatar'
import { Markdown } from '@/app/components/base/markdown'
import { FileList } from '@/app/components/base/file-uploader'

type QuestionProps = {
interface QuestionProps {
  item: ChatItem
  questionIcon?: ReactNode
  theme: Theme | null | undefined

@@ -4,7 +4,7 @@ import React from 'react'
import type { ThoughtItem, ToolInfoInThought } from '../type'
import ToolDetail from '@/app/components/base/chat/chat/answer/tool-detail'

export type IThoughtProps = {
export interface IThoughtProps {
  thought: ThoughtItem
  isFinished: boolean
}

@@ -4,13 +4,13 @@ import type { FileEntity } from '@/app/components/base/file-uploader/types'
import type { InputVarType } from '@/app/components/workflow/types'
import type { FileResponse } from '@/types/workflow'

export type MessageMore = {
export interface MessageMore {
  time: string
  tokens: number
  latency: number | string
}

export type FeedbackType = {
export interface FeedbackType {
  rating: MessageRating
  content?: string | null
}

@@ -26,7 +26,7 @@ export type SubmitAnnotationFunc = (

export type DisplayScene = 'web' | 'console'

export type ToolInfoInThought = {
export interface ToolInfoInThought {
  name: string
  label: string
  input: string

@@ -34,7 +34,7 @@ export type ToolInfoInThought = {
  isFinished: boolean
}

export type ThoughtItem = {
export interface ThoughtItem {
  id: string
  tool: string // plugin or dataset; may have multiple
  thought: string

@@ -47,7 +47,7 @@ export type ThoughtItem = {
  message_files?: FileEntity[]
}

export type CitationItem = {
export interface CitationItem {
  content: string
  data_source_type: string
  dataset_name: string

@@ -62,7 +62,7 @@ export type CitationItem = {
  word_count: number
}

export type IChatItem = {
export interface IChatItem {
  id: string
  content: string
  citation?: CitationItem[]

@@ -104,7 +104,7 @@ export type IChatItem = {
  nextSibling?: string
}

export type Metadata = {
export interface Metadata {
  retriever_resources?: CitationItem[]
  annotation_reply: {
    id: string

@@ -115,20 +115,20 @@ export type Metadata = {
  }
}

export type MessageEnd = {
export interface MessageEnd {
  id: string
  metadata: Metadata
  files?: FileResponse[]
}

export type MessageReplace = {
export interface MessageReplace {
  id: string
  task_id: string
  answer: string
  conversation_id: string
}

export type AnnotationReply = {
export interface AnnotationReply {
  id: string
  task_id: string
  answer: string

@@ -137,7 +137,7 @@ export type AnnotationReply = {
  annotation_author_name: string
}

export type InputForm = {
export interface InputForm {
  type: InputVarType
  label: string
  variable: any

@@ -3,7 +3,7 @@ import { useTranslation } from 'react-i18next'
import { memo } from 'react'
import Textarea from '@/app/components/base/textarea'

type InputProps = {
interface InputProps {
  form: any
  value: string
  onChange: (variable: string, value: string) => void

@@ -15,7 +15,7 @@ import type {
  ConversationItem,
} from '@/models/share'

export type EmbeddedChatbotContextValue = {
export interface EmbeddedChatbotContextValue {
  appInfoError?: any
  appInfoLoading?: boolean
  appMeta?: AppMeta

@@ -14,32 +14,32 @@ export type {
  PromptVariable,
} from '@/models/debug'

export type UserInputForm = {
export interface UserInputForm {
  default: string
  label: string
  required: boolean
  variable: string
}

export type UserInputFormTextInput = {
export interface UserInputFormTextInput {
  'text-input': UserInputForm & {
    max_length: number
  }
}

export type UserInputFormSelect = {
export interface UserInputFormSelect {
  select: UserInputForm & {
    options: string[]
  }
}

export type UserInputFormParagraph = {
export interface UserInputFormParagraph {
  paragraph: UserInputForm
}

export type VisionConfig = VisionSettings

export type EnableType = {
export interface EnableType {
  enabled: boolean
}

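These wrapper shapes (UserInputFormTextInput, UserInputFormSelect, UserInputFormParagraph) are discriminated by which key is present, so consumers can narrow with the `in` operator. A small sketch of that narrowing; the union alias and the describe function are illustrative, not part of the diff:

// Hypothetical union over the wrapper shapes shown above.
type AnyUserInputForm =
  | UserInputFormTextInput
  | UserInputFormSelect
  | UserInputFormParagraph

function describe(form: AnyUserInputForm): string {
  if ('text-input' in form)
    return `text input, max ${form['text-input'].max_length} chars`
  if ('select' in form)
    return `select with ${form.select.options.length} options`
  return `paragraph: ${form.paragraph.label}` // only remaining case
}
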
@@ -50,7 +50,7 @@ export type ChatConfig = Omit<ModelConfig, 'model'> & {
  supportCitationHitInfo?: boolean
}

export type WorkflowProcess = {
export interface WorkflowProcess {
  status: WorkflowRunningStatus
  tracing: NodeTracing[]
  expand?: boolean // for UI

@@ -73,10 +73,10 @@ export type OnSend = (message: string, files?: FileEntity[], last_answer?: ChatI

export type OnRegenerate = (chatItem: ChatItem) => void

export type Callback = {
export interface Callback {
  onSuccess: () => void
}

export type Feedback = {
export interface Feedback {
  rating: 'like' | 'dislike' | null
}

@@ -2,16 +2,16 @@ import { createStore } from 'zustand'
import type { Features } from './types'
import { Resolution, TransferMethod } from '@/types/app'

export type FeaturesModal = {
export interface FeaturesModal {
  showFeaturesModal: boolean
  setShowFeaturesModal: (showFeaturesModal: boolean) => void
}

export type FeaturesState = {
export interface FeaturesState {
  features: Features
}

export type FeaturesAction = {
export interface FeaturesAction {
  setFeatures: (features: Features) => void
}

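The store file combines these slices with zustand's vanilla createStore (the import shown in the hunk header). A hedged sketch of how such a store is typically assembled; the factory name and the empty default are invented for illustration:

import { createStore } from 'zustand'

// Hypothetical assembly of the state + action slices from the diff.
type FeaturesStore = FeaturesState & FeaturesAction

const createFeaturesStore = (initial: Features = {}) =>
  createStore<FeaturesStore>(set => ({
    features: initial,
    setFeatures: features => set({ features }), // replace the whole slice
  }))

// Usage sketch: const store = createFeaturesStore(); store.getState().setFeatures({ ... })
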
@@ -1,7 +1,7 @@
import type { Resolution, TransferMethod, TtsAutoPlay } from '@/types/app'
import type { FileUploadConfigResponse } from '@/models/common'

export type EnabledOrDisabled = {
export interface EnabledOrDisabled {
  enabled?: boolean
}

@@ -42,7 +42,7 @@ export type FileUpload = {
  fileUploadConfig?: FileUploadConfigResponse
} & EnabledOrDisabled

export type AnnotationReplyConfig = {
export interface AnnotationReplyConfig {
  enabled: boolean
  id?: string
  score_threshold?: number

@@ -64,7 +64,7 @@ export enum FeatureEnum {
  annotationReply = 'annotationReply',
}

export type Features = {
export interface Features {
  [FeatureEnum.moreLikeThis]?: MoreLikeThis
  [FeatureEnum.opening]?: OpeningStatement
  [FeatureEnum.suggested]?: SuggestedQuestionsAfterAnswer

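Because every Features key is optional and named by a FeatureEnum member, reads go through the enum with optional chaining. A small sketch; it assumes each feature value carries an enabled flag, which this hunk does not show:

const features: Features = {
  [FeatureEnum.moreLikeThis]: { enabled: true }, // assumed { enabled?: boolean } shape
}

// Optional chaining because every key may be absent.
const moreLikeThisOn = features[FeatureEnum.moreLikeThis]?.enabled ?? false
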
@@ -0,0 +1,186 @@
[new image file — the "Anthropic" wordmark as a 90×10 SVG: nested Lottie-derived clip masks around one path per letter, fill="black" fill-opacity="0.95". After: 7.1 KiB]

@@ -0,0 +1,186 @@
[new image file — the same "Anthropic" wordmark SVG with fill="white" fill-opacity="0.8" for dark backgrounds. After: 7.1 KiB]

File diff suppressed because it is too large

@@ -0,0 +1,16 @@
// GENERATED BY script
// DO NOT EDIT IT MANUALLY

import * as React from 'react'
import data from './AnthropicDark.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'

const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
  props,
  ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)

Icon.displayName = 'AnthropicDark'

export default Icon

File diff suppressed because it is too large

@@ -0,0 +1,16 @@
// GENERATED BY script
// DO NOT EDIT IT MANUALLY

import * as React from 'react'
import data from './AnthropicLight.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'

const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
  props,
  ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)

Icon.displayName = 'AnthropicLight'

export default Icon

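Each generated icon is a thin forwardRef wrapper that feeds its sibling JSON (the serialized SVG) into the shared IconBase renderer, so consumers simply import and render it. A hedged usage sketch; the import path is assumed from the export barrel in the next hunk, and the className is illustrative:

import { AnthropicDark, AnthropicLight } from '@/app/components/base/icons/src/public/llm'

// Hypothetical theme-aware usage of the two wordmark variants.
const ProviderLogo = ({ isDark }: { isDark: boolean }) => (
  isDark ? <AnthropicLight className='h-2.5' /> : <AnthropicDark className='h-2.5' />
)
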
@@ -1,3 +1,5 @@
export { default as AnthropicDark } from './AnthropicDark'
export { default as AnthropicLight } from './AnthropicLight'
export { default as AnthropicText } from './AnthropicText'
export { default as Anthropic } from './Anthropic'
export { default as AzureOpenaiServiceText } from './AzureOpenaiServiceText'

@@ -7,7 +7,7 @@ import { useHotkeys } from 'react-hotkeys-hook'
import Tooltip from '@/app/components/base/tooltip'
import Toast from '@/app/components/base/toast'

type ImagePreviewProps = {
interface ImagePreviewProps {
  url: string
  title: string
  onCancel: () => void

@@ -9,7 +9,7 @@ import RemarkGfm from 'remark-gfm'
import RehypeRaw from 'rehype-raw'
import SyntaxHighlighter from 'react-syntax-highlighter'
import { atelierHeathLight } from 'react-syntax-highlighter/dist/esm/styles/hljs'
import { Component, createContext, memo, useContext, useMemo, useRef, useState } from 'react'
import { Component, createContext, memo, useContext, useEffect, useMemo, useRef, useState } from 'react'
import cn from '@/utils/classnames'
import CopyBtn from '@/app/components/base/copy-btn'
import SVGBtn from '@/app/components/base/svg'

@@ -7,7 +7,7 @@ import cn from '@/utils/classnames'
import type { IChatItem } from '@/app/components/base/chat/chat/type'
import Run from '@/app/components/workflow/run'

type MessageLogModalProps = {
interface MessageLogModalProps {
  currentLogItem?: IChatItem
  defaultTab?: string
  width: number

@@ -61,7 +61,7 @@ import {
import { useEventEmitterContextContext } from '@/context/event-emitter'
import cn from '@/utils/classnames'

export type PromptEditorProps = {
export interface PromptEditorProps {
  instanceId?: string
  compact?: boolean
  className?: string

@@ -32,7 +32,7 @@ import type { PickerBlockMenuOption } from './menu'
import VarReferenceVars from '@/app/components/workflow/nodes/_base/components/variable/var-reference-vars'
import { useEventEmitterContextContext } from '@/context/event-emitter'

type ComponentPickerProps = {
interface ComponentPickerProps {
  triggerString: string
  contextBlock?: ContextBlockType
  queryBlock?: QueryBlockType

@@ -14,7 +14,7 @@ export type Item = {
  name: string
} & Record<string, any>

type Props = {
interface Props {
  order?: string
  value: number | string
  items: Item[]

@@ -3,12 +3,12 @@ import type { FC } from 'react'
import React from 'react'
import cn from '@/utils/classnames'

type Option = {
interface Option {
  value: string
  text: string | JSX.Element
}

type ItemProps = {
interface ItemProps {
  className?: string
  isActive: boolean
  onClick: (v: string) => void

@@ -38,7 +38,7 @@ const Item: FC<ItemProps> = ({
  )
}

type Props = {
interface Props {
  className?: string
  value: string
  onChange: (v: string) => void

@@ -7,7 +7,7 @@ import { RiAddLine, RiCloseLine } from '@remixicon/react'
import cn from '@/utils/classnames'
import { useToastContext } from '@/app/components/base/toast'

type TagInputProps = {
interface TagInputProps {
  items: string[]
  onChange: (items: string[]) => void
  disableRemove?: boolean

@@ -18,7 +18,7 @@ import type { Tag } from '@/app/components/base/tag-management/constant'

import { fetchTagList } from '@/service/tag'

type TagFilterProps = {
interface TagFilterProps {
  type: 'knowledge' | 'app'
  value: string[]
  onChange: (v: string[]) => void

@@ -16,7 +16,7 @@ import Checkbox from '@/app/components/base/checkbox'
import { bindTag, createTag, fetchTagList, unBindTag } from '@/service/tag'
import { ToastContext } from '@/app/components/base/toast'

type TagSelectorProps = {
interface TagSelectorProps {
  targetID: string
  isPopover?: boolean
  position?: 'bl' | 'br'

@@ -7,32 +7,32 @@ import type { ExternalDataTool } from '@/models/common'
export type { VisionFile } from '@/types/app'
export { TransferMethod } from '@/types/app'

export type UserInputForm = {
export interface UserInputForm {
  default: string
  label: string
  required: boolean
  variable: string
}

export type UserInputFormTextInput = {
export interface UserInputFormTextInput {
  'text-input': UserInputForm & {
    max_length: number
  }
}

export type UserInputFormSelect = {
export interface UserInputFormSelect {
  select: UserInputForm & {
    options: string[]
  }
}

export type UserInputFormParagraph = {
export interface UserInputFormParagraph {
  paragraph: UserInputForm
}

export type VisionConfig = VisionSettings

export type EnableType = {
export interface EnableType {
  enabled: boolean
}

@@ -51,11 +51,11 @@ const Toast = ({
      'top-0',
      'right-0',
    )}>
      <div className={`absolute inset-0 opacity-40 -z-10 ${(type === 'success' && 'bg-toast-success-bg')
      <div className={`absolute inset-0 opacity-40 ${(type === 'success' && 'bg-toast-success-bg')
        || (type === 'warning' && 'bg-toast-warning-bg')
        || (type === 'error' && 'bg-toast-error-bg')
        || (type === 'info' && 'bg-toast-info-bg')
      }`}
      }`}
      />
      <div className={`flex ${size === 'md' ? 'gap-1' : 'gap-0.5'}`}>
        <div className={`flex justify-center items-center ${size === 'md' ? 'p-0.5' : 'p-1'}`}>

@@ -80,7 +80,7 @@ const Toast = ({
        </ActionButton>)
      }
      </div>
    </div>
  </div >
}

export const ToastProvider = ({

@@ -12,7 +12,7 @@ import Button from '@/app/components/base/button'
import { ToastContext } from '@/app/components/base/toast'
import { createEmptyDataset } from '@/service/datasets'

type IProps = {
interface IProps {
  show: boolean
  onHide: () => void
}

@@ -6,7 +6,7 @@ import cn from '@/utils/classnames'
import Popover from '@/app/components/base/popover'
import { languages } from '@/i18n/language'

export type ILanguageSelectProps = {
export interface ILanguageSelectProps {
  currentLanguage: string
  onSelect: (language: string) => void
  disabled?: boolean

@@ -5,7 +5,7 @@ import cn from '@/utils/classnames'
import Checkbox from '@/app/components/base/checkbox'
import Tooltip from '@/app/components/base/tooltip'

type Props = {
interface Props {
  className?: string
  isChecked: boolean
  onChange: (isChecked: boolean) => void

@@ -4,7 +4,7 @@ import React from 'react'
import cn from '@/utils/classnames'
import { AlertTriangle } from '@/app/components/base/icons/src/vender/solid/alertsAndFeedback'

type Props = {
interface Props {
  className?: string
  title: string
  errorMsg?: string

@@ -5,7 +5,7 @@ import Input from './input'
import cn from '@/utils/classnames'
import Tooltip from '@/app/components/base/tooltip'

type Props = {
interface Props {
  className?: string
  label: string
  labelClassName?: string

Some files were not shown because too many files have changed in this diff.