mirror of https://github.com/langgenius/dify.git
Merge branch 'main' into feat/node-execution-retry
Commit 07528f82b9
@@ -50,6 +50,9 @@ jobs:
       - name: Run ModelRuntime
         run: poetry run -C api bash dev/pytest/pytest_model_runtime.sh

+      - name: Run dify config tests
+        run: poetry run -C api python dev/pytest/pytest_config_tests.py
+
       - name: Run Tool
         run: poetry run -C api bash dev/pytest/pytest_tools.sh
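
The dev/pytest/pytest_config_tests.py script invoked by the new CI step is itself added as a new file later in this diff; it cross-checks api/.env.example and docker/.env.example against the x-shared-env block of docker-compose.yaml.
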
@@ -70,7 +70,6 @@ ignore = [
     "SIM113", # enumerate-for-loop
     "SIM117", # multiple-with-statements
     "SIM210", # if-expr-with-true-false
-    "SIM300", # yoda-conditions
 ]

 [lint.per-file-ignores]
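
The dropped ignore appears to be SIM300 (yoda-conditions), judging by the comparison rewrites throughout the rest of this commit. A minimal, hypothetical illustration of what the rule flags:

    model_uid = "generate"  # hypothetical value
    if "generate" == model_uid:  # Yoda condition, flagged by SIM300
        pass
    if model_uid == "generate":  # preferred form, as rewritten below
        pass
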
@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):

     CURRENT_VERSION: str = Field(
         description="Dify version",
-        default="0.14.0",
+        default="0.14.1",
     )

     COMMIT_SHA: str = Field(
@@ -31,7 +31,7 @@ def admin_required(view):
         if auth_scheme != "bearer":
             raise Unauthorized("Invalid Authorization header format. Expected 'Bearer <api-key>' format.")

-        if dify_config.ADMIN_API_KEY != auth_token:
+        if auth_token != dify_config.ADMIN_API_KEY:
             raise Unauthorized("API key is invalid.")

         return view(*args, **kwargs)
@@ -13,6 +13,7 @@ app_fields = {
     "name": fields.String,
     "mode": fields.String,
     "icon": fields.String,
     "icon_type": fields.String,
+    "icon_url": AppIconUrlField,
     "icon_background": fields.String,
 }
@@ -819,6 +819,82 @@ LLM_BASE_MODELS = [
             ),
         ),
     ),
+    AzureBaseModel(
+        base_model_name="gpt-4o-2024-11-20",
+        entity=AIModelEntity(
+            model="fake-deployment-name",
+            label=I18nObject(
+                en_US="fake-deployment-name-label",
+            ),
+            model_type=ModelType.LLM,
+            features=[
+                ModelFeature.AGENT_THOUGHT,
+                ModelFeature.VISION,
+                ModelFeature.MULTI_TOOL_CALL,
+                ModelFeature.STREAM_TOOL_CALL,
+            ],
+            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
+            model_properties={
+                ModelPropertyKey.MODE: LLMMode.CHAT.value,
+                ModelPropertyKey.CONTEXT_SIZE: 128000,
+            },
+            parameter_rules=[
+                ParameterRule(
+                    name="temperature",
+                    **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE],
+                ),
+                ParameterRule(
+                    name="top_p",
+                    **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P],
+                ),
+                ParameterRule(
+                    name="presence_penalty",
+                    **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY],
+                ),
+                ParameterRule(
+                    name="frequency_penalty",
+                    **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY],
+                ),
+                _get_max_tokens(default=512, min_val=1, max_val=16384),
+                ParameterRule(
+                    name="seed",
+                    label=I18nObject(zh_Hans="种子", en_US="Seed"),
+                    type="int",
+                    help=AZURE_DEFAULT_PARAM_SEED_HELP,
+                    required=False,
+                    precision=2,
+                    min=0,
+                    max=1,
+                ),
+                ParameterRule(
+                    name="response_format",
+                    label=I18nObject(zh_Hans="回复格式", en_US="response_format"),
+                    type="string",
+                    help=I18nObject(
+                        zh_Hans="指定模型必须输出的格式", en_US="specifying the format that the model must output"
+                    ),
+                    required=False,
+                    options=["text", "json_object", "json_schema"],
+                ),
+                ParameterRule(
+                    name="json_schema",
+                    label=I18nObject(en_US="JSON Schema"),
+                    type="text",
+                    help=I18nObject(
+                        zh_Hans="设置返回的json schema,llm将按照它返回",
+                        en_US="Set a response json schema will ensure LLM to adhere it.",
+                    ),
+                    required=False,
+                ),
+            ],
+            pricing=PriceConfig(
+                input=5.00,
+                output=15.00,
+                unit=0.000001,
+                currency="USD",
+            ),
+        ),
+    ),
     AzureBaseModel(
         base_model_name="gpt-4-turbo",
         entity=AIModelEntity(
@@ -171,6 +171,12 @@ model_credential_schema:
         show_on:
           - variable: __model_type
             value: llm
+      - label:
+          en_US: gpt-4o-2024-11-20
+        value: gpt-4o-2024-11-20
+        show_on:
+          - variable: __model_type
+            value: llm
       - label:
           en_US: gpt-4-turbo
         value: gpt-4-turbo
@@ -92,7 +92,10 @@ class AzureOpenAITextEmbeddingModel(_CommonAzureOpenAI, TextEmbeddingModel):
                 average = embeddings_batch[0]
             else:
                 average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
-            embeddings[i] = (average / np.linalg.norm(average)).tolist()
+            embedding = (average / np.linalg.norm(average)).tolist()
+            if np.isnan(embedding).any():
+                raise ValueError("Normalized embedding is nan please try again")
+            embeddings[i] = embedding

         # calc usage
         usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens)
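
The same NaN guard recurs in the Cohere, OpenAI, and Upstage embedding models below. A minimal standalone sketch (variable values hypothetical) of the failure it catches: normalizing an all-zero vector divides by a zero norm, producing NaN values that are not JSON-compliant downstream.

    import numpy as np

    average = np.zeros(4)  # hypothetical all-zero embedding
    normalized = (average / np.linalg.norm(average)).tolist()  # norm is 0.0, so every entry is nan
    print(np.isnan(normalized).any())  # True: this is where the new code raises ValueError
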
@@ -1,11 +1,19 @@
+from collections.abc import Mapping
+
 import boto3
 from botocore.config import Config

+from core.model_runtime.errors.invoke import InvokeBadRequestError
+

-def get_bedrock_client(service_name, credentials=None):
-    client_config = Config(region_name=credentials["aws_region"])
-    aws_access_key_id = credentials["aws_access_key_id"]
-    aws_secret_access_key = credentials["aws_secret_access_key"]
+def get_bedrock_client(service_name: str, credentials: Mapping[str, str]):
+    region_name = credentials.get("aws_region")
+    if not region_name:
+        raise InvokeBadRequestError("aws_region is required")
+    client_config = Config(region_name=region_name)
+    aws_access_key_id = credentials.get("aws_access_key_id")
+    aws_secret_access_key = credentials.get("aws_secret_access_key")
     if aws_access_key_id and aws_secret_access_key:
         # use aksk to call bedrock
         client = boto3.client(
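
A hedged note on the rewrite: with `credentials.get()`, missing optional keys now yield None instead of raising KeyError, and boto3 then falls back to its default credential chain (environment variables, shared config, instance profile). A sketch of the equivalent keyless client construction (region value hypothetical):

    import boto3
    from botocore.config import Config

    # no explicit aws_access_key_id / aws_secret_access_key: boto3 resolves
    # credentials from its default chain, mirroring the new fallback path
    client = boto3.client("bedrock-runtime", config=Config(region_name="us-east-1"))
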
@@ -62,7 +62,10 @@ class BedrockRerankModel(RerankModel):
             }
         )
         modelId = model
-        region = credentials["aws_region"]
+        region = credentials.get("aws_region")
+        # region is a required field
+        if not region:
+            raise InvokeBadRequestError("aws_region is required in credentials")
         model_package_arn = f"arn:aws:bedrock:{region}::foundation-model/{modelId}"
         rerankingConfiguration = {
             "type": "BEDROCK_RERANKING_MODEL",
@@ -88,7 +88,10 @@ class CohereTextEmbeddingModel(TextEmbeddingModel):
                 average = embeddings_batch[0]
             else:
                 average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
-            embeddings[i] = (average / np.linalg.norm(average)).tolist()
+            embedding = (average / np.linalg.norm(average)).tolist()
+            if np.isnan(embedding).any():
+                raise ValueError("Normalized embedding is nan please try again")
+            embeddings[i] = embedding

         # calc usage
         usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens)
@@ -1,4 +1,5 @@
 - gemini-2.0-flash-exp
+- gemini-2.0-flash-thinking-exp-1219
 - gemini-1.5-pro
 - gemini-1.5-pro-latest
 - gemini-1.5-pro-001
@@ -0,0 +1,39 @@
+model: gemini-2.0-flash-thinking-exp-1219
+label:
+  en_US: Gemini 2.0 Flash Thinking Exp 1219
+model_type: llm
+features:
+  - agent-thought
+  - vision
+  - document
+  - video
+  - audio
+model_properties:
+  mode: chat
+  context_size: 32767
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_output_tokens
+    use_template: max_tokens
+    default: 8192
+    min: 1
+    max: 8192
+  - name: json_schema
+    use_template: json_schema
+pricing:
+  input: '0.00'
+  output: '0.00'
+  unit: '0.000001'
+  currency: USD
@@ -97,7 +97,10 @@ class OpenAITextEmbeddingModel(_CommonOpenAI, TextEmbeddingModel):
                 average = embeddings_batch[0]
             else:
                 average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
-            embeddings[i] = (average / np.linalg.norm(average)).tolist()
+            embedding = (average / np.linalg.norm(average)).tolist()
+            if np.isnan(embedding).any():
+                raise ValueError("Normalized embedding is nan please try again")
+            embeddings[i] = embedding

         # calc usage
         usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens)
@@ -119,7 +119,7 @@ class ReplicateEmbeddingModel(_CommonReplicate, TextEmbeddingModel):
                 embeddings.append(result[0].get("embedding"))

             return [list(map(float, e)) for e in embeddings]
-        elif "texts" == text_input_key:
+        elif text_input_key == "texts":
             result = client.run(
                 replicate_model_version,
                 input={
@@ -18,7 +18,7 @@ class SiliconflowProvider(ModelProvider):
         try:
             model_instance = self.get_model_instance(ModelType.LLM)

-            model_instance.validate_credentials(model="deepseek-ai/DeepSeek-V2-Chat", credentials=credentials)
+            model_instance.validate_credentials(model="deepseek-ai/DeepSeek-V2.5", credentials=credentials)
         except CredentialsValidateFailedError as ex:
             raise ex
         except Exception as ex:
@@ -100,7 +100,10 @@ class UpstageTextEmbeddingModel(_CommonUpstage, TextEmbeddingModel):
                 average = embeddings_batch[0]
             else:
                 average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
-            embeddings[i] = (average / np.linalg.norm(average)).tolist()
+            embedding = (average / np.linalg.norm(average)).tolist()
+            if np.isnan(embedding).any():
+                raise ValueError("Normalized embedding is nan please try again")
+            embeddings[i] = embedding

         usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens)

@@ -40,6 +40,10 @@ configs: dict[str, ModelConfig] = {
         properties=ModelProperties(context_size=32768, max_tokens=4096, mode=LLMMode.CHAT),
         features=[ModelFeature.TOOL_CALL],
     ),
+    "Doubao-pro-256k": ModelConfig(
+        properties=ModelProperties(context_size=262144, max_tokens=4096, mode=LLMMode.CHAT),
+        features=[],
+    ),
     "Doubao-pro-128k": ModelConfig(
         properties=ModelProperties(context_size=131072, max_tokens=4096, mode=LLMMode.CHAT),
         features=[ModelFeature.TOOL_CALL],
@@ -12,6 +12,7 @@ class ModelConfig(BaseModel):

 ModelConfigs = {
     "Doubao-embedding": ModelConfig(properties=ModelProperties(context_size=4096, max_chunks=32)),
+    "Doubao-embedding-large": ModelConfig(properties=ModelProperties(context_size=4096, max_chunks=32)),
 }


@@ -21,7 +22,7 @@ def get_model_config(credentials: dict) -> ModelConfig:
     if not model_configs:
         return ModelConfig(
             properties=ModelProperties(
-                context_size=int(credentials.get("context_size", 0)),
+                context_size=int(credentials.get("context_size", 4096)),
                 max_chunks=int(credentials.get("max_chunks", 1)),
             )
         )
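
The second hunk changes the fallback for custom models: an embedding model whose credentials carry no explicit context_size previously defaulted to 0. A small sketch of the effect (credentials dict hypothetical):

    credentials = {}  # custom embedding model with no explicit context_size
    context_size = int(credentials.get("context_size", 4096))  # was 0 before this change
    print(context_size)  # 4096
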
@@ -166,6 +166,12 @@ model_credential_schema:
         show_on:
           - variable: __model_type
             value: llm
+      - label:
+          en_US: Doubao-pro-256k
+        value: Doubao-pro-256k
+        show_on:
+          - variable: __model_type
+            value: llm
       - label:
           en_US: Llama3-8B
         value: Llama3-8B
@@ -220,6 +226,12 @@ model_credential_schema:
         show_on:
           - variable: __model_type
             value: text-embedding
+      - label:
+          en_US: Doubao-embedding-large
+        value: Doubao-embedding-large
+        show_on:
+          - variable: __model_type
+            value: text-embedding
       - label:
           en_US: Custom
           zh_Hans: 自定义
@@ -65,6 +65,11 @@ class CacheEmbedding(Embeddings):
                 for vector in embedding_result.embeddings:
                     try:
                         normalized_embedding = (vector / np.linalg.norm(vector)).tolist()
+                        # stackoverflow best way: https://stackoverflow.com/questions/20319813/how-to-check-list-containing-nan
+                        if np.isnan(normalized_embedding).any():
+                            # for issue #11827 float values are not json compliant
+                            logger.warning(f"Normalized embedding is nan: {normalized_embedding}")
+                            continue
                         embedding_queue_embeddings.append(normalized_embedding)
                     except IntegrityError:
                         db.session.rollback()
@@ -111,6 +116,8 @@

             embedding_results = embedding_result.embeddings[0]
             embedding_results = (embedding_results / np.linalg.norm(embedding_results)).tolist()
+            if np.isnan(embedding_results).any():
+                raise ValueError("Normalized embedding is nan please try again")
         except Exception as ex:
             if dify_config.DEBUG:
                 logging.exception(f"Failed to embed query text '{text[:10]}...({len(text)} chars)'")
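
Note the asymmetry between the two hunks: the batch path logs a warning and skips the NaN vector (per issue #11827, NaN floats are not JSON-compliant), while the single-query path raises ValueError instead.
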
@@ -11,7 +11,10 @@ class ComfyUIProvider(BuiltinToolProviderController):
     def _validate_credentials(self, credentials: dict[str, Any]) -> None:
         ws = websocket.WebSocket()
         base_url = URL(credentials.get("base_url"))
-        ws_address = f"ws://{base_url.authority}/ws?clientId=test123"
+        ws_protocol = "ws"
+        if base_url.scheme == "https":
+            ws_protocol = "wss"
+        ws_address = f"{ws_protocol}://{base_url.authority}/ws?clientId=test123"

         try:
             ws.connect(ws_address)
@@ -40,7 +40,10 @@ class ComfyUiClient:
     def open_websocket_connection(self) -> tuple[WebSocket, str]:
         client_id = str(uuid.uuid4())
         ws = WebSocket()
-        ws_address = f"ws://{self.base_url.authority}/ws?clientId={client_id}"
+        ws_protocol = "ws"
+        if self.base_url.scheme == "https":
+            ws_protocol = "wss"
+        ws_address = f"{ws_protocol}://{self.base_url.authority}/ws?clientId={client_id}"
         ws.connect(ws_address)
         return ws, client_id

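
Both ComfyUI fixes apply the same scheme mapping. A hedged standalone sketch, assuming the URL type here is yarl.URL (which exposes the .scheme and .authority attributes used above); the example address is hypothetical:

    from yarl import URL

    base_url = URL("https://comfyui.example.com:8188")
    ws_protocol = "wss" if base_url.scheme == "https" else "ws"
    print(f"{ws_protocol}://{base_url.authority}/ws?clientId=test123")
    # -> wss://comfyui.example.com:8188/ws?clientId=test123
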
@@ -1,4 +1,5 @@
 import logging
+import mimetypes
 from collections.abc import Mapping, Sequence
 from typing import Any

@@ -165,20 +166,24 @@ class HttpRequestNode(BaseNode[HttpRequestNodeData]):

     def extract_files(self, url: str, response: Response) -> list[File]:
         """
-        Extract files from response
+        Extract files from response by checking both Content-Type header and URL
         """
         files = []
         is_file = response.is_file
         content_type = response.content_type
         content = response.content

-        if is_file and content_type:
+        if is_file:
+            # Guess file extension from URL or Content-Type header
+            filename = url.split("?")[0].split("/")[-1] or ""
+            mime_type = content_type or mimetypes.guess_type(filename)[0] or "application/octet-stream"
+
             tool_file = ToolFileManager.create_file_by_raw(
                 user_id=self.user_id,
                 tenant_id=self.tenant_id,
                 conversation_id=None,
                 file_binary=content,
-                mimetype=content_type,
+                mimetype=mime_type,
             )

             mapping = {
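
A standalone sketch of the new MIME-type fallback chain (URL and header value hypothetical): the Content-Type header wins when present, then a guess from the URL's filename, then a generic binary default.

    import mimetypes

    url = "https://example.com/files/report.pdf?token=abc"
    content_type = None  # e.g. the server omitted the Content-Type header
    filename = url.split("?")[0].split("/")[-1] or ""
    mime_type = content_type or mimetypes.guess_type(filename)[0] or "application/octet-stream"
    print(mime_type)  # application/pdf
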
@@ -21,13 +21,13 @@ class MockXinferenceClass:
         if not re.match(r"https?:\/\/[^\s\/$.?#].[^\s]*$", self.base_url):
             raise RuntimeError("404 Not Found")

-        if "generate" == model_uid:
+        if model_uid == "generate":
             return RESTfulGenerateModelHandle(model_uid, base_url=self.base_url, auth_headers={})
-        if "chat" == model_uid:
+        if model_uid == "chat":
             return RESTfulChatModelHandle(model_uid, base_url=self.base_url, auth_headers={})
-        if "embedding" == model_uid:
+        if model_uid == "embedding":
             return RESTfulEmbeddingModelHandle(model_uid, base_url=self.base_url, auth_headers={})
-        if "rerank" == model_uid:
+        if model_uid == "rerank":
             return RESTfulRerankModelHandle(model_uid, base_url=self.base_url, auth_headers={})
         raise RuntimeError("404 Not Found")

@@ -34,9 +34,9 @@ def test_api_tool(setup_http_mock):
     response = tool.do_http_request(tool.api_bundle.server_url, tool.api_bundle.method, headers, parameters)

     assert response.status_code == 200
-    assert "/p_param" == response.request.url.path
-    assert b"query_param=q_param" == response.request.url.query
-    assert "h_param" == response.request.headers.get("header_param")
-    assert "application/json" == response.request.headers.get("content-type")
-    assert "cookie_param=c_param" == response.request.headers.get("cookie")
+    assert response.request.url.path == "/p_param"
+    assert response.request.url.query == b"query_param=q_param"
+    assert response.request.headers.get("header_param") == "h_param"
+    assert response.request.headers.get("content-type") == "application/json"
+    assert response.request.headers.get("cookie") == "cookie_param=c_param"
     assert "b_param" in response.content.decode()
@@ -384,7 +384,7 @@ def test_mock_404(setup_http_mock):
     assert result.outputs is not None
     resp = result.outputs

-    assert 404 == resp.get("status_code")
+    assert resp.get("status_code") == 404
     assert "Not Found" in resp.get("body", "")

@@ -0,0 +1,111 @@
+import yaml  # type: ignore
+from dotenv import dotenv_values
+from pathlib import Path
+
+BASE_API_AND_DOCKER_CONFIG_SET_DIFF = {
+    "APP_MAX_EXECUTION_TIME",
+    "BATCH_UPLOAD_LIMIT",
+    "CELERY_BEAT_SCHEDULER_TIME",
+    "CODE_EXECUTION_API_KEY",
+    "HTTP_REQUEST_MAX_CONNECT_TIMEOUT",
+    "HTTP_REQUEST_MAX_READ_TIMEOUT",
+    "HTTP_REQUEST_MAX_WRITE_TIMEOUT",
+    "KEYWORD_DATA_SOURCE_TYPE",
+    "LOGIN_LOCKOUT_DURATION",
+    "LOG_FORMAT",
+    "OCI_ACCESS_KEY",
+    "OCI_BUCKET_NAME",
+    "OCI_ENDPOINT",
+    "OCI_REGION",
+    "OCI_SECRET_KEY",
+    "REDIS_DB",
+    "RESEND_API_URL",
+    "RESPECT_XFORWARD_HEADERS_ENABLED",
+    "SENTRY_DSN",
+    "SSRF_DEFAULT_CONNECT_TIME_OUT",
+    "SSRF_DEFAULT_MAX_RETRIES",
+    "SSRF_DEFAULT_READ_TIME_OUT",
+    "SSRF_DEFAULT_TIME_OUT",
+    "SSRF_DEFAULT_WRITE_TIME_OUT",
+    "UPSTASH_VECTOR_TOKEN",
+    "UPSTASH_VECTOR_URL",
+    "USING_UGC_INDEX",
+    "WEAVIATE_BATCH_SIZE",
+    "WEAVIATE_GRPC_ENABLED",
+}
+
+BASE_API_AND_DOCKER_COMPOSE_CONFIG_SET_DIFF = {
+    "BATCH_UPLOAD_LIMIT",
+    "CELERY_BEAT_SCHEDULER_TIME",
+    "HTTP_REQUEST_MAX_CONNECT_TIMEOUT",
+    "HTTP_REQUEST_MAX_READ_TIMEOUT",
+    "HTTP_REQUEST_MAX_WRITE_TIMEOUT",
+    "KEYWORD_DATA_SOURCE_TYPE",
+    "LOGIN_LOCKOUT_DURATION",
+    "LOG_FORMAT",
+    "OPENDAL_FS_ROOT",
+    "OPENDAL_S3_ACCESS_KEY_ID",
+    "OPENDAL_S3_BUCKET",
+    "OPENDAL_S3_ENDPOINT",
+    "OPENDAL_S3_REGION",
+    "OPENDAL_S3_ROOT",
+    "OPENDAL_S3_SECRET_ACCESS_KEY",
+    "OPENDAL_S3_SERVER_SIDE_ENCRYPTION",
+    "PGVECTOR_MAX_CONNECTION",
+    "PGVECTOR_MIN_CONNECTION",
+    "PGVECTO_RS_DATABASE",
+    "PGVECTO_RS_HOST",
+    "PGVECTO_RS_PASSWORD",
+    "PGVECTO_RS_PORT",
+    "PGVECTO_RS_USER",
+    "RESPECT_XFORWARD_HEADERS_ENABLED",
+    "SCARF_NO_ANALYTICS",
+    "SSRF_DEFAULT_CONNECT_TIME_OUT",
+    "SSRF_DEFAULT_MAX_RETRIES",
+    "SSRF_DEFAULT_READ_TIME_OUT",
+    "SSRF_DEFAULT_TIME_OUT",
+    "SSRF_DEFAULT_WRITE_TIME_OUT",
+    "STORAGE_OPENDAL_SCHEME",
+    "SUPABASE_API_KEY",
+    "SUPABASE_BUCKET_NAME",
+    "SUPABASE_URL",
+    "USING_UGC_INDEX",
+    "VIKINGDB_CONNECTION_TIMEOUT",
+    "VIKINGDB_SOCKET_TIMEOUT",
+    "WEAVIATE_BATCH_SIZE",
+    "WEAVIATE_GRPC_ENABLED",
+}
+
+API_CONFIG_SET = set(dotenv_values(Path("api") / Path(".env.example")).keys())
+DOCKER_CONFIG_SET = set(dotenv_values(Path("docker") / Path(".env.example")).keys())
+DOCKER_COMPOSE_CONFIG_SET = set()
+
+with open(Path("docker") / Path("docker-compose.yaml")) as f:
+    DOCKER_COMPOSE_CONFIG_SET = set(yaml.safe_load(f.read())["x-shared-env"].keys())
+
+
+def test_yaml_config():
+    # python set == operator is used to compare two sets
+    DIFF_API_WITH_DOCKER = (
+        API_CONFIG_SET - DOCKER_CONFIG_SET - BASE_API_AND_DOCKER_CONFIG_SET_DIFF
+    )
+    if DIFF_API_WITH_DOCKER:
+        print(
+            f"API and Docker config sets are different with key: {DIFF_API_WITH_DOCKER}"
+        )
+        raise Exception("API and Docker config sets are different")
+    DIFF_API_WITH_DOCKER_COMPOSE = (
+        API_CONFIG_SET
+        - DOCKER_COMPOSE_CONFIG_SET
+        - BASE_API_AND_DOCKER_COMPOSE_CONFIG_SET_DIFF
+    )
+    if DIFF_API_WITH_DOCKER_COMPOSE:
+        print(
+            f"API and Docker Compose config sets are different with key: {DIFF_API_WITH_DOCKER_COMPOSE}"
+        )
+        raise Exception("API and Docker Compose config sets are different")
+    print("All tests passed!")
+
+
+if __name__ == "__main__":
+    test_yaml_config()
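
This is the script the new CI step at the top of this diff runs (poetry run -C api python dev/pytest/pytest_config_tests.py). The two DIFF sets whitelist keys that are intentionally present in only one of the files, so the test fails only on unexplained drift between api/.env.example, docker/.env.example, and the x-shared-env block of docker-compose.yaml.
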
@@ -2,7 +2,7 @@ version: '3'
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.14.0
+    image: langgenius/dify-api:0.14.1
     restart: always
     environment:
       # Startup mode, 'api' starts the API server.
@@ -227,7 +227,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.14.0
+    image: langgenius/dify-api:0.14.1
     restart: always
     environment:
       CONSOLE_WEB_URL: ''
@@ -397,7 +397,7 @@ services:

   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.14.0
+    image: langgenius/dify-web:0.14.1
     restart: always
     environment:
       # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
@@ -107,6 +107,7 @@ ACCESS_TOKEN_EXPIRE_MINUTES=60

# The maximum number of active requests for the application, where 0 means unlimited, should be a non-negative integer.
APP_MAX_ACTIVE_REQUESTS=0
+APP_MAX_EXECUTION_TIME=1200

# ------------------------------
# Container Startup Related Configuration
@@ -606,6 +607,7 @@ UPLOAD_AUDIO_FILE_SIZE_LIMIT=50
# Sentry Configuration
# Used for application monitoring and error log tracking.
# ------------------------------
+SENTRY_DSN=

# API Service Sentry DSN address, default is empty, when empty,
# all monitoring information is not reported to Sentry.
@@ -2,7 +2,7 @@ x-shared-env: &shared-api-worker-env
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.14.0
+    image: langgenius/dify-api:0.14.1
     restart: always
     environment:
       # Use the shared environment variables.
@@ -25,7 +25,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.14.0
+    image: langgenius/dify-api:0.14.1
     restart: always
     environment:
       # Use the shared environment variables.
@@ -47,7 +47,7 @@ services:

   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.14.0
+    image: langgenius/dify-web:0.14.1
     restart: always
     environment:
       CONSOLE_API_URL: ${CONSOLE_API_URL:-}
@@ -18,6 +18,7 @@ x-shared-env: &shared-api-worker-env
   LOG_DATEFORMAT: ${LOG_DATEFORMAT:-"%Y-%m-%d %H:%M:%S"}
   LOG_TZ: ${LOG_TZ:-UTC}
   DEBUG: ${DEBUG:-false}
+  SENTRY_DSN: ${SENTRY_DSN:-}
   FLASK_DEBUG: ${FLASK_DEBUG:-false}
   SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U}
   INIT_PASSWORD: ${INIT_PASSWORD:-}
@@ -28,6 +29,7 @@ x-shared-env: &shared-api-worker-env
   FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300}
   ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60}
   APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0}
+  APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200}
   DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0}
   DIFY_PORT: ${DIFY_PORT:-5001}
   SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-}
@@ -389,7 +391,7 @@ x-shared-env: &shared-api-worker-env
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.14.0
+    image: langgenius/dify-api:0.14.1
     restart: always
     environment:
       # Use the shared environment variables.
@@ -412,7 +414,7 @@
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.14.0
+    image: langgenius/dify-api:0.14.1
     restart: always
     environment:
       # Use the shared environment variables.
@@ -434,7 +436,7 @@

   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.14.0
+    image: langgenius/dify-web:0.14.1
     restart: always
     environment:
       CONSOLE_API_URL: ${CONSOLE_API_URL:-}
@@ -1,13 +1,19 @@
 'use client'
+import { useSearchParams } from 'next/navigation'
 import { useTranslation } from 'react-i18next'

 const Empty = () => {
   const { t } = useTranslation()
+  const searchParams = useSearchParams()

   return (
     <div className='flex flex-col items-center'>
       <div className="shrink-0 w-[163px] h-[149px] bg-cover bg-no-repeat bg-[url('~@/app/components/tools/add-tool-modal/empty.png')]"></div>
-      <div className='mb-1 text-[13px] font-medium text-text-primary leading-[18px]'>{t('tools.addToolModal.emptyTitle')}</div>
-      <div className='text-[13px] text-text-tertiary leading-[18px]'>{t('tools.addToolModal.emptyTip')}</div>
+      <div className='mb-1 text-[13px] font-medium text-text-primary leading-[18px]'>
+        {t(`tools.addToolModal.${searchParams.get('category') === 'workflow' ? 'emptyTitle' : 'emptyTitleCustom'}`)}
+      </div>
+      <div className='text-[13px] text-text-tertiary leading-[18px]'>
+        {t(`tools.addToolModal.${searchParams.get('category') === 'workflow' ? 'emptyTip' : 'emptyTipCustom'}`)}
+      </div>
     </div>
   )
 }
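
The component now switches its empty-state copy on the category query parameter, which is why the two new emptyTitleCustom/emptyTipCustom keys are added to the English and Chinese translation files below.
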
@@ -31,6 +31,8 @@ const translation = {
     manageInTools: 'Manage in Tools',
     emptyTitle: 'No workflow tool available',
     emptyTip: 'Go to "Workflow -> Publish as Tool"',
+    emptyTitleCustom: 'No custom tool available',
+    emptyTipCustom: 'Create a custom tool',
   },
   createTool: {
     title: 'Create Custom Tool',
@@ -31,6 +31,8 @@ const translation = {
     manageInTools: '去工具列表管理',
     emptyTitle: '没有可用的工作流工具',
     emptyTip: '去 “工作流 -> 发布为工具” 添加',
+    emptyTitleCustom: '没有可用的自定义工具',
+    emptyTipCustom: '创建自定义工具',
   },
   createTool: {
     title: '创建自定义工具',
@@ -1,6 +1,6 @@
 {
   "name": "dify-web",
-  "version": "0.14.0",
+  "version": "0.14.1",
   "private": true,
   "engines": {
     "node": ">=18.17.0"