mirror of https://github.com/langgenius/dify.git
Merge branch 'main' into feat/node-execution-retry
This commit is contained in: commit 853b9af09c
@@ -9,5 +9,6 @@ yq eval '.services["pgvecto-rs"].ports += ["5431:5432"]' -i docker/docker-compos
yq eval '.services["elasticsearch"].ports += ["9200:9200"]' -i docker/docker-compose.yaml
yq eval '.services.couchbase-server.ports += ["8091-8096:8091-8096"]' -i docker/docker-compose.yaml
yq eval '.services.couchbase-server.ports += ["11210:11210"]' -i docker/docker-compose.yaml
yq eval '.services.tidb.ports += ["4000:4000"]' -i docker/docker-compose.yaml

echo "Ports exposed for sandbox, weaviate, qdrant, chroma, milvus, pgvector, pgvecto-rs, elasticsearch, couchbase"
echo "Ports exposed for sandbox, weaviate, tidb, qdrant, chroma, milvus, pgvector, pgvecto-rs, elasticsearch, couchbase"
@@ -51,7 +51,7 @@ jobs:
      - name: Expose Service Ports
        run: sh .github/workflows/expose_service_ports.sh

      - name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch, Couchbase)
      - name: Set up Vector Stores (TiDB, Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch, Couchbase)
        uses: hoverkraft-tech/compose-action@v2.0.2
        with:
          compose-file: |
@@ -67,6 +67,7 @@ jobs:
            pgvector
            chroma
            elasticsearch
            tidb

      - name: Test Vector Stores
        run: poetry run -C api bash dev/pytest/pytest_vdb.sh
@@ -60,17 +60,8 @@ DB_DATABASE=dify
STORAGE_TYPE=opendal

# Apache OpenDAL storage configuration, refer to https://github.com/apache/opendal
STORAGE_OPENDAL_SCHEME=fs
# OpenDAL FS
OPENDAL_SCHEME=fs
OPENDAL_FS_ROOT=storage
# OpenDAL S3
OPENDAL_S3_ROOT=/
OPENDAL_S3_BUCKET=your-bucket-name
OPENDAL_S3_ENDPOINT=https://s3.amazonaws.com
OPENDAL_S3_ACCESS_KEY_ID=your-access-key
OPENDAL_S3_SECRET_ACCESS_KEY=your-secret-key
OPENDAL_S3_REGION=your-region
OPENDAL_S3_SERVER_SIDE_ENCRYPTION=

# S3 Storage configuration
S3_USE_AWS_MANAGED_IAM=false
@@ -313,8 +304,7 @@ UPLOAD_VIDEO_FILE_SIZE_LIMIT=100
UPLOAD_AUDIO_FILE_SIZE_LIMIT=50

# Model configuration
MULTIMODAL_SEND_IMAGE_FORMAT=base64
MULTIMODAL_SEND_VIDEO_FORMAT=base64
MULTIMODAL_SEND_FORMAT=base64
PROMPT_GENERATION_MAX_TOKENS=512
CODE_GENERATION_MAX_TOKENS=1024
@@ -399,6 +389,8 @@ LOG_FILE_BACKUP_COUNT=5
LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S
# Log Timezone
LOG_TZ=UTC
# Log format
LOG_FORMAT=%(asctime)s,%(msecs)d %(levelname)-2s [%(filename)s:%(lineno)d] %(req_id)s %(message)s

# Indexing configuration
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000
@@ -431,3 +423,7 @@ RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5

CREATE_TIDB_SERVICE_JOB_ENABLED=false

# Maximum number of submitted thread count in a ThreadPool for parallel node execution
MAX_SUBMIT_COUNT=100
# Lockout duration in seconds
LOGIN_LOCKOUT_DURATION=86400
api/app.py (27 changes)
@@ -1,13 +1,30 @@
from app_factory import create_app
from libs import threadings_utils, version_utils
from libs import version_utils

# preparation before creating app
version_utils.check_supported_python_version()
threadings_utils.apply_gevent_threading_patch()


def is_db_command():
    import sys

    if len(sys.argv) > 1 and sys.argv[0].endswith("flask") and sys.argv[1] == "db":
        return True
    return False


# create app
app = create_app()
celery = app.extensions["celery"]
if is_db_command():
    from app_factory import create_migrations_app

    app = create_migrations_app()
else:
    from app_factory import create_app
    from libs import threadings_utils

    threadings_utils.apply_gevent_threading_patch()

    app = create_app()
    celery = app.extensions["celery"]

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5001)
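
A note on the new boot flow: when the process is launched as "flask db <cmd>", only the lightweight migrations app is created and the gevent patch is skipped. A minimal sketch (not part of the commit) of how the argv check behaves, assuming Flask's CLI leaves the executable path in sys.argv[0]:

import sys

def is_db_command():
    return len(sys.argv) > 1 and sys.argv[0].endswith("flask") and sys.argv[1] == "db"

sys.argv = ["/usr/local/bin/flask", "db", "upgrade"]
print(is_db_command())  # True -> migrations app only, no gevent patching
sys.argv = ["/usr/local/bin/flask", "run"]
print(is_db_command())  # False -> full app, gevent threading patch applied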
@@ -1,5 +1,4 @@
import logging
import os
import time

from configs import dify_config
@@ -17,15 +16,6 @@ def create_flask_app_with_configs() -> DifyApp:
    dify_app = DifyApp(__name__)
    dify_app.config.from_mapping(dify_config.model_dump())

    # populate configs into system environment variables
    for key, value in dify_app.config.items():
        if isinstance(value, str):
            os.environ[key] = value
        elif isinstance(value, int | float | bool):
            os.environ[key] = str(value)
        elif value is None:
            os.environ[key] = ""

    return dify_app
@@ -98,3 +88,14 @@ def initialize_extensions(app: DifyApp):
    end_time = time.perf_counter()
    if dify_config.DEBUG:
        logging.info(f"Loaded {short_name} ({round((end_time - start_time) * 1000, 2)} ms)")


def create_migrations_app():
    app = create_flask_app_with_configs()
    from extensions import ext_database, ext_migrate

    # Initialize only required extensions
    ext_database.init_app(app)
    ext_migrate.init_app(app)

    return app
@@ -439,6 +439,17 @@ class WorkflowConfig(BaseSettings):
    )


class WorkflowNodeExecutionConfig(BaseSettings):
    """
    Configuration for workflow node execution
    """

    MAX_SUBMIT_COUNT: PositiveInt = Field(
        description="Maximum number of submitted thread count in a ThreadPool for parallel node execution",
        default=100,
    )
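
MAX_SUBMIT_COUNT is read through pydantic-settings, so the environment variable of the same name overrides the default and non-positive values are rejected. A self-contained sketch of that behavior (class body trimmed to the one field):

import os
from pydantic import Field, PositiveInt, ValidationError
from pydantic_settings import BaseSettings

class WorkflowNodeExecutionConfig(BaseSettings):
    MAX_SUBMIT_COUNT: PositiveInt = Field(default=100)

os.environ["MAX_SUBMIT_COUNT"] = "32"
print(WorkflowNodeExecutionConfig().MAX_SUBMIT_COUNT)  # 32: env overrides the default
os.environ["MAX_SUBMIT_COUNT"] = "0"
try:
    WorkflowNodeExecutionConfig()
except ValidationError:
    print("rejected")  # PositiveInt refuses zero and negatives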


class AuthConfig(BaseSettings):
    """
    Configuration for authentication and OAuth

@@ -474,6 +485,11 @@ class AuthConfig(BaseSettings):
        default=60,
    )

    LOGIN_LOCKOUT_DURATION: PositiveInt = Field(
        description="Time (in seconds) a user must wait before retrying login after exceeding the rate limit.",
        default=86400,
    )


class ModerationConfig(BaseSettings):
    """
@@ -649,14 +665,9 @@ class IndexingConfig(BaseSettings):
    )


class VisionFormatConfig(BaseSettings):
    MULTIMODAL_SEND_IMAGE_FORMAT: Literal["base64", "url"] = Field(
        description="Format for sending images in multimodal contexts ('base64' or 'url'), default is base64",
        default="base64",
    )

    MULTIMODAL_SEND_VIDEO_FORMAT: Literal["base64", "url"] = Field(
        description="Format for sending videos in multimodal contexts ('base64' or 'url'), default is base64",
class MultiModalTransferConfig(BaseSettings):
    MULTIMODAL_SEND_FORMAT: Literal["base64", "url"] = Field(
        description="Format for sending files in multimodal contexts ('base64' or 'url'), default is base64",
        default="base64",
    )
@@ -762,19 +773,20 @@ class FeatureConfig(
    FileAccessConfig,
    FileUploadConfig,
    HttpConfig,
    VisionFormatConfig,
    InnerAPIConfig,
    IndexingConfig,
    LoggingConfig,
    MailConfig,
    ModelLoadBalanceConfig,
    ModerationConfig,
    MultiModalTransferConfig,
    PositionConfig,
    RagEtlConfig,
    SecurityConfig,
    ToolConfig,
    UpdateConfig,
    WorkflowConfig,
    WorkflowNodeExecutionConfig,
    WorkspaceConfig,
    LoginConfig,
    # hosted services config
@@ -1,51 +1,9 @@
from enum import StrEnum
from typing import Literal

from pydantic import Field
from pydantic_settings import BaseSettings


class OpenDALScheme(StrEnum):
    FS = "fs"
    S3 = "s3"


class OpenDALStorageConfig(BaseSettings):
    STORAGE_OPENDAL_SCHEME: str = Field(
        default=OpenDALScheme.FS.value,
    OPENDAL_SCHEME: str = Field(
        default="fs",
        description="OpenDAL scheme.",
    )
    # FS
    OPENDAL_FS_ROOT: str = Field(
        default="storage",
        description="Root path for local storage.",
    )
    # S3
    OPENDAL_S3_ROOT: str = Field(
        default="/",
        description="Root path for S3 storage.",
    )
    OPENDAL_S3_BUCKET: str = Field(
        default="",
        description="S3 bucket name.",
    )
    OPENDAL_S3_ENDPOINT: str = Field(
        default="https://s3.amazonaws.com",
        description="S3 endpoint URL.",
    )
    OPENDAL_S3_ACCESS_KEY_ID: str = Field(
        default="",
        description="S3 access key ID.",
    )
    OPENDAL_S3_SECRET_ACCESS_KEY: str = Field(
        default="",
        description="S3 secret access key.",
    )
    OPENDAL_S3_REGION: str = Field(
        default="",
        description="S3 region.",
    )
    OPENDAL_S3_SERVER_SIDE_ENCRYPTION: Literal["aws:kms", ""] = Field(
        default="",
        description="S3 server-side encryption.",
    )
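
These settings feed Apache OpenDAL, which exposes one Operator API over every backend. A hedged sketch of how the fs scheme maps onto the opendal Python binding (the mapping is an assumption for illustration, not code from this commit):

import opendal

# OPENDAL_SCHEME=fs with OPENDAL_FS_ROOT=storage roughly corresponds to:
op = opendal.Operator("fs", root="storage")
op.write("hello.txt", b"hello")
print(op.read("hello.txt"))  # b'hello'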
@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):

    CURRENT_VERSION: str = Field(
        description="Dify version",
        default="0.13.2",
        default="0.14.0",
    )

    COMMIT_SHA: str = Field(
@@ -65,7 +65,7 @@ class ModelConfigResource(Resource):
                    provider_type=agent_tool_entity.provider_type,
                    identity_id=f"AGENT.{app_model.id}",
                )
            except Exception as e:
            except Exception:
                continue

            # get decrypted parameters

@@ -97,7 +97,7 @@ class ModelConfigResource(Resource):
                    app_id=app_model.id,
                    agent_tool=agent_tool_entity,
                )
            except Exception as e:
            except Exception:
                continue

            manager = ToolParameterConfigurationManager(
@@ -1,4 +1,5 @@
from flask_restful import Resource, reqparse
from werkzeug.exceptions import BadRequest

from controllers.console import api
from controllers.console.app.error import TracingConfigCheckError, TracingConfigIsExist, TracingConfigNotExist

@@ -26,7 +27,7 @@ class TraceAppConfigApi(Resource):
                return {"has_not_configured": True}
            return trace_config
        except Exception as e:
            raise e
            raise BadRequest(str(e))

    @setup_required
    @login_required

@@ -48,7 +49,7 @@ class TraceAppConfigApi(Resource):
                raise TracingConfigCheckError()
            return result
        except Exception as e:
            raise e
            raise BadRequest(str(e))

    @setup_required
    @login_required

@@ -68,7 +69,7 @@ class TraceAppConfigApi(Resource):
                raise TracingConfigNotExist()
            return {"result": "success"}
        except Exception as e:
            raise e
            raise BadRequest(str(e))

    @setup_required
    @login_required

@@ -85,7 +86,7 @@ class TraceAppConfigApi(Resource):
                raise TracingConfigNotExist()
            return {"result": "success"}
        except Exception as e:
            raise e
            raise BadRequest(str(e))


api.add_resource(TraceAppConfigApi, "/apps/<uuid:app_id>/trace-config")
@@ -948,7 +948,7 @@ class DocumentRetryApi(DocumentResource):
                if document.indexing_status == "completed":
                    raise DocumentAlreadyFinishedError()
                retry_documents.append(document)
            except Exception as e:
            except Exception:
                logging.exception(f"Failed to retry document, document id: {document_id}")
                continue
        # retry document
@@ -4,6 +4,7 @@ from flask_restful import Resource, fields, marshal_with, reqparse
from constants.languages import languages
from controllers.console import api
from controllers.console.wraps import account_initialization_required
from libs.helper import AppIconUrlField
from libs.login import login_required
from services.recommended_app_service import RecommendedAppService

@@ -12,6 +13,7 @@ app_fields = {
    "name": fields.String,
    "mode": fields.String,
    "icon": fields.String,
    "icon_url": AppIconUrlField,
    "icon_background": fields.String,
}
@@ -260,36 +260,34 @@ class WorkflowAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCycleMa

            workflow_node_execution = self._handle_node_execution_start(workflow_run=workflow_run, event=event)

            response = self._workflow_node_start_to_stream_response(
            node_start_response = self._workflow_node_start_to_stream_response(
                event=event,
                task_id=self._application_generate_entity.task_id,
                workflow_node_execution=workflow_node_execution,
            )

            if response:
                yield response
            if node_start_response:
                yield node_start_response
        elif isinstance(event, QueueNodeSucceededEvent):
            workflow_node_execution = self._handle_workflow_node_execution_success(event)

            response = self._workflow_node_finish_to_stream_response(
            node_success_response = self._workflow_node_finish_to_stream_response(
                event=event,
                task_id=self._application_generate_entity.task_id,
                workflow_node_execution=workflow_node_execution,
            )

            if response:
                yield response
            if node_success_response:
                yield node_success_response
        elif isinstance(event, QueueNodeFailedEvent | QueueNodeInIterationFailedEvent | QueueNodeExceptionEvent):
            workflow_node_execution = self._handle_workflow_node_execution_failed(event)

            response = self._workflow_node_finish_to_stream_response(
            node_failed_response = self._workflow_node_finish_to_stream_response(
                event=event,
                task_id=self._application_generate_entity.task_id,
                workflow_node_execution=workflow_node_execution,
            )

            if response:
                yield response
        elif isinstance(
            event,
            QueueNodeRetryEvent,

@@ -306,6 +304,8 @@ class WorkflowAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCycleMa

            if response:
                yield response
            if node_failed_response:
                yield node_failed_response
        elif isinstance(event, QueueParallelBranchRunStartedEvent):
            if not workflow_run:
                raise Exception("Workflow run not initialized.")
@@ -394,47 +394,19 @@ class WorkflowAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCycleMa

            if not graph_runtime_state:
                raise Exception("Graph runtime state not initialized.")
            handle_args = {
                "workflow_run": workflow_run,
                "start_at": graph_runtime_state.start_at,
                "total_tokens": graph_runtime_state.total_tokens,
                "total_steps": graph_runtime_state.node_run_steps,
                "status": WorkflowRunStatus.FAILED
            workflow_run = self._handle_workflow_run_failed(
                workflow_run=workflow_run,
                start_at=graph_runtime_state.start_at,
                total_tokens=graph_runtime_state.total_tokens,
                total_steps=graph_runtime_state.node_run_steps,
                status=WorkflowRunStatus.FAILED
                if isinstance(event, QueueWorkflowFailedEvent)
                else WorkflowRunStatus.STOPPED,
                "error": event.error if isinstance(event, QueueWorkflowFailedEvent) else event.get_stop_reason(),
                "conversation_id": None,
                "trace_manager": trace_manager,
                "exceptions_count": event.exceptions_count if isinstance(event, QueueWorkflowFailedEvent) else 0,
            }
            workflow_run = self._handle_workflow_run_failed(**handle_args)

            # save workflow app log
            self._save_workflow_app_log(workflow_run)

            yield self._workflow_finish_to_stream_response(
                task_id=self._application_generate_entity.task_id, workflow_run=workflow_run
                error=event.error if isinstance(event, QueueWorkflowFailedEvent) else event.get_stop_reason(),
                conversation_id=None,
                trace_manager=trace_manager,
                exceptions_count=event.exceptions_count if isinstance(event, QueueWorkflowFailedEvent) else 0,
            )
        elif isinstance(event, QueueWorkflowPartialSuccessEvent):
            if not workflow_run:
                raise Exception("Workflow run not initialized.")

            if not graph_runtime_state:
                raise Exception("Graph runtime state not initialized.")
            handle_args = {
                "workflow_run": workflow_run,
                "start_at": graph_runtime_state.start_at,
                "total_tokens": graph_runtime_state.total_tokens,
                "total_steps": graph_runtime_state.node_run_steps,
                "status": WorkflowRunStatus.FAILED
                if isinstance(event, QueueWorkflowFailedEvent)
                else WorkflowRunStatus.STOPPED,
                "error": event.error if isinstance(event, QueueWorkflowFailedEvent) else event.get_stop_reason(),
                "conversation_id": None,
                "trace_manager": trace_manager,
                "exceptions_count": event.exceptions_count,
            }
            workflow_run = self._handle_workflow_run_partial_success(**handle_args)

            # save workflow app log
            self._save_workflow_app_log(workflow_run)
@@ -273,9 +273,9 @@ class WorkflowCycleManage:

        db.session.close()

        with Session(db.engine, expire_on_commit=False) as session:
            session.add(workflow_run)
            session.refresh(workflow_run)
        # with Session(db.engine, expire_on_commit=False) as session:
        #     session.add(workflow_run)
        #     session.refresh(workflow_run)

        if trace_manager:
            trace_manager.add_trace_task(
@@ -42,39 +42,31 @@ def to_prompt_message_content(
    *,
    image_detail_config: ImagePromptMessageContent.DETAIL | None = None,
):
    match f.type:
        case FileType.IMAGE:
            image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW
            if dify_config.MULTIMODAL_SEND_IMAGE_FORMAT == "url":
                data = _to_url(f)
            else:
                data = _to_base64_data_string(f)
            if f.extension is None:
                raise ValueError("Missing file extension")
            if f.mime_type is None:
                raise ValueError("Missing file mime_type")

            return ImagePromptMessageContent(data=data, detail=image_detail_config)
        case FileType.AUDIO:
            encoded_string = _get_encoded_string(f)
            if f.extension is None:
                raise ValueError("Missing file extension")
            return AudioPromptMessageContent(data=encoded_string, format=f.extension.lstrip("."))
        case FileType.VIDEO:
            if dify_config.MULTIMODAL_SEND_VIDEO_FORMAT == "url":
                data = _to_url(f)
            else:
                data = _to_base64_data_string(f)
            if f.extension is None:
                raise ValueError("Missing file extension")
            return VideoPromptMessageContent(data=data, format=f.extension.lstrip("."))
        case FileType.DOCUMENT:
            data = _get_encoded_string(f)
            if f.mime_type is None:
                raise ValueError("Missing file mime_type")
            return DocumentPromptMessageContent(
                encode_format="base64",
                mime_type=f.mime_type,
                data=data,
            )
        case _:
            raise ValueError(f"file type {f.type} is not supported")
    params = {
        "base64_data": _get_encoded_string(f) if dify_config.MULTIMODAL_SEND_FORMAT == "base64" else "",
        "url": _to_url(f) if dify_config.MULTIMODAL_SEND_FORMAT == "url" else "",
        "format": f.extension.removeprefix("."),
        "mime_type": f.mime_type,
    }
    if f.type == FileType.IMAGE:
        params["detail"] = image_detail_config or ImagePromptMessageContent.DETAIL.LOW

    prompt_class_map = {
        FileType.IMAGE: ImagePromptMessageContent,
        FileType.AUDIO: AudioPromptMessageContent,
        FileType.VIDEO: VideoPromptMessageContent,
        FileType.DOCUMENT: DocumentPromptMessageContent,
    }

    try:
        return prompt_class_map[f.type](**params)
    except KeyError:
        raise ValueError(f"file type {f.type} is not supported")


def download(f: File, /):

@@ -128,11 +120,6 @@ def _get_encoded_string(f: File, /):
    return encoded_string


def _to_base64_data_string(f: File, /):
    encoded_string = _get_encoded_string(f)
    return f"data:{f.mime_type};base64,{encoded_string}"


def _to_url(f: File, /):
    if f.transfer_method == FileTransferMethod.REMOTE_URL:
        if f.remote_url is None:
@@ -1,9 +1,9 @@
from abc import ABC
from collections.abc import Sequence
from enum import Enum, StrEnum
from typing import Literal, Optional
from typing import Optional

from pydantic import BaseModel, Field, field_validator
from pydantic import BaseModel, Field, computed_field, field_validator


class PromptMessageRole(Enum):

@@ -67,7 +67,6 @@ class PromptMessageContent(BaseModel):
    """

    type: PromptMessageContentType
    data: str


class TextPromptMessageContent(PromptMessageContent):

@@ -76,21 +75,35 @@ class TextPromptMessageContent(PromptMessageContent):
    """

    type: PromptMessageContentType = PromptMessageContentType.TEXT
    data: str


class VideoPromptMessageContent(PromptMessageContent):
class MultiModalPromptMessageContent(PromptMessageContent):
    """
    Model class for multi-modal prompt message content.
    """

    type: PromptMessageContentType
    format: str = Field(..., description="the format of multi-modal file")
    base64_data: str = Field("", description="the base64 data of multi-modal file")
    url: str = Field("", description="the url of multi-modal file")
    mime_type: str = Field(..., description="the mime type of multi-modal file")

    @computed_field(return_type=str)
    @property
    def data(self):
        return self.url or f"data:{self.mime_type};base64,{self.base64_data}"
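
Subclasses no longer store a data field; data is computed, preferring the url when one is set and otherwise rebuilding a data: URI from the base64 payload. A runnable sketch of just that pattern (model trimmed to the relevant fields):

from pydantic import BaseModel, computed_field

class MultiModalContent(BaseModel):
    mime_type: str
    base64_data: str = ""
    url: str = ""

    @computed_field(return_type=str)
    @property
    def data(self):
        return self.url or f"data:{self.mime_type};base64,{self.base64_data}"

print(MultiModalContent(mime_type="image/png", base64_data="AAAA").data)
# data:image/png;base64,AAAA
print(MultiModalContent(mime_type="image/png", url="https://example.com/a.png").data)
# https://example.com/a.png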


class VideoPromptMessageContent(MultiModalPromptMessageContent):
    type: PromptMessageContentType = PromptMessageContentType.VIDEO
    data: str = Field(..., description="Base64 encoded video data")
    format: str = Field(..., description="Video format")


class AudioPromptMessageContent(PromptMessageContent):
class AudioPromptMessageContent(MultiModalPromptMessageContent):
    type: PromptMessageContentType = PromptMessageContentType.AUDIO
    data: str = Field(..., description="Base64 encoded audio data")
    format: str = Field(..., description="Audio format")


class ImagePromptMessageContent(PromptMessageContent):
class ImagePromptMessageContent(MultiModalPromptMessageContent):
    """
    Model class for image prompt message content.
    """

@@ -103,11 +116,8 @@ class ImagePromptMessageContent(PromptMessageContent):
    detail: DETAIL = DETAIL.LOW


class DocumentPromptMessageContent(PromptMessageContent):
class DocumentPromptMessageContent(MultiModalPromptMessageContent):
    type: PromptMessageContentType = PromptMessageContentType.DOCUMENT
    encode_format: Literal["base64"]
    mime_type: str
    data: str


class PromptMessage(ABC, BaseModel):
@@ -1,5 +1,4 @@
import base64
import io
import json
from collections.abc import Generator, Sequence
from typing import Optional, Union, cast

@@ -18,7 +17,6 @@ from anthropic.types import (
)
from anthropic.types.beta.tools import ToolsBetaMessage
from httpx import Timeout
from PIL import Image

from core.model_runtime.callbacks.base_callback import Callback
from core.model_runtime.entities import (

@@ -498,22 +496,19 @@ class AnthropicLargeLanguageModel(LargeLanguageModel):
                    sub_messages.append(sub_message_dict)
                elif message_content.type == PromptMessageContentType.IMAGE:
                    message_content = cast(ImagePromptMessageContent, message_content)
                    if not message_content.data.startswith("data:"):
                    if not message_content.base64_data:
                        # fetch image data from url
                        try:
                            image_content = requests.get(message_content.data).content
                            with Image.open(io.BytesIO(image_content)) as img:
                                mime_type = f"image/{img.format.lower()}"
                            image_content = requests.get(message_content.url).content
                            base64_data = base64.b64encode(image_content).decode("utf-8")
                        except Exception as ex:
                            raise ValueError(
                                f"Failed to fetch image data from url {message_content.data}, {ex}"
                            )
                    else:
                        data_split = message_content.data.split(";base64,")
                        mime_type = data_split[0].replace("data:", "")
                        base64_data = data_split[1]
                        base64_data = message_content.base64_data

                    mime_type = message_content.mime_type
                    if mime_type not in {"image/jpeg", "image/png", "image/gif", "image/webp"}:
                        raise ValueError(
                            f"Unsupported image type {mime_type}, "

@@ -534,7 +529,7 @@ class AnthropicLargeLanguageModel(LargeLanguageModel):
                    sub_message_dict = {
                        "type": "document",
                        "source": {
                            "type": message_content.encode_format,
                            "type": "base64",
                            "media_type": message_content.mime_type,
                            "data": message_content.data,
                        },
@@ -86,6 +86,9 @@ model_credential_schema:
        - label:
            en_US: '2024-06-01'
          value: '2024-06-01'
        - label:
            en_US: '2024-10-21'
          value: '2024-10-21'
      placeholder:
        zh_Hans: 在此选择您的 API 版本
        en_US: Select your API Version here
@@ -10,6 +10,7 @@ from core.model_runtime.entities.llm_entities import (
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessage,
    PromptMessageContentType,
    PromptMessageTool,
    SystemPromptMessage,
    ToolPromptMessage,

@@ -105,7 +106,11 @@ class BaichuanLanguageModel(LargeLanguageModel):
            if isinstance(message.content, str):
                message_dict = {"role": "user", "content": message.content}
            else:
                raise ValueError("User message content must be str")
                for message_content in message.content:
                    if message_content.type == PromptMessageContentType.TEXT:
                        message_dict = {"role": "user", "content": message_content.data}
                    elif message_content.type == PromptMessageContentType.IMAGE:
                        raise ValueError("Content object type not support image_url")
        elif isinstance(message, AssistantPromptMessage):
            message = cast(AssistantPromptMessage, message)
            message_dict = {"role": "assistant", "content": message.content}
@@ -24,6 +24,9 @@ class DeepseekLargeLanguageModel(OAIAPICompatLargeLanguageModel):
        user: Optional[str] = None,
    ) -> Union[LLMResult, Generator]:
        self._add_custom_parameters(credentials)
        # {"response_format": "xx"} need convert to {"response_format": {"type": "xx"}}
        if "response_format" in model_parameters:
            model_parameters["response_format"] = {"type": model_parameters.get("response_format")}
        return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream)
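
The inserted conversion exists because the endpoint takes response_format as an object while the parameter arrives as a bare string (per the comment above). The transformation in isolation:

model_parameters = {"response_format": "json_object"}
if "response_format" in model_parameters:
    model_parameters["response_format"] = {"type": model_parameters.get("response_format")}
print(model_parameters)  # {'response_format': {'type': 'json_object'}}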

    def validate_credentials(self, model: str, credentials: dict) -> None:
@@ -0,0 +1,93 @@
model: InternVL2-8B
label:
  en_US: InternVL2-8B
model_type: llm
features:
  - vision
  - agent-thought
model_properties:
  mode: chat
  context_size: 32000
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    label:
      en_US: "Max Tokens"
      zh_Hans: "最大Token数"
    type: int
    default: 512
    min: 1
    required: true
    help:
      en_US: "The maximum number of tokens that can be generated by the model varies depending on the model."
      zh_Hans: "模型可生成的最大 token 个数,不同模型上限不同。"

  - name: temperature
    use_template: temperature
    label:
      en_US: "Temperature"
      zh_Hans: "采样温度"
    type: float
    default: 0.7
    min: 0.0
    max: 1.0
    precision: 1
    required: true
    help:
      en_US: "The randomness of the sampling temperature control output. The temperature value is within the range of [0.0, 1.0]. The higher the value, the more random and creative the output; the lower the value, the more stable it is. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样温度控制输出的随机性。温度值在 [0.0, 1.0] 范围内,值越高,输出越随机和创造性;值越低,输出越稳定。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: top_p
    use_template: top_p
    label:
      en_US: "Top P"
      zh_Hans: "Top P"
    type: float
    default: 0.7
    min: 0.0
    max: 1.0
    precision: 1
    required: true
    help:
      en_US: "The value range of the sampling method is [0.0, 1.0]. The top_p value determines that the model selects tokens from the top p% of candidate words with the highest probability; when top_p is 0, this parameter is invalid. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样方法的取值范围为 [0.0,1.0]。top_p 值确定模型从概率最高的前p%的候选词中选取 tokens;当 top_p 为 0 时,此参数无效。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: top_k
    use_template: top_k
    label:
      en_US: "Top K"
      zh_Hans: "Top K"
    type: int
    default: 50
    min: 0
    max: 100
    required: true
    help:
      en_US: "The value range is [0,100], which limits the model to only select from the top k words with the highest probability when choosing the next word at each step. The larger the value, the more diverse text generation will be."
      zh_Hans: "取值范围为 [0,100],限制模型在每一步选择下一个词时,只从概率最高的前 k 个词中选取。数值越大,文本生成越多样。"

  - name: frequency_penalty
    use_template: frequency_penalty
    label:
      en_US: "Frequency Penalty"
      zh_Hans: "频率惩罚"
    type: float
    default: 0
    min: -1.0
    max: 1.0
    precision: 1
    required: false
    help:
      en_US: "Used to adjust the frequency of repeated content in automatically generated text. Positive numbers reduce repetition, while negative numbers increase repetition. After setting this parameter, if a word has already appeared in the text, the model will decrease the probability of choosing that word for subsequent generation."
      zh_Hans: "用于调整自动生成文本中重复内容的频率。正数减少重复,负数增加重复。设置此参数后,如果一个词在文本中已经出现过,模型在后续生成中选择该词的概率会降低。"

  - name: user
    use_template: text
    label:
      en_US: "User"
      zh_Hans: "用户"
    type: string
    required: false
    help:
      en_US: "Used to track and differentiate conversation requests from different users."
      zh_Hans: "用于追踪和区分不同用户的对话请求。"
@@ -0,0 +1,93 @@
model: InternVL2.5-26B
label:
  en_US: InternVL2.5-26B
model_type: llm
features:
  - vision
  - agent-thought
model_properties:
  mode: chat
  context_size: 32000
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    label:
      en_US: "Max Tokens"
      zh_Hans: "最大Token数"
    type: int
    default: 512
    min: 1
    required: true
    help:
      en_US: "The maximum number of tokens that can be generated by the model varies depending on the model."
      zh_Hans: "模型可生成的最大 token 个数,不同模型上限不同。"

  - name: temperature
    use_template: temperature
    label:
      en_US: "Temperature"
      zh_Hans: "采样温度"
    type: float
    default: 0.7
    min: 0.0
    max: 1.0
    precision: 1
    required: true
    help:
      en_US: "The randomness of the sampling temperature control output. The temperature value is within the range of [0.0, 1.0]. The higher the value, the more random and creative the output; the lower the value, the more stable it is. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样温度控制输出的随机性。温度值在 [0.0, 1.0] 范围内,值越高,输出越随机和创造性;值越低,输出越稳定。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: top_p
    use_template: top_p
    label:
      en_US: "Top P"
      zh_Hans: "Top P"
    type: float
    default: 0.7
    min: 0.0
    max: 1.0
    precision: 1
    required: true
    help:
      en_US: "The value range of the sampling method is [0.0, 1.0]. The top_p value determines that the model selects tokens from the top p% of candidate words with the highest probability; when top_p is 0, this parameter is invalid. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样方法的取值范围为 [0.0,1.0]。top_p 值确定模型从概率最高的前p%的候选词中选取 tokens;当 top_p 为 0 时,此参数无效。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: top_k
    use_template: top_k
    label:
      en_US: "Top K"
      zh_Hans: "Top K"
    type: int
    default: 50
    min: 0
    max: 100
    required: true
    help:
      en_US: "The value range is [0,100], which limits the model to only select from the top k words with the highest probability when choosing the next word at each step. The larger the value, the more diverse text generation will be."
      zh_Hans: "取值范围为 [0,100],限制模型在每一步选择下一个词时,只从概率最高的前 k 个词中选取。数值越大,文本生成越多样。"

  - name: frequency_penalty
    use_template: frequency_penalty
    label:
      en_US: "Frequency Penalty"
      zh_Hans: "频率惩罚"
    type: float
    default: 0
    min: -1.0
    max: 1.0
    precision: 1
    required: false
    help:
      en_US: "Used to adjust the frequency of repeated content in automatically generated text. Positive numbers reduce repetition, while negative numbers increase repetition. After setting this parameter, if a word has already appeared in the text, the model will decrease the probability of choosing that word for subsequent generation."
      zh_Hans: "用于调整自动生成文本中重复内容的频率。正数减少重复,负数增加重复。设置此参数后,如果一个词在文本中已经出现过,模型在后续生成中选择该词的概率会降低。"

  - name: user
    use_template: text
    label:
      en_US: "User"
      zh_Hans: "用户"
    type: string
    required: false
    help:
      en_US: "Used to track and differentiate conversation requests from different users."
      zh_Hans: "用于追踪和区分不同用户的对话请求。"
@@ -6,3 +6,5 @@
- deepseek-coder-33B-instruct-chat
- deepseek-coder-33B-instruct-completions
- codegeex4-all-9b
- InternVL2.5-26B
- InternVL2-8B
@@ -29,18 +29,26 @@ class GiteeAILargeLanguageModel(OAIAPICompatLargeLanguageModel):
        user: Optional[str] = None,
    ) -> Union[LLMResult, Generator]:
        self._add_custom_parameters(credentials, model, model_parameters)
        return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user)
        return super()._invoke(
            GiteeAILargeLanguageModel.MODEL_TO_IDENTITY.get(model, model),
            credentials,
            prompt_messages,
            model_parameters,
            tools,
            stop,
            stream,
            user,
        )

    def validate_credentials(self, model: str, credentials: dict) -> None:
        self._add_custom_parameters(credentials, None)
        super().validate_credentials(model, credentials)
        self._add_custom_parameters(credentials, model, None)
        super().validate_credentials(GiteeAILargeLanguageModel.MODEL_TO_IDENTITY.get(model, model), credentials)

    def _add_custom_parameters(self, credentials: dict, model: Optional[str]) -> None:
    def _add_custom_parameters(self, credentials: dict, model: Optional[str], model_parameters: dict) -> None:
        if model is None:
            model = "Qwen2-72B-Instruct"

        model_identity = GiteeAILargeLanguageModel.MODEL_TO_IDENTITY.get(model, model)
        credentials["endpoint_url"] = f"https://ai.gitee.com/api/serverless/{model_identity}/"
        credentials["endpoint_url"] = "https://ai.gitee.com/v1"
        if model.endswith("completions"):
            credentials["mode"] = LLMMode.COMPLETION.value
        else:
@@ -1,3 +1,4 @@
- gemini-2.0-flash-exp
- gemini-1.5-pro
- gemini-1.5-pro-latest
- gemini-1.5-pro-001

@@ -11,6 +12,8 @@
- gemini-1.5-flash-exp-0827
- gemini-1.5-flash-8b-exp-0827
- gemini-1.5-flash-8b-exp-0924
- gemini-exp-1206
- gemini-exp-1121
- gemini-exp-1114
- gemini-pro
- gemini-pro-vision
@@ -8,6 +8,8 @@ features:
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 1048576

@@ -8,6 +8,8 @@ features:
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 1048576

@@ -8,6 +8,8 @@ features:
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 1048576

@@ -8,6 +8,8 @@ features:
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 1048576

@@ -8,6 +8,8 @@ features:
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 1048576

@@ -8,6 +8,8 @@ features:
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 1048576

@@ -8,6 +8,8 @@ features:
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 1048576

@@ -8,6 +8,8 @@ features:
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 2097152

@@ -8,6 +8,8 @@ features:
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 2097152

@@ -8,6 +8,8 @@ features:
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 2097152

@@ -8,6 +8,8 @@ features:
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 2097152

@@ -8,6 +8,8 @@ features:
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 2097152

@@ -8,6 +8,8 @@ features:
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 2097152

@@ -8,6 +8,8 @@ features:
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 1048576

@@ -8,6 +8,8 @@ features:
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 32767

@@ -7,6 +7,9 @@ features:
  - vision
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 32767

@@ -7,6 +7,9 @@ features:
  - vision
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 2097152

@@ -7,6 +7,9 @@ features:
  - vision
  - tool-call
  - stream-tool-call
  - document
  - video
  - audio
model_properties:
  mode: chat
  context_size: 32767
@@ -1,24 +1,23 @@
import base64
import io
import json
import os
import tempfile
import time
from collections.abc import Generator
from typing import Optional, Union, cast
from typing import Optional, Union

import google.ai.generativelanguage as glm
import google.generativeai as genai
import requests
from google.api_core import exceptions
from google.generativeai.client import _ClientManager
from google.generativeai.types import ContentType, GenerateContentResponse
from google.generativeai.types import ContentType, File, GenerateContentResponse
from google.generativeai.types.content_types import to_part
from PIL import Image

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    DocumentPromptMessageContent,
    ImagePromptMessageContent,
    PromptMessage,
    PromptMessageContent,
    PromptMessageContentType,
    PromptMessageTool,
    SystemPromptMessage,

@@ -35,21 +34,7 @@ from core.model_runtime.errors.invoke import (
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel

GOOGLE_AVAILABLE_MIMETYPE = [
    "application/pdf",
    "application/x-javascript",
    "text/javascript",
    "application/x-python",
    "text/x-python",
    "text/plain",
    "text/html",
    "text/css",
    "text/md",
    "text/csv",
    "text/xml",
    "text/rtf",
]
from extensions.ext_redis import redis_client


class GoogleLargeLanguageModel(LargeLanguageModel):
@@ -201,29 +186,17 @@ class GoogleLargeLanguageModel(LargeLanguageModel):
        if stop:
            config_kwargs["stop_sequences"] = stop

        genai.configure(api_key=credentials["google_api_key"])
        google_model = genai.GenerativeModel(model_name=model)

        history = []

        # hack for gemini-pro-vision, which currently does not support multi-turn chat
        if model == "gemini-pro-vision":
            last_msg = prompt_messages[-1]
            content = self._format_message_to_glm_content(last_msg)
            history.append(content)
        else:
            for msg in prompt_messages:  # makes message roles strictly alternating
                content = self._format_message_to_glm_content(msg)
                if history and history[-1]["role"] == content["role"]:
                    history[-1]["parts"].extend(content["parts"])
                else:
                    history.append(content)

        # Create a new ClientManager with tenant's API key
        new_client_manager = _ClientManager()
        new_client_manager.configure(api_key=credentials["google_api_key"])
        new_custom_client = new_client_manager.make_client("generative")

        google_model._client = new_custom_client
        for msg in prompt_messages:  # makes message roles strictly alternating
            content = self._format_message_to_glm_content(msg)
            if history and history[-1]["role"] == content["role"]:
                history[-1]["parts"].extend(content["parts"])
            else:
                history.append(content)

        response = google_model.generate_content(
            contents=history,
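
The multi-turn loop merges consecutive messages that share a role, since message roles must alternate strictly (per the comment in the loop). The merging pass in isolation, with dict-shaped messages standing in for glm content:

history = []
incoming = [
    {"role": "user", "parts": ["a"]},
    {"role": "user", "parts": ["b"]},   # same role: folded into the previous turn
    {"role": "model", "parts": ["c"]},
]
for content in incoming:
    if history and history[-1]["role"] == content["role"]:
        history[-1]["parts"].extend(content["parts"])
    else:
        history.append(content)
print(history)  # [{'role': 'user', 'parts': ['a', 'b']}, {'role': 'model', 'parts': ['c']}]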
@@ -317,8 +290,12 @@ class GoogleLargeLanguageModel(LargeLanguageModel):
            )
        else:
            # calculate num tokens
            prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
            completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message])
            if hasattr(response, "usage_metadata") and response.usage_metadata:
                prompt_tokens = response.usage_metadata.prompt_token_count
                completion_tokens = response.usage_metadata.candidates_token_count
            else:
                prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
                completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message])

        # transform usage
        usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)

@@ -346,7 +323,7 @@ class GoogleLargeLanguageModel(LargeLanguageModel):

        content = message.content
        if isinstance(content, list):
            content = "".join(c.data for c in content if c.type != PromptMessageContentType.IMAGE)
            content = "".join(c.data for c in content if c.type == PromptMessageContentType.TEXT)

        if isinstance(message, UserPromptMessage):
            message_text = f"{human_prompt} {content}"
@@ -359,6 +336,40 @@ class GoogleLargeLanguageModel(LargeLanguageModel):

        return message_text

    def _upload_file_content_to_google(self, message_content: PromptMessageContent) -> File:
        key = f"{message_content.type.value}:{hash(message_content.data)}"
        if redis_client.exists(key):
            try:
                return genai.get_file(redis_client.get(key).decode())
            except:
                pass
        with tempfile.NamedTemporaryFile(delete=False) as temp_file:
            if message_content.base64_data:
                file_content = base64.b64decode(message_content.base64_data)
                temp_file.write(file_content)
            else:
                try:
                    response = requests.get(message_content.url)
                    response.raise_for_status()
                    temp_file.write(response.content)
                except Exception as ex:
                    raise ValueError(f"Failed to fetch data from url {message_content.url}, {ex}")
            temp_file.flush()

        file = genai.upload_file(path=temp_file.name, mime_type=message_content.mime_type)
        while file.state.name == "PROCESSING":
            time.sleep(5)
            file = genai.get_file(file.name)
        # google will delete your upload files in 2 days.
        redis_client.setex(key, 47 * 60 * 60, file.name)

        try:
            os.unlink(temp_file.name)
        except PermissionError:
            # windows may raise permission error
            pass
        return file
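
The 47-hour TTL is deliberate: the uploaded file is removed by Google after about two days (per the comment above), so the cached handle must expire before the file does. A hedged sketch of the cache write, assuming a reachable Redis and illustrative values:

import redis

redis_client = redis.Redis()
key = "image:123456789"                                 # f"{type}:{hash(data)}" as above
redis_client.setex(key, 47 * 60 * 60, "files/abc-123")  # expires just under Google's ~48h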

    def _format_message_to_glm_content(self, message: PromptMessage) -> ContentType:
        """
        Format a single message into glm.Content for Google API

@@ -374,28 +385,8 @@ class GoogleLargeLanguageModel(LargeLanguageModel):
            for c in message.content:
                if c.type == PromptMessageContentType.TEXT:
                    glm_content["parts"].append(to_part(c.data))
                elif c.type == PromptMessageContentType.IMAGE:
                    message_content = cast(ImagePromptMessageContent, c)
                    if message_content.data.startswith("data:"):
                        metadata, base64_data = c.data.split(",", 1)
                        mime_type = metadata.split(";", 1)[0].split(":")[1]
                    else:
                        # fetch image data from url
                        try:
                            image_content = requests.get(message_content.data).content
                            with Image.open(io.BytesIO(image_content)) as img:
                                mime_type = f"image/{img.format.lower()}"
                            base64_data = base64.b64encode(image_content).decode("utf-8")
                        except Exception as ex:
                            raise ValueError(f"Failed to fetch image data from url {message_content.data}, {ex}")
                    blob = {"inline_data": {"mime_type": mime_type, "data": base64_data}}
                    glm_content["parts"].append(blob)
                elif c.type == PromptMessageContentType.DOCUMENT:
                    message_content = cast(DocumentPromptMessageContent, c)
                    if message_content.mime_type not in GOOGLE_AVAILABLE_MIMETYPE:
                        raise ValueError(f"Unsupported mime type {message_content.mime_type}")
                    blob = {"inline_data": {"mime_type": message_content.mime_type, "data": message_content.data}}
                    glm_content["parts"].append(blob)
                else:
                    glm_content["parts"].append(self._upload_file_content_to_google(c))

            return glm_content
        elif isinstance(message, AssistantPromptMessage):
@@ -920,10 +920,12 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
                    }
                    sub_messages.append(sub_message_dict)
                elif isinstance(message_content, AudioPromptMessageContent):
                    data_split = message_content.data.split(";base64,")
                    base64_data = data_split[1]
                    sub_message_dict = {
                        "type": "input_audio",
                        "input_audio": {
                            "data": message_content.data,
                            "data": base64_data,
                            "format": message_content.format,
                        },
                    }
@@ -478,6 +478,10 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
                        usage=usage,
                    )
                    break
                # handle the error here. for issue #11629
                if chunk_json.get("error") and chunk_json.get("choices") is None:
                    raise ValueError(chunk_json.get("error"))

                if chunk_json:
                    if u := chunk_json.get("usage"):
                        usage = u
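
The new guard covers OpenAI-compatible backends that stream a bare error object with no choices array (issue #11629), which previously slipped past the parser. The check in isolation:

chunk_json = {"error": {"message": "rate limit exceeded"}}  # no "choices" key
try:
    if chunk_json.get("error") and chunk_json.get("choices") is None:
        raise ValueError(chunk_json.get("error"))
except ValueError as e:
    print(f"stream aborted: {e}")  # stream aborted: {'message': 'rate limit exceeded'}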
@@ -434,9 +434,9 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
                    sub_messages.append(sub_message_dict)
                elif message_content.type == PromptMessageContentType.VIDEO:
                    message_content = cast(VideoPromptMessageContent, message_content)
                    video_url = message_content.data
                    if message_content.data.startswith("data:"):
                        raise InvokeError("not support base64, please set MULTIMODAL_SEND_VIDEO_FORMAT to url")
                    video_url = message_content.url
                    if not video_url:
                        raise InvokeError("not support base64, please set MULTIMODAL_SEND_FORMAT to url")

                    sub_message_dict = {"video": video_url}
                    sub_messages.append(sub_message_dict)
@@ -1,4 +1,3 @@
import re
from collections.abc import Generator
from typing import Optional, cast

@@ -104,17 +103,16 @@ class ArkClientV3:
                if message_content.type == PromptMessageContentType.TEXT:
                    content.append(
                        ChatCompletionContentPartTextParam(
                            text=message_content.text,
                            text=message_content.data,
                            type="text",
                        )
                    )
                elif message_content.type == PromptMessageContentType.IMAGE:
                    message_content = cast(ImagePromptMessageContent, message_content)
                    image_data = re.sub(r"^data:image\/[a-zA-Z]+;base64,", "", message_content.data)
                    content.append(
                        ChatCompletionContentPartImageParam(
                            image_url=ImageURL(
                                url=image_data,
                                url=message_content.data,
                                detail=message_content.detail.value,
                            ),
                            type="image_url",
@@ -68,7 +68,12 @@ class MaaSClient(MaasService):
            content = []
            for message_content in message.content:
                if message_content.type == PromptMessageContentType.TEXT:
                    raise ValueError("Content object type only support image_url")
                    content.append(
                        {
                            "type": "text",
                            "text": message_content.data,
                        }
                    )
                elif message_content.type == PromptMessageContentType.IMAGE:
                    message_content = cast(ImagePromptMessageContent, message_content)
                    image_data = re.sub(r"^data:image\/[a-zA-Z]+;base64,", "", message_content.data)
@@ -132,6 +132,14 @@ class VolcengineMaaSLargeLanguageModel(LargeLanguageModel):
        messages_dict = [ArkClientV3.convert_prompt_message(m) for m in messages]
        for message in messages_dict:
            for key, value in message.items():
                # Ignore tokens for image type
                if isinstance(value, list):
                    text = ""
                    for item in value:
                        if isinstance(item, dict) and item["type"] == "text":
                            text += item["text"]

                    value = text
                num_tokens += self._get_num_tokens_by_gpt2(str(key))
                num_tokens += self._get_num_tokens_by_gpt2(str(value))
@@ -16,6 +16,14 @@ class ModelConfig(BaseModel):


configs: dict[str, ModelConfig] = {
    "Doubao-vision-pro-32k": ModelConfig(
        properties=ModelProperties(context_size=32768, max_tokens=4096, mode=LLMMode.CHAT),
        features=[ModelFeature.VISION],
    ),
    "Doubao-vision-lite-32k": ModelConfig(
        properties=ModelProperties(context_size=32768, max_tokens=4096, mode=LLMMode.CHAT),
        features=[ModelFeature.VISION],
    ),
    "Doubao-pro-4k": ModelConfig(
        properties=ModelProperties(context_size=4096, max_tokens=4096, mode=LLMMode.CHAT),
        features=[ModelFeature.TOOL_CALL],
@@ -118,6 +118,18 @@ model_credential_schema:
      type: select
      required: true
      options:
        - label:
            en_US: Doubao-vision-pro-32k
          value: Doubao-vision-pro-32k
          show_on:
            - variable: __model_type
              value: llm
        - label:
            en_US: Doubao-vision-lite-32k
          value: Doubao-vision-lite-32k
          show_on:
            - variable: __model_type
              value: llm
        - label:
            en_US: Doubao-pro-4k
          value: Doubao-pro-4k
@@ -0,0 +1,66 @@
model: grok-2-1212
label:
  en_US: grok-2-1212
model_type: llm
features:
  - agent-thought
  - tool-call
  - multi-tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 131072
parameter_rules:
  - name: temperature
    label:
      en_US: "Temperature"
      zh_Hans: "采样温度"
    type: float
    default: 0.7
    min: 0.0
    max: 2.0
    precision: 1
    required: true
    help:
      en_US: "The randomness of the sampling temperature control output. The temperature value is within the range of [0.0, 1.0]. The higher the value, the more random and creative the output; the lower the value, the more stable it is. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样温度控制输出的随机性。温度值在 [0.0, 1.0] 范围内,值越高,输出越随机和创造性;值越低,输出越稳定。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: top_p
    label:
      en_US: "Top P"
      zh_Hans: "Top P"
    type: float
    default: 0.7
    min: 0.0
    max: 1.0
    precision: 1
    required: true
    help:
      en_US: "The value range of the sampling method is [0.0, 1.0]. The top_p value determines that the model selects tokens from the top p% of candidate words with the highest probability; when top_p is 0, this parameter is invalid. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样方法的取值范围为 [0.0,1.0]。top_p 值确定模型从概率最高的前p%的候选词中选取 tokens;当 top_p 为 0 时,此参数无效。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: frequency_penalty
    use_template: frequency_penalty
    label:
      en_US: "Frequency Penalty"
      zh_Hans: "频率惩罚"
    type: float
    default: 0
    min: 0
    max: 2.0
    precision: 1
    required: false
    help:
      en_US: "Number between 0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
      zh_Hans: "介于0和2.0之间的数字。正值会根据新标记在文本中迄今为止的现有频率来惩罚它们,从而降低模型一字不差地重复同一句话的可能性。"

  - name: user
    use_template: text
    label:
      en_US: "User"
      zh_Hans: "用户"
    type: string
    required: false
    help:
      en_US: "Used to track and differentiate conversation requests from different users."
      zh_Hans: "用于追踪和区分不同用户的对话请求。"
@@ -0,0 +1,64 @@
model: grok-2-vision-1212
label:
  en_US: grok-2-vision-1212
model_type: llm
features:
  - agent-thought
  - vision
model_properties:
  mode: chat
  context_size: 8192
parameter_rules:
  - name: temperature
    label:
      en_US: "Temperature"
      zh_Hans: "采样温度"
    type: float
    default: 0.7
    min: 0.0
    max: 2.0
    precision: 1
    required: true
    help:
      en_US: "The randomness of the sampling temperature control output. The temperature value is within the range of [0.0, 1.0]. The higher the value, the more random and creative the output; the lower the value, the more stable it is. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样温度控制输出的随机性。温度值在 [0.0, 1.0] 范围内,值越高,输出越随机和创造性;值越低,输出越稳定。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: top_p
    label:
      en_US: "Top P"
      zh_Hans: "Top P"
    type: float
    default: 0.7
    min: 0.0
    max: 1.0
    precision: 1
    required: true
    help:
      en_US: "The value range of the sampling method is [0.0, 1.0]. The top_p value determines that the model selects tokens from the top p% of candidate words with the highest probability; when top_p is 0, this parameter is invalid. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样方法的取值范围为 [0.0,1.0]。top_p 值确定模型从概率最高的前p%的候选词中选取 tokens;当 top_p 为 0 时,此参数无效。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: frequency_penalty
    use_template: frequency_penalty
    label:
      en_US: "Frequency Penalty"
      zh_Hans: "频率惩罚"
    type: float
    default: 0
    min: 0
    max: 2.0
    precision: 1
    required: false
    help:
      en_US: "Number between 0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
      zh_Hans: "介于0和2.0之间的数字。正值会根据新标记在文本中迄今为止的现有频率来惩罚它们,从而降低模型一字不差地重复同一句话的可能性。"

  - name: user
    use_template: text
    label:
      en_US: "User"
      zh_Hans: "用户"
    type: string
    required: false
    help:
      en_US: "Used to track and differentiate conversation requests from different users."
      zh_Hans: "用于追踪和区分不同用户的对话请求。"
@@ -1,6 +1,6 @@
 model: grok-beta
 label:
-  en_US: Grok Beta
+  en_US: grok-beta
 model_type: llm
 features:
   - agent-thought
@@ -1,6 +1,6 @@
 model: grok-vision-beta
 label:
-  en_US: Grok Vision Beta
+  en_US: grok-vision-beta
 model_type: llm
 features:
   - agent-thought
@@ -4,7 +4,7 @@ import os
 from datetime import datetime, timedelta
 from typing import Optional

-from langfuse import Langfuse
+from langfuse import Langfuse  # type: ignore

 from core.ops.base_trace_instance import BaseTraceInstance
 from core.ops.entities.config_entity import LangfuseConfig
@@ -82,8 +82,6 @@ class LangFuseDataTrace(BaseTraceInstance):
             metadata=metadata,
             session_id=trace_info.conversation_id,
             tags=["message", "workflow"],
-            created_at=trace_info.start_time,
-            updated_at=trace_info.end_time,
         )
         self.add_trace(langfuse_trace_data=trace_data)
         workflow_span_data = LangfuseSpan(
@@ -242,11 +240,13 @@ class LangFuseDataTrace(BaseTraceInstance):
         file_list = trace_info.file_list
         metadata = trace_info.metadata
         message_data = trace_info.message_data
+        if message_data is None:
+            return
         message_id = message_data.id

         user_id = message_data.from_account_id
         if message_data.from_end_user_id:
-            end_user_data: EndUser = (
+            end_user_data: Optional[EndUser] = (
                 db.session.query(EndUser).filter(EndUser.id == message_data.from_end_user_id).first()
             )
             if end_user_data is not None:
@@ -303,6 +303,8 @@ class LangFuseDataTrace(BaseTraceInstance):
         self.add_generation(langfuse_generation_data)

     def moderation_trace(self, trace_info: ModerationTraceInfo):
+        if trace_info.message_data is None:
+            return
         span_data = LangfuseSpan(
             name=TraceTaskName.MODERATION_TRACE.value,
             input=trace_info.inputs,
@@ -322,9 +324,11 @@ class LangFuseDataTrace(BaseTraceInstance):

     def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo):
         message_data = trace_info.message_data
+        if message_data is None:
+            return
         generation_usage = GenerationUsage(
             total=len(str(trace_info.suggested_question)),
-            input=len(trace_info.inputs),
+            input=len(trace_info.inputs) if trace_info.inputs else 0,
             output=len(trace_info.suggested_question),
             unit=UnitEnum.CHARACTERS,
         )
@@ -345,6 +349,8 @@ class LangFuseDataTrace(BaseTraceInstance):
         self.add_generation(langfuse_generation_data=generation_data)

     def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo):
+        if trace_info.message_data is None:
+            return
         dataset_retrieval_span_data = LangfuseSpan(
             name=TraceTaskName.DATASET_RETRIEVAL_TRACE.value,
             input=trace_info.inputs,
@@ -424,7 +424,7 @@ def default_vector_search_query(
 ) -> dict:
     if filters is not None:
         filter_type = "post_filter" if filter_type is None else filter_type
-        if not isinstance(filter, list):
+        if not isinstance(filters, list):
             raise RuntimeError(f"unexpected filter with {type(filters)}")
     final_ext = {"lvector": {}}
     if min_score != "0.0":
@@ -37,8 +37,6 @@ class TiDBVectorConfig(BaseModel):
             raise ValueError("config TIDB_VECTOR_PORT is required")
         if not values["user"]:
             raise ValueError("config TIDB_VECTOR_USER is required")
-        if not values["password"]:
-            raise ValueError("config TIDB_VECTOR_PASSWORD is required")
         if not values["database"]:
             raise ValueError("config TIDB_VECTOR_DATABASE is required")
         if not values["program_name"]:
@@ -43,6 +43,13 @@ class JinaReaderTool(BuiltinTool):
         if wait_for_selector is not None and wait_for_selector != "":
             headers["X-Wait-For-Selector"] = wait_for_selector

+        remove_selector = tool_parameters.get("remove_selector")
+        if remove_selector is not None and remove_selector != "":
+            headers["X-Remove-Selector"] = remove_selector
+
+        if tool_parameters.get("retain_images", False):
+            headers["X-Retain-Images"] = "true"
+
         if tool_parameters.get("image_caption", False):
             headers["X-With-Generated-Alt"] = "true"

@@ -59,6 +66,12 @@ class JinaReaderTool(BuiltinTool):
         if tool_parameters.get("no_cache", False):
             headers["X-No-Cache"] = "true"

+        if tool_parameters.get("with_iframe", False):
+            headers["X-With-Iframe"] = "true"
+
+        if tool_parameters.get("with_shadow_dom", False):
+            headers["X-With-Shadow-Dom"] = "true"
+
         max_retries = tool_parameters.get("max_retries", 3)
         response = ssrf_proxy.get(
             str(URL(self._jina_reader_endpoint + url)),
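Each of the new tool options above maps one-to-one onto an X-* request header sent to the Jina Reader endpoint. The following is a minimal sketch of that mapping, mirroring the hunks just shown; the helper name is hypothetical, selector options pass their value through, and boolean options become "true"-valued headers:

def build_jina_headers(tool_parameters: dict) -> dict[str, str]:
    # Hypothetical helper illustrating the option-to-header mapping above.
    headers: dict[str, str] = {}
    remove_selector = tool_parameters.get("remove_selector")
    if remove_selector:  # selector options carry their value through
        headers["X-Remove-Selector"] = remove_selector
    flag_headers = [
        ("retain_images", "X-Retain-Images"),
        ("image_caption", "X-With-Generated-Alt"),
        ("with_iframe", "X-With-Iframe"),
        ("with_shadow_dom", "X-With-Shadow-Dom"),
    ]
    for param, header in flag_headers:
        if tool_parameters.get(param, False):  # boolean options become "true"
            headers[header] = "true"
    return headers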
@@ -67,6 +67,33 @@ parameters:
       pt_BR: css selector para aguardar elementos específicos
     llm_description: css selector of the target element to wait for
     form: form
+  - name: remove_selector
+    type: string
+    required: false
+    label:
+      en_US: Excluded Selector
+      zh_Hans: 排除选择器
+      pt_BR: Seletor Excluído
+    human_description:
+      en_US: css selector for remove for specific elements
+      zh_Hans: css 选择器用于排除特定元素
+      pt_BR: seletor CSS para remover elementos específicos
+    llm_description: css selector of the target element to remove for
+    form: form
+  - name: retain_images
+    type: boolean
+    required: false
+    default: false
+    label:
+      en_US: Remove All Images
+      zh_Hans: 删除所有图片
+      pt_BR: Remover todas as imagens
+    human_description:
+      en_US: Removes all images from the response.
+      zh_Hans: 从响应中删除所有图片。
+      pt_BR: Remove todas as imagens da resposta.
+    llm_description: Remove all images
+    form: form
   - name: image_caption
     type: boolean
     required: false
@@ -136,6 +163,34 @@ parameters:
       pt_BR: Ignorar o cache
     llm_description: bypass the cache
     form: form
+  - name: with_iframe
+    type: boolean
+    required: false
+    default: false
+    label:
+      en_US: Enable iframe extraction
+      zh_Hans: 启用 iframe 提取
+      pt_BR: Habilitar extração de iframe
+    human_description:
+      en_US: Extract and process content of all embedded iframes in the DOM tree.
+      zh_Hans: 提取并处理 DOM 树中所有嵌入 iframe 的内容。
+      pt_BR: Extrair e processar o conteúdo de todos os iframes incorporados na árvore DOM.
+    llm_description: Extract content from embedded iframes
+    form: form
+  - name: with_shadow_dom
+    type: boolean
+    required: false
+    default: false
+    label:
+      en_US: Enable Shadow DOM extraction
+      zh_Hans: 启用 Shadow DOM 提取
+      pt_BR: Habilitar extração de Shadow DOM
+    human_description:
+      en_US: Traverse all Shadow DOM roots in the document and extract content.
+      zh_Hans: 遍历文档中所有 Shadow DOM 根并提取内容。
+      pt_BR: Percorra todas as raízes do Shadow DOM no documento e extraia o conteúdo.
+    llm_description: Extract content from Shadow DOM roots
+    form: form
   - name: summary
    type: boolean
     required: false
@@ -10,6 +10,7 @@ from typing import Any, Optional, cast

 from flask import Flask, current_app

+from configs import dify_config
 from core.app.apps.base_app_queue_manager import GenerateTaskStoppedError
 from core.app.entities.app_invoke_entities import InvokeFrom
 from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult
@@ -54,7 +55,12 @@ logger = logging.getLogger(__name__)

 class GraphEngineThreadPool(ThreadPoolExecutor):
     def __init__(
-        self, max_workers=None, thread_name_prefix="", initializer=None, initargs=(), max_submit_count=100
+        self,
+        max_workers=None,
+        thread_name_prefix="",
+        initializer=None,
+        initargs=(),
+        max_submit_count=dify_config.MAX_SUBMIT_COUNT,
     ) -> None:
         super().__init__(max_workers, thread_name_prefix, initializer, initargs)
         self.max_submit_count = max_submit_count
@@ -94,7 +100,7 @@ class GraphEngine:
         max_execution_time: int,
         thread_pool_id: Optional[str] = None,
     ) -> None:
-        thread_pool_max_submit_count = 100
+        thread_pool_max_submit_count = dify_config.MAX_SUBMIT_COUNT
         thread_pool_max_workers = 10

         # init thread pool
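Both hunks above replace a hard-coded submission cap of 100 with the new MAX_SUBMIT_COUNT setting. The counter bookkeeping inside GraphEngineThreadPool is not part of this diff, so the following is only a sketch, under the assumption that submit() rejects further work once the cap is reached:

from concurrent.futures import ThreadPoolExecutor


class BoundedThreadPool(ThreadPoolExecutor):
    """Sketch: an executor that refuses submissions beyond a configured cap."""

    def __init__(self, max_workers=None, max_submit_count=100):
        super().__init__(max_workers)
        self.max_submit_count = max_submit_count
        self._submit_count = 0  # assumed bookkeeping, not shown in the hunk

    def submit(self, fn, /, *args, **kwargs):
        # Fail fast instead of queueing unboundedly when a workflow fans out.
        if self._submit_count >= self.max_submit_count:
            raise ValueError("Max submit count reached")
        self._submit_count += 1
        return super().submit(fn, *args, **kwargs)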
@@ -72,7 +72,11 @@ class BaseNode(Generic[GenericNodeData]):
             result = self._run()
         except Exception as e:
             logger.exception(f"Node {self.node_id} failed to run")
-            result = NodeRunResult(status=WorkflowNodeExecutionStatus.FAILED, error=str(e), error_type="SystemError")
+            result = NodeRunResult(
+                status=WorkflowNodeExecutionStatus.FAILED,
+                error=str(e),
+                error_type="WorkflowNodeError",
+            )

         if isinstance(result, NodeRunResult):
             yield RunCompletedEvent(run_result=result)
@@ -37,7 +37,7 @@ BODY_TYPE_TO_CONTENT_TYPE = {
 class Executor:
     method: Literal["get", "head", "post", "put", "delete", "patch"]
     url: str
-    params: Mapping[str, str] | None
+    params: list[tuple[str, str]] | None
     content: str | bytes | None
     data: Mapping[str, Any] | None
     files: Mapping[str, tuple[str | None, bytes, str]] | None
@@ -69,7 +69,7 @@ class Executor:
         self.method = node_data.method
         self.auth = node_data.authorization
         self.timeout = timeout
-        self.params = {}
+        self.params = []
         self.headers = {}
         self.content = None
         self.files = None
@@ -92,14 +92,48 @@ class Executor:
         self.url = self.variable_pool.convert_template(self.node_data.url).text

     def _init_params(self):
-        params = _plain_text_to_dict(self.node_data.params)
-        for key in params:
-            params[key] = self.variable_pool.convert_template(params[key]).text
-        self.params = params
+        """
+        Almost same as _init_headers(), difference:
+        1. response a list tuple to support same key, like 'aa=1&aa=2'
+        2. param value may have '\n', we need to splitlines then extract the variable value.
+        """
+        result = []
+        for line in self.node_data.params.splitlines():
+            if not (line := line.strip()):
+                continue
+
+            key, *value = line.split(":", 1)
+            if not (key := key.strip()):
+                continue
+
+            value = value[0].strip() if value else ""
+            result.append(
+                (self.variable_pool.convert_template(key).text, self.variable_pool.convert_template(value).text)
+            )
+
+        self.params = result

     def _init_headers(self):
+        """
+        Convert the header string of frontend to a dictionary.
+
+        Each line in the header string represents a key-value pair.
+        Keys and values are separated by ':'.
+        Empty values are allowed.
+
+        Examples:
+            'aa:bb\n cc:dd' -> {'aa': 'bb', 'cc': 'dd'}
+            'aa:\n cc:dd\n' -> {'aa': '', 'cc': 'dd'}
+            'aa\n cc : dd' -> {'aa': '', 'cc': 'dd'}
+
+        """
         headers = self.variable_pool.convert_template(self.node_data.headers).text
-        self.headers = _plain_text_to_dict(headers)
+        self.headers = {
+            key.strip(): (value[0].strip() if value else "")
+            for line in headers.splitlines()
+            if line.strip()
+            for key, *value in [line.split(":", 1)]
+        }

     def _init_body(self):
         body = self.node_data.body
@@ -292,33 +326,6 @@ class Executor:
         return raw


-def _plain_text_to_dict(text: str, /) -> dict[str, str]:
-    """
-    Convert a string of key-value pairs to a dictionary.
-
-    Each line in the input string represents a key-value pair.
-    Keys and values are separated by ':'.
-    Empty values are allowed.
-
-    Examples:
-        'aa:bb\n cc:dd' -> {'aa': 'bb', 'cc': 'dd'}
-        'aa:\n cc:dd\n' -> {'aa': '', 'cc': 'dd'}
-        'aa\n cc : dd' -> {'aa': '', 'cc': 'dd'}
-
-    Args:
-        convert_text (str): The input string to convert.
-
-    Returns:
-        dict[str, str]: A dictionary of key-value pairs.
-    """
-    return {
-        key.strip(): (value[0].strip() if value else "")
-        for line in text.splitlines()
-        if line.strip()
-        for key, *value in [line.split(":", 1)]
-    }
-
-
 def _generate_random_string(n: int) -> str:
     """
     Generate a random string of lowercase ASCII letters.
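The net effect of the Executor hunks: query params move from a dict to a list of (key, value) tuples, so repeated keys such as 'aa=1&aa=2' survive parsing. A runnable sketch of the same line-based parsing, without the variable-pool templating:

def parse_params(text: str) -> list[tuple[str, str]]:
    """Parse 'key:value' lines into (key, value) pairs, keeping duplicate keys."""
    result = []
    for line in text.splitlines():
        if not (line := line.strip()):
            continue  # skip blank lines
        key, *value = line.split(":", 1)
        if not (key := key.strip()):
            continue  # skip lines with an empty key
        result.append((key, value[0].strip() if value else ""))
    return result


# Duplicate keys are preserved, which the old dict-based parser could not do:
assert parse_params("aa:1\naa:2") == [("aa", "1"), ("aa", "2")]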
@@ -163,7 +163,9 @@ class IterationNode(BaseNode[IterationNodeData]):
         if self.node_data.is_parallel:
             futures: list[Future] = []
             q: Queue = Queue()
-            thread_pool = GraphEngineThreadPool(max_workers=self.node_data.parallel_nums, max_submit_count=100)
+            thread_pool = GraphEngineThreadPool(
+                max_workers=self.node_data.parallel_nums, max_submit_count=dify_config.MAX_SUBMIT_COUNT
+            )
             for index, item in enumerate(iterator_list_value):
                 future: Future = thread_pool.submit(
                     self._run_single_iter_parallel,
@@ -70,7 +70,20 @@ class KnowledgeRetrievalNode(BaseNode[KnowledgeRetrievalNodeData]):

         except KnowledgeRetrievalNodeError as e:
             logger.warning("Error when running knowledge retrieval node")
-            return NodeRunResult(status=WorkflowNodeExecutionStatus.FAILED, inputs=variables, error=str(e))
+            return NodeRunResult(
+                status=WorkflowNodeExecutionStatus.FAILED,
+                inputs=variables,
+                error=str(e),
+                error_type=type(e).__name__,
+            )
+        # Temporary handle all exceptions from DatasetRetrieval class here.
+        except Exception as e:
+            return NodeRunResult(
+                status=WorkflowNodeExecutionStatus.FAILED,
+                inputs=variables,
+                error=str(e),
+                error_type=type(e).__name__,
+            )

     def _fetch_dataset_retriever(self, node_data: KnowledgeRetrievalNodeData, query: str) -> list[dict[str, Any]]:
         available_datasets = []
@@ -160,18 +173,18 @@ class KnowledgeRetrievalNode(BaseNode[KnowledgeRetrievalNodeData]):
             reranking_model = None
             weights = None
         all_documents = dataset_retrieval.multiple_retrieve(
-            self.app_id,
-            self.tenant_id,
-            self.user_id,
-            self.user_from.value,
-            available_datasets,
-            query,
-            node_data.multiple_retrieval_config.top_k,
-            node_data.multiple_retrieval_config.score_threshold,
-            node_data.multiple_retrieval_config.reranking_mode,
-            reranking_model,
-            weights,
-            node_data.multiple_retrieval_config.reranking_enable,
+            app_id=self.app_id,
+            tenant_id=self.tenant_id,
+            user_id=self.user_id,
+            user_from=self.user_from.value,
+            available_datasets=available_datasets,
+            query=query,
+            top_k=node_data.multiple_retrieval_config.top_k,
+            score_threshold=node_data.multiple_retrieval_config.score_threshold,
+            reranking_mode=node_data.multiple_retrieval_config.reranking_mode,
+            reranking_model=reranking_model,
+            weights=weights,
+            reranking_enable=node_data.multiple_retrieval_config.reranking_enable,
         )
         dify_documents = [item for item in all_documents if item.provider == "dify"]
         external_documents = [item for item in all_documents if item.provider == "external"]
@@ -92,6 +92,16 @@ class ToolNode(BaseNode[ToolNodeData]):
                 error=f"Failed to invoke tool: {str(e)}",
                 error_type=type(e).__name__,
             )
+        except Exception as e:
+            return NodeRunResult(
+                status=WorkflowNodeExecutionStatus.FAILED,
+                inputs=parameters_for_log,
+                metadata={
+                    NodeRunMetadataKey.TOOL_INFO: tool_info,
+                },
+                error=f"Failed to invoke tool: {str(e)}",
+                error_type="UnknownError",
+            )

         # convert tool messages
         plain_text, files, json = self._convert_tool_messages(messages)
@@ -1,8 +1,11 @@
 import logging
 import os
 import sys
+import uuid
 from logging.handlers import RotatingFileHandler

+import flask
+
 from configs import dify_config
 from dify_app import DifyApp

@@ -22,11 +25,14 @@ def init_app(app: DifyApp):
     )

     # Always add StreamHandler to log to console
-    log_handlers.append(logging.StreamHandler(sys.stdout))
+    sh = logging.StreamHandler(sys.stdout)
+    sh.addFilter(RequestIdFilter())
+    log_formatter = logging.Formatter(fmt=dify_config.LOG_FORMAT)
+    sh.setFormatter(log_formatter)
+    log_handlers.append(sh)

     logging.basicConfig(
         level=dify_config.LOG_LEVEL,
         format=dify_config.LOG_FORMAT,
         datefmt=dify_config.LOG_DATEFORMAT,
         handlers=log_handlers,
         force=True,
@@ -44,3 +50,22 @@ def init_app(app: DifyApp):

     for handler in logging.root.handlers:
         handler.formatter.converter = time_converter
+
+
+def get_request_id():
+    if getattr(flask.g, "request_id", None):
+        return flask.g.request_id
+
+    new_uuid = uuid.uuid4().hex[:10]
+    flask.g.request_id = new_uuid
+
+    return new_uuid
+
+
+class RequestIdFilter(logging.Filter):
+    # This is a logging filter that makes the request ID available for use in
+    # the logging format. Note that we're checking if we're in a request
+    # context, as we may want to log things before Flask is fully loaded.
+    def filter(self, record):
+        record.req_id = get_request_id() if flask.has_request_context() else ""
+        return True
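The filter attaches a req_id attribute to every log record, which is what the %(req_id)s placeholder in the new LOG_FORMAT default resolves to. A self-contained sketch of the mechanism, with the request id hard-coded instead of read from flask.g:

import logging


class StaticRequestIdFilter(logging.Filter):
    def filter(self, record):
        record.req_id = "req-0001"  # in Dify this comes from flask.g per request
        return True


handler = logging.StreamHandler()
handler.addFilter(StaticRequestIdFilter())
handler.setFormatter(logging.Formatter("%(levelname)s %(req_id)s %(message)s"))
logger = logging.getLogger("demo")
logger.addHandler(handler)
logger.warning("hello")  # prints: WARNING req-0001 hello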
@@ -1,11 +1,10 @@
 import logging
-from collections.abc import Callable, Generator, Mapping
+from collections.abc import Callable, Generator
 from typing import Union

 from flask import Flask

 from configs import dify_config
-from configs.middleware.storage.opendal_storage_config import OpenDALScheme
 from dify_app import DifyApp
 from extensions.storage.base_storage import BaseStorage
 from extensions.storage.storage_type import StorageType
@@ -23,21 +22,17 @@ class Storage:
     def get_storage_factory(storage_type: str) -> Callable[[], BaseStorage]:
         match storage_type:
             case StorageType.S3:
-                from extensions.storage.opendal_storage import OpenDALStorage
+                from extensions.storage.aws_s3_storage import AwsS3Storage

-                kwargs = _load_s3_storage_kwargs()
-                return lambda: OpenDALStorage(scheme=OpenDALScheme.S3, **kwargs)
+                return AwsS3Storage
             case StorageType.OPENDAL:
                 from extensions.storage.opendal_storage import OpenDALStorage

-                scheme = OpenDALScheme(dify_config.STORAGE_OPENDAL_SCHEME)
-                kwargs = _load_opendal_storage_kwargs(scheme)
-                return lambda: OpenDALStorage(scheme=scheme, **kwargs)
+                return lambda: OpenDALStorage(dify_config.OPENDAL_SCHEME)
             case StorageType.LOCAL:
                 from extensions.storage.opendal_storage import OpenDALStorage

-                kwargs = _load_local_storage_kwargs()
-                return lambda: OpenDALStorage(scheme=OpenDALScheme.FS, **kwargs)
+                return lambda: OpenDALStorage(scheme="fs", root=dify_config.STORAGE_LOCAL_PATH)
             case StorageType.AZURE_BLOB:
                 from extensions.storage.azure_blob_storage import AzureBlobStorage

@@ -75,7 +70,7 @@ class Storage:

                 return SupabaseStorage
             case _:
-                raise ValueError(f"Unsupported storage type {storage_type}")
+                raise ValueError(f"unsupported storage type {storage_type}")

     def save(self, filename, data):
         try:
@@ -130,81 +125,6 @@ class Storage:
             raise e


-def _load_s3_storage_kwargs() -> Mapping[str, str]:
-    """
-    Load the kwargs for S3 storage based on dify_config.
-    Handles special cases like AWS managed IAM and R2.
-    """
-    kwargs = {
-        "root": "/",
-        "bucket": dify_config.S3_BUCKET_NAME,
-        "endpoint": dify_config.S3_ENDPOINT,
-        "access_key_id": dify_config.S3_ACCESS_KEY,
-        "secret_access_key": dify_config.S3_SECRET_KEY,
-        "region": dify_config.S3_REGION,
-    }
-    kwargs = {k: v for k, v in kwargs.items() if isinstance(v, str)}
-
-    # For AWS managed IAM
-    if dify_config.S3_USE_AWS_MANAGED_IAM:
-        from extensions.storage.opendal_storage import S3_SSE_WITH_AWS_MANAGED_IAM_KWARGS
-
-        logger.debug("Using AWS managed IAM role for S3")
-        kwargs = {**kwargs, **{k: v for k, v in S3_SSE_WITH_AWS_MANAGED_IAM_KWARGS.items() if k not in kwargs}}
-
-    # For Cloudflare R2
-    if kwargs.get("endpoint"):
-        from extensions.storage.opendal_storage import S3_R2_COMPATIBLE_KWARGS, is_r2_endpoint
-
-        if is_r2_endpoint(kwargs["endpoint"]):
-            logger.debug("Using R2 for OpenDAL S3")
-            kwargs = {**kwargs, **{k: v for k, v in S3_R2_COMPATIBLE_KWARGS.items() if k not in kwargs}}
-
-    return kwargs
-
-
-def _load_local_storage_kwargs() -> Mapping[str, str]:
-    """
-    Load the kwargs for local storage based on dify_config.
-    """
-    return {
-        "root": dify_config.STORAGE_LOCAL_PATH,
-    }
-
-
-def _load_opendal_storage_kwargs(scheme: OpenDALScheme) -> Mapping[str, str]:
-    """
-    Load the kwargs for OpenDAL storage based on the given scheme.
-    """
-    match scheme:
-        case OpenDALScheme.FS:
-            kwargs = {
-                "root": dify_config.OPENDAL_FS_ROOT,
-            }
-        case OpenDALScheme.S3:
-            # Load OpenDAL S3-related configs
-            kwargs = {
-                "root": dify_config.OPENDAL_S3_ROOT,
-                "bucket": dify_config.OPENDAL_S3_BUCKET,
-                "endpoint": dify_config.OPENDAL_S3_ENDPOINT,
-                "access_key_id": dify_config.OPENDAL_S3_ACCESS_KEY_ID,
-                "secret_access_key": dify_config.OPENDAL_S3_SECRET_ACCESS_KEY,
-                "region": dify_config.OPENDAL_S3_REGION,
-            }
-
-            # For Cloudflare R2
-            if kwargs.get("endpoint"):
-                from extensions.storage.opendal_storage import S3_R2_COMPATIBLE_KWARGS, is_r2_endpoint
-
-                if is_r2_endpoint(kwargs["endpoint"]):
-                    logger.debug("Using R2 for OpenDAL S3")
-                    kwargs = {**kwargs, **{k: v for k, v in S3_R2_COMPATIBLE_KWARGS.items() if k not in kwargs}}
-        case _:
-            logger.warning(f"Unrecognized OpenDAL scheme: {scheme}, will fall back to default.")
-            kwargs = {}
-    return kwargs
-
-
 storage = Storage()
@@ -1,46 +1,57 @@
 import logging
+import os
 from collections.abc import Generator
 from pathlib import Path
-from urllib.parse import urlparse

 import opendal
+from dotenv import dotenv_values

-from configs.middleware.storage.opendal_storage_config import OpenDALScheme
 from extensions.storage.base_storage import BaseStorage

-S3_R2_HOSTNAME = "r2.cloudflarestorage.com"
-S3_R2_COMPATIBLE_KWARGS = {
-    "delete_max_size": "700",
-    "disable_stat_with_override": "true",
-    "region": "auto",
-}
-S3_SSE_WITH_AWS_MANAGED_IAM_KWARGS = {
-    "server_side_encryption": "aws:kms",
-}
 logger = logging.getLogger(__name__)


-def is_r2_endpoint(endpoint: str) -> bool:
-    if not endpoint:
-        return False
+def _get_opendal_kwargs(*, scheme: str, env_file_path: str = ".env", prefix: str = "OPENDAL_"):
+    kwargs = {}
+    config_prefix = prefix + scheme.upper() + "_"
+    for key, value in os.environ.items():
+        if key.startswith(config_prefix):
+            kwargs[key[len(config_prefix) :].lower()] = value

-    parsed_url = urlparse(endpoint)
-    return bool(parsed_url.hostname and parsed_url.hostname.endswith(S3_R2_HOSTNAME))
+    file_env_vars = dotenv_values(env_file_path)
+    for key, value in file_env_vars.items():
+        if key.startswith(config_prefix) and key[len(config_prefix) :].lower() not in kwargs and value:
+            kwargs[key[len(config_prefix) :].lower()] = value
+
+    return kwargs


 class OpenDALStorage(BaseStorage):
-    def __init__(self, scheme: OpenDALScheme, **kwargs):
-        if scheme == OpenDALScheme.FS:
-            Path(kwargs["root"]).mkdir(parents=True, exist_ok=True)
+    def __init__(self, scheme: str, **kwargs):
+        kwargs = kwargs or _get_opendal_kwargs(scheme=scheme)
+
+        if scheme == "fs":
+            root = kwargs.get("root", "storage")
+            Path(root).mkdir(parents=True, exist_ok=True)

         self.op = opendal.Operator(scheme=scheme, **kwargs)
+        logger.debug(f"opendal operator created with scheme {scheme}")
+        retry_layer = opendal.layers.RetryLayer(max_times=3, factor=2.0, jitter=True)
+        self.op = self.op.layer(retry_layer)
+        logger.debug("added retry layer to opendal operator")

     def save(self, filename: str, data: bytes) -> None:
         self.op.write(path=filename, bs=data)
+        logger.debug(f"file {filename} saved")

     def load_once(self, filename: str) -> bytes:
         if not self.exists(filename):
             raise FileNotFoundError("File not found")

-        return self.op.read(path=filename)
+        content = self.op.read(path=filename)
+        logger.debug(f"file {filename} loaded")
+        return content

     def load_stream(self, filename: str) -> Generator:
         if not self.exists(filename):
@@ -50,6 +61,7 @@ class OpenDALStorage(BaseStorage):
         file = self.op.open(path=filename, mode="rb")
         while chunk := file.read(batch_size):
             yield chunk
+        logger.debug(f"file {filename} loaded as stream")

     def download(self, filename: str, target_filepath: str):
         if not self.exists(filename):
@@ -57,16 +69,22 @@ class OpenDALStorage(BaseStorage):

         with Path(target_filepath).open("wb") as f:
             f.write(self.op.read(path=filename))
+        logger.debug(f"file {filename} downloaded to {target_filepath}")

     def exists(self, filename: str) -> bool:
         # FIXME this is a workaround for opendal python-binding do not have a exists method and no better
         # error handler here when opendal python-binding has a exists method, we should use it
         # more https://github.com/apache/opendal/blob/main/bindings/python/src/operator.rs
         try:
-            return self.op.stat(path=filename).mode.is_file()
-        except Exception as e:
+            res = self.op.stat(path=filename).mode.is_file()
+            logger.debug(f"file {filename} checked")
+            return res
+        except Exception:
             return False

     def delete(self, filename: str):
+        if self.exists(filename):
+            self.op.delete(path=filename)
+            logger.debug(f"file {filename} deleted")
+            return
+        logger.debug(f"file {filename} not found, skip delete")
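With _get_opendal_kwargs, every OPENDAL_<SCHEME>_<KEY> variable from the process environment (or a .env file) becomes a keyword argument for the OpenDAL operator, so OPENDAL_SCHEME=fs plus OPENDAL_FS_ROOT=storage is a complete configuration. A simplified, runnable sketch of the environment pass only (the .env fallback is omitted):

import os

os.environ["OPENDAL_FS_ROOT"] = "storage"  # mirrors the default configuration


def get_opendal_kwargs(scheme: str, prefix: str = "OPENDAL_") -> dict[str, str]:
    """Collect OPENDAL_<SCHEME>_* env vars into lowercase operator kwargs."""
    config_prefix = prefix + scheme.upper() + "_"
    return {
        key[len(config_prefix):].lower(): value
        for key, value in os.environ.items()
        if key.startswith(config_prefix)
    }


assert get_opendal_kwargs("fs") == {"root": "storage"}  # feeds opendal.Operator("fs", root="storage")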
@@ -9,11 +9,11 @@ import uuid
 from collections.abc import Generator, Mapping
 from datetime import datetime
 from hashlib import sha256
-from typing import Any, Optional, Union
+from typing import Any, Optional, Union, cast
 from zoneinfo import available_timezones

 from flask import Response, stream_with_context
-from flask_restful import fields
+from flask_restful import fields  # type: ignore

 from configs import dify_config
 from core.app.features.rate_limiting.rate_limit import RateLimitGenerator
@@ -168,11 +168,11 @@ def generate_string(n):

 def extract_remote_ip(request) -> str:
     if request.headers.get("CF-Connecting-IP"):
-        return request.headers.get("Cf-Connecting-Ip")
+        return cast(str, request.headers.get("Cf-Connecting-Ip"))
     elif request.headers.getlist("X-Forwarded-For"):
-        return request.headers.getlist("X-Forwarded-For")[0]
+        return cast(str, request.headers.getlist("X-Forwarded-For")[0])
     else:
-        return request.remote_addr
+        return cast(str, request.remote_addr)


 def generate_text_hash(text: str) -> str:
@@ -221,12 +221,14 @@ class TokenManager:
             token_data.update(additional_data)

         expiry_minutes = dify_config.model_dump().get(f"{token_type.upper()}_TOKEN_EXPIRY_MINUTES")
+        if expiry_minutes is None:
+            raise ValueError(f"Expiry minutes for {token_type} token is not set")
         token_key = cls._get_token_key(token, token_type)
         expiry_time = int(expiry_minutes * 60)
         redis_client.setex(token_key, expiry_time, json.dumps(token_data))

         if account_id:
-            cls._set_current_token_for_account(account.id, token, token_type, expiry_minutes)
+            cls._set_current_token_for_account(account_id, token, token_type, expiry_minutes)

         return token
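The guard added above exists because the expiry lookup is dynamic: the token type is uppercased and joined with _TOKEN_EXPIRY_MINUTES to find the matching config field, and an unknown token type would otherwise crash later on None * 60. A sketch of that lookup, assuming a pydantic-settings config with one such field:

from pydantic_settings import BaseSettings


class SketchConfig(BaseSettings):
    RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: int = 5


def get_expiry_minutes(config: SketchConfig, token_type: str) -> int:
    """Resolve '<TOKEN_TYPE>_TOKEN_EXPIRY_MINUTES' by name; fail loudly when unset."""
    expiry_minutes = config.model_dump().get(f"{token_type.upper()}_TOKEN_EXPIRY_MINUTES")
    if expiry_minutes is None:
        raise ValueError(f"Expiry minutes for {token_type} token is not set")
    return expiry_minutes


assert get_expiry_minutes(SketchConfig(), "reset_password") == 5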
@@ -225,8 +225,10 @@ class Workflow(db.Model):
         from models.tools import WorkflowToolProvider

         return (
-            db.session.query(WorkflowToolProvider).filter(WorkflowToolProvider.app_id == self.app_id).first()
-            is not None
+            db.session.query(WorkflowToolProvider)
+            .filter(WorkflowToolProvider.tenant_id == self.tenant_id, WorkflowToolProvider.app_id == self.app_id)
+            .count()
+            > 0
         )

     @property
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.

 [[package]]
 name = "aiofiles"
@@ -955,6 +955,10 @@ files = [
     {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a37b8f0391212d29b3a91a799c8e4a2855e0576911cdfb2515487e30e322253d"},
     {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e84799f09591700a4154154cab9787452925578841a94321d5ee8fb9a9a328f0"},
     {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f66b5337fa213f1da0d9000bc8dc0cb5b896b726eefd9c6046f699b169c41b9e"},
+    {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5dab0844f2cf82be357a0eb11a9087f70c5430b2c241493fc122bb6f2bb0917c"},
+    {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e4fe605b917c70283db7dfe5ada75e04561479075761a0b3866c081d035b01c1"},
+    {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:1e9a65b5736232e7a7f91ff3d02277f11d339bf34099a56cdab6a8b3410a02b2"},
+    {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:58d4b711689366d4a03ac7957ab8c28890415e267f9b6589969e74b6e42225ec"},
     {file = "Brotli-1.1.0-cp310-cp310-win32.whl", hash = "sha256:be36e3d172dc816333f33520154d708a2657ea63762ec16b62ece02ab5e4daf2"},
     {file = "Brotli-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:0c6244521dda65ea562d5a69b9a26120769b7a9fb3db2fe9545935ed6735b128"},
     {file = "Brotli-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a3daabb76a78f829cafc365531c972016e4aa8d5b4bf60660ad8ecee19df7ccc"},
@@ -967,8 +971,14 @@ files = [
     {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:19c116e796420b0cee3da1ccec3b764ed2952ccfcc298b55a10e5610ad7885f9"},
     {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:510b5b1bfbe20e1a7b3baf5fed9e9451873559a976c1a78eebaa3b86c57b4265"},
     {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a1fd8a29719ccce974d523580987b7f8229aeace506952fa9ce1d53a033873c8"},
+    {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c247dd99d39e0338a604f8c2b3bc7061d5c2e9e2ac7ba9cc1be5a69cb6cd832f"},
+    {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1b2c248cd517c222d89e74669a4adfa5577e06ab68771a529060cf5a156e9757"},
+    {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2a24c50840d89ded6c9a8fdc7b6ed3692ed4e86f1c4a4a938e1e92def92933e0"},
+    {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f31859074d57b4639318523d6ffdca586ace54271a73ad23ad021acd807eb14b"},
     {file = "Brotli-1.1.0-cp311-cp311-win32.whl", hash = "sha256:39da8adedf6942d76dc3e46653e52df937a3c4d6d18fdc94a7c29d263b1f5b50"},
     {file = "Brotli-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:aac0411d20e345dc0920bdec5548e438e999ff68d77564d5e9463a7ca9d3e7b1"},
+    {file = "Brotli-1.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:32d95b80260d79926f5fab3c41701dbb818fde1c9da590e77e571eefd14abe28"},
+    {file = "Brotli-1.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b760c65308ff1e462f65d69c12e4ae085cff3b332d894637f6273a12a482d09f"},
     {file = "Brotli-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:316cc9b17edf613ac76b1f1f305d2a748f1b976b033b049a6ecdfd5612c70409"},
     {file = "Brotli-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:caf9ee9a5775f3111642d33b86237b05808dafcd6268faa492250e9b78046eb2"},
     {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70051525001750221daa10907c77830bc889cb6d865cc0b813d9db7fefc21451"},
@@ -979,8 +989,24 @@ files = [
     {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4093c631e96fdd49e0377a9c167bfd75b6d0bad2ace734c6eb20b348bc3ea180"},
     {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e4c4629ddad63006efa0ef968c8e4751c5868ff0b1c5c40f76524e894c50248"},
     {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:861bf317735688269936f755fa136a99d1ed526883859f86e41a5d43c61d8966"},
+    {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:87a3044c3a35055527ac75e419dfa9f4f3667a1e887ee80360589eb8c90aabb9"},
+    {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c5529b34c1c9d937168297f2c1fde7ebe9ebdd5e121297ff9c043bdb2ae3d6fb"},
+    {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ca63e1890ede90b2e4454f9a65135a4d387a4585ff8282bb72964fab893f2111"},
+    {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e79e6520141d792237c70bcd7a3b122d00f2613769ae0cb61c52e89fd3443839"},
     {file = "Brotli-1.1.0-cp312-cp312-win32.whl", hash = "sha256:5f4d5ea15c9382135076d2fb28dde923352fe02951e66935a9efaac8f10e81b0"},
     {file = "Brotli-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:906bc3a79de8c4ae5b86d3d75a8b77e44404b0f4261714306e3ad248d8ab0951"},
+    {file = "Brotli-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8bf32b98b75c13ec7cf774164172683d6e7891088f6316e54425fde1efc276d5"},
+    {file = "Brotli-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7bc37c4d6b87fb1017ea28c9508b36bbcb0c3d18b4260fcdf08b200c74a6aee8"},
+    {file = "Brotli-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c0ef38c7a7014ffac184db9e04debe495d317cc9c6fb10071f7fefd93100a4f"},
+    {file = "Brotli-1.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91d7cc2a76b5567591d12c01f019dd7afce6ba8cba6571187e21e2fc418ae648"},
+    {file = "Brotli-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a93dde851926f4f2678e704fadeb39e16c35d8baebd5252c9fd94ce8ce68c4a0"},
+    {file = "Brotli-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0db75f47be8b8abc8d9e31bc7aad0547ca26f24a54e6fd10231d623f183d089"},
+    {file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6967ced6730aed543b8673008b5a391c3b1076d834ca438bbd70635c73775368"},
+    {file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7eedaa5d036d9336c95915035fb57422054014ebdeb6f3b42eac809928e40d0c"},
+    {file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d487f5432bf35b60ed625d7e1b448e2dc855422e87469e3f450aa5552b0eb284"},
+    {file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:832436e59afb93e1836081a20f324cb185836c617659b07b129141a8426973c7"},
+    {file = "Brotli-1.1.0-cp313-cp313-win32.whl", hash = "sha256:43395e90523f9c23a3d5bdf004733246fba087f2948f87ab28015f12359ca6a0"},
+    {file = "Brotli-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:9011560a466d2eb3f5a6e4929cf4a09be405c64154e12df0dd72713f6500e32b"},
     {file = "Brotli-1.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a090ca607cbb6a34b0391776f0cb48062081f5f60ddcce5d11838e67a01928d1"},
     {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de9d02f5bda03d27ede52e8cfe7b865b066fa49258cbab568720aa5be80a47d"},
     {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2333e30a5e00fe0fe55903c8832e08ee9c3b1382aacf4db26664a16528d51b4b"},
@@ -990,6 +1016,10 @@ files = [
     {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:fd5f17ff8f14003595ab414e45fce13d073e0762394f957182e69035c9f3d7c2"},
     {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:069a121ac97412d1fe506da790b3e69f52254b9df4eb665cd42460c837193354"},
     {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:e93dfc1a1165e385cc8239fab7c036fb2cd8093728cbd85097b284d7b99249a2"},
+    {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:aea440a510e14e818e67bfc4027880e2fb500c2ccb20ab21c7a7c8b5b4703d75"},
+    {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_i686.whl", hash = "sha256:6974f52a02321b36847cd19d1b8e381bf39939c21efd6ee2fc13a28b0d99348c"},
+    {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_ppc64le.whl", hash = "sha256:a7e53012d2853a07a4a79c00643832161a910674a893d296c9f1259859a289d2"},
+    {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:d7702622a8b40c49bffb46e1e3ba2e81268d5c04a34f460978c6b5517a34dd52"},
     {file = "Brotli-1.1.0-cp36-cp36m-win32.whl", hash = "sha256:a599669fd7c47233438a56936988a2478685e74854088ef5293802123b5b2460"},
     {file = "Brotli-1.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:d143fd47fad1db3d7c27a1b1d66162e855b5d50a89666af46e1679c496e8e579"},
     {file = "Brotli-1.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:11d00ed0a83fa22d29bc6b64ef636c4552ebafcef57154b4ddd132f5638fbd1c"},
@@ -1001,6 +1031,10 @@ files = [
     {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:919e32f147ae93a09fe064d77d5ebf4e35502a8df75c29fb05788528e330fe74"},
     {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:23032ae55523cc7bccb4f6a0bf368cd25ad9bcdcc1990b64a647e7bbcce9cb5b"},
     {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:224e57f6eac61cc449f498cc5f0e1725ba2071a3d4f48d5d9dffba42db196438"},
+    {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:cb1dac1770878ade83f2ccdf7d25e494f05c9165f5246b46a621cc849341dc01"},
+    {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:3ee8a80d67a4334482d9712b8e83ca6b1d9bc7e351931252ebef5d8f7335a547"},
+    {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:5e55da2c8724191e5b557f8e18943b1b4839b8efc3ef60d65985bcf6f587dd38"},
+    {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:d342778ef319e1026af243ed0a07c97acf3bad33b9f29e7ae6a1f68fd083e90c"},
     {file = "Brotli-1.1.0-cp37-cp37m-win32.whl", hash = "sha256:587ca6d3cef6e4e868102672d3bd9dc9698c309ba56d41c2b9c85bbb903cdb95"},
     {file = "Brotli-1.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2954c1c23f81c2eaf0b0717d9380bd348578a94161a65b3a2afc62c86467dd68"},
     {file = "Brotli-1.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:efa8b278894b14d6da122a72fefcebc28445f2d3f880ac59d46c90f4c13be9a3"},
@@ -1013,6 +1047,10 @@ files = [
     {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ab4fbee0b2d9098c74f3057b2bc055a8bd92ccf02f65944a241b4349229185a"},
     {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:141bd4d93984070e097521ed07e2575b46f817d08f9fa42b16b9b5f27b5ac088"},
     {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fce1473f3ccc4187f75b4690cfc922628aed4d3dd013d047f95a9b3919a86596"},
+    {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d2b35ca2c7f81d173d2fadc2f4f31e88cc5f7a39ae5b6db5513cf3383b0e0ec7"},
+    {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:af6fa6817889314555aede9a919612b23739395ce767fe7fcbea9a80bf140fe5"},
+    {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:2feb1d960f760a575dbc5ab3b1c00504b24caaf6986e2dc2b01c09c87866a943"},
+    {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4410f84b33374409552ac9b6903507cdb31cd30d2501fc5ca13d18f73548444a"},
     {file = "Brotli-1.1.0-cp38-cp38-win32.whl", hash = "sha256:db85ecf4e609a48f4b29055f1e144231b90edc90af7481aa731ba2d059226b1b"},
     {file = "Brotli-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3d7954194c36e304e1523f55d7042c59dc53ec20dd4e9ea9d151f1b62b4415c0"},
     {file = "Brotli-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5fb2ce4b8045c78ebbc7b8f3c15062e435d47e7393cc57c25115cfd49883747a"},
@@ -1025,6 +1063,10 @@ files = [
     {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:949f3b7c29912693cee0afcf09acd6ebc04c57af949d9bf77d6101ebb61e388c"},
     {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:89f4988c7203739d48c6f806f1e87a1d96e0806d44f0fba61dba81392c9e474d"},
     {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:de6551e370ef19f8de1807d0a9aa2cdfdce2e85ce88b122fe9f6b2b076837e59"},
+    {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0737ddb3068957cf1b054899b0883830bb1fec522ec76b1098f9b6e0f02d9419"},
+    {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4f3607b129417e111e30637af1b56f24f7a49e64763253bbc275c75fa887d4b2"},
+    {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:6c6e0c425f22c1c719c42670d561ad682f7bfeeef918edea971a79ac5252437f"},
+    {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:494994f807ba0b92092a163a0a283961369a65f6cbe01e8891132b7a320e61eb"},
     {file = "Brotli-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f0d8a7a6b5983c2496e364b969f0e526647a06b075d034f3297dc66f3b360c64"},
     {file = "Brotli-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cdad5b9014d83ca68c25d2e9444e28e967ef16e80f6b436918c700c117a85467"},
     {file = "Brotli-1.1.0.tar.gz", hash = "sha256:81de08ac11bcb85841e440c13611c00b67d3bf82698314928d0b676362546724"},
@ -7482,23 +7524,24 @@ image = ["Pillow (>=8.0.0)"]
|
|||
|
||||
[[package]]
|
||||
name = "pypdfium2"
|
||||
version = "4.17.0"
|
||||
version = "4.30.0"
|
||||
description = "Python bindings to PDFium"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
files = [
|
||||
{file = "pypdfium2-4.17.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:e9ed42d5a5065ae41ae3ead3cd642e1f21b6039e69ccc204e260e218e91cd7e1"},
|
||||
{file = "pypdfium2-4.17.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:0a3b5a8eca53a1e68434969821b70bd2bc9ac2b70e58daf516c6ff0b6b5779e7"},
|
||||
{file = "pypdfium2-4.17.0-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:854e04b51205466ec415b86588fe5dc593e9ca3e8e15b5aa05978c5352bd57d2"},
|
||||
{file = "pypdfium2-4.17.0-py3-none-manylinux_2_17_armv7l.whl", hash = "sha256:9ff8707b28568e9585bdf9a96b7a8a9f91c0b5ad05af119b49381dad89983364"},
|
||||
{file = "pypdfium2-4.17.0-py3-none-manylinux_2_17_i686.whl", hash = "sha256:09ecbef6212993db0b5460cfd46d6b157a921ff45c97b0764e6fe8ea2e8cdebf"},
|
||||
{file = "pypdfium2-4.17.0-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:f680e469b79c71c3fb086d7ced8361fbd66f4cd7b0ad08ff888289fe6743ab32"},
|
||||
{file = "pypdfium2-4.17.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:1ba7a7da48fbf0f1aaa903dac7d0e62186d6e8ae9a78b7b7b836d3f1b3d1be5d"},
|
||||
{file = "pypdfium2-4.17.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:451752170caf59d4b4572b527c2858dfff96eb1da35f2822c66cdce006dd4eae"},
|
||||
{file = "pypdfium2-4.17.0-py3-none-win32.whl", hash = "sha256:4930cfa793298214fa644c6986f6466e21f98eba3f338b4577614ebd8aa34af5"},
|
||||
{file = "pypdfium2-4.17.0-py3-none-win_amd64.whl", hash = "sha256:99de7f336e967dea4d324484f581fff55db1eb3c8e90baa845567dd9a3cc84f3"},
|
||||
{file = "pypdfium2-4.17.0-py3-none-win_arm64.whl", hash = "sha256:9381677b489c13d64ea4f8cbf6ebfc858216b052883e01e40fa993c2818a078e"},
|
||||
{file = "pypdfium2-4.17.0.tar.gz", hash = "sha256:2a2b3273c4614ee2004df60ace5f387645f843418ae29f379408ee11560241c0"},
|
||||
{file = "pypdfium2-4.30.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:b33ceded0b6ff5b2b93bc1fe0ad4b71aa6b7e7bd5875f1ca0cdfb6ba6ac01aab"},
|
||||
{file = "pypdfium2-4.30.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:4e55689f4b06e2d2406203e771f78789bd4f190731b5d57383d05cf611d829de"},
|
||||
{file = "pypdfium2-4.30.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e6e50f5ce7f65a40a33d7c9edc39f23140c57e37144c2d6d9e9262a2a854854"},
|
||||
{file = "pypdfium2-4.30.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3d0dd3ecaffd0b6dbda3da663220e705cb563918249bda26058c6036752ba3a2"},
|
||||
{file = "pypdfium2-4.30.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc3bf29b0db8c76cdfaac1ec1cde8edf211a7de7390fbf8934ad2aa9b4d6dfad"},
|
||||
{file = "pypdfium2-4.30.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1f78d2189e0ddf9ac2b7a9b9bd4f0c66f54d1389ff6c17e9fd9dc034d06eb3f"},
|
||||
{file = "pypdfium2-4.30.0-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:5eda3641a2da7a7a0b2f4dbd71d706401a656fea521b6b6faa0675b15d31a163"},
|
||||
{file = "pypdfium2-4.30.0-py3-none-musllinux_1_1_i686.whl", hash = "sha256:0dfa61421b5eb68e1188b0b2231e7ba35735aef2d867d86e48ee6cab6975195e"},
|
||||
{file = "pypdfium2-4.30.0-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:f33bd79e7a09d5f7acca3b0b69ff6c8a488869a7fab48fdf400fec6e20b9c8be"},
|
||||
{file = "pypdfium2-4.30.0-py3-none-win32.whl", hash = "sha256:ee2410f15d576d976c2ab2558c93d392a25fb9f6635e8dd0a8a3a5241b275e0e"},
|
||||
{file = "pypdfium2-4.30.0-py3-none-win_amd64.whl", hash = "sha256:90dbb2ac07be53219f56be09961eb95cf2473f834d01a42d901d13ccfad64b4c"},
|
||||
{file = "pypdfium2-4.30.0-py3-none-win_arm64.whl", hash = "sha256:119b2969a6d6b1e8d55e99caaf05290294f2d0fe49c12a3f17102d01c441bd29"},
|
||||
{file = "pypdfium2-4.30.0.tar.gz", hash = "sha256:48b5b7e5566665bc1015b9d69c1ebabe21f6aee468b509531c3c8318eeee2e16"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -11052,4 +11095,4 @@ cffi = ["cffi (>=1.11)"]
|
|||
[metadata]
|
||||
lock-version = "2.0"
|
||||
python-versions = ">=3.11,<3.13"
|
||||
content-hash = "1aa6a44bc9270d50c9c0ea09f55a304b5148bf4dbbbb068ff1b1ea8da6fa60cc"
|
||||
content-hash = "14476bf95504a4df4b8d5a5c6608c6aa3dae7499d27d1e41ef39d761cc7c693d"
|
||||
|
|
|
|||
|
|
@ -66,7 +66,7 @@ pydantic = "~2.9.2"
|
|||
pydantic-settings = "~2.6.0"
|
||||
pydantic_extra_types = "~2.9.0"
|
||||
pyjwt = "~2.8.0"
|
||||
pypdfium2 = "~4.17.0"
|
||||
pypdfium2 = "~4.30.0"
|
||||
python = ">=3.11,<3.13"
|
||||
python-docx = "~1.1.0"
|
||||
python-dotenv = "1.0.0"
|
||||
|
|
|
|||
|
|
@@ -420,7 +420,7 @@ class AccountService:
         if count is None:
             count = 0
         count = int(count) + 1
-        redis_client.setex(key, 60 * 60 * 24, count)
+        redis_client.setex(key, dify_config.LOGIN_LOCKOUT_DURATION, count)

     @staticmethod
     def is_login_error_rate_limit(email: str) -> bool:
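The change above makes the lockout window configurable through LOGIN_LOCKOUT_DURATION (86400 seconds, i.e. 24 hours, by default) instead of the inlined 60 * 60 * 24. A sketch of the counter pattern; the Redis key name here is an assumption for illustration:

LOGIN_LOCKOUT_DURATION = 86400  # seconds, matching the new default


def add_login_error(redis_client, email: str) -> None:
    """Increment a per-email failure counter and (re)arm its lockout TTL."""
    key = f"login_error_rate_limit:{email}"  # assumed key scheme
    count = redis_client.get(key)
    count = (int(count) if count is not None else 0) + 1
    # SETEX stores the counter with a TTL, so the lockout window slides on each failure.
    redis_client.setex(key, LOGIN_LOCKOUT_DURATION, count)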
@@ -57,13 +57,7 @@ class DatabaseRecommendAppRetrieval(RecommendAppRetrievalBase):

         recommended_app_result = {
             "id": recommended_app.id,
-            "app": {
-                "id": app.id,
-                "name": app.name,
-                "mode": app.mode,
-                "icon": app.icon,
-                "icon_background": app.icon_background,
-            },
+            "app": recommended_app.app,
             "app_id": recommended_app.app_id,
             "description": site.description,
             "copyright": site.copyright,
@@ -3,7 +3,7 @@ import logging
 import time

 import click
-from celery import shared_task
+from celery import shared_task  # type: ignore

 from core.indexing_runner import DocumentIsPausedError
 from extensions.ext_database import db
@@ -68,11 +68,9 @@ def external_document_indexing_task(
         settings = ExternalDatasetService.get_external_knowledge_api_settings(
             json.loads(external_knowledge_api.settings)
         )
-        # assemble headers
-        headers = ExternalDatasetService.assembling_headers(settings.authorization, settings.headers)

         # do http request
-        response = ExternalDatasetService.process_external_api(settings, headers, process_parameter, files)
+        response = ExternalDatasetService.process_external_api(settings, files)
         job_id = response.json().get("job_id")
         if job_id:
             # save job_id to dataset
@@ -1,4 +1,5 @@
 from collections.abc import Generator
+from unittest.mock import MagicMock

 import google.generativeai.types.generation_types as generation_config_types
 import pytest
@@ -6,11 +7,10 @@ from _pytest.monkeypatch import MonkeyPatch
 from google.ai import generativelanguage as glm
 from google.ai.generativelanguage_v1beta.types import content as gag_content
 from google.generativeai import GenerativeModel
-from google.generativeai.client import _ClientManager, configure
 from google.generativeai.types import GenerateContentResponse, content_types, safety_types
 from google.generativeai.types.generation_types import BaseGenerateContentResponse

-current_api_key = ""
+from extensions import ext_redis


 class MockGoogleResponseClass:
@@ -57,11 +57,6 @@ class MockGoogleClass:
         stream: bool = False,
         **kwargs,
     ) -> GenerateContentResponse:
-        global current_api_key
-
-        if len(current_api_key) < 16:
-            raise Exception("Invalid API key")
-
         if stream:
             return MockGoogleClass.generate_content_stream()

@@ -75,33 +70,29 @@ class MockGoogleClass:
     def generative_response_candidates(self) -> list[MockGoogleResponseCandidateClass]:
         return [MockGoogleResponseCandidateClass()]

-    def make_client(self: _ClientManager, name: str):
-        global current_api_key
-
-        if name.endswith("_async"):
-            name = name.split("_")[0]
-            cls = getattr(glm, name.title() + "ServiceAsyncClient")
-        else:
-            cls = getattr(glm, name.title() + "ServiceClient")
+
+def mock_configure(api_key: str):
+    if len(api_key) < 16:
+        raise Exception("Invalid API key")

-        # Attempt to configure using defaults.
-        if not self.client_config:
-            configure()
-
-        client_options = self.client_config.get("client_options", None)
-        if client_options:
-            current_api_key = client_options.api_key
+
+class MockFileState:
+    def __init__(self):
+        self.name = "FINISHED"

-        def nop(self, *args, **kwargs):
-            pass
-
-        original_init = cls.__init__
-        cls.__init__ = nop
-        client: glm.GenerativeServiceClient = cls(**self.client_config)
-        cls.__init__ = original_init
+
+class MockGoogleFile:
+    def __init__(self, name: str = "mock_file_name"):
+        self.name = name
+        self.state = MockFileState()

-        if not self.default_metadata:
-            return client
+
+def mock_get_file(name: str) -> MockGoogleFile:
+    return MockGoogleFile(name)
+
+
+def mock_upload_file(path: str, mime_type: str) -> MockGoogleFile:
+    return MockGoogleFile()


 @pytest.fixture
@@ -109,8 +100,17 @@ def setup_google_mock(request, monkeypatch: MonkeyPatch):
     monkeypatch.setattr(BaseGenerateContentResponse, "text", MockGoogleClass.generative_response_text)
     monkeypatch.setattr(BaseGenerateContentResponse, "candidates", MockGoogleClass.generative_response_candidates)
     monkeypatch.setattr(GenerativeModel, "generate_content", MockGoogleClass.generate_content)
-    monkeypatch.setattr(_ClientManager, "make_client", MockGoogleClass.make_client)
+    monkeypatch.setattr("google.generativeai.configure", mock_configure)
+    monkeypatch.setattr("google.generativeai.get_file", mock_get_file)
+    monkeypatch.setattr("google.generativeai.upload_file", mock_upload_file)

     yield

     monkeypatch.undo()
+
+
+@pytest.fixture
+def setup_mock_redis() -> None:
+    ext_redis.redis_client.get = MagicMock(return_value=None)
+    ext_redis.redis_client.setex = MagicMock(return_value=None)
+    ext_redis.redis_client.exists = MagicMock(return_value=True)
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -12,11 +12,11 @@ def tidb_vector():
    return TiDBVector(
        collection_name="test_collection",
        config=TiDBVectorConfig(
            host="xxx.eu-central-1.xxx.aws.tidbcloud.com",
            port="4000",
            user="xxx.root",
            password="xxxxxx",
            database="dify",
            host="localhost",
            port=4000,
            user="root",
            password="",
            database="test",
            program_name="langgenius/dify",
        ),
    )

@ -27,35 +27,14 @@ class TiDBVectorTest(AbstractVectorTest):
        super().__init__()
        self.vector = vector

    def text_exists(self):
        exist = self.vector.text_exists(self.example_doc_id)
        assert exist == False

    def search_by_vector(self):
        hits_by_vector: list[Document] = self.vector.search_by_vector(query_vector=self.example_embedding)
        assert len(hits_by_vector) == 0

    def search_by_full_text(self):
        hits_by_full_text: list[Document] = self.vector.search_by_full_text(query=get_example_text())
        assert len(hits_by_full_text) == 0

    def get_ids_by_metadata_field(self):
        ids = self.vector.get_ids_by_metadata_field(key="document_id", value=self.example_doc_id)
        assert len(ids) == 0
        ids = self.vector.get_ids_by_metadata_field(key="doc_id", value=self.example_doc_id)
        assert len(ids) == 1


def test_tidb_vector(setup_mock_redis, setup_tidbvector_mock, tidb_vector, mock_session):
def test_tidb_vector(setup_mock_redis, tidb_vector):
    TiDBVectorTest(vector=tidb_vector).run_all_tests()


@pytest.fixture
def mock_session():
    with patch("core.rag.datasource.vdb.tidb_vector.tidb_vector.Session", new_callable=MagicMock) as mock_session:
        yield mock_session


@pytest.fixture
def setup_tidbvector_mock(tidb_vector, mock_session):
    with patch("core.rag.datasource.vdb.tidb_vector.tidb_vector.create_engine"):
        with patch.object(tidb_vector._engine, "connect"):
            yield tidb_vector
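The session and engine mocks are dropped here because CI now runs a real TiDB container on localhost:4000 (see the expose_service_ports.sh and workflow changes earlier in this commit). A common guard for running such tests locally is to skip when the service is unreachable; a sketch, not code from this commit:

    import socket

    import pytest


    def tidb_available(host: str = "localhost", port: int = 4000) -> bool:
        # Cheap reachability probe: can we open a TCP connection?
        try:
            with socket.create_connection((host, port), timeout=1):
                return True
        except OSError:
            return False


    requires_tidb = pytest.mark.skipif(
        not tidb_available(), reason="TiDB is not running on localhost:4000"
    )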
@ -1,20 +0,0 @@
import pytest

from extensions.storage.opendal_storage import is_r2_endpoint


@pytest.mark.parametrize(
    ("endpoint", "expected"),
    [
        ("https://bucket.r2.cloudflarestorage.com", True),
        ("https://custom-domain.r2.cloudflarestorage.com/", True),
        ("https://bucket.r2.cloudflarestorage.com/path", True),
        ("https://s3.amazonaws.com", False),
        ("https://storage.googleapis.com", False),
        ("http://localhost:9000", False),
        ("invalid-url", False),
        ("", False),
    ],
)
def test_is_r2_endpoint(endpoint: str, expected: bool):
    assert is_r2_endpoint(endpoint) == expected
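The deleted test exercised is_r2_endpoint; its implementation is not shown in this diff, but a host-suffix check along the following lines would satisfy every parametrized case above. This is a sketch, not the actual Dify implementation:

    from urllib.parse import urlparse


    def is_r2_endpoint(endpoint: str) -> bool:
        # Cloudflare R2 endpoints all live under r2.cloudflarestorage.com.
        if not endpoint:
            return False
        hostname = urlparse(endpoint).hostname
        return bool(hostname) and hostname.endswith(".r2.cloudflarestorage.com")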
@ -2,6 +2,7 @@ from unittest.mock import MagicMock, patch

import pytest

from configs import dify_config
from core.app.app_config.entities import ModelConfigEntity
from core.file import File, FileTransferMethod, FileType, FileUploadConfig, ImageConfig
from core.memory.token_buffer_memory import TokenBufferMemory

@ -126,6 +127,7 @@ def test__get_chat_model_prompt_messages_no_memory(get_chat_model_args):

def test__get_chat_model_prompt_messages_with_files_no_memory(get_chat_model_args):
    model_config_mock, _, messages, inputs, context = get_chat_model_args
    dify_config.MULTIMODAL_SEND_FORMAT = "url"

    files = [
        File(

@ -140,7 +142,9 @@ def test__get_chat_model_prompt_messages_with_files_no_memory(get_chat_model_arg
    prompt_transform = AdvancedPromptTransform()
    prompt_transform._calculate_rest_token = MagicMock(return_value=2000)
    with patch("core.file.file_manager.to_prompt_message_content") as mock_get_encoded_string:
        mock_get_encoded_string.return_value = ImagePromptMessageContent(data=str(files[0].remote_url))
        mock_get_encoded_string.return_value = ImagePromptMessageContent(
            url=str(files[0].remote_url), format="jpg", mime_type="image/jpg"
        )
        prompt_messages = prompt_transform._get_chat_model_prompt_messages(
            prompt_template=messages,
            inputs=inputs,
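The test above swaps out file_manager.to_prompt_message_content with a patch context manager scoped to the with-block. The general shape, using only the standard library and a stand-in target rather than anything Dify-specific:

    from unittest.mock import patch


    def fetch() -> str:  # stands in for the patched target
        return "real"


    def test_patched():
        # Inside the block the module attribute is replaced; on exit it is restored.
        with patch(f"{__name__}.fetch", return_value="fake"):
            assert fetch() == "fake"
        assert fetch() == "real"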
@ -48,7 +48,7 @@ def test_executor_with_json_body_and_number_variable():
    assert executor.method == "post"
    assert executor.url == "https://api.example.com/data"
    assert executor.headers == {"Content-Type": "application/json"}
    assert executor.params == {}
    assert executor.params == []
    assert executor.json == {"number": 42}
    assert executor.data is None
    assert executor.files is None

@ -101,7 +101,7 @@ def test_executor_with_json_body_and_object_variable():
    assert executor.method == "post"
    assert executor.url == "https://api.example.com/data"
    assert executor.headers == {"Content-Type": "application/json"}
    assert executor.params == {}
    assert executor.params == []
    assert executor.json == {"name": "John Doe", "age": 30, "email": "john@example.com"}
    assert executor.data is None
    assert executor.files is None

@ -156,7 +156,7 @@ def test_executor_with_json_body_and_nested_object_variable():
    assert executor.method == "post"
    assert executor.url == "https://api.example.com/data"
    assert executor.headers == {"Content-Type": "application/json"}
    assert executor.params == {}
    assert executor.params == []
    assert executor.json == {"object": {"name": "John Doe", "age": 30, "email": "john@example.com"}}
    assert executor.data is None
    assert executor.files is None

@ -195,7 +195,7 @@ def test_extract_selectors_from_template_with_newline():
        variable_pool=variable_pool,
    )

    assert executor.params == {"test": "line1\nline2"}
    assert executor.params == [("test", "line1\nline2")]


def test_executor_with_form_data():

@ -244,7 +244,7 @@ def test_executor_with_form_data():
    assert executor.url == "https://api.example.com/upload"
    assert "Content-Type" in executor.headers
    assert "multipart/form-data" in executor.headers["Content-Type"]
    assert executor.params == {}
    assert executor.params == []
    assert executor.json is None
    assert executor.files is None
    assert executor.content is None

@ -265,3 +265,72 @@ def test_executor_with_form_data():
    assert "Hello, World!" in raw_request
    assert "number_field" in raw_request
    assert "42" in raw_request


def test_init_headers():
    def create_executor(headers: str) -> Executor:
        node_data = HttpRequestNodeData(
            title="test",
            method="get",
            url="http://example.com",
            headers=headers,
            params="",
            authorization=HttpRequestNodeAuthorization(type="no-auth"),
        )
        timeout = HttpRequestNodeTimeout(connect=10, read=30, write=30)
        return Executor(node_data=node_data, timeout=timeout, variable_pool=VariablePool())

    executor = create_executor("aa\n cc:")
    executor._init_headers()
    assert executor.headers == {"aa": "", "cc": ""}

    executor = create_executor("aa:bb\n cc:dd")
    executor._init_headers()
    assert executor.headers == {"aa": "bb", "cc": "dd"}

    executor = create_executor("aa:bb\n cc:dd\n")
    executor._init_headers()
    assert executor.headers == {"aa": "bb", "cc": "dd"}

    executor = create_executor("aa:bb\n\n cc : dd\n\n")
    executor._init_headers()
    assert executor.headers == {"aa": "bb", "cc": "dd"}


def test_init_params():
    def create_executor(params: str) -> Executor:
        node_data = HttpRequestNodeData(
            title="test",
            method="get",
            url="http://example.com",
            headers="",
            params=params,
            authorization=HttpRequestNodeAuthorization(type="no-auth"),
        )
        timeout = HttpRequestNodeTimeout(connect=10, read=30, write=30)
        return Executor(node_data=node_data, timeout=timeout, variable_pool=VariablePool())

    # Test basic key-value pairs
    executor = create_executor("key1:value1\nkey2:value2")
    executor._init_params()
    assert executor.params == [("key1", "value1"), ("key2", "value2")]

    # Test empty values
    executor = create_executor("key1:\nkey2:")
    executor._init_params()
    assert executor.params == [("key1", ""), ("key2", "")]

    # Test duplicate keys (which is allowed for params)
    executor = create_executor("key1:value1\nkey1:value2")
    executor._init_params()
    assert executor.params == [("key1", "value1"), ("key1", "value2")]

    # Test whitespace handling
    executor = create_executor(" key1 : value1 \n key2 : value2 ")
    executor._init_params()
    assert executor.params == [("key1", "value1"), ("key2", "value2")]

    # Test empty lines and extra whitespace
    executor = create_executor("key1:value1\n\nkey2:value2\n\n")
    executor._init_params()
    assert executor.params == [("key1", "value1"), ("key2", "value2")]
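The new tests pin down the params change: executor.params moves from a dict to a list of (key, value) tuples, so duplicate query keys survive. A parser satisfying every assertion above would look roughly like this; the real logic lives in the executor's _init_params and this is only a sketch of the behavior the tests specify:

    def parse_params(text: str) -> list[tuple[str, str]]:
        # Each non-empty line is "key:value"; whitespace is trimmed and
        # duplicate keys are preserved in order.
        params: list[tuple[str, str]] = []
        for line in text.splitlines():
            if not line.strip():
                continue
            key, _, value = line.partition(":")
            params.append((key.strip(), value.strip()))
        return params


    assert parse_params("key1:value1\nkey1:value2") == [("key1", "value1"), ("key1", "value2")]
    assert parse_params("key1:\nkey2:") == [("key1", ""), ("key2", "")]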
@ -14,18 +14,10 @@ from core.workflow.nodes.http_request import (
|
|||
HttpRequestNodeBody,
|
||||
HttpRequestNodeData,
|
||||
)
|
||||
from core.workflow.nodes.http_request.executor import _plain_text_to_dict
|
||||
from models.enums import UserFrom
|
||||
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
|
||||
|
||||
|
||||
def test_plain_text_to_dict():
|
||||
assert _plain_text_to_dict("aa\n cc:") == {"aa": "", "cc": ""}
|
||||
assert _plain_text_to_dict("aa:bb\n cc:dd") == {"aa": "bb", "cc": "dd"}
|
||||
assert _plain_text_to_dict("aa:bb\n cc:dd\n") == {"aa": "bb", "cc": "dd"}
|
||||
assert _plain_text_to_dict("aa:bb\n\n cc : dd\n\n") == {"aa": "bb", "cc": "dd"}
|
||||
|
||||
|
||||
def test_http_request_node_binary_file(monkeypatch):
|
||||
data = HttpRequestNodeData(
|
||||
title="test",
|
||||
|
|
|
|||
|
|
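test_plain_text_to_dict is removed here because the same cases now live in test_init_headers above. Unlike params, headers stay a dict, so later duplicates would overwrite earlier ones. A sketch of the dict-based parsing those assertions describe (not the executor's actual code):

    def plain_text_to_dict(text: str) -> dict[str, str]:
        # "key:value" per line; a line with no colon yields an empty value.
        result: dict[str, str] = {}
        for line in text.splitlines():
            if not line.strip():
                continue
            key, _, value = line.partition(":")
            result[key.strip()] = value.strip()
        return result


    assert plain_text_to_dict("aa\n cc:") == {"aa": "", "cc": ""}
    assert plain_text_to_dict("aa:bb\n\n cc : dd\n\n") == {"aa": "bb", "cc": "dd"}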
@ -18,8 +18,7 @@ from core.model_runtime.entities.message_entities import (
    TextPromptMessageContent,
    UserPromptMessage,
)
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelFeature, ModelType, ProviderModel
from core.model_runtime.entities.provider_entities import ConfigurateMethod, ProviderEntity
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelFeature, ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from core.prompt.entities.advanced_prompt_entities import MemoryConfig
from core.variables import ArrayAnySegment, ArrayFileSegment, NoneSegment

@ -249,8 +248,7 @@ def test_fetch_prompt_messages__vison_disabled(faker, llm_node, model_config):

def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
    # Setup dify config
    dify_config.MULTIMODAL_SEND_IMAGE_FORMAT = "url"
    dify_config.MULTIMODAL_SEND_VIDEO_FORMAT = "url"
    dify_config.MULTIMODAL_SEND_FORMAT = "url"

    # Generate fake values for prompt template
    fake_assistant_prompt = faker.sentence()

@ -328,6 +326,8 @@ def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
                    filename="test1.jpg",
                    transfer_method=FileTransferMethod.REMOTE_URL,
                    remote_url=fake_remote_url,
                    extension=".jpg",
                    mime_type="image/jpg",
                )
            ],
            vision_enabled=True,

@ -361,7 +361,9 @@ def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
                UserPromptMessage(
                    content=[
                        TextPromptMessageContent(data=fake_query),
                        ImagePromptMessageContent(data=fake_remote_url, detail=fake_vision_detail),
                        ImagePromptMessageContent(
                            url=fake_remote_url, mime_type="image/jpg", format="jpg", detail=fake_vision_detail
                        ),
                    ]
                ),
            ],

@ -384,7 +386,9 @@ def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
            expected_messages=[
                UserPromptMessage(
                    content=[
                        ImagePromptMessageContent(data=fake_remote_url, detail=fake_vision_detail),
                        ImagePromptMessageContent(
                            url=fake_remote_url, mime_type="image/jpg", format="jpg", detail=fake_vision_detail
                        ),
                    ]
                ),
            ]

@ -397,6 +401,8 @@ def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
                    filename="test1.jpg",
                    transfer_method=FileTransferMethod.REMOTE_URL,
                    remote_url=fake_remote_url,
                    extension=".jpg",
                    mime_type="image/jpg",
                )
            },
        ),
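Across these tests the image content entity moves from a single data field to explicit url, format, and mime_type fields (plus the existing detail). A minimal stand-in for the new shape, purely for illustration; the real class is ImagePromptMessageContent in core.model_runtime.entities.message_entities and may carry more fields:

    from dataclasses import dataclass


    @dataclass
    class ImageContent:
        # Field names follow the test code above.
        url: str
        format: str
        mime_type: str
        detail: str = "high"


    msg = ImageContent(url="https://example.com/test1.jpg", format="jpg", mime_type="image/jpg")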
@ -1,15 +1,12 @@
import os
from collections.abc import Generator
from pathlib import Path

import pytest

from configs.middleware.storage.opendal_storage_config import OpenDALScheme
from extensions.storage.opendal_storage import OpenDALStorage
from tests.unit_tests.oss.__mock.base import (
    get_example_data,
    get_example_filename,
    get_example_filepath,
    get_opendal_bucket,
)

@ -19,7 +16,7 @@ class TestOpenDAL:
    def setup_method(self, *args, **kwargs):
        """Executed before each test method."""
        self.storage = OpenDALStorage(
            scheme=OpenDALScheme.FS,
            scheme="fs",
            root=get_opendal_bucket(),
        )
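The OpenDALScheme enum gives way to plain scheme strings, which is what the underlying OpenDAL binding takes anyway. Roughly how the fs scheme maps onto the opendal Python package, as a sketch assuming the opendal binding's Operator API (keyword arguments vary per scheme):

    import opendal

    # "fs" backend rooted at a local directory; other schemes take
    # scheme-specific kwargs (bucket, endpoint, keys, ...).
    op = opendal.Operator("fs", root="/tmp/dify-storage")
    op.write("hello.txt", b"hello")
    assert op.read("hello.txt") == b"hello"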
@ -14,3 +14,4 @@ pytest api/tests/integration_tests/vdb/chroma \
  api/tests/integration_tests/vdb/upstash \
  api/tests/integration_tests/vdb/couchbase \
  api/tests/integration_tests/vdb/oceanbase \
  api/tests/integration_tests/vdb/tidb_vector \
@ -2,7 +2,7 @@ version: '3'
services:
  # API service
  api:
    image: langgenius/dify-api:0.13.2
    image: langgenius/dify-api:0.14.0
    restart: always
    environment:
      # Startup mode, 'api' starts the API server.

@ -227,7 +227,7 @@ services:
  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:0.13.2
    image: langgenius/dify-api:0.14.0
    restart: always
    environment:
      CONSOLE_WEB_URL: ''

@ -397,7 +397,7 @@ services:

  # Frontend web application.
  web:
    image: langgenius/dify-web:0.13.2
    image: langgenius/dify-web:0.14.0
    restart: always
    environment:
      # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
@ -119,15 +119,15 @@ DIFY_BIND_ADDRESS=0.0.0.0
# API service binding port number, default 5001.
DIFY_PORT=5001

# The number of API server workers, i.e., the number of gevent workers.
# Formula: number of cpu cores x 2 + 1
# The number of API server workers, i.e., the number of workers.
# Formula: number of cpu cores x 2 + 1 for sync, 1 for Gevent
# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers
SERVER_WORKER_AMOUNT=

# Defaults to gevent. If using windows, it can be switched to sync or solo.
SERVER_WORKER_CLASS=

# Similar to SERVER_WORKER_CLASS. Default is gevent.
# Similar to SERVER_WORKER_CLASS.
# If using windows, it can be switched to sync or solo.
CELERY_WORKER_CLASS=

@ -227,6 +227,7 @@ REDIS_PORT=6379
REDIS_USERNAME=
REDIS_PASSWORD=difyai123456
REDIS_USE_SSL=false
REDIS_DB=0

# Whether to use Redis Sentinel mode.
# If set to true, the application will automatically discover and connect to the master node through Sentinel.

@ -281,57 +282,39 @@ CONSOLE_CORS_ALLOW_ORIGINS=*
# ------------------------------

# The type of storage to use for storing user files.
# Supported values are `opendal` , `s3` , `azure-blob` , `google-storage`, `tencent-cos`, `huawei-obs`, `volcengine-tos`, `baidu-obs`, `supabase`
# Default: `opendal`
STORAGE_TYPE=opendal

# Apache OpenDAL Configuration, refer to https://github.com/apache/opendal
# The scheme for the OpenDAL storage.
STORAGE_OPENDAL_SCHEME=fs
# OpenDAL FS
OPENDAL_SCHEME=fs
# Configurations for OpenDAL Local File System.
OPENDAL_FS_ROOT=storage
# OpenDAL S3
OPENDAL_S3_ROOT=/
OPENDAL_S3_BUCKET=your-bucket-name
OPENDAL_S3_ENDPOINT=https://s3.amazonaws.com
OPENDAL_S3_ACCESS_KEY_ID=your-access-key
OPENDAL_S3_SECRET_ACCESS_KEY=your-secret-key
OPENDAL_S3_REGION=your-region
OPENDAL_S3_SERVER_SIDE_ENCRYPTION=

# S3 Configuration
#
S3_ENDPOINT=
S3_REGION=us-east-1
S3_BUCKET_NAME=difyai
S3_ACCESS_KEY=
S3_SECRET_KEY=
# Whether to use AWS managed IAM roles for authenticating with the S3 service.
# If set to false, the access key and secret key must be provided.
S3_USE_AWS_MANAGED_IAM=false
# The endpoint of the S3 service.
S3_ENDPOINT=
# The region of the S3 service.
S3_REGION=us-east-1
# The name of the S3 bucket to use for storing files.
S3_BUCKET_NAME=difyai
# The access key to use for authenticating with the S3 service.
S3_ACCESS_KEY=
# The secret key to use for authenticating with the S3 service.
S3_SECRET_KEY=

# Azure Blob Configuration
# The name of the Azure Blob Storage account to use for storing files.
#
AZURE_BLOB_ACCOUNT_NAME=difyai
# The access key to use for authenticating with the Azure Blob Storage account.
AZURE_BLOB_ACCOUNT_KEY=difyai
# The name of the Azure Blob Storage container to use for storing files.
AZURE_BLOB_CONTAINER_NAME=difyai-container
# The URL of the Azure Blob Storage account.
AZURE_BLOB_ACCOUNT_URL=https://<your_account_name>.blob.core.windows.net

# Google Storage Configuration
# The name of the Google Storage bucket to use for storing files.
#
GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name
# The service account JSON key to use for authenticating with the Google Storage service.
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64=your-google-service-account-json-base64-string

# The Alibaba Cloud OSS configurations,
# only available when STORAGE_TYPE is `aliyun-oss`
#
ALIYUN_OSS_BUCKET_NAME=your-bucket-name
ALIYUN_OSS_ACCESS_KEY=your-access-key
ALIYUN_OSS_SECRET_KEY=your-secret-key

@ -342,55 +325,47 @@ ALIYUN_OSS_AUTH_VERSION=v4
ALIYUN_OSS_PATH=your-path

# Tencent COS Configuration
# The name of the Tencent COS bucket to use for storing files.
#
TENCENT_COS_BUCKET_NAME=your-bucket-name
# The secret key to use for authenticating with the Tencent COS service.
TENCENT_COS_SECRET_KEY=your-secret-key
# The secret id to use for authenticating with the Tencent COS service.
TENCENT_COS_SECRET_ID=your-secret-id
# The region of the Tencent COS service.
TENCENT_COS_REGION=your-region
# The scheme of the Tencent COS service.
TENCENT_COS_SCHEME=your-scheme

# Oracle Storage Configuration
#
OCI_ENDPOINT=https://objectstorage.us-ashburn-1.oraclecloud.com
OCI_BUCKET_NAME=your-bucket-name
OCI_ACCESS_KEY=your-access-key
OCI_SECRET_KEY=your-secret-key
OCI_REGION=us-ashburn-1

# Huawei OBS Configuration
# The name of the Huawei OBS bucket to use for storing files.
#
HUAWEI_OBS_BUCKET_NAME=your-bucket-name
# The secret key to use for authenticating with the Huawei OBS service.
HUAWEI_OBS_SECRET_KEY=your-secret-key
# The access key to use for authenticating with the Huawei OBS service.
HUAWEI_OBS_ACCESS_KEY=your-access-key
# The server url of the HUAWEI OBS service.
HUAWEI_OBS_SERVER=your-server-url

# Volcengine TOS Configuration
# The name of the Volcengine TOS bucket to use for storing files.
#
VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name
# The secret key to use for authenticating with the Volcengine TOS service.
VOLCENGINE_TOS_SECRET_KEY=your-secret-key
# The access key to use for authenticating with the Volcengine TOS service.
VOLCENGINE_TOS_ACCESS_KEY=your-access-key
# The endpoint of the Volcengine TOS service.
VOLCENGINE_TOS_ENDPOINT=your-server-url
# The region of the Volcengine TOS service.
VOLCENGINE_TOS_REGION=your-region

# Baidu OBS Storage Configuration
# The name of the Baidu OBS bucket to use for storing files.
#
BAIDU_OBS_BUCKET_NAME=your-bucket-name
# The secret key to use for authenticating with the Baidu OBS service.
BAIDU_OBS_SECRET_KEY=your-secret-key
# The access key to use for authenticating with the Baidu OBS service.
BAIDU_OBS_ACCESS_KEY=your-access-key
# The endpoint of the Baidu OBS service.
BAIDU_OBS_ENDPOINT=your-server-url

# Supabase Storage Configuration
# The name of the Supabase bucket to use for storing files.
#
SUPABASE_BUCKET_NAME=your-bucket-name
# The api key to use for authenticating with the Supabase service.
SUPABASE_API_KEY=your-access-key
# The project endpoint url of the Supabase service.
SUPABASE_URL=your-server-url

# ------------------------------

@ -403,28 +378,20 @@ VECTOR_STORE=weaviate

# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
WEAVIATE_ENDPOINT=http://weaviate:8080
# The Weaviate API key.
WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih

# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
QDRANT_URL=http://qdrant:6333
# The Qdrant API key.
QDRANT_API_KEY=difyai123456
# The Qdrant client timeout setting.
QDRANT_CLIENT_TIMEOUT=20
# The Qdrant client enable gRPC mode.
QDRANT_GRPC_ENABLED=false
# The Qdrant server gRPC mode PORT.
QDRANT_GRPC_PORT=6334

# Milvus configuration Only available when VECTOR_STORE is `milvus`.
# The milvus uri.
MILVUS_URI=http://127.0.0.1:19530
# The milvus token.
MILVUS_TOKEN=
# The milvus username.
MILVUS_USER=root
# The milvus password.
MILVUS_PASSWORD=Milvus

# MyScale configuration, only available when VECTOR_STORE is `myscale`

@ -478,8 +445,8 @@ ANALYTICDB_MAX_CONNECTION=5
# TiDB vector configurations, only available when VECTOR_STORE is `tidb`
TIDB_VECTOR_HOST=tidb
TIDB_VECTOR_PORT=4000
TIDB_VECTOR_USER=xxx.root
TIDB_VECTOR_PASSWORD=xxxxxx
TIDB_VECTOR_USER=
TIDB_VECTOR_PASSWORD=
TIDB_VECTOR_DATABASE=dify

# Tidb on qdrant configuration, only available when VECTOR_STORE is `tidb_on_qdrant`

@ -502,7 +469,7 @@ CHROMA_PORT=8000
CHROMA_TENANT=default_tenant
CHROMA_DATABASE=default_database
CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider
CHROMA_AUTH_CREDENTIALS=xxxxxx
CHROMA_AUTH_CREDENTIALS=

# Oracle configuration, only available when VECTOR_STORE is `oracle`
ORACLE_HOST=oracle

@ -539,6 +506,7 @@ ELASTICSEARCH_HOST=0.0.0.0
ELASTICSEARCH_PORT=9200
ELASTICSEARCH_USERNAME=elastic
ELASTICSEARCH_PASSWORD=elastic
KIBANA_PORT=5601

# baidu vector configurations, only available when VECTOR_STORE is `baidu`
BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287

@ -558,11 +526,10 @@ VIKINGDB_SCHEMA=http
VIKINGDB_CONNECTION_TIMEOUT=30
VIKINGDB_SOCKET_TIMEOUT=30

# Lindorm configuration, only available when VECTOR_STORE is `lindorm`
LINDORM_URL=http://ld-***************-proxy-search-pub.lindorm.aliyuncs.com:30070
LINDORM_USERNAME=username
LINDORM_PASSWORD=password
LINDORM_URL=http://lindorm:30070
LINDORM_USERNAME=lindorm
LINDORM_PASSWORD=lindorm

# OceanBase Vector configuration, only available when VECTOR_STORE is `oceanbase`
OCEANBASE_VECTOR_HOST=oceanbase

@ -570,8 +537,13 @@ OCEANBASE_VECTOR_PORT=2881
OCEANBASE_VECTOR_USER=root@test
OCEANBASE_VECTOR_PASSWORD=difyai123456
OCEANBASE_VECTOR_DATABASE=test
OCEANBASE_CLUSTER_NAME=difyai
OCEANBASE_MEMORY_LIMIT=6G

# Upstash Vector configuration, only available when VECTOR_STORE is `upstash`
UPSTASH_VECTOR_URL=https://xxx-vector.upstash.io
UPSTASH_VECTOR_TOKEN=dify

# ------------------------------
# Knowledge Configuration
# ------------------------------

@ -614,20 +586,16 @@ CODE_GENERATION_MAX_TOKENS=1024
# Multi-modal Configuration
# ------------------------------

# The format of the image/video sent when the multi-modal model is input,
# The format of the image/video/audio/document sent when the multi-modal model is input,
# the default is base64, optional url.
# The delay of the call in url mode will be lower than that in base64 mode.
# It is generally recommended to use the more compatible base64 mode.
# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video.
MULTIMODAL_SEND_IMAGE_FORMAT=base64
MULTIMODAL_SEND_VIDEO_FORMAT=base64
# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video/audio/document.
MULTIMODAL_SEND_FORMAT=base64
# Upload image file size limit, default 10M.
UPLOAD_IMAGE_FILE_SIZE_LIMIT=10

# Upload video file size limit, default 100M.
UPLOAD_VIDEO_FILE_SIZE_LIMIT=100

# Upload audio file size limit, default 50M.
UPLOAD_AUDIO_FILE_SIZE_LIMIT=50

@ -640,10 +608,8 @@ UPLOAD_AUDIO_FILE_SIZE_LIMIT=50
# all monitoring information is not reported to Sentry.
# If not set, Sentry error reporting will be disabled.
API_SENTRY_DSN=

# API Service The reporting ratio of Sentry events, if it is 0.01, it is 1%.
API_SENTRY_TRACES_SAMPLE_RATE=1.0

# API Service The reporting ratio of Sentry profiles, if it is 0.01, it is 1%.
API_SENTRY_PROFILES_SAMPLE_RATE=1.0

@ -681,8 +647,10 @@ MAIL_TYPE=resend
MAIL_DEFAULT_SEND_FROM=

# API-Key for the Resend email provider, used when MAIL_TYPE is `resend`.
RESEND_API_URL=https://api.resend.com
RESEND_API_KEY=your-resend-api-key

# SMTP server configuration, used when MAIL_TYPE is `smtp`
SMTP_SERVER=
SMTP_PORT=465

@ -707,18 +675,19 @@ RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5

# The sandbox service endpoint.
CODE_EXECUTION_ENDPOINT=http://sandbox:8194
CODE_EXECUTION_API_KEY=dify-sandbox
CODE_MAX_NUMBER=9223372036854775807
CODE_MIN_NUMBER=-9223372036854775808
CODE_MAX_DEPTH=5
CODE_MAX_PRECISION=20
CODE_MAX_STRING_LENGTH=80000
TEMPLATE_TRANSFORM_MAX_LENGTH=80000
CODE_MAX_STRING_ARRAY_LENGTH=30
CODE_MAX_OBJECT_ARRAY_LENGTH=30
CODE_MAX_NUMBER_ARRAY_LENGTH=1000
CODE_EXECUTION_CONNECT_TIMEOUT=10
CODE_EXECUTION_READ_TIMEOUT=60
CODE_EXECUTION_WRITE_TIMEOUT=10
TEMPLATE_TRANSFORM_MAX_LENGTH=80000

# Workflow runtime configuration
WORKFLOW_MAX_EXECUTION_STEPS=500

@ -944,3 +913,10 @@ CSP_WHITELIST=

# Enable or disable create tidb service job
CREATE_TIDB_SERVICE_JOB_ENABLED=false

# Maximum number of submitted thread count in a ThreadPool for parallel node execution
MAX_SUBMIT_COUNT=100

# Proxy
HTTP_PROXY=
HTTPS_PROXY=
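Note the consolidation above: MULTIMODAL_SEND_IMAGE_FORMAT and MULTIMODAL_SEND_VIDEO_FORMAT collapse into a single MULTIMODAL_SEND_FORMAT. A hedged sketch of a backward-compatible read during migration; the variable names come from this diff, but the fallback logic is an assumption for illustration, not Dify code:

    import os


    def multimodal_send_format() -> str:
        # Prefer the new consolidated variable, fall back to the legacy
        # image-specific one, then to the documented default.
        return (
            os.environ.get("MULTIMODAL_SEND_FORMAT")
            or os.environ.get("MULTIMODAL_SEND_IMAGE_FORMAT")  # legacy name
            or "base64"
        )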
@ -0,0 +1,576 @@
x-shared-env: &shared-api-worker-env
services:
  # API service
  api:
    image: langgenius/dify-api:0.14.0
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'api' starts the API server.
      MODE: api
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
    depends_on:
      - db
      - redis
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - ssrf_proxy_network
      - default

  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:0.14.0
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'worker' starts the Celery worker for processing the queue.
      MODE: worker
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
    depends_on:
      - db
      - redis
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - ssrf_proxy_network
      - default

  # Frontend web application.
  web:
    image: langgenius/dify-web:0.14.0
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}
      APP_API_URL: ${APP_API_URL:-}
      SENTRY_DSN: ${WEB_SENTRY_DSN:-}
      NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
      TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
      CSP_WHITELIST: ${CSP_WHITELIST:-}

  # The postgres database.
  db:
    image: postgres:15-alpine
    restart: always
    environment:
      PGUSER: ${PGUSER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
      POSTGRES_DB: ${POSTGRES_DB:-dify}
      PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
    command: >
      postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
      -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
      -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
      -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
      -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
    volumes:
      - ./volumes/db/data:/var/lib/postgresql/data
    healthcheck:
      test: ['CMD', 'pg_isready']
      interval: 1s
      timeout: 3s
      retries: 30

  # The redis cache.
  redis:
    image: redis:6-alpine
    restart: always
    environment:
      REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
    volumes:
      # Mount the redis data directory to the container.
      - ./volumes/redis/data:/data
    # Set the redis password when starting the redis server.
    command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
    healthcheck:
      test: ['CMD', 'redis-cli', 'ping']

  # The DifySandbox
  sandbox:
    image: langgenius/dify-sandbox:0.2.10
    restart: always
    environment:
      # The DifySandbox configurations
      # Make sure you are changing this key for your deployment with a strong key.
      # You can generate a strong key using `openssl rand -base64 42`.
      API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
      GIN_MODE: ${SANDBOX_GIN_MODE:-release}
      WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
      ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
      HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
      HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    volumes:
      - ./volumes/sandbox/dependencies:/dependencies
    healthcheck:
      test: ['CMD', 'curl', '-f', 'http://localhost:8194/health']
    networks:
      - ssrf_proxy_network

  # ssrf_proxy server
  # for more information, please refer to
  # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
  ssrf_proxy:
    image: ubuntu/squid:latest
    restart: always
    volumes:
      - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
      - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
    entrypoint:
      [
        'sh',
        '-c',
        "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
      ]
    environment:
      # please modify the squid env vars to fit your network environment.
      HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
      COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
      REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
      SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    networks:
      - ssrf_proxy_network
      - default

  # Certbot service
  # use `docker-compose --profile certbot up` to start the certbot service.
  certbot:
    image: certbot/certbot
    profiles:
      - certbot
    volumes:
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
      - ./volumes/certbot/logs:/var/log/letsencrypt
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live
      - ./certbot/update-cert.template.txt:/update-cert.template.txt
      - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
    environment:
      - CERTBOT_EMAIL=${CERTBOT_EMAIL}
      - CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
      - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
    entrypoint: ['/docker-entrypoint.sh']
    command: ['tail', '-f', '/dev/null']

  # The nginx reverse proxy.
  # used for reverse proxying the API service and Web service.
  nginx:
    image: nginx:latest
    restart: always
    volumes:
      - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
      - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
      - ./nginx/https.conf.template:/etc/nginx/https.conf.template
      - ./nginx/conf.d:/etc/nginx/conf.d
      - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - ./nginx/ssl:/etc/ssl # cert dir (legacy)
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
    entrypoint:
      [
        'sh',
        '-c',
        "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
      ]
    environment:
      NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
      NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
      NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
      NGINX_PORT: ${NGINX_PORT:-80}
      # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
      # and modify the env vars below in .env if HTTPS_ENABLED is true.
      NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
      NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
      NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
      NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
      NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
      NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
      NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
      CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
    depends_on:
      - api
      - web
    ports:
      - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
      - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'

  # The TiDB vector store.
  # For production use, please refer to https://github.com/pingcap/tidb-docker-compose
  tidb:
    image: pingcap/tidb:v8.4.0
    profiles:
      - tidb
    command:
      - --store=unistore
    restart: always

  # The Weaviate vector store.
  weaviate:
    image: semitechnologies/weaviate:1.19.0
    profiles:
      - ''
      - weaviate
    restart: always
    volumes:
      # Mount the Weaviate data directory to the container.
      - ./volumes/weaviate:/var/lib/weaviate
    environment:
      # The Weaviate configurations
      # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
      PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
      QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
      DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
      CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
      AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
      AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
      AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
      AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}

  # Qdrant vector store.
  # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
  qdrant:
    image: langgenius/qdrant:v1.7.3
    profiles:
      - qdrant
    restart: always
    volumes:
      - ./volumes/qdrant:/qdrant/storage
    environment:
      QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}

  # The Couchbase vector store.
  couchbase-server:
    build: ./couchbase-server
    profiles:
      - couchbase
    restart: always
    environment:
      - CLUSTER_NAME=dify_search
      - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
      - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
      - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
      - COUCHBASE_BUCKET_RAMSIZE=512
      - COUCHBASE_RAM_SIZE=2048
      - COUCHBASE_EVENTING_RAM_SIZE=512
      - COUCHBASE_INDEX_RAM_SIZE=512
      - COUCHBASE_FTS_RAM_SIZE=1024
    hostname: couchbase-server
    container_name: couchbase-server
    working_dir: /opt/couchbase
    stdin_open: true
    tty: true
    entrypoint: [""]
    command: sh -c "/opt/couchbase/init/init-cbserver.sh"
    volumes:
      - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
    healthcheck:
      # ensure bucket was created before proceeding
      test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ]
      interval: 10s
      retries: 10
      start_period: 30s
      timeout: 10s

  # The pgvector vector database.
  pgvector:
    image: pgvector/pgvector:pg16
    profiles:
      - pgvector
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
    volumes:
      - ./volumes/pgvector/data:/var/lib/postgresql/data
    healthcheck:
      test: ['CMD', 'pg_isready']
      interval: 1s
      timeout: 3s
      retries: 30

  # pgvecto-rs vector store
  pgvecto-rs:
    image: tensorchord/pgvecto-rs:pg16-v0.3.0
    profiles:
      - pgvecto-rs
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
    volumes:
      - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
    healthcheck:
      test: ['CMD', 'pg_isready']
      interval: 1s
      timeout: 3s
      retries: 30

  # Chroma vector database
  chroma:
    image: ghcr.io/chroma-core/chroma:0.5.20
    profiles:
      - chroma
    restart: always
    volumes:
      - ./volumes/chroma:/chroma/chroma
    environment:
      CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
      CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
      IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}

  # OceanBase vector database
  oceanbase:
    image: quay.io/oceanbase/oceanbase-ce:4.3.3.0-100000142024101215
    profiles:
      - oceanbase
    restart: always
    volumes:
      - ./volumes/oceanbase/data:/root/ob
      - ./volumes/oceanbase/conf:/root/.obd/cluster
      - ./volumes/oceanbase/init.d:/root/boot/init.d
    environment:
      OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
      OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
      OB_SERVER_IP: '127.0.0.1'

  # Oracle vector database
  oracle:
    image: container-registry.oracle.com/database/free:latest
    profiles:
      - oracle
    restart: always
    volumes:
      - source: oradata
        type: volume
        target: /opt/oracle/oradata
      - ./startupscripts:/opt/oracle/scripts/startup
    environment:
      ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
      ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}

  # Milvus vector database services
  etcd:
    container_name: milvus-etcd
    image: quay.io/coreos/etcd:v3.5.5
    profiles:
      - milvus
    environment:
      ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
      ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
      ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
      ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
    volumes:
      - ./volumes/milvus/etcd:/etcd
    command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
    healthcheck:
      test: ['CMD', 'etcdctl', 'endpoint', 'health']
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus

  minio:
    container_name: milvus-minio
    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
    profiles:
      - milvus
    environment:
      MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
      MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
    volumes:
      - ./volumes/milvus/minio:/minio_data
    command: minio server /minio_data --console-address ":9001"
    healthcheck:
      test: ['CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live']
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus

  milvus-standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.3.1
    profiles:
      - milvus
    command: ['milvus', 'run', 'standalone']
    environment:
      ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
      MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
      common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
    volumes:
      - ./volumes/milvus/milvus:/var/lib/milvus
    healthcheck:
      test: ['CMD', 'curl', '-f', 'http://localhost:9091/healthz']
      interval: 30s
      start_period: 90s
      timeout: 20s
      retries: 3
    depends_on:
      - etcd
      - minio
    ports:
      - 19530:19530
      - 9091:9091
    networks:
      - milvus

  # Opensearch vector database
  opensearch:
    container_name: opensearch
    image: opensearchproject/opensearch:latest
    profiles:
      - opensearch
    environment:
      discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
      bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
      OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
      OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
    ulimits:
      memlock:
        soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
        hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
      nofile:
        soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
        hard: ${OPENSEARCH_NOFILE_HARD:-65536}
    volumes:
      - ./volumes/opensearch/data:/usr/share/opensearch/data
    networks:
      - opensearch-net

  opensearch-dashboards:
    container_name: opensearch-dashboards
    image: opensearchproject/opensearch-dashboards:latest
    profiles:
      - opensearch
    environment:
      OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
    volumes:
      - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
    networks:
      - opensearch-net
    depends_on:
      - opensearch

  # MyScale vector database
  myscale:
    container_name: myscale
    image: myscale/myscaledb:1.6.4
    profiles:
      - myscale
    restart: always
    tty: true
    volumes:
      - ./volumes/myscale/data:/var/lib/clickhouse
      - ./volumes/myscale/log:/var/log/clickhouse-server
      - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
    ports:
      - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}

  # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
    container_name: elasticsearch
    profiles:
      - elasticsearch
    restart: always
    volumes:
      - dify_es01_data:/usr/share/elasticsearch/data
    environment:
      ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
      cluster.name: dify-es-cluster
      node.name: dify-es0
      discovery.type: single-node
      xpack.license.self_generated.type: trial
      xpack.security.enabled: 'true'
      xpack.security.enrollment.enabled: 'false'
      xpack.security.http.ssl.enabled: 'false'
    ports:
      - ${ELASTICSEARCH_PORT:-9200}:9200
    healthcheck:
      test: ['CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty']
      interval: 30s
      timeout: 10s
      retries: 50

  # https://www.elastic.co/guide/en/kibana/current/docker.html
  # https://www.elastic.co/guide/en/kibana/current/settings.html
  kibana:
    image: docker.elastic.co/kibana/kibana:8.14.3
    container_name: kibana
    profiles:
      - elasticsearch
    depends_on:
      - elasticsearch
    restart: always
    environment:
      XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
      NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
      XPACK_SECURITY_ENABLED: 'true'
      XPACK_SECURITY_ENROLLMENT_ENABLED: 'false'
      XPACK_SECURITY_HTTP_SSL_ENABLED: 'false'
      XPACK_FLEET_ISAIRGAPPED: 'true'
      I18N_LOCALE: zh-CN
      SERVER_PORT: '5601'
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    ports:
      - ${KIBANA_PORT:-5601}:5601
    healthcheck:
      test: ['CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1']
      interval: 30s
      timeout: 10s
      retries: 3

  # unstructured
  # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
  unstructured:
    image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
    profiles:
      - unstructured
    restart: always
    volumes:
      - ./volumes/unstructured:/app/data

networks:
  # create a network between sandbox, api and ssrf_proxy, and can not access outside.
  ssrf_proxy_network:
    driver: bridge
    internal: true
  milvus:
    driver: bridge
  opensearch-net:
    driver: bridge
    internal: true

volumes:
  oradata:
  dify_es01_data:
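The template above leans on a YAML anchor (&shared-api-worker-env) that each service pulls in through the merge key <<: *shared-api-worker-env, so the api and worker services share one environment block. A quick check of those semantics with PyYAML, which resolves merge keys on load; the minimal document here is illustrative, not taken from the repo:

    import yaml

    doc = """
    x-shared: &shared
      LOG_LEVEL: INFO
    services:
      api:
        environment:
          <<: *shared
          MODE: api
    """
    data = yaml.safe_load(doc)
    # The merge key folds the anchored mapping into the environment mapping.
    assert data["services"]["api"]["environment"] == {"LOG_LEVEL": "INFO", "MODE": "api"}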
@ -1,28 +1,33 @@
# ==================================================================
# WARNING: This file is auto-generated by generate_docker_compose
# Do not modify this file directly. Instead, update the .env.example
# or docker-compose-template.yaml and regenerate this file.
# ==================================================================

x-shared-env: &shared-api-worker-env
  WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10}
  CONSOLE_API_URL: ${CONSOLE_API_URL:-}
  CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-}
  SERVICE_API_URL: ${SERVICE_API_URL:-}
  APP_API_URL: ${APP_API_URL:-}
  APP_WEB_URL: ${APP_WEB_URL:-}
  FILES_URL: ${FILES_URL:-}
  LOG_LEVEL: ${LOG_LEVEL:-INFO}
  LOG_FILE: ${LOG_FILE:-}
  LOG_FILE: ${LOG_FILE:-/app/logs/server.log}
  LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20}
  LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5}
  # Log dateformat
  LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S}
  # Log Timezone
  LOG_DATEFORMAT: ${LOG_DATEFORMAT:-"%Y-%m-%d %H:%M:%S"}
  LOG_TZ: ${LOG_TZ:-UTC}
  DEBUG: ${DEBUG:-false}
  FLASK_DEBUG: ${FLASK_DEBUG:-false}
  SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U}
  INIT_PASSWORD: ${INIT_PASSWORD:-}
  CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-}
  CONSOLE_API_URL: ${CONSOLE_API_URL:-}
  SERVICE_API_URL: ${SERVICE_API_URL:-}
  APP_WEB_URL: ${APP_WEB_URL:-}
  CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai}
  OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1}
  FILES_URL: ${FILES_URL:-}
  FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300}
  APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0}
  MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true}
  DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION}
  CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-"https://updates.dify.ai"}
  OPENAI_API_BASE: ${OPENAI_API_BASE:-"https://api.openai.com/v1"}
  MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true}
  FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300}
  ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60}
  APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0}
  DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0}
  DIFY_PORT: ${DIFY_PORT:-5001}
  SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-}

@ -43,6 +48,11 @@ x-shared-env: &shared-api-worker-env
  SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30}
  SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600}
  SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false}
  POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-100}
  POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB}
  POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB}
  POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}
  POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}
  REDIS_HOST: ${REDIS_HOST:-redis}
  REDIS_PORT: ${REDIS_PORT:-6379}
  REDIS_USERNAME: ${REDIS_USERNAME:-}

@ -55,75 +65,73 @@ x-shared-env: &shared-api-worker-env
  REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-}
  REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-}
  REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1}
  REDIS_CLUSTERS: ${REDIS_CLUSTERS:-}
  REDIS_USE_CLUSTERS: ${REDIS_USE_CLUSTERS:-false}
  REDIS_CLUSTERS: ${REDIS_CLUSTERS:-}
  REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-}
  ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60}
  CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1}
  CELERY_BROKER_URL: ${CELERY_BROKER_URL:-"redis://:difyai123456@redis:6379/1"}
  BROKER_USE_SSL: ${BROKER_USE_SSL:-false}
  CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false}
  CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-}
  CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1}
  WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*}
  CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*}
  STORAGE_TYPE: ${STORAGE_TYPE:-local}
  STORAGE_LOCAL_PATH: ${STORAGE_LOCAL_PATH:-storage}
  S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false}
  STORAGE_TYPE: ${STORAGE_TYPE:-opendal}
  OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs}
  OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage}
  S3_ENDPOINT: ${S3_ENDPOINT:-}
  S3_BUCKET_NAME: ${S3_BUCKET_NAME:-}
  S3_REGION: ${S3_REGION:-us-east-1}
  S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai}
  S3_ACCESS_KEY: ${S3_ACCESS_KEY:-}
  S3_SECRET_KEY: ${S3_SECRET_KEY:-}
  S3_REGION: ${S3_REGION:-us-east-1}
  AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-}
  AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-}
  AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-}
  AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-}
  GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-}
  GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-}
  ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-}
  ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-}
  ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-}
  ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-}
  ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-}
  S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false}
  AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai}
  AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai}
  AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-difyai-container}
  AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-"https://<your_account_name>.blob.core.windows.net"}
  GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-your-bucket-name}
  GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-your-google-service-account-json-base64-string}
  ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-your-bucket-name}
  ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-your-access-key}
  ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-your-secret-key}
  ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-"https://oss-ap-southeast-1-internal.aliyuncs.com"}
  ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1}
  ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4}
  ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-}
  TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-}
  TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-}
  TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-}
  TENCENT_COS_REGION: ${TENCENT_COS_REGION:-}
  TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-}
  HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-}
  HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-}
  HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-}
  HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-}
  OCI_ENDPOINT: ${OCI_ENDPOINT:-}
  OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-}
  OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-}
  OCI_SECRET_KEY: ${OCI_SECRET_KEY:-}
  OCI_REGION: ${OCI_REGION:-}
  VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-}
  VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-}
  VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-}
  VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-}
  VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-}
  BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-}
  BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-}
  BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-}
  BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-}
  ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path}
  TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name}
  TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key}
  TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id}
  TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region}
  TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme}
  OCI_ENDPOINT: ${OCI_ENDPOINT:-"https://objectstorage.us-ashburn-1.oraclecloud.com"}
  OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name}
  OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key}
  OCI_SECRET_KEY: ${OCI_SECRET_KEY:-your-secret-key}
  OCI_REGION: ${OCI_REGION:-us-ashburn-1}
  HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-your-bucket-name}
  HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key}
  HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key}
  HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url}
  VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name}
  VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key}
  VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key}
  VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-your-server-url}
  VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-your-region}
  BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-your-bucket-name}
  BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-your-secret-key}
  BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-your-access-key}
  BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-your-server-url}
  SUPABASE_BUCKET_NAME: ${SUPABASE_BUCKET_NAME:-your-bucket-name}
  SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key}
  SUPABASE_URL: ${SUPABASE_URL:-your-server-url}
  VECTOR_STORE: ${VECTOR_STORE:-weaviate}
  WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080}
  WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-"http://weaviate:8080"}
  WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
  QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333}
  QDRANT_URL: ${QDRANT_URL:-"http://qdrant:6333"}
  QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
  QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20}
  QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false}
  QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334}
|
||||
COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-'couchbase-server'}
|
||||
COUCHBASE_USER: ${COUCHBASE_USER:-Administrator}
|
||||
COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password}
|
||||
COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings}
|
||||
COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default}
|
||||
MILVUS_URI: ${MILVUS_URI:-http://127.0.0.1:19530}
|
||||
MILVUS_URI: ${MILVUS_URI:-"http://127.0.0.1:19530"}
|
||||
MILVUS_TOKEN: ${MILVUS_TOKEN:-}
|
||||
MILVUS_USER: ${MILVUS_USER:-root}
|
||||
MILVUS_PASSWORD: ${MILVUS_PASSWORD:-Milvus}
|
||||
|
|
@@ -133,172 +141,264 @@ x-shared-env: &shared-api-worker-env
  MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-}
  MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify}
  MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-}
  RELYT_HOST: ${RELYT_HOST:-db}
  RELYT_PORT: ${RELYT_PORT:-5432}
  RELYT_USER: ${RELYT_USER:-postgres}
  RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456}
  RELYT_DATABASE: ${RELYT_DATABASE:-postgres}
  COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-"couchbase://couchbase-server"}
  COUCHBASE_USER: ${COUCHBASE_USER:-Administrator}
  COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password}
  COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings}
  COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default}
  PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector}
  PGVECTOR_PORT: ${PGVECTOR_PORT:-5432}
  PGVECTOR_USER: ${PGVECTOR_USER:-postgres}
  PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456}
  PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify}
  PGVECTOR_MIN_CONNECTION: ${PGVECTOR_MIN_CONNECTION:-1}
  PGVECTOR_MAX_CONNECTION: ${PGVECTOR_MAX_CONNECTION:-5}
  PGVECTO_RS_HOST: ${PGVECTO_RS_HOST:-pgvecto-rs}
  PGVECTO_RS_PORT: ${PGVECTO_RS_PORT:-5432}
  PGVECTO_RS_USER: ${PGVECTO_RS_USER:-postgres}
  PGVECTO_RS_PASSWORD: ${PGVECTO_RS_PASSWORD:-difyai123456}
  PGVECTO_RS_DATABASE: ${PGVECTO_RS_DATABASE:-dify}
  ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-your-ak}
  ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-your-sk}
  ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-cn-hangzhou}
  ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-gp-ab123456}
  ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-testaccount}
  ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-testpassword}
  ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify}
  ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-difypassword}
  ANALYTICDB_HOST: ${ANALYTICDB_HOST:-gp-test.aliyuncs.com}
  ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432}
  ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1}
  ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5}
  TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb}
  TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000}
  TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-}
  TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-}
  TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify}
  TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1}
  TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-"http://127.0.0.1"}
  TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify}
  TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20}
  TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false}
  TIDB_ON_QDRANT_GRPC_PORT: ${TIDB_ON_QDRANT_GRPC_PORT:-6334}
  TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify}
  TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify}
  TIDB_API_URL: ${TIDB_API_URL:-http://127.0.0.1}
  TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-http://127.0.0.1}
  TIDB_API_URL: ${TIDB_API_URL:-"http://127.0.0.1"}
  TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-"http://127.0.0.1"}
  TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1}
  TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify}
  TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100}
  ORACLE_HOST: ${ORACLE_HOST:-oracle}
  ORACLE_PORT: ${ORACLE_PORT:-1521}
  ORACLE_USER: ${ORACLE_USER:-dify}
  ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify}
  ORACLE_DATABASE: ${ORACLE_DATABASE:-FREEPDB1}
  CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1}
  CHROMA_PORT: ${CHROMA_PORT:-8000}
  CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant}
  CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database}
  CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider}
  CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-}
  ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0}
  ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200}
  ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic}
  ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
  LINDORM_URL: ${LINDORM_URL:-http://lindorm:30070}
  LINDORM_USERNAME: ${LINDORM_USERNAME:-lindorm}
  LINDORM_PASSWORD: ${LINDORM_PASSWORD:-lindorm }
  KIBANA_PORT: ${KIBANA_PORT:-5601}
  # AnalyticDB configuration
  ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-}
  ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-}
  ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-}
  ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-}
  ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-}
  ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-}
  ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify}
  ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-}
  ANALYTICDB_HOST: ${ANALYTICDB_HOST:-}
  ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432}
  ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1}
  ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5}
  ORACLE_HOST: ${ORACLE_HOST:-oracle}
  ORACLE_PORT: ${ORACLE_PORT:-1521}
  ORACLE_USER: ${ORACLE_USER:-dify}
  ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify}
  ORACLE_DATABASE: ${ORACLE_DATABASE:-FREEPDB1}
  RELYT_HOST: ${RELYT_HOST:-db}
  RELYT_PORT: ${RELYT_PORT:-5432}
  RELYT_USER: ${RELYT_USER:-postgres}
  RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456}
  RELYT_DATABASE: ${RELYT_DATABASE:-postgres}
  OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch}
  OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200}
  OPENSEARCH_USER: ${OPENSEARCH_USER:-admin}
  OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin}
  OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true}
  TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1}
  TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-"http://127.0.0.1"}
  TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify}
  TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30}
  TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify}
  TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify}
  TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1}
  TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2}
  BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287}
  ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0}
  ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200}
  ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic}
  ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
  KIBANA_PORT: ${KIBANA_PORT:-5601}
  BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-"http://127.0.0.1:5287"}
  BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000}
  BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root}
  BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify}
  BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify}
  BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1}
  BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3}
  VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-dify}
  VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-dify}
  VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak}
  VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk}
  VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai}
  VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com}
  VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http}
  UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io}
  UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify}
  UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15}
  UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5}
  ETL_TYPE: ${ETL_TYPE:-dify}
  UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-}
  UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-}
  PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512}
  CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024}
  MULTIMODAL_SEND_IMAGE_FORMAT: ${MULTIMODAL_SEND_IMAGE_FORMAT:-base64}
  MULTIMODAL_SEND_VIDEO_FORMAT: ${MULTIMODAL_SEND_VIDEO_FORMAT:-base64}
  UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10}
  UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100}
  UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50}
  SENTRY_DSN: ${API_SENTRY_DSN:-}
  SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
  SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
  NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public}
  NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-}
  NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-}
  NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-}
  MAIL_TYPE: ${MAIL_TYPE:-resend}
  MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-}
  SMTP_SERVER: ${SMTP_SERVER:-}
  SMTP_PORT: ${SMTP_PORT:-465}
  SMTP_USERNAME: ${SMTP_USERNAME:-}
  SMTP_PASSWORD: ${SMTP_PASSWORD:-}
  SMTP_USE_TLS: ${SMTP_USE_TLS:-true}
  SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false}
  RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key}
  RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com}
  INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000}
  INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72}
  RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5}
  CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194}
  CODE_EXECUTION_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
  CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10}
  CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60}
  CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10}
  CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807}
  CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808}
  CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5}
  CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20}
  CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000}
  TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000}
  CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30}
  CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30}
  CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000}
  WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500}
  WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200}
  WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5}
  SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128}
  SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128}
  HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760}
  HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576}
  APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-12000}
  POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-}
  POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-}
  POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-}
  POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-}
  POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-}
  POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-}
  MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800}
  OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-http://oceanbase-vector}
  VIKINGDB_CONNECTION_TIMEOUT: ${VIKINGDB_CONNECTION_TIMEOUT:-30}
  VIKINGDB_SOCKET_TIMEOUT: ${VIKINGDB_SOCKET_TIMEOUT:-30}
  LINDORM_URL: ${LINDORM_URL:-"http://lindorm:30070"}
  LINDORM_USERNAME: ${LINDORM_USERNAME:-lindorm}
  LINDORM_PASSWORD: ${LINDORM_PASSWORD:-lindorm}
  OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase}
  OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881}
  OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test}
  OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
  OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test}
  OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
  OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
  UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-"https://xxx-vector.upstash.io"}
  UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify}
  UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15}
  UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5}
  ETL_TYPE: ${ETL_TYPE:-dify}
  UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-}
  UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-}
  SCARF_NO_ANALYTICS: ${SCARF_NO_ANALYTICS:-true}
  PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512}
  CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024}
  MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64}
  UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10}
  UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100}
  UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50}
  API_SENTRY_DSN: ${API_SENTRY_DSN:-}
  API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
  API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
  WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-}
  NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public}
  NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-}
  NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-}
  NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-}
  MAIL_TYPE: ${MAIL_TYPE:-resend}
  MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-}
  RESEND_API_URL: ${RESEND_API_URL:-"https://api.resend.com"}
  RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key}
  SMTP_SERVER: ${SMTP_SERVER:-}
  SMTP_PORT: ${SMTP_PORT:-465}
  SMTP_USERNAME: ${SMTP_USERNAME:-}
  SMTP_PASSWORD: ${SMTP_PASSWORD:-}
  SMTP_USE_TLS: ${SMTP_USE_TLS:-true}
  SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false}
  INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000}
  INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72}
  RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5}
  CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-"http://sandbox:8194"}
  CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox}
  CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807}
  CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808}
  CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5}
  CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20}
  CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000}
  CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30}
  CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30}
  CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000}
  CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10}
  CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60}
  CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10}
  TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000}
  WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500}
  WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200}
  WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5}
  MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800}
  WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10}
  HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760}
  HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576}
  SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-"http://ssrf_proxy:3128"}
  SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-"http://ssrf_proxy:3128"}
  TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
  PGUSER: ${PGUSER:-${DB_USERNAME}}
  POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}}
  POSTGRES_DB: ${POSTGRES_DB:-${DB_DATABASE}}
  PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
  SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
  SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release}
  SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
  SANDBOX_ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
  SANDBOX_HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-"http://ssrf_proxy:3128"}
  SANDBOX_HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-"http://ssrf_proxy:3128"}
  SANDBOX_PORT: ${SANDBOX_PORT:-8194}
  WEAVIATE_PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
  WEAVIATE_QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
  WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true}
  WEAVIATE_DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
  WEAVIATE_CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
  WEAVIATE_AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
  WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
  WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
  WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
  WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
  CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
  CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
  CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
  ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
  ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
  ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
  ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
  ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
  ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
  MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
  MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
  ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-"etcd:2379"}
  MINIO_ADDRESS: ${MINIO_ADDRESS:-"minio:9000"}
  MILVUS_AUTHORIZATION_ENABLED: ${MILVUS_AUTHORIZATION_ENABLED:-true}
  PGVECTOR_PGUSER: ${PGVECTOR_PGUSER:-postgres}
  PGVECTOR_POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
  PGVECTOR_POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
  PGVECTOR_PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
  OPENSEARCH_DISCOVERY_TYPE: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
  OPENSEARCH_BOOTSTRAP_MEMORY_LOCK: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
  OPENSEARCH_JAVA_OPTS_MIN: ${OPENSEARCH_JAVA_OPTS_MIN:-512m}
  OPENSEARCH_JAVA_OPTS_MAX: ${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
  OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
  OPENSEARCH_MEMLOCK_SOFT: ${OPENSEARCH_MEMLOCK_SOFT:--1}
  OPENSEARCH_MEMLOCK_HARD: ${OPENSEARCH_MEMLOCK_HARD:--1}
  OPENSEARCH_NOFILE_SOFT: ${OPENSEARCH_NOFILE_SOFT:-65536}
  OPENSEARCH_NOFILE_HARD: ${OPENSEARCH_NOFILE_HARD:-65536}
  NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
  NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
  NGINX_PORT: ${NGINX_PORT:-80}
  NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
  NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
  NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
  NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-"TLSv1.1 TLSv1.2 TLSv1.3"}
  NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
  NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
  NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
  NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
  NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
  NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
  CERTBOT_EMAIL: ${CERTBOT_EMAIL:-your_email@example.com}
  CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-your_domain.com}
  CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-}
  SSRF_HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
  SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
  SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
  SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
  COMPOSE_PROFILES: ${COMPOSE_PROFILES:-"${VECTOR_STORE:-weaviate}"}
  EXPOSE_NGINX_PORT: ${EXPOSE_NGINX_PORT:-80}
  EXPOSE_NGINX_SSL_PORT: ${EXPOSE_NGINX_SSL_PORT:-443}
  POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-}
  POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-}
  POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-}
  POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-}
  POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-}
  POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-}
  CSP_WHITELIST: ${CSP_WHITELIST:-}
  CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false}
  RETRIEVAL_TOP_N: ${RETRIEVAL_TOP_N:-0}
  MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100}
  HTTP_PROXY: ${HTTP_PROXY:-}
  HTTPS_PROXY: ${HTTPS_PROXY:-}
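  # Note on the ${VAR:-default} entries above: Docker Compose substitutes VAR
  # from the host environment (or an .env file) and falls back to the literal
  # default after ":-" when VAR is unset or empty. For example, with
  # MAX_SUBMIT_COUNT undefined, containers see MAX_SUBMIT_COUNT=100; exporting
  # MAX_SUBMIT_COUNT=200 before "docker compose up" overrides it.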
services:
  # API service
  api:
    image: langgenius/dify-api:0.13.2
    image: langgenius/dify-api:0.14.0
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'api' starts the API server.
      MODE: api
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
    depends_on:
      - db
      - redis
@@ -312,13 +412,16 @@ services:
  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:0.13.2
    image: langgenius/dify-api:0.14.0
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'worker' starts the Celery worker for processing the queue.
      MODE: worker
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
    depends_on:
      - db
      - redis
@@ -331,7 +434,7 @@ services:

  # Frontend web application.
  web:
    image: langgenius/dify-web:0.13.2
    image: langgenius/dify-web:0.14.0
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}
@@ -491,6 +594,16 @@ services:
      - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
      - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'

  # The TiDB vector store.
  # For production use, please refer to https://github.com/pingcap/tidb-docker-compose
  tidb:
    image: pingcap/tidb:v8.4.0
    profiles:
      - tidb
    command:
      - --store=unistore
    restart: always
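    # Note: the "tidb" profile above means this container only starts when
    # that profile is active, e.g. when COMPOSE_PROFILES (which defaults to
    # the value of VECTOR_STORE in the shared env block) is set to tidb
    # before running docker compose up.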

  # The Weaviate vector store.
  weaviate:
    image: semitechnologies/weaviate:1.19.0
@@ -0,0 +1,110 @@
#!/usr/bin/env python3
import os
import re
import sys


def parse_env_example(file_path):
    """
    Parses the .env.example file and returns a dictionary with variable names as keys and default values as values.
    """
    env_vars = {}
    with open(file_path, "r") as f:
        for line_number, line in enumerate(f, 1):
            line = line.strip()
            # Ignore empty lines and comments
            if not line or line.startswith("#"):
                continue
            # Use regex to parse KEY=VALUE
            match = re.match(r"^([^=]+)=(.*)$", line)
            if match:
                key = match.group(1).strip()
                value = match.group(2).strip()
                # Remove possible quotes around the value
                if (value.startswith('"') and value.endswith('"')) or (
                    value.startswith("'") and value.endswith("'")
                ):
                    value = value[1:-1]
                env_vars[key] = value
            else:
                print(f"Warning: Unable to parse line {line_number}: {line}")
    return env_vars
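
# Illustrative example (not part of the diff): given a .env.example containing
#   LOG_TZ=UTC
#   SECRET_KEY=
# parse_env_example returns {"LOG_TZ": "UTC", "SECRET_KEY": ""} -- surrounding
# quotes are stripped and empty defaults are preserved as empty strings.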


def generate_shared_env_block(env_vars, anchor_name="shared-api-worker-env"):
    """
    Generates a shared environment variables block as a YAML string.
    """
    lines = [f"x-shared-env: &{anchor_name}"]
    for key, default in env_vars.items():
        # If default value is empty, use ${KEY:-}
        if default == "":
            lines.append(f"  {key}: ${{{key}:-}}")
        else:
            # If default value contains special characters, wrap it in quotes
            if re.search(r"[:\s]", default):
                default = f'"{default}"'
            lines.append(f"  {key}: ${{{key}:-{default}}}")
    return "\n".join(lines)
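
# Illustrative example (not part of the diff): the input
# {"LOG_TZ": "UTC", "LOG_FORMAT": "%(asctime)s %(message)s"} renders as:
#   x-shared-env: &shared-api-worker-env
#     LOG_TZ: ${LOG_TZ:-UTC}
#     LOG_FORMAT: ${LOG_FORMAT:-"%(asctime)s %(message)s"}
# where the second default is quoted because it contains whitespace.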


def insert_shared_env(template_path, output_path, shared_env_block, header_comments):
    """
    Inserts the shared environment variables block and header comments into the template file,
    removing any existing x-shared-env anchors, and generates the final docker-compose.yaml file.
    """
    with open(template_path, "r") as f:
        template_content = f.read()

    # Remove existing x-shared-env: &shared-api-worker-env lines
    template_content = re.sub(
        r"^x-shared-env: &shared-api-worker-env\s*\n?",
        "",
        template_content,
        flags=re.MULTILINE,
    )

    # Prepare the final content with header comments and shared env block
    final_content = f"{header_comments}\n{shared_env_block}\n\n{template_content}"

    with open(output_path, "w") as f:
        f.write(final_content)
    print(f"Generated {output_path}")
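
# Design note: the output is always rebuilt from the template file, so
# rerunning the generator is idempotent; the re.sub above merely guards
# against a stray x-shared-env anchor line already present in the template.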


def main():
    env_example_path = ".env.example"
    template_path = "docker-compose-template.yaml"
    output_path = "docker-compose.yaml"
    anchor_name = "shared-api-worker-env"  # Can be modified as needed

    # Define header comments to be added at the top of docker-compose.yaml
    header_comments = (
        "# ==================================================================\n"
        "# WARNING: This file is auto-generated by generate_docker_compose\n"
        "# Do not modify this file directly. Instead, update the .env.example\n"
        "# or docker-compose-template.yaml and regenerate this file.\n"
        "# ==================================================================\n"
    )

    # Check if required files exist
    for path in [env_example_path, template_path]:
        if not os.path.isfile(path):
            print(f"Error: File {path} does not exist.")
            sys.exit(1)

    # Parse .env.example file
    env_vars = parse_env_example(env_example_path)

    if not env_vars:
        print("Warning: No environment variables found in .env.example.")

    # Generate shared environment variables block
    shared_env_block = generate_shared_env_block(env_vars, anchor_name)

    # Insert shared environment variables block and header comments into the template
    insert_shared_env(template_path, output_path, shared_env_block, header_comments)


if __name__ == "__main__":
    main()
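
# Usage sketch (assumed script name; the diff does not show the file path):
# run it from the directory containing .env.example and
# docker-compose-template.yaml -- in this repo, the docker/ directory:
#   python3 generate_docker_compose.py
# It overwrites docker-compose.yaml with the regenerated x-shared-env block.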
@@ -25,6 +25,7 @@ import { fetchAppDetail, fetchAppSSO } from '@/service/apps'
import AppContext, { useAppContext } from '@/context/app-context'
import Loading from '@/app/components/base/loading'
import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints'
import type { App } from '@/types/app'

export type IAppDetailLayoutProps = {
  children: React.ReactNode
@@ -41,12 +42,14 @@ const AppDetailLayout: FC<IAppDetailLayoutProps> = (props) => {
  const pathname = usePathname()
  const media = useBreakpoints()
  const isMobile = media === MediaType.mobile
  const { isCurrentWorkspaceEditor } = useAppContext()
  const { isCurrentWorkspaceEditor, isLoadingCurrentWorkspace } = useAppContext()
  const { appDetail, setAppDetail, setAppSiderbarExpand } = useStore(useShallow(state => ({
    appDetail: state.appDetail,
    setAppDetail: state.setAppDetail,
    setAppSiderbarExpand: state.setAppSiderbarExpand,
  })))
  const [isLoadingAppDetail, setIsLoadingAppDetail] = useState(false)
  const [appDetailRes, setAppDetailRes] = useState<App | null>(null)
  const [navigation, setNavigation] = useState<Array<{
    name: string
    href: string
@@ -107,33 +110,43 @@ const AppDetailLayout: FC<IAppDetailLayoutProps> = (props) => {

  useEffect(() => {
    setAppDetail()
    setIsLoadingAppDetail(true)
    fetchAppDetail({ url: '/apps', id: appId }).then((res) => {
      // redirection
      const canIEditApp = isCurrentWorkspaceEditor
      if (!canIEditApp && (pathname.endsWith('configuration') || pathname.endsWith('workflow') || pathname.endsWith('logs'))) {
        router.replace(`/app/${appId}/overview`)
        return
      }
      if ((res.mode === 'workflow' || res.mode === 'advanced-chat') && (pathname).endsWith('configuration')) {
        router.replace(`/app/${appId}/workflow`)
      }
      else if ((res.mode !== 'workflow' && res.mode !== 'advanced-chat') && (pathname).endsWith('workflow')) {
        router.replace(`/app/${appId}/configuration`)
      }
      else {
        setAppDetail({ ...res, enable_sso: false })
        setNavigation(getNavigations(appId, isCurrentWorkspaceEditor, res.mode))
        if (systemFeatures.enable_web_sso_switch_component && canIEditApp) {
          fetchAppSSO({ appId }).then((ssoRes) => {
            setAppDetail({ ...res, enable_sso: ssoRes.enabled })
          })
        }
      }
      setAppDetailRes(res)
    }).catch((e: any) => {
      if (e.status === 404)
        router.replace('/apps')
    }).finally(() => {
      setIsLoadingAppDetail(false)
    })
  }, [appId, isCurrentWorkspaceEditor, systemFeatures, getNavigations, pathname, router, setAppDetail])
  }, [appId, router, setAppDetail])
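  // The fetch effect above now only loads the app; redirection and SSO state
  // are handled in the next effect, which waits until isLoadingCurrentWorkspace
  // and isLoadingAppDetail settle before acting on isCurrentWorkspaceEditor.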
  useEffect(() => {
    if (!appDetailRes || isLoadingCurrentWorkspace || isLoadingAppDetail)
      return
    const res = appDetailRes
    // redirection
    const canIEditApp = isCurrentWorkspaceEditor
    if (!canIEditApp && (pathname.endsWith('configuration') || pathname.endsWith('workflow') || pathname.endsWith('logs'))) {
      router.replace(`/app/${appId}/overview`)
      return
    }
    if ((res.mode === 'workflow' || res.mode === 'advanced-chat') && (pathname).endsWith('configuration')) {
      router.replace(`/app/${appId}/workflow`)
    }
    else if ((res.mode !== 'workflow' && res.mode !== 'advanced-chat') && (pathname).endsWith('workflow')) {
      router.replace(`/app/${appId}/configuration`)
    }
    else {
      setAppDetail({ ...res, enable_sso: false })
      setNavigation(getNavigations(appId, isCurrentWorkspaceEditor, res.mode))
      if (systemFeatures.enable_web_sso_switch_component && canIEditApp) {
        fetchAppSSO({ appId }).then((ssoRes) => {
          setAppDetail({ ...res, enable_sso: ssoRes.enabled })
        })
      }
    }
  }, [appDetailRes, appId, getNavigations, isCurrentWorkspaceEditor, isLoadingAppDetail, isLoadingCurrentWorkspace, pathname, router, setAppDetail, systemFeatures.enable_web_sso_switch_component])

  useUnmount(() => {
    setAppDetail()
@@ -25,10 +25,10 @@ const AppCard = ({
      <div className='relative shrink-0'>
        <AppIcon
          size='large'
          iconType={app.app.icon_type}
          icon={app.app.icon}
          background={app.app.icon_background}
          imageUrl={app.app.icon_url}
          iconType={appBasicInfo.icon_type}
          icon={appBasicInfo.icon}
          background={appBasicInfo.icon_background}
          imageUrl={appBasicInfo.icon_url}
        />
        <AppTypeIcon wrapperClassName='absolute -bottom-0.5 -right-0.5 w-4 h-4 rounded-[4px] border border-divider-regular outline outline-components-panel-on-panel-item-bg'
          className='w-3 h-3' type={appBasicInfo.mode} />
@@ -3,7 +3,6 @@
import type { FC } from 'react'
import { init } from 'emoji-mart'
import data from '@emoji-mart/data'
import Image from 'next/image'
import { cva } from 'class-variance-authority'
import type { AppIconType } from '@/types/app'
import classNames from '@/utils/classnames'
@@ -62,7 +61,8 @@ const AppIcon: FC<AppIconProps> = ({
      onClick={onClick}
    >
      {isValidImageIcon
        ? <Image src={imageUrl} className="w-full h-full" alt="app icon" />
        // eslint-disable-next-line @next/next/no-img-element
        ? <img src={imageUrl} className="w-full h-full" alt="app icon" />
        : (innerIcon || ((icon && icon !== '') ? <em-emoji id={icon} /> : <em-emoji id='🤖' />))
      }
    </span>
@@ -675,7 +675,7 @@ Chat applications support session persistence, allowing previous chat history to
  </Col>
  <Col sticky>

    <CodeGroup title="Request" tag="GET" label="/conversations" targetCode={`curl -X GET '${props.appDetail.api_base_url}/conversations?user=abc-123&last_id=&limit=20'`}>
    <CodeGroup title="Request" tag="GET" label="/conversations" targetCode={`curl -X GET '${props.appDetail.api_base_url}/conversations?user=abc-123&last_id=&limit=20' \\\n --header 'Authorization: Bearer {api_key}'`}>

    ```bash {{ title: 'cURL' }}
    curl -X GET '${props.appDetail.api_base_url}/conversations?user=abc-123&last_id=&limit=20' \