# Your App secret key will be used for securely signing the session cookie
# Make sure you change this key for your deployment and use a strong key.
# You can generate a strong key using `openssl rand -base64 42`.
# Alternatively, you can set it with the `SECRET_KEY` environment variable.
SECRET_KEY=
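# Example bootstrap writing a freshly generated key into this file (a sketch for GNU sed;
# macOS/BSD sed needs `-i ''`):
# sed -i "s|^SECRET_KEY=.*|SECRET_KEY=$(openssl rand -base64 42)|" .env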

# Ensure UTF-8 encoding
LANG=en_US.UTF-8
LC_ALL=en_US.UTF-8
PYTHONIOENCODING=utf-8

# Console API base URL
CONSOLE_API_URL=http://localhost:5001
CONSOLE_WEB_URL=http://localhost:3000

# Service API base URL
SERVICE_API_URL=http://localhost:5001

# Web APP base URL
APP_WEB_URL=http://localhost:3000

# Files URL
FILES_URL=http://localhost:5001

# INTERNAL_FILES_URL is used by services running in Docker to reach the API file endpoints.
# For Docker Desktop (Mac/Windows), use http://host.docker.internal:5001 when the API runs on the host.
# For Docker Compose on Linux, use http://api:5001 when the API runs inside the Docker network.
INTERNAL_FILES_URL=http://host.docker.internal:5001
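# Example for Docker Compose on Linux (assumes the API service is named `api`):
# INTERNAL_FILES_URL=http://api:5001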

# TRIGGER URL
TRIGGER_URL=http://localhost:5001

# Time in seconds after which a signed file URL expires and its signature is rejected
FILES_ACCESS_TIMEOUT=300

# Collaboration mode toggle
ENABLE_COLLABORATION_MODE=false

# Access token expiration time in minutes
ACCESS_TOKEN_EXPIRE_MINUTES=60

# Refresh token expiration time in days
REFRESH_TOKEN_EXPIRE_DAYS=30

# Redis configuration
REDIS_HOST=localhost
REDIS_PORT=6379
# Optional: limit total connections in the connection pool (unset for default)
# REDIS_MAX_CONNECTIONS=200
REDIS_USERNAME=
REDIS_PASSWORD=difyai123456
REDIS_USE_SSL=false
# SSL configuration for Redis (when REDIS_USE_SSL=true)
# Options: CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
REDIS_SSL_CERT_REQS=CERT_NONE
# Path to CA certificate file for SSL verification
REDIS_SSL_CA_CERTS=
# Path to client certificate file for SSL authentication
REDIS_SSL_CERTFILE=
# Path to client private key file for SSL authentication
REDIS_SSL_KEYFILE=
REDIS_DB=0
# Optional global prefix for Redis keys, topics, streams, and Celery Redis transport artifacts.
# Leave empty to preserve the current unprefixed behavior.
REDIS_KEY_PREFIX=
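# Example (hypothetical prefix; assuming the prefix is applied verbatim, include your own
# separator such as a trailing colon):
# REDIS_KEY_PREFIX=dify_prod: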

# Redis Sentinel configuration.
REDIS_USE_SENTINEL=false
REDIS_SENTINELS=
REDIS_SENTINEL_SERVICE_NAME=
REDIS_SENTINEL_USERNAME=
REDIS_SENTINEL_PASSWORD=
REDIS_SENTINEL_SOCKET_TIMEOUT=0.1
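# Example (hypothetical addresses; REDIS_SENTINELS is assumed to be a comma-separated list
# of host:port pairs, and `mymaster` is the conventional Sentinel service name):
# REDIS_USE_SENTINEL=true
# REDIS_SENTINELS=192.168.1.10:26379,192.168.1.11:26379,192.168.1.12:26379
# REDIS_SENTINEL_SERVICE_NAME=mymaster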

# Redis Cluster configuration.
REDIS_USE_CLUSTERS=false
REDIS_CLUSTERS=
REDIS_CLUSTERS_PASSWORD=

REDIS_RETRY_RETRIES=3
REDIS_RETRY_BACKOFF_BASE=1.0
REDIS_RETRY_BACKOFF_CAP=10.0
REDIS_SOCKET_TIMEOUT=5.0
REDIS_SOCKET_CONNECT_TIMEOUT=5.0
REDIS_HEALTH_CHECK_INTERVAL=30

# Celery configuration
CELERY_BROKER_URL=redis://:difyai123456@localhost:${REDIS_PORT}/1
CELERY_BACKEND=redis
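# Example with a dedicated broker host (hypothetical hostname; keeps Celery on Redis DB 1
# while REDIS_DB=0 above serves the cache):
# CELERY_BROKER_URL=redis://:your-password@redis-broker.internal:6379/1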

# Database configuration
DB_TYPE=postgresql
DB_USERNAME=postgres
DB_PASSWORD=difyai123456
DB_HOST=localhost
DB_PORT=5432
DB_DATABASE=dify

SQLALCHEMY_POOL_PRE_PING=true
SQLALCHEMY_POOL_TIMEOUT=30

# Storage configuration
# Used to store uploaded files, private keys, ...
# Storage type: opendal, s3, aliyun-oss, azure-blob, baidu-obs, google-storage, huawei-obs, oci-storage, tencent-cos, volcengine-tos, supabase
STORAGE_TYPE=opendal

# Apache OpenDAL storage configuration, refer to https://github.com/apache/opendal
OPENDAL_SCHEME=fs
OPENDAL_FS_ROOT=storage

# S3 Storage configuration
S3_USE_AWS_MANAGED_IAM=false
S3_ENDPOINT=https://your-bucket-name.storage.s3.cloudflare.com
S3_BUCKET_NAME=your-bucket-name
S3_ACCESS_KEY=your-access-key
S3_SECRET_KEY=your-secret-key
S3_REGION=your-region
S3_ADDRESS_STYLE=auto
# Optional public base URL for objects in the bucket. When set, signed file
# previews are served by 302-redirecting to "<base>/<object-key>" so that bytes
# are delivered directly by the object store / CDN. Examples:
#   Cloudflare R2 custom domain: https://cdn.example.com
#   MinIO public endpoint: https://minio.example.com/your-bucket
#   Aliyun OSS public domain: https://your-bucket.oss-cn-hangzhou.aliyuncs.com
# Leave empty to keep the default API-streamed behavior.
S3_PUBLIC_BASE_URL=
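# Example of the resulting redirect (hypothetical domain and object key): with
# S3_PUBLIC_BASE_URL=https://cdn.example.com
# a signed preview of object key upload_files/abc123/cat.png is answered with
# "302 Location: https://cdn.example.com/upload_files/abc123/cat.png"
# after the API has validated the HMAC signature.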

# Workflow run and Conversation archive storage (S3-compatible)
ARCHIVE_STORAGE_ENABLED=false
ARCHIVE_STORAGE_ENDPOINT=
ARCHIVE_STORAGE_ARCHIVE_BUCKET=
ARCHIVE_STORAGE_EXPORT_BUCKET=
ARCHIVE_STORAGE_ACCESS_KEY=
ARCHIVE_STORAGE_SECRET_KEY=
ARCHIVE_STORAGE_REGION=auto

# Azure Blob Storage configuration
AZURE_BLOB_ACCOUNT_NAME=your-account-name
AZURE_BLOB_ACCOUNT_KEY=your-account-key
AZURE_BLOB_CONTAINER_NAME=your-container-name
AZURE_BLOB_ACCOUNT_URL=https://<your_account_name>.blob.core.windows.net

# Aliyun OSS Storage configuration
ALIYUN_OSS_BUCKET_NAME=your-bucket-name
ALIYUN_OSS_ACCESS_KEY=your-access-key
ALIYUN_OSS_SECRET_KEY=your-secret-key
ALIYUN_OSS_ENDPOINT=your-endpoint
ALIYUN_OSS_AUTH_VERSION=v1
ALIYUN_OSS_REGION=your-region
# Don't start with '/'. OSS doesn't support a leading slash in object names.
ALIYUN_OSS_PATH=your-path
# Optional CloudBox ID for Aliyun OSS. DO NOT enable it if you are not using CloudBox.
#ALIYUN_CLOUDBOX_ID=your-cloudbox-id

# Google Storage configuration
GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64=your-google-service-account-json-base64-string

# Tencent COS Storage configuration
TENCENT_COS_BUCKET_NAME=your-bucket-name
TENCENT_COS_SECRET_KEY=your-secret-key
TENCENT_COS_SECRET_ID=your-secret-id
TENCENT_COS_REGION=your-region
TENCENT_COS_SCHEME=your-scheme
TENCENT_COS_CUSTOM_DOMAIN=your-custom-domain

# Huawei OBS Storage Configuration
HUAWEI_OBS_BUCKET_NAME=your-bucket-name
HUAWEI_OBS_SECRET_KEY=your-secret-key
HUAWEI_OBS_ACCESS_KEY=your-access-key
HUAWEI_OBS_SERVER=your-server-url
HUAWEI_OBS_PATH_STYLE=false

# Baidu OBS Storage Configuration
BAIDU_OBS_BUCKET_NAME=your-bucket-name
BAIDU_OBS_SECRET_KEY=your-secret-key
BAIDU_OBS_ACCESS_KEY=your-access-key
BAIDU_OBS_ENDPOINT=your-server-url

# OCI Storage configuration
OCI_ENDPOINT=your-endpoint
OCI_BUCKET_NAME=your-bucket-name
OCI_ACCESS_KEY=your-access-key
OCI_SECRET_KEY=your-secret-key
OCI_REGION=your-region

# Volcengine TOS Storage configuration
VOLCENGINE_TOS_ENDPOINT=your-endpoint
VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name
VOLCENGINE_TOS_ACCESS_KEY=your-access-key
VOLCENGINE_TOS_SECRET_KEY=your-secret-key
VOLCENGINE_TOS_REGION=your-region

# Supabase Storage Configuration
SUPABASE_BUCKET_NAME=your-bucket-name
SUPABASE_API_KEY=your-access-key
SUPABASE_URL=your-server-url

# CORS configuration
WEB_API_CORS_ALLOW_ORIGINS=http://localhost:3000,*
CONSOLE_CORS_ALLOW_ORIGINS=http://localhost:3000,*
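# Example for production (hypothetical domains; drop the trailing `*` to restrict allowed origins):
# WEB_API_CORS_ALLOW_ORIGINS=https://app.example.com
# CONSOLE_CORS_ALLOW_ORIGINS=https://console.example.com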
# When the frontend and backend run on different subdomains, set COOKIE_DOMAIN to the shared parent domain (e.g., `example.com`). Leading dots are optional.
COOKIE_DOMAIN=
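# Example (console served from console.example.com, API from api.example.com):
# COOKIE_DOMAIN=example.com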

# Vector database configuration
# Supported values are `weaviate`, `oceanbase`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `opengauss`, `tablestore`, `vastbase`, `tidb`, `tidb_on_qdrant`, `baidu`, `lindorm`, `huawei_cloud`, `upstash`, `matrixone`, `hologres`.
VECTOR_STORE=weaviate
# Prefix used to create collection names in the vector database
VECTOR_INDEX_NAME_PREFIX=Vector_index
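# Example: to use Qdrant instead of the default, switch the store and fill in the Qdrant section below:
# VECTOR_STORE=qdrant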

# Weaviate configuration
WEAVIATE_ENDPOINT=http://localhost:8080
WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
WEAVIATE_BATCH_SIZE=100
WEAVIATE_TOKENIZATION=word

# OceanBase Vector configuration
OCEANBASE_VECTOR_HOST=127.0.0.1
OCEANBASE_VECTOR_PORT=2881
OCEANBASE_VECTOR_USER=root@test
OCEANBASE_VECTOR_PASSWORD=difyai123456
OCEANBASE_VECTOR_DATABASE=test
OCEANBASE_MEMORY_LIMIT=6G
OCEANBASE_ENABLE_HYBRID_SEARCH=false
OCEANBASE_FULLTEXT_PARSER=ik
SEEKDB_MEMORY_LIMIT=2G

# Qdrant configuration, use `http://localhost:6333` for local mode or `https://your-qdrant-cluster-url.qdrant.io` for remote mode
QDRANT_URL=http://localhost:6333
QDRANT_API_KEY=difyai123456
QDRANT_CLIENT_TIMEOUT=20
QDRANT_GRPC_ENABLED=false
QDRANT_GRPC_PORT=6334
QDRANT_REPLICATION_FACTOR=1

# Couchbase configuration
COUCHBASE_CONNECTION_STRING=127.0.0.1
COUCHBASE_USER=Administrator
COUCHBASE_PASSWORD=password
COUCHBASE_BUCKET_NAME=Embeddings
COUCHBASE_SCOPE_NAME=_default

# Hologres configuration
# access_key_id is used as the PG username, access_key_secret is used as the PG password
HOLOGRES_HOST=
HOLOGRES_PORT=80
HOLOGRES_DATABASE=
HOLOGRES_ACCESS_KEY_ID=
HOLOGRES_ACCESS_KEY_SECRET=
HOLOGRES_SCHEMA=public
HOLOGRES_TOKENIZER=jieba
HOLOGRES_DISTANCE_METHOD=Cosine
HOLOGRES_BASE_QUANTIZATION_TYPE=rabitq
HOLOGRES_MAX_DEGREE=64
HOLOGRES_EF_CONSTRUCTION=400

# Milvus configuration
MILVUS_URI=http://127.0.0.1:19530
MILVUS_TOKEN=
MILVUS_USER=root
MILVUS_PASSWORD=Milvus
MILVUS_ANALYZER_PARAMS=

# MyScale configuration
MYSCALE_HOST=127.0.0.1
MYSCALE_PORT=8123
MYSCALE_USER=default
MYSCALE_PASSWORD=
MYSCALE_DATABASE=default
MYSCALE_FTS_PARAMS=

# Relyt configuration
RELYT_HOST=127.0.0.1
RELYT_PORT=5432
RELYT_USER=postgres
RELYT_PASSWORD=postgres
RELYT_DATABASE=postgres

# Tencent configuration
TENCENT_VECTOR_DB_URL=http://127.0.0.1
TENCENT_VECTOR_DB_API_KEY=dify
TENCENT_VECTOR_DB_TIMEOUT=30
TENCENT_VECTOR_DB_USERNAME=dify
TENCENT_VECTOR_DB_DATABASE=dify
TENCENT_VECTOR_DB_SHARD=1
TENCENT_VECTOR_DB_REPLICAS=2
TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH=false

# Elasticsearch configuration
ELASTICSEARCH_HOST=127.0.0.1
ELASTICSEARCH_PORT=9200
ELASTICSEARCH_USERNAME=elastic
ELASTICSEARCH_PASSWORD=elastic

# PGVECTO_RS configuration
PGVECTO_RS_HOST=localhost
PGVECTO_RS_PORT=5431
PGVECTO_RS_USER=postgres
PGVECTO_RS_PASSWORD=difyai123456
PGVECTO_RS_DATABASE=postgres

# PGVector configuration
PGVECTOR_HOST=127.0.0.1
PGVECTOR_PORT=5433
PGVECTOR_USER=postgres
PGVECTOR_PASSWORD=postgres
PGVECTOR_DATABASE=postgres
PGVECTOR_MIN_CONNECTION=1
PGVECTOR_MAX_CONNECTION=5

# TableStore Vector configuration
TABLESTORE_ENDPOINT=https://instance-name.cn-hangzhou.ots.aliyuncs.com
TABLESTORE_INSTANCE_NAME=instance-name
TABLESTORE_ACCESS_KEY_ID=xxx
TABLESTORE_ACCESS_KEY_SECRET=xxx
TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE=false

# TiDB Vector configuration
TIDB_VECTOR_HOST=xxx.eu-central-1.xxx.aws.tidbcloud.com
TIDB_VECTOR_PORT=4000
TIDB_VECTOR_USER=xxx.root
TIDB_VECTOR_PASSWORD=xxxxxx
TIDB_VECTOR_DATABASE=dify

# TiDB on Qdrant configuration
TIDB_ON_QDRANT_URL=http://127.0.0.1
TIDB_ON_QDRANT_API_KEY=dify
TIDB_ON_QDRANT_CLIENT_TIMEOUT=20
TIDB_ON_QDRANT_GRPC_ENABLED=false
TIDB_ON_QDRANT_GRPC_PORT=6334
TIDB_PUBLIC_KEY=dify
TIDB_PRIVATE_KEY=dify
TIDB_API_URL=http://127.0.0.1
TIDB_IAM_API_URL=http://127.0.0.1
TIDB_REGION=regions/aws-us-east-1
TIDB_PROJECT_ID=dify
TIDB_SPEND_LIMIT=100

# Chroma configuration
CHROMA_HOST=127.0.0.1
CHROMA_PORT=8000
CHROMA_TENANT=default_tenant
CHROMA_DATABASE=default_database
CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider
CHROMA_AUTH_CREDENTIALS=difyai123456

# AnalyticDB configuration
ANALYTICDB_KEY_ID=your-ak
ANALYTICDB_KEY_SECRET=your-sk
ANALYTICDB_REGION_ID=cn-hangzhou
ANALYTICDB_INSTANCE_ID=gp-ab123456
ANALYTICDB_ACCOUNT=testaccount
ANALYTICDB_PASSWORD=testpassword
ANALYTICDB_NAMESPACE=dify
ANALYTICDB_NAMESPACE_PASSWORD=difypassword
ANALYTICDB_HOST=gp-test.aliyuncs.com
ANALYTICDB_PORT=5432
ANALYTICDB_MIN_CONNECTION=1
ANALYTICDB_MAX_CONNECTION=5

# OpenSearch configuration
OPENSEARCH_HOST=127.0.0.1
OPENSEARCH_PORT=9200
OPENSEARCH_USER=admin
OPENSEARCH_PASSWORD=admin
OPENSEARCH_SECURE=true
OPENSEARCH_VERIFY_CERTS=true

# Baidu configuration
BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287
BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000
BAIDU_VECTOR_DB_ACCOUNT=root
BAIDU_VECTOR_DB_API_KEY=dify
BAIDU_VECTOR_DB_DATABASE=dify
BAIDU_VECTOR_DB_SHARD=1
BAIDU_VECTOR_DB_REPLICAS=3
BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER=DEFAULT_ANALYZER
BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE=COARSE_MODE
BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT=500
BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT_RATIO=0.05
BAIDU_VECTOR_DB_REBUILD_INDEX_TIMEOUT_IN_SECONDS=300

# Upstash configuration
UPSTASH_VECTOR_URL=your-server-url
UPSTASH_VECTOR_TOKEN=your-access-token

# ViKingDB configuration
VIKINGDB_ACCESS_KEY=your-ak
VIKINGDB_SECRET_KEY=your-sk
VIKINGDB_REGION=cn-shanghai
VIKINGDB_HOST=api-vikingdb.xxx.volces.com
VIKINGDB_SCHEMA=http
VIKINGDB_CONNECTION_TIMEOUT=30
VIKINGDB_SOCKET_TIMEOUT=30

# Matrixone configuration
MATRIXONE_HOST=127.0.0.1
MATRIXONE_PORT=6001
MATRIXONE_USER=dump
MATRIXONE_PASSWORD=111
MATRIXONE_DATABASE=dify

# Lindorm configuration
LINDORM_URL=http://ld-*******************-proxy-search-pub.lindorm.aliyuncs.com:30070
LINDORM_USERNAME=admin
LINDORM_PASSWORD=admin
LINDORM_USING_UGC=True
LINDORM_QUERY_TIMEOUT=1

# AlibabaCloud MySQL Vector configuration
ALIBABACLOUD_MYSQL_HOST=127.0.0.1
ALIBABACLOUD_MYSQL_PORT=3306
ALIBABACLOUD_MYSQL_USER=root
ALIBABACLOUD_MYSQL_PASSWORD=root
ALIBABACLOUD_MYSQL_DATABASE=dify
ALIBABACLOUD_MYSQL_MAX_CONNECTION=5
ALIBABACLOUD_MYSQL_HNSW_M=6

# openGauss configuration
OPENGAUSS_HOST=127.0.0.1
OPENGAUSS_PORT=6600
OPENGAUSS_USER=postgres
OPENGAUSS_PASSWORD=Dify@123
OPENGAUSS_DATABASE=dify
OPENGAUSS_MIN_CONNECTION=1
OPENGAUSS_MAX_CONNECTION=5

# Upload configuration
UPLOAD_FILE_SIZE_LIMIT=15
UPLOAD_FILE_BATCH_LIMIT=5
UPLOAD_IMAGE_FILE_SIZE_LIMIT=10
UPLOAD_VIDEO_FILE_SIZE_LIMIT=100
UPLOAD_AUDIO_FILE_SIZE_LIMIT=50

# Comma-separated list of file extensions blocked from upload for security reasons.
# Extensions should be lowercase without dots (e.g., exe,bat,sh,dll).
# Empty by default to allow all file types.
# Recommended: exe,bat,cmd,com,scr,vbs,ps1,msi,dll
UPLOAD_FILE_EXTENSION_BLACKLIST=
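# Example applying the recommended blacklist above:
# UPLOAD_FILE_EXTENSION_BLACKLIST=exe,bat,cmd,com,scr,vbs,ps1,msi,dll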

# Model configuration
MULTIMODAL_SEND_FORMAT=base64
PROMPT_GENERATION_MAX_TOKENS=512
CODE_GENERATION_MAX_TOKENS=1024
PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false

# Mail configuration; supported types: resend, smtp, sendgrid
MAIL_TYPE=
# If using SendGrid, use the 'from' field for authentication if necessary.
MAIL_DEFAULT_SEND_FROM=no-reply <no-reply@dify.ai>
# Resend configuration
RESEND_API_KEY=
RESEND_API_URL=https://api.resend.com
# SMTP configuration
SMTP_SERVER=smtp.gmail.com
SMTP_PORT=465
SMTP_USERNAME=123
SMTP_PASSWORD=abc
SMTP_USE_TLS=true
SMTP_OPPORTUNISTIC_TLS=false
# Optional: override the local hostname used for SMTP HELO/EHLO
SMTP_LOCAL_HOSTNAME=
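# Example for a STARTTLS provider on port 587 (hypothetical host; the defaults above
# assume implicit TLS on port 465):
# SMTP_SERVER=smtp.example.com
# SMTP_PORT=587
# SMTP_USE_TLS=true
# SMTP_OPPORTUNISTIC_TLS=true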
# SendGrid configuration
SENDGRID_API_KEY=
# Sentry configuration
SENTRY_DSN=

# DEBUG
DEBUG=false
ENABLE_REQUEST_LOGGING=False
SQLALCHEMY_ECHO=false

# Notion import configuration; supports public and internal integration types
NOTION_INTEGRATION_TYPE=public
NOTION_CLIENT_SECRET=your-client-secret
NOTION_CLIENT_ID=your-client-id
NOTION_INTERNAL_SECRET=your-internal-secret

ETL_TYPE=dify
UNSTRUCTURED_API_URL=
UNSTRUCTURED_API_KEY=
SCARF_NO_ANALYTICS=true

# SSRF proxy configuration
SSRF_PROXY_HTTP_URL=
SSRF_PROXY_HTTPS_URL=
SSRF_DEFAULT_MAX_RETRIES=3
SSRF_DEFAULT_TIME_OUT=5
SSRF_DEFAULT_CONNECT_TIME_OUT=5
SSRF_DEFAULT_READ_TIME_OUT=5
SSRF_DEFAULT_WRITE_TIME_OUT=5
SSRF_POOL_MAX_CONNECTIONS=100
SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS=20
SSRF_POOL_KEEPALIVE_EXPIRY=5.0

BATCH_UPLOAD_LIMIT=10
KEYWORD_DATA_SOURCE_TYPE=database

# Workflow file upload limit
WORKFLOW_FILE_UPLOAD_LIMIT=10

# CODE EXECUTION CONFIGURATION
CODE_EXECUTION_ENDPOINT=http://127.0.0.1:8194
CODE_EXECUTION_API_KEY=dify-sandbox
CODE_EXECUTION_SSL_VERIFY=True
CODE_EXECUTION_POOL_MAX_CONNECTIONS=100
CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS=20
CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY=5.0
CODE_EXECUTION_CONNECT_TIMEOUT=10
CODE_EXECUTION_READ_TIMEOUT=60
CODE_EXECUTION_WRITE_TIMEOUT=10
CODE_MAX_NUMBER=9223372036854775807
CODE_MIN_NUMBER=-9223372036854775808
CODE_MAX_STRING_LENGTH=400000
TEMPLATE_TRANSFORM_MAX_LENGTH=400000
CODE_MAX_STRING_ARRAY_LENGTH=30
CODE_MAX_OBJECT_ARRAY_LENGTH=30
CODE_MAX_NUMBER_ARRAY_LENGTH=1000

# API Tool configuration
API_TOOL_DEFAULT_CONNECT_TIMEOUT=10
API_TOOL_DEFAULT_READ_TIMEOUT=60

# HTTP Node configuration
HTTP_REQUEST_MAX_CONNECT_TIMEOUT=300
HTTP_REQUEST_MAX_READ_TIMEOUT=600
HTTP_REQUEST_MAX_WRITE_TIMEOUT=600
HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760
HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576
HTTP_REQUEST_NODE_SSL_VERIFY=True

# Webhook request configuration
WEBHOOK_REQUEST_BODY_MAX_SIZE=10485760

# Respect X-* headers to redirect clients
RESPECT_XFORWARD_HEADERS_ENABLED=false

# Log file path
LOG_FILE=
# Log file max size in MB
LOG_FILE_MAX_SIZE=20
# Log file max backup count
LOG_FILE_BACKUP_COUNT=5
# Log date format
LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S
# Log timezone
LOG_TZ=UTC
# Log output format: text or json
LOG_OUTPUT_FORMAT=text
# Log format
LOG_FORMAT=%(asctime)s,%(msecs)d %(levelname)-2s [%(filename)s:%(lineno)d] %(req_id)s %(message)s
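# Example line produced by the format above (hypothetical values):
# 2025-01-01 12:00:00,123 INFO [message_service.py:42] req-1a2b3c message generated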

# Indexing configuration
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000

# Workflow runtime configuration
WORKFLOW_MAX_EXECUTION_STEPS=500
WORKFLOW_MAX_EXECUTION_TIME=1200
WORKFLOW_CALL_MAX_DEPTH=5
MAX_VARIABLE_SIZE=204800

# GraphEngine Worker Pool Configuration
# Minimum number of workers per GraphEngine instance (default: 1)
GRAPH_ENGINE_MIN_WORKERS=1
# Maximum number of workers per GraphEngine instance (default: 10)
GRAPH_ENGINE_MAX_WORKERS=10
# Queue depth threshold that triggers worker scale-up (default: 3)
GRAPH_ENGINE_SCALE_UP_THRESHOLD=3
# Seconds of idle time before scaling down workers (default: 5.0)
GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME=5.0

# Workflow storage configuration
# Options: rdbms, hybrid
#   rdbms: use only the relational database (default)
#   hybrid: save new data to object storage, read from both object storage and the RDBMS
WORKFLOW_NODE_EXECUTION_STORAGE=rdbms

# Repository configuration
# Core workflow execution repository implementation
CORE_WORKFLOW_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository

# Core workflow node execution repository implementation
CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository

# API workflow node execution repository implementation
API_WORKFLOW_NODE_EXECUTION_REPOSITORY=repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository

# API workflow run repository implementation
API_WORKFLOW_RUN_REPOSITORY=repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository
# Workflow log cleanup configuration
# Enable automatic cleanup of workflow run logs to manage database size
WORKFLOW_LOG_CLEANUP_ENABLED=false
# Number of days to retain workflow run logs (default: 30 days)
WORKFLOW_LOG_RETENTION_DAYS=30
# Batch size for workflow log cleanup operations (default: 100)
WORKFLOW_LOG_CLEANUP_BATCH_SIZE=100
# Comma-separated list of workflow IDs to clean logs for
WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS=
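# Example (hypothetical workflow IDs):
# WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS=1f2e3d4c-0000-4000-8000-000000000001,1f2e3d4c-0000-4000-8000-000000000002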

# App configuration
APP_MAX_EXECUTION_TIME=1200
APP_DEFAULT_ACTIVE_REQUESTS=0
APP_MAX_ACTIVE_REQUESTS=0

# Aliyun SLS Logstore Configuration
# Aliyun Access Key ID
ALIYUN_SLS_ACCESS_KEY_ID=
# Aliyun Access Key Secret
ALIYUN_SLS_ACCESS_KEY_SECRET=
# Aliyun SLS Endpoint (e.g., cn-hangzhou.log.aliyuncs.com)
ALIYUN_SLS_ENDPOINT=
# Aliyun SLS Region (e.g., cn-hangzhou)
ALIYUN_SLS_REGION=
# Aliyun SLS Project Name
ALIYUN_SLS_PROJECT_NAME=
# Number of days to retain workflow run logs (default: 365 days; 3650 for permanent storage)
ALIYUN_SLS_LOGSTORE_TTL=365
# Enable dual-write to both the SLS LogStore and the SQL database (default: false)
LOGSTORE_DUAL_WRITE_ENABLED=false
# Enable dual-read fallback to the SQL database when LogStore returns no results (default: true)
# Useful for migration scenarios where historical data exists only in the SQL database
LOGSTORE_DUAL_READ_ENABLED=true
# Control flag for whether to write the `graph` field to LogStore.
# If LOGSTORE_ENABLE_PUT_GRAPH_FIELD is "true", the full `graph` field is written;
# otherwise an empty {} is written instead. Defaults to writing the `graph` field.
LOGSTORE_ENABLE_PUT_GRAPH_FIELD=true

# Celery beat configuration
CELERY_BEAT_SCHEDULER_TIME=1

# Celery scheduled tasks configuration
ENABLE_CLEAN_EMBEDDING_CACHE_TASK=false
ENABLE_CLEAN_UNUSED_DATASETS_TASK=false
ENABLE_CREATE_TIDB_SERVERLESS_TASK=false
ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK=false
ENABLE_CLEAN_MESSAGES=false
ENABLE_WORKFLOW_RUN_CLEANUP_TASK=false
ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK=false
ENABLE_DATASETS_QUEUE_MONITOR=false
ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK=true
ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK=true
# Interval in minutes for polling scheduled workflows (default: 1)
WORKFLOW_SCHEDULE_POLLER_INTERVAL=1
WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE=100
# Maximum number of scheduled workflows to dispatch per tick (0 for unlimited)
WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK=0

# Position configuration
POSITION_TOOL_PINS=
POSITION_TOOL_INCLUDES=
POSITION_TOOL_EXCLUDES=

POSITION_PROVIDER_PINS=
POSITION_PROVIDER_INCLUDES=
POSITION_PROVIDER_EXCLUDES=

# Plugin configuration
PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi
PLUGIN_DAEMON_URL=http://127.0.0.1:5002
PLUGIN_REMOTE_INSTALL_PORT=5003
PLUGIN_REMOTE_INSTALL_HOST=localhost
PLUGIN_MAX_PACKAGE_SIZE=15728640
PLUGIN_MODEL_SCHEMA_CACHE_TTL=3600
INNER_API_KEY_FOR_PLUGIN=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1

# Marketplace configuration
MARKETPLACE_ENABLED=true
MARKETPLACE_API_URL=https://marketplace.dify.ai

# Creators Platform configuration
CREATORS_PLATFORM_FEATURES_ENABLED=true
CREATORS_PLATFORM_API_URL=https://creators.dify.ai
CREATORS_PLATFORM_OAUTH_CLIENT_ID=

# Endpoint configuration
ENDPOINT_URL_TEMPLATE=http://localhost:5002/e/{hook_id}

# Token expiry in minutes for reset-password, email-register, change-email, and owner-transfer flows
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5
EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES=5
CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES=5
OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES=5

CREATE_TIDB_SERVICE_JOB_ENABLED=false

# Maximum number of tasks submitted to the thread pool for parallel node execution
MAX_SUBMIT_COUNT=100
# Login lockout duration in seconds
LOGIN_LOCKOUT_DURATION=86400

# Enable OpenTelemetry
ENABLE_OTEL=false
OTLP_TRACE_ENDPOINT=
OTLP_METRIC_ENDPOINT=
OTLP_BASE_ENDPOINT=http://localhost:4318
OTLP_API_KEY=
OTEL_EXPORTER_OTLP_PROTOCOL=
OTEL_EXPORTER_TYPE=otlp
OTEL_SAMPLING_RATE=0.1
OTEL_BATCH_EXPORT_SCHEDULE_DELAY=5000
OTEL_MAX_QUEUE_SIZE=2048
OTEL_MAX_EXPORT_BATCH_SIZE=512
OTEL_METRIC_EXPORT_INTERVAL=60000
OTEL_BATCH_EXPORT_TIMEOUT=10000
OTEL_METRIC_EXPORT_TIMEOUT=30000

# Prevent clickjacking
ALLOW_EMBED=false

# Dataset queue monitor configuration
QUEUE_MONITOR_THRESHOLD=200
# You can configure multiple addresses, separated by commas, e.g., test1@dify.ai,test2@dify.ai
QUEUE_MONITOR_ALERT_EMAILS=
# Monitor interval in minutes (default: 30)
QUEUE_MONITOR_INTERVAL=30

# Swagger UI configuration
SWAGGER_UI_ENABLED=true
SWAGGER_UI_PATH=/swagger-ui.html

# Whether to encrypt dataset IDs when exporting DSL files (default: true)
# Set to false to export dataset IDs as plain text for easier cross-environment import
DSL_EXPORT_ENCRYPT_DATASET_ID=true

# Tenant-isolated task queue configuration
TENANT_ISOLATED_TASK_CONCURRENCY=1

# Maximum number of segments for the dataset segments API (0 for unlimited)
DATASET_MAX_SEGMENTS_PER_REQUEST=0

# Multimodal knowledge base limits
SINGLE_CHUNK_ATTACHMENT_LIMIT=10
ATTACHMENT_IMAGE_FILE_SIZE_LIMIT=2
ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT=60
IMAGE_FILE_BATCH_LIMIT=10

# Maximum allowed CSV file size for annotation import in megabytes
ANNOTATION_IMPORT_FILE_SIZE_LIMIT=2
# Maximum number of annotation records allowed in a single import
ANNOTATION_IMPORT_MAX_RECORDS=10000
# Minimum number of annotation records required in a single import
ANNOTATION_IMPORT_MIN_RECORDS=1
ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE=5
ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR=20
# Maximum number of concurrent annotation import tasks per tenant
ANNOTATION_IMPORT_MAX_CONCURRENT=5
# Sandbox expired records cleanup configuration
SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD=21
SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE=1000
SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_MAX_INTERVAL=200
SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS=30
SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL=90000


# Redis URL used for the event bus between the API and the Celery worker.
# Defaults to a URL constructed from the `REDIS_*` settings above.
EVENT_BUS_REDIS_URL=
# Event transport type. Options are:
#
# - pubsub: normal Pub/Sub (at-most-once)
# - sharded: sharded Pub/Sub (at-most-once)
# - streams: Redis Streams (at-least-once, recommended to avoid subscriber races)
#
# Note: Before enabling 'streams' in production, estimate your expected event volume and retention needs.
# Configure Redis memory limits and stream trimming appropriately (e.g., MAXLEN and key expiry) to reduce
# the risk of data loss from Redis auto-eviction under memory pressure.
# Also accepts ENV: EVENT_BUS_REDIS_CHANNEL_TYPE.
EVENT_BUS_REDIS_CHANNEL_TYPE=pubsub
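# Example opting into at-least-once delivery on a dedicated Redis instance (hypothetical host and DB):
# EVENT_BUS_REDIS_URL=redis://:your-password@redis-events.internal:6379/2
# EVENT_BUS_REDIS_CHANNEL_TYPE=streams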
# Whether to use Redis Cluster mode when Redis is used as the event bus.
# It's highly recommended to enable this for large deployments.
EVENT_BUS_REDIS_USE_CLUSTERS=false

# Whether to enable the human input timeout check task
ENABLE_HUMAN_INPUT_TIMEOUT_TASK=true
# Human input timeout check interval in minutes
HUMAN_INPUT_TIMEOUT_TASK_INTERVAL=1