diff --git a/api/.env.example b/api/.env.example
index 5f307dc106..369b463255 100644
--- a/api/.env.example
+++ b/api/.env.example
@@ -16,6 +16,9 @@ API_URL=http://127.0.0.1:5001
 
 # Web APP base URL
 APP_URL=http://127.0.0.1:5001
 
+# OpenAI API base URL (override to route requests through a proxy)
+OPENAI_API_BASE=https://api.openai.com/v1
+
 # celery configuration
 CELERY_BROKER_URL=redis://:difyai123456@localhost:6379/1
diff --git a/api/Dockerfile b/api/Dockerfile
index fb451129d1..5f139b72f0 100644
--- a/api/Dockerfile
+++ b/api/Dockerfile
@@ -8,6 +8,7 @@ ENV DEPLOY_ENV PRODUCTION
 ENV CONSOLE_URL http://127.0.0.1:5001
 ENV API_URL http://127.0.0.1:5001
 ENV APP_URL http://127.0.0.1:5001
+ENV OPENAI_API_BASE https://api.openai.com/v1
 
 EXPOSE 5001
 
diff --git a/api/config.py b/api/config.py
index 04c44f2447..b7a3c6cf8d 100644
--- a/api/config.py
+++ b/api/config.py
@@ -29,6 +29,7 @@ DEFAULTS = {
     'CONSOLE_URL': 'https://cloud.dify.ai',
     'API_URL': 'https://api.dify.ai',
     'APP_URL': 'https://udify.app',
+    'OPENAI_API_BASE': 'https://api.openai.com/v1',
     'STORAGE_TYPE': 'local',
     'STORAGE_LOCAL_PATH': 'storage',
     'CHECK_UPDATE_URL': 'https://updates.dify.ai',
@@ -74,6 +75,7 @@ class Config:
         self.CONSOLE_URL = get_env('CONSOLE_URL')
         self.API_URL = get_env('API_URL')
         self.APP_URL = get_env('APP_URL')
+        self.OPENAI_API_BASE = get_env('OPENAI_API_BASE')
         self.CURRENT_VERSION = "0.2.0"
         self.COMMIT_SHA = get_env('COMMIT_SHA')
         self.EDITION = "SELF_HOSTED"
diff --git a/api/core/embedding/openai_embedding.py b/api/core/embedding/openai_embedding.py
index 0938397423..134e8504d5 100644
--- a/api/core/embedding/openai_embedding.py
+++ b/api/core/embedding/openai_embedding.py
@@ -1,5 +1,6 @@
 from typing import Optional, Any, List
+from flask import current_app
 
 import openai
 from llama_index.embeddings.base import BaseEmbedding
 from llama_index.embeddings.openai import OpenAIEmbeddingMode, OpenAIEmbeddingModelType, _QUERY_MODE_MODEL_DICT, \
@@ -111,6 +112,9 @@ class OpenAIEmbedding(BaseEmbedding):
         self.model = OpenAIEmbeddingModelType(model)
         self.deployment_name = deployment_name
         self.openai_api_key = openai_api_key
+        # Route OpenAI requests through the configured API base (proxy support).
+        # NOTE: must read OPENAI_API_BASE, not API_URL — API_URL is this app's own URL.
+        if current_app.config['OPENAI_API_BASE'] is not None:
+            openai.api_base = current_app.config['OPENAI_API_BASE']
 
     @handle_llm_exceptions
     def _get_query_embedding(self, query: str) -> List[float]:
diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml
index 8f9de12ee9..6ee5febde0 100644
--- a/docker/docker-compose.yaml
+++ b/docker/docker-compose.yaml
@@ -17,6 +17,8 @@ services:
       API_URL: http://localhost
       # The URL for Web APP, refers to the Web App base URL of WEB service.
       APP_URL: http://localhost
+      # OpenAI API base URL (proxy support).
+      OPENAI_API_BASE: https://api.openai.com/v1
       # When enabled, migrations will be executed prior to application startup and the application will start after the migrations have completed.
       MIGRATION_ENABLED: 'true'
       # The configurations of postgres database connection.
@@ -119,6 +121,8 @@
       API_URL: http://localhost
       # The URL for Web APP, refers to the Web App base URL of WEB service.
       APP_URL: http://localhost
+      # OpenAI API base URL (proxy support).
+      OPENAI_API_BASE: https://api.openai.com/v1
       # The configurations of postgres database connection.
       # It is consistent with the configuration in the 'db' service below.
       DB_USERNAME: postgres